/*
 * ircd-ratbox: A slightly useful ircd.
 * kqueue.c: FreeBSD kqueue compatible network routines.
 *
 * Copyright (C) 1990 Jarkko Oikarinen and University of Oulu, Co Center
 * Copyright (C) 1996-2002 Hybrid Development Team
 * Copyright (C) 2001 Adrian Chadd <adrian@creative.net.au>
 * Copyright (C) 2002-2005 ircd-ratbox development team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 * USA
 *
 * $Id: kqueue.c 25364 2008-05-14 17:55:22Z jilles $
 */

#include <libratbox_config.h>
#include <ratbox_lib.h>
#include <commio-int.h>
#include <event-int.h>

#if defined(HAVE_SYS_EVENT_H) && (HAVE_KEVENT)

#include <sys/event.h>

/* jlemon goofed up and didn't add EV_SET until fbsd 4.3 */

#ifndef EV_SET
#define EV_SET(kevp, a, b, c, d, e, f) do {	\
	(kevp)->ident = (a);			\
	(kevp)->filter = (b);			\
	(kevp)->flags = (c);			\
	(kevp)->fflags = (d);			\
	(kevp)->data = (e);			\
	(kevp)->udata = (f);			\
} while(0)
#endif

#ifdef EVFILT_TIMER
#define KQUEUE_SCHED_EVENT
#endif


static void kq_update_events(rb_fde_t *, short, PF *);
static int kq;
static struct timespec zero_timespec;

static struct kevent *kqlst;	/* kevent buffer */
static struct kevent *kqout;	/* kevent output buffer */
static int kqmax;		/* max structs to buffer */
static int kqoff;		/* offset into the buffer */

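/*
 * Pending filter changes accumulate in kqlst (kqoff entries deep) and are
 * normally handed to the kernel as the changelist of the next kevent()
 * call in rb_select_kqueue().  If the buffer fills up between polls,
 * kq_update_events() flushes it immediately.  Events come back in kqout.
 */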

int
rb_setup_fd_kqueue(rb_fde_t * F)
{
	/* nothing to set up per-fd for kqueue */
	return 0;
}

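/*
 * Queue a filter change for F when interest transitions between "handler
 * set" and "no handler".  Registrations use EV_ONESHOT, so a fired event
 * implicitly deregisters itself and the handler must setselect again.
 */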
static void
kq_update_events(rb_fde_t * F, short filter, PF * handler)
{
	PF *cur_handler;
	int kep_flags;

	switch (filter)
	{
	case EVFILT_READ:
		cur_handler = F->read_handler;
		break;
	case EVFILT_WRITE:
		cur_handler = F->write_handler;
		break;
	default:
		/* XXX bad! -- adrian */
		return;
	}

	if((cur_handler == NULL && handler != NULL) || (cur_handler != NULL && handler == NULL))
	{
		struct kevent *kep;

		kep = kqlst + kqoff;

		if(handler != NULL)
		{
			kep_flags = EV_ADD | EV_ONESHOT;
		}
		else
		{
			kep_flags = EV_DELETE;
		}

		EV_SET(kep, (uintptr_t) F->fd, filter, kep_flags, 0, 0, (void *) F);

		if(++kqoff == kqmax)
		{
			int ret, i;

			/* Add them one at a time, because there may be
			 * already closed fds in it.  The kernel will try
			 * to report invalid fds in the output; if there
			 * is no space, it silently stops processing the
			 * array at that point.  We cannot give output space
			 * because that would also return events we cannot
			 * process at this point.
			 */
			for (i = 0; i < kqoff; i++)
			{
				ret = kevent(kq, kqlst + i, 1, NULL, 0, &zero_timespec);
				/* jdc -- someone needs to do error checking... */
				/* EBADF is normal here -- jilles */
				if(ret == -1 && errno != EBADF)
					rb_lib_log("kq_update_events(): kevent(): %s", strerror(errno));
			}
			kqoff = 0;
		}
	}
}


/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
/* Public functions */


/*
 * rb_init_netio
 *
 * This is a needed exported function which will be called to initialise
 * the network loop code.
 */
int
rb_init_netio_kqueue(void)
{
	kq = kqueue();
	if(kq < 0)
	{
		return errno;
	}
	kqmax = getdtablesize();
	kqlst = rb_malloc(sizeof(struct kevent) * kqmax);
	kqout = rb_malloc(sizeof(struct kevent) * kqmax);
	rb_open(kq, RB_FD_UNKNOWN, "kqueue fd");
	zero_timespec.tv_sec = 0;
	zero_timespec.tv_nsec = 0;

	return 0;
}
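
/*
 * Typical call sequence (illustrative sketch; in ratbox these entry points
 * are normally reached through the generic rb_init_netio()/rb_select()
 * dispatch rather than called directly):
 *
 *	if(rb_init_netio_kqueue() != 0)
 *		... bail out ...
 *	for(;;)
 *		rb_select_kqueue(1000);		poll with a 1000ms timeout
 */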

/*
 * rb_setselect
 *
 * This is a needed exported function which will be called to register
 * and deregister interest in a pending IO state for a given FD.
 */
void
rb_setselect_kqueue(rb_fde_t * F, unsigned int type, PF * handler, void *client_data)
{
	lrb_assert(IsFDOpen(F));

	if(type & RB_SELECT_READ)
	{
		kq_update_events(F, EVFILT_READ, handler);
		F->read_handler = handler;
		F->read_data = client_data;
	}
	if(type & RB_SELECT_WRITE)
	{
		kq_update_events(F, EVFILT_WRITE, handler);
		F->write_handler = handler;
		F->write_data = client_data;
	}
}
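
/*
 * Example (illustrative; client_read_cb is a hypothetical handler): since
 * kq_update_events() registers one-shot filters, a handler that wants the
 * next event must re-arm itself:
 *
 *	static void
 *	client_read_cb(rb_fde_t *F, void *data)
 *	{
 *		... read what is available, then ...
 *		rb_setselect_kqueue(F, RB_SELECT_READ, client_read_cb, data);
 *	}
 */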

/*
 * Check all connections for new connections and input data that is to be
 * processed. Also check for connections with data queued and whether we can
 * write it out.
 */

/*
 * rb_select
 *
 * Called to do the new-style IO, courtesy of squid (like most of this
 * new IO code). This routine handles the stuff we've hidden in
 * rb_setselect and fd_table[] and calls callbacks for IO ready
 * events.
 */

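/*
 * Note: the pending changes in kqlst are submitted as the changelist of
 * this kevent() call, so registration and polling usually share one
 * syscall.  Handlers are cleared before being invoked, matching the
 * one-shot registrations above.
 */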
int
rb_select_kqueue(long delay)
{
	int num, i;
	struct timespec poll_time;
	struct timespec *pt;
	rb_fde_t *F;

	if(delay < 0)
	{
		pt = NULL;
	}
	else
	{
		pt = &poll_time;
		poll_time.tv_sec = delay / 1000;
		poll_time.tv_nsec = (delay % 1000) * 1000000;
	}

	for (;;)
	{
		num = kevent(kq, kqlst, kqoff, kqout, kqmax, pt);
		kqoff = 0;

		if(num >= 0)
			break;

		if(rb_ignore_errno(errno))
			break;

		rb_set_time();

		return RB_ERROR;
		/* NOTREACHED */
	}

	rb_set_time();

	if(num == 0)
		return RB_OK;	/* No error.. */

	for (i = 0; i < num; i++)
	{
		PF *hdl = NULL;

		if(kqout[i].flags & EV_ERROR)
		{
			errno = kqout[i].data;
			/* XXX error == bad! -- adrian */
			continue;	/* XXX! */
		}

		switch (kqout[i].filter)
		{
		case EVFILT_READ:
			F = kqout[i].udata;
			if((hdl = F->read_handler) != NULL)
			{
				F->read_handler = NULL;
				hdl(F, F->read_data);
			}
			break;

		case EVFILT_WRITE:
			F = kqout[i].udata;
			if((hdl = F->write_handler) != NULL)
			{
				F->write_handler = NULL;
				hdl(F, F->write_data);
			}
			break;
#if defined(EVFILT_TIMER)
		case EVFILT_TIMER:
			rb_run_event(kqout[i].udata);
			break;
#endif
		default:
			/* Bad! -- adrian */
			break;
		}
	}
	return RB_OK;
}

#if defined(KQUEUE_SCHED_EVENT)
static int can_do_event = 0;
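
/*
 * Probe once whether the running kernel supports EVFILT_TIMER: create a
 * scratch kqueue, try to register a one-shot 1ms timer, and cache the
 * verdict in can_do_event (1 = supported, -1 = not supported).
 */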
int
rb_kqueue_supports_event(void)
{
	struct kevent kv;
	int xkq;

	if(can_do_event == 1)
		return 1;
	if(can_do_event == -1)
		return 0;

	xkq = kqueue();
	if(xkq < 0)
	{
		/* no kqueue at all, so no timer events either */
		can_do_event = -1;
		return 0;
	}

	EV_SET(&kv, (uintptr_t) 0x0, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 1, 0);
	if(kevent(xkq, &kv, 1, NULL, 0, NULL) < 0)
	{
		can_do_event = -1;
		close(xkq);
		return 0;
	}
	close(xkq);
	can_do_event = 1;
	return 1;
}

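/*
 * Schedule/unschedule ircd events on the kernel timer.  EVFILT_TIMER's
 * data field is a period in milliseconds, hence the "when * 1000" below:
 * "when" arrives in seconds.  The ev_entry pointer doubles as both the
 * timer identifier and the udata handed back to rb_select_kqueue().
 */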
int
rb_kqueue_sched_event(struct ev_entry *event, int when)
{
	struct kevent kev;
	int kep_flags;

	kep_flags = EV_ADD;
	if(event->frequency == 0)
		kep_flags |= EV_ONESHOT;
	EV_SET(&kev, (uintptr_t) event, EVFILT_TIMER, kep_flags, 0, when * 1000, event);
	if(kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
		return 0;
	return 1;
}

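/*
 * Example (illustrative; "ev" stands in for a real struct ev_entry owned
 * by the event subsystem):
 *
 *	rb_kqueue_sched_event(ev, 5);	fire ev in 5 seconds; one-shot
 *					when ev->frequency == 0
 *	rb_kqueue_unsched_event(ev);	cancel the pending timer
 */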
void
rb_kqueue_unsched_event(struct ev_entry *event)
{
	struct kevent kev;
	EV_SET(&kev, (uintptr_t) event, EVFILT_TIMER, EV_DELETE, 0, 0, event);
	kevent(kq, &kev, 1, NULL, 0, NULL);
}

void
rb_kqueue_init_event(void)
{
	return;
}
#endif /* KQUEUE_SCHED_EVENT */

#else /* kqueue not supported */
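
/*
 * Stubs for systems without kqueue: the symbols stay so callers can link
 * against a single API; each one fails with ENOSYS at runtime.
 */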
int
rb_init_netio_kqueue(void)
{
	errno = ENOSYS;
	return -1;
}

void
rb_setselect_kqueue(rb_fde_t * F, unsigned int type, PF * handler, void *client_data)
{
	errno = ENOSYS;
	return;
}

int
rb_select_kqueue(long delay)
{
	errno = ENOSYS;
	return -1;
}

int
rb_setup_fd_kqueue(rb_fde_t * F)
{
	errno = ENOSYS;
	return -1;
}

#endif


#if !defined(HAVE_KEVENT) || !defined(KQUEUE_SCHED_EVENT)
void
rb_kqueue_init_event(void)
{
	return;
}

int
rb_kqueue_sched_event(struct ev_entry *event, int when)
{
	errno = ENOSYS;
	return -1;
}

void
rb_kqueue_unsched_event(struct ev_entry *event)
{
	return;
}

int
rb_kqueue_supports_event(void)
{
	errno = ENOSYS;
	return 0;
}
#endif /* !HAVE_KEVENT || !KQUEUE_SCHED_EVENT */