/*
 * ircd-ratbox: A slightly useful ircd.
 * kqueue.c: FreeBSD kqueue compatible network routines.
 *
 * Copyright (C) 1990 Jarkko Oikarinen and University of Oulu, Co Center
 * Copyright (C) 1996-2002 Hybrid Development Team
 * Copyright (C) 2001 Adrian Chadd <adrian@creative.net.au>
 * Copyright (C) 2002-2005 ircd-ratbox development team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 * USA
 *
 * $Id: kqueue.c 25038 2008-01-23 16:03:08Z androsyn $
 */

#include <libratbox_config.h>
#include <ratbox_lib.h>
#include <commio-int.h>
#include <event-int.h>

#if defined(HAVE_SYS_EVENT_H) && (HAVE_KEVENT)

#include <sys/event.h>

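/*
 * KE_LENGTH caps how many kernel events a single kevent() call in
 * rb_select_kqueue() can return per pass; anything beyond that is simply
 * picked up on the next trip through the event loop.
 */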
#define KE_LENGTH 128

/* jlemon goofed up and didn't add EV_SET until fbsd 4.3 */

#ifndef EV_SET
#define EV_SET(kevp, a, b, c, d, e, f) do { \
	(kevp)->ident = (a); \
	(kevp)->filter = (b); \
	(kevp)->flags = (c); \
	(kevp)->fflags = (d); \
	(kevp)->data = (e); \
	(kevp)->udata = (f); \
} while(0)
#endif

#ifdef EVFILT_TIMER
#define KQUEUE_SCHED_EVENT
#endif


static void kq_update_events(rb_fde_t *, short, PF *);
static int kq;
static struct timespec zero_timespec;

static struct kevent *kqlst;	/* kevent buffer */
static int kqmax;		/* max structs to buffer */
static int kqoff;		/* offset into the buffer */


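/*
 * rb_setup_fd_kqueue
 *
 * Per-fd setup hook for this backend.  kqueue tracks interest entirely
 * through kevent() changelists, so there is nothing to prepare here.
 */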
int
rb_setup_fd_kqueue(rb_fde_t * F)
{
	return 0;
}

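/*
 * kq_update_events
 *
 * Queue an interest change (add or delete) for F on the given filter.
 * Changes are batched into kqlst; they reach the kernel either when the
 * buffer fills up here or on the next kevent() call in rb_select_kqueue().
 */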
static void
kq_update_events(rb_fde_t * F, short filter, PF * handler)
{
	PF *cur_handler;
	int kep_flags;

	switch (filter)
	{
	case EVFILT_READ:
		cur_handler = F->read_handler;
		break;
	case EVFILT_WRITE:
		cur_handler = F->write_handler;
		break;
	default:
		/* XXX bad! -- adrian */
		return;
		break;
	}

	if((cur_handler == NULL && handler != NULL) || (cur_handler != NULL && handler == NULL))
	{
		struct kevent *kep;

		kep = kqlst + kqoff;

		if(handler != NULL)
		{
			if(filter == EVFILT_WRITE)
				kep_flags = (EV_ADD | EV_ONESHOT);
			else
				kep_flags = EV_ADD;
		}
		else
		{
			kep_flags = EV_DELETE;
		}

		EV_SET(kep, (uintptr_t) F->fd, filter, kep_flags, 0, 0, (void *) F);

		if(++kqoff == kqmax)
		{
			int ret;

			ret = kevent(kq, kqlst, kqoff, NULL, 0, &zero_timespec);
			/* jdc -- someone needs to do error checking... */
			if(ret == -1)
			{
				rb_lib_log("kq_update_events(): kevent(): %s", strerror(errno));
				return;
			}
			kqoff = 0;
		}
	}
}



/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
/* Public functions */


/*
 * rb_init_netio
 *
 * Exported function called to initialise the network loop code.
 */
int
rb_init_netio_kqueue(void)
{
	kq = kqueue();
	if(kq < 0)
	{
		return errno;
	}
	kqmax = getdtablesize();
	kqlst = rb_malloc(sizeof(struct kevent) * kqmax);
	rb_open(kq, RB_FD_UNKNOWN, "kqueue fd");
	zero_timespec.tv_sec = 0;
	zero_timespec.tv_nsec = 0;

	return 0;
}
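
/*
 * Note: the change buffer is sized from getdtablesize() so that, in the
 * worst case, one pending change per descriptor fits before a flush is
 * forced, and the kqueue descriptor itself is registered with rb_open()
 * so the library tracks it like any other fd it owns.
 */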

/*
 * rb_setselect
 *
 * Exported function called to register or deregister interest in a
 * pending IO state for a given FD.
 */
void
rb_setselect_kqueue(rb_fde_t * F, unsigned int type, PF * handler, void *client_data)
{
	lrb_assert(IsFDOpen(F));

	if(type & RB_SELECT_READ)
	{
		kq_update_events(F, EVFILT_READ, handler);
		F->read_handler = handler;
		F->read_data = client_data;
	}
	if(type & RB_SELECT_WRITE)
	{
		kq_update_events(F, EVFILT_WRITE, handler);
		F->write_handler = handler;
		F->write_data = client_data;
	}
}
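
/*
 * A minimal usage sketch (hypothetical callback and variable names): callers
 * normally go through the generic rb_setselect() wrapper, which dispatches
 * here on kqueue builds, e.g.
 *
 *	rb_setselect(F, RB_SELECT_READ, read_ready_cb, client_data);
 *
 * Passing a NULL handler for the same type later removes the interest again.
 */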

/*
 * Check all connections for new connections and input data that is to be
 * processed. Also check for connections with data queued and whether we can
 * write it out.
 */

/*
 * rb_select
 *
 * Called to do the new-style IO, courtesy of squid (like most of this
 * new IO code). This routine handles the stuff we've hidden in
 * rb_setselect and fd_table[] and calls callbacks for IO ready
 * events.
 */

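/*
 * Each pass hands the batched changelist in kqlst to kevent() at the same
 * time as it asks for up to KE_LENGTH ready events, so interest updates and
 * event collection share a single system call.
 */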
int
rb_select_kqueue(long delay)
{
	int num, i;
	static struct kevent ke[KE_LENGTH];
	struct timespec poll_time;
	struct timespec *pt;
	rb_fde_t *F;


	if(delay < 0) {
		pt = NULL;
	}
	else {
		pt = &poll_time;
		poll_time.tv_sec = delay / 1000;
		poll_time.tv_nsec = (delay % 1000) * 1000000;
	}

	for (;;)
	{
		num = kevent(kq, kqlst, kqoff, ke, KE_LENGTH, pt);
		kqoff = 0;

		if(num >= 0)
			break;

		if(rb_ignore_errno(errno))
			break;

		rb_set_time();

		return RB_ERROR;

		/* NOTREACHED */
	}

	rb_set_time();

	if(num == 0)
		return RB_OK;	/* No error.. */

	for (i = 0; i < num; i++)
	{
		PF *hdl = NULL;

		if(ke[i].flags & EV_ERROR)
		{
			errno = ke[i].data;
			/* XXX error == bad! -- adrian */
			continue;	/* XXX! */
		}

		switch (ke[i].filter)
		{

		case EVFILT_READ:
			F = ke[i].udata;
			if((hdl = F->read_handler) != NULL)
			{
				F->read_handler = NULL;
				hdl(F, F->read_data);
			}

			break;

		case EVFILT_WRITE:
			F = ke[i].udata;
			if((hdl = F->write_handler) != NULL)
			{
				F->write_handler = NULL;
				hdl(F, F->write_data);
			}
			break;
#if defined(EVFILT_TIMER)
		case EVFILT_TIMER:
			rb_run_event(ke[i].udata);
			break;
#endif
		default:
			/* Bad! -- adrian */
			break;
		}
	}
	return RB_OK;
}
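
/*
 * EVFILT_TIMER support: rb_kqueue_supports_event() probes at runtime by
 * creating a throwaway kqueue and trying to register a one-shot timer on it.
 * The result is cached in can_do_event (1 = supported, -1 = unsupported).
 */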
static int can_do_event = 0;
int
rb_kqueue_supports_event(void)
{
	struct kevent kv;
	struct timespec ts;
	int xkq;

	if(can_do_event == 1)
		return 1;
	if(can_do_event == -1)
		return 0;

	xkq = kqueue();
	ts.tv_sec = 0;
	ts.tv_nsec = 1000;


	EV_SET(&kv, (uintptr_t) 0x0, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 1, 0);
	if(kevent(xkq, &kv, 1, NULL, 0, NULL) < 0)
	{
		can_do_event = -1;
		close(xkq);
		return 0;
	}
	close(xkq);
	can_do_event = 1;
	return 1;
}

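/*
 * rb_kqueue_sched_event
 *
 * Schedule an ev_entry as a kernel EVFILT_TIMER on the main kqueue.  The
 * delay, given in seconds, is converted to the milliseconds EVFILT_TIMER
 * expects; one-shot events get EV_ONESHOT so the kernel drops them after
 * they fire.  Returns 1 on success, 0 on failure.
 */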
int
rb_kqueue_sched_event(struct ev_entry *event, int when)
{
	struct kevent kev;
	int kep_flags;

	kep_flags = EV_ADD;
	if(event->frequency == 0)
		kep_flags |= EV_ONESHOT;
	EV_SET(&kev, (uintptr_t) event, EVFILT_TIMER, kep_flags, 0, when * 1000, event);
	if(kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
		return 0;
	return 1;
}

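/*
 * rb_kqueue_unsched_event
 *
 * Cancel a scheduled timer.  The ev_entry pointer doubles as the kevent
 * ident, so EV_DELETE on the same ident removes the matching timer.
 */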
void
rb_kqueue_unsched_event(struct ev_entry *event)
{
	struct kevent kev;
	EV_SET(&kev, (uintptr_t) event, EVFILT_TIMER, EV_DELETE, 0, 0, event);
	kevent(kq, &kev, 1, NULL, 0, NULL);
}

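/*
 * rb_kqueue_init_event
 *
 * Nothing to set up: kernel timers share the kqueue created by
 * rb_init_netio_kqueue().
 */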
void
rb_kqueue_init_event(void)
{
	return;
}

#else /* kqueue not supported */
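/*
 * Stub implementations for platforms without kqueue: every entry point just
 * sets ENOSYS and fails, so the generic setup code can fall back to another
 * IO mechanism.
 */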
int
rb_init_netio_kqueue(void)
{
	errno = ENOSYS;
	return -1;
}

void
rb_setselect_kqueue(rb_fde_t * F, unsigned int type, PF * handler, void *client_data)
{
	errno = ENOSYS;
	return;
}

int
rb_select_kqueue(long delay)
{
	errno = ENOSYS;
	return -1;
}

int
rb_setup_fd_kqueue(rb_fde_t * F)
{
	errno = ENOSYS;
	return -1;
}

#endif

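/*
 * Timer stubs, used when either kqueue itself or EVFILT_TIMER is missing;
 * rb_kqueue_supports_event() then reports 0 so callers know kernel timers
 * are unavailable.
 */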
#if !defined(HAVE_KEVENT) || !defined(KQUEUE_SCHED_EVENT)
void
rb_kqueue_init_event(void)
{
	return;
}

int
rb_kqueue_sched_event(struct ev_entry *event, int when)
{
	errno = ENOSYS;
	return -1;
}

void
rb_kqueue_unsched_event(struct ev_entry *event)
{
	return;
}

int
rb_kqueue_supports_event(void)
{
	errno = ENOSYS;
	return 0;
}
#endif /* !HAVE_KEVENT || !KQUEUE_SCHED_EVENT */