]> jfr.im git - irc/rqf/shadowircd.git/blame - libratbox/src/kqueue.c
Copied libratbox and related stuff from shadowircd upstream.
[irc/rqf/shadowircd.git] / libratbox / src / kqueue.c
CommitLineData
b57f37fb
WP
1/*
2 * ircd-ratbox: A slightly useful ircd.
3 * kqueue.c: FreeBSD kqueue compatible network routines.
4 *
5 * Copyright (C) 1990 Jarkko Oikarinen and University of Oulu, Co Center
6 * Copyright (C) 1996-2002 Hybrid Development Team
7 * Copyright (C) 2001 Adrian Chadd <adrian@creative.net.au>
8 * Copyright (C) 2002-2005 ircd-ratbox development team
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
23 * USA
24 *
94b4fbf9 25 * $Id: kqueue.c 26092 2008-09-19 15:13:52Z androsyn $
b57f37fb
WP
26 */
27
28#include <libratbox_config.h>
29#include <ratbox_lib.h>
30#include <commio-int.h>
31#include <event-int.h>
32
33#if defined(HAVE_SYS_EVENT_H) && (HAVE_KEVENT)
34
35#include <sys/event.h>
36
b57f37fb
WP
/* jlemon goofed up and didn't add EV_SET until fbsd 4.3 */

/* Fallback EV_SET for ancient kqueue headers: fill the struct kevent
 * field by field, exactly as the real macro does. */
#ifndef EV_SET
#define EV_SET(kevp, a, b, c, d, e, f) do { \
	(kevp)->ident = (a); \
	(kevp)->filter = (b); \
	(kevp)->flags = (c); \
	(kevp)->fflags = (d); \
	(kevp)->data = (e); \
	(kevp)->udata = (f); \
} while(0)
#endif

/* If the platform has EVFILT_TIMER we can schedule ratbox timed events
 * directly through kqueue (see the KQUEUE_SCHED_EVENT section below). */
#ifdef EVFILT_TIMER
#define KQUEUE_SCHED_EVENT
#endif
53
54
static void kq_update_events(rb_fde_t *, short, PF *);
static int kq;				/* descriptor returned by kqueue() */
static struct timespec zero_timespec;	/* all-zero timeout: non-blocking kevent() calls */

static struct kevent *kqlst;	/* kevent buffer (pending change list) */
static struct kevent *kqout;	/* kevent output buffer */
static int kqmax;		/* max structs to buffer */
static int kqoff;		/* offset into the buffer */
63
64
65int
94b4fbf9 66rb_setup_fd_kqueue(rb_fde_t *F)
b57f37fb
WP
67{
68 return 0;
69}
70
71static void
94b4fbf9 72kq_update_events(rb_fde_t *F, short filter, PF * handler)
b57f37fb
WP
73{
74 PF *cur_handler;
75 int kep_flags;
76
77 switch (filter)
78 {
79 case EVFILT_READ:
80 cur_handler = F->read_handler;
81 break;
82 case EVFILT_WRITE:
83 cur_handler = F->write_handler;
84 break;
85 default:
86 /* XXX bad! -- adrian */
87 return;
88 break;
89 }
90
91 if((cur_handler == NULL && handler != NULL) || (cur_handler != NULL && handler == NULL))
92 {
93 struct kevent *kep;
94
95 kep = kqlst + kqoff;
96
97 if(handler != NULL)
98 {
c50bb62c 99 kep_flags = EV_ADD | EV_ONESHOT;
b57f37fb
WP
100 }
101 else
102 {
103 kep_flags = EV_DELETE;
104 }
105
94b4fbf9 106 EV_SET(kep, (uintptr_t)F->fd, filter, kep_flags, 0, 0, (void *)F);
b57f37fb
WP
107
108 if(++kqoff == kqmax)
109 {
98686f18
JT
110 int ret, i;
111
112 /* Add them one at a time, because there may be
113 * already closed fds in it. The kernel will try
114 * to report invalid fds in the output; if there
115 * is no space, it silently stops processing the
116 * array at that point. We cannot give output space
117 * because that would also return events we cannot
118 * process at this point.
119 */
94b4fbf9 120 for(i = 0; i < kqoff; i++)
b57f37fb 121 {
98686f18
JT
122 ret = kevent(kq, kqlst + i, 1, NULL, 0, &zero_timespec);
123 /* jdc -- someone needs to do error checking... */
e0bd36a5
JT
124 /* EBADF is normal here -- jilles */
125 if(ret == -1 && errno != EBADF)
94b4fbf9
VY
126 rb_lib_log("kq_update_events(): kevent(): %s",
127 strerror(errno));
b57f37fb
WP
128 }
129 kqoff = 0;
130 }
131 }
132}
133
134
135
136/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
137/* Public functions */
138
139
140/*
141 * rb_init_netio
142 *
143 * This is a needed exported function which will be called to initialise
144 * the network loop code.
145 */
146int
147rb_init_netio_kqueue(void)
148{
149 kq = kqueue();
150 if(kq < 0)
151 {
152 return errno;
153 }
154 kqmax = getdtablesize();
155 kqlst = rb_malloc(sizeof(struct kevent) * kqmax);
98686f18 156 kqout = rb_malloc(sizeof(struct kevent) * kqmax);
b57f37fb
WP
157 rb_open(kq, RB_FD_UNKNOWN, "kqueue fd");
158 zero_timespec.tv_sec = 0;
159 zero_timespec.tv_nsec = 0;
160
161 return 0;
162}
163
164/*
165 * rb_setselect
166 *
167 * This is a needed exported function which will be called to register
168 * and deregister interest in a pending IO state for a given FD.
169 */
void
rb_setselect_kqueue(rb_fde_t *F, unsigned int type, PF * handler, void *client_data)
{
	lrb_assert(IsFDOpen(F));

	if(type & RB_SELECT_READ)
	{
		/* kq_update_events() must run BEFORE the assignment below:
		 * it compares the old F->read_handler with the new handler
		 * to decide whether kernel-side interest toggles. */
		kq_update_events(F, EVFILT_READ, handler);
		F->read_handler = handler;
		F->read_data = client_data;
	}
	if(type & RB_SELECT_WRITE)
	{
		/* same ordering requirement as the read case above */
		kq_update_events(F, EVFILT_WRITE, handler);
		F->write_handler = handler;
		F->write_data = client_data;
	}
}
188
189/*
190 * Check all connections for new connections and input data that is to be
191 * processed. Also check for connections with data queued and whether we can
192 * write it out.
193 */
194
195/*
196 * rb_select
197 *
198 * Called to do the new-style IO, courtesy of squid (like most of this
199 * new IO code). This routine handles the stuff we've hidden in
200 * rb_setselect and fd_table[] and calls callbacks for IO ready
201 * events.
202 */
203
204int
205rb_select_kqueue(long delay)
206{
207 int num, i;
b57f37fb
WP
208 struct timespec poll_time;
209 struct timespec *pt;
210 rb_fde_t *F;
211
212
94b4fbf9
VY
213 if(delay < 0)
214 {
b57f37fb
WP
215 pt = NULL;
216 }
94b4fbf9
VY
217 else
218 {
b57f37fb
WP
219 pt = &poll_time;
220 poll_time.tv_sec = delay / 1000;
221 poll_time.tv_nsec = (delay % 1000) * 1000000;
222 }
223
94b4fbf9 224 for(;;)
b57f37fb 225 {
98686f18 226 num = kevent(kq, kqlst, kqoff, kqout, kqmax, pt);
b57f37fb
WP
227 kqoff = 0;
228
229 if(num >= 0)
230 break;
231
232 if(rb_ignore_errno(errno))
233 break;
234
235 rb_set_time();
236
237 return RB_ERROR;
238
239 /* NOTREACHED */
240 }
241
242 rb_set_time();
243
244 if(num == 0)
245 return RB_OK; /* No error.. */
246
94b4fbf9 247 for(i = 0; i < num; i++)
b57f37fb
WP
248 {
249 PF *hdl = NULL;
250
98686f18 251 if(kqout[i].flags & EV_ERROR)
b57f37fb 252 {
98686f18 253 errno = kqout[i].data;
b57f37fb
WP
254 /* XXX error == bad! -- adrian */
255 continue; /* XXX! */
256 }
257
98686f18 258 switch (kqout[i].filter)
b57f37fb
WP
259 {
260
261 case EVFILT_READ:
98686f18 262 F = kqout[i].udata;
b57f37fb
WP
263 if((hdl = F->read_handler) != NULL)
264 {
265 F->read_handler = NULL;
266 hdl(F, F->read_data);
267 }
268
269 break;
270
271 case EVFILT_WRITE:
98686f18 272 F = kqout[i].udata;
b57f37fb
WP
273 if((hdl = F->write_handler) != NULL)
274 {
275 F->write_handler = NULL;
276 hdl(F, F->write_data);
277 }
278 break;
279#if defined(EVFILT_TIMER)
280 case EVFILT_TIMER:
98686f18 281 rb_run_event(kqout[i].udata);
b57f37fb
WP
282 break;
283#endif
284 default:
285 /* Bad! -- adrian */
286 break;
287 }
288 }
289 return RB_OK;
290}
033be687
VY
291
292#if defined(KQUEUE_SCHED_EVENT)
b57f37fb
WP
293static int can_do_event = 0;
294int
295rb_kqueue_supports_event(void)
296{
297 struct kevent kv;
298 struct timespec ts;
299 int xkq;
300
301 if(can_do_event == 1)
302 return 1;
303 if(can_do_event == -1)
304 return 0;
305
306 xkq = kqueue();
307 ts.tv_sec = 0;
308 ts.tv_nsec = 1000;
309
310
94b4fbf9 311 EV_SET(&kv, (uintptr_t)0x0, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 1, 0);
b57f37fb
WP
312 if(kevent(xkq, &kv, 1, NULL, 0, NULL) < 0)
313 {
314 can_do_event = -1;
315 close(xkq);
316 return 0;
317 }
318 close(xkq);
319 can_do_event = 1;
320 return 1;
321}
322
323int
324rb_kqueue_sched_event(struct ev_entry *event, int when)
325{
326 struct kevent kev;
327 int kep_flags;
328
329 kep_flags = EV_ADD;
330 if(event->frequency == 0)
331 kep_flags |= EV_ONESHOT;
94b4fbf9 332 EV_SET(&kev, (uintptr_t)event, EVFILT_TIMER, kep_flags, 0, when * 1000, event);
b57f37fb 333 if(kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
94b4fbf9 334 return 0;
b57f37fb
WP
335 return 1;
336}
337
338void
339rb_kqueue_unsched_event(struct ev_entry *event)
340{
341 struct kevent kev;
94b4fbf9 342 EV_SET(&kev, (uintptr_t)event, EVFILT_TIMER, EV_DELETE, 0, 0, event);
b57f37fb
WP
343 kevent(kq, &kev, 1, NULL, 0, NULL);
344}
345
/*
 * rb_kqueue_init_event
 *
 * Timer-event subsystem initialiser; nothing to set up for kqueue.
 */
void
rb_kqueue_init_event(void)
{
}
033be687 351#endif /* KQUEUE_SCHED_EVENT */
b57f37fb
WP
352
353#else /* kqueue not supported */
/* kqueue is not available on this platform: fail with ENOSYS */
int
rb_init_netio_kqueue(void)
{
	errno = ENOSYS;
	return -1;
}
360
361void
94b4fbf9 362rb_setselect_kqueue(rb_fde_t *F, unsigned int type, PF * handler, void *client_data)
b57f37fb
WP
363{
364 errno = ENOSYS;
365 return;
366}
367
/* kqueue is not available on this platform: fail with ENOSYS */
int
rb_select_kqueue(long delay)
{
	errno = ENOSYS;
	return -1;
}
374
375int
94b4fbf9 376rb_setup_fd_kqueue(rb_fde_t *F)
b57f37fb
WP
377{
378 errno = ENOSYS;
379 return -1;
380}
381
382#endif
383
384#if !defined(HAVE_KEVENT) || !defined(KQUEUE_SCHED_EVENT)
/* no kqueue timer support built in: nothing to initialise */
void
rb_kqueue_init_event(void)
{
}
390
/* no kqueue timer support built in: fail with ENOSYS */
int
rb_kqueue_sched_event(struct ev_entry *event, int when)
{
	errno = ENOSYS;
	return -1;
}
397
/* no kqueue timer support built in: nothing to cancel */
void
rb_kqueue_unsched_event(struct ev_entry *event)
{
}
403
/* no kqueue timer support built in: report unsupported */
int
rb_kqueue_supports_event(void)
{
	errno = ENOSYS;
	return 0;
}
410#endif /* !HAVE_KEVENT || !KQUEUE_SCHED_EVENT */