]> jfr.im git - irc/rqf/shadowircd.git/blame - libratbox/src/kqueue.c
Removal of ancient SVN ID's part one
[irc/rqf/shadowircd.git] / libratbox / src / kqueue.c
CommitLineData
b57f37fb
WP
1/*
2 * ircd-ratbox: A slightly useful ircd.
3 * kqueue.c: FreeBSD kqueue compatible network routines.
4 *
5 * Copyright (C) 1990 Jarkko Oikarinen and University of Oulu, Co Center
6 * Copyright (C) 1996-2002 Hybrid Development Team
7 * Copyright (C) 2001 Adrian Chadd <adrian@creative.net.au>
8 * Copyright (C) 2002-2005 ircd-ratbox development team
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
23 * USA
24 *
b57f37fb
WP
25 */
26
27#include <libratbox_config.h>
28#include <ratbox_lib.h>
29#include <commio-int.h>
30#include <event-int.h>
31
32#if defined(HAVE_SYS_EVENT_H) && (HAVE_KEVENT)
33
34#include <sys/event.h>
35
b57f37fb
WP
36/* jlemon goofed up and didn't add EV_SET until fbsd 4.3 */
37
38#ifndef EV_SET
39#define EV_SET(kevp, a, b, c, d, e, f) do { \
40 (kevp)->ident = (a); \
41 (kevp)->filter = (b); \
42 (kevp)->flags = (c); \
43 (kevp)->fflags = (d); \
44 (kevp)->data = (e); \
45 (kevp)->udata = (f); \
46} while(0)
47#endif
48
49#ifdef EVFILT_TIMER
50#define KQUEUE_SCHED_EVENT
51#endif
52
53
54static void kq_update_events(rb_fde_t *, short, PF *);
55static int kq;
56static struct timespec zero_timespec;
57
58static struct kevent *kqlst; /* kevent buffer */
98686f18 59static struct kevent *kqout; /* kevent output buffer */
b57f37fb
WP
60static int kqmax; /* max structs to buffer */
61static int kqoff; /* offset into the buffer */
62
63
64int
94b4fbf9 65rb_setup_fd_kqueue(rb_fde_t *F)
b57f37fb
WP
66{
67 return 0;
68}
69
70static void
94b4fbf9 71kq_update_events(rb_fde_t *F, short filter, PF * handler)
b57f37fb
WP
72{
73 PF *cur_handler;
74 int kep_flags;
75
76 switch (filter)
77 {
78 case EVFILT_READ:
79 cur_handler = F->read_handler;
80 break;
81 case EVFILT_WRITE:
82 cur_handler = F->write_handler;
83 break;
84 default:
85 /* XXX bad! -- adrian */
86 return;
87 break;
88 }
89
90 if((cur_handler == NULL && handler != NULL) || (cur_handler != NULL && handler == NULL))
91 {
92 struct kevent *kep;
93
94 kep = kqlst + kqoff;
95
96 if(handler != NULL)
97 {
c50bb62c 98 kep_flags = EV_ADD | EV_ONESHOT;
b57f37fb
WP
99 }
100 else
101 {
102 kep_flags = EV_DELETE;
103 }
104
94b4fbf9 105 EV_SET(kep, (uintptr_t)F->fd, filter, kep_flags, 0, 0, (void *)F);
b57f37fb
WP
106
107 if(++kqoff == kqmax)
108 {
98686f18
JT
109 int ret, i;
110
111 /* Add them one at a time, because there may be
112 * already closed fds in it. The kernel will try
113 * to report invalid fds in the output; if there
114 * is no space, it silently stops processing the
115 * array at that point. We cannot give output space
116 * because that would also return events we cannot
117 * process at this point.
118 */
94b4fbf9 119 for(i = 0; i < kqoff; i++)
b57f37fb 120 {
98686f18
JT
121 ret = kevent(kq, kqlst + i, 1, NULL, 0, &zero_timespec);
122 /* jdc -- someone needs to do error checking... */
e0bd36a5
JT
123 /* EBADF is normal here -- jilles */
124 if(ret == -1 && errno != EBADF)
94b4fbf9
VY
125 rb_lib_log("kq_update_events(): kevent(): %s",
126 strerror(errno));
b57f37fb
WP
127 }
128 kqoff = 0;
129 }
130 }
131}
132
133
134
135/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
136/* Public functions */
137
138
139/*
140 * rb_init_netio
141 *
142 * This is a needed exported function which will be called to initialise
143 * the network loop code.
144 */
145int
146rb_init_netio_kqueue(void)
147{
148 kq = kqueue();
149 if(kq < 0)
150 {
151 return errno;
152 }
153 kqmax = getdtablesize();
154 kqlst = rb_malloc(sizeof(struct kevent) * kqmax);
98686f18 155 kqout = rb_malloc(sizeof(struct kevent) * kqmax);
b57f37fb
WP
156 rb_open(kq, RB_FD_UNKNOWN, "kqueue fd");
157 zero_timespec.tv_sec = 0;
158 zero_timespec.tv_nsec = 0;
159
160 return 0;
161}
162
163/*
164 * rb_setselect
165 *
166 * This is a needed exported function which will be called to register
167 * and deregister interest in a pending IO state for a given FD.
168 */
169void
94b4fbf9 170rb_setselect_kqueue(rb_fde_t *F, unsigned int type, PF * handler, void *client_data)
b57f37fb
WP
171{
172 lrb_assert(IsFDOpen(F));
173
174 if(type & RB_SELECT_READ)
175 {
176 kq_update_events(F, EVFILT_READ, handler);
177 F->read_handler = handler;
178 F->read_data = client_data;
179 }
180 if(type & RB_SELECT_WRITE)
181 {
182 kq_update_events(F, EVFILT_WRITE, handler);
183 F->write_handler = handler;
184 F->write_data = client_data;
185 }
186}
187
188/*
189 * Check all connections for new connections and input data that is to be
190 * processed. Also check for connections with data queued and whether we can
191 * write it out.
192 */
193
194/*
195 * rb_select
196 *
197 * Called to do the new-style IO, courtesy of squid (like most of this
198 * new IO code). This routine handles the stuff we've hidden in
199 * rb_setselect and fd_table[] and calls callbacks for IO ready
200 * events.
201 */
202
203int
204rb_select_kqueue(long delay)
205{
206 int num, i;
b57f37fb
WP
207 struct timespec poll_time;
208 struct timespec *pt;
209 rb_fde_t *F;
210
211
94b4fbf9
VY
212 if(delay < 0)
213 {
b57f37fb
WP
214 pt = NULL;
215 }
94b4fbf9
VY
216 else
217 {
b57f37fb
WP
218 pt = &poll_time;
219 poll_time.tv_sec = delay / 1000;
220 poll_time.tv_nsec = (delay % 1000) * 1000000;
221 }
222
94b4fbf9 223 for(;;)
b57f37fb 224 {
98686f18 225 num = kevent(kq, kqlst, kqoff, kqout, kqmax, pt);
b57f37fb
WP
226 kqoff = 0;
227
228 if(num >= 0)
229 break;
230
231 if(rb_ignore_errno(errno))
232 break;
233
234 rb_set_time();
235
236 return RB_ERROR;
237
238 /* NOTREACHED */
239 }
240
241 rb_set_time();
242
243 if(num == 0)
244 return RB_OK; /* No error.. */
245
94b4fbf9 246 for(i = 0; i < num; i++)
b57f37fb
WP
247 {
248 PF *hdl = NULL;
249
98686f18 250 if(kqout[i].flags & EV_ERROR)
b57f37fb 251 {
98686f18 252 errno = kqout[i].data;
b57f37fb
WP
253 /* XXX error == bad! -- adrian */
254 continue; /* XXX! */
255 }
256
98686f18 257 switch (kqout[i].filter)
b57f37fb
WP
258 {
259
260 case EVFILT_READ:
98686f18 261 F = kqout[i].udata;
b57f37fb
WP
262 if((hdl = F->read_handler) != NULL)
263 {
264 F->read_handler = NULL;
265 hdl(F, F->read_data);
266 }
267
268 break;
269
270 case EVFILT_WRITE:
98686f18 271 F = kqout[i].udata;
b57f37fb
WP
272 if((hdl = F->write_handler) != NULL)
273 {
274 F->write_handler = NULL;
275 hdl(F, F->write_data);
276 }
277 break;
278#if defined(EVFILT_TIMER)
279 case EVFILT_TIMER:
98686f18 280 rb_run_event(kqout[i].udata);
b57f37fb
WP
281 break;
282#endif
283 default:
284 /* Bad! -- adrian */
285 break;
286 }
287 }
288 return RB_OK;
289}
033be687
VY
290
291#if defined(KQUEUE_SCHED_EVENT)
b57f37fb
WP
292static int can_do_event = 0;
293int
294rb_kqueue_supports_event(void)
295{
296 struct kevent kv;
297 struct timespec ts;
298 int xkq;
299
300 if(can_do_event == 1)
301 return 1;
302 if(can_do_event == -1)
303 return 0;
304
305 xkq = kqueue();
306 ts.tv_sec = 0;
307 ts.tv_nsec = 1000;
308
309
94b4fbf9 310 EV_SET(&kv, (uintptr_t)0x0, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 1, 0);
b57f37fb
WP
311 if(kevent(xkq, &kv, 1, NULL, 0, NULL) < 0)
312 {
313 can_do_event = -1;
314 close(xkq);
315 return 0;
316 }
317 close(xkq);
318 can_do_event = 1;
319 return 1;
320}
321
322int
323rb_kqueue_sched_event(struct ev_entry *event, int when)
324{
325 struct kevent kev;
326 int kep_flags;
327
328 kep_flags = EV_ADD;
329 if(event->frequency == 0)
330 kep_flags |= EV_ONESHOT;
94b4fbf9 331 EV_SET(&kev, (uintptr_t)event, EVFILT_TIMER, kep_flags, 0, when * 1000, event);
b57f37fb 332 if(kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
94b4fbf9 333 return 0;
b57f37fb
WP
334 return 1;
335}
336
337void
338rb_kqueue_unsched_event(struct ev_entry *event)
339{
340 struct kevent kev;
94b4fbf9 341 EV_SET(&kev, (uintptr_t)event, EVFILT_TIMER, EV_DELETE, 0, 0, event);
b57f37fb
WP
342 kevent(kq, &kev, 1, NULL, 0, NULL);
343}
344
/*
 * rb_kqueue_init_event
 *
 * Nothing to set up for kqueue-backed timers: the queue itself is
 * created by rb_init_netio_kqueue().
 */
void
rb_kqueue_init_event(void)
{
}
033be687 350#endif /* KQUEUE_SCHED_EVENT */
b57f37fb
WP
351
352#else /* kqueue not supported */
/* Stub: kqueue is not available on this platform. */
int
rb_init_netio_kqueue(void)
{
	errno = ENOSYS;
	return -1;
}
359
360void
94b4fbf9 361rb_setselect_kqueue(rb_fde_t *F, unsigned int type, PF * handler, void *client_data)
b57f37fb
WP
362{
363 errno = ENOSYS;
364 return;
365}
366
/* Stub: kqueue is not available on this platform. */
int
rb_select_kqueue(long delay)
{
	errno = ENOSYS;
	return -1;
}
373
374int
94b4fbf9 375rb_setup_fd_kqueue(rb_fde_t *F)
b57f37fb
WP
376{
377 errno = ENOSYS;
378 return -1;
379}
380
381#endif
382
383#if !defined(HAVE_KEVENT) || !defined(KQUEUE_SCHED_EVENT)
/* No kernel timer support without kevent/EVFILT_TIMER: nothing to set up. */
void
rb_kqueue_init_event(void)
{
}
389
/* No kernel timer support without kevent/EVFILT_TIMER. */
int
rb_kqueue_sched_event(struct ev_entry *event, int when)
{
	errno = ENOSYS;
	return -1;
}
396
/* No kernel timer support without kevent/EVFILT_TIMER: nothing to remove. */
void
rb_kqueue_unsched_event(struct ev_entry *event)
{
}
402
/* No kernel timer support without kevent/EVFILT_TIMER. */
int
rb_kqueue_supports_event(void)
{
	errno = ENOSYS;
	return 0;
}
409#endif /* !HAVE_KEVENT || !KQUEUE_SCHED_EVENT */