blob: 8647229b8eb5ff4c95ab56246b87f736e0039202 [file] [log] [blame]
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001/*
2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4 *
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -08005 * This software may be distributed under the terms of the BSD license.
6 * See README for more details.
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07007 */
8
9#include "includes.h"
Dmitry Shmidtdf5a7e42014-04-02 12:59:59 -070010#include <assert.h>
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -070011
12#include "common.h"
13#include "trace.h"
14#include "list.h"
15#include "eloop.h"
16
Dmitry Shmidt50b691d2014-05-21 14:01:45 -070017#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
18#error Do not define both of poll and epoll
19#endif
20
21#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
22#define CONFIG_ELOOP_SELECT
23#endif
24
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -080025#ifdef CONFIG_ELOOP_POLL
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -080026#include <poll.h>
27#endif /* CONFIG_ELOOP_POLL */
28
Dmitry Shmidt50b691d2014-05-21 14:01:45 -070029#ifdef CONFIG_ELOOP_EPOLL
30#include <sys/epoll.h>
31#endif /* CONFIG_ELOOP_EPOLL */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -070032
/* One registered socket: the fd, its callback, and the two opaque context
 * pointers that are passed back to the callback on dispatch. */
struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
42
/* One pending timeout. Entries are kept in eloop.timeout, sorted by
 * increasing expiry time (see eloop_register_timeout()). */
struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;	/* absolute expiry on the relative clock */
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
53
/* One registered signal handler. 'signaled' is incremented from the
 * async signal handler and consumed in the main loop context. */
struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};
60
/* Dynamic array of registered sockets for one event type.
 * 'changed' is set whenever the table is modified so that dispatch loops
 * can abort iteration over a now-stale snapshot. */
struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;	/* used to pick epoll event bits */
	int changed;
};
67
/* Global state of the event loop (single instance; eloop is not
 * designed to be instantiated more than once per process). */
struct eloop_data {
	int max_sock;		/* highest registered fd (for select()) */

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;	/* fd -> pollfd entry lookup */
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;	/* capacity of epoll_events */
	int epoll_max_fd;		/* capacity of epoll_table */
	struct eloop_sock *epoll_table;	/* indexed directly by fd */
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;	/* sorted list of struct eloop_timeout */

	int signal_count;
	struct eloop_signal *signals;
	int signaled;		/* any signal pending? (set in handler) */
	int pending_terminate;	/* SIGINT/SIGTERM seen, SIGALRM armed */

	int terminate;
};

/* The single global event loop instance */
static struct eloop_data eloop;
100
101
102#ifdef WPA_TRACE
103
/* SIGSEGV handler (WPA_TRACE builds only): dump a backtrace before
 * aborting so crashes inside eloop callbacks are diagnosable. */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
109
110static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
111{
112 int i;
113 if (table == NULL || table->table == NULL)
114 return;
115 for (i = 0; i < table->count; i++) {
116 wpa_trace_add_ref(&table->table[i], eloop,
117 table->table[i].eloop_data);
118 wpa_trace_add_ref(&table->table[i], user,
119 table->table[i].user_data);
120 }
121}
122
123
124static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
125{
126 int i;
127 if (table == NULL || table->table == NULL)
128 return;
129 for (i = 0; i < table->count; i++) {
130 wpa_trace_remove_ref(&table->table[i], eloop,
131 table->table[i].eloop_data);
132 wpa_trace_remove_ref(&table->table[i], user,
133 table->table[i].user_data);
134 }
135}
136
137#else /* WPA_TRACE */
138
139#define eloop_trace_sock_add_ref(table) do { } while (0)
140#define eloop_trace_sock_remove_ref(table) do { } while (0)
141
142#endif /* WPA_TRACE */
143
144
/*
 * Initialize the global event loop state. Must be called before any other
 * eloop_*() function. Returns 0 on success, -1 on failure (epoll builds
 * only; the select/poll paths cannot fail here).
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
	/* Tag each table so eloop_sock_table_add_sock() can pick the
	 * matching EPOLLIN/EPOLLOUT/EPOLLERR bits. */
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef WPA_TRACE
	/* Dump a backtrace on crashes in trace-enabled builds */
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
165
166
/*
 * Add @sock to @table with the given handler and context pointers.
 * Grows the backend-specific bookkeeping (pollfd arrays for poll,
 * fd-indexed table and event array for epoll) as needed and, for epoll,
 * registers the fd with the epoll instance.
 *
 * Returns 0 on success, -1 on allocation/registration failure.
 *
 * NOTE(review): on the epoll path, if epoll_ctl(ADD) fails the entry has
 * already been appended to @table and counted in eloop.count, so the
 * table and the kernel epoll set disagree — confirm whether callers can
 * hit this in practice.
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct eloop_sock *temp_table;
	struct epoll_event ev, *temp_events;
	int next;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* Grow the fd -> pollfd lookup map (indexed by fd, so it must be
	 * larger than the highest fd); +50 slack to amortize reallocs. */
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* Grow the dense pollfd array passed to poll() */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	/* epoll_table is indexed directly by fd; grow geometrically */
	if (new_max_sock >= eloop.epoll_max_fd) {
		next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
		temp_table = os_realloc_array(eloop.epoll_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.epoll_max_fd = next;
		eloop.epoll_table = temp_table;
	}

	/* Grow the epoll_wait() result buffer */
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */

	/* Trace refs must be dropped before realloc can move the entries
	 * and re-added afterwards (even on failure). */
	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;	/* invalidate any in-progress dispatch */
	eloop_trace_sock_add_ref(table);

#ifdef CONFIG_ELOOP_EPOLL
	os_memset(&ev, 0, sizeof(ev));
	switch (table->type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
	/* Keep the fd-indexed copy used by the epoll dispatch path */
	os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
	return 0;
}
288
289
/*
 * Remove @sock from @table, if present. Compacts the array in place,
 * updates the global fd count, and (epoll builds) deletes the fd from
 * the epoll set and clears its fd-indexed shadow entry.
 *
 * NOTE(review): if epoll_ctl(DEL) fails, the function returns without
 * clearing eloop.epoll_table[sock], leaving a stale shadow entry —
 * confirm this is acceptable (the table entry itself is already gone).
 */
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	/* Linear search for the matching fd */
	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		/* Shift the tail down over the removed entry */
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;	/* invalidate any in-progress dispatch */
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
}
323
324
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800325#ifdef CONFIG_ELOOP_POLL
326
/* Look up the pollfd entry registered for @fd via the fd-indexed map;
 * returns NULL when the fd is out of the map's range. */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < 0 || fd >= mx)
		return NULL;
	return pollfds_map[fd];
}
333
334
335static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
336 struct eloop_sock_table *writers,
337 struct eloop_sock_table *exceptions,
338 struct pollfd *pollfds,
339 struct pollfd **pollfds_map,
340 int max_pollfd_map)
341{
342 int i;
343 int nxt = 0;
344 int fd;
345 struct pollfd *pfd;
346
347 /* Clear pollfd lookup map. It will be re-populated below. */
348 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
349
350 if (readers && readers->table) {
351 for (i = 0; i < readers->count; i++) {
352 fd = readers->table[i].sock;
353 assert(fd >= 0 && fd < max_pollfd_map);
354 pollfds[nxt].fd = fd;
355 pollfds[nxt].events = POLLIN;
356 pollfds[nxt].revents = 0;
357 pollfds_map[fd] = &(pollfds[nxt]);
358 nxt++;
359 }
360 }
361
362 if (writers && writers->table) {
363 for (i = 0; i < writers->count; i++) {
364 /*
365 * See if we already added this descriptor, update it
366 * if so.
367 */
368 fd = writers->table[i].sock;
369 assert(fd >= 0 && fd < max_pollfd_map);
370 pfd = pollfds_map[fd];
371 if (!pfd) {
372 pfd = &(pollfds[nxt]);
373 pfd->events = 0;
374 pfd->fd = fd;
375 pollfds[i].revents = 0;
376 pollfds_map[fd] = pfd;
377 nxt++;
378 }
Dmitry Shmidt04949592012-07-19 12:16:46 -0700379 pfd->events |= POLLOUT;
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800380 }
381 }
382
383 /*
384 * Exceptions are always checked when using poll, but I suppose it's
385 * possible that someone registered a socket *only* for exception
386 * handling. Set the POLLIN bit in this case.
387 */
388 if (exceptions && exceptions->table) {
389 for (i = 0; i < exceptions->count; i++) {
390 /*
391 * See if we already added this descriptor, just use it
392 * if so.
393 */
394 fd = exceptions->table[i].sock;
395 assert(fd >= 0 && fd < max_pollfd_map);
396 pfd = pollfds_map[fd];
397 if (!pfd) {
398 pfd = &(pollfds[nxt]);
399 pfd->events = POLLIN;
400 pfd->fd = fd;
401 pollfds[i].revents = 0;
402 pollfds_map[fd] = pfd;
403 nxt++;
404 }
405 }
406 }
407
408 return nxt;
409}
410
411
/*
 * Run handlers for every socket in @table whose pollfd has any of the
 * bits in @revents set. Returns 1 if a handler modified the socket
 * tables (so the pollfd snapshot is stale and the caller must stop
 * dispatching); 0 otherwise.
 */
static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		/* Handler may have (un)registered sockets; bail out */
		if (table->changed)
			return 1;
	}

	return 0;
}
442
443
/*
 * Dispatch poll() results: readers on POLLIN/POLLERR/POLLHUP, writers on
 * POLLOUT, exception-only registrations on POLLERR/POLLHUP. Stops as
 * soon as a handler changes a table, since the pollfd arrays may then
 * reference stale entries.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
462
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700463#endif /* CONFIG_ELOOP_POLL */
464
465#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800466
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700467static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
468 fd_set *fds)
469{
470 int i;
471
472 FD_ZERO(fds);
473
474 if (table->table == NULL)
475 return;
476
Dmitry Shmidtdf5a7e42014-04-02 12:59:59 -0700477 for (i = 0; i < table->count; i++) {
478 assert(table->table[i].sock >= 0);
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700479 FD_SET(table->table[i].sock, fds);
Dmitry Shmidtdf5a7e42014-04-02 12:59:59 -0700480 }
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700481}
482
483
484static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
485 fd_set *fds)
486{
487 int i;
488
489 if (table == NULL || table->table == NULL)
490 return;
491
492 table->changed = 0;
493 for (i = 0; i < table->count; i++) {
494 if (FD_ISSET(table->table[i].sock, fds)) {
495 table->table[i].handler(table->table[i].sock,
496 table->table[i].eloop_data,
497 table->table[i].user_data);
498 if (table->changed)
499 break;
500 }
501 }
502}
503
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700504#endif /* CONFIG_ELOOP_SELECT */
505
506
507#ifdef CONFIG_ELOOP_EPOLL
/*
 * Dispatch epoll_wait() results: each event's fd indexes the fd-indexed
 * shadow table maintained by eloop_sock_table_add_sock(). Entries with a
 * NULL handler (already unregistered) are skipped. Stops early if any
 * handler changed a socket table, since remaining events may refer to
 * stale registrations.
 */
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.epoll_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
525#endif /* CONFIG_ELOOP_EPOLL */
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800526
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700527
/*
 * Free @table's entry array. Any sockets still registered at this point
 * are leaks from the caller's perspective, so each one is logged (with
 * trace info when available) before the array is released.
 */
static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}
548
549
/* Convenience wrapper: register @sock for read events.
 * Returns 0 on success, -1 on failure. */
int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}
556
557
/* Convenience wrapper: unregister a read-event socket. */
void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
562
563
564static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
565{
566 switch (type) {
567 case EVENT_TYPE_READ:
568 return &eloop.readers;
569 case EVENT_TYPE_WRITE:
570 return &eloop.writers;
571 case EVENT_TYPE_EXCEPTION:
572 return &eloop.exceptions;
573 }
574
575 return NULL;
576}
577
578
/*
 * Register @sock for events of @type with the given handler and context.
 * Returns 0 on success, -1 on failure. @sock must be a valid (>= 0) fd.
 */
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}
590
591
592void eloop_unregister_sock(int sock, eloop_event_type type)
593{
594 struct eloop_sock_table *table;
595
596 table = eloop_get_sock_table(type);
597 eloop_sock_table_remove_sock(table, sock);
598}
599
600
/*
 * Schedule @handler to run after secs + usecs. The expiry is computed on
 * the relative clock and the entry is inserted into eloop.timeout in
 * order of increasing expiry time.
 *
 * Returns 0 on success (including the overflow case below, where the
 * timeout is silently treated as "never fires"), -1 on allocation or
 * clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	/* Normalize microseconds into the seconds field */
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
650
651
/* Unlink @timeout from the list, drop its trace references and free it. */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
659
660
/*
 * Cancel every pending timeout matching @handler and the given context
 * pointers; ELOOP_ALL_CTX acts as a wildcard for either context value.
 * Returns the number of timeouts removed.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	/* _safe variant: entries are deleted while iterating */
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
681
682
/*
 * Cancel at most one pending timeout exactly matching @handler and both
 * context pointers (no wildcards). On removal, *remaining is set to the
 * time left until the timeout would have fired (0 if already due or no
 * match). Returns 1 if a timeout was removed, 0 otherwise.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}
708
709
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700710int eloop_is_timeout_registered(eloop_timeout_handler handler,
711 void *eloop_data, void *user_data)
712{
713 struct eloop_timeout *tmp;
714
715 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
716 if (tmp->handler == handler &&
717 tmp->eloop_data == eloop_data &&
718 tmp->user_data == user_data)
719 return 1;
720 }
721
722 return 0;
723}
724
725
/*
 * Shorten an existing matching timeout so it fires no later than
 * req_secs/req_usecs from now. Returns 1 if the timeout was rescheduled
 * earlier, 0 if the existing timeout was already sooner (left as-is),
 * and -1 if no matching timeout exists.
 */
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				/* Re-register with the shorter interval */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
756
757
/*
 * Extend an existing matching timeout so it fires no earlier than
 * req_secs/req_usecs from now. Returns 1 if the timeout was rescheduled
 * later, 0 if the existing timeout was already later (left as-is), and
 * -1 if no matching timeout exists. Mirror of eloop_deplete_timeout().
 */
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				/* Re-register with the longer interval */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
788
789
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700790#ifndef CONFIG_NATIVE_WINDOWS
/*
 * SIGALRM handler armed by eloop_handle_signal(): if SIGINT/SIGTERM was
 * not processed within two seconds (e.g. the loop is stuck busy), force
 * process exit instead of hanging.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
800#endif /* CONFIG_NATIVE_WINDOWS */
801
802
/*
 * Async signal handler for all registered signals: records which signal
 * fired (counters only — actual user handlers run later from
 * eloop_process_pending_signals() in main-loop context). For terminating
 * signals it also arms a 2-second SIGALRM watchdog so a stuck loop still
 * dies (see eloop_handle_alarm()).
 */
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}
825
826
/*
 * Run (in normal main-loop context) the user handlers for any signals
 * flagged by eloop_handle_signal() since the last call, and disarm the
 * SIGALRM termination watchdog. No-op if nothing is pending.
 */
static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);	/* cancel the forced-exit watchdog */
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}
850
851
/*
 * Register @handler for signal @sig. The handler is invoked from the
 * main loop (not from async signal context). Returns 0 on success, -1
 * on allocation failure.
 */
int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	/* All signals funnel through the common async-safe trampoline */
	signal(sig, eloop_handle_signal);

	return 0;
}
872
873
874int eloop_register_signal_terminate(eloop_signal_handler handler,
875 void *user_data)
876{
877 int ret = eloop_register_signal(SIGINT, handler, user_data);
878 if (ret == 0)
879 ret = eloop_register_signal(SIGTERM, handler, user_data);
880 return ret;
881}
882
883
/*
 * Register @handler for the reconfiguration signal (SIGHUP). On native
 * Windows builds, where SIGHUP does not exist, this is a successful
 * no-op. Returns 0 on success, -1 on failure.
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
893
894
895void eloop_run(void)
896{
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800897#ifdef CONFIG_ELOOP_POLL
898 int num_poll_fds;
899 int timeout_ms = 0;
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700900#endif /* CONFIG_ELOOP_POLL */
901#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700902 fd_set *rfds, *wfds, *efds;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700903 struct timeval _tv;
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700904#endif /* CONFIG_ELOOP_SELECT */
905#ifdef CONFIG_ELOOP_EPOLL
906 int timeout_ms = -1;
907#endif /* CONFIG_ELOOP_EPOLL */
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800908 int res;
Dmitry Shmidtfa3fc4a2013-11-21 13:34:38 -0800909 struct os_reltime tv, now;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700910
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700911#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700912 rfds = os_malloc(sizeof(*rfds));
913 wfds = os_malloc(sizeof(*wfds));
914 efds = os_malloc(sizeof(*efds));
915 if (rfds == NULL || wfds == NULL || efds == NULL)
916 goto out;
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700917#endif /* CONFIG_ELOOP_SELECT */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700918
919 while (!eloop.terminate &&
920 (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
921 eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
922 struct eloop_timeout *timeout;
Dmitry Shmidtd80a4012015-11-05 16:35:40 -0800923
924 if (eloop.pending_terminate) {
925 /*
926 * This may happen in some corner cases where a signal
927 * is received during a blocking operation. We need to
928 * process the pending signals and exit if requested to
929 * avoid hitting the SIGALRM limit if the blocking
930 * operation took more than two seconds.
931 */
932 eloop_process_pending_signals();
933 if (eloop.terminate)
934 break;
935 }
936
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700937 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
938 list);
939 if (timeout) {
Dmitry Shmidtfa3fc4a2013-11-21 13:34:38 -0800940 os_get_reltime(&now);
941 if (os_reltime_before(&now, &timeout->time))
942 os_reltime_sub(&timeout->time, &now, &tv);
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700943 else
944 tv.sec = tv.usec = 0;
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700945#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800946 timeout_ms = tv.sec * 1000 + tv.usec / 1000;
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700947#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
948#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700949 _tv.tv_sec = tv.sec;
950 _tv.tv_usec = tv.usec;
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700951#endif /* CONFIG_ELOOP_SELECT */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700952 }
953
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800954#ifdef CONFIG_ELOOP_POLL
955 num_poll_fds = eloop_sock_table_set_fds(
956 &eloop.readers, &eloop.writers, &eloop.exceptions,
957 eloop.pollfds, eloop.pollfds_map,
958 eloop.max_pollfd_map);
959 res = poll(eloop.pollfds, num_poll_fds,
960 timeout ? timeout_ms : -1);
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700961#endif /* CONFIG_ELOOP_POLL */
962#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700963 eloop_sock_table_set_fds(&eloop.readers, rfds);
964 eloop_sock_table_set_fds(&eloop.writers, wfds);
965 eloop_sock_table_set_fds(&eloop.exceptions, efds);
966 res = select(eloop.max_sock + 1, rfds, wfds, efds,
967 timeout ? &_tv : NULL);
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700968#endif /* CONFIG_ELOOP_SELECT */
969#ifdef CONFIG_ELOOP_EPOLL
970 if (eloop.count == 0) {
971 res = 0;
972 } else {
973 res = epoll_wait(eloop.epollfd, eloop.epoll_events,
974 eloop.count, timeout_ms);
975 }
976#endif /* CONFIG_ELOOP_EPOLL */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700977 if (res < 0 && errno != EINTR && errno != 0) {
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700978 wpa_printf(MSG_ERROR, "eloop: %s: %s",
979#ifdef CONFIG_ELOOP_POLL
980 "poll"
981#endif /* CONFIG_ELOOP_POLL */
982#ifdef CONFIG_ELOOP_SELECT
983 "select"
984#endif /* CONFIG_ELOOP_SELECT */
985#ifdef CONFIG_ELOOP_EPOLL
986 "epoll"
987#endif /* CONFIG_ELOOP_EPOLL */
988 , strerror(errno));
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700989 goto out;
990 }
Dmitry Shmidtd80a4012015-11-05 16:35:40 -0800991
992 eloop.readers.changed = 0;
993 eloop.writers.changed = 0;
994 eloop.exceptions.changed = 0;
995
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700996 eloop_process_pending_signals();
997
998 /* check if some registered timeouts have occurred */
999 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
1000 list);
1001 if (timeout) {
Dmitry Shmidtfa3fc4a2013-11-21 13:34:38 -08001002 os_get_reltime(&now);
1003 if (!os_reltime_before(&now, &timeout->time)) {
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001004 void *eloop_data = timeout->eloop_data;
1005 void *user_data = timeout->user_data;
1006 eloop_timeout_handler handler =
1007 timeout->handler;
1008 eloop_remove_timeout(timeout);
1009 handler(eloop_data, user_data);
1010 }
1011
1012 }
1013
1014 if (res <= 0)
1015 continue;
1016
Dmitry Shmidtd80a4012015-11-05 16:35:40 -08001017 if (eloop.readers.changed ||
1018 eloop.writers.changed ||
1019 eloop.exceptions.changed) {
1020 /*
1021 * Sockets may have been closed and reopened with the
1022 * same FD in the signal or timeout handlers, so we
1023 * must skip the previous results and check again
1024 * whether any of the currently registered sockets have
1025 * events.
1026 */
1027 continue;
1028 }
1029
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -08001030#ifdef CONFIG_ELOOP_POLL
1031 eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
1032 &eloop.exceptions, eloop.pollfds_map,
1033 eloop.max_pollfd_map);
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001034#endif /* CONFIG_ELOOP_POLL */
1035#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001036 eloop_sock_table_dispatch(&eloop.readers, rfds);
1037 eloop_sock_table_dispatch(&eloop.writers, wfds);
1038 eloop_sock_table_dispatch(&eloop.exceptions, efds);
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001039#endif /* CONFIG_ELOOP_SELECT */
1040#ifdef CONFIG_ELOOP_EPOLL
1041 eloop_sock_table_dispatch(eloop.epoll_events, res);
1042#endif /* CONFIG_ELOOP_EPOLL */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001043 }
1044
Dmitry Shmidtea69e842013-05-13 14:52:28 -07001045 eloop.terminate = 0;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001046out:
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001047#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001048 os_free(rfds);
1049 os_free(wfds);
1050 os_free(efds);
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001051#endif /* CONFIG_ELOOP_SELECT */
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -08001052 return;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001053}
1054
1055
/*
 * Request termination of the event loop. Sets the flag that eloop_run()
 * polls, so the loop exits after completing its current iteration (it is
 * not interrupted mid-dispatch). Safe to call from registered timeout,
 * socket, or signal handlers.
 */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
1060
1061
/*
 * Free all resources still held by the event loop. Any timeouts that were
 * never triggered are reported (with their remaining time and trace info)
 * to help catch handlers that were registered but not cleaned up, and then
 * removed. Socket tables, the signal table, and backend-specific state
 * (poll fd arrays or the epoll fd/tables) are released as well.
 *
 * Must only be called after eloop_run() has returned; no eloop_*
 * registration calls may be made afterwards.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	/* _safe variant: eloop_remove_timeout() unlinks the current entry
	 * while we iterate. */
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Remaining time = timeout->time - now, normalized so that
		 * 0 <= usec < 1000000 (borrow one second when needed). */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_table);
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
}
1101
1102
/*
 * Return non-zero if termination of the event loop has been requested,
 * either explicitly via eloop_terminate() or as a pending termination
 * (eloop.pending_terminate, e.g. from signal handling elsewhere in this
 * file). Callers can use this to skip further processing during shutdown.
 */
int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}
1107
1108
/*
 * Block until the given socket becomes readable (or an error occurs on
 * it), bypassing the normal event loop. A negative sock is ignored.
 * This waits indefinitely: there is no timeout parameter.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * epoll() could be used here, but a one-shot wait would require four
	 * system calls: epoll_create1(), epoll_ctl(EPOLL_CTL_ADD),
	 * epoll_wait(), and close() of the epoll fd. select() does the same
	 * in a single call, so it is used even in the epoll build.
	 * NOTE(review): select() assumes sock < FD_SETSIZE — confirm callers
	 * cannot pass larger descriptors.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
}
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001139
1140#ifdef CONFIG_ELOOP_SELECT
1141#undef CONFIG_ELOOP_SELECT
1142#endif /* CONFIG_ELOOP_SELECT */