blob: 5691f154409565a32af11fa2caa4952f8181f562 [file] [log] [blame]
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001/*
2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4 *
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -08005 * This software may be distributed under the terms of the BSD license.
6 * See README for more details.
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07007 */
8
9#include "includes.h"
10
11#include "common.h"
12#include "trace.h"
13#include "list.h"
14#include "eloop.h"
15
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -080016#ifdef CONFIG_ELOOP_POLL
17#include <assert.h>
18#include <poll.h>
19#endif /* CONFIG_ELOOP_POLL */
20
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -070021
/* One registered socket and the callback context delivered when it is ready. */
struct eloop_sock {
	int sock;                   /* file descriptor monitored by the loop */
	void *eloop_data;           /* first opaque context argument for handler */
	void *user_data;            /* second opaque context argument for handler */
	eloop_sock_handler handler; /* invoked when the descriptor becomes ready */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
31
/* One registered timeout; kept in eloop.timeout sorted by expiry time. */
struct eloop_timeout {
	struct dl_list list;           /* linkage in eloop.timeout (increasing time) */
	struct os_time time;           /* absolute expiry time */
	void *eloop_data;              /* first opaque context argument for handler */
	void *user_data;               /* second opaque context argument for handler */
	eloop_timeout_handler handler; /* invoked once when the timeout expires */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
42
/* One registered signal handler; delivery is deferred to the main loop. */
struct eloop_signal {
	int sig;                      /* signal number (e.g., SIGINT) */
	void *user_data;              /* opaque context argument for handler */
	eloop_signal_handler handler; /* called from the loop, not signal context */
	int signaled;                 /* count of deliveries pending processing */
};
49
/* Dynamic array of registered sockets for one event type (read/write/exception). */
struct eloop_sock_table {
	int count;                 /* number of valid entries in table */
	struct eloop_sock *table;  /* realloc'd array of registered sockets */
	int changed;               /* set when table is modified during dispatch */
};
55
/* Global state for the single event loop instance. */
struct eloop_data {
	int max_sock;  /* highest registered fd; select() nfds is max_sock + 1 */

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;       /* array handed to poll() each iteration */
	struct pollfd **pollfds_map;  /* fd -> pollfd slot lookup, indexed by fd */
#endif /* CONFIG_ELOOP_POLL */
	struct eloop_sock_table readers;    /* EVENT_TYPE_READ sockets */
	struct eloop_sock_table writers;    /* EVENT_TYPE_WRITE sockets */
	struct eloop_sock_table exceptions; /* EVENT_TYPE_EXCEPTION sockets */

	struct dl_list timeout; /* eloop_timeout list, increasing expiry time */

	int signal_count;             /* entries in signals[] */
	struct eloop_signal *signals; /* registered signal handlers */
	int signaled;                 /* any signal pending? set in signal context */
	int pending_terminate;        /* SIGINT/SIGTERM seen; SIGALRM armed */

	int terminate;            /* request loop exit (eloop_terminate()) */
	int reader_table_changed; /* NOTE(review): appears unused here; tables
				   * carry their own 'changed' flag */
};
80
81static struct eloop_data eloop;
82
83
84#ifdef WPA_TRACE
85
/* SIGSEGV handler (WPA_TRACE builds only): dump a backtrace, then abort. */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
91
/*
 * Register trace references for every entry's context pointers so that
 * WPA_TRACE can report which component still owns them.
 */
static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}
104
105
/*
 * Drop the trace references added by eloop_trace_sock_add_ref(); must be
 * called before the table array is realloc'd or entries are moved.
 */
static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}
118
119#else /* WPA_TRACE */
120
/* Without WPA_TRACE, the reference tracking helpers compile to no-ops. */
#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)
123
124#endif /* WPA_TRACE */
125
126
/*
 * Initialize the global event loop state. Must be called once before any
 * other eloop function. Returns 0 (cannot currently fail).
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef WPA_TRACE
	/* Install a crash handler so tracing builds dump a backtrace. */
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
136
137
/*
 * Append a socket registration to @table, growing the table array (and, in
 * poll builds, the shared pollfd arrays) as needed.
 *
 * Returns 0 on success, -1 on allocation failure or NULL table. On success
 * updates eloop.max_sock/eloop.count and marks the table changed so an
 * in-progress dispatch restarts safely.
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* pollfds_map is indexed directly by fd; grow with headroom (+50)
	 * to avoid a realloc on every new descriptor. */
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc(eloop.pollfds_map, sizeof(struct pollfd *) *
				  (new_max_sock + 50));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* pollfds needs one slot per registered socket (upper bound). */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc(eloop.pollfds, sizeof(struct pollfd) * nmax);
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */

	/* Drop trace refs before realloc may move the entries. */
	eloop_trace_sock_remove_ref(table);
	tmp = (struct eloop_sock *)
		os_realloc(table->table,
			   (table->count + 1) * sizeof(struct eloop_sock));
	if (tmp == NULL)
		return -1;

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

	return 0;
}
198
199
200static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
201 int sock)
202{
203 int i;
204
205 if (table == NULL || table->table == NULL || table->count == 0)
206 return;
207
208 for (i = 0; i < table->count; i++) {
209 if (table->table[i].sock == sock)
210 break;
211 }
212 if (i == table->count)
213 return;
214 eloop_trace_sock_remove_ref(table);
215 if (i != table->count - 1) {
216 os_memmove(&table->table[i], &table->table[i + 1],
217 (table->count - i - 1) *
218 sizeof(struct eloop_sock));
219 }
220 table->count--;
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800221 eloop.count--;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700222 table->changed = 1;
223 eloop_trace_sock_add_ref(table);
224}
225
226
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800227#ifdef CONFIG_ELOOP_POLL
228
/*
 * Look up the pollfd slot registered for descriptor @fd in the fd-indexed
 * map; returns NULL when @fd is out of the map's range [0, mx) or has no
 * slot assigned.
 */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	return (fd >= 0 && fd < mx) ? pollfds_map[fd] : NULL;
}
235
236
237static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
238 struct eloop_sock_table *writers,
239 struct eloop_sock_table *exceptions,
240 struct pollfd *pollfds,
241 struct pollfd **pollfds_map,
242 int max_pollfd_map)
243{
244 int i;
245 int nxt = 0;
246 int fd;
247 struct pollfd *pfd;
248
249 /* Clear pollfd lookup map. It will be re-populated below. */
250 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
251
252 if (readers && readers->table) {
253 for (i = 0; i < readers->count; i++) {
254 fd = readers->table[i].sock;
255 assert(fd >= 0 && fd < max_pollfd_map);
256 pollfds[nxt].fd = fd;
257 pollfds[nxt].events = POLLIN;
258 pollfds[nxt].revents = 0;
259 pollfds_map[fd] = &(pollfds[nxt]);
260 nxt++;
261 }
262 }
263
264 if (writers && writers->table) {
265 for (i = 0; i < writers->count; i++) {
266 /*
267 * See if we already added this descriptor, update it
268 * if so.
269 */
270 fd = writers->table[i].sock;
271 assert(fd >= 0 && fd < max_pollfd_map);
272 pfd = pollfds_map[fd];
273 if (!pfd) {
274 pfd = &(pollfds[nxt]);
275 pfd->events = 0;
276 pfd->fd = fd;
277 pollfds[i].revents = 0;
278 pollfds_map[fd] = pfd;
279 nxt++;
280 }
281 pfd->events |= POLLIN;
282 }
283 }
284
285 /*
286 * Exceptions are always checked when using poll, but I suppose it's
287 * possible that someone registered a socket *only* for exception
288 * handling. Set the POLLIN bit in this case.
289 */
290 if (exceptions && exceptions->table) {
291 for (i = 0; i < exceptions->count; i++) {
292 /*
293 * See if we already added this descriptor, just use it
294 * if so.
295 */
296 fd = exceptions->table[i].sock;
297 assert(fd >= 0 && fd < max_pollfd_map);
298 pfd = pollfds_map[fd];
299 if (!pfd) {
300 pfd = &(pollfds[nxt]);
301 pfd->events = POLLIN;
302 pfd->fd = fd;
303 pollfds[i].revents = 0;
304 pollfds_map[fd] = pfd;
305 nxt++;
306 }
307 }
308 }
309
310 return nxt;
311}
312
313
314static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
315 struct pollfd **pollfds_map,
316 int max_pollfd_map,
317 short int revents)
318{
319 int i;
320 struct pollfd *pfd;
321
322 if (!table || !table->table)
323 return 0;
324
325 table->changed = 0;
326 for (i = 0; i < table->count; i++) {
327 pfd = find_pollfd(pollfds_map, table->table[i].sock,
328 max_pollfd_map);
329 if (!pfd)
330 continue;
331
332 if (!(pfd->revents & revents))
333 continue;
334
335 table->table[i].handler(table->table[i].sock,
336 table->table[i].eloop_data,
337 table->table[i].user_data);
338 if (table->changed)
339 return 1;
340 }
341
342 return 0;
343}
344
345
/*
 * Dispatch ready sockets from all three tables (poll build). If a handler
 * modifies a table, dispatch stops early because the pollfd array and map
 * built before the handler ran no longer match the registrations.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
363
364#else /* CONFIG_ELOOP_POLL */
365
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700366static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
367 fd_set *fds)
368{
369 int i;
370
371 FD_ZERO(fds);
372
373 if (table->table == NULL)
374 return;
375
376 for (i = 0; i < table->count; i++)
377 FD_SET(table->table[i].sock, fds);
378}
379
380
381static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
382 fd_set *fds)
383{
384 int i;
385
386 if (table == NULL || table->table == NULL)
387 return;
388
389 table->changed = 0;
390 for (i = 0; i < table->count; i++) {
391 if (FD_ISSET(table->table[i].sock, fds)) {
392 table->table[i].handler(table->table[i].sock,
393 table->table[i].eloop_data,
394 table->table[i].user_data);
395 if (table->changed)
396 break;
397 }
398 }
399}
400
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800401#endif /* CONFIG_ELOOP_POLL */
402
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700403
/*
 * Free the table's entry array at shutdown. Any sockets still registered
 * are reported as leaks (their owners should have unregistered them).
 */
static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}
424
425
/*
 * Convenience wrapper: register @sock for read events.
 * Returns 0 on success, -1 on failure (see eloop_register_sock()).
 */
int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}
432
433
/* Convenience wrapper: remove a read-event registration for @sock. */
void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
438
439
440static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
441{
442 switch (type) {
443 case EVENT_TYPE_READ:
444 return &eloop.readers;
445 case EVENT_TYPE_WRITE:
446 return &eloop.writers;
447 case EVENT_TYPE_EXCEPTION:
448 return &eloop.exceptions;
449 }
450
451 return NULL;
452}
453
454
/*
 * Register @sock for events of @type; @handler is called with the two
 * opaque context pointers when the socket becomes ready.
 * Returns 0 on success, -1 on failure (unknown type or out of memory).
 */
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}
465
466
/* Remove the registration of @sock for events of @type; no-op if absent. */
void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
474
475
/*
 * Register @handler to be called once, @secs + @usecs from now, with the
 * two opaque context pointers. The timeout list is kept sorted by expiry
 * so the loop only inspects the head.
 *
 * Returns 0 on success (including the far-future overflow case, where the
 * timeout is silently treated as "never"), -1 on allocation/clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_time(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	/* Normalize usec into [0, 1000000); os_get_time() keeps usec below
	 * one second, so a bounded carry loop suffices. */
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_time_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
525
526
/* Unlink @timeout from the list, drop its trace references, and free it. */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
534
535
/*
 * Cancel all pending timeouts matching @handler and the context pointers;
 * ELOOP_ALL_CTX acts as a wildcard for either context argument.
 * Returns the number of timeouts removed. Uses the _safe iterator because
 * entries are freed while walking the list.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
556
557
558int eloop_is_timeout_registered(eloop_timeout_handler handler,
559 void *eloop_data, void *user_data)
560{
561 struct eloop_timeout *tmp;
562
563 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
564 if (tmp->handler == handler &&
565 tmp->eloop_data == eloop_data &&
566 tmp->user_data == user_data)
567 return 1;
568 }
569
570 return 0;
571}
572
573
574#ifndef CONFIG_NATIVE_WINDOWS
/*
 * SIGALRM handler armed by eloop_handle_signal(): if SIGINT/SIGTERM was
 * not processed within two seconds (loop stuck busy), kill the process.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
584#endif /* CONFIG_NATIVE_WINDOWS */
585
586
/*
 * Actual signal handler: runs in async-signal context, so it only records
 * that the signal occurred; the user callback is deferred to the main loop
 * via eloop_process_pending_signals().
 */
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}
609
610
/*
 * Run, from normal (non-signal) context, the user callbacks for every
 * signal recorded by eloop_handle_signal() since the last call. Also
 * disarms the 2-second SIGALRM watchdog once a pending terminate is seen.
 */
static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0); /* cancel the forced-kill watchdog */
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}
634
635
636int eloop_register_signal(int sig, eloop_signal_handler handler,
637 void *user_data)
638{
639 struct eloop_signal *tmp;
640
641 tmp = (struct eloop_signal *)
642 os_realloc(eloop.signals,
643 (eloop.signal_count + 1) *
644 sizeof(struct eloop_signal));
645 if (tmp == NULL)
646 return -1;
647
648 tmp[eloop.signal_count].sig = sig;
649 tmp[eloop.signal_count].user_data = user_data;
650 tmp[eloop.signal_count].handler = handler;
651 tmp[eloop.signal_count].signaled = 0;
652 eloop.signal_count++;
653 eloop.signals = tmp;
654 signal(sig, eloop_handle_signal);
655
656 return 0;
657}
658
659
660int eloop_register_signal_terminate(eloop_signal_handler handler,
661 void *user_data)
662{
663 int ret = eloop_register_signal(SIGINT, handler, user_data);
664 if (ret == 0)
665 ret = eloop_register_signal(SIGTERM, handler, user_data);
666 return ret;
667}
668
669
/*
 * Register @handler for the reconfiguration signal (SIGHUP). On native
 * Windows builds there is no SIGHUP, so this is a successful no-op.
 * Returns 0 on success, -1 on failure.
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
679
680
/*
 * Main event loop: wait (poll() or select(), depending on
 * CONFIG_ELOOP_POLL) until a socket is ready, a timeout expires, or a
 * signal arrives; dispatch in the order signals, one expired timeout,
 * then sockets. Runs until eloop_terminate() is called or there is
 * nothing left to wait for (no timeouts and no registered sockets).
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#else /* CONFIG_ELOOP_POLL */
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_POLL */
	int res;
	struct os_time tv, now;

#ifndef CONFIG_ELOOP_POLL
	/* fd_set can be large; allocate on the heap rather than the stack. */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_POLL */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;
		/* Head of the sorted list is the next timeout to expire;
		 * use it to bound the wait. */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_time(&now);
			if (os_time_before(&now, &timeout->time))
				os_time_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0; /* already expired */
#ifdef CONFIG_ELOOP_POLL
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#else /* CONFIG_ELOOP_POLL */
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_POLL */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		/* No pending timeout -> block indefinitely (-1). */
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);

		/* EINTR (signal) is expected; signals are handled below. */
		if (res < 0 && errno != EINTR && errno != 0) {
			perror("poll");
			goto out;
		}
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
		if (res < 0 && errno != EINTR && errno != 0) {
			perror("select");
			goto out;
		}
#endif /* CONFIG_ELOOP_POLL */
		/* Deliver deferred signal callbacks from normal context. */
		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_time(&now);
			if (!os_time_before(&now, &timeout->time)) {
				/* Copy the fields and remove the entry
				 * before calling the handler, which may
				 * re-register or cancel timeouts. */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue; /* timeout or EINTR; no sockets ready */

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_POLL */
	}

out:
#ifndef CONFIG_ELOOP_POLL
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_POLL */
	return;
}
784
785
/* Request eloop_run() to exit after finishing the current iteration. */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
790
791
/*
 * Free all remaining eloop resources at shutdown. Pending timeouts and
 * registered sockets are reported as leaks (with time remaining relative
 * to now) before being released.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_time now;

	os_get_time(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Manual borrow for the usec field when computing the
		 * remaining time for the diagnostic message. */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
}
826
827
/* Return non-zero if eloop_terminate() has been requested. */
int eloop_terminated(void)
{
	return eloop.terminate;
}
832
833
/*
 * Block the caller until @sock becomes readable, outside the main loop
 * (no timeout handling, no other sockets). No-op for negative @sock.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	/* -1 timeout: wait indefinitely */
	poll(&pfd, 1, -1);
#else /* CONFIG_ELOOP_POLL */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* CONFIG_ELOOP_POLL */
}