/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */
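/*
 * Typical usage of this event loop (a minimal sketch; my_read_handler,
 * my_timeout_handler, and ctx are illustrative names, not part of this file;
 * the public API itself is declared in eloop.h):
 *
 *	static void my_read_handler(int sock, void *eloop_ctx, void *sock_ctx)
 *	{
 *		... handle data available on sock ...
 *	}
 *
 *	eloop_init();
 *	eloop_register_read_sock(sock, my_read_handler, ctx, NULL);
 *	eloop_register_timeout(1, 0, my_timeout_handler, ctx, NULL);
 *	eloop_run();	<- blocks until eloop_terminate() is called or no
 *			   sockets/timeouts remain registered
 *	eloop_destroy();
 */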

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */


struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
	int reader_table_changed;
};

static struct eloop_data eloop;

#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}

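/*
 * Append one descriptor to the given socket table, growing the table (and,
 * with CONFIG_ELOOP_POLL, the shared pollfd buffers) by reallocation. Trace
 * references are dropped and re-added around the realloc because the table
 * entries may move in memory.
 */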
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL)
		return -1;

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}

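/*
 * Build the pollfd array for poll() from the reader/writer/exception tables
 * and fill pollfds_map so that a descriptor's pollfd entry can be looked up
 * by fd when dispatching. Returns the number of pollfd entries in use.
 */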
static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#else /* CONFIG_ELOOP_POLL */

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_POLL */


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}

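/*
 * Register a new relative timeout. The entry is inserted into eloop.timeout
 * so that the list stays sorted by increasing expiry time; eloop_run() then
 * only needs to look at the first entry to compute the next poll/select
 * timeout.
 */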
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long is effectively
		 * infinite, i.e., it would never trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}

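/*
 * Shorten a registered timeout: if a matching timeout exists and the
 * requested time is earlier than its remaining time, re-register it with the
 * shorter value. Returns 1 if the timeout was rescheduled, 0 if the existing
 * timeout was already due at least as soon, and -1 if no matching timeout was
 * found.
 */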
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}

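/*
 * Extend a registered timeout: if a matching timeout exists and its remaining
 * time is shorter than the requested time, re-register it with the longer
 * value. Returns 1 if the timeout was rescheduled, 0 if the existing timeout
 * was already later, and -1 if no matching timeout was found.
 */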
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */

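/*
 * Signal handler: only record which signal fired; the registered handlers are
 * invoked later from eloop_process_pending_signals() in the main loop. For
 * SIGINT/SIGTERM, also arm a two-second SIGALRM so that a shutdown stuck in a
 * busy loop is forced to exit (see eloop_handle_alarm() above).
 */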
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}

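/*
 * Main loop: wait in poll()/select() until the nearest timeout expires or a
 * registered socket becomes ready, then run pending signal handlers, the
 * first expired timeout handler and finally the socket handlers. The loop
 * exits when eloop_terminate() has been called or when no sockets or
 * timeouts remain registered.
 */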
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#else /* CONFIG_ELOOP_POLL */
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_POLL */
	int res;
	struct os_reltime tv, now;

#ifndef CONFIG_ELOOP_POLL
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_POLL */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#ifdef CONFIG_ELOOP_POLL
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#else /* CONFIG_ELOOP_POLL */
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_POLL */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);

		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_INFO, "eloop: poll: %s",
				   strerror(errno));
			goto out;
		}
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_INFO, "eloop: select: %s",
				   strerror(errno));
			goto out;
		}
#endif /* CONFIG_ELOOP_POLL */
		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#else /* CONFIG_ELOOP_POLL */
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_POLL */
	}

	eloop.terminate = 0;
out:
#ifndef CONFIG_ELOOP_POLL
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_POLL */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
}


int eloop_terminated(void)
{
	return eloop.terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#else /* CONFIG_ELOOP_POLL */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* CONFIG_ELOOP_POLL */
}
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700953}