/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

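/*
 * Backend selection: CONFIG_ELOOP_POLL or CONFIG_ELOOP_EPOLL may be defined
 * at build time; if neither is set, the portable select() backend is used.
 */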
#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_EPOLL
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

struct eloop_sock {
        int sock;
        void *eloop_data;
        void *user_data;
        eloop_sock_handler handler;
        WPA_TRACE_REF(eloop);
        WPA_TRACE_REF(user);
        WPA_TRACE_INFO
};

struct eloop_timeout {
        struct dl_list list;
        struct os_reltime time;
        void *eloop_data;
        void *user_data;
        eloop_timeout_handler handler;
        WPA_TRACE_REF(eloop);
        WPA_TRACE_REF(user);
        WPA_TRACE_INFO
};

struct eloop_signal {
        int sig;
        void *user_data;
        eloop_signal_handler handler;
        int signaled;
};

struct eloop_sock_table {
        int count;
        struct eloop_sock *table;
#ifdef CONFIG_ELOOP_EPOLL
        eloop_event_type type;
#else /* CONFIG_ELOOP_EPOLL */
        int changed;
#endif /* CONFIG_ELOOP_EPOLL */
};

struct eloop_data {
        int max_sock;

        int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
        int max_pollfd_map; /* number of pollfds_map currently allocated */
        int max_poll_fds; /* number of pollfds currently allocated */
        struct pollfd *pollfds;
        struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
        int epollfd;
        int epoll_max_event_num;
        int epoll_max_fd;
        struct eloop_sock *epoll_table;
        struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
        struct eloop_sock_table readers;
        struct eloop_sock_table writers;
        struct eloop_sock_table exceptions;

        struct dl_list timeout;

        int signal_count;
        struct eloop_signal *signals;
        int signaled;
        int pending_terminate;

        int terminate;
};

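/* All event loop state is kept in this single static instance */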
static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
        wpa_trace_show("eloop SIGSEGV");
        abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
        int i;
        if (table == NULL || table->table == NULL)
                return;
        for (i = 0; i < table->count; i++) {
                wpa_trace_add_ref(&table->table[i], eloop,
                                  table->table[i].eloop_data);
                wpa_trace_add_ref(&table->table[i], user,
                                  table->table[i].user_data);
        }
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
        int i;
        if (table == NULL || table->table == NULL)
                return;
        for (i = 0; i < table->count; i++) {
                wpa_trace_remove_ref(&table->table[i], eloop,
                                     table->table[i].eloop_data);
                wpa_trace_remove_ref(&table->table[i], user,
                                     table->table[i].user_data);
        }
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


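/*
 * eloop_init() resets the global state and prepares the selected backend;
 * with CONFIG_ELOOP_EPOLL the epoll instance is created here up front.
 */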
int eloop_init(void)
{
        os_memset(&eloop, 0, sizeof(eloop));
        dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
        eloop.epollfd = epoll_create1(0);
        if (eloop.epollfd < 0) {
                wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
                           __func__, strerror(errno));
                return -1;
        }
        eloop.readers.type = EVENT_TYPE_READ;
        eloop.writers.type = EVENT_TYPE_WRITE;
        eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef WPA_TRACE
        signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
        return 0;
}


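/*
 * Add a socket to one of the registration tables. The table array grows by
 * one entry per registration; the poll/epoll backends also grow their
 * per-fd bookkeeping here, and the epoll backend registers the fd with
 * epoll_ctl(EPOLL_CTL_ADD) immediately (eloop.epoll_table is indexed by fd).
 */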
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                     int sock, eloop_sock_handler handler,
                                     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
        struct eloop_sock *temp_table;
        struct epoll_event ev, *temp_events;
        int next;
#endif /* CONFIG_ELOOP_EPOLL */
        struct eloop_sock *tmp;
        int new_max_sock;

        if (sock > eloop.max_sock)
                new_max_sock = sock;
        else
                new_max_sock = eloop.max_sock;

        if (table == NULL)
                return -1;

#ifdef CONFIG_ELOOP_POLL
        if (new_max_sock >= eloop.max_pollfd_map) {
                struct pollfd **nmap;
                nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
                                        sizeof(struct pollfd *));
                if (nmap == NULL)
                        return -1;

                eloop.max_pollfd_map = new_max_sock + 50;
                eloop.pollfds_map = nmap;
        }

        if (eloop.count + 1 > eloop.max_poll_fds) {
                struct pollfd *n;
                int nmax = eloop.count + 1 + 50;
                n = os_realloc_array(eloop.pollfds, nmax,
                                     sizeof(struct pollfd));
                if (n == NULL)
                        return -1;

                eloop.max_poll_fds = nmax;
                eloop.pollfds = n;
        }
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
        if (new_max_sock >= eloop.epoll_max_fd) {
                next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
                temp_table = os_realloc_array(eloop.epoll_table, next,
                                              sizeof(struct eloop_sock));
                if (temp_table == NULL)
                        return -1;

                eloop.epoll_max_fd = next;
                eloop.epoll_table = temp_table;
        }

        if (eloop.count + 1 > eloop.epoll_max_event_num) {
                next = eloop.epoll_max_event_num == 0 ? 8 :
                        eloop.epoll_max_event_num * 2;
                temp_events = os_realloc_array(eloop.epoll_events, next,
                                               sizeof(struct epoll_event));
                if (temp_events == NULL) {
                        wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
                                   "%s", __func__, strerror(errno));
                        return -1;
                }

                eloop.epoll_max_event_num = next;
                eloop.epoll_events = temp_events;
        }
#endif /* CONFIG_ELOOP_EPOLL */

        eloop_trace_sock_remove_ref(table);
        tmp = os_realloc_array(table->table, table->count + 1,
                               sizeof(struct eloop_sock));
        if (tmp == NULL) {
                eloop_trace_sock_add_ref(table);
                return -1;
        }

        tmp[table->count].sock = sock;
        tmp[table->count].eloop_data = eloop_data;
        tmp[table->count].user_data = user_data;
        tmp[table->count].handler = handler;
        wpa_trace_record(&tmp[table->count]);
        table->count++;
        table->table = tmp;
        eloop.max_sock = new_max_sock;
        eloop.count++;
#ifndef CONFIG_ELOOP_EPOLL
        table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
        eloop_trace_sock_add_ref(table);

#ifdef CONFIG_ELOOP_EPOLL
        os_memset(&ev, 0, sizeof(ev));
        switch (table->type) {
        case EVENT_TYPE_READ:
                ev.events = EPOLLIN;
                break;
        case EVENT_TYPE_WRITE:
                ev.events = EPOLLOUT;
                break;
        /*
         * Exceptions are always checked when using epoll, but I suppose it's
         * possible that someone registered a socket *only* for exception
         * handling.
         */
        case EVENT_TYPE_EXCEPTION:
                ev.events = EPOLLERR | EPOLLHUP;
                break;
        }
        ev.data.fd = sock;
        if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
                wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
                           "failed. %s", __func__, sock, strerror(errno));
                return -1;
        }
        os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
                  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
        return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                         int sock)
{
        int i;

        if (table == NULL || table->table == NULL || table->count == 0)
                return;

        for (i = 0; i < table->count; i++) {
                if (table->table[i].sock == sock)
                        break;
        }
        if (i == table->count)
                return;
        eloop_trace_sock_remove_ref(table);
        if (i != table->count - 1) {
                os_memmove(&table->table[i], &table->table[i + 1],
                           (table->count - i - 1) *
                           sizeof(struct eloop_sock));
        }
        table->count--;
        eloop.count--;
#ifndef CONFIG_ELOOP_EPOLL
        table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
        eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
        if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
                wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
                           "failed. %s", __func__, sock, strerror(errno));
                return;
        }
        os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
}


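/*
 * poll() backend helpers: pollfds[] is rebuilt from the registration tables
 * before every poll() call, and pollfds_map[] maps an fd back to its pollfd
 * entry so a descriptor registered in more than one table shares one slot.
 */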
#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
        if (fd < mx && fd >= 0)
                return pollfds_map[fd];
        return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
                                    struct eloop_sock_table *writers,
                                    struct eloop_sock_table *exceptions,
                                    struct pollfd *pollfds,
                                    struct pollfd **pollfds_map,
                                    int max_pollfd_map)
{
        int i;
        int nxt = 0;
        int fd;
        struct pollfd *pfd;

        /* Clear pollfd lookup map. It will be re-populated below. */
        os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

        if (readers && readers->table) {
                for (i = 0; i < readers->count; i++) {
                        fd = readers->table[i].sock;
                        assert(fd >= 0 && fd < max_pollfd_map);
                        pollfds[nxt].fd = fd;
                        pollfds[nxt].events = POLLIN;
                        pollfds[nxt].revents = 0;
                        pollfds_map[fd] = &(pollfds[nxt]);
                        nxt++;
                }
        }

        if (writers && writers->table) {
                for (i = 0; i < writers->count; i++) {
                        /*
                         * See if we already added this descriptor, update it
                         * if so.
                         */
                        fd = writers->table[i].sock;
                        assert(fd >= 0 && fd < max_pollfd_map);
                        pfd = pollfds_map[fd];
                        if (!pfd) {
                                pfd = &(pollfds[nxt]);
                                pfd->events = 0;
                                pfd->fd = fd;
                                pfd->revents = 0;
                                pollfds_map[fd] = pfd;
                                nxt++;
                        }
                        pfd->events |= POLLOUT;
                }
        }

        /*
         * Exceptions are always checked when using poll, but I suppose it's
         * possible that someone registered a socket *only* for exception
         * handling. Set the POLLIN bit in this case.
         */
        if (exceptions && exceptions->table) {
                for (i = 0; i < exceptions->count; i++) {
                        /*
                         * See if we already added this descriptor, just use it
                         * if so.
                         */
                        fd = exceptions->table[i].sock;
                        assert(fd >= 0 && fd < max_pollfd_map);
                        pfd = pollfds_map[fd];
                        if (!pfd) {
                                pfd = &(pollfds[nxt]);
                                pfd->events = POLLIN;
                                pfd->fd = fd;
                                pfd->revents = 0;
                                pollfds_map[fd] = pfd;
                                nxt++;
                        }
                }
        }

        return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
                                           struct pollfd **pollfds_map,
                                           int max_pollfd_map,
                                           short int revents)
{
        int i;
        struct pollfd *pfd;

        if (!table || !table->table)
                return 0;

        table->changed = 0;
        for (i = 0; i < table->count; i++) {
                pfd = find_pollfd(pollfds_map, table->table[i].sock,
                                  max_pollfd_map);
                if (!pfd)
                        continue;

                if (!(pfd->revents & revents))
                        continue;

                table->table[i].handler(table->table[i].sock,
                                        table->table[i].eloop_data,
                                        table->table[i].user_data);
                if (table->changed)
                        return 1;
        }

        return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
                                      struct eloop_sock_table *writers,
                                      struct eloop_sock_table *exceptions,
                                      struct pollfd **pollfds_map,
                                      int max_pollfd_map)
{
        if (eloop_sock_table_dispatch_table(readers, pollfds_map,
                                            max_pollfd_map, POLLIN | POLLERR |
                                            POLLHUP))
                return; /* pollfds may be invalid at this point */

        if (eloop_sock_table_dispatch_table(writers, pollfds_map,
                                            max_pollfd_map, POLLOUT))
                return; /* pollfds may be invalid at this point */

        eloop_sock_table_dispatch_table(exceptions, pollfds_map,
                                        max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
                                     fd_set *fds)
{
        int i;

        FD_ZERO(fds);

        if (table->table == NULL)
                return;

        for (i = 0; i < table->count; i++) {
                assert(table->table[i].sock >= 0);
                FD_SET(table->table[i].sock, fds);
        }
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
                                      fd_set *fds)
{
        int i;

        if (table == NULL || table->table == NULL)
                return;

        table->changed = 0;
        for (i = 0; i < table->count; i++) {
                if (FD_ISSET(table->table[i].sock, fds)) {
                        table->table[i].handler(table->table[i].sock,
                                                table->table[i].eloop_data,
                                                table->table[i].user_data);
                        if (table->changed)
                                break;
                }
        }
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
        struct eloop_sock *table;
        int i;

        for (i = 0; i < nfds; i++) {
                table = &eloop.epoll_table[events[i].data.fd];
                if (table->handler == NULL)
                        continue;
                table->handler(table->sock, table->eloop_data,
                               table->user_data);
        }
}
#endif /* CONFIG_ELOOP_EPOLL */


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
        if (table) {
                int i;
                for (i = 0; i < table->count && table->table; i++) {
                        wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
                                   "sock=%d eloop_data=%p user_data=%p "
                                   "handler=%p",
                                   table->table[i].sock,
                                   table->table[i].eloop_data,
                                   table->table[i].user_data,
                                   table->table[i].handler);
                        wpa_trace_dump_funcname("eloop unregistered socket "
                                                "handler",
                                                table->table[i].handler);
                        wpa_trace_dump("eloop sock", &table->table[i]);
                }
                os_free(table->table);
        }
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
                             void *eloop_data, void *user_data)
{
        return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
                                   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
        eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
        switch (type) {
        case EVENT_TYPE_READ:
                return &eloop.readers;
        case EVENT_TYPE_WRITE:
                return &eloop.writers;
        case EVENT_TYPE_EXCEPTION:
                return &eloop.exceptions;
        }

        return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
                        eloop_sock_handler handler,
                        void *eloop_data, void *user_data)
{
        struct eloop_sock_table *table;

        assert(sock >= 0);
        table = eloop_get_sock_table(type);
        return eloop_sock_table_add_sock(table, sock, handler,
                                         eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
        struct eloop_sock_table *table;

        table = eloop_get_sock_table(type);
        eloop_sock_table_remove_sock(table, sock);
}


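/*
 * Timeouts are kept in eloop.timeout sorted by increasing expiry time
 * (os_reltime from os_get_reltime()), so the list head is always the next
 * timeout to fire.
 */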
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
                           eloop_timeout_handler handler,
                           void *eloop_data, void *user_data)
{
        struct eloop_timeout *timeout, *tmp;
        os_time_t now_sec;

        timeout = os_zalloc(sizeof(*timeout));
        if (timeout == NULL)
                return -1;
        if (os_get_reltime(&timeout->time) < 0) {
                os_free(timeout);
                return -1;
        }
        now_sec = timeout->time.sec;
        timeout->time.sec += secs;
        if (timeout->time.sec < now_sec) {
                /*
                 * Integer overflow - assume a timeout this long is
                 * effectively infinite, i.e., it would never trigger.
                 */
                wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
                           "ever happen - ignore it", secs);
                os_free(timeout);
                return 0;
        }
        timeout->time.usec += usecs;
        while (timeout->time.usec >= 1000000) {
                timeout->time.sec++;
                timeout->time.usec -= 1000000;
        }
        timeout->eloop_data = eloop_data;
        timeout->user_data = user_data;
        timeout->handler = handler;
        wpa_trace_add_ref(timeout, eloop, eloop_data);
        wpa_trace_add_ref(timeout, user, user_data);
        wpa_trace_record(timeout);

        /* Maintain timeouts in order of increasing time */
        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (os_reltime_before(&timeout->time, &tmp->time)) {
                        dl_list_add(tmp->list.prev, &timeout->list);
                        return 0;
                }
        }
        dl_list_add_tail(&eloop.timeout, &timeout->list);

        return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
        dl_list_del(&timeout->list);
        wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
        wpa_trace_remove_ref(timeout, user, timeout->user_data);
        os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
                         void *eloop_data, void *user_data)
{
        struct eloop_timeout *timeout, *prev;
        int removed = 0;

        dl_list_for_each_safe(timeout, prev, &eloop.timeout,
                              struct eloop_timeout, list) {
                if (timeout->handler == handler &&
                    (timeout->eloop_data == eloop_data ||
                     eloop_data == ELOOP_ALL_CTX) &&
                    (timeout->user_data == user_data ||
                     user_data == ELOOP_ALL_CTX)) {
                        eloop_remove_timeout(timeout);
                        removed++;
                }
        }

        return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
                             void *eloop_data, void *user_data,
                             struct os_reltime *remaining)
{
        struct eloop_timeout *timeout, *prev;
        int removed = 0;
        struct os_reltime now;

        os_get_reltime(&now);
        remaining->sec = remaining->usec = 0;

        dl_list_for_each_safe(timeout, prev, &eloop.timeout,
                              struct eloop_timeout, list) {
                if (timeout->handler == handler &&
                    (timeout->eloop_data == eloop_data) &&
                    (timeout->user_data == user_data)) {
                        removed = 1;
                        if (os_reltime_before(&now, &timeout->time))
                                os_reltime_sub(&timeout->time, &now, remaining);
                        eloop_remove_timeout(timeout);
                        break;
                }
        }
        return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
                                void *eloop_data, void *user_data)
{
        struct eloop_timeout *tmp;

        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (tmp->handler == handler &&
                    tmp->eloop_data == eloop_data &&
                    tmp->user_data == user_data)
                        return 1;
        }

        return 0;
}


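/*
 * eloop_deplete_timeout() shortens an already registered timeout if the
 * requested time is earlier than the remaining time, while
 * eloop_replenish_timeout() extends it if the requested time is later.
 * Both return 1 if the timeout was rescheduled, 0 if it was left unchanged
 * and -1 if no matching timeout was found.
 */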
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
                          eloop_timeout_handler handler, void *eloop_data,
                          void *user_data)
{
        struct os_reltime now, requested, remaining;
        struct eloop_timeout *tmp;

        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (tmp->handler == handler &&
                    tmp->eloop_data == eloop_data &&
                    tmp->user_data == user_data) {
                        requested.sec = req_secs;
                        requested.usec = req_usecs;
                        os_get_reltime(&now);
                        os_reltime_sub(&tmp->time, &now, &remaining);
                        if (os_reltime_before(&requested, &remaining)) {
                                eloop_cancel_timeout(handler, eloop_data,
                                                     user_data);
                                eloop_register_timeout(requested.sec,
                                                       requested.usec,
                                                       handler, eloop_data,
                                                       user_data);
                                return 1;
                        }
                        return 0;
                }
        }

        return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
                            eloop_timeout_handler handler, void *eloop_data,
                            void *user_data)
{
        struct os_reltime now, requested, remaining;
        struct eloop_timeout *tmp;

        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (tmp->handler == handler &&
                    tmp->eloop_data == eloop_data &&
                    tmp->user_data == user_data) {
                        requested.sec = req_secs;
                        requested.usec = req_usecs;
                        os_get_reltime(&now);
                        os_reltime_sub(&tmp->time, &now, &remaining);
                        if (os_reltime_before(&remaining, &requested)) {
                                eloop_cancel_timeout(handler, eloop_data,
                                                     user_data);
                                eloop_register_timeout(requested.sec,
                                                       requested.usec,
                                                       handler, eloop_data,
                                                       user_data);
                                return 1;
                        }
                        return 0;
                }
        }

        return -1;
}


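/*
 * Signal handling: the actual signal handler only records which signals were
 * received; the registered callbacks run later from
 * eloop_process_pending_signals() in eloop_run() context. SIGINT/SIGTERM also
 * arm a two-second SIGALRM watchdog so a stuck loop cannot block shutdown.
 */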
#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
        wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
                   "two seconds. Looks like there\n"
                   "is a bug that ends up in a busy loop that "
                   "prevents clean shutdown.\n"
                   "Killing program forcefully.\n");
        exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
        int i;

#ifndef CONFIG_NATIVE_WINDOWS
        if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
                /* Use SIGALRM to break out from potential busy loops that
                 * would not allow the program to be killed. */
                eloop.pending_terminate = 1;
                signal(SIGALRM, eloop_handle_alarm);
                alarm(2);
        }
#endif /* CONFIG_NATIVE_WINDOWS */

        eloop.signaled++;
        for (i = 0; i < eloop.signal_count; i++) {
                if (eloop.signals[i].sig == sig) {
                        eloop.signals[i].signaled++;
                        break;
                }
        }
}


static void eloop_process_pending_signals(void)
{
        int i;

        if (eloop.signaled == 0)
                return;
        eloop.signaled = 0;

        if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
                alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
                eloop.pending_terminate = 0;
        }

        for (i = 0; i < eloop.signal_count; i++) {
                if (eloop.signals[i].signaled) {
                        eloop.signals[i].signaled = 0;
                        eloop.signals[i].handler(eloop.signals[i].sig,
                                                 eloop.signals[i].user_data);
                }
        }
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
                          void *user_data)
{
        struct eloop_signal *tmp;

        tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
                               sizeof(struct eloop_signal));
        if (tmp == NULL)
                return -1;

        tmp[eloop.signal_count].sig = sig;
        tmp[eloop.signal_count].user_data = user_data;
        tmp[eloop.signal_count].handler = handler;
        tmp[eloop.signal_count].signaled = 0;
        eloop.signal_count++;
        eloop.signals = tmp;
        signal(sig, eloop_handle_signal);

        return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
                                    void *user_data)
{
        int ret = eloop_register_signal(SIGINT, handler, user_data);
        if (ret == 0)
                ret = eloop_register_signal(SIGTERM, handler, user_data);
        return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
                                   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
        return 0;
#else /* CONFIG_NATIVE_WINDOWS */
        return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


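/*
 * Main event loop: on each iteration the next timeout (if any) is converted
 * into a poll()/select()/epoll_wait() timeout, pending signals are delivered,
 * expired timeouts are run and ready sockets are dispatched, until
 * eloop_terminate() is called or nothing is left to wait for.
 *
 * Typical caller sequence (sketch): eloop_init(); register sockets, timeouts
 * and signal handlers; eloop_run(); eloop_destroy().
 */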
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
        int num_poll_fds;
        int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
        fd_set *rfds, *wfds, *efds;
        struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
        int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
        int res;
        struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
        rfds = os_malloc(sizeof(*rfds));
        wfds = os_malloc(sizeof(*wfds));
        efds = os_malloc(sizeof(*efds));
        if (rfds == NULL || wfds == NULL || efds == NULL)
                goto out;
#endif /* CONFIG_ELOOP_SELECT */

        while (!eloop.terminate &&
               (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
                eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
                struct eloop_timeout *timeout;
                timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
                                        list);
                if (timeout) {
                        os_get_reltime(&now);
                        if (os_reltime_before(&now, &timeout->time))
                                os_reltime_sub(&timeout->time, &now, &tv);
                        else
                                tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
                        timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
                        _tv.tv_sec = tv.sec;
                        _tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
                }

#ifdef CONFIG_ELOOP_POLL
                num_poll_fds = eloop_sock_table_set_fds(
                        &eloop.readers, &eloop.writers, &eloop.exceptions,
                        eloop.pollfds, eloop.pollfds_map,
                        eloop.max_pollfd_map);
                res = poll(eloop.pollfds, num_poll_fds,
                           timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
                eloop_sock_table_set_fds(&eloop.readers, rfds);
                eloop_sock_table_set_fds(&eloop.writers, wfds);
                eloop_sock_table_set_fds(&eloop.exceptions, efds);
                res = select(eloop.max_sock + 1, rfds, wfds, efds,
                             timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
                if (eloop.count == 0) {
                        res = 0;
                } else {
                        res = epoll_wait(eloop.epollfd, eloop.epoll_events,
                                         eloop.count, timeout_ms);
                }
#endif /* CONFIG_ELOOP_EPOLL */
                if (res < 0 && errno != EINTR && errno != 0) {
                        wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
                                   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
                                   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
                                   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
                                   , strerror(errno));
                        goto out;
                }
                eloop_process_pending_signals();

                /* check if some registered timeouts have occurred */
                timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
                                        list);
                if (timeout) {
                        os_get_reltime(&now);
                        if (!os_reltime_before(&now, &timeout->time)) {
                                void *eloop_data = timeout->eloop_data;
                                void *user_data = timeout->user_data;
                                eloop_timeout_handler handler =
                                        timeout->handler;
                                eloop_remove_timeout(timeout);
                                handler(eloop_data, user_data);
                        }

                }

                if (res <= 0)
                        continue;

#ifdef CONFIG_ELOOP_POLL
                eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
                                          &eloop.exceptions, eloop.pollfds_map,
                                          eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
                eloop_sock_table_dispatch(&eloop.readers, rfds);
                eloop_sock_table_dispatch(&eloop.writers, wfds);
                eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
                eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
        }

        eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
        os_free(rfds);
        os_free(wfds);
        os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
        return;
}


void eloop_terminate(void)
{
        eloop.terminate = 1;
}


void eloop_destroy(void)
{
        struct eloop_timeout *timeout, *prev;
        struct os_reltime now;

        os_get_reltime(&now);
        dl_list_for_each_safe(timeout, prev, &eloop.timeout,
                              struct eloop_timeout, list) {
                int sec, usec;
                sec = timeout->time.sec - now.sec;
                usec = timeout->time.usec - now.usec;
                if (timeout->time.usec < now.usec) {
                        sec--;
                        usec += 1000000;
                }
                wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
                           "eloop_data=%p user_data=%p handler=%p",
                           sec, usec, timeout->eloop_data, timeout->user_data,
                           timeout->handler);
                wpa_trace_dump_funcname("eloop unregistered timeout handler",
                                        timeout->handler);
                wpa_trace_dump("eloop timeout", timeout);
                eloop_remove_timeout(timeout);
        }
        eloop_sock_table_destroy(&eloop.readers);
        eloop_sock_table_destroy(&eloop.writers);
        eloop_sock_table_destroy(&eloop.exceptions);
        os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
        os_free(eloop.pollfds);
        os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
        os_free(eloop.epoll_table);
        os_free(eloop.epoll_events);
        close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
}


int eloop_terminated(void)
{
        return eloop.terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
        struct pollfd pfd;

        if (sock < 0)
                return;

        os_memset(&pfd, 0, sizeof(pfd));
        pfd.fd = sock;
        pfd.events = POLLIN;

        poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
        /*
         * epoll() could be used here, but it would require four system calls
         * (epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close() of
         * the epoll fd), so select() is better for performance here.
         */
        fd_set rfds;

        if (sock < 0)
                return;

        FD_ZERO(&rfds);
        FD_SET(sock, &rfds);
        select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */