/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both of poll and epoll
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

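/*
 * Backend selection: this file builds exactly one of three I/O multiplexing
 * backends. CONFIG_ELOOP_POLL selects poll(), CONFIG_ELOOP_EPOLL selects
 * epoll(), and if neither is defined the code above falls back to
 * CONFIG_ELOOP_SELECT, i.e., the original select() based loop.
 */
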
struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
#ifdef CONFIG_ELOOP_EPOLL
	eloop_event_type type;
#else /* CONFIG_ELOOP_EPOLL */
	int changed;
#endif /* CONFIG_ELOOP_EPOLL */
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	int epoll_max_fd;
	struct eloop_sock *epoll_table;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;
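
/*
 * Rough usage sketch (illustrative only; the handler names and ctx pointer
 * below are placeholders, not part of this file): a typical caller
 * initializes the loop, registers descriptors and timeouts, runs the loop,
 * and tears it down.
 *
 *	eloop_init();
 *	eloop_register_read_sock(sock, my_read_handler, ctx, NULL);
 *	eloop_register_timeout(1, 0, my_timeout_handler, ctx, NULL);
 *	eloop_register_signal_terminate(my_terminate_handler, ctx);
 *	eloop_run();
 *	eloop_destroy();
 */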


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}


static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct eloop_sock *temp_table;
	struct epoll_event ev, *temp_events;
	int next;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	if (new_max_sock >= eloop.epoll_max_fd) {
		next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
		temp_table = os_realloc_array(eloop.epoll_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.epoll_max_fd = next;
		eloop.epoll_table = temp_table;
	}

	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL)
		return -1;

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
#ifndef CONFIG_ELOOP_EPOLL
	table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
	eloop_trace_sock_add_ref(table);

#ifdef CONFIG_ELOOP_EPOLL
	os_memset(&ev, 0, sizeof(ev));
	switch (table->type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
	os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
#ifndef CONFIG_ELOOP_EPOLL
	table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
}


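/*
 * Note on the poll() backend: eloop.pollfds is a packed array that is
 * rebuilt from the reader/writer/exception tables on every loop iteration,
 * while eloop.pollfds_map is an fd-indexed lookup table pointing back into
 * that array so a descriptor registered in more than one table reuses a
 * single struct pollfd entry.
 */
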
#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.epoll_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume the requested timeout is long
		 * enough to be effectively infinite, i.e., it would never
		 * trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
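
/*
 * Because the timeout list is kept sorted by expiry, eloop_run() only needs
 * to look at the list head to compute how long the next poll()/select()/
 * epoll_wait() call may block; insertion above is O(n) in the number of
 * pending timeouts.
 */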


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
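
/*
 * eloop_deplete_timeout() shortens an existing matching timeout if the
 * requested interval would expire earlier, while eloop_replenish_timeout()
 * extends it if the requested interval would expire later. Both return 1
 * when the timeout was rescheduled, 0 when it was left unchanged, and -1
 * when no matching timeout was found.
 */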


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


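/*
 * Signal handling: the handler installed with signal() only records which
 * signal fired; the user callbacks registered via eloop_register_signal*()
 * are invoked later from eloop_run() through
 * eloop_process_pending_signals(), which keeps async-signal-unsafe work out
 * of signal context.
 */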
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


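/*
 * Main loop: each iteration computes the wait time from the head of the
 * sorted timeout list, blocks in poll()/select()/epoll_wait(), runs any
 * pending signal callbacks, fires the first expired timeout (if any), and
 * then dispatches socket handlers for the descriptors reported ready.
 */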
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
				   , strerror(errno));
			goto out;
		}
		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_table);
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
}


int eloop_terminated(void)
{
	return eloop.terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * epoll() could be used here, but it would require four system calls
	 * (epoll_create1(), epoll_ctl(EPOLL_CTL_ADD), epoll_wait(), and
	 * close() for the epoll fd), so select() performs better for this
	 * one-shot wait.
	 */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001098 fd_set rfds;
1099
1100 if (sock < 0)
1101 return;
1102
1103 FD_ZERO(&rfds);
1104 FD_SET(sock, &rfds);
1105 select(sock + 1, &rfds, NULL, NULL, NULL);
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001106#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001107}
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001108
1109#ifdef CONFIG_ELOOP_SELECT
1110#undef CONFIG_ELOOP_SELECT
1111#endif /* CONFIG_ELOOP_SELECT */