/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both poll and epoll
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both poll and kqueue
#endif

#if defined(CONFIG_ELOOP_EPOLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both epoll and kqueue
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	int kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}


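/*
 * Typical lifecycle, as an illustrative sketch only (the callback name and
 * the descriptor are hypothetical, not part of this file): initialize the
 * loop, register event sources, run until terminated, then clean up.
 *
 *	static void example_read_cb(int sock, void *eloop_ctx, void *sock_ctx)
 *	{
 *		... read from sock ...
 *	}
 *
 *	if (eloop_init() == 0) {
 *		eloop_register_read_sock(sock, example_read_cb, NULL, NULL);
 *		eloop_run();
 *		eloop_destroy();
 *	}
 */
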
#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static short event_type_kevent_filter(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return EVFILT_READ;
	case EVENT_TYPE_WRITE:
		return EVFILT_WRITE;
	default:
		return 0;
	}
}


static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}

#endif /* CONFIG_ELOOP_KQUEUE */


static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: realloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	int i, r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */


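/*
 * Re-register all sockets with the event mechanism. This matters on the
 * kqueue build: a kqueue descriptor is not inherited by a child created
 * with fork(), so a forked process has to create a new queue and re-add
 * every registered socket before calling eloop_run(). For the other
 * mechanisms this is a no-op that returns 0.
 */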
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long is
		 * effectively infinite, i.e., it would never trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}


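/*
 * Illustrative sketch of the common re-arming idiom (handler name
 * hypothetical): a registered timeout fires only once, so a periodic
 * task re-registers itself from its own handler.
 *
 *	static void example_tick(void *eloop_ctx, void *timeout_ctx)
 *	{
 *		... periodic work ...
 *		eloop_register_timeout(1, 0, example_tick, eloop_ctx,
 *				       timeout_ctx);
 *	}
 */
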
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


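/*
 * Shared return convention for the two helpers below: 1 if the matching
 * timeout was rescheduled to the requested time, 0 if it was found but not
 * changed (eloop_deplete_timeout() only shortens a timeout and
 * eloop_replenish_timeout() only extends one), and -1 if no matching
 * timeout is registered. For example, with a timeout 5 s away,
 * eloop_deplete_timeout(1, 0, ...) moves it to 1 s and returns 1, while
 * eloop_replenish_timeout(1, 0, ...) leaves it alone and returns 0.
 */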
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there is a bug that ends up in a "
		   "busy loop that prevents clean shutdown. Killing program "
		   "forcefully.");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}

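/*
 * Illustrative sketch of a terminate handler (the handler name is
 * hypothetical): it normally just requests loop exit, after which
 * eloop_run() returns.
 *
 *	static void example_terminate(int sig, void *signal_ctx)
 *	{
 *		eloop_terminate();
 *	}
 *
 *	eloop_register_signal_terminate(example_terminate, NULL);
 */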

int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


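/*
 * Run the event loop: on each iteration, wait via poll()/select()/
 * epoll_wait()/kevent() until the nearest registered timeout or socket
 * activity, process any pending signals, dispatch the expired timeout (if
 * any), and finally dispatch socket handlers. Wait results are discarded
 * when a handler changed the socket tables, since the descriptors may no
 * longer match. The loop exits on eloop_terminate() or when no timeouts
 * or sockets remain registered.
 */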
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


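/*
 * Block the caller until the given descriptor is readable. This bypasses
 * the registered handler tables, so it is only suitable for simple
 * one-shot waits, e.g. (sketch; sock is an already-open descriptor):
 *
 *	eloop_wait_for_read_sock(sock);
 *	res = recv(sock, buf, sizeof(buf), 0);
 */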
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here, but that would require four system
	 * calls: epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and
	 * close() for the epoll fd, so select() performs better for this
	 * one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */