blob: 00b0beff0b78a5577f94ebb39fca7a74f7ad0c30 [file] [log] [blame]
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001/*
2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4 *
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -08005 * This software may be distributed under the terms of the BSD license.
6 * See README for more details.
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07007 */
8
9#include "includes.h"
Dmitry Shmidtdf5a7e42014-04-02 12:59:59 -070010#include <assert.h>
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -070011
12#include "common.h"
13#include "trace.h"
14#include "list.h"
15#include "eloop.h"
16
Dmitry Shmidt50b691d2014-05-21 14:01:45 -070017#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
18#error Do not define both of poll and epoll
19#endif
20
Dmitry Shmidtb97e4282016-02-08 10:16:07 -080021#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
22#error Do not define both of poll and kqueue
23#endif
24
25#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
26 !defined(CONFIG_ELOOP_KQUEUE)
Dmitry Shmidt50b691d2014-05-21 14:01:45 -070027#define CONFIG_ELOOP_SELECT
28#endif
29
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -080030#ifdef CONFIG_ELOOP_POLL
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -080031#include <poll.h>
32#endif /* CONFIG_ELOOP_POLL */
33
Dmitry Shmidt50b691d2014-05-21 14:01:45 -070034#ifdef CONFIG_ELOOP_EPOLL
35#include <sys/epoll.h>
36#endif /* CONFIG_ELOOP_EPOLL */
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -070037
Dmitry Shmidtb97e4282016-02-08 10:16:07 -080038#ifdef CONFIG_ELOOP_KQUEUE
39#include <sys/event.h>
40#endif /* CONFIG_ELOOP_KQUEUE */
41
/*
 * One registered socket: the fd, its handler, and the two opaque context
 * pointers (eloop_data, user_data) passed back to the handler on dispatch.
 * WPA_TRACE_* fields exist only in trace-enabled builds.
 */
struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
51
/*
 * One pending timeout, linked into eloop.timeout which is kept sorted by
 * increasing expiry time (see eloop_register_timeout()).
 */
struct eloop_timeout {
	struct dl_list list;
	/* Absolute expiry time on the os_get_reltime() clock */
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
62
/*
 * One registered signal handler. 'signaled' appears to be a pending-delivery
 * flag/counter set from signal context and consumed in the main loop —
 * NOTE(review): the consuming code is outside this view; confirm semantics
 * against eloop_handle_signal()/eloop_process_pending_signals().
 */
struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};
69
/*
 * One table of registered sockets; the loop keeps three of these
 * (readers, writers, exceptions).
 */
struct eloop_sock_table {
	size_t count;
	struct eloop_sock *table;
	/* Event type for this table; assigned in eloop_init() only for the
	 * epoll/kqueue backends, which need it to (re)queue events. */
	eloop_event_type type;
	/* Set whenever the table is modified; dispatch loops check it so they
	 * stop iterating over entries that may have been moved or freed. */
	int changed;
};
76
/*
 * Global event loop state: the three socket tables, the ordered timeout
 * list, registered signals, and per-backend (poll/epoll/kqueue)
 * bookkeeping. A single instance ('eloop') exists per process.
 */
struct eloop_data {
	/* Highest registered fd; used for select() and for sizing maps */
	int max_sock;

	size_t count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	size_t max_pollfd_map; /* number of pollfds_map currently allocated */
	size_t max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map; /* fd -> pollfd entry lookup */
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	/* fd-indexed copies of eloop_sock entries for O(1) dispatch */
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	size_t epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	size_t kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	/* Pending timeouts, ordered by increasing expiry time */
	struct dl_list timeout;

	size_t signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};
114
115static struct eloop_data eloop;
116
117
118#ifdef WPA_TRACE
119
/* SIGSEGV handler (WPA_TRACE builds only): dump a backtrace, then abort. */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
125
126static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
127{
Hai Shalomfdcde762020-04-02 11:19:20 -0700128 size_t i;
129
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700130 if (table == NULL || table->table == NULL)
131 return;
132 for (i = 0; i < table->count; i++) {
133 wpa_trace_add_ref(&table->table[i], eloop,
134 table->table[i].eloop_data);
135 wpa_trace_add_ref(&table->table[i], user,
136 table->table[i].user_data);
137 }
138}
139
140
141static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
142{
Hai Shalomfdcde762020-04-02 11:19:20 -0700143 size_t i;
144
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700145 if (table == NULL || table->table == NULL)
146 return;
147 for (i = 0; i < table->count; i++) {
148 wpa_trace_remove_ref(&table->table[i], eloop,
149 table->table[i].eloop_data);
150 wpa_trace_remove_ref(&table->table[i], user,
151 table->table[i].user_data);
152 }
153}
154
155#else /* WPA_TRACE */
156
157#define eloop_trace_sock_add_ref(table) do { } while (0)
158#define eloop_trace_sock_remove_ref(table) do { } while (0)
159
160#endif /* WPA_TRACE */
161
162
/*
 * Initialize the global event loop state.
 * Zeroes the 'eloop' singleton, initializes the timeout list, and creates
 * the backend fd (epoll or kqueue) when one of those backends is compiled
 * in. Returns 0 on success, -1 on backend fd creation failure.
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* These backends need each table to know its event type so sockets
	 * can be (re)queued to the kernel with the right filter/flags. */
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
193
194
Dmitry Shmidtb97e4282016-02-08 10:16:07 -0800195#ifdef CONFIG_ELOOP_EPOLL
/*
 * Register @sock with the epoll instance for the events implied by @type.
 * Returns 0 on success, -1 if epoll_ctl(ADD) fails.
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	/* data.fd is echoed back by epoll_wait() and used as the index into
	 * eloop.fd_table during dispatch. */
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
225#endif /* CONFIG_ELOOP_EPOLL */
226
227
228#ifdef CONFIG_ELOOP_KQUEUE
Hai Shalom39bc25d2019-02-06 16:32:13 -0800229
Hai Shalom74f70d42019-02-11 14:42:39 -0800230static short event_type_kevent_filter(eloop_event_type type)
231{
Hai Shalombf6e0ba2019-02-11 12:01:50 -0800232 switch (type) {
233 case EVENT_TYPE_READ:
Hai Shalom74f70d42019-02-11 14:42:39 -0800234 return EVFILT_READ;
Hai Shalombf6e0ba2019-02-11 12:01:50 -0800235 case EVENT_TYPE_WRITE:
Hai Shalom74f70d42019-02-11 14:42:39 -0800236 return EVFILT_WRITE;
Hai Shalombf6e0ba2019-02-11 12:01:50 -0800237 default:
Hai Shalom74f70d42019-02-11 14:42:39 -0800238 return 0;
Hai Shalombf6e0ba2019-02-11 12:01:50 -0800239 }
Hai Shalom74f70d42019-02-11 14:42:39 -0800240}
241
242
/*
 * Register @sock with the kqueue instance using the filter matching @type.
 * Returns 0 on success, -1 if kevent(EV_ADD) fails.
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
Hai Shalom74f70d42019-02-11 14:42:39 -0800255
Dmitry Shmidtb97e4282016-02-08 10:16:07 -0800256#endif /* CONFIG_ELOOP_KQUEUE */
257
258
/*
 * Add @sock with @handler (and its two context pointers) to @table.
 *
 * Grows all per-backend auxiliary arrays first (poll fd/map arrays,
 * epoll/kqueue fd_table and event buffers) so that the table append below
 * cannot leave them undersized. Trace references are dropped around the
 * realloc of the table since the entries may move in memory.
 *
 * Returns 0 on success, -1 on allocation failure or (epoll/kqueue) if
 * queueing the fd to the kernel fails. Note: in the latter case the entry
 * has already been appended to the table.
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	size_t next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* Grow the fd -> pollfd lookup map (with headroom of 50 entries) */
	if ((size_t) new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* Grow the pollfd array handed to poll() */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		size_t nmax = eloop.count + 1 + 50;

		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Grow the fd-indexed dispatch table */
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	/* Grow the epoll_wait() event buffer (doubling, starting at 8) */
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	/* Grow the kevent() result buffer (contents need not be preserved,
	 * so plain malloc + free of the old buffer is used) */
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	/* The realloc below may move the entries, so the existing trace
	 * references must be dropped first and re-added afterwards. */
	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	/* Keep a copy in the fd-indexed table for O(1) dispatch */
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}
383
384
/*
 * Remove @sock from @table (no-op if not found). The remaining entries are
 * compacted with memmove, so trace references are dropped and re-added
 * around the move. For epoll/kqueue the fd is also deregistered from the
 * kernel and its fd_table slot cleared.
 */
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	size_t i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}
431
432
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800433#ifdef CONFIG_ELOOP_POLL
434
/*
 * Look up the pollfd entry registered for @fd in @pollfds_map.
 * Returns NULL when @fd is out of the map's range [0, mx) or has no entry.
 */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < 0 || fd >= mx)
		return NULL;
	return pollfds_map[fd];
}
441
442
443static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
444 struct eloop_sock_table *writers,
445 struct eloop_sock_table *exceptions,
446 struct pollfd *pollfds,
447 struct pollfd **pollfds_map,
448 int max_pollfd_map)
449{
Hai Shalomfdcde762020-04-02 11:19:20 -0700450 size_t i;
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800451 int nxt = 0;
452 int fd;
453 struct pollfd *pfd;
454
455 /* Clear pollfd lookup map. It will be re-populated below. */
456 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
457
458 if (readers && readers->table) {
459 for (i = 0; i < readers->count; i++) {
460 fd = readers->table[i].sock;
461 assert(fd >= 0 && fd < max_pollfd_map);
462 pollfds[nxt].fd = fd;
463 pollfds[nxt].events = POLLIN;
464 pollfds[nxt].revents = 0;
465 pollfds_map[fd] = &(pollfds[nxt]);
466 nxt++;
467 }
468 }
469
470 if (writers && writers->table) {
471 for (i = 0; i < writers->count; i++) {
472 /*
473 * See if we already added this descriptor, update it
474 * if so.
475 */
476 fd = writers->table[i].sock;
477 assert(fd >= 0 && fd < max_pollfd_map);
478 pfd = pollfds_map[fd];
479 if (!pfd) {
480 pfd = &(pollfds[nxt]);
481 pfd->events = 0;
482 pfd->fd = fd;
483 pollfds[i].revents = 0;
484 pollfds_map[fd] = pfd;
485 nxt++;
486 }
Dmitry Shmidt04949592012-07-19 12:16:46 -0700487 pfd->events |= POLLOUT;
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800488 }
489 }
490
491 /*
492 * Exceptions are always checked when using poll, but I suppose it's
493 * possible that someone registered a socket *only* for exception
494 * handling. Set the POLLIN bit in this case.
495 */
496 if (exceptions && exceptions->table) {
497 for (i = 0; i < exceptions->count; i++) {
498 /*
499 * See if we already added this descriptor, just use it
500 * if so.
501 */
502 fd = exceptions->table[i].sock;
503 assert(fd >= 0 && fd < max_pollfd_map);
504 pfd = pollfds_map[fd];
505 if (!pfd) {
506 pfd = &(pollfds[nxt]);
507 pfd->events = POLLIN;
508 pfd->fd = fd;
509 pollfds[i].revents = 0;
510 pollfds_map[fd] = pfd;
511 nxt++;
512 }
513 }
514 }
515
516 return nxt;
517}
518
519
/*
 * Dispatch handlers for every socket in @table whose pollfd reports one of
 * the @revents bits. Returns 1 if the table was modified by a handler
 * (so the caller must stop using the current pollfds), 0 otherwise.
 */
static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	size_t i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		/* A handler may have (un)registered sockets; the table and
		 * pollfds may no longer match, so bail out. */
		if (table->changed)
			return 1;
	}

	return 0;
}
550
551
/*
 * Dispatch all ready sockets after poll(): readers first (including
 * error/hangup conditions), then writers, then exception-only sockets.
 * Stops early if a handler modified the corresponding table, since the
 * pollfds array may then be stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
570
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700571#endif /* CONFIG_ELOOP_POLL */
572
573#ifdef CONFIG_ELOOP_SELECT
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800574
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700575static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
576 fd_set *fds)
577{
Hai Shalomfdcde762020-04-02 11:19:20 -0700578 size_t i;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700579
580 FD_ZERO(fds);
581
582 if (table->table == NULL)
583 return;
584
Dmitry Shmidtdf5a7e42014-04-02 12:59:59 -0700585 for (i = 0; i < table->count; i++) {
586 assert(table->table[i].sock >= 0);
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700587 FD_SET(table->table[i].sock, fds);
Dmitry Shmidtdf5a7e42014-04-02 12:59:59 -0700588 }
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700589}
590
591
/*
 * Dispatch handlers for every socket in @table marked ready in @fds
 * (select() backend). Stops iterating if a handler modified the table,
 * since the entries may have moved or been freed.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	size_t i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}
611
Dmitry Shmidt50b691d2014-05-21 14:01:45 -0700612#endif /* CONFIG_ELOOP_SELECT */
613
614
615#ifdef CONFIG_ELOOP_EPOLL
/*
 * Dispatch handlers for the @nfds events returned by epoll_wait().
 * Uses the fd reported in each event as index into eloop.fd_table; slots
 * with a NULL handler (cleared on unregister) are skipped. Stops if any
 * table changed, since fd_table entries may be stale.
 */
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
633#endif /* CONFIG_ELOOP_EPOLL */
Dmitry Shmidtc5ec7f52012-03-06 16:33:24 -0800634
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700635
Dmitry Shmidtb97e4282016-02-08 10:16:07 -0800636#ifdef CONFIG_ELOOP_KQUEUE
637
/*
 * Dispatch handlers for the @nfds events returned by kevent().
 * Uses each event's ident (the fd) as index into eloop.fd_table; slots
 * with a NULL handler are skipped. Stops if any table changed, since
 * fd_table entries may be stale.
 */
static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
655
656
657static int eloop_sock_table_requeue(struct eloop_sock_table *table)
658{
Hai Shalomfdcde762020-04-02 11:19:20 -0700659 size_t i;
660 int r;
Dmitry Shmidtb97e4282016-02-08 10:16:07 -0800661
662 r = 0;
663 for (i = 0; i < table->count && table->table; i++) {
664 if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
665 r = -1;
666 }
667 return r;
668}
669
670#endif /* CONFIG_ELOOP_KQUEUE */
671
672
/*
 * Re-create the kernel event queue and re-register all sockets with it.
 * Needed with kqueue because a kqueue fd is not inherited across fork();
 * a no-op (returning 0) for the other backends.
 * Returns 0 on success, -1 on failure.
 */
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
696
697
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700698static void eloop_sock_table_destroy(struct eloop_sock_table *table)
699{
700 if (table) {
Hai Shalomfdcde762020-04-02 11:19:20 -0700701 size_t i;
702
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -0700703 for (i = 0; i < table->count && table->table; i++) {
704 wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
705 "sock=%d eloop_data=%p user_data=%p "
706 "handler=%p",
707 table->table[i].sock,
708 table->table[i].eloop_data,
709 table->table[i].user_data,
710 table->table[i].handler);
711 wpa_trace_dump_funcname("eloop unregistered socket "
712 "handler",
713 table->table[i].handler);
714 wpa_trace_dump("eloop sock", &table->table[i]);
715 }
716 os_free(table->table);
717 }
718}
719
720
/*
 * Convenience wrapper: register @sock for read events.
 * Returns 0 on success, -1 on failure (see eloop_register_sock()).
 */
int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}
727
728
/* Convenience wrapper: unregister a socket registered for read events. */
void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
733
734
735static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
736{
737 switch (type) {
738 case EVENT_TYPE_READ:
739 return &eloop.readers;
740 case EVENT_TYPE_WRITE:
741 return &eloop.writers;
742 case EVENT_TYPE_EXCEPTION:
743 return &eloop.exceptions;
744 }
745
746 return NULL;
747}
748
749
/*
 * Register @sock for events of @type with @handler and its two context
 * pointers. @sock must be a valid (non-negative) descriptor.
 * Returns 0 on success, -1 on failure.
 */
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}
761
762
/*
 * Unregister @sock from the table for @type. Safe to call for a socket
 * that is not registered (the removal is a no-op then).
 */
void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
770
771
/*
 * Register a timeout to fire @secs/@usecs from now with @handler and its
 * context pointers. The entry is inserted into eloop.timeout keeping the
 * list sorted by expiry time. A timeout so long that the absolute expiry
 * overflows os_time_t is silently dropped (treated as "never fires") and
 * 0 is still returned.
 * Returns 0 on success, -1 on allocation/clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	/* Wrapped past the maximum representable time */
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->time.usec += usecs;
	/* Normalize usec into [0, 1000000) */
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	/* usec carry may also have caused sec to wrap */
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;

overflow:
	/*
	 * Integer overflow - assume long enough timeout to be assumed
	 * to be infinite, i.e., the timeout would never happen.
	 */
	wpa_printf(MSG_DEBUG,
		   "ELOOP: Too long timeout (secs=%u usecs=%u) to ever happen - ignore it",
		   secs,usecs);
	os_free(timeout);
	return 0;
}
826
827
/*
 * Unlink @timeout from the timeout list, drop its trace references and
 * free it.
 */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
835
836
/*
 * Cancel all timeouts matching @handler and the given context pointers.
 * ELOOP_ALL_CTX acts as a wildcard for eloop_data and/or user_data.
 * Returns the number of timeouts removed.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	/* _safe iteration: eloop_remove_timeout() frees the current entry */
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
857
858
/*
 * Cancel at most one timeout matching @handler/@eloop_data/@user_data
 * exactly (no wildcards). On removal, *remaining is set to the time that
 * was left before expiry (zero if already due); it is zeroed when nothing
 * matches. Returns 1 if a timeout was removed, 0 otherwise.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}
884
885
/*
 * Return 1 if a timeout with exactly this handler and context pointers is
 * currently pending, 0 otherwise.
 */
int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}
900
901
/*
 * Shorten an existing matching timeout to @req_secs/@req_usecs from now,
 * but only if the requested interval is shorter than the time remaining.
 * Returns 1 if the timeout was rescheduled, 0 if it was found but already
 * sooner than requested, -1 if no matching timeout exists.
 */
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				/* Re-register rather than edit in place so
				 * the list stays sorted */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
932
933
Dmitry Shmidt54605472013-11-08 11:10:19 -0800934int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
935 eloop_timeout_handler handler, void *eloop_data,
936 void *user_data)
937{
Dmitry Shmidtfa3fc4a2013-11-21 13:34:38 -0800938 struct os_reltime now, requested, remaining;
Dmitry Shmidt54605472013-11-08 11:10:19 -0800939 struct eloop_timeout *tmp;
940
941 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
942 if (tmp->handler == handler &&
943 tmp->eloop_data == eloop_data &&
944 tmp->user_data == user_data) {
945 requested.sec = req_secs;
946 requested.usec = req_usecs;
Dmitry Shmidtfa3fc4a2013-11-21 13:34:38 -0800947 os_get_reltime(&now);
948 os_reltime_sub(&tmp->time, &now, &remaining);
949 if (os_reltime_before(&remaining, &requested)) {
Dmitry Shmidt54605472013-11-08 11:10:19 -0800950 eloop_cancel_timeout(handler, eloop_data,
951 user_data);
952 eloop_register_timeout(requested.sec,
953 requested.usec,
954 handler, eloop_data,
955 user_data);
956 return 1;
957 }
Dmitry Shmidtfb79edc2014-01-10 10:45:54 -0800958 return 0;
Dmitry Shmidt54605472013-11-08 11:10:19 -0800959 }
960 }
961
Dmitry Shmidtfb79edc2014-01-10 10:45:54 -0800962 return -1;
Dmitry Shmidt54605472013-11-08 11:10:19 -0800963}
964
965
#ifndef CONFIG_NATIVE_WINDOWS
/*
 * SIGALRM handler armed by eloop_handle_signal() when SIGINT/SIGTERM is
 * received: if the process has not managed to shut down within the alarm
 * period (two seconds; see alarm(2) call below), assume the main loop is
 * stuck busy-looping and terminate the process forcefully.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */
977
978
/*
 * Common signal handler for all signals registered via
 * eloop_register_signal(). Runs in signal context, so it only records
 * which signal fired; the user callbacks are invoked later from
 * eloop_process_pending_signals() in normal (main loop) context.
 */
static void eloop_handle_signal(int sig)
{
	size_t i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	/* Mark both the global "some signal fired" counter and the matching
	 * per-signal entry so the main loop knows which handler to run. */
	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}
1001
1002
1003static void eloop_process_pending_signals(void)
1004{
Hai Shalomfdcde762020-04-02 11:19:20 -07001005 size_t i;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001006
1007 if (eloop.signaled == 0)
1008 return;
1009 eloop.signaled = 0;
1010
1011 if (eloop.pending_terminate) {
1012#ifndef CONFIG_NATIVE_WINDOWS
1013 alarm(0);
1014#endif /* CONFIG_NATIVE_WINDOWS */
1015 eloop.pending_terminate = 0;
1016 }
1017
1018 for (i = 0; i < eloop.signal_count; i++) {
1019 if (eloop.signals[i].signaled) {
1020 eloop.signals[i].signaled = 0;
1021 eloop.signals[i].handler(eloop.signals[i].sig,
1022 eloop.signals[i].user_data);
1023 }
1024 }
1025}
1026
1027
1028int eloop_register_signal(int sig, eloop_signal_handler handler,
1029 void *user_data)
1030{
1031 struct eloop_signal *tmp;
1032
Dmitry Shmidt61d9df32012-08-29 16:22:06 -07001033 tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
1034 sizeof(struct eloop_signal));
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001035 if (tmp == NULL)
1036 return -1;
1037
1038 tmp[eloop.signal_count].sig = sig;
1039 tmp[eloop.signal_count].user_data = user_data;
1040 tmp[eloop.signal_count].handler = handler;
1041 tmp[eloop.signal_count].signaled = 0;
1042 eloop.signal_count++;
1043 eloop.signals = tmp;
1044 signal(sig, eloop_handle_signal);
1045
1046 return 0;
1047}
1048
1049
1050int eloop_register_signal_terminate(eloop_signal_handler handler,
1051 void *user_data)
1052{
1053 int ret = eloop_register_signal(SIGINT, handler, user_data);
1054 if (ret == 0)
1055 ret = eloop_register_signal(SIGTERM, handler, user_data);
1056 return ret;
1057}
1058
1059
/*
 * eloop_register_signal_reconfig - Register a reconfiguration handler
 * Registers the handler for SIGHUP. On native Windows builds this is a
 * no-op (SIGHUP does not exist there) and 0 is returned.
 * Returns: 0 on success, -1 on failure.
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
1069
1070
/*
 * eloop_run - Main event loop
 *
 * Blocks on the configured multiplexing backend (poll/select/epoll/kqueue,
 * chosen at compile time) and dispatches socket events, timeouts, and
 * pending signals until eloop_terminate() is called or there is nothing
 * left to wait for (no timeouts and no registered sockets).
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	/* fd_set can be large; allocate from heap rather than stack */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		/* Compute wait time until the nearest timeout (the timeout
		 * list is kept ordered; the first entry expires first) */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

		/* Wait for events; with no pending timeout, block
		 * indefinitely (NULL timeout / -1 ms) */
#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		/* EINTR (signal during the wait) is expected and handled by
		 * the signal processing below; any other failure is fatal */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */

				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();


		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				/* Copy handler info before removing the entry
				 * so the handler may safely re-register */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	/* Clear the flag so a later eloop_run() call can run again */
	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}
1254
1255
/*
 * eloop_terminate - Request the main loop to exit
 * Sets a flag that eloop_run() checks at the top of each iteration; the
 * loop exits cleanly on its next pass rather than immediately.
 */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
1260
1261
/*
 * eloop_destroy - Free all resources held by the event loop
 *
 * Logs and removes any timeouts still registered (with trace dumps to help
 * find the code that leaked them), destroys the socket tables, frees the
 * signal handler table, and releases backend-specific state.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Remaining time, manually normalized so usec stays in
		 * [0, 1000000) for the %06d format below */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1307
1308
1309int eloop_terminated(void)
1310{
Dmitry Shmidtd80a4012015-11-05 16:35:40 -08001311 return eloop.terminate || eloop.pending_terminate;
Dmitry Shmidt8d520ff2011-05-09 14:06:53 -07001312}
1313
1314
/*
 * eloop_wait_for_read_sock - Block until the given socket is readable
 * @sock: File descriptor to wait on; negative values are ignored
 *
 * Performs a one-shot synchronous wait outside the normal event loop
 * dispatch, using the compile-time selected backend. No data is read;
 * the caller is expected to do the actual read afterwards.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We can use epoll() here. But epoll() requires 4 system calls.
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
	 * epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	/* Reject invalid descriptors, matching the poll/select variants */
	if (sock < 0)
		return;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
Dmitry Shmidt50b691d2014-05-21 14:01:45 -07001356
1357#ifdef CONFIG_ELOOP_SELECT
1358#undef CONFIG_ELOOP_SELECT
1359#endif /* CONFIG_ELOOP_SELECT */