uloop.c

/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2016 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <limits.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#include <sys/timerfd.h>
#endif
#include <sys/wait.h>
struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);
static struct list_head signals = LIST_HEAD_INIT(signals);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static int uloop_status = 0;
static bool do_sigchld = false;

static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;
static int uloop_run_depth = 0;

uloop_fd_handler uloop_fd_set_cb = NULL;

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags);

#ifdef USE_KQUEUE
#include "uloop-kqueue.c"
#endif

#ifdef USE_EPOLL
#include "uloop-epoll.c"
#endif
static void set_signo(uint64_t *signums, int signo)
{
	/* use a 64-bit constant so signals above 32 fit in the bitmap */
	if (signo >= 1 && signo <= 64)
		*signums |= (1ULL << (signo - 1));
}

static bool get_signo(uint64_t signums, int signo)
{
	return (signo >= 1) && (signo <= 64) && (signums & (1ULL << (signo - 1)));
}

static void signal_consume(struct uloop_fd *fd, unsigned int events)
{
	struct uloop_signal *usig, *usig_next;
	uint64_t signums = 0;
	uint8_t buf[32];
	ssize_t nsigs;

	/* drain the waker pipe; each byte is the number of a pending signal */
	do {
		nsigs = read(fd->fd, buf, sizeof(buf));
		for (ssize_t i = 0; i < nsigs; i++)
			set_signo(&signums, buf[i]);
	} while (nsigs > 0);

	list_for_each_entry_safe(usig, usig_next, &signals, list)
		if (get_signo(signums, usig->signo))
			usig->cb(usig);
}
static int waker_pipe = -1;
static struct uloop_fd waker_fd = {
	.fd = -1,
	.cb = signal_consume,
};

static void waker_init_fd(int fd)
{
	fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
}

static int waker_init(void)
{
	int fds[2];

	if (waker_pipe >= 0)
		return 0;

	if (pipe(fds) < 0)
		return -1;

	waker_init_fd(fds[0]);
	waker_init_fd(fds[1]);
	waker_pipe = fds[1];

	waker_fd.fd = fds[0];
	waker_fd.cb = signal_consume;
	uloop_fd_add(&waker_fd, ULOOP_READ);

	return 0;
}

static void uloop_setup_signals(bool add);

int uloop_init(void)
{
	if (uloop_init_pollfd() < 0)
		return -1;

	if (waker_init() < 0) {
		uloop_done();
		return -1;
	}

	uloop_setup_signals(true);

	return 0;
}
static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}

static void uloop_run_events(int64_t timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		/* dispatch at most one fd per call so timeouts and SIGCHLD are re-checked in between */
		return;
	}
}
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	if (uloop_fd_set_cb)
		uloop_fd_set_cb(sock, flags);

	sock->flags = flags;
	sock->registered = true;
	sock->eof = false;
	sock->error = false;

out:
	return ret;
}

int uloop_fd_delete(struct uloop_fd *fd)
{
	int ret;
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	if (uloop_fd_set_cb)
		uloop_fd_set_cb(fd, 0);

	fd->registered = false;
	uloop_fd_stack_event(fd, -1);

	ret = __uloop_fd_delete(fd);
	fd->flags = 0;

	return ret;
}

static int64_t tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec > 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}
int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	int64_t td;
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	td = tv_diff(&timeout->time, &now);

	if (td > INT_MAX)
		return INT_MAX;
	else if (td < INT_MIN)
		return INT_MIN;
	else
		return (int)td;
}

int64_t uloop_timeout_remaining64(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}

int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid < 0 && errno == EINTR)
			continue;

		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

int uloop_interval_set(struct uloop_interval *timer, unsigned int msecs)
{
	return timer_register(timer, msecs);
}

int uloop_interval_cancel(struct uloop_interval *timer)
{
	return timer_remove(timer);
}

int64_t uloop_interval_remaining(struct uloop_interval *timer)
{
	return timer_next(timer);
}
static void uloop_signal_wake(int signo)
{
	uint8_t sigbyte = signo;

	if (signo == SIGCHLD)
		do_sigchld = true;

	do {
		if (write(waker_pipe, &sigbyte, 1) < 0) {
			if (errno == EINTR)
				continue;
		}
		break;
	} while (1);
}

static void uloop_handle_sigint(int signo)
{
	uloop_status = signo;
	uloop_cancelled = true;
	uloop_signal_wake(signo);
}

static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction *old, bool add)
{
	struct sigaction s;
	struct sigaction *act;

	act = NULL;
	sigaction(signum, NULL, &s);

	if (add) {
		if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
			memcpy(old, &s, sizeof(struct sigaction));
			s.sa_handler = handler;
			s.sa_flags = 0;
			act = &s;
		}
	}
	else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
		act = old;
	}

	if (act != NULL)
		sigaction(signum, act, NULL);
}
static void uloop_ignore_signal(int signum, bool ignore)
{
	struct sigaction s;
	void *new_handler = NULL;

	sigaction(signum, NULL, &s);

	if (ignore) {
		if (s.sa_handler == SIG_DFL) /* Ignore only if there isn't any custom handler */
			new_handler = SIG_IGN;
	} else {
		if (s.sa_handler == SIG_IGN) /* Restore only if no one modified our SIG_IGN */
			new_handler = SIG_DFL;
	}

	if (new_handler) {
		s.sa_handler = new_handler;
		s.sa_flags = 0;
		sigaction(signum, &s, NULL);
	}
}

static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld, old_sigterm;

	uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
	uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);

	if (uloop_handle_sigchld)
		uloop_install_handler(SIGCHLD, uloop_signal_wake, &old_sigchld, add);

	uloop_ignore_signal(SIGPIPE, add);
}
int uloop_signal_add(struct uloop_signal *s)
{
	struct list_head *h = &signals;
	struct uloop_signal *tmp;
	struct sigaction sa;

	if (s->pending)
		return -1;

	list_for_each_entry(tmp, &signals, list) {
		if (tmp->signo > s->signo) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&s->list, h);
	s->pending = true;

	sigaction(s->signo, NULL, &s->orig);
	if (s->orig.sa_handler != uloop_signal_wake) {
		sa.sa_handler = uloop_signal_wake;
		sa.sa_flags = 0;
		sigemptyset(&sa.sa_mask);
		sigaction(s->signo, &sa, NULL);
	}

	return 0;
}

int uloop_signal_delete(struct uloop_signal *s)
{
	if (!s->pending)
		return -1;

	list_del(&s->list);
	s->pending = false;

	if (s->orig.sa_handler != uloop_signal_wake)
		sigaction(s->signo, &s->orig, NULL);

	return 0;
}
int uloop_get_next_timeout(void)
{
	struct uloop_timeout *timeout;
	struct timeval tv;
	int64_t diff;

	if (list_empty(&timeouts))
		return -1;

	uloop_gettime(&tv);

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, &tv);
	if (diff < 0)
		return 0;
	if (diff > INT_MAX)
		return INT_MAX;

	return diff;
}

static void uloop_process_timeouts(void)
{
	struct uloop_timeout *t;
	struct timeval tv;

	if (list_empty(&timeouts))
		return;

	uloop_gettime(&tv);
	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, &tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

bool uloop_cancelling(void)
{
	return uloop_run_depth > 0 && uloop_cancelled;
}
int uloop_run_timeout(int timeout)
{
	int next_time = 0;

	uloop_run_depth++;

	uloop_status = 0;
	uloop_cancelled = false;
	do {
		uloop_process_timeouts();

		if (do_sigchld)
			uloop_handle_processes();

		if (uloop_cancelled)
			break;

		next_time = uloop_get_next_timeout();
		if (timeout >= 0 && (next_time < 0 || timeout < next_time))
			next_time = timeout;
		uloop_run_events(next_time);
	} while (!uloop_cancelled && timeout < 0);

	--uloop_run_depth;

	return uloop_status;
}

void uloop_done(void)
{
	uloop_setup_signals(false);

	if (poll_fd >= 0) {
		close(poll_fd);
		poll_fd = -1;
	}

	if (waker_pipe >= 0) {
		uloop_fd_delete(&waker_fd);
		close(waker_pipe);
		close(waker_fd.fd);
		waker_pipe = -1;
	}

	uloop_clear_timeouts();
	uloop_clear_processes();
}
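
/*
 * Illustrative usage sketch (not part of the library): a minimal consumer
 * that arms a periodic timeout, watches a descriptor for readability and
 * runs the loop until SIGINT/SIGTERM. The callback signatures follow
 * uloop.h; the names timer_cb/stdin_cb and the choice of STDIN_FILENO are
 * made up for the example.
 *
 *	#include <unistd.h>
 *	#include <libubox/uloop.h>
 *
 *	static void timer_cb(struct uloop_timeout *t)
 *	{
 *		uloop_timeout_set(t, 1000);	// re-arm: fire again in 1000 ms
 *	}
 *
 *	static void stdin_cb(struct uloop_fd *u, unsigned int events)
 *	{
 *		char buf[256];
 *
 *		while (read(u->fd, buf, sizeof(buf)) > 0)
 *			;			// drain; fd was set non-blocking by uloop_fd_add()
 *	}
 *
 *	int main(void)
 *	{
 *		struct uloop_timeout timer = { .cb = timer_cb };
 *		struct uloop_fd fd_in = { .cb = stdin_cb, .fd = STDIN_FILENO };
 *
 *		uloop_init();
 *		uloop_timeout_set(&timer, 1000);
 *		uloop_fd_add(&fd_in, ULOOP_READ);
 *		uloop_run();			// returns once the loop is cancelled (e.g. SIGINT)
 *		uloop_done();
 *		return 0;
 *	}
 */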