dasynq.h 84 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344
  1. #ifndef DASYNQ_H_INCLUDED
  2. #define DASYNQ_H_INCLUDED
  3. #include "dasynq-config.h"
  4. #include "dasynq-flags.h"
  5. #include "dasynq-stableheap.h"
  6. #include "dasynq-interrupt.h"
  7. #include "dasynq-util.h"
  8. // Dasynq uses a "mix-in" pattern to produce an event loop implementation incorporating selectable
  9. // implementations of various components (main backend, timers, child process watch mechanism etc). In C++
  10. // this can be achieved by a template for some component which extends its own type parameter:
  11. //
  12. // template <typename Base> class X : public B { .... }
  13. //
  14. // (Note that in a sense this is actually the opposite of the so-called "Curiously Recurring Template"
  15. // pattern, which can be used to achieve a similar goal). We can chain several such components together to
  16. // "mix in" the functionality of each into the final class, eg:
  17. //
  18. // template <typename T> using loop_t =
  19. // epoll_loop<interrupt_channel<timer_fd_events<child_proc_events<T>>>>;
  20. //
  21. // (which defines an alias template "loop_t", whose implementation will use the epoll backend, a standard
  22. // interrupt channel implementation, a timerfd-based timer implementation, and the standard child process
  23. // watch implementation). We sometimes need the base class to be able to call derived-class members: to do
  24. // this we pass a reference to the derived instance into a template member function in the base, for example
  25. // the "init" function:
  26. //
  27. // template <typename T> void init(T *derived)
  28. // {
  29. // // can call method on derived:
  30. // derived->add_listener();
  31. // // chain to next class:
  32. // Base::init(derived);
  33. // }
  34. //
  35. // The 'loop_t' defined above is a template for a usable backend mechanism for the event_loop template
  36. // class. At the base all this is the event_dispatch class, defined below, which receives event
  37. // notifications and inserts them into a queue for processing. The event_loop class, also below, wraps this
  38. // (via composition) in an interface which can be used to register/de-register/enable/disable event
  39. // watchers, and which can process the queued events by calling the watcher callbacks. The event_loop class
  40. // also provides some synchronisation to ensure thread-safety, and abstracts away some differences between
  41. // backends.
  42. //
  43. // The differences are exposed as traits, partly via a separate traits class (loop_traits_t as defined
  44. // below, which contains the "main" traits, particularly the sigdata_t, fd_r and fd_s types). Note that the
  45. // event_dispatch class exposes the loop traits as traits_t, and these are then potentially augmented at
  46. // each stage of the mechanism inheritance chain (i.e. the final traits are exposed as
  47. // `loop_t<event_dispatch>::traits_t'.
  48. //
  49. // The trait members are:
  50. // sigdata_t - a wrapper for the siginfo_t type or equivalent used to pass signal parameters
  51. // fd_r - a file descriptor wrapper, if the backend is able to retrieve the file descriptor when
  52. // it receives an fd event. Not all backends can do this.
  53. // fd_s - a file descriptor storage wrapper. If the backend can retrieve file descriptors, this
  54. // will be empty (and ideally zero-size), otherwise it stores a file descriptor.
  55. // With an fd_r and fd_s instance you can always retrieve the file descriptor:
  56. // `fdr.get_fd(fds)' will return it.
  57. // has_bidi_fd_watch
  58. // - boolean indicating whether a single watch can support watching for both input and output
  59. // events simultaneously
  60. // has_separate_rw_fd_watches
  61. // - boolean indicating whether it is possible to add separate input and output watches for the
  62. // same fd. Either this or has_bidi_fd_watch must be true.
  63. // interrupt_after_fd_add
  64. // - boolean indicating if a loop interrupt must be forced after adding/enabling an fd watch.
  65. // interrupt_after_signal_add
  66. // - boolean indicating if a loop interrupt must be forced after adding or enabling a signal
  67. // watch.
  68. // supports_non_oneshot_fd
  69. // - boolean; if true, event_dispatch can arm an fd watch without ONESHOT and returning zero
  70. // events from receive_fd_event (the event notification function) will leave the descriptor
  71. // armed. If false, all fd watches are effectively ONESHOT (they can be re-armed immediately
  72. // after delivery by returning an appropriate event flag mask).
  73. // full_timer_support
  74. // - boolean indicating that the monotonic and system clocks are actually different clocks and
  75. // that timers against the system clock will work correctly if the system clock time is
  76. // adjusted. If false, the monotic clock may not be present at all (monotonic clock will map
  77. // to system clock), and timers against either clock are not guaranteed to work correctly if
  78. // the system clock is adjusted.
  79. #if DASYNQ_HAVE_EPOLL <= 0
  80. #if _POSIX_TIMERS > 0
  81. #include "dasynq-posixtimer.h"
  82. namespace dasynq {
  83. template <typename T, bool provide_mono_timer = true> using timer_events = posix_timer_events<T, provide_mono_timer>;
  84. }
  85. #else
  86. #include "dasynq-itimer.h"
  87. namespace dasynq {
  88. template <typename T, bool provide_mono_timer = true> using timer_events = itimer_events<T, provide_mono_timer>;
  89. }
  90. #endif
  91. #endif
  92. #if DASYNQ_HAVE_KQUEUE
  93. #if DASYNQ_KQUEUE_MACOS_WORKAROUND
  94. #include "dasynq-kqueue-macos.h"
  95. #include "dasynq-childproc.h"
  96. namespace dasynq {
  97. template <typename T> using loop_t = macos_kqueue_loop<timer_events<child_proc_events<interrupt_channel<T>>, false>>;
  98. using loop_traits_t = macos_kqueue_traits;
  99. }
  100. #else
  101. #include "dasynq-kqueue.h"
  102. #include "dasynq-childproc.h"
  103. namespace dasynq {
  104. template <typename T> using loop_t = kqueue_loop<timer_events<child_proc_events<interrupt_channel<T>>, false>>;
  105. using loop_traits_t = kqueue_traits;
  106. }
  107. #endif
  108. #elif DASYNQ_HAVE_EPOLL
  109. #include "dasynq-epoll.h"
  110. #include "dasynq-timerfd.h"
  111. #include "dasynq-childproc.h"
  112. namespace dasynq {
  113. template <typename T> using loop_t = epoll_loop<interrupt_channel<timer_fd_events<child_proc_events<T>>>>;
  114. using loop_traits_t = epoll_traits;
  115. }
  116. #else
  117. #include "dasynq-childproc.h"
  118. #if DASYNQ_HAVE_PSELECT
  119. #include "dasynq-pselect.h"
  120. namespace dasynq {
  121. template <typename T> using loop_t = pselect_events<timer_events<interrupt_channel<child_proc_events<T>>, false>>;
  122. using loop_traits_t = select_traits;
  123. }
  124. #else
  125. #include "dasynq-select.h"
  126. namespace dasynq {
  127. template <typename T> using loop_t = select_events<timer_events<interrupt_channel<child_proc_events<T>>, false>>;
  128. using loop_traits_t = select_traits;
  129. }
  130. #endif
  131. #endif
  132. #include <atomic>
  133. #include <condition_variable>
  134. #include <cstdint>
  135. #include <cstddef>
  136. #include <system_error>
  137. #include <unistd.h>
  138. #include <fcntl.h>
  139. #include "dasynq-mutex.h"
  140. #include "dasynq-basewatchers.h"
  141. namespace dasynq {
  142. /**
  143. * Values for rearm/disarm return from event handlers
  144. */
  145. enum class rearm
  146. {
  147. /** Re-arm the event watcher so that it receives further events */
  148. REARM,
  149. /** Disarm the event watcher so that it receives no further events, until it is re-armed explicitly */
  150. DISARM,
  151. /** Leave in current armed/disarmed state */
  152. NOOP,
  153. /** Remove the event watcher (and call "removed" callback) */
  154. REMOVE,
  155. /** The watcher has been removed - don't touch it! */
  156. REMOVED,
  157. /** RE-queue the watcher to have its notification called again */
  158. REQUEUE
  159. };
  160. namespace dprivate {
  161. // Classes for implementing a fair(ish) wait queue.
  162. // A queue node can be signalled when it reaches the head of
  163. // the queue.
  164. template <typename T_Mutex> class waitqueue;
  165. template <typename T_Mutex> class waitqueue_node;
  166. // Select an appropriate condition variable type for a mutex:
  167. // condition_variable if mutex is std::mutex, or condition_variable_any
  168. // otherwise.
  169. template <class T_Mutex> class condvar_selector;
  170. template <> class condvar_selector<std::mutex>
  171. {
  172. public:
  173. typedef std::condition_variable condvar;
  174. };
  175. template <class T_Mutex> class condvar_selector
  176. {
  177. public:
  178. typedef std::condition_variable_any condvar;
  179. };
  180. // For a single-threaded loop, the waitqueue is a no-op:
  181. template <> class waitqueue_node<null_mutex>
  182. {
  183. // Specialised waitqueue_node for null_mutex.
  184. friend class waitqueue<null_mutex>;
  185. public:
  186. void wait(std::unique_lock<null_mutex> &ul) { }
  187. void signal() { }
  188. DASYNQ_EMPTY_BODY;
  189. };
  190. template <typename T_Mutex> class waitqueue_node
  191. {
  192. typename condvar_selector<T_Mutex>::condvar condvar;
  193. friend class waitqueue<T_Mutex>;
  194. // ptr to next node in queue, set to null when added to queue tail:
  195. waitqueue_node * next;
  196. public:
  197. void signal()
  198. {
  199. condvar.notify_one();
  200. }
  201. void wait(std::unique_lock<T_Mutex> &mutex_lock)
  202. {
  203. condvar.wait(mutex_lock);
  204. }
  205. };
  206. template <> class waitqueue<null_mutex>
  207. {
  208. public:
  209. // remove current head of queue, return new head:
  210. waitqueue_node<null_mutex> * unqueue()
  211. {
  212. return nullptr;
  213. }
  214. waitqueue_node<null_mutex> * get_head()
  215. {
  216. return nullptr;
  217. }
  218. waitqueue_node<null_mutex> * get_second()
  219. {
  220. return nullptr;
  221. }
  222. bool check_head(waitqueue_node<null_mutex> &node)
  223. {
  224. return true;
  225. }
  226. bool is_empty()
  227. {
  228. return true;
  229. }
  230. void queue(waitqueue_node<null_mutex> *node)
  231. {
  232. }
  233. };
  234. template <typename T_Mutex> class waitqueue
  235. {
  236. waitqueue_node<T_Mutex> * tail = nullptr;
  237. waitqueue_node<T_Mutex> * head = nullptr;
  238. public:
  239. // remove current head of queue, return new head:
  240. waitqueue_node<T_Mutex> * unqueue()
  241. {
  242. head = head->next;
  243. if (head == nullptr) {
  244. tail = nullptr;
  245. }
  246. return head;
  247. }
  248. waitqueue_node<T_Mutex> * get_head()
  249. {
  250. return head;
  251. }
  252. waitqueue_node<T_Mutex> * get_second()
  253. {
  254. return head->next;
  255. }
  256. bool check_head(waitqueue_node<T_Mutex> &node)
  257. {
  258. return head == &node;
  259. }
  260. bool is_empty()
  261. {
  262. return head == nullptr;
  263. }
  264. void queue(waitqueue_node<T_Mutex> *node)
  265. {
  266. node->next = nullptr;
  267. if (tail) {
  268. tail->next = node;
  269. }
  270. else {
  271. head = node;
  272. }
  273. tail = node;
  274. }
  275. };
  276. // friend of event_loop for giving access to various private members
  277. class loop_access {
  278. public:
  279. template <typename Loop>
  280. static typename Loop::mutex_t &get_base_lock(Loop &loop) noexcept
  281. {
  282. return loop.get_base_lock();
  283. }
  284. template <typename Loop>
  285. static rearm process_fd_rearm(Loop &loop, typename Loop::base_fd_watcher *bfw,
  286. rearm rearm_type) noexcept
  287. {
  288. return loop.process_fd_rearm(bfw, rearm_type);
  289. }
  290. template <typename Loop>
  291. static rearm process_primary_rearm(Loop &loop, typename Loop::base_bidi_fd_watcher *bdfw,
  292. rearm rearm_type) noexcept
  293. {
  294. return loop.process_primary_rearm(bdfw, rearm_type);
  295. }
  296. template <typename Loop>
  297. static rearm process_secondary_rearm(Loop &loop, typename Loop::base_bidi_fd_watcher * bdfw,
  298. base_watcher * outw, rearm rearm_type) noexcept
  299. {
  300. return loop.process_secondary_rearm(bdfw, outw, rearm_type);
  301. }
  302. template <typename Loop>
  303. static void process_signal_rearm(Loop &loop, typename Loop::base_signal_watcher * bsw,
  304. rearm rearm_type) noexcept
  305. {
  306. loop.process_signal_rearm(bsw, rearm_type);
  307. }
  308. template <typename Loop>
  309. static void process_child_watch_rearm(Loop &loop, typename Loop::base_child_watcher *bcw,
  310. rearm rearm_type) noexcept
  311. {
  312. loop.process_child_watch_rearm(bcw, rearm_type);
  313. }
  314. template <typename Loop>
  315. static void process_timer_rearm(Loop &loop, typename Loop::base_timer_watcher *btw,
  316. rearm rearm_type) noexcept
  317. {
  318. loop.process_timer_rearm(btw, rearm_type);
  319. }
  320. template <typename Loop>
  321. static void requeue_watcher(Loop &loop, base_watcher *watcher) noexcept
  322. {
  323. loop.requeue_watcher(watcher);
  324. }
  325. template <typename Loop>
  326. static void release_watcher(Loop &loop, base_watcher *watcher) noexcept
  327. {
  328. loop.release_watcher(watcher);
  329. }
  330. };
  331. // Do standard post-dispatch processing for a watcher. This handles the case of removing or
  332. // re-queueing watchers depending on the rearm type. This is called from the individual
  333. // watcher dispatch functions to handle REMOVE or REQUEUE re-arm values.
  334. template <typename Loop> void post_dispatch(Loop &loop, base_watcher *watcher, rearm rearm_type)
  335. {
  336. if (rearm_type == rearm::REMOVE) {
  337. loop_access::get_base_lock(loop).unlock();
  338. loop_access::release_watcher(loop, watcher);
  339. watcher->watch_removed();
  340. loop_access::get_base_lock(loop).lock();
  341. }
  342. else if (rearm_type == rearm::REQUEUE) {
  343. loop_access::requeue_watcher(loop, watcher);
  344. }
  345. }
  346. // Post-dispatch handling for bidi fd watchers.
  347. template <typename Loop> void post_dispatch(Loop &loop, bidi_fd_watcher<Loop> *bdfd_watcher,
  348. base_watcher *out_watcher, rearm rearm_type)
  349. {
  350. base_watcher *watcher = (base_watcher *)bdfd_watcher;
  351. if (rearm_type == rearm::REMOVE) {
  352. loop_access::get_base_lock(loop).unlock();
  353. loop_access::release_watcher(loop, watcher);
  354. loop_access::release_watcher(loop, out_watcher);
  355. watcher->watch_removed();
  356. loop_access::get_base_lock(loop).lock();
  357. }
  358. else if (rearm_type == rearm::REQUEUE) {
  359. loop_access::requeue_watcher(loop, watcher);
  360. }
  361. }
  362. // The event_dispatch class serves as the base class (mixin) for the backend mechanism. It
  363. // mostly manages queing and dequeing of events and maintains/owns the relevant data
  364. // structures, including a mutex lock.
  365. //
  366. // The backend mechanism should call one of the receiveXXX functions to notify of an event
  367. // received. The watcher will then be queued.
  368. //
  369. // In general the functions should be called with lock held. In practice this means that the
  370. // event loop backend implementations (that deposit received events here) must obtain the
  371. // lock; they are also free to use it to protect their own internal data structures.
  372. template <typename Traits, typename LoopTraits> class event_dispatch
  373. {
  374. friend class dasynq::event_loop<typename LoopTraits::mutex_t, LoopTraits>;;
  375. public:
  376. using mutex_t = typename LoopTraits::mutex_t;
  377. using traits_t = Traits;
  378. private:
  379. // queue data structure/pointer
  380. prio_queue event_queue;
  381. using base_signal_watcher = dprivate::base_signal_watcher<typename traits_t::sigdata_t>;
  382. using base_child_watcher = dprivate::base_child_watcher;
  383. using base_timer_watcher = dprivate::base_timer_watcher;
  384. // Add a watcher into the queueing system (but don't queue it). Call with lock held.
  385. // may throw: std::bad_alloc
  386. void prepare_watcher(base_watcher *bwatcher)
  387. {
  388. allocate_handle(event_queue, bwatcher->heap_handle, bwatcher);
  389. }
  390. void queue_watcher(base_watcher *bwatcher) noexcept
  391. {
  392. event_queue.insert(bwatcher->heap_handle, bwatcher->priority);
  393. }
  394. void dequeue_watcher(base_watcher *bwatcher) noexcept
  395. {
  396. if (event_queue.is_queued(bwatcher->heap_handle)) {
  397. event_queue.remove(bwatcher->heap_handle);
  398. }
  399. }
  400. // Remove watcher from the queueing system
  401. void release_watcher(base_watcher *bwatcher) noexcept
  402. {
  403. event_queue.deallocate(bwatcher->heap_handle);
  404. }
  405. protected:
  406. mutex_t lock;
  407. template <typename T> void init(T *loop) noexcept { }
  408. void sigmaskf(int how, const sigset_t *set, sigset_t *oset)
  409. {
  410. LoopTraits::sigmaskf(how, set, oset);
  411. }
  412. // Receive a signal; return true to disable signal watch or false to leave enabled.
  413. // Called with lock held.
  414. template <typename T>
  415. bool receive_signal(T &loop_mech, typename Traits::sigdata_t & siginfo, void * userdata) noexcept
  416. {
  417. base_signal_watcher * bwatcher = static_cast<base_signal_watcher *>(userdata);
  418. bwatcher->siginfo = siginfo;
  419. queue_watcher(bwatcher);
  420. return true;
  421. }
  422. // Receive fd event delivered from backend mechansim. Returns the desired watch mask, as per
  423. // set_fd_enabled, which can be used to leave the watch disabled, re-enable it or re-enable
  424. // one direction of a bi-directional watcher.
  425. template <typename T>
  426. std::tuple<int, typename Traits::fd_s> receive_fd_event(T &loop_mech, typename Traits::fd_r fd_r,
  427. void * userdata, int flags) noexcept
  428. {
  429. base_fd_watcher * bfdw = static_cast<base_fd_watcher *>(userdata);
  430. bfdw->event_flags |= flags;
  431. typename Traits::fd_s watch_fd_s {bfdw->watch_fd};
  432. base_watcher * bwatcher = bfdw;
  433. bool is_multi_watch = bfdw->watch_flags & multi_watch;
  434. if (is_multi_watch) {
  435. base_bidi_fd_watcher *bbdw = static_cast<base_bidi_fd_watcher *>(bwatcher);
  436. bbdw->watch_flags &= ~flags;
  437. if ((flags & IN_EVENTS) && (flags & OUT_EVENTS)) {
  438. // Queue the secondary watcher first:
  439. queue_watcher(&bbdw->out_watcher);
  440. }
  441. else if (flags & OUT_EVENTS) {
  442. // Use the secondary watcher for queueing:
  443. bwatcher = &(bbdw->out_watcher);
  444. }
  445. }
  446. queue_watcher(bwatcher);
  447. if (is_multi_watch && ! traits_t::has_separate_rw_fd_watches) {
  448. // If this is a bidirectional fd-watch, it has been disabled in *both* directions
  449. // as the event was delivered. However, the other direction should not be disabled
  450. // yet, so we need to re-enable:
  451. int in_out_mask = IN_EVENTS | OUT_EVENTS;
  452. if ((bfdw->watch_flags & in_out_mask) != 0) {
  453. // We need to re-enable the other channel now:
  454. return std::make_tuple((bfdw->watch_flags & in_out_mask) | ONE_SHOT, watch_fd_s);
  455. // We are the polling thread: don't need to interrupt polling, even if it would
  456. // normally be required.
  457. }
  458. }
  459. return std::make_tuple(0, watch_fd_s);
  460. }
  461. // Child process terminated. Called with both the main lock and the reaper lock held.
  462. void receive_child_stat(pid_t child, int status, void * userdata) noexcept
  463. {
  464. base_child_watcher * watcher = static_cast<base_child_watcher *>(userdata);
  465. watcher->child_status = status;
  466. watcher->child_termd = true;
  467. queue_watcher(watcher);
  468. }
  469. void receive_timer_expiry(timer_handle_t & timer_handle, void * userdata, int intervals) noexcept
  470. {
  471. base_timer_watcher * watcher = static_cast<base_timer_watcher *>(userdata);
  472. watcher->intervals += intervals;
  473. queue_watcher(watcher);
  474. }
  475. // Pull a single event from the queue; returns nullptr if the queue is empty.
  476. // Call with lock held.
  477. base_watcher * pull_event() noexcept
  478. {
  479. if (event_queue.empty()) {
  480. return nullptr;
  481. }
  482. auto & rhndl = event_queue.get_root();
  483. base_watcher *r = dprivate::get_watcher(event_queue, rhndl);
  484. event_queue.pull_root();
  485. return r;
  486. }
  487. // Queue a watcher for removal, or issue "removed" callback to it.
  488. // Call with lock free.
  489. void issue_delete(base_watcher *watcher) noexcept
  490. {
  491. // This is only called when the attention lock is held, so if the watcher is not
  492. // active/queued now, it cannot become active (and will not be reported with an event)
  493. // during execution of this function.
  494. lock.lock();
  495. if (watcher->active) {
  496. // If the watcher is active, set deleteme true; the watcher will be removed
  497. // at the end of current processing (i.e. when active is set false).
  498. watcher->deleteme = true;
  499. lock.unlock();
  500. }
  501. else {
  502. // Actually do the delete.
  503. dequeue_watcher(watcher);
  504. release_watcher(watcher);
  505. lock.unlock();
  506. watcher->watch_removed();
  507. }
  508. }
  509. // Queue a watcher for removal, or issue "removed" callback to it.
  510. // Call with lock free.
  511. void issue_delete(base_bidi_fd_watcher *watcher) noexcept
  512. {
  513. lock.lock();
  514. if (watcher->active) {
  515. watcher->deleteme = true;
  516. release_watcher(watcher);
  517. }
  518. else {
  519. dequeue_watcher(watcher);
  520. release_watcher(watcher);
  521. watcher->read_removed = true;
  522. }
  523. base_watcher *secondary = &(watcher->out_watcher);
  524. if (secondary->active) {
  525. secondary->deleteme = true;
  526. release_watcher(watcher);
  527. }
  528. else {
  529. dequeue_watcher(secondary);
  530. release_watcher(watcher);
  531. watcher->write_removed = true;
  532. }
  533. if (watcher->read_removed && watcher->write_removed) {
  534. lock.unlock();
  535. watcher->watch_removed();
  536. }
  537. else {
  538. lock.unlock();
  539. }
  540. }
  541. event_dispatch() { }
  542. event_dispatch(const event_dispatch &) = delete;
  543. };
  544. }
  545. // This is the main event_loop implementation. It serves as an interface to the event loop backend (of which
  546. // it maintains an internal instance). It also serialises polling the backend and provides safe deletion of
  547. // watchers (see comments inline).
  548. //
  549. // The T_Mutex type parameter specifies the mutex type. A null_mutex can be used for a single-threaded event
  550. // loop; std::mutex, or any mutex providing a compatible interface, can be used for a thread-safe event
  551. // loop.
  552. //
  553. // The Traits type parameter specifies any required traits for the event loop. This specifies the back-end
  554. // to use (backend_t, a template) and the basic back-end traits (backend_traits_t).
  555. // The default is `default_traits<T_Mutex>'.
  556. //
  557. template <typename T_Mutex, typename Traits>
  558. class event_loop
  559. {
  560. using my_event_loop_t = event_loop<T_Mutex, Traits>;
  561. friend class dprivate::fd_watcher<my_event_loop_t>;
  562. friend class dprivate::bidi_fd_watcher<my_event_loop_t>;
  563. friend class dprivate::signal_watcher<my_event_loop_t>;
  564. friend class dprivate::child_proc_watcher<my_event_loop_t>;
  565. friend class dprivate::timer<my_event_loop_t>;
  566. friend class dprivate::loop_access;
  567. using backend_traits_t = typename Traits::backend_traits_t;
  568. template <typename T> using event_dispatch = dprivate::event_dispatch<T,Traits>;
  569. using dispatch_t = event_dispatch<backend_traits_t>;
  570. using loop_mech_t = typename Traits::template backend_t<dispatch_t>;
  571. using reaper_mutex_t = typename loop_mech_t::reaper_mutex_t;
  572. public:
  573. using traits_t = Traits;
  574. using loop_traits_t = typename loop_mech_t::traits_t;
  575. using mutex_t = T_Mutex;
  576. private:
  577. template <typename T> using waitqueue = dprivate::waitqueue<T>;
  578. template <typename T> using waitqueue_node = dprivate::waitqueue_node<T>;
  579. using base_watcher = dprivate::base_watcher;
  580. using base_signal_watcher = dprivate::base_signal_watcher<typename loop_traits_t::sigdata_t>;
  581. using base_fd_watcher = dprivate::base_fd_watcher;
  582. using base_bidi_fd_watcher = dprivate::base_bidi_fd_watcher;
  583. using base_child_watcher = dprivate::base_child_watcher;
  584. using base_timer_watcher = dprivate::base_timer_watcher;
  585. using watch_type_t = dprivate::watch_type_t;
  586. loop_mech_t loop_mech;
  587. // There is a complex problem with most asynchronous event notification mechanisms
  588. // when used in a multi-threaded environment. Generally, a file descriptor or other
  589. // event type that we are watching will be associated with some data used to manage
  590. // that event source. For example a web server needs to maintain information about
  591. // each client connection, such as the state of the connection (what protocol version
  592. // has been negotiated, etc; if a transfer is taking place, what file is being
  593. // transferred etc).
  594. //
  595. // However, sometimes we want to remove an event source (eg webserver wants to drop
  596. // a connection) and delete the associated data. The problem here is that it is
  597. // difficult to be sure when it is ok to actually remove the data, since when
  598. // requesting to unwatch the source in one thread it is still possible that an
  599. // event from that source is just being reported to another thread (in which case
  600. // the data will be needed).
  601. //
  602. // To solve that, we:
  603. // - allow only one thread to poll for events at a time, using a lock
  604. // - use the same lock to prevent polling, if we want to unwatch an event source
  605. // - generate an event to interrupt any polling that may already be occurring in
  606. // another thread
  607. // - mark handlers as active if they are currently executing, and
  608. // - when removing an active handler, simply set a flag which causes it to be
  609. // removed once the current processing is finished, rather than removing it
  610. // immediately.
  611. //
  612. // In particular the lock mechanism for preventing multiple threads polling and
  613. // for allowing polling to be interrupted is tricky. We can't use a simple mutex
  614. // since there is significant chance that it will be highly contended and there
  615. // are no guarantees that its acquisition will be fair. In particular, we don't
  616. // want a thread that is trying to unwatch a source being starved while another
  617. // thread polls the event source.
  618. //
  619. // So, we use two wait queues protected by a single mutex. The "attn_waitqueue"
  620. // (attention queue) is the high-priority queue, used for threads wanting to
  621. // unwatch event sources. The "wait_waitquueue" is the queue used by threads
  622. // that wish to actually poll for events, while they are waiting for the main
  623. // queue to become quiet.
  624. // - The head of the "attn_waitqueue" is always the holder of the lock
  625. // - Therefore, a poll-waiter must be moved from the wait_waitqueue to the
  626. // attn_waitqueue to actually gain the lock. This is only done if the
  627. // attn_waitqueue is otherwise empty.
  628. // - The mutex only protects manipulation of the wait queues, and so should not
  629. // be highly contended.
  630. //
  631. // To claim the lock for a poll-wait, the procedure is:
  632. // - check if the attn_waitqueue is empty;
  633. // - if it is, insert node at the head, thus claiming the lock, and return
  634. // - otherwise, insert node in the wait_waitqueue, and wait
  635. // To claim the lock for an unwatch, the procedure is:
  636. // - insert node in the attn_waitqueue
  637. // - if the node is at the head of the queue, lock is claimed; return
  638. // - otherwise, if a poll is in progress, interrupt it
  639. // - wait until our node is at the head of the attn_waitqueue
  640. mutex_t wait_lock; // protects the wait/attention queues
  641. bool long_poll_running = false; // whether any thread is polling the backend (with non-zero timeout)
  642. waitqueue<mutex_t> attn_waitqueue;
  643. waitqueue<mutex_t> wait_waitqueue;
  644. mutex_t &get_base_lock() noexcept
  645. {
  646. return loop_mech.lock;
  647. }
  648. reaper_mutex_t &get_reaper_lock() noexcept
  649. {
  650. return loop_mech.get_reaper_lock();
  651. }
  652. void register_signal(base_signal_watcher *callBack, int signo)
  653. {
  654. std::lock_guard<mutex_t> guard(loop_mech.lock);
  655. loop_mech.prepare_watcher(callBack);
  656. try {
  657. loop_mech.add_signal_watch_nolock(signo, callBack);
  658. if (backend_traits_t::interrupt_after_signal_add) {
  659. interrupt_if_necessary();
  660. }
  661. }
  662. catch (...) {
  663. loop_mech.release_watcher(callBack);
  664. throw;
  665. }
  666. }
  667. void deregister(base_signal_watcher *callBack, int signo) noexcept
  668. {
  669. loop_mech.remove_signal_watch(signo);
  670. waitqueue_node<T_Mutex> qnode;
  671. get_attn_lock(qnode);
  672. loop_mech.issue_delete(callBack);
  673. release_lock(qnode);
  674. }
  675. void register_fd(base_fd_watcher *callback, int fd, int eventmask, bool enabled, bool emulate = false)
  676. {
  677. std::lock_guard<mutex_t> guard(loop_mech.lock);
  678. loop_mech.prepare_watcher(callback);
  679. try {
  680. if (! loop_mech.add_fd_watch(fd, callback, eventmask | ONE_SHOT, enabled, emulate)) {
  681. callback->emulatefd = true;
  682. callback->emulate_enabled = enabled;
  683. if (enabled) {
  684. callback->event_flags = eventmask & IO_EVENTS;
  685. if (eventmask & IO_EVENTS) {
  686. requeue_watcher(callback);
  687. }
  688. }
  689. }
  690. else if (enabled && backend_traits_t::interrupt_after_fd_add) {
  691. interrupt_if_necessary();
  692. }
  693. }
  694. catch (...) {
  695. loop_mech.release_watcher(callback);
  696. throw;
  697. }
  698. }
  699. // Register a bidi fd watcher. The watch_flags should already be set to the eventmask to watch
  700. // (i.e. eventmask == callback->watch_flags is a pre-condition).
  701. void register_fd(base_bidi_fd_watcher *callback, int fd, int eventmask, bool emulate = false)
  702. {
  703. std::lock_guard<mutex_t> guard(loop_mech.lock);
  704. loop_mech.prepare_watcher(callback);
  705. try {
  706. loop_mech.prepare_watcher(&callback->out_watcher);
  707. try {
  708. bool do_interrupt = false;
  709. if (backend_traits_t::has_separate_rw_fd_watches) {
  710. int r = loop_mech.add_bidi_fd_watch(fd, callback, eventmask | ONE_SHOT, emulate);
  711. if (r & IN_EVENTS) {
  712. callback->emulatefd = true;
  713. if (eventmask & IN_EVENTS) {
  714. callback->watch_flags &= ~IN_EVENTS;
  715. requeue_watcher(callback);
  716. }
  717. }
  718. else if ((eventmask & IN_EVENTS) && backend_traits_t::interrupt_after_fd_add) {
  719. do_interrupt = true;
  720. }
  721. if (r & OUT_EVENTS) {
  722. callback->out_watcher.emulatefd = true;
  723. if (eventmask & OUT_EVENTS) {
  724. callback->watch_flags &= ~OUT_EVENTS;
  725. requeue_watcher(&callback->out_watcher);
  726. }
  727. }
  728. else if ((eventmask & OUT_EVENTS) && backend_traits_t::interrupt_after_fd_add) {
  729. do_interrupt = true;
  730. }
  731. }
  732. else {
  733. if (! loop_mech.add_fd_watch(fd, callback, eventmask | ONE_SHOT, true, emulate)) {
  734. callback->emulatefd = true;
  735. callback->out_watcher.emulatefd = true;
  736. if (eventmask & IN_EVENTS) {
  737. callback->watch_flags &= ~IN_EVENTS;
  738. requeue_watcher(callback);
  739. }
  740. if (eventmask & OUT_EVENTS) {
  741. callback->watch_flags &= ~OUT_EVENTS;
  742. requeue_watcher(&callback->out_watcher);
  743. }
  744. }
  745. else if (backend_traits_t::interrupt_after_fd_add) {
  746. do_interrupt = true;
  747. }
  748. }
  749. if (do_interrupt) {
  750. interrupt_if_necessary();
  751. }
  752. }
  753. catch (...) {
  754. loop_mech.release_watcher(&callback->out_watcher);
  755. throw;
  756. }
  757. }
  758. catch (...) {
  759. loop_mech.release_watcher(callback);
  760. throw;
  761. }
  762. }
  763. void set_fd_enabled(base_watcher *watcher, int fd, int watch_flags, bool enabled) noexcept
  764. {
  765. if (enabled) {
  766. loop_mech.enable_fd_watch(fd, watcher, watch_flags | ONE_SHOT);
  767. if (backend_traits_t::interrupt_after_fd_add) {
  768. interrupt_if_necessary();
  769. }
  770. }
  771. else {
  772. loop_mech.disable_fd_watch(fd, watch_flags);
  773. }
  774. }
  775. void set_fd_enabled_nolock(base_watcher *watcher, int fd, int watch_flags, bool enabled) noexcept
  776. {
  777. if (enabled) {
  778. loop_mech.enable_fd_watch_nolock(fd, watcher, watch_flags | ONE_SHOT);
  779. if (backend_traits_t::interrupt_after_fd_add) {
  780. interrupt_if_necessary();
  781. }
  782. }
  783. else {
  784. loop_mech.disable_fd_watch_nolock(fd, watch_flags);
  785. }
  786. }
  787. void deregister(base_fd_watcher *callback, int fd) noexcept
  788. {
  789. if (callback->emulatefd) {
  790. auto & ed = (dispatch_t &) loop_mech;
  791. ed.issue_delete(callback);
  792. return;
  793. }
  794. loop_mech.remove_fd_watch(fd, callback->watch_flags);
  795. waitqueue_node<T_Mutex> qnode;
  796. get_attn_lock(qnode);
  797. auto & ed = (dispatch_t &) loop_mech;
  798. ed.issue_delete(callback);
  799. release_lock(qnode);
  800. }
  801. void deregister(base_bidi_fd_watcher *callback, int fd) noexcept
  802. {
  803. if (backend_traits_t::has_separate_rw_fd_watches) {
  804. loop_mech.remove_bidi_fd_watch(fd);
  805. }
  806. else {
  807. loop_mech.remove_fd_watch(fd, callback->watch_flags);
  808. }
  809. waitqueue_node<T_Mutex> qnode;
  810. get_attn_lock(qnode);
  811. dispatch_t & ed = (dispatch_t &) loop_mech;
  812. ed.issue_delete(callback);
  813. release_lock(qnode);
  814. }
  815. void reserve_child_watch(base_child_watcher *callback)
  816. {
  817. std::lock_guard<mutex_t> guard(loop_mech.lock);
  818. loop_mech.prepare_watcher(callback);
  819. try {
  820. loop_mech.reserve_child_watch_nolock(callback->watch_handle);
  821. }
  822. catch (...) {
  823. loop_mech.release_watcher(callback);
  824. throw;
  825. }
  826. }
  827. void unreserve(base_child_watcher *callback) noexcept
  828. {
  829. std::lock_guard<mutex_t> guard(loop_mech.lock);
  830. loop_mech.unreserve_child_watch(callback->watch_handle);
  831. loop_mech.release_watcher(callback);
  832. }
  833. void register_child(base_child_watcher *callback, pid_t child)
  834. {
  835. std::lock_guard<mutex_t> guard(loop_mech.lock);
  836. loop_mech.prepare_watcher(callback);
  837. try {
  838. loop_mech.add_child_watch_nolock(callback->watch_handle, child, callback);
  839. }
  840. catch (...) {
  841. loop_mech.release_watcher(callback);
  842. throw;
  843. }
  844. }
  845. void register_reserved_child(base_child_watcher *callback, pid_t child) noexcept
  846. {
  847. loop_mech.add_reserved_child_watch(callback->watch_handle, child, callback);
  848. }
  849. void register_reserved_child_nolock(base_child_watcher *callback, pid_t child) noexcept
  850. {
  851. loop_mech.add_reserved_child_watch_nolock(callback->watch_handle, child, callback);
  852. }
  853. void deregister(base_child_watcher *callback, pid_t child) noexcept
  854. {
  855. loop_mech.remove_child_watch(callback->watch_handle);
  856. waitqueue_node<T_Mutex> qnode;
  857. get_attn_lock(qnode);
  858. loop_mech.issue_delete(callback);
  859. release_lock(qnode);
  860. }
  861. // Stop watching a child process, but retain watch reservation so that another child can be
  862. // watched without running into resource allocation issues.
  863. void stop_watch(base_child_watcher *callback) noexcept
  864. {
  865. loop_mech.stop_child_watch(callback->watch_handle);
  866. }
  867. void register_timer(base_timer_watcher *callback, clock_type clock)
  868. {
  869. std::lock_guard<mutex_t> guard(loop_mech.lock);
  870. loop_mech.prepare_watcher(callback);
  871. try {
  872. loop_mech.add_timer_nolock(callback->timer_handle, callback, clock);
  873. }
  874. catch (...) {
  875. loop_mech.release_watcher(callback);
  876. }
  877. }
  878. void set_timer(base_timer_watcher *callBack, const timespec &timeout, clock_type clock) noexcept
  879. {
  880. struct timespec interval {0, 0};
  881. loop_mech.set_timer(callBack->timer_handle, timeout, interval, true, clock);
  882. }
  883. void set_timer(base_timer_watcher *callBack, const timespec &timeout, const timespec &interval,
  884. clock_type clock) noexcept
  885. {
  886. loop_mech.set_timer(callBack->timer_handle, timeout, interval, true, clock);
  887. }
  888. void set_timer_rel(base_timer_watcher *callBack, const timespec &timeout, clock_type clock) noexcept
  889. {
  890. struct timespec interval {0, 0};
  891. loop_mech.set_timer_rel(callBack->timer_handle, timeout, interval, true, clock);
  892. }
  893. void set_timer_rel(base_timer_watcher *callBack, const timespec &timeout,
  894. const timespec &interval, clock_type clock) noexcept
  895. {
  896. loop_mech.set_timer_rel(callBack->timer_handle, timeout, interval, true, clock);
  897. }
  898. void set_timer_enabled(base_timer_watcher *callback, clock_type clock, bool enabled) noexcept
  899. {
  900. loop_mech.enable_timer(callback->timer_handle, enabled, clock);
  901. }
  902. void set_timer_enabled_nolock(base_timer_watcher *callback, clock_type clock, bool enabled) noexcept
  903. {
  904. loop_mech.enable_timer_nolock(callback->timer_handle, enabled, clock);
  905. }
  906. void stop_timer(base_timer_watcher *callback, clock_type clock) noexcept
  907. {
  908. loop_mech.stop_timer(callback->timer_handle, clock);
  909. }
  910. void deregister(base_timer_watcher *callback, clock_type clock) noexcept
  911. {
  912. loop_mech.remove_timer(callback->timer_handle, clock);
  913. waitqueue_node<T_Mutex> qnode;
  914. get_attn_lock(qnode);
  915. loop_mech.issue_delete(callback);
  916. release_lock(qnode);
  917. }
  918. void dequeue_watcher(base_watcher *watcher) noexcept
  919. {
  920. loop_mech.dequeue_watcher(watcher);
  921. }
  922. void requeue_watcher(base_watcher *watcher) noexcept
  923. {
  924. loop_mech.queue_watcher(watcher);
  925. interrupt_if_necessary();
  926. }
  927. void release_watcher(base_watcher *watcher) noexcept
  928. {
  929. loop_mech.release_watcher(watcher);
  930. }
  931. // Interrupt the current poll-waiter, if necessary - that is, if the loop is multi-thread safe, and if
  932. // there is currently another thread polling the backend event mechanism.
  933. void interrupt_if_necessary()
  934. {
  935. wait_lock.lock();
  936. bool attn_q_empty = attn_waitqueue.is_empty(); // (always false for single-threaded loops)
  937. wait_lock.unlock();
  938. if (! attn_q_empty) {
  939. loop_mech.interrupt_wait();
  940. }
  941. }
  942. // Acquire the attention lock (when held, ensures that no thread is polling the AEN
  943. // mechanism). This can be used to safely remove watches, since it is certain that
  944. // notification callbacks won't be run while the attention lock is held. Any in-progress
  945. // poll will be interrupted so that the lock should be acquired quickly.
  946. void get_attn_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  947. {
  948. std::unique_lock<T_Mutex> ulock(wait_lock);
  949. attn_waitqueue.queue(&qnode);
  950. if (! attn_waitqueue.check_head(qnode)) {
  951. if (long_poll_running) {
  952. // We want to interrupt any in-progress poll so that the attn queue will progress
  953. // but we don't want to do that unnecessarily. If we are 2nd in the queue then the
  954. // head must be doing the poll; interrupt it. Otherwise, we assume the 2nd has
  955. // already interrupted it.
  956. if (attn_waitqueue.get_second() == &qnode) {
  957. loop_mech.interrupt_wait();
  958. }
  959. }
  960. while (! attn_waitqueue.check_head(qnode)) {
  961. qnode.wait(ulock);
  962. }
  963. }
  964. }
  965. // Acquire the attention lock, but without interrupting any poll that's in progress
  966. // (prefer to fail in that case).
  967. bool poll_attn_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  968. {
  969. std::unique_lock<T_Mutex> ulock(wait_lock);
  970. if (long_poll_running) {
  971. // There are poll-waiters, bail out
  972. return false;
  973. }
  974. // Nobody's doing a long poll, wait until we're at the head of the attn queue and return
  975. // success:
  976. attn_waitqueue.queue(&qnode);
  977. while (! attn_waitqueue.check_head(qnode)) {
  978. qnode.wait(ulock);
  979. }
  980. return true;
  981. }
  982. // Acquire the poll-wait lock (to be held when polling the AEN mechanism; lower priority than
  983. // the attention lock). The poll-wait lock is used to prevent more than a single thread from
  984. // polling the event loop mechanism at a time; if this is not done, it is basically
  985. // impossible to safely deregister watches.
  986. void get_pollwait_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  987. {
  988. std::unique_lock<T_Mutex> ulock(wait_lock);
  989. if (attn_waitqueue.is_empty()) {
  990. // Queue is completely empty:
  991. attn_waitqueue.queue(&qnode);
  992. }
  993. else {
  994. wait_waitqueue.queue(&qnode);
  995. }
  996. while (! attn_waitqueue.check_head(qnode)) {
  997. qnode.wait(ulock);
  998. }
  999. long_poll_running = true;
  1000. }
  1001. // Release the poll-wait/attention lock.
  1002. void release_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  1003. {
  1004. std::unique_lock<T_Mutex> ulock(wait_lock);
  1005. long_poll_running = false;
  1006. waitqueue_node<T_Mutex> * nhead = attn_waitqueue.unqueue();
  1007. if (nhead != nullptr) {
  1008. // Someone else now owns the lock, signal them to wake them up
  1009. nhead->signal();
  1010. }
  1011. else {
  1012. // Nobody is waiting in attn_waitqueue (the high-priority queue) so check in
  1013. // wait_waitqueue (the low-priority queue)
  1014. if (! wait_waitqueue.is_empty()) {
  1015. auto nhead = wait_waitqueue.get_head();
  1016. wait_waitqueue.unqueue();
  1017. attn_waitqueue.queue(nhead);
  1018. long_poll_running = true;
  1019. nhead->signal();
  1020. }
  1021. }
  1022. }
  1023. void process_signal_rearm(base_signal_watcher * bsw, rearm rearm_type) noexcept
  1024. {
  1025. // Called with lock held
  1026. if (rearm_type == rearm::REARM) {
  1027. loop_mech.rearm_signal_watch_nolock(bsw->siginfo.get_signo(), bsw);
  1028. if (backend_traits_t::interrupt_after_signal_add) {
  1029. interrupt_if_necessary();
  1030. }
  1031. }
  1032. else if (rearm_type == rearm::REMOVE) {
  1033. loop_mech.remove_signal_watch_nolock(bsw->siginfo.get_signo());
  1034. }
  1035. // Note that signal watchers cannot (currently) be disarmed
  1036. }
  1037. // Process rearm return from an fd_watcher, including the primary watcher of a bidi_fd_watcher.
  1038. // Depending on the rearm value, we re-arm, remove, or disarm the watcher, etc.
  1039. rearm process_fd_rearm(base_fd_watcher * bfw, rearm rearm_type) noexcept
  1040. {
  1041. bool emulatedfd = static_cast<base_watcher *>(bfw)->emulatefd;
  1042. if (emulatedfd) {
  1043. if (rearm_type == rearm::REARM) {
  1044. bfw->emulate_enabled = true;
  1045. rearm_type = rearm::REQUEUE;
  1046. }
  1047. else if (rearm_type == rearm::DISARM) {
  1048. bfw->emulate_enabled = false;
  1049. }
  1050. else if (rearm_type == rearm::NOOP) {
  1051. if (bfw->emulate_enabled) {
  1052. rearm_type = rearm::REQUEUE;
  1053. }
  1054. }
  1055. }
  1056. else if (rearm_type == rearm::REARM) {
  1057. set_fd_enabled_nolock(bfw, bfw->watch_fd,
  1058. bfw->watch_flags & (IN_EVENTS | OUT_EVENTS), true);
  1059. }
  1060. else if (rearm_type == rearm::DISARM) {
  1061. loop_mech.disable_fd_watch_nolock(bfw->watch_fd, bfw->watch_flags);
  1062. }
  1063. else if (rearm_type == rearm::REMOVE) {
  1064. loop_mech.remove_fd_watch_nolock(bfw->watch_fd, bfw->watch_flags);
  1065. }
  1066. return rearm_type;
  1067. }
  1068. // Process rearm option from the primary watcher in bidi_fd_watcher
  1069. rearm process_primary_rearm(base_bidi_fd_watcher * bdfw, rearm rearm_type) noexcept
  1070. {
  1071. bool emulatedfd = static_cast<base_watcher *>(bdfw)->emulatefd;
  1072. // Called with lock held
  1073. if (rearm_type == rearm::REMOVE) {
  1074. bdfw->read_removed = 1;
  1075. if (backend_traits_t::has_separate_rw_fd_watches) {
  1076. bdfw->watch_flags &= ~IN_EVENTS;
  1077. if (! emulatedfd) {
  1078. loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, IN_EVENTS);
  1079. }
  1080. return bdfw->write_removed ? rearm::REMOVE : rearm::NOOP;
  1081. }
  1082. else {
  1083. if (! bdfw->write_removed) {
  1084. if (bdfw->watch_flags & IN_EVENTS) {
  1085. bdfw->watch_flags &= ~IN_EVENTS;
  1086. if (! emulatedfd) {
  1087. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, bdfw->watch_flags,
  1088. bdfw->watch_flags != 0);
  1089. }
  1090. }
  1091. return rearm::NOOP;
  1092. }
  1093. else {
  1094. // both removed: actually remove
  1095. if (! emulatedfd) {
  1096. loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, 0 /* not used */);
  1097. }
  1098. return rearm::REMOVE;
  1099. }
  1100. }
  1101. }
  1102. else if (rearm_type == rearm::DISARM) {
  1103. bdfw->watch_flags &= ~IN_EVENTS;
  1104. if (! emulatedfd) {
  1105. if (! backend_traits_t::has_separate_rw_fd_watches) {
  1106. int watch_flags = bdfw->watch_flags & (IN_EVENTS | OUT_EVENTS);
  1107. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, watch_flags, watch_flags != 0);
  1108. }
  1109. else {
  1110. loop_mech.disable_fd_watch_nolock(bdfw->watch_fd, IN_EVENTS);
  1111. }
  1112. }
  1113. }
  1114. else if (rearm_type == rearm::REARM) {
  1115. if (! emulatedfd) {
  1116. bdfw->watch_flags |= IN_EVENTS;
  1117. if (! backend_traits_t::has_separate_rw_fd_watches) {
  1118. int watch_flags = bdfw->watch_flags;
  1119. set_fd_enabled_nolock(bdfw, bdfw->watch_fd,
  1120. watch_flags & (IN_EVENTS | OUT_EVENTS), true);
  1121. }
  1122. else {
  1123. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, IN_EVENTS, true);
  1124. }
  1125. }
  1126. else {
  1127. bdfw->watch_flags &= ~IN_EVENTS;
  1128. rearm_type = rearm::REQUEUE;
  1129. }
  1130. }
  1131. else if (rearm_type == rearm::NOOP) {
  1132. if (bdfw->emulatefd) {
  1133. if (bdfw->watch_flags & IN_EVENTS) {
  1134. bdfw->watch_flags &= ~IN_EVENTS;
  1135. rearm_type = rearm::REQUEUE;
  1136. }
  1137. }
  1138. }
  1139. return rearm_type;
  1140. }
    // Process re-arm for the secondary (output) watcher in a bi-directional fd watcher.
    rearm process_secondary_rearm(base_bidi_fd_watcher * bdfw, base_watcher * outw, rearm rearm_type) noexcept
    {
        bool emulatedfd = outw->emulatefd;

        // Called with lock held
        if (emulatedfd) {
            if (rearm_type == rearm::REMOVE) {
                bdfw->write_removed = 1;
                bdfw->watch_flags &= ~OUT_EVENTS;
                rearm_type = bdfw->read_removed ? rearm::REMOVE : rearm::NOOP;
            }
            else if (rearm_type == rearm::DISARM) {
                bdfw->watch_flags &= ~OUT_EVENTS;
            }
            else if (rearm_type == rearm::REARM) {
                bdfw->watch_flags &= ~OUT_EVENTS;
                rearm_type = rearm::REQUEUE;
            }
            else if (rearm_type == rearm::NOOP) {
                if (bdfw->watch_flags & OUT_EVENTS) {
                    bdfw->watch_flags &= ~OUT_EVENTS;
                    rearm_type = rearm::REQUEUE;
                }
            }
            return rearm_type;
        }
        else if (rearm_type == rearm::REMOVE) {
            bdfw->write_removed = 1;

            if (backend_traits_t::has_separate_rw_fd_watches) {
                bdfw->watch_flags &= ~OUT_EVENTS;
                loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, OUT_EVENTS);
                return bdfw->read_removed ? rearm::REMOVE : rearm::NOOP;
            }
            else {
                if (! bdfw->read_removed) {
                    if (bdfw->watch_flags & OUT_EVENTS) {
                        bdfw->watch_flags &= ~OUT_EVENTS;
                        set_fd_enabled_nolock(bdfw, bdfw->watch_fd, bdfw->watch_flags, true);
                    }
                    return rearm::NOOP;
                }
                else {
                    // both removed: actually remove
                    loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, 0 /* not used */);
                    return rearm::REMOVE;
                }
            }
        }
        else if (rearm_type == rearm::DISARM) {
            bdfw->watch_flags &= ~OUT_EVENTS;

            if (! backend_traits_t::has_separate_rw_fd_watches) {
                int watch_flags = bdfw->watch_flags;
                set_fd_enabled_nolock(bdfw, bdfw->watch_fd, watch_flags & (IN_EVENTS | OUT_EVENTS), true);
            }
            else {
                loop_mech.disable_fd_watch_nolock(bdfw->watch_fd, OUT_EVENTS);
            }
        }
        else if (rearm_type == rearm::REARM) {
            bdfw->watch_flags |= OUT_EVENTS;

            if (! backend_traits_t::has_separate_rw_fd_watches) {
                int watch_flags = bdfw->watch_flags;
                set_fd_enabled_nolock(bdfw, bdfw->watch_fd, watch_flags & (IN_EVENTS | OUT_EVENTS), true);
            }
            else {
                set_fd_enabled_nolock(bdfw, bdfw->watch_fd, OUT_EVENTS | ONE_SHOT, true);
            }
        }

        return rearm_type;
    }
    void process_child_watch_rearm(base_child_watcher *bcw, rearm rearm_type) noexcept
    {
        if (rearm_type == rearm::REMOVE || rearm_type == rearm::DISARM) {
            loop_mech.unreserve_child_watch_nolock(bcw->watch_handle);
        }
    }

    void process_timer_rearm(base_timer_watcher *btw, rearm rearm_type) noexcept
    {
        // Called with lock held
        if (rearm_type == rearm::REARM) {
            loop_mech.enable_timer_nolock(btw->timer_handle, true, btw->clock);
        }
        else if (rearm_type == rearm::REMOVE) {
            loop_mech.remove_timer_nolock(btw->timer_handle, btw->clock);
        }
        else if (rearm_type == rearm::DISARM) {
            loop_mech.enable_timer_nolock(btw->timer_handle, false, btw->clock);
        }
    }
    // Process queued events; returns true if any events were processed.
    //   limit - maximum number of events to process before returning; -1 for
    //           no limit.
    bool process_events(int limit) noexcept
    {
        loop_mech.lock.lock();

        if (limit == 0) {
            // Nothing to do; release the lock before returning so a later call can re-acquire it.
            loop_mech.lock.unlock();
            return false;
        }

        base_watcher * pqueue = loop_mech.pull_event();
        bool active = false;

        while (pqueue != nullptr) {
            pqueue->active = true;
            active = true;

            base_bidi_fd_watcher *bbfw = nullptr;
            // (The variable above is initialised only to silence a compiler warning).

            if (pqueue->watchType == watch_type_t::SECONDARYFD) {
                // construct a pointer to the main watcher, using integer arithmetic to avoid undefined
                // pointer arithmetic:
                uintptr_t rp = (uintptr_t)pqueue;

                // Here we take the offset of a member from a non-standard-layout class, which is
                // specified to have undefined result by the C++ language standard, but which
                // in practice works fine:
                _Pragma ("GCC diagnostic push")
                _Pragma ("GCC diagnostic ignored \"-Winvalid-offsetof\"")
                rp -= offsetof(base_bidi_fd_watcher, out_watcher);
                _Pragma ("GCC diagnostic pop")
                bbfw = (base_bidi_fd_watcher *)rp;

                // issue a secondary dispatch:
                bbfw->dispatch_second(this);
                pqueue = loop_mech.pull_event();
                continue;
            }

            pqueue->dispatch(this);
            if (limit > 0) {
                limit--;
                if (limit == 0) break;
            }
            pqueue = loop_mech.pull_event();
        }

        loop_mech.lock.unlock();
        return active;
    }
    public:
    using fd_watcher = dprivate::fd_watcher<my_event_loop_t>;
    using bidi_fd_watcher = dprivate::bidi_fd_watcher<my_event_loop_t>;
    using signal_watcher = dprivate::signal_watcher<my_event_loop_t>;
    using child_proc_watcher = dprivate::child_proc_watcher<my_event_loop_t>;
    using timer = dprivate::timer<my_event_loop_t>;

    template <typename D> using fd_watcher_impl = dprivate::fd_watcher_impl<my_event_loop_t, D>;
    template <typename D> using bidi_fd_watcher_impl = dprivate::bidi_fd_watcher_impl<my_event_loop_t, D>;
    template <typename D> using signal_watcher_impl = dprivate::signal_watcher_impl<my_event_loop_t, D>;
    template <typename D> using child_proc_watcher_impl = dprivate::child_proc_watcher_impl<my_event_loop_t, D>;
    template <typename D> using timer_impl = dprivate::timer_impl<my_event_loop_t, D>;

    // Poll the event loop and process any pending events (up to a limit). If no events are pending, wait
    // for and process at least one event.
    void run(int limit = -1) noexcept
    {
        // Poll the mechanism first, in case high-priority events are pending:
        waitqueue_node<T_Mutex> qnode;
        get_pollwait_lock(qnode);
        loop_mech.pull_events(false);
        release_lock(qnode);

        while (! process_events(limit)) {
            // Pull events from the AEN mechanism and insert them in our internal queue:
            get_pollwait_lock(qnode);
            loop_mech.pull_events(true);
            release_lock(qnode);
        }
    }

    // Poll the event loop and process any pending events (up to a limit).
    void poll(int limit = -1) noexcept
    {
        waitqueue_node<T_Mutex> qnode;
        if (poll_attn_lock(qnode)) {
            loop_mech.pull_events(false);
            release_lock(qnode);
        }

        process_events(limit);
    }

    // Get the current time corresponding to a specific clock.
    //   ts - the timespec variable to receive the time
    //   clock - specifies the clock
    //   force_update (default = false) - if true, the time returned will be updated from
    //       the system rather than being a previously cached result. It may be more
    //       accurate, but note that reading from a system clock may be relatively expensive.
    void get_time(timespec &ts, clock_type clock, bool force_update = false) noexcept
    {
        loop_mech.get_time(ts, clock, force_update);
    }

    void get_time(time_val &tv, clock_type clock, bool force_update = false) noexcept
    {
        loop_mech.get_time(tv, clock, force_update);
    }
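
    // Example (illustrative sketch, not part of the library): reading the clock twice, first
    // using the cached value (if available) and then forcing a fresh system-clock read.
    // "my_loop" is a placeholder name for an event_loop instance:
    //
    //     timespec ts;
    //     my_loop.get_time(ts, dasynq::clock_type::MONOTONIC);
    //     my_loop.get_time(ts, dasynq::clock_type::MONOTONIC, true);  // force a system clock read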
    event_loop() { }
    event_loop(const event_loop &other) = delete;
};

typedef event_loop<null_mutex> event_loop_n;
typedef event_loop<std::mutex> event_loop_th;
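
// Example (illustrative sketch, not part of the library): a minimal program using the
// single-threaded loop variant. Assumes this header is included as "dasynq.h"; "my_loop"
// is a placeholder name:
//
//     #include "dasynq.h"
//
//     int main()
//     {
//         dasynq::event_loop_n my_loop;   // no locking - single-threaded use only
//         // ... register watchers (see the watcher classes below) ...
//         while (true) {
//             my_loop.run();              // wait for, and process, at least one event
//         }
//     }
//
// event_loop_th provides the same interface but uses std::mutex, for loops that are polled
// or accessed from multiple threads.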
namespace dprivate {

// POSIX signal event watcher
template <typename EventLoop>
class signal_watcher : private dprivate::base_signal_watcher<typename EventLoop::loop_traits_t::sigdata_t>
{
    template <typename, typename> friend class signal_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using T_Mutex = typename EventLoop::mutex_t;

    public:

    using event_loop_t = EventLoop;
    using siginfo_p = typename signal_watcher::siginfo_p;

    // Register this watcher to watch the specified signal.
    // If an attempt is made to register with more than one event loop at
    // a time, behaviour is undefined. The signal should be masked before
    // calling.
    inline void add_watch(event_loop_t &eloop, int signo, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->siginfo.set_signo(signo);
        eloop.register_signal(this, signo);
    }

    inline void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->siginfo.get_signo());
    }

    // Add a signal watch via a lambda. The watcher is allocated dynamically and destroys
    // itself when removed from the event loop.
    template <typename T>
    static signal_watcher<event_loop_t> *add_watch(event_loop_t &eloop, int signo, T watch_hndlr)
    {
        class lambda_sig_watcher : public signal_watcher_impl<event_loop_t, lambda_sig_watcher>
        {
            private:
            T watch_hndlr;

            public:
            lambda_sig_watcher(T watch_handlr_a) : watch_hndlr(watch_handlr_a)
            {
                //
            }

            rearm received(event_loop_t &eloop, int signo, siginfo_p siginfo)
            {
                return watch_hndlr(eloop, signo, siginfo);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_sig_watcher * lsw = new lambda_sig_watcher(watch_hndlr);
        lsw->add_watch(eloop, signo);
        return lsw;
    }

    // virtual rearm received(EventLoop &eloop, int signo, siginfo_p siginfo) = 0;
};
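
// Example (illustrative sketch, not part of the library): watching SIGINT with a lambda.
// Assumes SIGINT has already been masked in the process, as required by add_watch above;
// "my_loop" is a placeholder name:
//
//     using loop_t = dasynq::event_loop_n;
//     loop_t my_loop;
//
//     loop_t::signal_watcher::add_watch(my_loop, SIGINT,
//             [](loop_t &eloop, int signo, loop_t::signal_watcher::siginfo_p info) {
//         // handle the signal...
//         return dasynq::rearm::REARM;   // keep watching
//     });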
template <typename EventLoop, typename Derived>
class signal_watcher_impl : public signal_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);

        loop_access::get_base_lock(loop).unlock();
        auto rearm_type = static_cast<Derived *>(this)->received(loop, this->siginfo.get_signo(), this->siginfo);
        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            loop_access::process_signal_rearm(loop, this, rearm_type);
            post_dispatch(loop, this, rearm_type);
        }
    }
};
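
// Example (illustrative sketch): the CRTP pattern expected by signal_watcher_impl. The
// derived class provides received(), which dispatch() above forwards to. Class and
// variable names here are placeholders:
//
//     class my_sig_watcher : public dasynq::event_loop_n::signal_watcher_impl<my_sig_watcher>
//     {
//         public:
//         dasynq::rearm received(dasynq::event_loop_n &eloop, int signo, siginfo_p info)
//         {
//             return dasynq::rearm::REMOVE;   // stop watching after the first delivery
//         }
//     };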
// POSIX file descriptor event watcher
template <typename EventLoop>
class fd_watcher : private dprivate::base_fd_watcher
{
    template <typename, typename> friend class fd_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    protected:

    // Set the types of event to watch. Only supported if loop_traits_t::has_bidi_fd_watch
    // is true; otherwise has unspecified behavior.
    // Only safe to call from within the callback handler (fd_event). Might not take
    // effect until the current callback handler returns with REARM.
    void set_watch_flags(int newFlags)
    {
        this->watch_flags = newFlags;
    }

    public:

    using event_loop_t = EventLoop;

    // Register a file descriptor watcher with an event loop. Flags
    // can be any combination of dasynq::IN_EVENTS / dasynq::OUT_EVENTS.
    // Exactly one of IN_EVENTS/OUT_EVENTS must be specified if the event
    // loop does not support bi-directional fd watchers (i.e. if
    // ! loop_traits_t::has_bidi_fd_watch).
    //
    // Mechanisms supporting dual watchers allow for two watchers for a
    // single file descriptor (one watching read status and the other
    // write status). Other mechanisms support only a single watcher
    // per file descriptor. Adding a watcher beyond what is supported
    // causes undefined behavior.
    //
    // Can fail with std::bad_alloc or std::system_error.
    void add_watch(event_loop_t &eloop, int fd, int flags, bool enabled = true, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->watch_fd = fd;
        this->watch_flags = flags;
        eloop.register_fd(this, fd, flags, enabled, true);
    }

    // As add_watch, but without watch emulation.
    void add_watch_noemu(event_loop_t &eloop, int fd, int flags, bool enabled = true, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->watch_fd = fd;
        this->watch_flags = flags;
        eloop.register_fd(this, fd, flags, enabled, false);
    }

    int get_watched_fd()
    {
        return this->watch_fd;
    }

    // Deregister a file descriptor watcher.
    //
    // If other threads may be polling the event loop, it is not safe to assume
    // the watcher is unregistered until the watch_removed() callback is issued
    // (which will not occur until the event handler returns, if it is active).
    // In a single threaded environment, it is safe to delete the watcher after
    // calling this method as long as the handler (if it is active) accesses no
    // internal state and returns rearm::REMOVED.
    void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->watch_fd);
    }

    void set_enabled(event_loop_t &eloop, bool enable) noexcept
    {
        std::lock_guard<mutex_t> guard(eloop.get_base_lock());
        if (this->emulatefd) {
            if (enable && ! this->emulate_enabled) {
                loop_access::requeue_watcher(eloop, this);
            }
            this->emulate_enabled = enable;
        }
        else {
            eloop.set_fd_enabled_nolock(this, this->watch_fd, this->watch_flags, enable);
        }
        if (! enable) {
            eloop.dequeue_watcher(this);
        }
    }

    // Add an fd watch via a lambda. The watch is allocated dynamically and destroys
    // itself when removed from the event loop.
    template <typename T>
    static fd_watcher<EventLoop> *add_watch(event_loop_t &eloop, int fd, int flags, T watchHndlr)
    {
        class lambda_fd_watcher : public fd_watcher_impl<event_loop_t, lambda_fd_watcher>
        {
            private:
            T watchHndlr;

            public:
            lambda_fd_watcher(T watchHandlr_a) : watchHndlr(watchHandlr_a)
            {
                //
            }

            rearm fd_event(event_loop_t &eloop, int fd, int flags)
            {
                return watchHndlr(eloop, fd, flags);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_fd_watcher * lfd = new lambda_fd_watcher(watchHndlr);
        lfd->add_watch(eloop, fd, flags);
        return lfd;
    }

    // virtual rearm fd_event(EventLoop &eloop, int fd, int flags) = 0;
};
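
// Example (illustrative sketch): watching a pipe or socket descriptor for readability via
// the lambda helper above. "my_loop" and "fd" are placeholders supplied by the application;
// read() requires <unistd.h>:
//
//     using loop_t = dasynq::event_loop_n;
//
//     loop_t::fd_watcher::add_watch(my_loop, fd, dasynq::IN_EVENTS,
//             [](loop_t &eloop, int fd, int flags) {
//         char buf[1024];
//         read(fd, buf, sizeof(buf));        // error handling omitted for brevity
//         return dasynq::rearm::REARM;       // keep watching for input
//     });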
template <typename EventLoop, typename Derived>
class fd_watcher_impl : public fd_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);

        // In case emulating, clear enabled here; REARM or explicit set_enabled will re-enable.
        this->emulate_enabled = false;

        loop_access::get_base_lock(loop).unlock();
        auto rearm_type = static_cast<Derived *>(this)->fd_event(loop, this->watch_fd, this->event_flags);
        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->event_flags = 0;
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            rearm_type = loop_access::process_fd_rearm(loop, this, rearm_type);
            post_dispatch(loop, this, rearm_type);
        }
    }
};
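
// Example (illustrative sketch): a CRTP fd watcher. fd_watcher_impl::dispatch() above calls
// fd_event() on the derived class; class and variable names are placeholders:
//
//     class my_fd_watcher : public dasynq::event_loop_n::fd_watcher_impl<my_fd_watcher>
//     {
//         public:
//         dasynq::rearm fd_event(dasynq::event_loop_n &eloop, int fd, int flags)
//         {
//             if (flags & dasynq::IN_EVENTS) {
//                 // consume input from fd...
//             }
//             return dasynq::rearm::REARM;
//         }
//     };
//
//     my_fd_watcher watcher;
//     watcher.add_watch(my_loop, fd, dasynq::IN_EVENTS);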
// A bi-directional file descriptor watcher with independent read- and write- channels.
// This watcher type has two event notification methods which can both potentially be
// active at the same time.
template <typename EventLoop>
class bidi_fd_watcher : private dprivate::base_bidi_fd_watcher
{
    template <typename, typename> friend class bidi_fd_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    void set_watch_enabled(EventLoop &eloop, bool in, bool b)
    {
        int events = in ? IN_EVENTS : OUT_EVENTS;
        auto orig_flags = this->watch_flags;

        if (b) {
            this->watch_flags |= events;
        }
        else {
            this->watch_flags &= ~events;
        }

        dprivate::base_watcher * watcher = in ? this : &this->out_watcher;

        if (! watcher->emulatefd) {
            if (EventLoop::loop_traits_t::has_separate_rw_fd_watches) {
                eloop.set_fd_enabled_nolock(this, this->watch_fd, events | ONE_SHOT, b);
            }
            else {
                eloop.set_fd_enabled_nolock(this, this->watch_fd,
                        (this->watch_flags & IO_EVENTS) | ONE_SHOT,
                        (this->watch_flags & IO_EVENTS) != 0);
            }
        }
        else {
            // emulation: if enabling a previously disabled watcher, must queue now:
            if (b && (orig_flags != this->watch_flags)) {
                this->watch_flags = orig_flags;
                loop_access::requeue_watcher(eloop, watcher);
            }
        }

        if (! b) {
            eloop.dequeue_watcher(watcher);
        }
    }

    public:

    using event_loop_t = EventLoop;

    void set_in_watch_enabled(event_loop_t &eloop, bool b) noexcept
    {
        eloop.get_base_lock().lock();
        set_watch_enabled(eloop, true, b);
        eloop.get_base_lock().unlock();
    }

    void set_out_watch_enabled(event_loop_t &eloop, bool b) noexcept
    {
        eloop.get_base_lock().lock();
        set_watch_enabled(eloop, false, b);
        eloop.get_base_lock().unlock();
    }

    // Set the watch flags, which enables/disables both the in-watch and the out-watch accordingly.
    //
    // Concurrency: this method may only be called if either
    //  - it does not enable a watcher that might currently be active, or
    //  - the event loop will not be polled while the watcher is active.
    // (i.e. it is ok to call set_watches from within the read_ready/write_ready handlers if no other
    // thread will poll the event loop; it is always ok to *dis*able a watcher that might be active,
    // though the re-arm action returned by the callback may undo the effect).
    void set_watches(event_loop_t &eloop, int new_flags) noexcept
    {
        std::lock_guard<mutex_t> guard(eloop.get_base_lock());

        bool use_emulation = this->emulatefd || this->out_watcher.emulatefd;
        if (use_emulation || EventLoop::loop_traits_t::has_separate_rw_fd_watches) {
            set_watch_enabled(eloop, true, (new_flags & IN_EVENTS) != 0);
            set_watch_enabled(eloop, false, (new_flags & OUT_EVENTS) != 0);
        }
        else {
            this->watch_flags = (this->watch_flags & ~IO_EVENTS) | new_flags;
            eloop.set_fd_enabled_nolock((dprivate::base_watcher *) this, this->watch_fd, this->watch_flags & IO_EVENTS, true);
        }
    }
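
    // Example (illustrative sketch): switching on the out-watch from within a handler, e.g.
    // once there is buffered data to flush. This assumes no other thread polls the loop (see
    // the concurrency note above); the re-arm value shown is just one reasonable choice:
    //
    //     // inside read_ready():
    //     set_watches(eloop, dasynq::IN_EVENTS | dasynq::OUT_EVENTS);
    //     return dasynq::rearm::NOOP;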
    // Register a bi-directional file descriptor watcher with an event loop. Flags
    // can be any combination of dasynq::IN_EVENTS / dasynq::OUT_EVENTS.
    //
    // Can fail with std::bad_alloc or std::system_error.
    void add_watch(event_loop_t &eloop, int fd, int flags, int inprio = DEFAULT_PRIORITY, int outprio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->out_watcher.base_watcher::init();

        this->watch_fd = fd;
        this->watch_flags = flags | dprivate::multi_watch;
        this->read_removed = false;
        this->write_removed = false;
        this->priority = inprio;
        this->set_priority(this->out_watcher, outprio);
        eloop.register_fd(this, fd, flags, true);
    }

    // As add_watch, but without watch emulation.
    void add_watch_noemu(event_loop_t &eloop, int fd, int flags, int inprio = DEFAULT_PRIORITY, int outprio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->out_watcher.base_watcher::init();

        this->watch_fd = fd;
        this->watch_flags = flags | dprivate::multi_watch;
        this->read_removed = false;
        this->write_removed = false;
        this->priority = inprio;
        this->set_priority(this->out_watcher, outprio);
        eloop.register_fd(this, fd, flags, false);
    }

    int get_watched_fd()
    {
        return this->watch_fd;
    }

    // Deregister a bi-directional file descriptor watcher.
    //
    // If other threads may be polling the event loop, it is not safe to assume
    // the watcher is unregistered until the watch_removed() callback is issued
    // (which will not occur until the event handler returns, if it is active).
    // In a single threaded environment, it is safe to delete the watcher after
    // calling this method as long as the handler (if it is active) accesses no
    // internal state and returns rearm::REMOVED.
    void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->watch_fd);
    }

    // Add a bi-directional fd watch via a lambda; the same handler receives both read
    // (IN_EVENTS) and write (OUT_EVENTS) notifications. The watch is allocated dynamically
    // and destroys itself when removed from the event loop.
    template <typename T>
    static bidi_fd_watcher<event_loop_t> *add_watch(event_loop_t &eloop, int fd, int flags, T watch_hndlr)
    {
        class lambda_bidi_watcher : public bidi_fd_watcher_impl<event_loop_t, lambda_bidi_watcher>
        {
            private:
            T watch_hndlr;

            public:
            lambda_bidi_watcher(T watch_handlr_a) : watch_hndlr(watch_handlr_a)
            {
                //
            }

            rearm read_ready(event_loop_t &eloop, int fd)
            {
                return watch_hndlr(eloop, fd, IN_EVENTS);
            }

            rearm write_ready(event_loop_t &eloop, int fd)
            {
                return watch_hndlr(eloop, fd, OUT_EVENTS);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_bidi_watcher * lfd = new lambda_bidi_watcher(watch_hndlr);
        lfd->add_watch(eloop, fd, flags);
        return lfd;
    }

    // virtual rearm read_ready(EventLoop &eloop, int fd) noexcept = 0;
    // virtual rearm write_ready(EventLoop &eloop, int fd) noexcept = 0;
};
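
// Example (illustrative sketch): a lambda-based bi-directional watch. The handler is invoked
// with IN_EVENTS for read-readiness and OUT_EVENTS for write-readiness; "my_loop" and
// "sock_fd" are placeholder names:
//
//     using loop_t = dasynq::event_loop_n;
//
//     loop_t::bidi_fd_watcher::add_watch(my_loop, sock_fd,
//             dasynq::IN_EVENTS | dasynq::OUT_EVENTS,
//             [](loop_t &eloop, int fd, int flags) {
//         if (flags & dasynq::IN_EVENTS) {
//             // read from fd...
//         }
//         else {
//             // write pending data to fd...
//         }
//         return dasynq::rearm::REARM;
//     });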
template <typename EventLoop, typename Derived>
class bidi_fd_watcher_impl : public bidi_fd_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);
        this->emulate_enabled = false;

        loop_access::get_base_lock(loop).unlock();
        auto rearm_type = static_cast<Derived *>(this)->read_ready(loop, this->watch_fd);
        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->event_flags &= ~IN_EVENTS;
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            rearm_type = loop_access::process_primary_rearm(loop, this, rearm_type);

            auto &outwatcher = bidi_fd_watcher<EventLoop>::out_watcher;
            post_dispatch(loop, this, &outwatcher, rearm_type);
        }
    }

    void dispatch_second(void *loop_ptr) noexcept override
    {
        auto &outwatcher = bidi_fd_watcher<EventLoop>::out_watcher;

        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);

        loop_access::get_base_lock(loop).unlock();
        auto rearm_type = static_cast<Derived *>(this)->write_ready(loop, this->watch_fd);
        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->event_flags &= ~OUT_EVENTS;
            outwatcher.active = false;
            if (outwatcher.deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            rearm_type = loop_access::process_secondary_rearm(loop, this, &outwatcher, rearm_type);

            if (rearm_type == rearm::REQUEUE) {
                post_dispatch(loop, &outwatcher, rearm_type);
            }
            else {
                post_dispatch(loop, this, &outwatcher, rearm_type);
            }
        }
    }
};
// Child process event watcher
template <typename EventLoop>
class child_proc_watcher : private dprivate::base_child_watcher
{
    template <typename, typename> friend class child_proc_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    public:

    using event_loop_t = EventLoop;

    // Send a signal to the watched process, if it is still running, in a race-free manner.
    // The return value is as for POSIX kill(); the return is -1 with errno set to ESRCH if
    // the process has already terminated.
    int send_signal(event_loop_t &loop, int signo) noexcept
    {
        auto reaper_mutex = loop.get_reaper_mutex();
        std::lock_guard<decltype(reaper_mutex)> guard(reaper_mutex);

        if (this->child_termd) {
            errno = ESRCH;
            return -1;
        }

        return kill(this->watch_pid, signo);
    }
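
    // Example (illustrative sketch): terminating the watched child without racing against
    // reaping. "watcher" and "my_loop" are placeholder names for a registered watcher and
    // its event loop:
    //
    //     if (watcher.send_signal(my_loop, SIGTERM) == -1 && errno == ESRCH) {
    //         // the child had already terminated and been reaped
    //     }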
    // Reserve resources for a child watcher with the given event loop.
    // Reservation can fail with std::bad_alloc. Some backends do not support
    // reservation (it will always fail) - check loop_traits_t::supports_childwatch_reservation.
    void reserve_watch(event_loop_t &eloop)
    {
        eloop.reserve_child_watch(this);
    }

    void unreserve(event_loop_t &eloop)
    {
        eloop.unreserve(this);
    }

    // Register a watcher for the given child process with an event loop.
    // Registration can fail with std::bad_alloc.
    // Note that in multi-threaded programs, use of this function may be prone to a
    // race condition such that the child terminates before the watcher is registered.
    void add_watch(event_loop_t &eloop, pid_t child, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->watch_pid = child;
        this->priority = prio;
        eloop.register_child(this, child);
    }

    // Register a watcher for the given child process with an event loop,
    // after having reserved resources previously (using reserve_watch).
    // Registration cannot fail.
    // Note that in multi-threaded programs, use of this function may be prone to a
    // race condition such that the child terminates before the watcher is registered;
    // use the "fork" member function to avoid this.
    void add_reserved(event_loop_t &eloop, pid_t child, int prio = DEFAULT_PRIORITY) noexcept
    {
        base_watcher::init();
        this->watch_pid = child;
        this->priority = prio;
        eloop.register_reserved_child(this, child);
    }

    void deregister(event_loop_t &eloop, pid_t child) noexcept
    {
        eloop.deregister(this, child);
    }

    // Stop watching the currently watched child, but retain watch reservation.
    void stop_watch(event_loop_t &eloop) noexcept
    {
        eloop.stop_watch(this);
    }

    // Fork and watch the child with this watcher on the given event loop.
    // If resource limitations prevent the child process from being watched, it is
    // terminated immediately (or if the implementation allows, never started),
    // and a suitable std::system_error or std::bad_alloc exception is thrown.
    // Returns:
    //  - the child pid in the parent
    //  - 0 in the child
    pid_t fork(event_loop_t &eloop, bool from_reserved = false, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;

        if (EventLoop::loop_traits_t::supports_childwatch_reservation) {
            // Reserve a watch, fork, then claim reservation
            if (! from_reserved) {
                reserve_watch(eloop);
            }

            auto &lock = eloop.get_base_lock();
            lock.lock();

            pid_t child = ::fork();
            if (child == -1) {
                // Unreserve watch.
                lock.unlock();
                unreserve(eloop);
                throw std::system_error(errno, std::system_category());
            }

            if (child == 0) {
                // I am the child
                lock.unlock(); // may not really be necessary
                return 0;
            }

            // Register this watcher.
            this->watch_pid = child;
            eloop.register_reserved_child_nolock(this, child);
            lock.unlock();
            return child;
        }
        else {
            int pipefds[2];
            if (pipe2(pipefds, O_CLOEXEC) == -1) {
                throw std::system_error(errno, std::system_category());
            }

            std::lock_guard<mutex_t> guard(eloop.get_base_lock());

            pid_t child = ::fork();
            if (child == -1) {
                throw std::system_error(errno, std::system_category());
            }

            if (child == 0) {
                // I am the child
                close(pipefds[1]);

                // Wait for message from parent before continuing:
                int rr;
                int r = read(pipefds[0], &rr, sizeof(rr));
                while (r == -1 && errno == EINTR) {
                    r = read(pipefds[0], &rr, sizeof(rr));
                }

                if (r <= 0) _exit(0);

                close(pipefds[0]);
                return 0;
            }

            close(pipefds[0]); // close read end

            // Register this watcher.
            try {
                this->watch_pid = child;
                eloop.register_child(this, child);

                // Continue in child (it doesn't matter what is written):
                write(pipefds[1], &pipefds, sizeof(int));
                close(pipefds[1]);
                return child;
            }
            catch (...) {
                close(pipefds[1]);
                throw;
            }
        }
    }

    // virtual rearm status_change(EventLoop &eloop, pid_t child, int status) = 0;
};
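
// Example (illustrative sketch): forking a child through the watcher so that its termination
// cannot be missed. status_change() is the callback invoked by child_proc_watcher_impl's
// dispatch below; class and variable names here are placeholders:
//
//     class my_child_watcher : public dasynq::event_loop_n::child_proc_watcher_impl<my_child_watcher>
//     {
//         public:
//         dasynq::rearm status_change(dasynq::event_loop_n &eloop, pid_t child, int status)
//         {
//             // inspect status (e.g. with WIFEXITED / WEXITSTATUS)...
//             return dasynq::rearm::REMOVE;
//         }
//     };
//
//     my_child_watcher child_watch;
//     pid_t pid = child_watch.fork(my_loop);
//     if (pid == 0) {
//         // in the child: exec or do the child's work, then _exit()
//     }
//     // in the parent: my_loop.run() will eventually deliver status_change().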
template <typename EventLoop, typename Derived>
class child_proc_watcher_impl : public child_proc_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);

        loop_access::get_base_lock(loop).unlock();
        auto rearm_type = static_cast<Derived *>(this)->status_change(loop, this->watch_pid, this->child_status);
        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            loop_access::process_child_watch_rearm(loop, this, rearm_type);

            // rearm_type = loop.process??;
            post_dispatch(loop, this, rearm_type);
        }
    }
};
// Timer event watcher
template <typename EventLoop>
class timer : private base_timer_watcher
{
    template <typename, typename> friend class timer_impl;

    using base_t = base_timer_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    public:

    using event_loop_t = EventLoop;

    void add_timer(event_loop_t &eloop, clock_type clock = clock_type::MONOTONIC, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->clock = clock;
        this->intervals = 0;
        eloop.register_timer(this, clock);
    }

    void arm_timer(event_loop_t &eloop, const timespec &timeout) noexcept
    {
        eloop.set_timer(this, timeout, base_t::clock);
    }

    void arm_timer(event_loop_t &eloop, const timespec &timeout, const timespec &interval) noexcept
    {
        eloop.set_timer(this, timeout, interval, base_t::clock);
    }

    // Arm timer, relative to now:
    void arm_timer_rel(event_loop_t &eloop, const timespec &timeout) noexcept
    {
        eloop.set_timer_rel(this, timeout, base_t::clock);
    }

    void arm_timer_rel(event_loop_t &eloop, const timespec &timeout,
            const timespec &interval) noexcept
    {
        eloop.set_timer_rel(this, timeout, interval, base_t::clock);
    }

    void stop_timer(event_loop_t &eloop) noexcept
    {
        eloop.stop_timer(this, base_t::clock);
    }

    void set_enabled(event_loop_t &eloop, clock_type clock, bool enabled) noexcept
    {
        std::lock_guard<mutex_t> guard(eloop.get_base_lock());
        eloop.set_timer_enabled_nolock(this, clock, enabled);
        if (! enabled) {
            eloop.dequeue_watcher(this);
        }
    }

    void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->clock);
    }

    // Add a timer via a lambda. The timer is allocated dynamically and destroys itself
    // when removed from the event loop.
    template <typename T>
    static timer<EventLoop> *add_timer(EventLoop &eloop, clock_type clock, bool relative,
            const timespec &timeout, const timespec &interval, T watch_hndlr)
    {
        class lambda_timer : public timer_impl<event_loop_t, lambda_timer>
        {
            private:
            T watch_hndlr;

            public:
            lambda_timer(T watch_handlr_a) : watch_hndlr(watch_handlr_a)
            {
                //
            }

            rearm timer_expiry(event_loop_t &eloop, int intervals)
            {
                return watch_hndlr(eloop, intervals);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_timer * lt = new lambda_timer(watch_hndlr);
        lt->add_timer(eloop, clock);
        if (relative) {
            lt->arm_timer_rel(eloop, timeout, interval);
        }
        else {
            lt->arm_timer(eloop, timeout, interval);
        }
        return lt;
    }

    // Timer expired, and the given number of intervals have elapsed before the
    // expiry event was queued. Normally intervals == 1, indicating no overrun.
    // virtual rearm timer_expiry(event_loop_t &eloop, int intervals) = 0;
};
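
// Example (illustrative sketch): a one-second periodic timer registered via the lambda helper
// above, using the MONOTONIC clock and a relative initial expiry. "my_loop" is a placeholder
// name for an event_loop_n instance:
//
//     using loop_t = dasynq::event_loop_n;
//
//     timespec timeout = { 1, 0 };     // first expiry: 1 second from now
//     timespec interval = { 1, 0 };    // then every second
//
//     loop_t::timer::add_timer(my_loop, dasynq::clock_type::MONOTONIC, true /* relative */,
//             timeout, interval,
//             [](loop_t &eloop, int intervals) {
//         // 'intervals' counts expiries since the last callback (normally 1)
//         return dasynq::rearm::REARM;
//     });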
template <typename EventLoop, typename Derived>
class timer_impl : public timer<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);

        loop_access::get_base_lock(loop).unlock();

        auto intervals_report = this->intervals;
        this->intervals = 0;

        auto rearm_type = static_cast<Derived *>(this)->timer_expiry(loop, intervals_report);

        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            loop_access::process_timer_rearm(loop, this, rearm_type);

            post_dispatch(loop, this, rearm_type);
        }
    }
};

} // namespace dasynq::dprivate
} // namespace dasynq

#endif