  1. #ifndef DASYNQ_H_
  2. #define DASYNQ_H_
  3. #include "dasynq/config.h"
  4. #include "dasynq/flags.h"
  5. #include "dasynq/stableheap.h"
  6. #include "dasynq/interrupt.h"
  7. #include "dasynq/util.h"
  8. // Dasynq uses a "mix-in" pattern to produce an event loop implementation incorporating selectable
  9. // implementations of various components (main backend, timers, child process watch mechanism etc). In C++
  10. // this can be achieved by a template for some component which extends its own type parameter:
  11. //
  12. // template <typename Base> class X : public Base { .... }
  13. //
  14. // (Note that in a sense this is actually the opposite of the so-called "Curiously Recurring Template"
  15. // pattern, which can be used to achieve a similar goal). We can chain several such components together to
  16. // "mix in" the functionality of each into the final class, eg:
  17. //
  18. // template <typename T> using loop_t =
  19. // epoll_loop<interrupt_channel<timer_fd_events<child_proc_events<T>>>>;
  20. //
  21. // (which defines an alias template "loop_t", whose implementation will use the epoll backend, a standard
  22. // interrupt channel implementation, a timerfd-based timer implementation, and the standard child process
  23. // watch implementation). We sometimes need the base class to be able to call derived-class members: to do
  24. // this we pass a pointer to the derived instance into a template member function in the base, for example
  25. // the "init" function:
  26. //
  27. // template <typename T> void init(T *derived)
  28. // {
  29. // // can call method on derived:
  30. // derived->add_listener();
  31. // // chain to next class:
  32. // Base::init(derived);
  33. // }
  34. //
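// As a rough sketch (the component and method names here are hypothetical, not part of Dasynq),
// a complete mix-in component following this pattern might look like:
//
// template <typename Base> class my_component : public Base
// {
//     public:
//     // optionally augment/re-export the traits:
//     using traits_t = typename Base::traits_t;
//
//     template <typename T> void init(T *derived)
//     {
//         derived->add_listener();   // call a member of the most-derived class
//         Base::init(derived);       // chain to the next component
//     }
// };
//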
  35. // The 'loop_t' defined above is a template for a usable backend mechanism for the event_loop template
  36. // class. At the base of all this is the event_dispatch class, defined below, which receives event
  37. // notifications and inserts them into a queue for processing. The event_loop class, also below, wraps this
  38. // (via composition) in an interface which can be used to register/de-register/enable/disable event
  39. // watchers, and which can process the queued events by calling the watcher callbacks. The event_loop class
  40. // also provides some synchronisation to ensure thread-safety, and abstracts away some differences between
  41. // backends.
  42. //
  43. // The differences are exposed as traits, partly via a separate traits class (loop_traits_t as defined
  44. // below, which contains the "main" traits, particularly the sigdata_t, fd_r and fd_s types). Note that the
  45. // event_dispatch class exposes the loop traits as traits_t, and these are then potentially augmented at
  46. // each stage of the mechanism inheritance chain (i.e. the final traits are exposed as
  47. // `loop_t<event_dispatch>::traits_t').
  48. //
  49. // The trait members are:
  50. // sigdata_t - a wrapper for the siginfo_t type or equivalent used to pass signal parameters
  51. // fd_r - a file descriptor wrapper, if the backend is able to retrieve the file descriptor when
  52. // it receives an fd event. Not all backends can do this.
  53. // fd_s - a file descriptor storage wrapper. If the backend can retrieve file descriptors, this
  54. // will be empty (and ideally zero-size), otherwise it stores a file descriptor.
  55. // With an fd_r and fd_s instance you can always retrieve the file descriptor:
  56. // `fdr.get_fd(fds)' will return it.
  57. // has_bidi_fd_watch
  58. // - boolean indicating whether a single watch can support watching for both input and output
  59. // events simultaneously
  60. // has_separate_rw_fd_watches
  61. // - boolean indicating whether it is possible to add separate input and output watches for the
  62. // same fd. Either this or has_bidi_fd_watch must be true.
  63. // interrupt_after_fd_add
  64. // - boolean indicating if a loop interrupt must be forced after adding/enabling an fd watch.
  65. // interrupt_after_signal_add
  66. // - boolean indicating if a loop interrupt must be forced after adding or enabling a signal
  67. // watch.
  68. // supports_non_oneshot_fd
  69. // - boolean; if true, event_dispatch can arm an fd watch without ONESHOT, and returning zero
  70. // events from receive_fd_event (the event notification function) will leave the descriptor
  71. // armed. If false, all fd watches are effectively ONESHOT (they can be re-armed immediately
  72. // after delivery by returning an appropriate event flag mask).
  73. // full_timer_support
  74. // - boolean indicating that the monotonic and system clocks are actually different clocks and
  75. // that timers against the system clock will work correctly if the system clock time is
  76. // adjusted. If false, the monotonic clock may not be present at all (monotonic clock will map
  77. // to system clock), and timers against either clock are not guaranteed to work correctly if
  78. // the system clock is adjusted.
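//
// For orientation, a backend traits class therefore declares roughly the following members. This is an
// illustrative sketch only (names and values here are examples); see the individual backend headers
// included below (e.g. dasynq/epoll.h) for the real definitions:
//
// class example_backend_traits
// {
//     public:
//     class sigdata_t { /* ... int get_signo() ... */ };   // signal data wrapper
//     class fd_r { /* ... int get_fd(fd_s &) ... */ };     // fd retrieval wrapper
//     class fd_s { /* possibly empty */ };                 // fd storage wrapper
//
//     constexpr static bool has_bidi_fd_watch = true;
//     constexpr static bool has_separate_rw_fd_watches = false;
//     constexpr static bool interrupt_after_fd_add = false;
//     constexpr static bool interrupt_after_signal_add = false;
//     constexpr static bool supports_non_oneshot_fd = false;
//     constexpr static bool full_timer_support = true;
// };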
  79. #if DASYNQ_HAVE_EPOLL <= 0
  80. #if _POSIX_TIMERS > 0
  81. #include "dasynq/posixtimer.h"
  82. namespace dasynq {
  83. inline namespace v2 {
  84. template <typename T, bool provide_mono_timer = true> using timer_events = posix_timer_events<T, provide_mono_timer>;
  85. } // namespace v2
  86. } // namespace dasynq
  87. #else
  88. #include "dasynq/itimer.h"
  89. namespace dasynq {
  90. inline namespace v2 {
  91. template <typename T, bool provide_mono_timer = true> using timer_events = itimer_events<T, provide_mono_timer>;
  92. } // namespace v2
  93. } // namespace dasynq
  94. #endif
  95. #endif
  96. #if DASYNQ_HAVE_KQUEUE
  97. #if DASYNQ_KQUEUE_MACOS_WORKAROUND
  98. #include "dasynq/kqueue-macos.h"
  99. #include "dasynq/childproc.h"
  100. namespace dasynq {
  101. inline namespace v2 {
  102. template <typename T> using loop_t = macos_kqueue_loop<timer_events<child_proc_events<interrupt_channel<T>>, false>>;
  103. using loop_traits_t = macos_kqueue_traits;
  104. } // namespace v2
  105. } // namespace dasynq
  106. #else
  107. #include "dasynq/kqueue.h"
  108. #include "dasynq/childproc.h"
  109. namespace dasynq {
  110. inline namespace v2 {
  111. template <typename T> using loop_t = kqueue_loop<timer_events<child_proc_events<interrupt_channel<T>>, false>>;
  112. using loop_traits_t = kqueue_traits;
  113. } // namespace v2
  114. } // namespace dasynq
  115. #endif
  116. #elif DASYNQ_HAVE_EPOLL
  117. #include "dasynq/epoll.h"
  118. #include "dasynq/timerfd.h"
  119. #include "dasynq/childproc.h"
  120. namespace dasynq {
  121. inline namespace v2 {
  122. template <typename T> using loop_t = epoll_loop<interrupt_channel<timer_fd_events<child_proc_events<T>>>>;
  123. using loop_traits_t = epoll_traits;
  124. } // namespace v2
  125. } // namespace dasynq
  126. #else
  127. #include "dasynq/childproc.h"
  128. #if DASYNQ_HAVE_PSELECT
  129. #include "dasynq/pselect.h"
  130. namespace dasynq {
  131. inline namespace v2 {
  132. template <typename T> using loop_t = pselect_events<timer_events<interrupt_channel<child_proc_events<T>>, false>>;
  133. using loop_traits_t = select_traits;
  134. } // namespace v2
  135. } // namespace dasynq
  136. #else
  137. #include "dasynq/select.h"
  138. namespace dasynq {
  139. inline namespace v2 {
  140. template <typename T> using loop_t = select_events<timer_events<interrupt_channel<child_proc_events<T>>, false>>;
  141. using loop_traits_t = select_traits;
  142. } // namespace v2
  143. } // namespace dasynq
  144. #endif
  145. #endif
  146. #include <atomic>
  147. #include <condition_variable>
  148. #include <cstdint>
  149. #include <cstddef>
  150. #include <system_error>
  151. #include <unistd.h>
  152. #include <fcntl.h>
  153. #include "dasynq/mutex.h"
  154. #include "dasynq/basewatchers.h"
  155. namespace dasynq {
  156. /**
  157. * Values for rearm/disarm return from event handlers
  158. */
  159. enum class rearm
  160. {
  161. /** Re-arm the event watcher so that it receives further events */
  162. REARM,
  163. /** Disarm the event watcher so that it receives no further events, until it is re-armed explicitly */
  164. DISARM,
  165. /** Leave in current armed/disarmed state */
  166. NOOP,
  167. /** Remove the event watcher (and call "removed" callback) */
  168. REMOVE,
  169. /** The watcher has been removed - don't touch it! */
  170. REMOVED,
  171. /** Re-queue the watcher to have its notification called again */
  172. REQUEUE
  173. };
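// As an illustration of how these values are used (a sketch only; the actual watcher callback
// signatures are defined with the watcher classes elsewhere in this file, and "read_all_available"
// is a hypothetical helper):
//
// rearm fd_event(event_loop_t &loop, int fd, int flags)
// {
//     if (read_all_available(fd)) {
//         return rearm::REARM;    // keep watching for further input events
//     }
//     return rearm::REMOVE;       // remove the watch; the "removed" callback will be issued
// }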
  174. // Tag type to specify that initialisation should be delayed
  175. class delayed_init {
  176. DASYNQ_EMPTY_BODY
  177. };
  178. inline namespace v2 {
  179. namespace dprivate {
  180. // Classes for implementing a fair(ish) wait queue.
  181. // A queue node can be signalled when it reaches the head of
  182. // the queue.
  183. template <typename T_Mutex> class waitqueue;
  184. template <typename T_Mutex> class waitqueue_node;
  185. // Select an appropriate condition variable type for a mutex:
  186. // condition_variable if mutex is std::mutex, or condition_variable_any
  187. // otherwise.
  188. template <class T_Mutex> class condvar_selector;
  189. template <> class condvar_selector<std::mutex>
  190. {
  191. public:
  192. typedef std::condition_variable condvar;
  193. };
  194. template <class T_Mutex> class condvar_selector
  195. {
  196. public:
  197. typedef std::condition_variable_any condvar;
  198. };
  199. // For a single-threaded loop, the waitqueue is a no-op:
  200. template <> class waitqueue_node<null_mutex>
  201. {
  202. // Specialised waitqueue_node for null_mutex.
  203. friend class waitqueue<null_mutex>;
  204. public:
  205. void wait(std::unique_lock<null_mutex> &ul) { }
  206. void signal() { }
  207. DASYNQ_EMPTY_BODY
  208. };
  209. template <typename T_Mutex> class waitqueue_node
  210. {
  211. typename condvar_selector<T_Mutex>::condvar condvar;
  212. friend class waitqueue<T_Mutex>;
  213. // ptr to next node in queue, set to null when added to queue tail:
  214. waitqueue_node * next;
  215. public:
  216. void signal()
  217. {
  218. condvar.notify_one();
  219. }
  220. void wait(std::unique_lock<T_Mutex> &mutex_lock)
  221. {
  222. condvar.wait(mutex_lock);
  223. }
  224. };
  225. template <> class waitqueue<null_mutex>
  226. {
  227. public:
  228. // remove current head of queue, return new head:
  229. waitqueue_node<null_mutex> * unqueue()
  230. {
  231. return nullptr;
  232. }
  233. waitqueue_node<null_mutex> * get_head()
  234. {
  235. return nullptr;
  236. }
  237. waitqueue_node<null_mutex> * get_second()
  238. {
  239. return nullptr;
  240. }
  241. bool check_head(waitqueue_node<null_mutex> &node)
  242. {
  243. return true;
  244. }
  245. bool is_empty()
  246. {
  247. return true;
  248. }
  249. void queue(waitqueue_node<null_mutex> *node)
  250. {
  251. }
  252. };
  253. template <typename T_Mutex> class waitqueue
  254. {
  255. waitqueue_node<T_Mutex> * tail = nullptr;
  256. waitqueue_node<T_Mutex> * head = nullptr;
  257. public:
  258. // remove current head of queue, return new head:
  259. waitqueue_node<T_Mutex> * unqueue()
  260. {
  261. head = head->next;
  262. if (head == nullptr) {
  263. tail = nullptr;
  264. }
  265. return head;
  266. }
  267. waitqueue_node<T_Mutex> * get_head()
  268. {
  269. return head;
  270. }
  271. waitqueue_node<T_Mutex> * get_second()
  272. {
  273. return head->next;
  274. }
  275. bool check_head(waitqueue_node<T_Mutex> &node)
  276. {
  277. return head == &node;
  278. }
  279. bool is_empty()
  280. {
  281. return head == nullptr;
  282. }
  283. void queue(waitqueue_node<T_Mutex> *node)
  284. {
  285. node->next = nullptr;
  286. if (tail) {
  287. tail->next = node;
  288. }
  289. else {
  290. head = node;
  291. }
  292. tail = node;
  293. }
  294. };
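// Typical use of the wait queue (see get_attn_lock()/release_lock() in event_loop, below): a thread
// queues a node and waits until that node reaches the head; the thread releasing the logical "lock"
// signals the next waiter. A minimal sketch ("the_waitqueue" here stands for either queue):
//
// waitqueue_node<T_Mutex> qnode;
// std::unique_lock<T_Mutex> ulock(wait_lock);
// the_waitqueue.queue(&qnode);
// while (! the_waitqueue.check_head(qnode)) {
//     qnode.wait(ulock);    // releases the mutex while waiting
// }
// // ... qnode is at the head: the logical lock is now held ...
// waitqueue_node<T_Mutex> *nhead = the_waitqueue.unqueue();
// if (nhead != nullptr) nhead->signal();   // hand over to the next waiter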
  295. // friend of event_loop for giving access to various private members
  296. class loop_access {
  297. public:
  298. template <typename Loop>
  299. static typename Loop::mutex_t &get_base_lock(Loop &loop) noexcept
  300. {
  301. return loop.get_base_lock();
  302. }
  303. template <typename Loop>
  304. static rearm process_fd_rearm(Loop &loop, typename Loop::base_fd_watcher *bfw,
  305. rearm rearm_type) noexcept
  306. {
  307. return loop.process_fd_rearm(bfw, rearm_type);
  308. }
  309. template <typename Loop>
  310. static rearm process_primary_rearm(Loop &loop, typename Loop::base_bidi_fd_watcher *bdfw,
  311. rearm rearm_type) noexcept
  312. {
  313. return loop.process_primary_rearm(bdfw, rearm_type);
  314. }
  315. template <typename Loop>
  316. static rearm process_secondary_rearm(Loop &loop, typename Loop::base_bidi_fd_watcher * bdfw,
  317. base_watcher * outw, rearm rearm_type) noexcept
  318. {
  319. return loop.process_secondary_rearm(bdfw, outw, rearm_type);
  320. }
  321. template <typename Loop>
  322. static void process_signal_rearm(Loop &loop, typename Loop::base_signal_watcher * bsw,
  323. rearm rearm_type) noexcept
  324. {
  325. loop.process_signal_rearm(bsw, rearm_type);
  326. }
  327. template <typename Loop>
  328. static void process_child_watch_rearm(Loop &loop, typename Loop::base_child_watcher *bcw,
  329. rearm rearm_type) noexcept
  330. {
  331. loop.process_child_watch_rearm(bcw, rearm_type);
  332. }
  333. template <typename Loop>
  334. static void process_timer_rearm(Loop &loop, typename Loop::base_timer_watcher *btw,
  335. rearm rearm_type) noexcept
  336. {
  337. loop.process_timer_rearm(btw, rearm_type);
  338. }
  339. template <typename Loop>
  340. static void requeue_watcher(Loop &loop, base_watcher *watcher) noexcept
  341. {
  342. loop.requeue_watcher(watcher);
  343. }
  344. template <typename Loop>
  345. static void release_watcher(Loop &loop, base_watcher *watcher) noexcept
  346. {
  347. loop.release_watcher(watcher);
  348. }
  349. };
  350. // Do standard post-dispatch processing for a watcher. This handles the case of removing or
  351. // re-queueing watchers depending on the rearm type. This is called from the individual
  352. // watcher dispatch functions to handle REMOVE or REQUEUE re-arm values.
  353. template <typename Loop> void post_dispatch(Loop &loop, base_watcher *watcher, rearm rearm_type)
  354. {
  355. if (rearm_type == rearm::REMOVE) {
  356. loop_access::get_base_lock(loop).unlock();
  357. loop_access::release_watcher(loop, watcher);
  358. watcher->watch_removed();
  359. loop_access::get_base_lock(loop).lock();
  360. }
  361. else if (rearm_type == rearm::REQUEUE) {
  362. loop_access::requeue_watcher(loop, watcher);
  363. }
  364. }
  365. // Post-dispatch handling for bidi fd watchers.
  366. template <typename Loop> void post_dispatch(Loop &loop, bidi_fd_watcher<Loop> *bdfd_watcher,
  367. base_watcher *out_watcher, rearm rearm_type)
  368. {
  369. base_watcher *watcher = (base_watcher *)bdfd_watcher;
  370. if (rearm_type == rearm::REMOVE) {
  371. loop_access::get_base_lock(loop).unlock();
  372. loop_access::release_watcher(loop, watcher);
  373. loop_access::release_watcher(loop, out_watcher);
  374. watcher->watch_removed();
  375. loop_access::get_base_lock(loop).lock();
  376. }
  377. else if (rearm_type == rearm::REQUEUE) {
  378. loop_access::requeue_watcher(loop, watcher);
  379. }
  380. }
  381. // The event_dispatch class serves as the base class (mixin) for the backend mechanism. It
  382. // mostly manages queueing and dequeueing of events and maintains/owns the relevant data
  383. // structures, including a mutex lock.
  384. //
  385. // The backend mechanism should call one of the receiveXXX functions to notify of an event
  386. // received. The watcher will then be queued.
  387. //
  388. // In general the functions should be called with lock held. In practice this means that the
  389. // event loop backend implementations (that deposit received events here) must obtain the
  390. // lock; they are also free to use it to protect their own internal data structures.
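//
// From the backend's point of view the contract looks roughly like this (illustrative sketch only;
// "fd_r_value", "sig_data" and "userdata" stand for values the backend obtained when the event was
// received or when the watch was added):
//
// std::lock_guard<decltype(this->lock)> guard(this->lock);
// if (/* fd event received */) {
//     auto rearm_info = this->receive_fd_event(*this, fd_r_value, userdata, event_flags);
//     // std::get<0>(rearm_info) is the watch mask to re-enable with (0 = leave disabled)
// }
// else if (/* signal received */) {
//     bool deactivate = this->receive_signal(*this, sig_data, userdata);
// }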
  391. template <typename Traits, typename LoopTraits> class event_dispatch
  392. {
  393. friend class dasynq::event_loop<typename LoopTraits::mutex_t, LoopTraits>;
  394. public:
  395. using mutex_t = typename LoopTraits::mutex_t;
  396. using traits_t = Traits;
  397. using delayed_init = dasynq::delayed_init;
  398. private:
  399. // queue data structure/pointer
  400. prio_queue event_queue;
  401. using base_signal_watcher = dprivate::base_signal_watcher<typename traits_t::sigdata_t>;
  402. using base_child_watcher = dprivate::base_child_watcher<typename traits_t::proc_status_t>;
  403. using base_timer_watcher = dprivate::base_timer_watcher;
  404. // Add a watcher into the queueing system (but don't queue it). Call with lock held.
  405. // may throw: std::bad_alloc
  406. void prepare_watcher(base_watcher *bwatcher)
  407. {
  408. allocate_handle(event_queue, bwatcher->heap_handle, bwatcher);
  409. }
  410. void queue_watcher(base_watcher *bwatcher) noexcept
  411. {
  412. event_queue.insert(bwatcher->heap_handle, bwatcher->priority);
  413. }
  414. void dequeue_watcher(base_watcher *bwatcher) noexcept
  415. {
  416. if (event_queue.is_queued(bwatcher->heap_handle)) {
  417. event_queue.remove(bwatcher->heap_handle);
  418. }
  419. }
  420. // Remove watcher from the queueing system
  421. void release_watcher(base_watcher *bwatcher) noexcept
  422. {
  423. event_queue.deallocate(bwatcher->heap_handle);
  424. }
  425. protected:
  426. mutex_t lock;
  427. template <typename T> void init(T *loop) noexcept { }
  428. void cleanup() noexcept { }
  429. void sigmaskf(int how, const sigset_t *set, sigset_t *oset)
  430. {
  431. LoopTraits::sigmaskf(how, set, oset);
  432. }
  433. // Receive a signal; return true to disable signal watch or false to leave enabled.
  434. // Called with lock held.
  435. template <typename T>
  436. bool receive_signal(T &loop_mech, typename Traits::sigdata_t & siginfo, void * userdata) noexcept
  437. {
  438. base_signal_watcher * bwatcher = static_cast<base_signal_watcher *>(userdata);
  439. bwatcher->siginfo = siginfo;
  440. queue_watcher(bwatcher);
  441. return true;
  442. }
  443. // Receive an fd event delivered from the backend mechanism. Returns the desired watch mask, as per
  444. // set_fd_enabled, which can be used to leave the watch disabled, re-enable it or re-enable
  445. // one direction of a bi-directional watcher.
  446. template <typename T>
  447. std::tuple<int, typename Traits::fd_s> receive_fd_event(T &loop_mech, typename Traits::fd_r fd_r,
  448. void * userdata, int flags) noexcept
  449. {
  450. base_fd_watcher * bfdw = static_cast<base_fd_watcher *>(userdata);
  451. bfdw->event_flags |= flags;
  452. typename Traits::fd_s watch_fd_s {bfdw->watch_fd};
  453. base_watcher * bwatcher = bfdw;
  454. bool is_multi_watch = bfdw->watch_flags & multi_watch;
  455. if (is_multi_watch) {
  456. base_bidi_fd_watcher *bbdw = static_cast<base_bidi_fd_watcher *>(bwatcher);
  457. bbdw->watch_flags &= ~flags;
  458. if ((flags & IN_EVENTS) && (flags & OUT_EVENTS)) {
  459. // Queue the secondary watcher first:
  460. queue_watcher(&bbdw->out_watcher);
  461. }
  462. else if (flags & OUT_EVENTS) {
  463. // Use the secondary watcher for queueing:
  464. bwatcher = &(bbdw->out_watcher);
  465. }
  466. }
  467. queue_watcher(bwatcher);
  468. if (is_multi_watch && ! traits_t::has_separate_rw_fd_watches) {
  469. // If this is a bidirectional fd-watch, it has been disabled in *both* directions
  470. // as the event was delivered. However, the other direction should not be disabled
  471. // yet, so we need to re-enable:
  472. int in_out_mask = IN_EVENTS | OUT_EVENTS;
  473. if ((bfdw->watch_flags & in_out_mask) != 0) {
  474. // We need to re-enable the other channel now:
  475. return std::make_tuple((bfdw->watch_flags & in_out_mask) | ONE_SHOT, watch_fd_s);
  476. // We are the polling thread: don't need to interrupt polling, even if it would
  477. // normally be required.
  478. }
  479. }
  480. return std::make_tuple(0, watch_fd_s);
  481. }
  482. // Child process terminated. Called with both the main lock and the reaper lock held.
  483. void receive_child_stat(pid_t child, typename LoopTraits::backend_traits_t::proc_status_t status, void * userdata) noexcept
  484. {
  485. base_child_watcher * watcher = static_cast<base_child_watcher *>(userdata);
  486. watcher->child_status = status;
  487. watcher->child_termd = true;
  488. queue_watcher(watcher);
  489. }
  490. void receive_timer_expiry(timer_handle_t & timer_handle, void * userdata, int intervals) noexcept
  491. {
  492. base_timer_watcher * watcher = static_cast<base_timer_watcher *>(userdata);
  493. watcher->intervals += intervals;
  494. queue_watcher(watcher);
  495. }
  496. // Pull a single event from the queue; returns nullptr if the queue is empty.
  497. // Call with lock held.
  498. base_watcher * pull_queued_event() noexcept
  499. {
  500. if (event_queue.empty()) {
  501. return nullptr;
  502. }
  503. auto & rhndl = event_queue.get_root();
  504. base_watcher *r = dprivate::get_watcher(event_queue, rhndl);
  505. event_queue.pull_root();
  506. return r;
  507. }
  508. size_t num_queued_events() noexcept
  509. {
  510. return event_queue.size();
  511. }
  512. // Queue a watcher for removal, or issue "removed" callback to it.
  513. // Call with lock free.
  514. void issue_delete(base_watcher *watcher) noexcept
  515. {
  516. // This is only called when the attention lock is held, so if the watcher is not
  517. // active/queued now, it cannot become active (and will not be reported with an event)
  518. // during execution of this function.
  519. lock.lock();
  520. if (watcher->active) {
  521. // If the watcher is active, set deleteme true; the watcher will be removed
  522. // at the end of current processing (i.e. when active is set false).
  523. watcher->deleteme = true;
  524. lock.unlock();
  525. }
  526. else {
  527. // Actually do the delete.
  528. dequeue_watcher(watcher);
  529. release_watcher(watcher);
  530. lock.unlock();
  531. watcher->watch_removed();
  532. }
  533. }
  534. // Queue a watcher for removal, or issue "removed" callback to it.
  535. // Call with lock free.
  536. void issue_delete(base_bidi_fd_watcher *watcher) noexcept
  537. {
  538. lock.lock();
  539. if (watcher->active) {
  540. watcher->deleteme = true;
  541. release_watcher(watcher);
  542. }
  543. else {
  544. dequeue_watcher(watcher);
  545. release_watcher(watcher);
  546. watcher->read_removed = true;
  547. }
  548. base_watcher *secondary = &(watcher->out_watcher);
  549. if (secondary->active) {
  550. secondary->deleteme = true;
  551. release_watcher(watcher);
  552. }
  553. else {
  554. dequeue_watcher(secondary);
  555. release_watcher(watcher);
  556. watcher->write_removed = true;
  557. }
  558. if (watcher->read_removed && watcher->write_removed) {
  559. lock.unlock();
  560. watcher->watch_removed();
  561. }
  562. else {
  563. lock.unlock();
  564. }
  565. }
  566. event_dispatch() { }
  567. event_dispatch(const event_dispatch &) = delete;
  568. };
  569. } // namespace dprivate
  570. // This is the main event_loop implementation. It serves as an interface to the event loop backend (of which
  571. // it maintains an internal instance). It also serialises polling the backend and provides safe deletion of
  572. // watchers (see comments inline).
  573. //
  574. // The T_Mutex type parameter specifies the mutex type. A null_mutex can be used for a single-threaded event
  575. // loop; std::mutex, or any mutex providing a compatible interface, can be used for a thread-safe event
  576. // loop.
  577. //
  578. // The Traits type parameter specifies any required traits for the event loop. This specifies the back-end
  579. // to use (backend_t, a template) and the basic back-end traits (backend_traits_t).
  580. // The default is `default_traits<T_Mutex>'.
  581. //
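// As an illustrative sketch (the alias names below are hypothetical, not part of the library):
//
// using loop_st_t = dasynq::event_loop<dasynq::null_mutex>;   // single-threaded event loop
// using loop_mt_t = dasynq::event_loop<std::mutex>;           // thread-safe event loop
//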
  582. template <typename T_Mutex, typename Traits>
  583. class event_loop
  584. {
  585. using my_event_loop_t = event_loop<T_Mutex, Traits>;
  586. friend class dprivate::fd_watcher<my_event_loop_t>;
  587. friend class dprivate::bidi_fd_watcher<my_event_loop_t>;
  588. friend class dprivate::signal_watcher<my_event_loop_t>;
  589. friend class dprivate::child_proc_watcher<my_event_loop_t>;
  590. friend class dprivate::timer<my_event_loop_t>;
  591. friend class dprivate::loop_access;
  592. using backend_traits_t = typename Traits::backend_traits_t;
  593. template <typename T> using event_dispatch = dprivate::event_dispatch<T,Traits>;
  594. using dispatch_t = event_dispatch<backend_traits_t>;
  595. using loop_mech_t = typename Traits::template backend_t<dispatch_t>;
  596. using reaper_mutex_t = typename loop_mech_t::reaper_mutex_t;
  597. public:
  598. using traits_t = Traits;
  599. using loop_traits_t = typename loop_mech_t::traits_t;
  600. using mutex_t = T_Mutex;
  601. private:
  602. template <typename T> using waitqueue = dprivate::waitqueue<T>;
  603. template <typename T> using waitqueue_node = dprivate::waitqueue_node<T>;
  604. using base_watcher = dprivate::base_watcher;
  605. using base_signal_watcher = dprivate::base_signal_watcher<typename loop_traits_t::sigdata_t>;
  606. using base_fd_watcher = dprivate::base_fd_watcher;
  607. using base_bidi_fd_watcher = dprivate::base_bidi_fd_watcher;
  608. using base_child_watcher = dprivate::base_child_watcher<typename loop_traits_t::proc_status_t>;
  609. using base_timer_watcher = dprivate::base_timer_watcher;
  610. using watch_type_t = dprivate::watch_type_t;
  611. loop_mech_t loop_mech;
  612. // There is a complex problem with most asynchronous event notification mechanisms
  613. // when used in a multi-threaded environment. Generally, a file descriptor or other
  614. // event type that we are watching will be associated with some data used to manage
  615. // that event source. For example a web server needs to maintain information about
  616. // each client connection, such as the state of the connection (what protocol version
  617. // has been negotiated, etc; if a transfer is taking place, what file is being
  618. // transferred etc).
  619. //
  620. // However, sometimes we want to remove an event source (eg webserver wants to drop
  621. // a connection) and delete the associated data. The problem here is that it is
  622. // difficult to be sure when it is ok to actually remove the data, since when
  623. // requesting to unwatch the source in one thread it is still possible that an
  624. // event from that source is just being reported to another thread (in which case
  625. // the data will be needed).
  626. //
  627. // To solve that, we:
  628. // - allow only one thread to poll for events at a time, using a lock
  629. // - use the same lock to prevent polling, if we want to unwatch an event source
  630. // - generate an event to interrupt, when necessary, any polling that may already be occurring
  631. // in another thread
  632. // - mark handlers as active if they are currently executing, and
  633. // - when removing an active handler, simply set a flag which causes it to be
  634. // removed once the current processing is finished, rather than removing it
  635. // immediately.
  636. //
  637. // In particular the lock mechanism for preventing multiple threads polling and
  638. // for allowing polling to be interrupted is tricky. We can't use a simple mutex
  639. // since there is a significant chance that it will be highly contended and there
  640. // are no guarantees that its acquisition will be fair. In particular, we don't
  641. // want a thread that is trying to unwatch a source being starved while another
  642. // thread polls the event source.
  643. //
  644. // So, we use two wait queues protected by a single mutex. The "attn_waitqueue"
  645. // (attention queue) is the high-priority queue, used for threads wanting to
  646. // unwatch event sources. The "wait_waitqueue" is the queue used by threads
  647. // that wish to actually poll for events, while they are waiting for the main
  648. // queue to become quiet.
  649. // - The head of the "attn_waitqueue" is always the holder of the lock
  650. // - Therefore, a poll-waiter must be moved from the wait_waitqueue to the
  651. // attn_waitqueue to actually gain the lock. This is only done if the
  652. // attn_waitqueue is otherwise empty.
  653. // - The mutex only protects manipulation of the wait queues, and so should not
  654. // be highly contended.
  655. //
  656. // To claim the lock for a poll-wait, the procedure is:
  657. // - check if the attn_waitqueue is empty;
  658. // - if it is, insert node at the head, thus claiming the lock, and return
  659. // - otherwise, insert node in the wait_waitqueue, and wait
  660. // To claim the lock for an unwatch, the procedure is:
  661. // - insert node in the attn_waitqueue
  662. // - if the node is at the head of the queue, lock is claimed; return
  663. // - otherwise, if a poll is in progress, interrupt it
  664. // - wait until our node is at the head of the attn_waitqueue
  665. //
  666. // Some backends also need to be interrupted in order to add a new watch (eg select/pselect).
  667. // However, the attn_waitqueue lock doesn't generally need to be obtained for this.
  668. mutex_t wait_lock; // protects the wait/attention queues
  669. bool long_poll_running = false; // whether any thread is polling the backend (with non-zero timeout)
  670. waitqueue<mutex_t> attn_waitqueue;
  671. waitqueue<mutex_t> wait_waitqueue;
  672. mutex_t &get_base_lock() noexcept
  673. {
  674. return loop_mech.lock;
  675. }
  676. reaper_mutex_t &get_reaper_lock() noexcept
  677. {
  678. return loop_mech.get_reaper_lock();
  679. }
  680. void register_signal(base_signal_watcher *callBack, int signo)
  681. {
  682. std::lock_guard<mutex_t> guard(loop_mech.lock);
  683. loop_mech.prepare_watcher(callBack);
  684. try {
  685. loop_mech.add_signal_watch_nolock(signo, callBack);
  686. if (backend_traits_t::interrupt_after_signal_add) {
  687. interrupt_if_necessary();
  688. }
  689. }
  690. catch (...) {
  691. loop_mech.release_watcher(callBack);
  692. throw;
  693. }
  694. }
  695. void deregister(base_signal_watcher *callBack, int signo) noexcept
  696. {
  697. loop_mech.remove_signal_watch(signo);
  698. waitqueue_node<T_Mutex> qnode;
  699. get_attn_lock(qnode);
  700. loop_mech.issue_delete(callBack);
  701. release_lock(qnode);
  702. }
  703. void register_fd(base_fd_watcher *callback, int fd, int eventmask, bool enabled, bool emulate = false)
  704. {
  705. std::lock_guard<mutex_t> guard(loop_mech.lock);
  706. loop_mech.prepare_watcher(callback);
  707. try {
  708. if (! loop_mech.add_fd_watch(fd, callback, eventmask | ONE_SHOT, enabled, emulate)) {
  709. callback->emulatefd = true;
  710. callback->emulate_enabled = enabled;
  711. if (enabled) {
  712. callback->event_flags = eventmask & IO_EVENTS;
  713. if (eventmask & IO_EVENTS) {
  714. requeue_watcher(callback);
  715. }
  716. }
  717. }
  718. else if (enabled && backend_traits_t::interrupt_after_fd_add) {
  719. interrupt_if_necessary();
  720. }
  721. }
  722. catch (...) {
  723. loop_mech.release_watcher(callback);
  724. throw;
  725. }
  726. }
  727. // Register a bidi fd watcher. The watch_flags should already be set to the eventmask to watch
  728. // (i.e. eventmask == callback->watch_flags is a pre-condition).
  729. void register_fd(base_bidi_fd_watcher *callback, int fd, int eventmask, bool emulate = false)
  730. {
  731. std::lock_guard<mutex_t> guard(loop_mech.lock);
  732. loop_mech.prepare_watcher(callback);
  733. try {
  734. loop_mech.prepare_watcher(&callback->out_watcher);
  735. try {
  736. bool do_interrupt = false;
  737. if (backend_traits_t::has_separate_rw_fd_watches) {
  738. int r = loop_mech.add_bidi_fd_watch(fd, callback, eventmask | ONE_SHOT, emulate);
  739. if (r & IN_EVENTS) {
  740. callback->emulatefd = true;
  741. if (eventmask & IN_EVENTS) {
  742. callback->watch_flags &= ~IN_EVENTS;
  743. requeue_watcher(callback);
  744. }
  745. }
  746. else if ((eventmask & IN_EVENTS) && backend_traits_t::interrupt_after_fd_add) {
  747. do_interrupt = true;
  748. }
  749. if (r & OUT_EVENTS) {
  750. callback->out_watcher.emulatefd = true;
  751. if (eventmask & OUT_EVENTS) {
  752. callback->watch_flags &= ~OUT_EVENTS;
  753. requeue_watcher(&callback->out_watcher);
  754. }
  755. }
  756. else if ((eventmask & OUT_EVENTS) && backend_traits_t::interrupt_after_fd_add) {
  757. do_interrupt = true;
  758. }
  759. }
  760. else {
  761. if (! loop_mech.add_fd_watch(fd, callback, eventmask | ONE_SHOT, true, emulate)) {
  762. callback->emulatefd = true;
  763. callback->out_watcher.emulatefd = true;
  764. if (eventmask & IN_EVENTS) {
  765. callback->watch_flags &= ~IN_EVENTS;
  766. requeue_watcher(callback);
  767. }
  768. if (eventmask & OUT_EVENTS) {
  769. callback->watch_flags &= ~OUT_EVENTS;
  770. requeue_watcher(&callback->out_watcher);
  771. }
  772. }
  773. else if (backend_traits_t::interrupt_after_fd_add) {
  774. do_interrupt = true;
  775. }
  776. }
  777. if (do_interrupt) {
  778. interrupt_if_necessary();
  779. }
  780. }
  781. catch (...) {
  782. loop_mech.release_watcher(&callback->out_watcher);
  783. throw;
  784. }
  785. }
  786. catch (...) {
  787. loop_mech.release_watcher(callback);
  788. throw;
  789. }
  790. }
  791. void set_fd_enabled(base_watcher *watcher, int fd, int watch_flags, bool enabled) noexcept
  792. {
  793. if (enabled) {
  794. loop_mech.enable_fd_watch(fd, watcher, watch_flags | ONE_SHOT);
  795. if (backend_traits_t::interrupt_after_fd_add) {
  796. interrupt_if_necessary();
  797. }
  798. }
  799. else {
  800. loop_mech.disable_fd_watch(fd, watch_flags);
  801. }
  802. }
  803. void set_fd_enabled_nolock(base_watcher *watcher, int fd, int watch_flags, bool enabled) noexcept
  804. {
  805. if (enabled) {
  806. loop_mech.enable_fd_watch_nolock(fd, watcher, watch_flags | ONE_SHOT);
  807. if (backend_traits_t::interrupt_after_fd_add) {
  808. interrupt_if_necessary();
  809. }
  810. }
  811. else {
  812. loop_mech.disable_fd_watch_nolock(fd, watch_flags);
  813. }
  814. }
  815. void deregister(base_fd_watcher *callback, int fd) noexcept
  816. {
  817. if (callback->emulatefd) {
  818. auto & ed = (dispatch_t &) loop_mech;
  819. ed.issue_delete(callback);
  820. return;
  821. }
  822. loop_mech.remove_fd_watch(fd, callback->watch_flags);
  823. waitqueue_node<T_Mutex> qnode;
  824. get_attn_lock(qnode);
  825. auto & ed = (dispatch_t &) loop_mech;
  826. ed.issue_delete(callback);
  827. release_lock(qnode);
  828. }
  829. void deregister(base_bidi_fd_watcher *callback, int fd) noexcept
  830. {
  831. if (backend_traits_t::has_separate_rw_fd_watches) {
  832. loop_mech.remove_bidi_fd_watch(fd);
  833. }
  834. else {
  835. loop_mech.remove_fd_watch(fd, callback->watch_flags);
  836. }
  837. waitqueue_node<T_Mutex> qnode;
  838. get_attn_lock(qnode);
  839. dispatch_t & ed = (dispatch_t &) loop_mech;
  840. ed.issue_delete(callback);
  841. release_lock(qnode);
  842. }
  843. void reserve_child_watch(base_child_watcher *callback)
  844. {
  845. std::lock_guard<mutex_t> guard(loop_mech.lock);
  846. loop_mech.prepare_watcher(callback);
  847. try {
  848. loop_mech.reserve_child_watch_nolock(callback->watch_handle);
  849. }
  850. catch (...) {
  851. loop_mech.release_watcher(callback);
  852. throw;
  853. }
  854. }
  855. void unreserve(base_child_watcher *callback) noexcept
  856. {
  857. std::lock_guard<mutex_t> guard(loop_mech.lock);
  858. loop_mech.unreserve_child_watch(callback->watch_handle);
  859. loop_mech.release_watcher(callback);
  860. }
  861. void register_child(base_child_watcher *callback, pid_t child)
  862. {
  863. std::lock_guard<mutex_t> guard(loop_mech.lock);
  864. loop_mech.prepare_watcher(callback);
  865. try {
  866. loop_mech.add_child_watch_nolock(callback->watch_handle, child, callback);
  867. }
  868. catch (...) {
  869. loop_mech.release_watcher(callback);
  870. throw;
  871. }
  872. }
  873. void register_reserved_child(base_child_watcher *callback, pid_t child) noexcept
  874. {
  875. loop_mech.add_reserved_child_watch(callback->watch_handle, child, callback);
  876. }
  877. void register_reserved_child_nolock(base_child_watcher *callback, pid_t child) noexcept
  878. {
  879. loop_mech.add_reserved_child_watch_nolock(callback->watch_handle, child, callback);
  880. }
  881. void deregister(base_child_watcher *callback, pid_t child) noexcept
  882. {
  883. loop_mech.remove_child_watch(callback->watch_handle);
  884. waitqueue_node<T_Mutex> qnode;
  885. get_attn_lock(qnode);
  886. loop_mech.issue_delete(callback);
  887. release_lock(qnode);
  888. }
  889. // Stop watching a child process, but retain watch reservation so that another child can be
  890. // watched without running into resource allocation issues.
  891. void stop_watch(base_child_watcher *callback) noexcept
  892. {
  893. loop_mech.stop_child_watch(callback->watch_handle);
  894. }
  895. void register_timer(base_timer_watcher *callback, clock_type clock)
  896. {
  897. std::lock_guard<mutex_t> guard(loop_mech.lock);
  898. loop_mech.prepare_watcher(callback);
  899. try {
  900. loop_mech.add_timer_nolock(callback->timer_handle, callback, clock);
  901. }
  902. catch (...) {
  903. loop_mech.release_watcher(callback); throw;
  904. }
  905. }
  906. void set_timer(base_timer_watcher *callback, const timespec &timeout, clock_type clock) noexcept
  907. {
  908. struct timespec interval {0, 0};
  909. loop_mech.set_timer(callback->timer_handle, timeout, interval, true, clock);
  910. }
  911. void set_timer(base_timer_watcher *callback, const timespec &timeout, const timespec &interval,
  912. clock_type clock) noexcept
  913. {
  914. loop_mech.set_timer(callback->timer_handle, timeout, interval, true, clock);
  915. }
  916. void set_timer_rel(base_timer_watcher *callback, const timespec &timeout, clock_type clock) noexcept
  917. {
  918. struct timespec interval {0, 0};
  919. loop_mech.set_timer_rel(callback->timer_handle, timeout, interval, true, clock);
  920. }
  921. void set_timer_rel(base_timer_watcher *callback, const timespec &timeout,
  922. const timespec &interval, clock_type clock) noexcept
  923. {
  924. loop_mech.set_timer_rel(callback->timer_handle, timeout, interval, true, clock);
  925. }
  926. void set_timer_enabled(base_timer_watcher *callback, clock_type clock, bool enabled) noexcept
  927. {
  928. loop_mech.enable_timer(callback->timer_handle, enabled, clock);
  929. }
  930. void set_timer_enabled_nolock(base_timer_watcher *callback, clock_type clock, bool enabled) noexcept
  931. {
  932. loop_mech.enable_timer_nolock(callback->timer_handle, enabled, clock);
  933. }
  934. void stop_timer(base_timer_watcher *callback, clock_type clock) noexcept
  935. {
  936. loop_mech.stop_timer(callback->timer_handle, clock);
  937. }
  938. void deregister(base_timer_watcher *callback, clock_type clock) noexcept
  939. {
  940. loop_mech.remove_timer(callback->timer_handle, clock);
  941. waitqueue_node<T_Mutex> qnode;
  942. get_attn_lock(qnode);
  943. loop_mech.issue_delete(callback);
  944. release_lock(qnode);
  945. }
  946. void dequeue_watcher(base_watcher *watcher) noexcept
  947. {
  948. loop_mech.dequeue_watcher(watcher);
  949. }
  950. void requeue_watcher(base_watcher *watcher) noexcept
  951. {
  952. loop_mech.queue_watcher(watcher);
  953. interrupt_if_necessary();
  954. }
  955. void release_watcher(base_watcher *watcher) noexcept
  956. {
  957. loop_mech.release_watcher(watcher);
  958. }
  959. // Interrupt the current poll-waiter, if necessary - that is, if the loop is multi-thread safe, and if
  960. // there is currently another thread polling the backend event mechanism.
  961. void interrupt_if_necessary()
  962. {
  963. wait_lock.lock();
  964. bool attn_q_empty = attn_waitqueue.is_empty(); // (always true for single-threaded loops)
  965. wait_lock.unlock();
  966. if (! attn_q_empty) {
  967. loop_mech.interrupt_wait();
  968. }
  969. }
  970. // Acquire the attention lock (when held, ensures that no thread is polling the AEN (asynchronous event notification)
  971. // mechanism). This can be used to safely remove watches, since it is certain that
  972. // notification callbacks won't be run while the attention lock is held. Any in-progress
  973. // poll will be interrupted so that the lock should be acquired quickly.
  974. void get_attn_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  975. {
  976. std::unique_lock<T_Mutex> ulock(wait_lock);
  977. attn_waitqueue.queue(&qnode);
  978. if (! attn_waitqueue.check_head(qnode)) {
  979. if (long_poll_running) {
  980. // We want to interrupt any in-progress poll so that the attn queue will progress
  981. // but we don't want to do that unnecessarily. If we are 2nd in the queue then the
  982. // head must be doing the poll; interrupt it. Otherwise, we assume the 2nd has
  983. // already interrupted it.
  984. if (attn_waitqueue.get_second() == &qnode) {
  985. loop_mech.interrupt_wait();
  986. }
  987. }
  988. while (! attn_waitqueue.check_head(qnode)) {
  989. qnode.wait(ulock);
  990. }
  991. }
  992. }
  993. // Acquire the attention lock, but without interrupting any poll that's in progress
  994. // (prefer to fail in that case).
  995. bool poll_attn_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  996. {
  997. std::unique_lock<T_Mutex> ulock(wait_lock);
  998. if (long_poll_running) {
  999. // There are poll-waiters, bail out
  1000. return false;
  1001. }
  1002. // Nobody's doing a long poll, wait until we're at the head of the attn queue and return
  1003. // success:
  1004. attn_waitqueue.queue(&qnode);
  1005. while (! attn_waitqueue.check_head(qnode)) {
  1006. qnode.wait(ulock);
  1007. }
  1008. return true;
  1009. }
  1010. // Acquire the poll-wait lock (to be held when polling the AEN mechanism; lower priority than
  1011. // the attention lock). The poll-wait lock is used to prevent more than a single thread from
  1012. // polling the event loop mechanism at a time; if this is not done, it is basically
  1013. // impossible to safely deregister watches.
  1014. void get_pollwait_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  1015. {
  1016. std::unique_lock<T_Mutex> ulock(wait_lock);
  1017. if (attn_waitqueue.is_empty()) {
  1018. // Queue is completely empty:
  1019. attn_waitqueue.queue(&qnode);
  1020. }
  1021. else {
  1022. wait_waitqueue.queue(&qnode);
  1023. }
  1024. while (! attn_waitqueue.check_head(qnode)) {
  1025. qnode.wait(ulock);
  1026. }
  1027. long_poll_running = true;
  1028. }
  1029. // Release the poll-wait/attention lock.
  1030. void release_lock(waitqueue_node<T_Mutex> &qnode) noexcept
  1031. {
  1032. std::unique_lock<T_Mutex> ulock(wait_lock);
  1033. long_poll_running = false;
  1034. waitqueue_node<T_Mutex> * nhead = attn_waitqueue.unqueue();
  1035. if (nhead != nullptr) {
  1036. // Someone else now owns the lock, signal them to wake them up
  1037. nhead->signal();
  1038. }
  1039. else {
  1040. // Nobody is waiting in attn_waitqueue (the high-priority queue) so check in
  1041. // wait_waitqueue (the low-priority queue)
  1042. if (! wait_waitqueue.is_empty()) {
  1043. auto nhead = wait_waitqueue.get_head();
  1044. wait_waitqueue.unqueue();
  1045. attn_waitqueue.queue(nhead);
  1046. long_poll_running = true;
  1047. nhead->signal();
  1048. }
  1049. }
  1050. }
  1051. void process_signal_rearm(base_signal_watcher * bsw, rearm rearm_type) noexcept
  1052. {
  1053. // Called with lock held
  1054. if (rearm_type == rearm::REARM) {
  1055. loop_mech.rearm_signal_watch_nolock(bsw->siginfo.get_signo(), bsw);
  1056. if (backend_traits_t::interrupt_after_signal_add) {
  1057. interrupt_if_necessary();
  1058. }
  1059. }
  1060. else if (rearm_type == rearm::REMOVE) {
  1061. loop_mech.remove_signal_watch_nolock(bsw->siginfo.get_signo());
  1062. }
  1063. // Note that signal watchers cannot (currently) be disarmed
  1064. }
  1065. // Process rearm return from an fd_watcher, including the primary watcher of a bidi_fd_watcher.
  1066. // Depending on the rearm value, we re-arm, remove, or disarm the watcher, etc.
  1067. rearm process_fd_rearm(base_fd_watcher * bfw, rearm rearm_type) noexcept
  1068. {
  1069. bool emulatedfd = static_cast<base_watcher *>(bfw)->emulatefd;
  1070. if (emulatedfd) {
  1071. if (rearm_type == rearm::REARM) {
  1072. bfw->emulate_enabled = true;
  1073. rearm_type = rearm::REQUEUE;
  1074. }
  1075. else if (rearm_type == rearm::DISARM) {
  1076. bfw->emulate_enabled = false;
  1077. }
  1078. else if (rearm_type == rearm::NOOP) {
  1079. if (bfw->emulate_enabled) {
  1080. rearm_type = rearm::REQUEUE;
  1081. }
  1082. }
  1083. }
  1084. else if (rearm_type == rearm::REARM) {
  1085. set_fd_enabled_nolock(bfw, bfw->watch_fd,
  1086. bfw->watch_flags & (IN_EVENTS | OUT_EVENTS), true);
  1087. }
  1088. else if (rearm_type == rearm::DISARM) {
  1089. loop_mech.disable_fd_watch_nolock(bfw->watch_fd, bfw->watch_flags);
  1090. }
  1091. else if (rearm_type == rearm::REMOVE) {
  1092. loop_mech.remove_fd_watch_nolock(bfw->watch_fd, bfw->watch_flags);
  1093. }
  1094. return rearm_type;
  1095. }
  1096. // Process rearm option from the primary watcher in bidi_fd_watcher
  1097. rearm process_primary_rearm(base_bidi_fd_watcher * bdfw, rearm rearm_type) noexcept
  1098. {
  1099. bool emulatedfd = static_cast<base_watcher *>(bdfw)->emulatefd;
  1100. // Called with lock held
  1101. if (rearm_type == rearm::REMOVE) {
  1102. bdfw->read_removed = 1;
  1103. if (backend_traits_t::has_separate_rw_fd_watches) {
  1104. bdfw->watch_flags &= ~IN_EVENTS;
  1105. if (! emulatedfd) {
  1106. loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, IN_EVENTS);
  1107. }
  1108. return bdfw->write_removed ? rearm::REMOVE : rearm::NOOP;
  1109. }
  1110. else {
  1111. if (! bdfw->write_removed) {
  1112. if (bdfw->watch_flags & IN_EVENTS) {
  1113. bdfw->watch_flags &= ~IN_EVENTS;
  1114. if (! emulatedfd) {
  1115. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, bdfw->watch_flags,
  1116. bdfw->watch_flags != 0);
  1117. }
  1118. }
  1119. return rearm::NOOP;
  1120. }
  1121. else {
  1122. // both removed: actually remove
  1123. if (! emulatedfd) {
  1124. loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, 0 /* not used */);
  1125. }
  1126. return rearm::REMOVE;
  1127. }
  1128. }
  1129. }
  1130. else if (rearm_type == rearm::DISARM) {
  1131. bdfw->watch_flags &= ~IN_EVENTS;
  1132. if (! emulatedfd) {
  1133. if (! backend_traits_t::has_separate_rw_fd_watches) {
  1134. int watch_flags = bdfw->watch_flags & (IN_EVENTS | OUT_EVENTS);
  1135. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, watch_flags, watch_flags != 0);
  1136. }
  1137. else {
  1138. loop_mech.disable_fd_watch_nolock(bdfw->watch_fd, IN_EVENTS);
  1139. }
  1140. }
  1141. }
  1142. else if (rearm_type == rearm::REARM) {
  1143. if (! emulatedfd) {
  1144. bdfw->watch_flags |= IN_EVENTS;
  1145. if (! backend_traits_t::has_separate_rw_fd_watches) {
  1146. int watch_flags = bdfw->watch_flags;
  1147. set_fd_enabled_nolock(bdfw, bdfw->watch_fd,
  1148. watch_flags & (IN_EVENTS | OUT_EVENTS), true);
  1149. }
  1150. else {
  1151. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, IN_EVENTS, true);
  1152. }
  1153. }
  1154. else {
  1155. bdfw->watch_flags &= ~IN_EVENTS;
  1156. rearm_type = rearm::REQUEUE;
  1157. }
  1158. }
  1159. else if (rearm_type == rearm::NOOP) {
  1160. if (bdfw->emulatefd) {
  1161. if (bdfw->watch_flags & IN_EVENTS) {
  1162. bdfw->watch_flags &= ~IN_EVENTS;
  1163. rearm_type = rearm::REQUEUE;
  1164. }
  1165. }
  1166. }
  1167. return rearm_type;
  1168. }
  1169. // Process re-arm for the secondary (output) watcher in a bi-directional fd watcher.
  1170. rearm process_secondary_rearm(base_bidi_fd_watcher * bdfw, base_watcher * outw, rearm rearm_type) noexcept
  1171. {
  1172. bool emulatedfd = outw->emulatefd;
  1173. // Called with lock held
  1174. if (emulatedfd) {
  1175. if (rearm_type == rearm::REMOVE) {
  1176. bdfw->write_removed = 1;
  1177. bdfw->watch_flags &= ~OUT_EVENTS;
  1178. rearm_type = bdfw->read_removed ? rearm::REMOVE : rearm::NOOP;
  1179. }
  1180. else if (rearm_type == rearm::DISARM) {
  1181. bdfw->watch_flags &= ~OUT_EVENTS;
  1182. }
  1183. else if (rearm_type == rearm::REARM) {
  1184. bdfw->watch_flags &= ~OUT_EVENTS;
  1185. rearm_type = rearm::REQUEUE;
  1186. }
  1187. else if (rearm_type == rearm::NOOP) {
  1188. if (bdfw->watch_flags & OUT_EVENTS) {
  1189. bdfw->watch_flags &= ~OUT_EVENTS;
  1190. rearm_type = rearm::REQUEUE;
  1191. }
  1192. }
  1193. return rearm_type;
  1194. }
  1195. else if (rearm_type == rearm::REMOVE) {
  1196. bdfw->write_removed = 1;
  1197. if (backend_traits_t::has_separate_rw_fd_watches) {
  1198. bdfw->watch_flags &= ~OUT_EVENTS;
  1199. loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, OUT_EVENTS);
  1200. return bdfw->read_removed ? rearm::REMOVE : rearm::NOOP;
  1201. }
  1202. else {
  1203. if (! bdfw->read_removed) {
  1204. if (bdfw->watch_flags & OUT_EVENTS) {
  1205. bdfw->watch_flags &= ~OUT_EVENTS;
  1206. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, bdfw->watch_flags, true);
  1207. }
  1208. return rearm::NOOP;
  1209. }
  1210. else {
  1211. // both removed: actually remove
  1212. loop_mech.remove_fd_watch_nolock(bdfw->watch_fd, 0 /* not used */);
  1213. return rearm::REMOVE;
  1214. }
  1215. }
  1216. }
  1217. else if (rearm_type == rearm::DISARM) {
  1218. bdfw->watch_flags &= ~OUT_EVENTS;
  1219. if (! backend_traits_t::has_separate_rw_fd_watches) {
  1220. int watch_flags = bdfw->watch_flags;
  1221. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, watch_flags & (IN_EVENTS | OUT_EVENTS), true);
  1222. }
  1223. else {
  1224. loop_mech.disable_fd_watch_nolock(bdfw->watch_fd, OUT_EVENTS);
  1225. }
  1226. }
  1227. else if (rearm_type == rearm::REARM) {
  1228. bdfw->watch_flags |= OUT_EVENTS;
  1229. if (! backend_traits_t::has_separate_rw_fd_watches) {
  1230. int watch_flags = bdfw->watch_flags;
  1231. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, watch_flags & (IN_EVENTS | OUT_EVENTS), true);
  1232. }
  1233. else {
  1234. set_fd_enabled_nolock(bdfw, bdfw->watch_fd, OUT_EVENTS | ONE_SHOT, true);
  1235. }
  1236. }
  1237. return rearm_type;
  1238. }

    void process_child_watch_rearm(base_child_watcher *bcw, rearm rearm_type) noexcept
    {
        if (rearm_type == rearm::REMOVE || rearm_type == rearm::DISARM) {
            loop_mech.unreserve_child_watch_nolock(bcw->watch_handle);
        }
    }

    void process_timer_rearm(base_timer_watcher *btw, rearm rearm_type) noexcept
    {
        // Called with lock held
        if (rearm_type == rearm::REARM) {
            loop_mech.enable_timer_nolock(btw->timer_handle, true, btw->clock);
        }
        else if (rearm_type == rearm::REMOVE) {
            loop_mech.remove_timer_nolock(btw->timer_handle, btw->clock);
        }
        else if (rearm_type == rearm::DISARM) {
            loop_mech.enable_timer_nolock(btw->timer_handle, false, btw->clock);
        }
    }

    // Process queued events; returns true if any events were processed.
    //   limit - maximum number of events to process before returning; -1 for
    //           no limit.
    bool process_events(int limit) noexcept
    {
        if (limit == 0) {
            // Check before acquiring the lock, so we never return with the lock still held.
            return false;
        }

        loop_mech.lock.lock();

        // limit processing to the number of events currently queued, to avoid prolonged processing
        // of watchers which requeue themselves immediately (including file watchers which are using
        // emulation for watching regular files)
        //
        // If limit is -1 (no limit) we rely on this being always larger than/equal to the number of
        // queued events when cast to size_t (which is unsigned).
        limit = std::min(size_t(limit), loop_mech.num_queued_events());

        base_watcher * pqueue = loop_mech.pull_queued_event();
        bool active = false;

        while (pqueue != nullptr) {
            pqueue->active = true;
            active = true;

            base_bidi_fd_watcher *bbfw = nullptr;
            // (Above variable is initialised only to silence compiler warnings).

            if (pqueue->watchType == watch_type_t::SECONDARYFD) {
                // construct a pointer to the main watcher, using integer arithmetic to avoid undefined
                // pointer arithmetic:
                uintptr_t rp = (uintptr_t)pqueue;

                // Here we take the offset of a member from a non-standard-layout class, which is
                // specified to have undefined result by the C++ language standard, but which
                // in practice works fine:
                _Pragma ("GCC diagnostic push")
                _Pragma ("GCC diagnostic ignored \"-Winvalid-offsetof\"")
                rp -= offsetof(base_bidi_fd_watcher, out_watcher);
                _Pragma ("GCC diagnostic pop")
                bbfw = (base_bidi_fd_watcher *)rp;

                // issue a secondary dispatch:
                bbfw->dispatch_second(this);
            }
            else {
                pqueue->dispatch(this);
            }

            if (limit > 0) {
                limit--;
                if (limit == 0) break;
            }
            pqueue = loop_mech.pull_queued_event();
        }

        loop_mech.lock.unlock();
        return active;
    }

    public:

    using fd_watcher = dprivate::fd_watcher<my_event_loop_t>;
    using bidi_fd_watcher = dprivate::bidi_fd_watcher<my_event_loop_t>;
    using signal_watcher = dprivate::signal_watcher<my_event_loop_t>;
    using child_proc_watcher = dprivate::child_proc_watcher<my_event_loop_t>;
    using timer = dprivate::timer<my_event_loop_t>;

    template <typename D> using fd_watcher_impl = dprivate::fd_watcher_impl<my_event_loop_t, D>;
    template <typename D> using bidi_fd_watcher_impl = dprivate::bidi_fd_watcher_impl<my_event_loop_t, D>;
    template <typename D> using signal_watcher_impl = dprivate::signal_watcher_impl<my_event_loop_t, D>;
    template <typename D> using child_proc_watcher_impl = dprivate::child_proc_watcher_impl<my_event_loop_t, D>;
    template <typename D> using timer_impl = dprivate::timer_impl<my_event_loop_t, D>;

    // Poll the event loop and process any pending events (up to a limit). If no events are pending, wait
    // for and process at least one event.
    void run(int limit = -1) noexcept
    {
        // Poll the mechanism first, in case high-priority events are pending:
        waitqueue_node<T_Mutex> qnode;
        get_pollwait_lock(qnode);
        loop_mech.pull_events(false);
        release_lock(qnode);

        while (! process_events(limit)) {
            // Pull events from the AEN mechanism and insert them in our internal queue:
            get_pollwait_lock(qnode);
            loop_mech.pull_events(true);
            release_lock(qnode);
        }
    }

    // Poll the event loop and process any pending events (up to a limit).
    void poll(int limit = -1) noexcept
    {
        waitqueue_node<T_Mutex> qnode;
        if (poll_attn_lock(qnode)) {
            loop_mech.pull_events(false);
            release_lock(qnode);
        }
        process_events(limit);
    }

    // Get the current time corresponding to a specific clock.
    //   ts - the timespec variable to receive the time
    //   clock - specifies the clock
    //   force_update (default = false) - if true, the time returned will be updated from
    //       the system rather than being a previously cached result. It may be more
    //       accurate, but note that reading from a system clock may be relatively expensive.
    void get_time(timespec &ts, clock_type clock, bool force_update = false) noexcept
    {
        loop_mech.get_time(ts, clock, force_update);
    }

    void get_time(time_val &tv, clock_type clock, bool force_update = false) noexcept
    {
        loop_mech.get_time(tv, clock, force_update);
    }
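
    // For example (an illustrative sketch; "my_loop" is an assumed event_loop instance):
    // read the cached monotonic time, then force a fresh read from the system clock:
    //
    //     dasynq::time_val cached_tv, fresh_tv;
    //     my_loop.get_time(cached_tv, dasynq::clock_type::MONOTONIC);
    //     my_loop.get_time(fresh_tv, dasynq::clock_type::MONOTONIC, true);  // force_update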

    event_loop() { }
    event_loop(delayed_init d) noexcept : loop_mech(d) { }
    event_loop(const event_loop &other) = delete;

    // Perform delayed initialisation, if constructed with delayed_init
    void init()
    {
        loop_mech.init();
    }
};

typedef event_loop<null_mutex> event_loop_n;
typedef event_loop<std::mutex> event_loop_th;
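
// For example (an illustrative sketch, not part of the library): a single-threaded program
// would normally instantiate event_loop_n and drive it with run(); use event_loop_th if
// multiple threads may poll the loop:
//
//     #include "dasynq.h"
//
//     int main()
//     {
//         dasynq::event_loop_n my_loop;
//         // ... register watchers here ...
//         while (true) {
//             my_loop.run();   // waits for and processes at least one event
//         }
//     }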

namespace dprivate {

// Posix signal event watcher
template <typename EventLoop>
class signal_watcher : private dprivate::base_signal_watcher<typename EventLoop::loop_traits_t::sigdata_t>
{
    template <typename, typename> friend class signal_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using T_Mutex = typename EventLoop::mutex_t;

    public:

    using event_loop_t = EventLoop;
    using siginfo_p = typename signal_watcher::siginfo_p;

    // Register this watcher to watch the specified signal.
    // If an attempt is made to register with more than one event loop at
    // a time, behaviour is undefined. The signal should be masked before
    // call.
    inline void add_watch(event_loop_t &eloop, int signo, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->siginfo.set_signo(signo);
        eloop.register_signal(this, signo);
    }

    inline void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->siginfo.get_signo());
    }

    template <typename T>
    static signal_watcher<event_loop_t> *add_watch(event_loop_t &eloop, int signo, T watch_hndlr)
    {
        class lambda_sig_watcher : public signal_watcher_impl<event_loop_t, lambda_sig_watcher>
        {
            private:
            T watch_hndlr;

            public:
            lambda_sig_watcher(T watch_handlr_a) : watch_hndlr(watch_handlr_a)
            {
                //
            }

            rearm received(event_loop_t &eloop, int signo, siginfo_p siginfo)
            {
                return watch_hndlr(eloop, signo, siginfo);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_sig_watcher * lsw = new lambda_sig_watcher(watch_hndlr);
        lsw->add_watch(eloop, signo);
        return lsw;
    }

    // virtual rearm received(EventLoop &eloop, int signo, siginfo_p siginfo) = 0;
};
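
// For example (an illustrative sketch; "my_loop" is an assumed event_loop_n instance, and the
// signal must already be masked, e.g. via sigprocmask/pthread_sigmask from <signal.h>):
//
//     using loop_t = dasynq::event_loop_n;
//     loop_t::signal_watcher::add_watch(my_loop, SIGINT,
//             [](loop_t &eloop, int signo, loop_t::signal_watcher::siginfo_p info) -> dasynq::rearm {
//                 // handle SIGINT ...
//                 return dasynq::rearm::REARM;   // keep watching the signal
//             });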

template <typename EventLoop, typename Derived>
class signal_watcher_impl : public signal_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);
        loop_access::get_base_lock(loop).unlock();

        auto rearm_type = static_cast<Derived *>(this)->received(loop, this->siginfo.get_signo(), this->siginfo);

        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            loop_access::process_signal_rearm(loop, this, rearm_type);

            post_dispatch(loop, this, rearm_type);
        }
    }
};

// Posix file descriptor event watcher
template <typename EventLoop>
class fd_watcher : private dprivate::base_fd_watcher
{
    template <typename, typename> friend class fd_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    protected:

    // Set the types of event to watch. Only supported if loop_traits_t::has_bidi_fd_watch
    // is true; otherwise has unspecified behavior.
    // Only safe to call from within the callback handler (fd_event). Might not take
    // effect until the current callback handler returns with REARM.
    void set_watch_flags(int newFlags)
    {
        this->watch_flags = newFlags;
    }

    public:

    using event_loop_t = EventLoop;

    // Register a file descriptor watcher with an event loop. Flags
    // can be any combination of dasynq::IN_EVENTS / dasynq::OUT_EVENTS.
    // Exactly one of IN_EVENTS/OUT_EVENTS must be specified if the event
    // loop does not support bi-directional fd watchers (i.e. if
    // ! loop_traits_t::has_bidi_fd_watch).
    //
    // Mechanisms supporting dual watchers allow for two watchers for a
    // single file descriptor (one watching read status and the other
    // write status). Other mechanisms support only a single watcher
    // per file descriptor. Adding a watcher beyond what is supported
    // causes undefined behavior.
    //
    // Can fail with std::bad_alloc or std::system_error.
    void add_watch(event_loop_t &eloop, int fd, int flags, bool enabled = true, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->watch_fd = fd;
        this->watch_flags = flags;
        eloop.register_fd(this, fd, flags, enabled, true);
    }

    void add_watch_noemu(event_loop_t &eloop, int fd, int flags, bool enabled = true, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->watch_fd = fd;
        this->watch_flags = flags;
        eloop.register_fd(this, fd, flags, enabled, false);
    }

    int get_watched_fd()
    {
        return this->watch_fd;
    }

    // Deregister a file descriptor watcher.
    //
    // If other threads may be polling the event loop, it is not safe to assume
    // the watcher is unregistered until the watch_removed() callback is issued
    // (which will not occur until the event handler returns, if it is active).
    // In a single threaded environment, it is safe to delete the watcher after
    // calling this method as long as the handler (if it is active) accesses no
    // internal state and returns rearm::REMOVED.
    void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->watch_fd);
    }

    void set_enabled(event_loop_t &eloop, bool enable) noexcept
    {
        std::lock_guard<mutex_t> guard(eloop.get_base_lock());
        if (this->emulatefd) {
            if (enable && ! this->emulate_enabled) {
                loop_access::requeue_watcher(eloop, this);
            }
            this->emulate_enabled = enable;
        }
        else {
            eloop.set_fd_enabled_nolock(this, this->watch_fd, this->watch_flags, enable);
        }
        if (! enable) {
            eloop.dequeue_watcher(this);
        }
    }

    // Add an Fd watch via a lambda. The watch is allocated dynamically and destroys
    // itself when removed from the event loop.
    template <typename T>
    static fd_watcher<EventLoop> *add_watch(event_loop_t &eloop, int fd, int flags, T watchHndlr)
    {
        class lambda_fd_watcher : public fd_watcher_impl<event_loop_t, lambda_fd_watcher>
        {
            private:
            T watchHndlr;

            public:
            lambda_fd_watcher(T watchHandlr_a) : watchHndlr(watchHandlr_a)
            {
                //
            }

            rearm fd_event(event_loop_t &eloop, int fd, int flags)
            {
                return watchHndlr(eloop, fd, flags);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_fd_watcher * lfd = new lambda_fd_watcher(watchHndlr);
        lfd->add_watch(eloop, fd, flags);
        return lfd;
    }

    // virtual rearm fd_event(EventLoop &eloop, int fd, int flags) = 0;
};
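
// For example (an illustrative sketch; "my_loop" is an assumed event_loop_n instance and
// "pipe_fd" a readable descriptor such as the read end of a pipe; read() is from <unistd.h>):
//
//     using loop_t = dasynq::event_loop_n;
//     loop_t::fd_watcher::add_watch(my_loop, pipe_fd, dasynq::IN_EVENTS,
//             [](loop_t &eloop, int fd, int flags) -> dasynq::rearm {
//                 char buf[128];
//                 if (read(fd, buf, sizeof(buf)) <= 0) {
//                     return dasynq::rearm::REMOVE;   // EOF or error: stop watching
//                 }
//                 return dasynq::rearm::REARM;        // keep watching for more input
//             });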

template <typename EventLoop, typename Derived>
class fd_watcher_impl : public fd_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);

        // In case emulating, clear enabled here; REARM or explicit set_enabled will re-enable.
        this->emulate_enabled = false;

        loop_access::get_base_lock(loop).unlock();

        auto rearm_type = static_cast<Derived *>(this)->fd_event(loop, this->watch_fd, this->event_flags);

        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->event_flags = 0;
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            rearm_type = loop_access::process_fd_rearm(loop, this, rearm_type);

            post_dispatch(loop, this, rearm_type);
        }
    }
};

// A bi-directional file descriptor watcher with independent read and write channels.
// This watcher type has two event notification methods which can both potentially be
// active at the same time.
template <typename EventLoop>
class bidi_fd_watcher : private dprivate::base_bidi_fd_watcher
{
    template <typename, typename> friend class bidi_fd_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    void set_watch_enabled(EventLoop &eloop, bool in, bool b)
    {
        int events = in ? IN_EVENTS : OUT_EVENTS;
        auto orig_flags = this->watch_flags;

        if (b) {
            this->watch_flags |= events;
        }
        else {
            this->watch_flags &= ~events;
        }

        dprivate::base_watcher * watcher = in ? this : &this->out_watcher;

        if (! watcher->emulatefd) {
            if (EventLoop::loop_traits_t::has_separate_rw_fd_watches) {
                eloop.set_fd_enabled_nolock(this, this->watch_fd, events | ONE_SHOT, b);
            }
            else {
                eloop.set_fd_enabled_nolock(this, this->watch_fd,
                        (this->watch_flags & IO_EVENTS) | ONE_SHOT,
                        (this->watch_flags & IO_EVENTS) != 0);
            }
        }
        else {
            // emulation: if enabling a previously disabled watcher, must queue now:
            if (b && (orig_flags != this->watch_flags)) {
                this->watch_flags = orig_flags;
                loop_access::requeue_watcher(eloop, watcher);
            }
        }

        if (! b) {
            eloop.dequeue_watcher(watcher);
        }
    }

    public:

    using event_loop_t = EventLoop;

    void set_in_watch_enabled(event_loop_t &eloop, bool b) noexcept
    {
        eloop.get_base_lock().lock();
        set_watch_enabled(eloop, true, b);
        eloop.get_base_lock().unlock();
    }

    void set_out_watch_enabled(event_loop_t &eloop, bool b) noexcept
    {
        eloop.get_base_lock().lock();
        set_watch_enabled(eloop, false, b);
        eloop.get_base_lock().unlock();
    }

    // Set the watch flags, which enables/disables both the in-watch and the out-watch accordingly.
    //
    // Concurrency: this method can only be called if either
    //  - it does not enable a watcher that might currently be active, or
    //  - the event loop will not be polled while the watcher is active.
    // (i.e. it is ok to call set_watches from within the read_ready/write_ready handlers if no other
    // thread will poll the event loop; it is always ok to *dis*able a watcher that might be active,
    // though the re-arm action returned by the callback may undo the effect).
    void set_watches(event_loop_t &eloop, int new_flags) noexcept
    {
        std::lock_guard<mutex_t> guard(eloop.get_base_lock());
        bool use_emulation = this->emulatefd || this->out_watcher.emulatefd;
        if (use_emulation || EventLoop::loop_traits_t::has_separate_rw_fd_watches) {
            set_watch_enabled(eloop, true, (new_flags & IN_EVENTS) != 0);
            set_watch_enabled(eloop, false, (new_flags & OUT_EVENTS) != 0);
        }
        else {
            this->watch_flags = (this->watch_flags & ~IO_EVENTS) | new_flags;
            eloop.set_fd_enabled_nolock((dprivate::base_watcher *) this, this->watch_fd, this->watch_flags & IO_EVENTS, true);
        }
    }

    // Register a bi-directional file descriptor watcher with an event loop. Flags
    // can be any combination of dasynq::IN_EVENTS / dasynq::OUT_EVENTS.
    //
    // Can fail with std::bad_alloc or std::system_error.
    void add_watch(event_loop_t &eloop, int fd, int flags, int inprio = DEFAULT_PRIORITY, int outprio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->out_watcher.base_watcher::init();

        this->watch_fd = fd;
        this->watch_flags = flags | dprivate::multi_watch;
        this->read_removed = false;
        this->write_removed = false;
        this->priority = inprio;
        this->set_priority(this->out_watcher, outprio);
        eloop.register_fd(this, fd, flags, true);
    }

    void add_watch_noemu(event_loop_t &eloop, int fd, int flags, int inprio = DEFAULT_PRIORITY, int outprio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->out_watcher.base_watcher::init();

        this->watch_fd = fd;
        this->watch_flags = flags | dprivate::multi_watch;
        this->read_removed = false;
        this->write_removed = false;
        this->priority = inprio;
        this->set_priority(this->out_watcher, outprio);
        eloop.register_fd(this, fd, flags, false);
    }

    int get_watched_fd()
    {
        return this->watch_fd;
    }

    // Deregister a bi-directional file descriptor watcher.
    //
    // If other threads may be polling the event loop, it is not safe to assume
    // the watcher is unregistered until the watch_removed() callback is issued
    // (which will not occur until the event handler returns, if it is active).
    // In a single threaded environment, it is safe to delete the watcher after
    // calling this method as long as the handler (if it is active) accesses no
    // internal state and returns rearm::REMOVED.
    void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->watch_fd);
    }

    template <typename T>
    static bidi_fd_watcher<event_loop_t> *add_watch(event_loop_t &eloop, int fd, int flags, T watch_hndlr)
    {
        class lambda_bidi_watcher : public bidi_fd_watcher_impl<event_loop_t, lambda_bidi_watcher>
        {
            private:
            T watch_hndlr;

            public:
            lambda_bidi_watcher(T watch_handlr_a) : watch_hndlr(watch_handlr_a)
            {
                //
            }

            rearm read_ready(event_loop_t &eloop, int fd)
            {
                return watch_hndlr(eloop, fd, IN_EVENTS);
            }

            rearm write_ready(event_loop_t &eloop, int fd)
            {
                return watch_hndlr(eloop, fd, OUT_EVENTS);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_bidi_watcher * lfd = new lambda_bidi_watcher(watch_hndlr);
        lfd->add_watch(eloop, fd, flags);
        return lfd;
    }

    // virtual rearm read_ready(EventLoop &eloop, int fd) noexcept = 0;
    // virtual rearm write_ready(EventLoop &eloop, int fd) noexcept = 0;
};
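
// For example (an illustrative sketch; "my_loop" is an assumed event_loop_n instance and
// "sock_fd" a connected socket): with the lambda form, one handler receives both channels,
// distinguished by the flags argument (IN_EVENTS for read_ready, OUT_EVENTS for write_ready):
//
//     using loop_t = dasynq::event_loop_n;
//     loop_t::bidi_fd_watcher::add_watch(my_loop, sock_fd, dasynq::IN_EVENTS | dasynq::OUT_EVENTS,
//             [](loop_t &eloop, int fd, int flags) -> dasynq::rearm {
//                 if (flags & dasynq::IN_EVENTS) {
//                     // socket is readable ...
//                 }
//                 else {
//                     // socket is writable ...
//                 }
//                 return dasynq::rearm::REARM;
//             });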

template <typename EventLoop, typename Derived>
class bidi_fd_watcher_impl : public bidi_fd_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);
        this->emulate_enabled = false;

        loop_access::get_base_lock(loop).unlock();

        auto rearm_type = static_cast<Derived *>(this)->read_ready(loop, this->watch_fd);

        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->event_flags &= ~IN_EVENTS;
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            rearm_type = loop_access::process_primary_rearm(loop, this, rearm_type);

            auto &outwatcher = bidi_fd_watcher<EventLoop>::out_watcher;
            post_dispatch(loop, this, &outwatcher, rearm_type);
        }
    }

    void dispatch_second(void *loop_ptr) noexcept override
    {
        auto &outwatcher = bidi_fd_watcher<EventLoop>::out_watcher;

        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);
        loop_access::get_base_lock(loop).unlock();

        auto rearm_type = static_cast<Derived *>(this)->write_ready(loop, this->watch_fd);

        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->event_flags &= ~OUT_EVENTS;
            outwatcher.active = false;
            if (outwatcher.deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            rearm_type = loop_access::process_secondary_rearm(loop, this, &outwatcher, rearm_type);

            if (rearm_type == rearm::REQUEUE) {
                post_dispatch(loop, &outwatcher, rearm_type);
            }
            else {
                post_dispatch(loop, this, &outwatcher, rearm_type);
            }
        }
    }
};

// Child process event watcher
template <typename EventLoop>
class child_proc_watcher : private dprivate::base_child_watcher<typename EventLoop::loop_traits_t::proc_status_t>
{
    template <typename, typename> friend class child_proc_watcher_impl;

    using base_watcher = dprivate::base_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    public:

    using event_loop_t = EventLoop;
    using proc_status_t = typename EventLoop::loop_traits_t::proc_status_t;

    // Send a signal to this process, if it is still running, in a race-free manner.
    // The return value is as for POSIX kill(); returns -1 with errno set to ESRCH if the
    // process has already terminated.
    int send_signal(event_loop_t &loop, int signo) noexcept
    {
        auto &reaper_mutex = loop.get_reaper_lock();
        std::lock_guard<decltype(reaper_mutex)> guard(reaper_mutex);

        if (this->child_termd) {
            errno = ESRCH;
            return -1;
        }

        return kill(this->watch_pid, signo);
    }

    // Reserve resources for a child watcher with the given event loop.
    // Reservation can fail with std::bad_alloc. Some backends do not support
    // reservation (it will always fail) - check loop_traits_t::supports_childwatch_reservation.
    void reserve_watch(event_loop_t &eloop)
    {
        eloop.reserve_child_watch(this);
    }

    void unreserve(event_loop_t &eloop)
    {
        eloop.unreserve(this);
    }

    // Register a watcher for the given child process with an event loop.
    // Registration can fail with std::bad_alloc.
    // Note that in multi-threaded programs, use of this function may be prone to a
    // race condition such that the child terminates before the watcher is registered.
    void add_watch(event_loop_t &eloop, pid_t child, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->watch_pid = child;
        this->priority = prio;
        eloop.register_child(this, child);
    }

    // Register a watcher for the given child process with an event loop,
    // after having reserved resources previously (using reserve_watch).
    // Registration cannot fail.
    // Note that in multi-threaded programs, use of this function may be prone to a
    // race condition such that the child terminates before the watcher is registered;
    // use the "fork" member function to avoid this.
    void add_reserved(event_loop_t &eloop, pid_t child, int prio = DEFAULT_PRIORITY) noexcept
    {
        base_watcher::init();
        this->watch_pid = child;
        this->priority = prio;
        eloop.register_reserved_child(this, child);
    }

    void deregister(event_loop_t &eloop, pid_t child) noexcept
    {
        eloop.deregister(this, child);
    }

    // Stop watching the currently watched child, but retain watch reservation.
    void stop_watch(event_loop_t &eloop) noexcept
    {
        eloop.stop_watch(this);
    }

    // Fork and watch the child with this watcher on the given event loop.
    // If resource limitations prevent the child process from being watched, it is
    // terminated immediately (or, if the implementation allows, never started),
    // and a suitable std::system_error or std::bad_alloc exception is thrown.
    // Returns:
    // - the child pid in the parent
    // - 0 in the child
    pid_t fork(event_loop_t &eloop, bool from_reserved = false, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;

        if (EventLoop::loop_traits_t::supports_childwatch_reservation) {
            // Reserve a watch, fork, then claim reservation
            if (! from_reserved) {
                reserve_watch(eloop);
            }

            auto &lock = eloop.get_base_lock();
            lock.lock();

            pid_t child = ::fork();
            if (child == -1) {
                // Unreserve watch.
                lock.unlock();
                unreserve(eloop);
                throw std::system_error(errno, std::system_category());
            }

            if (child == 0) {
                // I am the child
                lock.unlock(); // may not really be necessary
                return 0;
            }

            // Register this watcher.
            this->watch_pid = child;
            eloop.register_reserved_child_nolock(this, child);
            lock.unlock();
            return child;
        }
        else {
            int pipefds[2];
            if (pipe2(pipefds, O_CLOEXEC) == -1) {
                throw std::system_error(errno, std::system_category());
            }

            std::lock_guard<mutex_t> guard(eloop.get_base_lock());

            pid_t child = ::fork();
            if (child == -1) {
                throw std::system_error(errno, std::system_category());
            }

            if (child == 0) {
                // I am the child

                close(pipefds[1]);

                // Wait for message from parent before continuing:
                int rr;
                int r = read(pipefds[0], &rr, sizeof(rr));
                while (r == -1 && errno == EINTR) {
                    r = read(pipefds[0], &rr, sizeof(rr));
                }

                if (r <= 0) _exit(0);

                close(pipefds[0]);
                return 0;
            }

            close(pipefds[0]); // close read end

            // Register this watcher.
            try {
                this->watch_pid = child;
                eloop.register_child(this, child);

                // Continue in child (it doesn't matter what is written):
                write(pipefds[1], &pipefds, sizeof(int));
                close(pipefds[1]);
                return child;
            }
            catch (...) {
                close(pipefds[1]);
                throw;
            }
        }
    }

    // virtual rearm status_change(EventLoop &eloop, pid_t child, proc_status_t status) = 0;
};
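
// For example (an illustrative sketch; "my_loop" is an assumed event_loop_n instance):
// derive from child_proc_watcher_impl and implement the status_change callback that
// dispatch() invokes, then use fork() to start and watch the child race-free:
//
//     using loop_t = dasynq::event_loop_n;
//
//     class my_child_watcher : public loop_t::child_proc_watcher_impl<my_child_watcher>
//     {
//         public:
//         dasynq::rearm status_change(loop_t &eloop, pid_t child, proc_status_t status)
//         {
//             // child terminated; stop watching it
//             return dasynq::rearm::REMOVE;
//         }
//     };
//
//     my_child_watcher watcher;
//     pid_t child = watcher.fork(my_loop);   // returns 0 in the child, the pid in the parent
//     if (child == 0) {
//         // ... child process code ...
//         _exit(0);
//     }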

template <typename EventLoop, typename Derived>
class child_proc_watcher_impl : public child_proc_watcher<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);
        loop_access::get_base_lock(loop).unlock();

        auto rearm_type = static_cast<Derived *>(this)->status_change(loop, this->watch_pid, this->child_status);

        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            loop_access::process_child_watch_rearm(loop, this, rearm_type);

            // rearm_type = loop.process??;

            post_dispatch(loop, this, rearm_type);
        }
    }
};

template <typename EventLoop>
class timer : private base_timer_watcher
{
    template <typename, typename> friend class timer_impl;
    using base_t = base_timer_watcher;
    using mutex_t = typename EventLoop::mutex_t;

    public:

    using event_loop_t = EventLoop;

    void add_timer(event_loop_t &eloop, clock_type clock = clock_type::MONOTONIC, int prio = DEFAULT_PRIORITY)
    {
        base_watcher::init();
        this->priority = prio;
        this->clock = clock;
        this->intervals = 0;
        eloop.register_timer(this, clock);
    }

    void arm_timer(event_loop_t &eloop, const timespec &timeout) noexcept
    {
        eloop.set_timer(this, timeout, base_t::clock);
    }

    void arm_timer(event_loop_t &eloop, const timespec &timeout, const timespec &interval) noexcept
    {
        eloop.set_timer(this, timeout, interval, base_t::clock);
    }

    // Arm timer, relative to now:
    void arm_timer_rel(event_loop_t &eloop, const timespec &timeout) noexcept
    {
        eloop.set_timer_rel(this, timeout, base_t::clock);
    }

    void arm_timer_rel(event_loop_t &eloop, const timespec &timeout,
            const timespec &interval) noexcept
    {
        eloop.set_timer_rel(this, timeout, interval, base_t::clock);
    }

    void stop_timer(event_loop_t &eloop) noexcept
    {
        eloop.stop_timer(this, base_t::clock);
    }

    void set_enabled(event_loop_t &eloop, clock_type clock, bool enabled) noexcept
    {
        std::lock_guard<mutex_t> guard(eloop.get_base_lock());
        eloop.set_timer_enabled_nolock(this, clock, enabled);
        if (! enabled) {
            eloop.dequeue_watcher(this);
        }
    }

    void deregister(event_loop_t &eloop) noexcept
    {
        eloop.deregister(this, this->clock);
    }

    template <typename T>
    static timer<EventLoop> *add_timer(EventLoop &eloop, clock_type clock, bool relative,
            const timespec &timeout, const timespec &interval, T watch_hndlr)
    {
        class lambda_timer : public timer_impl<event_loop_t, lambda_timer>
        {
            private:
            T watch_hndlr;

            public:
            lambda_timer(T watch_handlr_a) : watch_hndlr(watch_handlr_a)
            {
                //
            }

            rearm timer_expiry(event_loop_t &eloop, int intervals)
            {
                return watch_hndlr(eloop, intervals);
            }

            void watch_removed() noexcept override
            {
                delete this;
            }
        };

        lambda_timer * lt = new lambda_timer(watch_hndlr);
        lt->add_timer(eloop, clock);
        if (relative) {
            lt->arm_timer_rel(eloop, timeout, interval);
        }
        else {
            lt->arm_timer(eloop, timeout, interval);
        }
        return lt;
    }

    // Timer expired, and the given number of intervals have elapsed before
    // expiry event was queued. Normally intervals == 1 to indicate no
    // overrun.
    // virtual rearm timer_expiry(event_loop_t &eloop, int intervals) = 0;
};
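
// For example (an illustrative sketch; "my_loop" is an assumed event_loop_n instance):
// a repeating one-second timer armed relative to the current time via the lambda form:
//
//     using loop_t = dasynq::event_loop_n;
//     timespec timeout = { 1, 0 };    // first expiry after 1 second
//     timespec interval = { 1, 0 };   // then every 1 second
//     loop_t::timer::add_timer(my_loop, dasynq::clock_type::MONOTONIC, true /* relative */,
//             timeout, interval,
//             [](loop_t &eloop, int intervals) -> dasynq::rearm {
//                 // intervals is normally 1; larger values indicate overrun
//                 return dasynq::rearm::REARM;
//             });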

template <typename EventLoop, typename Derived>
class timer_impl : public timer<EventLoop>
{
    void dispatch(void *loop_ptr) noexcept override
    {
        EventLoop &loop = *static_cast<EventLoop *>(loop_ptr);
        loop_access::get_base_lock(loop).unlock();

        auto intervals_report = this->intervals;
        this->intervals = 0;

        auto rearm_type = static_cast<Derived *>(this)->timer_expiry(loop, intervals_report);

        loop_access::get_base_lock(loop).lock();

        if (rearm_type != rearm::REMOVED) {
            this->active = false;
            if (this->deleteme) {
                // We don't want a watch that is marked "deleteme" to re-arm itself.
                rearm_type = rearm::REMOVE;
            }

            loop_access::process_timer_rearm(loop, this, rearm_type);

            post_dispatch(loop, this, rearm_type);
        }
    }
};

} // namespace dprivate
} // namespace v2
} // namespace dasynq

#endif /* DASYNQ_H_ */