threads_pthread.c

/*
 * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/cryptlib.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(__sun)
# include <atomic.h>
#endif

#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE
 * and __ATOMIC_ACQ_REL but which expects only one parameter for
 * __atomic_is_lock_free() rather than two, i.e. its signature is
 * __atomic_is_lock_free(sizeof(_Atomic(T))).
 * All of this makes it impossible to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>

# ifdef PTHREAD_RWLOCK_INITIALIZER
#  define USE_RWLOCK
# endif

# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
#  define ATOMIC_LOAD_N(p, o) __atomic_load_n(p, o)
#  define ATOMIC_STORE_N(p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_EXCHANGE_N(p, v, o) __atomic_exchange_n(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
#  define ATOMIC_AND_FETCH(p, m, o) __atomic_and_fetch(p, m, o)
#  define ATOMIC_OR_FETCH(p, m, o) __atomic_or_fetch(p, m, o)
# else
/*
 * When GCC-style atomic builtins are not available, emulate them with a
 * single global mutex.  This is slower, but preserves the semantics the
 * ATOMIC_* macros rely on below.
 */
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;

static inline void *fallback_atomic_load_n(void **p)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_LOAD_N(p, o) fallback_atomic_load_n((void **)p)

static inline void *fallback_atomic_store_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_STORE_N(p, v, o) fallback_atomic_store_n((void **)p, (void *)v)

static inline void fallback_atomic_store(void **p, void **v)
{
    pthread_mutex_lock(&atomic_sim_lock);
    *p = *v;
    pthread_mutex_unlock(&atomic_sim_lock);
}

#  define ATOMIC_STORE(p, v, o) fallback_atomic_store((void **)p, (void **)v)

static inline void *fallback_atomic_exchange_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_EXCHANGE_N(p, v, o) fallback_atomic_exchange_n((void **)p, (void *)v)

static inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)

static inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p += v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)

static inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)

static inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p &= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_AND_FETCH(p, m, o) fallback_atomic_and_fetch(p, m)

static inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p |= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_OR_FETCH(p, m, o) fallback_atomic_or_fetch(p, m)
# endif

static CRYPTO_THREAD_LOCAL rcu_thr_key;

/*
 * users is broken up into 2 parts
 * bits 0-15:  current readers
 * bits 32-63: ID
 */
# define READER_SHIFT 0
# define ID_SHIFT 32
# define READER_SIZE 16
# define ID_SIZE 32

# define READER_MASK     (((uint64_t)1 << READER_SIZE) - 1)
# define ID_MASK         (((uint64_t)1 << ID_SIZE) - 1)
# define READER_COUNT(x) (((uint64_t)(x) >> READER_SHIFT) & READER_MASK)
# define ID_VAL(x)       (((uint64_t)(x) >> ID_SHIFT) & ID_MASK)
# define VAL_READER      ((uint64_t)1 << READER_SHIFT)
# define VAL_ID(x)       ((uint64_t)x << ID_SHIFT)
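
/*
 * Illustrative example (not part of the original source): with the layout
 * above, a users value of VAL_ID(3) | (2 * VAL_READER) encodes generation
 * ID 3 with 2 active readers, so READER_COUNT() yields 2 and ID_VAL()
 * yields 3.  The numbers are arbitrary and chosen only for illustration.
 */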

/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. users is the 64 bit value that
 * stores the READERS/ID as defined above.
 */
struct rcu_qp {
    uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10

/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK;
 * it is cast from CRYPTO_RCU_LOCK.
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;
    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;
    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;
    /* Number of elements in qp_group array */
    size_t group_count;
    /* Index of the current qp in the qp_group array */
    uint64_t reader_idx;
    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;
    /* index of the next free rcu_qp in the qp_group */
    uint64_t current_alloc_idx;
    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;
    /* lock protecting write side operations */
    pthread_mutex_t write_lock;
    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;
    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;
    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;
    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};
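
/*
 * Minimal usage sketch for the RCU API implemented below (illustrative only,
 * not part of the original source).  "shared_cfg", "struct cfg_st",
 * "make_new_cfg" and "free_cfg" are hypothetical names used only here:
 *
 *     CRYPTO_RCU_LOCK *lk = ossl_rcu_lock_new(1);
 *
 *     // reader side
 *     ossl_rcu_read_lock(lk);
 *     struct cfg_st *c = ossl_rcu_uptr_deref((void **)&shared_cfg);
 *     // ... read through c, never write ...
 *     ossl_rcu_read_unlock(lk);
 *
 *     // writer side
 *     ossl_rcu_write_lock(lk);
 *     struct cfg_st *old = shared_cfg;
 *     struct cfg_st *neu = make_new_cfg(old);
 *     ossl_rcu_assign_uptr((void **)&shared_cfg, (void **)&neu);
 *     ossl_rcu_write_unlock(lk);
 *     ossl_synchronize_rcu(lk);   // wait for readers of "old" to drain
 *     free_cfg(old);
 *
 *     ossl_rcu_lock_free(lk);
 */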

/*
 * Called on thread exit to free the pthread key
 * associated with this thread, if any
 */
static void free_rcu_thr_data(void *ptr)
{
    struct rcu_thr_data *data =
        (struct rcu_thr_data *)CRYPTO_THREAD_get_local(&rcu_thr_key);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(&rcu_thr_key, NULL);
}

static void ossl_rcu_init(void)
{
    CRYPTO_THREAD_init_local(&rcu_thr_key, NULL);
}

/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint64_t qp_idx;

    /* get the current qp index */
    for (;;) {
        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them
         *    above this operation.  Specifically, we don't want the below
         *    re-load of qp_idx to get optimized away.
         * 2) We want to ensure that any updating of reader_idx on the write
         *    side of the lock is flushed from a local cpu cache so that we
         *    see any updates prior to the load.  This is a non-issue on cache
         *    coherent systems like x86, but is relevant on other arches.
         * Note: This applies to the reload below as well.
         */
        qp_idx = (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE);

        /*
         * Notes on use of __ATOMIC_RELEASE
         * This counter is only read by the write side of the lock, and so we
         * specify __ATOMIC_RELEASE here to ensure that the write side of the
         * lock sees this during its spin loop read of users, as it waits for
         * the reader count to approach zero.
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE))
            break;

        /*
         * Notes on use of __ATOMIC_RELEASE
         * As with the add above, we want to ensure that this decrement is
         * seen by the write side of the lock as soon as it happens to prevent
         * undue spinning waiting for write side completion.
         */
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);
    }

    return &lock->qp_group[qp_idx];
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;

    /*
     * Fetch this thread's rcu tracking data, allocating it (and registering
     * a thread-exit cleanup handler) on first use.
     */
    data = CRYPTO_THREAD_get_local(&rcu_thr_key);
    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(&rcu_thr_key, data);
        ossl_init_thread_start(NULL, NULL, free_rcu_thr_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(&rcu_thr_key);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * As with read side acquisition, we use __ATOMIC_RELEASE here
             * to ensure that the decrement is published immediately
             * to any write side waiters
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, VAL_READER,
                                       __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }

    /*
     * If we get here, we're trying to unlock a lock that we never acquired -
     * that's fatal.
     */
    assert(0);
}

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint64_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;
    lock->id_ctr++;
    new_id = VAL_ID(new_id);

    /*
     * Even though we are under a write side lock here,
     * we need to use atomic instructions to ensure that the results
     * of this update are published to the read side prior to updating the
     * reader idx below
     */
    ATOMIC_AND_FETCH(&lock->qp_group[current_idx].users, ID_MASK,
                     __ATOMIC_RELEASE);
    ATOMIC_OR_FETCH(&lock->qp_group[current_idx].users, new_id,
                    __ATOMIC_RELEASE);

    /*
     * Update the reader index to point at the new qp, so new readers no
     * longer land on the one we are about to wait on.
     * Note the use of __ATOMIC_RELEASE here is based on the corresponding use
     * of __ATOMIC_ACQUIRE in get_hold_current_qp, as we want any publication
     * of this value to be seen on the read side immediately after it happens
     */
    ATOMIC_STORE_N(&lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}

static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            int count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_unlock(&lock->write_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /*
     * __ATOMIC_ACQ_REL is used here to ensure that we get any prior published
     * writes before we read, and publish our write immediately
     */
    cb_items = ATOMIC_EXCHANGE_N(&lock->cb_items, NULL, __ATOMIC_ACQ_REL);

    qp = update_qp(lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in get_hold_current_qp
     * is visible prior to our read
     */
    do {
        count = (uint64_t)ATOMIC_LOAD_N(&qp->users, __ATOMIC_ACQUIRE);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}

int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;
    /*
     * Use __ATOMIC_ACQ_REL here to indicate that any prior writes to this
     * list are visible to us prior to reading, and publish the new value
     * immediately
     */
    new->next = ATOMIC_EXCHANGE_N(&lock->cb_items, new, __ATOMIC_ACQ_REL);

    return 1;
}
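
/*
 * Illustrative sketch (not from the original source): instead of calling
 * ossl_synchronize_rcu() and freeing the old object inline, a writer can
 * queue the free to run after the next grace period.  The queued callbacks
 * are executed by whichever thread next runs ossl_synchronize_rcu().
 * "shared_cfg", "new_cfg", "old_cfg" and "free_cfg" are hypothetical names:
 *
 *     ossl_rcu_write_lock(lk);
 *     ossl_rcu_assign_uptr((void **)&shared_cfg, (void **)&new_cfg);
 *     ossl_rcu_call(lk, free_cfg, old_cfg);   // deferred reclamation
 *     ossl_rcu_write_unlock(lk);
 *     ossl_synchronize_rcu(lk);               // runs queued callbacks
 */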

void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)ATOMIC_LOAD_N(p, __ATOMIC_ACQUIRE);
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(p, v, __ATOMIC_RELEASE);
}

static CRYPTO_ONCE rcu_init_once = CRYPTO_ONCE_STATIC_INIT;

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers)
{
    struct rcu_lock_st *new;

    if (!CRYPTO_THREAD_run_once(&rcu_init_once, ossl_rcu_init))
        return NULL;

    if (num_writers < 1)
        num_writers = 1;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);

    new->qp_group = allocate_new_qp_group(new, num_writers + 1);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }
    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined(__TANDEM) && !defined(_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_rdlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_wrlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}
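
/*
 * Minimal usage sketch for the lock API above (illustrative only, not part
 * of the original source); "shared_counter" is a hypothetical variable:
 *
 *     CRYPTO_RWLOCK *lk = CRYPTO_THREAD_lock_new();
 *
 *     if (lk != NULL && CRYPTO_THREAD_write_lock(lk)) {
 *         shared_counter++;
 *         CRYPTO_THREAD_unlock(lk);
 *     }
 *
 *     CRYPTO_THREAD_lock_free(lk);
 */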

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}
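
/*
 * Illustrative use of the thread-local helpers above (not from the original
 * source); "my_key" and "my_free" are hypothetical names, and my_free is run
 * for each thread's non-NULL value when that thread exits:
 *
 *     static CRYPTO_THREAD_LOCAL my_key;
 *
 *     CRYPTO_THREAD_init_local(&my_key, my_free);   // once, at startup
 *     CRYPTO_THREAD_set_local(&my_key, OPENSSL_zalloc(64));
 *     void *p = CRYPTO_THREAD_get_local(&my_key);   // per-thread value
 *     CRYPTO_THREAD_cleanup_local(&my_key);         // once, at shutdown
 */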

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
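
/*
 * Illustrative call pattern (not from the original source): the lock argument
 * is only used on the fallback path, but callers should still supply one so
 * the call succeeds when no native atomics are available.  "obj" and
 * "refcount_lock" are hypothetical; refcount_lock would come from
 * CRYPTO_THREAD_lock_new():
 *
 *     int newval;
 *
 *     if (!CRYPTO_atomic_add(&obj->refcount, 1, &newval, refcount_lock))
 *         return 0;   // lock failure on the fallback path
 */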

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;

    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;

    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif