/*
 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
# pragma once

# include <openssl/e_os2.h>
# include <openssl/trace.h>
# include <openssl/err.h>

# if defined(OPENSSL_THREADS) && !defined(OPENSSL_DEV_NO_ATOMICS)
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
     && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0
#   define HAVE_ATOMICS 1

typedef struct {
    _Atomic int val;
} CRYPTO_REF_COUNT;

static inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_fetch_add_explicit(&refcnt->val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to the shared structure other than the reference counter have to
 * be serialized, and any kind of serialization implies a release fence.
 * This means that by the time the reference counter is decremented, all
 * other changes are visible on all processors, so the decrement itself can
 * be relaxed. If it hits zero, the object will be destroyed. Since that is
 * the last use of the object, the destructor's author might reasonably
 * assume that access to mutable members no longer has to be serialized,
 * which would otherwise imply an acquire fence. Hence the conditional
 * acquire fence...
 */
static inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_fetch_sub_explicit(&refcnt->val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}
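
/*
 * A minimal sketch of the destructor pattern the comment above assumes.
 * EXAMPLE_OBJ, its lock and its free routine are hypothetical, purely for
 * illustration; they are not part of this header.
 *
 *     typedef struct {
 *         CRYPTO_REF_COUNT references;
 *         CRYPTO_RWLOCK *lock;        // serializes writes to data
 *         int data;                   // mutable shared state
 *     } EXAMPLE_OBJ;
 *
 *     void EXAMPLE_OBJ_free(EXAMPLE_OBJ *obj)
 *     {
 *         int i;
 *
 *         if (obj == NULL)
 *             return;
 *         // Prior mutations of obj->data were made under obj->lock, so a
 *         // release fence has already been paid for; the relaxed decrement
 *         // is enough...
 *         CRYPTO_DOWN_REF(&obj->references, &i);
 *         if (i > 0)
 *             return;
 *         // ...and the acquire fence issued inside CRYPTO_DOWN_REF when the
 *         // count hits zero makes those mutations visible here, so tearing
 *         // the object down without taking the lock is safe.
 *         CRYPTO_THREAD_lock_free(obj->lock);
 *         OPENSSL_free(obj);
 *     }
 */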

static inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_load_explicit(&refcnt->val, memory_order_relaxed);
    return 1;
}

#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0
#   define HAVE_ATOMICS 1

typedef struct {
    int val;
} CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_fetch_add(&refcnt->val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_fetch_sub(&refcnt->val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}

static __inline__ int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_load_n(&refcnt->val, __ATOMIC_RELAXED);
    return 1;
}

#  elif defined(__ICL) && defined(_WIN32)
#   define HAVE_ATOMICS 1

typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedOr((void *)&refcnt->val, 0);
    return 1;
}

#  elif defined(_MSC_VER) && _MSC_VER>=1200
#   define HAVE_ATOMICS 1

typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif

static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd_nf(&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd_nf(&refcnt->val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedOr_nf((void *)&refcnt->val, 0);
    return 1;
}

#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#     else
/* under Windows CE we still have old-style Interlocked* functions */
extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif

static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, 0);
    return 1;
}
#   endif
#  endif
# endif /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback. This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef struct {
    int val;
#  ifdef OPENSSL_THREADS
    CRYPTO_RWLOCK *lock;
#  endif
} CRYPTO_REF_COUNT;

#  ifdef OPENSSL_THREADS

static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
                                                 int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, 1, ret, refcnt->lock);
}

static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
                                                   int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, -1, ret, refcnt->lock);
}

static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
                                                  int *ret)
{
    return CRYPTO_atomic_load_int(&refcnt->val, ret, refcnt->lock);
}

#   define CRYPTO_NEW_FREE_DEFINED 1
static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    refcnt->lock = CRYPTO_THREAD_lock_new();
    if (refcnt->lock == NULL) {
        ERR_raise(ERR_LIB_CRYPTO, ERR_R_CRYPTO_LIB);
        return 0;
    }
    return 1;
}

static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt)
{
    if (refcnt != NULL)
        CRYPTO_THREAD_lock_free(refcnt->lock);
}

#  else /* OPENSSL_THREADS */

static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
                                                 int *ret)
{
    refcnt->val++;
    *ret = refcnt->val;
    return 1;
}

static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
                                                   int *ret)
{
    refcnt->val--;
    *ret = refcnt->val;
    return 1;
}

static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
                                                  int *ret)
{
    *ret = refcnt->val;
    return 1;
}
#  endif /* OPENSSL_THREADS */
# endif

# ifndef CRYPTO_NEW_FREE_DEFINED
static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    return 1;
}

static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt)
{
}
# endif /* CRYPTO_NEW_FREE_DEFINED */
# undef CRYPTO_NEW_FREE_DEFINED
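
/*
 * Typical lifecycle, as a hedged sketch: a hypothetical object embeds a
 * CRYPTO_REF_COUNT named "references", initialises it with CRYPTO_NEW_REF()
 * at creation time and releases any backing lock with CRYPTO_FREE_REF() at
 * destruction time. EXAMPLE_OBJ is illustrative only.
 *
 *     typedef struct {
 *         CRYPTO_REF_COUNT references;
 *     } EXAMPLE_OBJ;
 *
 *     EXAMPLE_OBJ *EXAMPLE_OBJ_new(void)
 *     {
 *         EXAMPLE_OBJ *obj = OPENSSL_zalloc(sizeof(*obj));
 *
 *         if (obj == NULL)
 *             return NULL;
 *         // With the lock-based fallback this allocates a lock and can
 *         // fail; with atomics it cannot, but the check stays portable.
 *         if (!CRYPTO_NEW_REF(&obj->references, 1)) {
 *             OPENSSL_free(obj);
 *             return NULL;
 *         }
 *         return obj;
 *     }
 *
 *     // ...and in the destructor, once the count has reached zero:
 *     //     CRYPTO_FREE_REF(&obj->references);
 *     //     OPENSSL_free(obj);
 */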

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# define REF_PRINT_EX(text, count, object) \
    OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));

# define REF_PRINT_COUNT(text, object) \
    REF_PRINT_EX(text, object->references.val, (void *)object)
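
/*
 * How the debug macros are typically combined in an up-ref function, as a
 * hedged sketch modelled on OpenSSL's *_up_ref() style; EXAMPLE_OBJ is
 * hypothetical. Note that REF_PRINT_COUNT() requires the counter member of
 * the object to be named "references".
 *
 *     int EXAMPLE_OBJ_up_ref(EXAMPLE_OBJ *obj)
 *     {
 *         int i;
 *
 *         if (CRYPTO_UP_REF(&obj->references, &i) <= 0)
 *             return 0;
 *
 *         REF_PRINT_COUNT("EXAMPLE_OBJ", obj);
 *         // After a successful up-ref the count must be at least 2.
 *         REF_ASSERT_ISNT(i < 2);
 *         return i > 1;
 *     }
 */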

#endif