hashtable.c 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691
  1. /*
  2. * Copyright 2024 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. *
  9. *
  10. *
  11. * Notes On hash table design and layout
  12. * This hashtable uses a hopscotch algorithm to do indexing. The data structure
  13. * looks as follows:
  14. *
  15. * hash +--------------+
  16. * value+------->+ HT_VALUE |
  17. * + +--------------+
  18. * +-------+
  19. * | |
  20. * +---------------------------------------------------------+
  21. * | | | | | |
  22. * | entry | entry | entry | entry | |
  23. * | | | | | |
  24. * +---------------------------------------------------------+
  25. * | | |
  26. * | | |
  27. * +---------------------------------------------------------+
  28. * | + + +
  29. * | neighborhood[0] neighborhood[1] |
  30. * | |
  31. * | |
  32. * +---------------------------------------------------------+
  33. * |
  34. * +
  35. * neighborhoods
  36. *
  37. * On lookup/insert/delete, the items key is hashed to a 64 bit value
  38. * and the result is masked to provide an index into the neighborhoods
  39. * table. Once a neighborhood is determined, an in-order search is done
  40. * of the elements in the neighborhood indexes entries for a matching hash
  41. * value, if found, the corresponding HT_VALUE is used for the respective
  42. * operation. The number of entries in a neighborhood is determined at build
  43. * time based on the cacheline size of the target CPU. The intent is for a
  44. * neighborhood to have all entries in the neighborhood fit into a single cache
  45. * line to speed up lookups. If all entries in a neighborhood are in use at the
  46. * time of an insert, the table is expanded and rehashed.
  47. */
  48. #include <string.h>
  49. #include <internal/rcu.h>
  50. #include <internal/hashtable.h>
  51. #include <openssl/rand.h>
  52. /*
  53. * gcc defines __SANITIZE_THREAD__
  54. * but clang uses the feature attributes api
  55. * map the latter to the former
  56. */
  57. #if defined(__clang__) && defined(__has_feature)
  58. # if __has_feature(thread_sanitizer)
  59. # define __SANITIZE_THREADS__
  60. # endif
  61. #endif
  62. #ifdef __SANITIZE_THREADS__
  63. # include <sanitizer/tsan_interface.h>
  64. #endif
  65. #include "internal/numbers.h"
  66. /*
  67. * When we do a lookup/insert/delete, there is a high likelyhood
  68. * that we will iterate over at least part of the neighborhood list
  69. * As such, because we design a neighborhood entry to fit into a single
  70. * cache line it is advantageous, when supported to fetch the entire
  71. * structure for faster lookups
  72. */
  73. #if defined(__GNUC__) || defined(__CLANG__)
  74. #define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
  75. #else
  76. #define PREFETCH_NEIGHBORHOOD(x)
  77. #endif
  78. static ossl_unused uint64_t fnv1a_hash(uint8_t *key, size_t len)
  79. {
  80. uint64_t hash = 0xcbf29ce484222325ULL;
  81. size_t i;
  82. for (i = 0; i < len; i++) {
  83. hash ^= key[i];
  84. hash *= 0x00000100000001B3ULL;
  85. }
  86. return hash;
  87. }
  88. /*
  89. * Define our neighborhood list length
  90. * Note: It should always be a power of 2
  91. */
  92. #define DEFAULT_NEIGH_LEN_LOG 4
  93. #define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)
  94. /*
  95. * For now assume cache line size is 64 bytes
  96. */
  97. #define CACHE_LINE_BYTES 64
  98. #define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES
  99. #define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
  100. /*
  101. * Defines our chains of values
  102. */
/*
 * Wrapper around the public HT_VALUE; carries a back pointer to the
 * owning table so deferred (RCU) free callbacks can find the
 * configured ht_free_fn.
 */
struct ht_internal_value_st {
    HT_VALUE value;
    HT *ht;
};

/* One slot in a neighborhood: cached 64 bit hash plus the value pointer */
struct ht_neighborhood_entry_st {
    uint64_t hash;
    struct ht_internal_value_st *value;
};

/*
 * A neighborhood; NEIGHBORHOOD_LEN is derived from CACHE_LINE_BYTES so
 * the entries array is intended to fit in a single cache line.
 */
struct ht_neighborhood_st {
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
};
/*
 * Updates to data in this struct
 * require an rcu sync after modification
 * prior to free
 */
struct ht_mutable_data_st {
    /* reader-visible bucket array, published via ossl_rcu_assign_ptr */
    struct ht_neighborhood_st *neighborhoods;
    /*
     * raw allocation backing neighborhoods; may differ from the
     * neighborhoods pointer when OPENSSL_aligned_alloc was used,
     * and is the pointer that must be passed to OPENSSL_free
     */
    void *neighborhood_ptr_to_free;
    /* neighborhood count - 1; count is always a power of 2 */
    uint64_t neighborhood_mask;
};

/*
 * Private data may be updated on the write
 * side only, and so do not require rcu sync
 */
struct ht_write_private_data_st {
    /* current number of neighborhoods (power of 2) */
    size_t neighborhood_len;
    /* number of values currently stored in the table */
    size_t value_count;
    /* set when an ossl_rcu_call was queued under the current write lock */
    int need_sync;
};

/* The hash table proper (the opaque HT of the public header) */
struct ht_internal_st {
    HT_CONFIG config;
    CRYPTO_RCU_LOCK *lock;          /* RCU lock guarding md */
    CRYPTO_RWLOCK *atomic_lock;     /* fallback lock for CRYPTO_atomic_* ops */
    struct ht_mutable_data_st *md;  /* RCU-protected; readers use ossl_rcu_deref */
    struct ht_write_private_data_st wpd;
};
  140. static void free_value(struct ht_internal_value_st *v);
  141. static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len,
  142. void **freeptr)
  143. {
  144. struct ht_neighborhood_st *ret;
  145. ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len,
  146. CACHE_LINE_BYTES, freeptr);
  147. /* fall back to regular malloc */
  148. if (ret == NULL) {
  149. ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len);
  150. if (ret == NULL)
  151. return NULL;
  152. }
  153. memset(ret, 0, sizeof(struct ht_neighborhood_st) * len);
  154. return ret;
  155. }
  156. static void internal_free_nop(HT_VALUE *v)
  157. {
  158. return;
  159. }
  160. HT *ossl_ht_new(HT_CONFIG *conf)
  161. {
  162. HT *new = OPENSSL_zalloc(sizeof(*new));
  163. if (new == NULL)
  164. return NULL;
  165. new->atomic_lock = CRYPTO_THREAD_lock_new();
  166. if (new->atomic_lock == NULL)
  167. goto err;
  168. memcpy(&new->config, conf, sizeof(*conf));
  169. if (new->config.init_neighborhoods != 0) {
  170. new->wpd.neighborhood_len = new->config.init_neighborhoods;
  171. /* round up to the next power of 2 */
  172. new->wpd.neighborhood_len--;
  173. new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1;
  174. new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2;
  175. new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4;
  176. new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8;
  177. new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16;
  178. new->wpd.neighborhood_len++;
  179. } else {
  180. new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
  181. }
  182. if (new->config.ht_free_fn == NULL)
  183. new->config.ht_free_fn = internal_free_nop;
  184. new->md = OPENSSL_zalloc(sizeof(*new->md));
  185. if (new->md == NULL)
  186. goto err;
  187. new->md->neighborhoods =
  188. alloc_new_neighborhood_list(new->wpd.neighborhood_len,
  189. &new->md->neighborhood_ptr_to_free);
  190. if (new->md->neighborhoods == NULL)
  191. goto err;
  192. new->md->neighborhood_mask = new->wpd.neighborhood_len - 1;
  193. new->lock = ossl_rcu_lock_new(1, conf->ctx);
  194. if (new->lock == NULL)
  195. goto err;
  196. if (new->config.ht_hash_fn == NULL)
  197. new->config.ht_hash_fn = fnv1a_hash;
  198. return new;
  199. err:
  200. CRYPTO_THREAD_lock_free(new->atomic_lock);
  201. ossl_rcu_lock_free(new->lock);
  202. if (new->md != NULL)
  203. OPENSSL_free(new->md->neighborhood_ptr_to_free);
  204. OPENSSL_free(new->md);
  205. OPENSSL_free(new);
  206. return NULL;
  207. }
/* Enter an RCU read-side critical section on the table */
void ossl_ht_read_lock(HT *htable)
{
    ossl_rcu_read_lock(htable->lock);
}
/* Leave the RCU read-side critical section on the table */
void ossl_ht_read_unlock(HT *htable)
{
    ossl_rcu_read_unlock(htable->lock);
}
/*
 * Take the write lock.  need_sync is reset here; any write-side
 * operation that queues an RCU callback sets it again so the matching
 * unlock knows a synchronize is required.
 */
void ossl_ht_write_lock(HT *htable)
{
    ossl_rcu_write_lock(htable->lock);
    htable->wpd.need_sync = 0;
}
/*
 * Release the write lock.  If any operation performed under the lock
 * queued RCU callbacks (need_sync set), synchronize after unlocking so
 * the deferred frees run.  need_sync is read before the unlock because
 * another writer may take the lock immediately after.
 */
void ossl_ht_write_unlock(HT *htable)
{
    int need_sync = htable->wpd.need_sync;

    htable->wpd.need_sync = 0;
    ossl_rcu_write_unlock(htable->lock);
    if (need_sync)
        ossl_synchronize_rcu(htable->lock);
}
/*
 * RCU callback used by flush: free every value still held in the
 * retired mutable-data snapshot (invoking the user free function),
 * then the bucket array and the snapshot itself.  Runs after all
 * readers of the old snapshot have drained.
 */
static void free_oldmd(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;
    size_t i, j;
    size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1;
    struct ht_internal_value_st *v;

    for (i = 0; i < neighborhood_len; i++) {
        /*
         * Prefetch the next neighborhood while scanning this one.
         * NOTE(review): on the final iteration this forms the address of
         * neighborhoods[len] (one past the end); prefetch never
         * dereferences it, but confirm this is intended.
         */
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (oldmd->neighborhoods[i].entries[j].value != NULL) {
                v = oldmd->neighborhoods[i].entries[j].value;
                /* user callback first, then our wrapper struct */
                v->ht->config.ht_free_fn((HT_VALUE *)v);
                free_value(v);
            }
        }
    }
    /* free the raw allocation, not the (possibly aligned) array pointer */
    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}
/*
 * Replace the entire mutable data set with a fresh, empty one of
 * DEFAULT_NEIGH_LEN neighborhoods; the old set (and all values in it)
 * is freed via RCU callback once readers drain.
 * NOTE(review): presumably called with the write lock held — confirm
 * against callers; need_sync is write-side state.
 * Returns 1 on success, 0 on allocation failure (table unchanged).
 */
static int ossl_ht_flush_internal(HT *h)
{
    struct ht_mutable_data_st *newmd = NULL;
    struct ht_mutable_data_st *oldmd = NULL;

    newmd = OPENSSL_zalloc(sizeof(*newmd));
    if (newmd == NULL)
        return 0;

    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL) {
        OPENSSL_free(newmd);
        return 0;
    }
    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;

    /* Swap the old and new mutable data sets */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);

    /* Set the number of entries to 0 */
    h->wpd.value_count = 0;
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;

    /* defer freeing the old snapshot until readers drain */
    ossl_rcu_call(h->lock, free_oldmd, oldmd);
    h->wpd.need_sync = 1;
    return 1;
}
/* Public flush entry point; see ossl_ht_flush_internal */
int ossl_ht_flush(HT *h)
{
    return ossl_ht_flush_internal(h);
}
/*
 * Destroy the table: flush all values (their frees are deferred via
 * RCU and completed by the write unlock / lock free below), then
 * release the locks and top-level structures.  NULL-safe.
 */
void ossl_ht_free(HT *h)
{
    if (h == NULL)
        return;

    ossl_ht_write_lock(h);
    ossl_ht_flush_internal(h);
    ossl_ht_write_unlock(h);
    /* Freeing the lock does a final sync for us */
    CRYPTO_THREAD_lock_free(h->atomic_lock);
    ossl_rcu_lock_free(h->lock);
    /* h->md here is the empty set installed by the flush above */
    OPENSSL_free(h->md->neighborhood_ptr_to_free);
    OPENSSL_free(h->md);
    OPENSSL_free(h);
    return;
}
  291. size_t ossl_ht_count(HT *h)
  292. {
  293. size_t count;
  294. count = h->wpd.value_count;
  295. return count;
  296. }
  297. void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg),
  298. void *arg)
  299. {
  300. size_t i, j;
  301. struct ht_mutable_data_st *md;
  302. md = ossl_rcu_deref(&h->md);
  303. for (i = 0; i < md->neighborhood_mask + 1; i++) {
  304. PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
  305. for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
  306. if (md->neighborhoods[i].entries[j].value != NULL) {
  307. if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg))
  308. goto out;
  309. }
  310. }
  311. }
  312. out:
  313. return;
  314. }
/*
 * Collect up to max_len values for which filter() returns nonzero into
 * a freshly allocated HT_VALUE_LIST.  The caller frees the result with
 * ossl_ht_value_list_free().  Returns NULL on allocation failure.
 * NOTE(review): the returned pointers reference live table values;
 * presumably only valid while the caller holds the read lock — confirm.
 */
HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len,
                              int (*filter)(HT_VALUE *obj, void *arg),
                              void *arg)
{
    struct ht_mutable_data_st *md;
    /* single allocation: struct followed by the pointer array */
    HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST)
                                         + (sizeof(HT_VALUE *) * max_len));
    size_t i, j;
    struct ht_internal_value_st *v;

    if (list == NULL)
        return NULL;

    /*
     * The list array lives just beyond the end of
     * the struct
     */
    list->list = (HT_VALUE **)(list + 1);

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        /* prefetch the next neighborhood while scanning this one */
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            v = md->neighborhoods[i].entries[j].value;
            if (v != NULL && filter((HT_VALUE *)v, arg)) {
                list->list[list->list_len++] = (HT_VALUE *)v;
                if (list->list_len == max_len)
                    goto out;
            }
        }
    }
out:
    return list;
}
/*
 * Free a list returned by ossl_ht_filter.  The embedded pointer array
 * shares the same allocation, so one free releases everything.
 */
void ossl_ht_value_list_free(HT_VALUE_LIST *list)
{
    OPENSSL_free(list);
}
  350. static int compare_hash(uint64_t hash1, uint64_t hash2)
  351. {
  352. return (hash1 == hash2);
  353. }
/*
 * RCU callback used after growing: frees only the retired bucket array
 * and its mutable-data wrapper.  The values themselves were moved to
 * the new table, so they are NOT freed here (contrast free_oldmd).
 */
static void free_old_neigh_table(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;

    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}
  360. /*
  361. * Increase hash table bucket list
  362. * must be called with write_lock held
  363. */
  364. static int grow_hashtable(HT *h, size_t oldsize)
  365. {
  366. struct ht_mutable_data_st *newmd = OPENSSL_zalloc(sizeof(*newmd));
  367. struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md);
  368. int rc = 0;
  369. uint64_t oldi, oldj, newi, newj;
  370. uint64_t oldhash;
  371. struct ht_internal_value_st *oldv;
  372. int rehashed;
  373. size_t newsize = oldsize * 2;
  374. if (newmd == NULL)
  375. goto out;
  376. /* bucket list is always a power of 2 */
  377. newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2,
  378. &newmd->neighborhood_ptr_to_free);
  379. if (newmd->neighborhoods == NULL)
  380. goto out_free;
  381. /* being a power of 2 makes for easy mask computation */
  382. newmd->neighborhood_mask = (newsize - 1);
  383. /*
  384. * Now we need to start rehashing entries
  385. * Note we don't need to use atomics here as the new
  386. * mutable data hasn't been published
  387. */
  388. for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) {
  389. PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]);
  390. for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) {
  391. oldv = oldmd->neighborhoods[oldi].entries[oldj].value;
  392. if (oldv == NULL)
  393. continue;
  394. oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash;
  395. newi = oldhash & newmd->neighborhood_mask;
  396. rehashed = 0;
  397. for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) {
  398. if (newmd->neighborhoods[newi].entries[newj].value == NULL) {
  399. newmd->neighborhoods[newi].entries[newj].value = oldv;
  400. newmd->neighborhoods[newi].entries[newj].hash = oldhash;
  401. rehashed = 1;
  402. break;
  403. }
  404. }
  405. if (rehashed == 0) {
  406. /* we ran out of space in a neighborhood, grow again */
  407. OPENSSL_free(newmd->neighborhoods);
  408. OPENSSL_free(newmd);
  409. return grow_hashtable(h, newsize);
  410. }
  411. }
  412. }
  413. /*
  414. * Now that our entries are all hashed into the new bucket list
  415. * update our bucket_len and target_max_load
  416. */
  417. h->wpd.neighborhood_len = newsize;
  418. /*
  419. * Now we replace the old mutable data with the new
  420. */
  421. oldmd = ossl_rcu_deref(&h->md);
  422. ossl_rcu_assign_ptr(&h->md, &newmd);
  423. ossl_rcu_call(h->lock, free_old_neigh_table, oldmd);
  424. h->wpd.need_sync = 1;
  425. /*
  426. * And we're done
  427. */
  428. rc = 1;
  429. out:
  430. return rc;
  431. out_free:
  432. OPENSSL_free(newmd->neighborhoods);
  433. OPENSSL_free(newmd);
  434. goto out;
  435. }
/*
 * RCU callback freeing a value wrapper displaced by a replacement
 * insert, after readers drain.
 */
static void free_old_ht_value(void *arg)
{
    HT_VALUE *h = (HT_VALUE *)arg;

    /*
     * Note, this is only called on replacement,
     * the caller is responsible for freeing the
     * held data, we just need to free the wrapping
     * struct here
     */
    OPENSSL_free(h);
}
  447. static int ossl_ht_insert_locked(HT *h, uint64_t hash,
  448. struct ht_internal_value_st *newval,
  449. HT_VALUE **olddata)
  450. {
  451. struct ht_mutable_data_st *md = h->md;
  452. uint64_t neigh_idx = hash & md->neighborhood_mask;
  453. size_t j;
  454. uint64_t ihash;
  455. HT_VALUE *ival;
  456. size_t empty_idx = SIZE_MAX;
  457. PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
  458. for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
  459. ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
  460. CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
  461. &ihash, h->atomic_lock);
  462. if (ival == NULL)
  463. empty_idx = j;
  464. if (compare_hash(hash, ihash)) {
  465. if (olddata == NULL) {
  466. /* invalid */
  467. return 0;
  468. }
  469. /* Do a replacement */
  470. CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
  471. hash, h->atomic_lock);
  472. *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
  473. ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
  474. &newval);
  475. ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
  476. h->wpd.need_sync = 1;
  477. return 1;
  478. }
  479. }
  480. /* If we get to here, its just an insert */
  481. if (empty_idx == SIZE_MAX)
  482. return -1; /* out of space */
  483. h->wpd.value_count++;
  484. CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
  485. hash, h->atomic_lock);
  486. ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
  487. &newval);
  488. return 1;
  489. }
/*
 * Allocate and populate a value wrapper for insertion.
 * The key parameter is currently unused here (the hash is computed by
 * the caller).  Returns NULL on allocation failure.
 */
static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key,
                                                    void *data,
                                                    uintptr_t *type)
{
    struct ht_internal_value_st *new;
    struct ht_internal_value_st *tmp;

    new = OPENSSL_malloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    /*
     * NOTE(review): writing through an ossl_rcu_deref of the fresh
     * pointer — presumably to keep thread sanitizers happy about the
     * later RCU publication; confirm against the rcu implementation.
     */
    tmp = (struct ht_internal_value_st *)ossl_rcu_deref(&new);
    tmp->ht = h;
    tmp->value.value = data;
    tmp->value.type_id = type;

    return tmp;
}
/* Free a value wrapper (not the user data it points to) */
static void free_value(struct ht_internal_value_st *v)
{
    OPENSSL_free(v);
}
  509. int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
  510. {
  511. struct ht_internal_value_st *newval = NULL;
  512. uint64_t hash;
  513. int rc = 0;
  514. if (data->value == NULL)
  515. goto out;
  516. newval = alloc_new_value(h, key, data->value, data->type_id);
  517. if (newval == NULL)
  518. goto out;
  519. /*
  520. * we have to take our lock here to prevent other changes
  521. * to the bucket list
  522. */
  523. hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
  524. try_again:
  525. rc = ossl_ht_insert_locked(h, hash, newval, olddata);
  526. if (rc == -1) {
  527. grow_hashtable(h, h->wpd.neighborhood_len);
  528. goto try_again;
  529. }
  530. if (rc == 0)
  531. free_value(newval);
  532. out:
  533. return rc;
  534. }
  535. HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
  536. {
  537. struct ht_mutable_data_st *md;
  538. uint64_t hash;
  539. uint64_t neigh_idx;
  540. struct ht_internal_value_st *vidx = NULL;
  541. size_t j;
  542. uint64_t ehash;
  543. HT_VALUE *ret = NULL;
  544. hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
  545. md = ossl_rcu_deref(&h->md);
  546. neigh_idx = hash & md->neighborhood_mask;
  547. PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
  548. for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
  549. CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
  550. &ehash, h->atomic_lock);
  551. if (compare_hash(hash, ehash)) {
  552. CRYPTO_atomic_load((uint64_t *)&md->neighborhoods[neigh_idx].entries[j].value,
  553. (uint64_t *)&vidx, h->atomic_lock);
  554. ret = (HT_VALUE *)vidx;
  555. break;
  556. }
  557. }
  558. return ret;
  559. }
/*
 * RCU callback for delete: invoke the user free function on the
 * removed value, then free the wrapper, after readers drain.
 */
static void free_old_entry(void *arg)
{
    struct ht_internal_value_st *v = arg;

    v->ht->config.ht_free_fn((HT_VALUE *)v);
    free_value(v);
}
  566. int ossl_ht_delete(HT *h, HT_KEY *key)
  567. {
  568. uint64_t hash;
  569. uint64_t neigh_idx;
  570. size_t j;
  571. struct ht_internal_value_st *v = NULL;
  572. HT_VALUE *nv = NULL;
  573. int rc = 0;
  574. hash = h->config.ht_hash_fn(key->keybuf, key->keysize);
  575. neigh_idx = hash & h->md->neighborhood_mask;
  576. PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
  577. for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
  578. if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)) {
  579. h->wpd.value_count--;
  580. CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
  581. 0, h->atomic_lock);
  582. v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
  583. ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
  584. &nv);
  585. rc = 1;
  586. break;
  587. }
  588. }
  589. if (rc == 1) {
  590. ossl_rcu_call(h->lock, free_old_entry, v);
  591. h->wpd.need_sync = 1;
  592. }
  593. return rc;
  594. }