/* linuxkm_memory.c
 *
 * Copyright (C) 2006-2023 wolfSSL Inc.
 *
 * This file is part of wolfSSL.
 *
 * wolfSSL is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * wolfSSL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
 */

/* included by wolfcrypt/src/memory.c */
#ifdef HAVE_KVMALLOC
/* adapted from kvrealloc() draft by Changli Gao, 2010-05-13 */
void *lkm_realloc(void *ptr, size_t newsize) {
    void *nptr;
    size_t oldsize;

    if (unlikely(newsize == 0)) {
        kvfree(ptr);
        return ZERO_SIZE_PTR;
    }

    if (unlikely(ptr == NULL))
        return kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);

    if (is_vmalloc_addr(ptr)) {
        /* no way to discern the size of the old allocation,
         * because the kernel doesn't export find_vm_area(). if
         * it did, we could then call get_vm_area_size() on the
         * returned struct vm_struct.
         */
        return NULL;
    } else {
#ifndef __PIE__
        struct page *page;

        page = virt_to_head_page(ptr);
        if (PageSlab(page) || PageCompound(page)) {
            if (newsize < PAGE_SIZE)
#endif /* ! __PIE__ */
                return krealloc(ptr, newsize, GFP_KERNEL);
#ifndef __PIE__
            oldsize = ksize(ptr);
        } else {
            oldsize = page->private;
            if (newsize <= oldsize)
                return ptr;
        }
#endif /* ! __PIE__ */
    }

    nptr = kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
    if (nptr != NULL) {
        memcpy(nptr, ptr, oldsize);
        kvfree(ptr);
    }

    return nptr;
}
#endif /* HAVE_KVMALLOC */
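
/* Illustrative usage sketch (the caller and sizes below are hypothetical):
 * lkm_realloc() follows realloc() semantics for kmalloc/kvmalloc-backed
 * pointers, but cannot resize a vmalloc-backed pointer -- in that case it
 * returns NULL and leaves the old allocation intact, since find_vm_area()
 * is unavailable to modules.  The old allocation is likewise untouched when
 * the new allocation fails:
 *
 *     byte *buf = (byte *)kvmalloc_node(64, GFP_KERNEL, NUMA_NO_NODE);
 *     byte *bigger;
 *     // ... use buf ...
 *     bigger = (byte *)lkm_realloc(buf, 256);
 *     if (bigger == NULL) {
 *         kvfree(buf);   // on failure the original block is still valid
 *         return MEMORY_E;
 *     }
 *     buf = bigger;
 */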
#if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)

/* kernel 4.19 -- the most recent LTS before 5.4 -- lacks the necessary safety
 * checks in __kernel_fpu_begin(), and lacks TIF_NEED_FPU_LOAD.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
#error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS on x86 requires kernel 5.4.0 or higher.
#endif

static unsigned int wc_linuxkm_fpu_states_n_tracked = 0;

struct wc_thread_fpu_count_ent {
    volatile pid_t pid;
    unsigned int fpu_state;
};
struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_states = NULL;

#ifdef WOLFSSL_COMMERCIAL_LICENSE

#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
#error WOLFSSL_COMMERCIAL_LICENSE requires LINUXKM_FPU_STATES_FOLLOW_THREADS
#endif

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wnested-externs"
/* avoid dependence on "alternatives_patched" and "xfd_validate_state()". */
#undef CONFIG_X86_DEBUG_FPU
#include "../kernel/fpu/internal.h"
#include "../kernel/fpu/xstate.h"
#pragma GCC diagnostic pop

static union wc_linuxkm_fpu_savebuf {
    byte buf[1024]; /* must be 64-byte-aligned */
    struct fpstate fpstate;
} *wc_linuxkm_fpu_savebufs = NULL;

#endif /* WOLFSSL_COMMERCIAL_LICENSE */

#define WC_FPU_COUNT_MASK 0x7fffffffU
#define WC_FPU_SAVED_MASK 0x80000000U
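
/* Each entry's fpu_state word encodes two things, per the masks above: the
 * low 31 bits (WC_FPU_COUNT_MASK) hold the recursion depth of nested
 * save_vector_registers_x86() calls, and the top bit (WC_FPU_SAVED_MASK)
 * records that the kernel FPU context was already held when the outermost
 * save was entered, so kernel_fpu_end() must be skipped at final restore.
 */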
WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
{
    if (wc_linuxkm_fpu_states != NULL) {
        static int warned_for_repeat_alloc = 0;
        if (! warned_for_repeat_alloc) {
            pr_err("attempt at repeat allocation"
                   " in allocate_wolfcrypt_linuxkm_fpu_states\n");
            warned_for_repeat_alloc = 1;
        }
        return BAD_STATE_E;
    }

#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
    if (nr_cpu_ids >= 16)
        wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids * 2;
    else
        wc_linuxkm_fpu_states_n_tracked = 32;
#else
    wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids;
#endif

    wc_linuxkm_fpu_states =
        (struct wc_thread_fpu_count_ent *)malloc(
            wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));

    if (! wc_linuxkm_fpu_states) {
        pr_err("allocation of %lu bytes for "
               "wc_linuxkm_fpu_states failed.\n",
               wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));
        return MEMORY_E;
    }

    memset(wc_linuxkm_fpu_states, 0, wc_linuxkm_fpu_states_n_tracked
           * sizeof(wc_linuxkm_fpu_states[0]));

#ifdef WOLFSSL_COMMERCIAL_LICENSE
    wc_linuxkm_fpu_savebufs = (union wc_linuxkm_fpu_savebuf *)malloc(
        wc_linuxkm_fpu_states_n_tracked * sizeof(*wc_linuxkm_fpu_savebufs));
    if (! wc_linuxkm_fpu_savebufs) {
        pr_err("allocation of %lu bytes for "
               "wc_linuxkm_fpu_savebufs failed.\n",
               WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
               * sizeof(*wc_linuxkm_fpu_savebufs));
        free(wc_linuxkm_fpu_states);
        wc_linuxkm_fpu_states = NULL;
        return MEMORY_E;
    }
    if ((uintptr_t)wc_linuxkm_fpu_savebufs
        & (WC_LINUXKM_ROUND_UP_P_OF_2(sizeof(*wc_linuxkm_fpu_savebufs)) - 1))
    {
        pr_err("allocation of %lu bytes for "
               "wc_linuxkm_fpu_savebufs allocated with wrong alignment 0x%lx.\n",
               WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
               * sizeof(*wc_linuxkm_fpu_savebufs),
               (uintptr_t)wc_linuxkm_fpu_savebufs);
        free(wc_linuxkm_fpu_savebufs);
        wc_linuxkm_fpu_savebufs = NULL;
        free(wc_linuxkm_fpu_states);
        wc_linuxkm_fpu_states = NULL;
        return MEMORY_E;
    }
#endif

    return 0;
}
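
/* Sizing note: in LINUXKM_FPU_STATES_FOLLOW_THREADS mode entries are keyed by
 * pid, so the table is sized generously (at least 32 slots, or twice the CPU
 * count) to accommodate concurrent threads; in the default mode exactly one
 * entry per possible CPU is needed, since lookups index the table directly
 * by raw_smp_processor_id().
 */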
void free_wolfcrypt_linuxkm_fpu_states(void) {
    struct wc_thread_fpu_count_ent *i, *i_endptr;
    pid_t i_pid;

    if (wc_linuxkm_fpu_states == NULL) {
        pr_err("free_wolfcrypt_linuxkm_fpu_states called"
               " before allocate_wolfcrypt_linuxkm_fpu_states.\n");
        return;
    }

    for (i = wc_linuxkm_fpu_states,
             i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
         i < i_endptr;
         ++i)
    {
        i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
        if (i_pid == 0)
            continue;
        if (i->fpu_state != 0) {
            pr_err("free_wolfcrypt_linuxkm_fpu_states called"
                   " with nonzero state 0x%x for pid %d.\n", i->fpu_state, i_pid);
            i->fpu_state = 0;
        }
    }

#ifdef WOLFSSL_COMMERCIAL_LICENSE
    free(wc_linuxkm_fpu_savebufs);
    wc_linuxkm_fpu_savebufs = NULL;
#endif
    free(wc_linuxkm_fpu_states);
    wc_linuxkm_fpu_states = NULL;
}
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS

/* legacy thread-local storage facility for tracking recursive fpu
 * pushing/popping
 */
static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(int create_p) {
    struct wc_thread_fpu_count_ent *i, *i_endptr, *i_empty;
    pid_t my_pid = task_pid_nr(current), i_pid;

    {
        static int _warned_on_null = 0;
        if (wc_linuxkm_fpu_states == NULL)
        {
            if (_warned_on_null == 0) {
                pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
                       " before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
                _warned_on_null = 1;
            }
            return NULL;
        }
    }

    i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];

    for (;;) {
        for (i = wc_linuxkm_fpu_states,
                 i_empty = NULL;
             i < i_endptr;
             ++i)
        {
            i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
            if (i_pid == my_pid)
                return i;
            if ((i_empty == NULL) && (i_pid == 0))
                i_empty = i;
        }
        if ((i_empty == NULL) || (! create_p))
            return NULL;

        /* try to claim the first free slot; if another thread wins the race,
         * rescan from the top.
         */
        i_pid = 0;
        if (__atomic_compare_exchange_n(
                &(i_empty->pid),
                &i_pid,
                my_pid,
                0 /* weak */,
                __ATOMIC_SEQ_CST /* success_memmodel */,
                __ATOMIC_SEQ_CST /* failure_memmodel */))
        {
            return i_empty;
        }
    }
}
#else /* !LINUXKM_FPU_STATES_FOLLOW_THREADS */

/* lock-free O(1)-lookup CPU-local storage facility for tracking recursive fpu
 * pushing/popping.
 *
 * caller must have already called kernel_fpu_begin() or preempt_disable()
 * before entering this or the streamlined inline version of it below.
 */
static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int create_p) {
    int my_cpu = raw_smp_processor_id();
    pid_t my_pid = task_pid_nr(current), slot_pid;
    struct wc_thread_fpu_count_ent *slot;

    {
        static int _warned_on_null = 0;
        if (wc_linuxkm_fpu_states == NULL)
        {
            if (_warned_on_null == 0) {
                pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
                       " before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
                _warned_on_null = 1;
            }
            return NULL;
        }
    }

    slot = &wc_linuxkm_fpu_states[my_cpu];
    slot_pid = __atomic_load_n(&slot->pid, __ATOMIC_CONSUME);
    if (slot_pid == my_pid) {
        if (create_p) {
            static int _warned_on_redundant_create_p = 0;
            if (_warned_on_redundant_create_p < 10) {
                pr_err("wc_linuxkm_fpu_state_assoc called with create_p=1 by"
                       " pid %d on cpu %d with cpu slot already reserved by"
                       " said pid.\n", my_pid, my_cpu);
                ++_warned_on_redundant_create_p;
            }
        }
        return slot;
    }

    if (create_p) {
        if (slot_pid == 0) {
            __atomic_store_n(&slot->pid, my_pid, __ATOMIC_RELEASE);
            return slot;
        } else {
            /* if the slot is already occupied, that can be benign due to a
             * migration, but it will require fixup by the thread that owns the
             * slot, which will happen when it releases its lock, or sooner
             * (see below).
             */
            static int _warned_on_mismatched_pid = 0;
            if (_warned_on_mismatched_pid < 10) {
                pr_warn("wc_linuxkm_fpu_state_assoc called by pid %d on cpu %d"
                        " but cpu slot already reserved by pid %d.\n",
                        my_pid, my_cpu, slot_pid);
                ++_warned_on_mismatched_pid;
            }
            return NULL;
        }
    } else {
        /* check for migration. this can happen despite our best efforts if any
         * I/O occurred while locked, e.g. kernel messages like "uninitialized
         * urandom read". since we're locked now, we can safely migrate the
         * entry in wc_linuxkm_fpu_states[], freeing up the slot on the
         * previous cpu.
         */
        unsigned int cpu_i;
        for (cpu_i = 0; cpu_i < wc_linuxkm_fpu_states_n_tracked; ++cpu_i) {
            if (__atomic_load_n(
                    &wc_linuxkm_fpu_states[cpu_i].pid,
                    __ATOMIC_CONSUME)
                == my_pid)
            {
                wc_linuxkm_fpu_states[my_cpu] = wc_linuxkm_fpu_states[cpu_i];
                __atomic_store_n(&wc_linuxkm_fpu_states[cpu_i].fpu_state, 0,
                                 __ATOMIC_RELEASE);
                __atomic_store_n(&wc_linuxkm_fpu_states[cpu_i].pid, 0,
                                 __ATOMIC_RELEASE);
                return &wc_linuxkm_fpu_states[my_cpu];
            }
        }
        return NULL;
    }
}
static inline struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(
    int create_p)
{
    int my_cpu = raw_smp_processor_id(); /* my_cpu is only trustworthy if we're
                                          * already nonpreemptible -- we'll
                                          * determine that soon enough by
                                          * checking if the pid matches or,
                                          * failing that, if create_p.
                                          */
    pid_t my_pid = task_pid_nr(current), slot_pid;
    struct wc_thread_fpu_count_ent *slot;

    if (unlikely(wc_linuxkm_fpu_states == NULL))
        return wc_linuxkm_fpu_state_assoc_unlikely(create_p);

    slot = &wc_linuxkm_fpu_states[my_cpu];
    slot_pid = __atomic_load_n(&slot->pid, __ATOMIC_CONSUME);
    if (slot_pid == my_pid) {
        if (unlikely(create_p))
            return wc_linuxkm_fpu_state_assoc_unlikely(create_p);
        else
            return slot;
    }
    if (likely(create_p)) {
        if (likely(slot_pid == 0)) {
            __atomic_store_n(&slot->pid, my_pid, __ATOMIC_RELEASE);
            return slot;
        } else {
            return wc_linuxkm_fpu_state_assoc_unlikely(create_p);
        }
    } else {
        return wc_linuxkm_fpu_state_assoc_unlikely(create_p);
    }
}

#endif /* !LINUXKM_FPU_STATES_FOLLOW_THREADS */
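
/* Design note: the inline wc_linuxkm_fpu_state_assoc() above handles only the
 * expected cases -- claiming a free per-CPU slot on create, or finding the
 * slot already owned by the calling pid on lookup -- and defers everything
 * surprising (NULL table, redundant create, contention from another pid,
 * post-migration fixup) to the out-of-line _unlikely() variant, keeping the
 * hot path short and branch-predictable.
 */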
#ifdef WOLFSSL_COMMERCIAL_LICENSE
static struct fpstate *wc_linuxkm_fpstate_buf_from_fpu_state(
    struct wc_thread_fpu_count_ent *state)
{
    /* pointer subtraction on struct pointers already yields an element index,
     * so no further division by sizeof(*state) is needed.
     */
    size_t i = (size_t)(state - wc_linuxkm_fpu_states);
    return &wc_linuxkm_fpu_savebufs[i].fpstate;
}
#endif

static void wc_linuxkm_fpu_state_release_unlikely(
    struct wc_thread_fpu_count_ent *ent)
{
    if (ent->fpu_state != 0) {
        static int warned_nonzero_fpu_state = 0;
        if (! warned_nonzero_fpu_state) {
            pr_err("wc_linuxkm_fpu_state_free for pid %d"
                   " with nonzero fpu_state 0x%x.\n", ent->pid, ent->fpu_state);
            warned_nonzero_fpu_state = 1;
        }
        ent->fpu_state = 0;
    }
    __atomic_store_n(&ent->pid, 0, __ATOMIC_RELEASE);
}

static inline void wc_linuxkm_fpu_state_release(
    struct wc_thread_fpu_count_ent *ent)
{
    if (unlikely(ent->fpu_state != 0))
        return wc_linuxkm_fpu_state_release_unlikely(ent);
    __atomic_store_n(&ent->pid, 0, __ATOMIC_RELEASE);
}
WARN_UNUSED_RESULT int save_vector_registers_x86(void)
{
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
    struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(1);
#else
    struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0);
#endif

    /* allow for nested calls */
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
    if (pstate == NULL)
        return MEMORY_E;
#endif
    if (
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
        (pstate != NULL) &&
#endif
        (pstate->fpu_state != 0U))
    {
        if (unlikely((pstate->fpu_state & WC_FPU_COUNT_MASK)
                     == WC_FPU_COUNT_MASK))
        {
            pr_err("save_vector_registers_x86 recursion register overflow for "
                   "pid %d.\n", pstate->pid);
            return BAD_STATE_E;
        } else {
            ++pstate->fpu_state;
            return 0;
        }
    }

    if (irq_fpu_usable()
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
        /* work around a kernel bug -- see linux commit 59f5ede3bc0f0.
         * what we really want here is this_cpu_read(in_kernel_fpu), but
         * in_kernel_fpu is an unexported static array.
         */
        && !test_thread_flag(TIF_NEED_FPU_LOAD)
#endif
        )
    {
#ifdef WOLFSSL_COMMERCIAL_LICENSE
        struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
        fpregs_lock();
        fpstate->xfeatures = ~0UL;
        os_xsave(fpstate);
#else /* !WOLFSSL_COMMERCIAL_LICENSE */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
        /* inhibit migration, which gums up the algorithm in
         * kernel_fpu_{begin,end}().
         */
        migrate_disable();
#endif
        kernel_fpu_begin();

#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
        pstate = wc_linuxkm_fpu_state_assoc(1);
        if (pstate == NULL) {
            kernel_fpu_end();
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
    !defined(WOLFSSL_COMMERCIAL_LICENSE)
            migrate_enable();
#endif
            return BAD_STATE_E;
        }
#endif
#endif /* !WOLFSSL_COMMERCIAL_LICENSE */

        /* set msb to 0 to trigger kernel_fpu_end() at cleanup. */
        pstate->fpu_state = 1U;
    } else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0)) {
        static int warned_fpu_forbidden = 0;
        if (! warned_fpu_forbidden) {
            pr_err("save_vector_registers_x86 called from IRQ handler.\n");
            warned_fpu_forbidden = 1;
        }
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
        wc_linuxkm_fpu_state_release(pstate);
#endif
        return BAD_STATE_E;
    } else if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
        static int warned_fpu_forbidden = 0;
        if (! warned_fpu_forbidden) {
            pr_err("save_vector_registers_x86 called with !irq_fpu_usable from"
                   " thread without previous FPU save.\n");
            warned_fpu_forbidden = 1;
        }
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
        wc_linuxkm_fpu_state_release(pstate);
#endif
        return BAD_STATE_E;
    } else {
        /* assume already safely in_kernel_fpu from caller, but recursively
         * preempt_disable() to be extra-safe.
         */
        preempt_disable();
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
    !defined(WOLFSSL_COMMERCIAL_LICENSE)
        migrate_disable();
#endif
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
        pstate = wc_linuxkm_fpu_state_assoc(1);
        if (pstate == NULL) {
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
    !defined(WOLFSSL_COMMERCIAL_LICENSE)
            migrate_enable();
#endif
            preempt_enable();
            return BAD_STATE_E;
        }
#endif
        /* set msb to 1 to inhibit kernel_fpu_end() at cleanup. */
        pstate->fpu_state =
            WC_FPU_SAVED_MASK + 1U;
    }

    return 0;
}
void restore_vector_registers_x86(void)
{
    struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0);
    if (unlikely(pstate == NULL)) {
        pr_err("restore_vector_registers_x86 called by pid %d on CPU %d "
               "with no saved state.\n", task_pid_nr(current),
               raw_smp_processor_id());
        return;
    }
    if ((--pstate->fpu_state & WC_FPU_COUNT_MASK) > 0U) {
        return;
    }

    if (pstate->fpu_state == 0U) {
#ifdef WOLFSSL_COMMERCIAL_LICENSE
        struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
        os_xrstor(fpstate, fpstate->xfeatures);
        fpregs_unlock();
#else
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
        wc_linuxkm_fpu_state_release(pstate);
#endif
        kernel_fpu_end();
#endif
    } else {
        pstate->fpu_state = 0U;
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
        wc_linuxkm_fpu_state_release(pstate);
#endif
        preempt_enable();
    }
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
    !defined(WOLFSSL_COMMERCIAL_LICENSE)
    migrate_enable();
#endif
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
    wc_linuxkm_fpu_state_release(pstate);
#endif
    return;
}
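
/* Illustrative call pattern (the usual callers are wolfCrypt's
 * SAVE_VECTOR_REGISTERS()/RESTORE_VECTOR_REGISTERS() wrappers; the helper
 * below is hypothetical): every successful save must be paired with exactly
 * one restore, and the return value must be checked, since FPU use can be
 * refused, e.g. in IRQ context:
 *
 *     static int do_aesni_work(void)
 *     {
 *         int ret = save_vector_registers_x86();
 *         if (ret != 0)
 *             return ret;   // BAD_STATE_E / MEMORY_E -- no vector regs here
 *         // ... AES-NI/AVX computation using vector registers ...
 *         restore_vector_registers_x86();
 *         return 0;
 *     }
 *
 * Nested saves from the same thread are cheap: they only bump the recursion
 * count in fpu_state and are unwound by the matching restores.
 */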
#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS && CONFIG_X86 */

#if defined(__PIE__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
/* needed in 6.1+ because show_free_areas() static definition in mm.h calls
 * __show_free_areas(), which isn't exported (neither was show_free_areas()).
 */
void my__show_free_areas(
    unsigned int flags,
    nodemask_t *nodemask,
    int max_zone_idx)
{
    (void)flags;
    (void)nodemask;
    (void)max_zone_idx;
    return;
}
#endif

#if defined(__PIE__) && defined(CONFIG_FORTIFY_SOURCE)
/* needed because FORTIFY_SOURCE inline implementations call fortify_panic(). */
void __my_fortify_panic(const char *name) {
    pr_emerg("__my_fortify_panic in %s\n", name);
    BUG();
}
#endif