  1. /* linuxkm_memory.c
  2. *
  3. * Copyright (C) 2006-2023 wolfSSL Inc.
  4. *
  5. * This file is part of wolfSSL.
  6. *
  7. * wolfSSL is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * wolfSSL is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
  20. */
  21. /* included by wolfcrypt/src/memory.c */
  22. #ifdef HAVE_KVMALLOC
  23. /* adapted from kvrealloc() draft by Changli Gao, 2010-05-13 */
/* Kernel-space realloc() for buffers allocated with kvmalloc_node()/krealloc().
 *
 * Mirrors user-space realloc() semantics:
 *   - newsize == 0 frees ptr and returns ZERO_SIZE_PTR (kmalloc(0) convention);
 *   - ptr == NULL degenerates to a fresh kvmalloc_node();
 *   - otherwise the data is grown/shrunk, preserving contents.
 *
 * Returns the (possibly moved) allocation, or NULL on failure.  Note that a
 * vmalloc-backed ptr always fails (see comment below), and on that path the
 * original allocation is NOT freed.
 */
void *lkm_realloc(void *ptr, size_t newsize) {
    void *nptr;
    size_t oldsize;

    if (unlikely(newsize == 0)) {
        kvfree(ptr);
        return ZERO_SIZE_PTR;
    }

    if (unlikely(ptr == NULL))
        return kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);

    if (is_vmalloc_addr(ptr)) {
        /* no way to discern the size of the old allocation,
         * because the kernel doesn't export find_vm_area(). if
         * it did, we could then call get_vm_area_size() on the
         * returned struct vm_struct.
         */
        return NULL;
    } else {
        /* Under __PIE__ the page-inspection helpers below aren't usable, so
         * the whole non-vmalloc path collapses to a plain krealloc() and the
         * copy path after this else-block is unreachable.
         */
#ifndef __PIE__
        struct page *page;

        page = virt_to_head_page(ptr);
        if (PageSlab(page) || PageCompound(page)) {
            if (newsize < PAGE_SIZE)
#endif /* ! __PIE__ */
                return krealloc(ptr, newsize, GFP_KERNEL);
#ifndef __PIE__
            /* slab/compound page growing past PAGE_SIZE: remember the old
             * usable size so the copy below knows how much to preserve.
             */
            oldsize = ksize(ptr);
        } else {
            /* non-slab page: the allocation size was stashed in
             * page->private by the allocator.
             */
            oldsize = page->private;
            if (newsize <= oldsize)
                return ptr; /* shrinking in place is free. */
        }
#endif /* ! __PIE__ */
    }

    /* Move to a fresh kvmalloc allocation, preserving the old contents. */
    nptr = kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
    if (nptr != NULL) {
        memcpy(nptr, ptr, oldsize);
        kvfree(ptr);
    }

    return nptr;
}
  64. #endif /* HAVE_KVMALLOC */
  65. #if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
/* Number of slots in wc_linuxkm_fpu_states[]; set once at allocation time. */
static unsigned int wc_linuxkm_fpu_states_n_tracked = 0;

/* One per-thread tracking slot: which pid owns it, and that thread's FPU
 * save-depth/state word (see WC_FPU_COUNT_MASK / WC_FPU_SAVED_MASK below).
 */
struct wc_thread_fpu_count_ent {
    volatile pid_t pid;          /* 0 == slot free; claimed via atomic CAS. */
    unsigned int fpu_state;
};
struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_states = NULL;

#ifdef WOLFSSL_COMMERCIAL_LICENSE

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wnested-externs"
/* avoid dependence on "alternatives_patched" and "xfd_validate_state()". */
#undef CONFIG_X86_DEBUG_FPU
#include "../kernel/fpu/internal.h"
#include "../kernel/fpu/xstate.h"
#pragma GCC diagnostic pop

/* Per-slot xsave area used instead of kernel_fpu_begin()/end(). */
static union wc_linuxkm_fpu_savebuf {
    byte buf[1024]; /* must be 64-byte-aligned */
    struct fpstate fpstate;
} *wc_linuxkm_fpu_savebufs = NULL;

#endif /* WOLFSSL_COMMERCIAL_LICENSE */

/* Low 31 bits: recursion depth of save_vector_registers_x86().
 * MSB: set when the registers were NOT saved by us (already in kernel fpu),
 * inhibiting kernel_fpu_end() at cleanup.
 */
#define WC_FPU_COUNT_MASK 0x7fffffffU
#define WC_FPU_SAVED_MASK 0x80000000U
  88. WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
  89. {
  90. if (wc_linuxkm_fpu_states != NULL) {
  91. static int warned_for_repeat_alloc = 0;
  92. if (! warned_for_repeat_alloc) {
  93. pr_err("attempt at repeat allocation"
  94. " in allocate_wolfcrypt_linuxkm_fpu_states\n");
  95. warned_for_repeat_alloc = 1;
  96. }
  97. return BAD_STATE_E;
  98. }
  99. if (nr_cpu_ids >= 16)
  100. wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids * 2;
  101. else
  102. wc_linuxkm_fpu_states_n_tracked = 32;
  103. wc_linuxkm_fpu_states =
  104. (struct wc_thread_fpu_count_ent *)malloc(
  105. wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));
  106. if (! wc_linuxkm_fpu_states) {
  107. pr_err("allocation of %lu bytes for "
  108. "wc_linuxkm_fpu_states failed.\n",
  109. nr_cpu_ids * sizeof(struct fpu_state *));
  110. return MEMORY_E;
  111. }
  112. memset(wc_linuxkm_fpu_states, 0, wc_linuxkm_fpu_states_n_tracked
  113. * sizeof(wc_linuxkm_fpu_states[0]));
  114. #ifdef WOLFSSL_COMMERCIAL_LICENSE
  115. wc_linuxkm_fpu_savebufs = (union wc_linuxkm_fpu_savebuf *)malloc(
  116. wc_linuxkm_fpu_states_n_tracked * sizeof(*wc_linuxkm_fpu_savebufs));
  117. if (! wc_linuxkm_fpu_savebufs) {
  118. pr_err("allocation of %lu bytes for "
  119. "wc_linuxkm_fpu_savebufs failed.\n",
  120. WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
  121. * sizeof(*wc_linuxkm_fpu_savebufs));
  122. free(wc_linuxkm_fpu_states);
  123. wc_linuxkm_fpu_states = NULL;
  124. return MEMORY_E;
  125. }
  126. if ((uintptr_t)wc_linuxkm_fpu_savebufs
  127. & (WC_LINUXKM_ROUND_UP_P_OF_2(sizeof(*wc_linuxkm_fpu_savebufs)) - 1))
  128. {
  129. pr_err("allocation of %lu bytes for "
  130. "wc_linuxkm_fpu_savebufs allocated with wrong alignment 0x%lx.\n",
  131. WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
  132. * sizeof(*wc_linuxkm_fpu_savebufs),
  133. (uintptr_t)wc_linuxkm_fpu_savebufs);
  134. free(wc_linuxkm_fpu_savebufs);
  135. wc_linuxkm_fpu_savebufs = NULL;
  136. free(wc_linuxkm_fpu_states);
  137. wc_linuxkm_fpu_states = NULL;
  138. return MEMORY_E;
  139. }
  140. #endif
  141. return 0;
  142. }
  143. void free_wolfcrypt_linuxkm_fpu_states(void) {
  144. struct wc_thread_fpu_count_ent *i, *i_endptr;
  145. pid_t i_pid;
  146. if (wc_linuxkm_fpu_states == NULL) {
  147. pr_err("free_wolfcrypt_linuxkm_fpu_states called"
  148. " before allocate_wolfcrypt_linuxkm_fpu_states.\n");
  149. return;
  150. }
  151. for (i = wc_linuxkm_fpu_states,
  152. i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
  153. i < i_endptr;
  154. ++i)
  155. {
  156. i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
  157. if (i_pid == 0)
  158. continue;
  159. if (i->fpu_state != 0) {
  160. pr_err("free_wolfcrypt_linuxkm_fpu_states called"
  161. " with nonzero state 0x%x for pid %d.\n", i->fpu_state, i_pid);
  162. i->fpu_state = 0;
  163. }
  164. }
  165. #ifdef WOLFSSL_COMMERCIAL_LICENSE
  166. free(wc_linuxkm_fpu_savebufs);
  167. wc_linuxkm_fpu_savebufs = NULL;
  168. #endif
  169. free(wc_linuxkm_fpu_states);
  170. wc_linuxkm_fpu_states = NULL;
  171. }
/* lock-(mostly)-free thread-local storage facility for tracking recursive fpu
 * pushing/popping
 */
/* Find (or, if create_p, atomically claim) the tracking slot for the calling
 * thread.  Lookup is a linear scan keyed on pid; claiming races are resolved
 * with a CAS on the slot's pid, retrying the whole scan on CAS failure.
 * Returns NULL if the table isn't allocated, the pid isn't tracked and
 * create_p is false, or (create_p) the table is full with no free slot.
 */
static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(int create_p) {
    struct wc_thread_fpu_count_ent *i, *i_endptr, *i_empty;
    pid_t my_pid = task_pid_nr(current), i_pid;

    {
        static int _warned_on_null = 0;
        if (wc_linuxkm_fpu_states == NULL)
        {
            if (_warned_on_null == 0) {
                pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
                       " before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
                _warned_on_null = 1;
            }
            return NULL;
        }
    }

    i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];

    for (;;) {
        /* Scan for our pid, remembering the first free (pid == 0) slot in
         * case we need to claim one.
         */
        for (i = wc_linuxkm_fpu_states,
                 i_empty = NULL;
             i < i_endptr;
             ++i)
        {
            i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
            if (i_pid == my_pid)
                return i;
            if ((i_empty == NULL) && (i_pid == 0))
                i_empty = i;
        }
        if ((i_empty == NULL) || (! create_p))
            return NULL;

        /* Claim the free slot iff it is still free (expected value 0).  On a
         * lost race, loop back and rescan -- another thread may even have
         * registered our pid meanwhile.
         */
        i_pid = 0;
        if (__atomic_compare_exchange_n(
                &(i_empty->pid),
                &i_pid,
                my_pid,
                0 /* weak */,
                __ATOMIC_SEQ_CST /* success_memmodel */,
                __ATOMIC_SEQ_CST /* failure_memmodel */))
        {
            return i_empty;
        }
    }
}
  218. #ifdef WOLFSSL_COMMERCIAL_LICENSE
  219. static struct fpstate *wc_linuxkm_fpstate_buf_from_fpu_state(
  220. struct wc_thread_fpu_count_ent *state)
  221. {
  222. size_t i = (size_t)(state - wc_linuxkm_fpu_states) / sizeof(*state);
  223. return &wc_linuxkm_fpu_savebufs[i].fpstate;
  224. }
  225. #endif
  226. static void wc_linuxkm_fpu_state_release(struct wc_thread_fpu_count_ent *ent) {
  227. if (ent->fpu_state != 0) {
  228. static int warned_nonzero_fpu_state = 0;
  229. if (! warned_nonzero_fpu_state) {
  230. pr_err("wc_linuxkm_fpu_state_free for pid %d"
  231. " with nonzero fpu_state 0x%x.\n", ent->pid, ent->fpu_state);
  232. warned_nonzero_fpu_state = 1;
  233. }
  234. ent->fpu_state = 0;
  235. }
  236. __atomic_store_n(&ent->pid, 0, __ATOMIC_RELEASE);
  237. }
  238. WARN_UNUSED_RESULT int save_vector_registers_x86(void)
  239. {
  240. struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(1);
  241. if (pstate == NULL)
  242. return MEMORY_E;
  243. /* allow for nested calls */
  244. if (pstate->fpu_state != 0U) {
  245. if ((pstate->fpu_state & WC_FPU_COUNT_MASK)
  246. == WC_FPU_COUNT_MASK)
  247. {
  248. pr_err("save_vector_registers_x86 recursion register overflow for "
  249. "pid %d.\n", pstate->pid);
  250. return BAD_STATE_E;
  251. } else {
  252. ++pstate->fpu_state;
  253. return 0;
  254. }
  255. }
  256. if (irq_fpu_usable()) {
  257. #ifdef WOLFSSL_COMMERCIAL_LICENSE
  258. struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
  259. fpregs_lock();
  260. fpstate->xfeatures = ~0UL;
  261. os_xsave(fpstate);
  262. #else /* !WOLFSSL_COMMERCIAL_LICENSE */
  263. #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
  264. (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
  265. /* inhibit migration, which gums up the algorithm in
  266. * kernel_fpu_{begin,end}().
  267. */
  268. migrate_disable();
  269. #endif
  270. kernel_fpu_begin();
  271. #endif /* !WOLFSSL_COMMERCIAL_LICENSE */
  272. /* set msb 0 to trigger kernel_fpu_end() at cleanup. */
  273. pstate->fpu_state = 1U;
  274. } else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0)) {
  275. static int warned_fpu_forbidden = 0;
  276. if (! warned_fpu_forbidden)
  277. pr_err("save_vector_registers_x86 called from IRQ handler.\n");
  278. wc_linuxkm_fpu_state_release(pstate);
  279. return BAD_STATE_E;
  280. } else {
  281. #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
  282. (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
  283. !defined(WOLFSSL_COMMERCIAL_LICENSE)
  284. migrate_disable();
  285. #endif
  286. /* assume already safely in_kernel_fpu. */
  287. /* set msb 1 to inhibit kernel_fpu_end() at cleanup. */
  288. pstate->fpu_state =
  289. WC_FPU_SAVED_MASK + 1U;
  290. }
  291. return 0;
  292. }
/* Leave a vector-register-using section: unwind one nesting level, and on
 * the outermost exit restore the saved FPU context (unless the msb flag says
 * we never saved it), re-enable migration, and free the tracking slot.
 */
void restore_vector_registers_x86(void)
{
    struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0);
    if (pstate == NULL) {
        pr_err("restore_vector_registers_x86 called by pid %d "
               "with no saved state.\n", task_pid_nr(current));
        return;
    }
    /* Still nested: just decrement and return. */
    if ((--pstate->fpu_state & WC_FPU_COUNT_MASK) > 0U) {
        return;
    }
    /* fpu_state == 0 means save_vector_registers_x86() actually saved the
     * registers (msb was 0); otherwise only WC_FPU_SAVED_MASK remains and we
     * must not call kernel_fpu_end().
     */
    if (pstate->fpu_state == 0U) {
#ifdef WOLFSSL_COMMERCIAL_LICENSE
        struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
        os_xrstor(fpstate, fpstate->xfeatures);
        fpregs_unlock();
#else
        kernel_fpu_end();
#endif
    } else
        pstate->fpu_state = 0U;
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
    !defined(WOLFSSL_COMMERCIAL_LICENSE)
    migrate_enable();
#endif
    wc_linuxkm_fpu_state_release(pstate);
    return;
}
  322. #endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS && CONFIG_X86 */
  323. #if defined(__PIE__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
  324. /* needed in 6.1+ because show_free_areas() static definition in mm.h calls
  325. * __show_free_areas(), which isn't exported (neither was show_free_areas()).
  326. */
  327. void my__show_free_areas(
  328. unsigned int flags,
  329. nodemask_t *nodemask,
  330. int max_zone_idx)
  331. {
  332. (void)flags;
  333. (void)nodemask;
  334. (void)max_zone_idx;
  335. return;
  336. }
  337. #endif
  338. #if defined(__PIE__) && defined(CONFIG_FORTIFY_SOURCE)
  339. /* needed because FORTIFY_SOURCE inline implementations call fortify_panic(). */
/* FORTIFY_SOURCE failure hook for the PIE build: log the offending function
 * name at emergency level, then halt via BUG() (does not return).
 */
void __my_fortify_panic(const char *name) {
    pr_emerg("__my_fortify_panic in %s\n", name);
    BUG();
}
  344. #endif