linuxkm_memory.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323
  1. /* linuxkm_memory.c
  2. *
  3. * Copyright (C) 2006-2022 wolfSSL Inc.
  4. *
  5. * This file is part of wolfSSL.
  6. *
  7. * wolfSSL is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * wolfSSL is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
  20. */
  21. /* included by wolfcrypt/src/memory.c */
  22. #if defined(WOLFSSL_LINUXKM_SIMD_X86)
  23. #ifdef LINUXKM_SIMD_IRQ
  24. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
  25. static union fpregs_state **wolfcrypt_linuxkm_fpu_states = NULL;
  26. #else
  27. static struct fpstate **wolfcrypt_linuxkm_fpu_states = NULL;
  28. #endif
  29. #else
  30. static unsigned int *wolfcrypt_linuxkm_fpu_states = NULL;
  31. #endif
  32. static WARN_UNUSED_RESULT inline int am_in_hard_interrupt_handler(void)
  33. {
  34. return (preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0;
  35. }
  36. WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
  37. {
  38. #ifdef LINUXKM_SIMD_IRQ
  39. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
  40. wolfcrypt_linuxkm_fpu_states =
  41. (union fpregs_state **)kzalloc(nr_cpu_ids
  42. * sizeof(struct fpu_state *),
  43. GFP_KERNEL);
  44. #else
  45. wolfcrypt_linuxkm_fpu_states =
  46. (struct fpstate **)kzalloc(nr_cpu_ids
  47. * sizeof(struct fpstate *),
  48. GFP_KERNEL);
  49. #endif
  50. #else
  51. wolfcrypt_linuxkm_fpu_states =
  52. (unsigned int *)kzalloc(nr_cpu_ids * sizeof(unsigned int),
  53. GFP_KERNEL);
  54. #endif
  55. if (! wolfcrypt_linuxkm_fpu_states) {
  56. pr_err("warning, allocation of %lu bytes for "
  57. "wolfcrypt_linuxkm_fpu_states failed.\n",
  58. nr_cpu_ids * sizeof(struct fpu_state *));
  59. return MEMORY_E;
  60. }
  61. #ifdef LINUXKM_SIMD_IRQ
  62. {
  63. typeof(nr_cpu_ids) i;
  64. for (i=0; i<nr_cpu_ids; ++i) {
  65. _Static_assert(sizeof(union fpregs_state) <= PAGE_SIZE,
  66. "union fpregs_state is larger than expected.");
  67. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
  68. wolfcrypt_linuxkm_fpu_states[i] =
  69. (union fpregs_state *)kzalloc(PAGE_SIZE
  70. /* sizeof(union fpregs_state) */,
  71. GFP_KERNEL);
  72. #else
  73. wolfcrypt_linuxkm_fpu_states[i] =
  74. (struct fpstate *)kzalloc(PAGE_SIZE
  75. /* sizeof(struct fpstate) */,
  76. GFP_KERNEL);
  77. #endif
  78. if (! wolfcrypt_linuxkm_fpu_states[i])
  79. break;
  80. /* double-check that the allocation is 64-byte-aligned as needed
  81. * for xsave.
  82. */
  83. if ((unsigned long)wolfcrypt_linuxkm_fpu_states[i] & 63UL) {
  84. pr_err("warning, allocation for wolfcrypt_linuxkm_fpu_states "
  85. "was not properly aligned (%px).\n",
  86. wolfcrypt_linuxkm_fpu_states[i]);
  87. kfree(wolfcrypt_linuxkm_fpu_states[i]);
  88. wolfcrypt_linuxkm_fpu_states[i] = 0;
  89. break;
  90. }
  91. }
  92. if (i < nr_cpu_ids) {
  93. pr_err("warning, only %u/%u allocations succeeded for "
  94. "wolfcrypt_linuxkm_fpu_states.\n",
  95. i, nr_cpu_ids);
  96. return MEMORY_E;
  97. }
  98. }
  99. #endif /* LINUXKM_SIMD_IRQ */
  100. return 0;
  101. }
  102. void free_wolfcrypt_linuxkm_fpu_states(void)
  103. {
  104. if (wolfcrypt_linuxkm_fpu_states) {
  105. #ifdef LINUXKM_SIMD_IRQ
  106. typeof(nr_cpu_ids) i;
  107. for (i=0; i<nr_cpu_ids; ++i) {
  108. if (wolfcrypt_linuxkm_fpu_states[i])
  109. kfree(wolfcrypt_linuxkm_fpu_states[i]);
  110. }
  111. #endif /* LINUXKM_SIMD_IRQ */
  112. kfree(wolfcrypt_linuxkm_fpu_states);
  113. wolfcrypt_linuxkm_fpu_states = 0;
  114. }
  115. }
/* Save the x86 vector (SIMD/FPU) register state on the current CPU so
 * wolfCrypt may use vector instructions in kernel context.  Pairs with
 * restore_vector_registers_x86().  Calls may nest: a per-CPU counter
 * (the last byte of the per-CPU save page in LINUXKM_SIMD_IRQ builds,
 * the per-CPU unsigned int otherwise) tracks the depth, and only the
 * outermost call actually saves state.
 *
 * Returns 0 on success, leaving preemption disabled (mirroring
 * kernel_fpu_begin() semantics); returns BAD_STATE_E on failure with
 * preemption restored.
 */
WARN_UNUSED_RESULT int save_vector_registers_x86(void)
{
    int processor_id;

    preempt_disable();

    /* stable only because preemption is now disabled. */
    processor_id = smp_processor_id();

    {
        /* warn at most once per (monotonically increasing) CPU id, to
         * avoid flooding the log if called before allocation.
         */
        static int _warned_on_null = -1;
        if ((wolfcrypt_linuxkm_fpu_states == NULL)
#ifdef LINUXKM_SIMD_IRQ
            || (wolfcrypt_linuxkm_fpu_states[processor_id] == NULL)
#endif
            )
        {
            preempt_enable();
            if (_warned_on_null < processor_id) {
                _warned_on_null = processor_id;
                pr_err("save_vector_registers_x86 called for cpu id %d "
                       "with null context buffer.\n", processor_id);
            }
            return BAD_STATE_E;
        }
    }

    if (! irq_fpu_usable()) {
#ifdef LINUXKM_SIMD_IRQ
        if (am_in_hard_interrupt_handler()) {

            /* allow for nested calls: nonzero last byte means state is
             * already saved on this CPU -- just bump the depth counter.
             */
            if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
                if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
                    /* counter is a single byte -- refuse to wrap. */
                    preempt_enable();
                    pr_err("save_vector_registers_x86 recursion register overflow for "
                           "cpu id %d.\n", processor_id);
                    return BAD_STATE_E;
                } else {
                    ++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
                    /* keep preemption disabled -- each nesting level holds
                     * one preempt_disable(), released per-level on restore.
                     */
                    return 0;
                }
            }

            /* note, fpregs_lock() is not needed here, because
             * interrupts/preemptions are already disabled here.
             */
            {
                /* save_fpregs_to_fpstate() only accesses fpu->state, which
                 * has stringent alignment requirements (64 byte cache
                 * line), but takes a pointer to the parent struct.  work
                 * around this by backing up from our page-aligned buffer
                 * to a fake parent pointer.
                 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
                struct fpu *fake_fpu_pointer =
                    (struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
                                   - offsetof(struct fpu, state));
                copy_fpregs_to_fpstate(fake_fpu_pointer);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
                struct fpu *fake_fpu_pointer =
                    (struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
                                   - offsetof(struct fpu, state));
                save_fpregs_to_fpstate(fake_fpu_pointer);
#else
                /* 5.16+ renamed the member from .state to .fpstate. */
                struct fpu *fake_fpu_pointer =
                    (struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
                                   - offsetof(struct fpu, fpstate));
                save_fpregs_to_fpstate(fake_fpu_pointer);
#endif
            }

            /* mark the slot as used (nesting depth = 1). */
            ((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;

            /* note, not preempt_enable()ing, mirroring kernel_fpu_begin()
             * semantics, even though routine will have been entered already
             * non-preemptable.
             */
            return 0;
        } else
#endif /* LINUXKM_SIMD_IRQ */
        {
            /* FPU unusable and (without LINUXKM_SIMD_IRQ) no fallback. */
            preempt_enable();
            return BAD_STATE_E;
        }
    } else {
        /* allow for nested calls */
#ifdef LINUXKM_SIMD_IRQ
        if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
            if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
                preempt_enable();
                pr_err("save_vector_registers_x86 recursion register overflow for "
                       "cpu id %d.\n", processor_id);
                return BAD_STATE_E;
            } else {
                ++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
                return 0;
            }
        }
        kernel_fpu_begin();
        preempt_enable(); /* kernel_fpu_begin() does its own
                           * preempt_disable().  decrement ours.
                           */
        ((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
#else /* !LINUXKM_SIMD_IRQ */
        /* here the per-CPU slot itself is the nesting counter. */
        if (wolfcrypt_linuxkm_fpu_states[processor_id] != 0) {
            if (wolfcrypt_linuxkm_fpu_states[processor_id] == ~0U) {
                preempt_enable();
                pr_err("save_vector_registers_x86 recursion register overflow for "
                       "cpu id %d.\n", processor_id);
                return BAD_STATE_E;
            } else {
                ++wolfcrypt_linuxkm_fpu_states[processor_id];
                return 0;
            }
        }
        kernel_fpu_begin();
        preempt_enable(); /* kernel_fpu_begin() does its own
                           * preempt_disable().  decrement ours.
                           */
        wolfcrypt_linuxkm_fpu_states[processor_id] = 1;
#endif /* !LINUXKM_SIMD_IRQ */
        return 0;
    }
}
/* Restore the x86 vector register state saved by
 * save_vector_registers_x86() on this CPU, balancing its nesting and
 * preemption accounting.  Must be called exactly once per successful
 * save; nested saves only decrement the per-CPU depth counter, and the
 * actual register restore happens on the outermost call.
 */
void restore_vector_registers_x86(void)
{
    /* preemption is expected to still be disabled from the matching save,
     * making this id stable -- NOTE(review): not verifiable from this
     * file alone; confirm callers keep the save/restore pair on one CPU.
     */
    int processor_id = smp_processor_id();

    if ((wolfcrypt_linuxkm_fpu_states == NULL)
#ifdef LINUXKM_SIMD_IRQ
        || (wolfcrypt_linuxkm_fpu_states[processor_id] == NULL)
#endif
        )
    {
        pr_err("restore_vector_registers_x86 called for cpu id %d "
               "with null context buffer.\n", processor_id);
        return;
    }

#ifdef LINUXKM_SIMD_IRQ
    /* the last byte of the per-CPU save page is the nesting counter. */
    if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 0)
    {
        /* unbalanced call -- nothing was saved. */
        pr_err("restore_vector_registers_x86 called for cpu id %d "
               "without saved context.\n", processor_id);
        return;
    }

    if (--((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] > 0) {
        preempt_enable(); /* preempt_disable count will still be nonzero after this decrement. */
        return;
    }

    /* depth hit zero: actually restore the registers. */
    if (am_in_hard_interrupt_handler()) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
        copy_kernel_to_fpregs(wolfcrypt_linuxkm_fpu_states[processor_id]);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
        __restore_fpregs_from_fpstate(wolfcrypt_linuxkm_fpu_states[processor_id],
                                      xfeatures_mask_all);
#else
        restore_fpregs_from_fpstate(wolfcrypt_linuxkm_fpu_states[processor_id],
                                    fpu_kernel_cfg.max_features);
#endif
        /* balances the preempt_disable() held since the outermost save. */
        preempt_enable();
    } else {
        /* state was saved via kernel_fpu_begin(); let the kernel restore
         * it and drop its preempt_disable().
         */
        kernel_fpu_end();
    }
#else /* !LINUXKM_SIMD_IRQ */
    /* here the per-CPU slot itself is the nesting counter. */
    if (wolfcrypt_linuxkm_fpu_states[processor_id] == 0)
    {
        pr_err("restore_vector_registers_x86 called for cpu id %d "
               "without saved context.\n", processor_id);
        return;
    }

    if (--wolfcrypt_linuxkm_fpu_states[processor_id] > 0) {
        preempt_enable(); /* preempt_disable count will still be nonzero after this decrement. */
        return;
    }

    kernel_fpu_end();
#endif /* !LINUXKM_SIMD_IRQ */

    return;
}
#endif /* WOLFSSL_LINUXKM_SIMD_X86 */
  286. #if defined(__PIE__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
  287. /* needed in 6.1+ because show_free_areas() static definition in mm.h calls
  288. * __show_free_areas(), which isn't exported (neither was show_free_areas()).
  289. */
  290. void my__show_free_areas(
  291. unsigned int flags,
  292. nodemask_t *nodemask,
  293. int max_zone_idx)
  294. {
  295. (void)flags;
  296. (void)nodemask;
  297. (void)max_zone_idx;
  298. return;
  299. }
  300. #endif