/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)

	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	/*
	 * Assert if invalid cpu_ops obtained. If this is not valid, it may
	 * suggest that the proper CPU file hasn't been included.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
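	/*
	 * Restore the original return address so that the tail-called
	 * cpu_ops reset handler (or the fall-through ret below) returns
	 * directly to reset_handler's caller.
	 */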
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
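	/*
	 * The power down handlers are stored as an array of 64-bit function
	 * pointers starting at offset CPU_PWR_DWN_OPS in cpu_ops, so the
	 * (clamped) power level is scaled by 8 to index into it.
	 */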
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
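	/*
	 * No runtime stack is available here, so preserve the return address
	 * in a scratch register across the call below.
	 */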
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops

#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *	x0 - The matching cpu_ops pointer on Success
	 *	x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr_l	x5, (__CPU_OPS_END__ + CPU_MIDR)
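	/*
	 * Note: both the start and end pointers are pre-offset by CPU_MIDR so
	 * that the loads in the search loop below read the midr field of each
	 * cpu_ops entry directly.
	 */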

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr_l	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

	/*
	 * Extract CPU revision and variant, and combine them into a single
	 * numeric for easier comparison.
	 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

	/*
	 * Compare the CPU's revision-variant (x0) with a given value (x1), for
	 * errata application purposes. If the revision-variant is less than or
	 * the same as the given value, the check indicates that the errata
	 * applies; otherwise it does not.
	 *
	 * Shall clobber: x0-x3
	 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

	/*
	 * Compare the CPU's revision-variant (x0) with a given value (x1), for
	 * errata application purposes. If the revision-variant is higher than
	 * or the same as the given value, the check indicates that the errata
	 * applies; otherwise it does not.
	 *
	 * Shall clobber: x0-x3
	 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

	/*
	 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2),
	 * for errata application purposes. If the revision-variant falls within
	 * the given range (bounds inclusive), this indicates that the errata
	 * applies; otherwise it does not.
	 *
	 * Shall clobber: x0-x4
	 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
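	/*
	 * Check the lower bound first; the upper bound is only checked if the
	 * lower bound check passed (the cbz below relies on ERRATA_NOT_APPLIES
	 * being zero).
	 */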
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

	/*
	 * int check_wa_cve_2017_5715(void);
	 *
	 * This function returns:
	 *  - ERRATA_APPLIES when firmware mitigation is required.
	 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
	 *  - ERRATA_MISSING when firmware mitigation would be required but
	 *    is not compiled in.
	 *
	 * NOTE: Must be called only after cpu_ops have been initialized
	 *       in per-CPU data.
	 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
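	/* Tail-call the CPU-specific handler; it returns the errata status in x0 */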
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

	/*
	 * void *wa_cve_2018_3639_get_disable_ptr(void);
	 *
	 * Returns a function pointer which is used to disable mitigation
	 * for CVE-2018-3639.
	 * The function pointer is only returned on cores that employ
	 * dynamic mitigation. If the core uses static mitigation or is
	 * unaffected by CVE-2018-3639 this function returns NULL.
	 *
	 * NOTE: Must be called only after cpu_ops have been initialized
	 *       in per-CPU data.
	 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

	/*
	 * int check_smccc_arch_wa3_applies(void);
	 *
	 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to
	 * mitigate CVE-2022-23960 for this CPU. It returns:
	 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to
	 *    mitigate the CVE.
	 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be
	 *    invoked to mitigate the CVE.
	 *
	 * NOTE: Must be called only after cpu_ops have been initialized
	 *       in per-CPU data.
	 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
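	/* Tail-call the CPU-specific handler; it returns the errata status in x0 */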
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies