cpu_helpers.S

/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)

	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler

func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif
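
/*
 * For reference, the cpu_ops entries that reset_handler and get_cpu_ops_ptr
 * walk are emitted by the declare_cpu_ops macro from cpu_macros.S. A hedged
 * sketch of how a CPU library file typically registers its handlers (the
 * Cortex-A53 names below are illustrative, not part of this file):
 *
 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *		cortex_a53_reset_func, \
 *		cortex_a53_core_pwr_dwn, \
 *		cortex_a53_cluster_pwr_dwn
 *
 * reset_handler then dispatches to the registered reset function through the
 * CPU_RESET_FUNC offset of the matched entry.
 */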

#ifdef IMAGE_BL32 /* Core and cluster power down are needed only in BL32 */

/*
 * void prepare_cpu_pwr_dwn(unsigned int power_level)
 *
 * Prepare the CPU for power down, for all platforms. The function takes the
 * power domain level to be powered down as its parameter. After the cpu_ops
 * pointer is retrieved from cpu_data, the handler for the requested power
 * level is called.
 */
	.globl	prepare_cpu_pwr_dwn

func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, call the
	 * power down handler for the highest implemented power level.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn
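
/*
 * Worked example of the lookup above: on AArch32 the power down handlers are
 * word-sized (4-byte) entries starting at offset CPU_PWR_DWN_OPS within
 * cpu_ops, hence the "lsl #2" scaling. For power_level 0 (core) the handler
 * is loaded from cpu_ops + CPU_PWR_DWN_OPS + (0 << 2); for power_level 1
 * (cluster), from cpu_ops + CPU_PWR_DWN_OPS + (1 << 2). Any level above
 * CPU_MAX_PWR_DWN_OPS - 1 is clamped to the last implemented handler.
 */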

/*
 * Initializes the cpu_ops_ptr if not already initialized in cpu_data. This
 * must only be called after the data cache is enabled. AAPCS is followed.
 */
	.globl	init_cpu_ops

func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops
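
/*
 * Roughly equivalent C for init_cpu_ops, as a sketch only (cpu_data_t and
 * its cpu_ops_ptr field are assumed from lib/el3_runtime/cpu_data.h):
 *
 *	void init_cpu_ops(void)
 *	{
 *		cpu_data_t *data = _cpu_data();
 *
 *		if (data->cpu_ops_ptr == 0U)
 *			data->cpu_ops_ptr = (uintptr_t)get_cpu_ops_ptr();
 *	}
 */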

#endif /* IMAGE_BL32 */

/*
 * The below function returns the cpu_ops structure matching the MIDR of the
 * core. It reads the MIDR and searches the cpu_ops entries for a match. Only
 * the implementer and part number fields of the MIDR are used for matching.
 * Return:
 *	r0 - The matching cpu_ops pointer on success
 *	r0 - 0 on failure
 * Clobbers: r0 - r5
 */
	.globl	get_cpu_ops_ptr

func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches this core's midr */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
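
/*
 * The search above is roughly this C loop (a sketch; cpu_ops_t here is a
 * hypothetical C mirror of the assembly-level cpu_ops layout from
 * cpu_macros.S):
 *
 *	for (cpu_ops_t *ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
 *		if ((ops->midr & CPU_IMPL_PN_MASK) ==
 *		    (read_midr() & CPU_IMPL_PN_MASK))
 *			return ops;
 *	return NULL;
 *
 * The post-indexed load (ldr r1, [r4], #CPU_OPS_SIZE) leaves r4 pointing at
 * the next entry's midr field, hence the final subtraction of
 * (CPU_OPS_SIZE + CPU_MIDR) to recover the base of the matching entry.
 */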

/*
 * Extract CPU revision and variant, and combine them into a single numeric
 * for easier comparison.
 */
	.globl	cpu_get_rev_var

func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0], retaining the other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
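
/*
 * Worked example: for an r2p1 core, MIDR holds variant 0x2 in bits [23:20]
 * and revision 0x1 in bits [3:0]. The ubfx above extracts MIDR[23:16], so
 * r0[7:4] = 0x2 (variant) and r0[3:0] temporarily holds MIDR[19:16] (the
 * architecture field). The bfi then overwrites r0[3:0] with the revision,
 * leaving r0 = 0x21.
 */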

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the errata applies; otherwise it does not.
 */
	.globl	cpu_rev_var_ls

func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or equal to
 * the given value, the errata applies; otherwise it does not.
 */
	.globl	cpu_rev_var_hs

func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
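
/*
 * Typical usage, sketching the pattern CPU errata checks follow (the
 * check_errata_xyz name and the r2p0 cut-off are hypothetical; r0 is
 * expected to already hold the value returned by cpu_get_rev_var):
 *
 *	func check_errata_xyz
 *		mov	r1, #0x20	@ errata applies to revisions <= r2p0
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_xyz
 */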

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *	- with the MMU and data caches enabled;
 *	- after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status

func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the pointer to cpu_ops and, from it, the errata printing
	 * function. If the function is non-NULL, call it.
	 */
	bl	_cpu_data
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
#endif
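
/*
 * Illustrative C equivalent of the non-BL1 path in print_errata_status (a
 * sketch only; the cpu_ops_t type and its field names mirror the assembly
 * offsets above and are assumptions, not a real C API in this file):
 *
 *	cpu_ops_t *ops = (cpu_ops_t *)_cpu_data()->cpu_ops_ptr;
 *
 *	if (ops->errata_func != NULL &&
 *	    errata_needs_reporting(ops->errata_lock, ops->errata_printed))
 *		ops->errata_func();
 */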