/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>
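
/*
 * Note: the reset handler below is only built into BL1, BL32 (SP_MIN) and,
 * when RESET_TO_BL2 is enabled, BL2 - the images that run the CPU reset path.
 */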
#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
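	/* lr is clobbered by the bl calls below, so stash the return address in r8 */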
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
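	/*
	 * Restore the original return address so the CPU-specific handler,
	 * if any, returns straight to our caller; otherwise just return.
	 */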
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* The core and cluster power down handlers are needed only in BL32 */

	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested power
	 * level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, call the
	 * power down handler for the highest implemented power level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2
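	/* Preserve the clamped power level and lr across the _cpu_data call */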
	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}
	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
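	/*
	 * The handlers are laid out as an array of function pointers at offset
	 * CPU_PWR_DWN_OPS in cpu_ops; each entry is 4 bytes on AArch32, hence
	 * the index is scaled by lsl #2.
	 */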
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
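	/* Nothing to do if the cpu_ops pointer has already been cached */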
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part number
	 * are used to match the entries.
	 * Return:
	 *	r0 - The matching cpu_ops pointer on success
	 *	r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)
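	/*
	 * r4 and r5 point at the midr field of the first entry and one past
	 * the last entry respectively, so the loop below can post-increment
	 * straight to the next entry's midr.
	 */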
	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops and advance to the next entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches the midr of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

	/*
	 * Extract CPU revision and variant, and combine them into a single
	 * numeric for easier comparison.
	 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them in
	 * r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0], retaining the other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

	/*
	 * Compare the CPU's revision-variant (r0) with a given value (r1), for
	 * errata application purposes. If the revision-variant is less than or
	 * equal to the given value, the errata applies; otherwise it does not.
	 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

	/*
	 * Compare the CPU's revision-variant (r0) with a given value (r1), for
	 * errata application purposes. If the revision-variant is greater than
	 * or equal to the given value, the errata applies; otherwise it does not.
	 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
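	/*
	 * A signed compare is fine here: rev-var values are packed into the low
	 * eight bits by cpu_get_rev_var, so both operands are non-negative.
	 */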
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs