/*
 * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/pmf/aarch32/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>
#include <smccc_macros.S>

        .globl  sp_min_vector_table
        .globl  sp_min_entrypoint
        .globl  sp_min_warm_entrypoint
        .globl  sp_min_handle_smc
        .globl  sp_min_handle_fiq

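/*
 * Size of the BL32 (SP_MIN) image: passed to 'el3_entrypoint_common' as
 * '_pie_fixup_size' so that dynamic relocation fixups can be applied when
 * the image is built as position independent.
 */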
#define FIXUP_SIZE      ((BL32_LIMIT) - (BL32_BASE))

        .macro route_fiq_to_sp_min reg
                /* -----------------------------------------------------
                 * FIQs are secure interrupts trapped by the Monitor:
                 * setting SCR.FIQ routes them to Monitor mode, and
                 * clearing SCR.FW prevents the Non-secure world from
                 * masking them.
                 * -----------------------------------------------------
                 */
                ldcopr  \reg, SCR
                orr     \reg, \reg, #SCR_FIQ_BIT
                bic     \reg, \reg, #SCR_FW_BIT
                stcopr  \reg, SCR
        .endm

        .macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
                /*
                 * On ARMv7, the exclusive monitor must be cleared
                 * explicitly on entry to Monitor mode.
                 */
                clrex
#endif
        .endm

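/* -----------------------------------------------------
 * The Monitor mode exception vector table. Each entry
 * is a single branch instruction to the corresponding
 * handler.
 * -----------------------------------------------------
 */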
vector_base sp_min_vector_table
        b       sp_min_entrypoint
        b       plat_panic_handler      /* Undef */
        b       sp_min_handle_smc       /* Syscall */
        b       report_prefetch_abort   /* Prefetch abort */
        b       report_data_abort       /* Data abort */
        b       plat_panic_handler      /* Reserved */
        b       plat_panic_handler      /* IRQ */
        b       sp_min_handle_fiq       /* FIQ */

/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
        /* ---------------------------------------------------------------
         * Stash the previous bootloader arguments r0 - r3 for later use.
         * ---------------------------------------------------------------
         */
        mov     r9, r0
        mov     r10, r1
        mov     r11, r2
        mov     r12, r3

#if !RESET_TO_SP_MIN
        /* ---------------------------------------------------------------------
         * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
         * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
         * and primary/secondary CPU logic should not be executed in this case.
         *
         * Also, assume that the previous bootloader has already initialised the
         * SCTLR, including the CPU endianness, and has initialised the memory.
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
                _init_sctlr=0                                   \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
                _init_c_runtime=1                               \
                _exception_vectors=sp_min_vector_table          \
                _pie_fixup_size=FIXUP_SIZE
#else
        /* ---------------------------------------------------------------------
         * For RESET_TO_SP_MIN systems which have a programmable reset address,
         * sp_min_entrypoint() is executed only on the cold boot path so we can
         * skip the warm boot mailbox mechanism.
         * ---------------------------------------------------------------------
         */
        el3_entrypoint_common                                   \
                _init_sctlr=1                                   \
                _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
                _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
                _init_memory=1                                  \
                _init_c_runtime=1                               \
                _exception_vectors=sp_min_vector_table          \
                _pie_fixup_size=FIXUP_SIZE
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
        route_fiq_to_sp_min r4
#endif

        /* ---------------------------------------------------------------------
         * Relay the previous bootloader's arguments to the platform layer
         * ---------------------------------------------------------------------
         */
        mov     r0, r9
        mov     r1, r10
        mov     r2, r11
        mov     r3, r12
        bl      sp_min_setup

        /* Jump to the main function */
        bl      sp_min_main

        /* -------------------------------------------------------------
         * Clean the .data & .bss sections to main memory. This ensures
         * that any global data which was initialised by the primary CPU
         * is visible to secondary CPUs before they enable their data
         * caches and participate in coherency.
         * -------------------------------------------------------------
         */
        ldr     r0, =__DATA_START__
        ldr     r1, =__DATA_END__
        sub     r1, r1, r0              /* clean_dcache_range(base, size) */
        bl      clean_dcache_range

        ldr     r0, =__BSS_START__
        ldr     r1, =__BSS_END__
        sub     r1, r1, r0
        bl      clean_dcache_range

        bl      smc_get_next_ctx

        /* r0 points to `smc_ctx_t` */
        /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
        b       sp_min_exit
endfunc sp_min_entrypoint

/*
 * SMC handling function for SP_MIN.
 */
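/*
 * On entry, the Monitor stack pointer points to this CPU's 'smc_ctx_t',
 * and the C runtime stack pointer is held in its SMC_CTX_SP_MON field.
 */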
func sp_min_handle_smc
        /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
        str     lr, [sp, #SMC_CTX_LR_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
        /*
         * Read the timestamp value and store it on top of the C runtime stack.
         * The value will be saved to the per-cpu data once the C stack is
         * available, as a valid stack is needed to call _cpu_data().
         */
        strd    r0, r1, [sp, #SMC_CTX_GPREG_R0] /* stash r0/r1 in the context */
        ldcopr16 r0, r1, CNTPCT_64
        ldr     lr, [sp, #SMC_CTX_SP_MON]
        strd    r0, r1, [lr, #-8]!              /* push the 64-bit timestamp */
        str     lr, [sp, #SMC_CTX_SP_MON]
        ldrd    r0, r1, [sp, #SMC_CTX_GPREG_R0] /* restore the original r0/r1 */
#endif

        smccc_save_gp_mode_regs

        clrex_on_monitor_entry

        /*
         * `sp` still points to `smc_ctx_t`. Save it to a register
         * and restore the C runtime stack pointer to `sp`.
         */
        mov     r2, sp                          /* handle */
        ldr     sp, [r2, #SMC_CTX_SP_MON]

#if ENABLE_RUNTIME_INSTRUMENTATION
        /* Save handle to a callee saved register */
        mov     r6, r2

        /*
         * Restore the timestamp value and store it in per-cpu data. The value
         * will be extracted from per-cpu data by the C level SMC handler and
         * saved to the PMF timestamp region.
         */
        ldrd    r4, r5, [sp], #8
        bl      _cpu_data
        strd    r4, r5, [r0, #CPU_DATA_PMF_TS0_OFFSET]

        /* Restore handle */
        mov     r2, r6
#endif

        ldr     r0, [r2, #SMC_CTX_SCR]
        and     r3, r0, #SCR_NS_BIT             /* flags */

        /* Switch to Secure Mode */
        bic     r0, #SCR_NS_BIT
        stcopr  r0, SCR
        isb

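        /*
         * Bit [30] of the function ID is the SMCCC calling convention
         * field: 0 denotes SMC32, 1 denotes SMC64, which AArch32 SP_MIN
         * cannot support.
         */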
        ldr     r0, [r2, #SMC_CTX_GPREG_R0]     /* smc_fid */
        /* Check whether an SMC64 was issued */
        tst     r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
        beq     1f
        /* SMC64 detected. Return an error back to the caller */
        mov     r0, #SMC_UNK
        str     r0, [r2, #SMC_CTX_GPREG_R0]
        mov     r0, r2
        b       sp_min_exit
1:
        /* SMC32 detected */
        mov     r1, #0                          /* cookie */
        bl      handle_runtime_svc

        /* `r0` points to `smc_ctx_t` */
        b       sp_min_exit
endfunc sp_min_handle_smc

/*
 * Secure Interrupts handling function for SP_MIN.
 */
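/*
 * FIQs only reach this handler when SP_MIN_WITH_SECURE_FIQ is set and
 * 'route_fiq_to_sp_min' has routed them to Monitor mode via SCR.FIQ.
 */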
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
        b       plat_panic_handler
#else
        /* FIQ has a +4 offset for lr compared to preferred return address */
        sub     lr, lr, #4

        /* On FIQ entry, `sp` points to `smc_ctx_t`. Save `lr`. */
        str     lr, [sp, #SMC_CTX_LR_MON]

        smccc_save_gp_mode_regs

        clrex_on_monitor_entry

        /* Load the C runtime stack */
        mov     r2, sp
        ldr     sp, [r2, #SMC_CTX_SP_MON]

        /* Switch to Secure Mode */
        ldr     r0, [r2, #SMC_CTX_SCR]
        bic     r0, #SCR_NS_BIT
        stcopr  r0, SCR
        isb

        push    {r2, r3}        /* save the handle; r3 keeps 8-byte alignment */
        bl      sp_min_fiq
        pop     {r0, r3}        /* handle into r0 for sp_min_exit */

        b       sp_min_exit
#endif
endfunc sp_min_handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
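/*
 * CPUs enter here out of reset on the warm boot path, e.g. when coming
 * back from a PSCI CPU_ON or a power-down suspend state.
 */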
func sp_min_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION
        /*
         * This timestamp update happens with cache off. The next
         * timestamp collection will need to do cache maintenance prior
         * to timestamp update.
         */
        pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
        ldcopr16 r2, r3, CNTPCT_64
        strd    r2, r3, [r0]
#endif

        /*
         * On the warm boot path, most of the EL3 initialisations performed by
         * 'el3_entrypoint_common' must be skipped:
         *
         * - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
         *   programming the reset address do we need to initialise the SCTLR.
         *   In other cases, we assume this has been taken care of by the
         *   entrypoint code.
         *
         * - No need to determine the type of boot, we know it is a warm boot.
         *
         * - Do not try to distinguish between primary and secondary CPUs, this
         *   notion only exists for a cold boot.
         *
         * - No need to initialise the memory or the C runtime environment,
         *   it has been done once and for all on the cold boot path.
         */
        el3_entrypoint_common                                   \
                _init_sctlr=PROGRAMMABLE_RESET_ADDRESS          \
                _warm_boot_mailbox=0                            \
                _secondary_cold_boot=0                          \
                _init_memory=0                                  \
                _init_c_runtime=0                               \
                _exception_vectors=sp_min_vector_table          \
                _pie_fixup_size=0

        /*
         * We're about to enable the MMU and participate in PSCI state
         * coordination.
         *
         * The PSCI implementation invokes platform routines that enable CPUs to
         * participate in coherency. On a system where CPUs are not
         * cache-coherent without appropriate platform specific programming,
         * having caches enabled until such time might lead to coherency issues
         * (resulting from stale data getting speculatively fetched, among
         * others). Therefore we keep data caches disabled even after enabling
         * the MMU for such platforms.
         *
         * On systems with hardware-assisted coherency, or on single cluster
         * platforms, such platform specific programming is not required to
         * enter coherency (as CPUs already are); and there's no reason to have
         * caches disabled either.
         */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
        mov     r0, #0
#else
        mov     r0, #DISABLE_DCACHE
#endif
        bl      bl32_plat_enable_mmu            /* r0 holds the MMU flags */

#if SP_MIN_WITH_SECURE_FIQ
        route_fiq_to_sp_min r0
#endif

        bl      sp_min_warm_boot
        bl      smc_get_next_ctx

        /* r0 points to `smc_ctx_t` */
        /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */

#if ENABLE_RUNTIME_INSTRUMENTATION
        /* Save smc_ctx_t */
        mov     r5, r0

        pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
        mov     r4, r0

        /*
         * Invalidate before updating timestamp to ensure previous timestamp
         * updates on the same cache line with caches disabled are properly
         * seen by the same core. Without the cache invalidate, the core might
         * write into a stale cache line.
         */
        mov     r1, #PMF_TS_SIZE
        bl      inv_dcache_range

        ldcopr16 r0, r1, CNTPCT_64
        strd    r0, r1, [r4]

        /* Restore smc_ctx_t */
        mov     r0, r5
#endif

        b       sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Restore the registers from the SMC context and return to the mode
 * indicated by the SPSR saved in that context.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
        monitor_exit
endfunc sp_min_exit