/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>

#include "../tsp_private.h"
	.globl	tsp_entrypoint
	.globl	tsp_vector_table
#if SPMC_AT_EL3
	.globl	tsp_cpu_on_entry
#endif

	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then switch
	 * to EL3 via SMC to hand the results back to
	 * the TSPD. Clobbers x0-x7.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #SMC_ARG6]
	ldp	x4, x5, [x0, #SMC_ARG4]
	ldp	x2, x3, [x0, #SMC_ARG2]
	/*
	 * x0 holds the args pointer itself, so the x0/x1 pair
	 * must be reloaded last — do not reorder these loads.
	 */
	ldp	x0, x1, [x0, #SMC_ARG0]
	smc	#0
	.endm
	/* ---------------------------------------------
	 * Save the S-EL1 exception-return state
	 * (ELR_EL1/SPSR_EL1) plus x30 (LR) and x18 on
	 * the stack, using \reg1/\reg2 as scratch.
	 * Two 16-byte stores keep SP 16-byte aligned.
	 * Must be paired with restore_eret_context,
	 * which pops in the reverse order.
	 * ---------------------------------------------
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	/* x18 (AAPCS64 platform register) is preserved alongside LR */
	stp	x30, x18, [sp, #-0x10]!
	.endm
	/* ---------------------------------------------
	 * Restore the context pushed by save_eret_context:
	 * pop x30/x18 first, then the ELR_EL1/SPSR_EL1
	 * pair into \reg1/\reg2 and write them back to
	 * the system registers. Pop order must mirror
	 * the push order in save_eret_context exactly.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
	/* ---------------------------------------------
	 * Cold-boot entry point of the TSP (BL32) on the
	 * primary CPU. Entered from the TSPD with MMU and
	 * caches off. Sets up vectors, SCTLR, RW memory,
	 * BSS, the stack and the C runtime, then reports
	 * TSP_ENTRY_DONE to the TSPD via SMC. The step
	 * order below is mandatory — do not reorder.
	 * ---------------------------------------------
	 */
func tsp_entrypoint _align=3

#if ENABLE_PIE
	/*
	 * ------------------------------------------------------------
	 * If PIE is enabled fixup the Global descriptor Table only
	 * once during primary core cold boot path.
	 *
	 * Compile time base address, required for fixup, is calculated
	 * using "pie_fixup" label present within first page.
	 * ------------------------------------------------------------
	 */
pie_fixup:
	ldr	x0, =pie_fixup
	/* Round down to the base of the page containing pie_fixup */
	and	x0, x0, #~(PAGE_SIZE_MASK)
	mov_imm	x1, (BL32_LIMIT - BL32_BASE)
	add	x1, x1, x0		/* x1 = end of region to fix up */
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads (clear SCTLR_EL1.DSSBS).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled however, RO sections including the
	 * GOT may be modified during pie fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0		/* x1 = length of region in bytes */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 * - the .bss section;
	 * - the coherent memory section.
	 * Must happen after the invalidate above so the
	 * zeroes are not overwritten by stale lines.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * tsp_main's return value is passed in x1.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above should never return here */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * NOTE(review): the branch order here is ABI —
	 * it must match the vector structure the TSPD
	 * indexes into (see bl32/tsp/tsp.h); never
	 * reorder or insert entries.
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry		/* yielding SMC service */
	b	tsp_fast_smc_entry		/* fast SMC service */
	b	tsp_cpu_on_entry		/* CPU_ON warm boot */
	b	tsp_cpu_off_entry		/* CPU_OFF notification */
	b	tsp_cpu_resume_entry		/* resume after CPU_SUSPEND */
	b	tsp_cpu_suspend_entry		/* CPU_SUSPEND notification */
	b	tsp_sel1_intr_entry		/* synchronous S-EL1 interrupt */
	b	tsp_system_off_entry		/* SYSTEM_OFF notification */
	b	tsp_system_reset_entry		/* SYSTEM_RESET notification */
	b	tsp_abort_yield_smc_entry	/* abort a preempted yielding SMC */
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	/* C handler returns a pointer to the SMC args structure in x0 */
	bl	tsp_cpu_off_main
	/* Load results into x0-x7 and return to the TSPD via SMC */
	restore_args_call_smc
endfunc tsp_cpu_off_entry
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	/* C handler returns a pointer to the SMC args structure in x0 */
	bl	tsp_system_off_main
	/* Load results into x0-x7 and return to the TSPD via SMC */
	restore_args_call_smc
endfunc tsp_system_off_entry
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	/* C handler returns a pointer to the SMC args structure in x0 */
	bl	tsp_system_reset_main
	/* Load results into x0-x7 and return to the TSPD via SMC */
	restore_args_call_smc
endfunc tsp_system_reset_entry
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * The step order below is mandatory.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks.
	 * NOTE(review): unlike the cold-boot path in
	 * tsp_entrypoint, SCTLR_EL1.DSSBS is not
	 * cleared here — confirm this asymmetry is
	 * intentional.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * x0 = 0: no extra mapping flags requested.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping; returns SMC args pointer in x0.
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
  294. /*---------------------------------------------
  295. * This entrypoint is used by the TSPD when this
  296. * cpu is to be suspended through a CPU_SUSPEND
  297. * psci call to ask the TSP to perform any
  298. * bookeeping necessary. In the current
  299. * implementation, the TSPD saves and restores
  300. * the EL1 state.
  301. * ---------------------------------------------
  302. */
  303. func tsp_cpu_suspend_entry
  304. bl tsp_cpu_suspend_main
  305. restore_args_call_smc
  306. endfunc tsp_cpu_suspend_entry
  307. /*-------------------------------------------------
  308. * This entrypoint is used by the TSPD to pass
  309. * control for `synchronously` handling a S-EL1
  310. * Interrupt which was triggered while executing
  311. * in normal world. 'x0' contains a magic number
  312. * which indicates this. TSPD expects control to
  313. * be handed back at the end of interrupt
  314. * processing. This is done through an SMC.
  315. * The handover agreement is:
  316. *
  317. * 1. PSTATE.DAIF are set upon entry. 'x1' has
  318. * the ELR_EL3 from the non-secure state.
  319. * 2. TSP has to preserve the callee saved
  320. * general purpose registers, SP_EL1/EL0 and
  321. * LR.
  322. * 3. TSP has to preserve the system and vfp
  323. * registers (if applicable).
  324. * 4. TSP can use 'x0-x18' to enable its C
  325. * runtime.
  326. * 5. TSP returns to TSPD using an SMC with
  327. * 'x0' = TSP_HANDLED_S_EL1_INTR
  328. * ------------------------------------------------
  329. */
  330. func tsp_sel1_intr_entry
  331. #if DEBUG
  332. mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
  333. cmp x0, x2
  334. b.ne tsp_sel1_int_entry_panic
  335. #endif
  336. /*-------------------------------------------------
  337. * Save any previous context needed to perform
  338. * an exception return from S-EL1 e.g. context
  339. * from a previous Non secure Interrupt.
  340. * Update statistics and handle the S-EL1
  341. * interrupt before returning to the TSPD.
  342. * IRQ/FIQs are not enabled since that will
  343. * complicate the implementation. Execution
  344. * will be transferred back to the normal world
  345. * in any case. The handler can return 0
  346. * if the interrupt was handled or TSP_PREEMPTED
  347. * if the expected interrupt was preempted
  348. * by an interrupt that should be handled in EL3
  349. * e.g. Group 0 interrupt in GICv3. In both
  350. * the cases switch to EL3 using SMC with id
  351. * TSP_HANDLED_S_EL1_INTR. Any other return value
  352. * from the handler will result in panic.
  353. * ------------------------------------------------
  354. */
  355. save_eret_context x2 x3
  356. bl tsp_update_sync_sel1_intr_stats
  357. bl tsp_common_int_handler
  358. /* Check if the S-EL1 interrupt has been handled */
  359. cbnz x0, tsp_sel1_intr_check_preemption
  360. b tsp_sel1_intr_return
  361. tsp_sel1_intr_check_preemption:
  362. /* Check if the S-EL1 interrupt has been preempted */
  363. mov_imm x1, TSP_PREEMPTED
  364. cmp x0, x1
  365. b.ne tsp_sel1_int_entry_panic
  366. tsp_sel1_intr_return:
  367. mov_imm x0, TSP_HANDLED_S_EL1_INTR
  368. restore_eret_context x2 x3
  369. smc #0
  370. /* Should never reach here */
  371. tsp_sel1_int_entry_panic:
  372. no_ret plat_panic_handler
  373. endfunc tsp_sel1_intr_entry
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	/* C handler returns a pointer to the SMC args structure in x0 */
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Interrupts remain masked for the duration.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	/* C handler returns a pointer to the SMC args structure in x0 */
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	/* Unmask IRQ/FIQ so the yielding SMC can be preempted */
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	/* Re-mask before handing control back to the TSPD */
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
  416. /*---------------------------------------------------------------------
  417. * This entrypoint is used by the TSPD to abort a pre-empted Yielding
  418. * SMC. It could be on behalf of non-secure world or because a CPU
  419. * suspend/CPU off request needs to abort the preempted SMC.
  420. * --------------------------------------------------------------------
  421. */
  422. func tsp_abort_yield_smc_entry
  423. /*
  424. * Exceptions masking is already done by the TSPD when entering this
  425. * hook so there is no need to do it here.
  426. */
  427. /* Reset the stack used by the pre-empted SMC */
  428. bl plat_set_my_stack
  429. /*
  430. * Allow some cleanup such as releasing locks.
  431. */
  432. bl tsp_abort_smc_handler
  433. restore_args_call_smc
  434. /* Should never reach here */
  435. bl plat_panic_handler
  436. endfunc tsp_abort_yield_smc_entry