/*
 * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <lib/xlat_tables/xlat_tables_defs.h>

#define PAGE_START_MASK		~(PAGE_SIZE_MASK)
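
/*
 * For illustration: assuming the usual 4 KB translation granule,
 * PAGE_SIZE_MASK is 0xFFF, so ANDing an address with PAGE_START_MASK
 * (~0xFFF) rounds it down to its page boundary. The PIE fixup code below
 * relies on this to derive the page-aligned image base.
 */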

/*
 * Helper macro to initialise EL3 registers we care about.
 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 * or store one or more registers have an alignment check that the
	 * address being accessed is aligned to the size of the data element(s)
	 * being accessed.
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 * Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable the Asynchronous data abort now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie	a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 * cp11 field is ignored, but is set to the same value as cp10. The
	 * cp10 field is set to allow access to Advanced SIMD and floating
	 * point features from both Security states.
	 *
	 * NSACR.NSTRCDIS: When system register trace is implemented, set to
	 * one so that NS System register accesses to all implemented trace
	 * registers are disabled.
	 * When system register trace is not implemented, this bit is RES0 and
	 * hence set to zero.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
	ldcopr	r1, ID_DFR0
	ubfx	r1, r1, #ID_DFR0_COPTRC_SHIFT, #ID_DFR0_COPTRC_LENGTH
	cmp	r1, #ID_DFR0_COPTRC_SUPPORTED
	bne	1f
	orr	r0, r0, #NSTRCDIS_BIT
1:
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 * to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 * cp11 field is ignored, but is set to the same value as cp10. The
	 * cp10 field is set to allow full access from PL0 and PL1 to
	 * floating-point and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for the field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 * from all exception levels.
	 *
	 * __SOFTFP__: Predefined macro exposed by soft-float toolchains.
	 * ARMv7 and Cortex-A32 (ARMv8/AArch32) have both soft-float and
	 * hard-float toolchain variants; avoid compiling the code below with a
	 * soft-float toolchain, as the "vmsr" instruction will not be
	 * recognized.
	 * ---------------------------------------------------------------------
	 */
#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb
#endif

#if (ARM_ARCH_MAJOR > 7)
	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 * Secure EL1 are disabled.
	 *
	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
	 * in Secure state. This bit is RES0 in versions of the architecture
	 * earlier than ARMv8.5; setting it to 1 has no effect on them.
	 *
	 * SDCR.TTRF: When trace filter control (FEAT_TRF) is implemented, set
	 * to one so that accesses to trace filter control registers in
	 * non-monitor mode generate a Monitor Trap exception, unless the
	 * access generates a higher priority exception.
	 * When FEAT_TRF is not implemented, this bit is RES0.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | \
		      SDCR_SCCD_BIT) & ~SDCR_TTRF_BIT)
	ldcopr	r1, ID_DFR0
	ubfx	r1, r1, #ID_DFR0_TRACEFILT_SHIFT, #ID_DFR0_TRACEFILT_LENGTH
	cmp	r1, #ID_DFR0_TRACEFILT_SUPPORTED
	bne	1f
	orr	r0, r0, #SDCR_TTRF_BIT
1:
	stcopr	r0, SDCR

	/* ---------------------------------------------------------------------
	 * Initialise PMCR, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR.LP: Set to one so that event counter overflow, which
	 * is recorded in PMOVSCLR[0-30], occurs on the increment
	 * that changes PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU
	 * is implemented. This bit is RES0 in versions of the architecture
	 * earlier than ARMv8.5; setting it to 1 has no effect on them.
	 * This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.LC: Set to one so that cycle counter overflow, which
	 * is recorded in PMOVSCLR[31], occurs on the increment
	 * that changes PMCCNTR[63] from 1 to 0.
	 * This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
		      PMCR_LP_BIT)
#else
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
	stcopr	r0, PMCR

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
	ldcopr	r0, ID_PFR0
	and	r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
	cmp	r0, #ID_PFR0_DIT_SUPPORTED
	bne	1f
	mrs	r0, cpsr
	orr	r0, r0, #CPSR_DIT_BIT
	msr	cpsr_cxsf, r0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows some actions to be
 * enabled or disabled.
 *
 * _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register, including
 *	configuring the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 *
 * _pie_fixup_size:
 *	Size of the memory region over which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
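
/*
 * A minimal usage sketch, not taken verbatim from any particular BL stage:
 * a boot image's entrypoint would typically invoke the macro along these
 * lines, where "bl1_exceptions" stands in for that image's vector table
 * symbol and all parameter values are illustrative assumptions only:
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=bl1_exceptions		\
 *		_pie_fixup_size=0
 */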

	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 * Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 * might involve memory reads or writes. Set to zero to select
		 * Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 * with base address held in VBAR.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 * safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		ldr	r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
			SCTLR_V_BIT | SCTLR_DSSBS_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

#if DISABLE_MTPMU
	bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address: if it is not zero,
		 * then this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once, during the primary core's cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label present within the
		 * first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	r0, =pie_fixup
		ldr	r1, =PAGE_START_MASK
		and	r0, r0, r1
		mov_imm	r1, \_pie_fixup_size
		add	r1, r1, r0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */
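
	/*
	 * Worked example of the address computation above, using purely
	 * hypothetical numbers and a 4 KB page size: if the image is linked
	 * at 0x04001000, the "pie_fixup" label lands within the first page,
	 * say at 0x04001010, so r0 = 0x04001010 & PAGE_START_MASK =
	 * 0x04001000 (the compile-time base). With _pie_fixup_size = 0x40000,
	 * r1 becomes 0x04041000, and fixup_gdt_reloc is handed that
	 * [r0, r1) region.
	 */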

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage. If PIE is enabled however,
		 * RO sections including the GOT may be modified during
		 * pie fixup. Therefore, to be on the safe side, invalidate
		 * the entire image region if PIE is enabled.
		 * -----------------------------------------------------------------
		 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		ldr	r0, =__TEXT_START__
#else
		ldr	r0, =__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		ldr	r0, =__RW_START__
#endif /* ENABLE_PIE */
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		ldr	r0, =__BL2_NOLOAD_START__
		ldr	r1, =__BL2_NOLOAD_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif
#endif

		/*
		 * zeromem clobbers r12, which holds the previous BL stage's
		 * arg3, so stash it in r7 across the zeromem calls.
		 */
		mov	r7, r12
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_END__
		sub	r1, r1, r0
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_END_UNALIGNED__
		sub	r1, r1, r0
		bl	zeromem
#endif

		/* Restore r12 */
		mov	r12, r7

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_RAM_END__
		sub	r2, r2, r0
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

#endif /* EL3_COMMON_MACROS_S */