runtime_exceptions.S
/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <cpu_macros.S>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32
/*
 * Save LR and make x30 available; most of the routines in the vector entries
 * need a free register.
 */
.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
.endm

.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
.endm
/*
 * Macro that synchronizes errors (EA) and checks for a pending SError.
 * On detecting a pending SError it either reflects it back to the lower
 * EL (KFH) or handles it in EL3 (FFH), based on the EA routing model.
 */
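/*
 * As used here: FFH (Firmware First Handling) corresponds to SCR_EL3.EA == 1,
 * i.e. the error is taken and handled in EL3, while KFH (Kernel First
 * Handling) corresponds to SCR_EL3.EA == 0, i.e. the error is reflected back
 * to the lower EL. This follows from the SCR_EA_BIT test below.
 */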
.macro sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/* This function never returns, but it needs LR for decision making. */
	bl	reflect_pending_async_ea_to_lower_el
2:
.endm
/* ---------------------------------------------------------------------
 * This macro handles Synchronous exceptions.
 * SMC exceptions are handled here, along with system register traps,
 * IMPLEMENTATION DEFINED EL3 exceptions and, with FFH support, lower EL
 * external aborts. Anything else is unhandled.
 * ---------------------------------------------------------------------
 */
.macro handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
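	/* Dispatch on the exception class: ESR_EL3.EC, bits [31:26]. */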
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	cmp	x30, #EC_IMP_DEF_EL3
	b.eq	imp_def_el3_handler

	/* With FFH support, try to handle lower EL EA exceptions. */
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	b	handle_lower_el_sync_ea
#endif
1:
	/* Synchronous exceptions other than the above are unhandled */
	b	report_unhandled_exception
.endm

vector_base runtime_exceptions
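/*
 * The AArch64 vector table has four groups of entries (current EL with SP_EL0,
 * current EL with SP_ELx, lower EL using AArch64, lower EL using AArch32).
 * Each group spans 0x200 bytes and contains four 0x80-byte entries, in the
 * order: synchronous, IRQ, FIQ, SError.
 */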
/* ---------------------------------------------------------------------
 * Current EL with SP_EL0 : 0x0 - 0x200
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0
/* ---------------------------------------------------------------------
 * Current EL with SP_ELx: 0x200 - 0x400
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx
vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This will trigger if the exception was taken due to an SError in EL3,
	 * or because of pending asynchronous external aborts from a lower EL
	 * that were triggered by implicit/explicit synchronization in EL3
	 * (SCR_EL3.EA = 1) during EL3 entry. For the former case we continue
	 * with "plat_handle_el3_ea".
	 *
	 * The latter case occurs when the PSTATE.A bit is cleared in
	 * "handle_pending_async_ea", which means we are taking a nested
	 * exception in EL3. Call the handler for async EA, which will ERET back
	 * to the original EL3 handler if it is a nested exception. Also, unmask
	 * EA so that we catch any further EAs that arise while handling this
	 * nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path for further exceptions caused by EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea
	b	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx
/* ---------------------------------------------------------------------
 * Lower EL using AArch64 : 0x400 - 0x600
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will be the entry point for SMCs and, most
	 * commonly, for traps that are unhandled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch64

/*
 * We need to synchronize any outstanding SError, since errors can arrive in
 * bursts. Reuse the synchronization mechanism to catch any further errors
 * that are still pending.
 */
vector_entry serror_aarch64
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch64
/* ---------------------------------------------------------------------
 * Lower EL using AArch32 : 0x600 - 0x800
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will be the entry point for SMCs and, most
	 * commonly, for traps that are unhandled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch32

/*
 * We need to synchronize any outstanding SError, since errors can arrive in
 * bursts. Reuse the synchronization mechanism to catch any further errors
 * that are still pending.
 */
vector_entry serror_aarch32
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch32
#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */
/* ---------------------------------------------------------------------
 * The following code handles secure monitor calls.
 * Depending upon the execution state from which the SMC has been
 * invoked, it frees some general purpose registers to perform the
 * remaining tasks. These involve finding the runtime service handler
 * that is the target of the SMC and switching to the runtime stack
 * (SP_EL0) before calling the handler.
 *
 * Note that x30 has been explicitly saved and can be used here.
 * ---------------------------------------------------------------------
 */
func sync_exception_handler
smc_handler32:
	/* Check whether AArch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0, i.e. the EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save SPSR_EL3 and ELR_EL3 in case there is a world switch during SMC
	 * handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64

	/* Clear flag register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift the copied SCR_EL3.NSE bit by 5 to create space for the
	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to the SCR_EL3.NSE bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1
	mov	sp, x12

	/*
	 * Per SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs. Other values are reserved for future use. Ensure that
	 * these bits are zero; if not, report an unknown SMC.
	 */
	tbz	x0, #FUNCID_TYPE_SHIFT, 2f	/* Skip the check if it's a Yield Call */
	tst	x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
	b.ne	smc_unknown

	/*
	 * Per SMCCC v1.3, a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to flags and mask the
	 * bit in smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from
	 * flags using the appropriate helper.
	 */
2:
	and	x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
	orr	x7, x7, x16
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
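	/*
	 * At this point the 'flags' argument (x7) carries, per the code above:
	 * bit[0] = SCR_EL3.NS, bit[5] = SCR_EL3.NSE (when RME is enabled) and,
	 * assuming the SMCCC v1.3 FID layout, bit[16] = the caller's SVE hint.
	 * For example, a Realm caller (NSE = 1, NS = 1) with no SVE hint would
	 * yield flags == 0x21.
	 */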

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
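	/*
	 * A worked example, assuming the standard SMCCC FID encoding (OEN in
	 * bits [29:24], call type in bit [31]): for the SMC64 PSCI call
	 * 0xC4000003 (CPU_ON), OEN = 4 and type = 1 (fast), so the index used
	 * below is 4 | (1 << FUNCID_OEN_WIDTH) = 0x44.
	 */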

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit(), which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
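	/*
	 * The handler in x15 is expected to follow the runtime service handler
	 * convention (roughly, per runtime_svc.h):
	 * handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags),
	 * matching the x0-x7 register assignments set up above.
	 */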
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, UNDEF injection into lower EL
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	2f	/* negative: undefined exception injection */
	b.eq	1f	/* zero: do not change ELR_EL3 */

	/* positive: advance the PC to continue after the instruction */
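	/* A64 instructions are 4 bytes, hence the fixed increment below. */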
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]

1:
	b	el3_exit

2:
	/*
	 * UNDEF injection into the lower EL. Support is only provided for a
	 * lower EL in AArch64 mode; for AArch32 mode it will do elx_panic as
	 * before.
	 */
	mrs	x0, spsr_el3
	tst	x0, #(SPSR_M_MASK << SPSR_M_SHIFT)
	b.ne	elx_panic

	/* Pass the context pointer as an argument to inject_undef64 */
	mov	x0, x19
	bl	inject_undef64

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the ERET
	 * to the desired lower EL.
	 */
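	/* SMC_UNK is the SMCCC "Unknown Function Identifier" return value, -1. */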
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

/* ---------------------------------------------------------------------
 * This function handles FIQ or IRQ interrupts, i.e. EL3, S-EL1 and NS
 * interrupts.
 *
 * Note that x30 has been explicitly saved and can be used here.
 * ---------------------------------------------------------------------
 */
func handle_interrupt_exception
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of one of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr
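	/*
	 * The registered handler is expected to follow the interrupt type
	 * handler convention (roughly, per interrupt_mgmt.h):
	 * handler(id, flags, handle, cookie), matching x0-x3 above.
	 */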
	/* Call the interrupt type handler */
	blr	x21

interrupt_exit:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
endfunc handle_interrupt_exception

func imp_def_el3_handler
	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the handler function pointer is NULL, this CPU does not have an
	 * implementation defined exception handler function.
	 */
	cbz	x0, el3_handler_exit
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	blr	x0

el3_handler_exit:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	restore_x30
	no_ret	report_unhandled_exception
endfunc imp_def_el3_handler

/*
 * Handler for an async EA from a lower EL synchronized at EL3 entry in KFH
 * mode.
 *
 * This scenario may arise when there is an error (EA) in the system which is
 * not yet signaled to the PE while executing in a lower EL. During entry into
 * EL3, the errors are synchronized either implicitly or explicitly, causing
 * an async EA to pend at EL3.
 *
 * On detecting a pending EA (via ISR_EL1.A), and if the EA routing model is
 * KFH (SCR_EL3.EA = 0), this handler reflects the error back to the lower EL.
 *
 * This function assumes x30 has been saved.
 */
func reflect_pending_async_ea_to_lower_el
	/*
	 * As the original exception was not handled, we need to ensure that we
	 * return to the instruction which caused the exception. To achieve
	 * that, ERET to "elr - 4" (label "subtract_elr_el3") for an SMC, or
	 * simply ERET otherwise (label "skip_smc_check").
	 *
	 * LIMITATION: It could be that the async EA is masked at the target
	 * exception level, or that the priority of the async EA with respect to
	 * the EL3/secure interrupt is lower, which causes back and forth
	 * between the lower EL and EL3. In that case we can track the loop
	 * count in "CTX_NESTED_EA_FLAG" and leverage the previous ELR in
	 * "CTX_SAVED_ELR_EL3" to detect the cycle and panic to indicate a
	 * problem here (label "check_loop_ctr"). If we are in this cycle, the
	 * loop counter retains its value, but on a normal el3_exit the flag
	 * gets cleared. However, setting SCR_EL3.IESB = 1 should give priority
	 * to SError handling, as per the AArch64.TakeException pseudocode in
	 * the Arm ARM.
	 *
	 * TODO: In the future, if EL3 gains the capability to inject a virtual
	 * SError into lower ELs, we can remove the el3_panic, handle the
	 * original exception first and inject an SError into the lower EL
	 * before ERETing back.
	 */
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	mrs	x28, elr_el3
	cmp	x29, x28
	b.eq	check_loop_ctr
	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	/* Zero the loop counter */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	b	skip_loop_ctr
check_loop_ctr:
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	add	x29, x29, #1
	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
	b.ge	el3_panic
skip_loop_ctr:
	/*
	 * Logic to distinguish whether we came from an SMC or from any other
	 * exception. Use the offset within the vector table to determine which
	 * exception we are handling. In each 0x200-byte group of vector
	 * entries, offsets 0x0-0x80 hold the sync exception entry and
	 * 0x80-0x200 hold the async exception entries. Use the vector base
	 * address (vbar_el3) and the exception offset (LR) to calculate whether
	 * the address we came from falls in any of "0x0-0x80", "0x200-0x280",
	 * "0x400-0x480" or "0x600-0x680".
	 */
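	/*
	 * For example, an SMC from AArch64 enters the vector at vbar_el3 +
	 * 0x400; the link register still points into that 0x80-byte entry, so
	 * (LR - vbar_el3) & 0x1ff is below 0x80 and the SMC check is taken.
	 */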
	mrs	x29, vbar_el3
	sub	x30, x30, x29
	and	x30, x30, #0x1ff
	cmp	x30, #0x80
	b.ge	skip_smc_check

	/* It is a synchronous exception. Now check whether it is an SMC. */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_AARCH32_SMC
	b.eq	subtract_elr_el3
	cmp	x30, #EC_AARCH64_SMC
	b.eq	subtract_elr_el3
	b	skip_smc_check
subtract_elr_el3:
	sub	x28, x28, #4
skip_smc_check:
	msr	elr_el3, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	exception_return
endfunc reflect_pending_async_ea_to_lower_el

/* ---------------------------------------------------------------------
 * The following code handles exceptions caused by BRK instructions.
 * Following a BRK instruction, the only real valid course of action is
 * to print some information and panic, as the code that caused it is
 * likely in an inconsistent internal state.
 *
 * This is initially intended to be used in conjunction with
 * __builtin_trap.
 * ---------------------------------------------------------------------
 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */