/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <cpu_macros.S>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

        .globl  runtime_exceptions

        .globl  sync_exception_sp_el0
        .globl  irq_sp_el0
        .globl  fiq_sp_el0
        .globl  serror_sp_el0

        .globl  sync_exception_sp_elx
        .globl  irq_sp_elx
        .globl  fiq_sp_elx
        .globl  serror_sp_elx

        .globl  sync_exception_aarch64
        .globl  irq_aarch64
        .globl  fiq_aarch64
        .globl  serror_aarch64

        .globl  sync_exception_aarch32
        .globl  irq_aarch32
        .globl  fiq_aarch32
        .globl  serror_aarch32

/*
 * Save LR and make x30 available, as most of the routines in the vector
 * entries need a free register.
 */
.macro save_x30
        str     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
.endm

.macro restore_x30
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
.endm

/*
 * Macro that synchronizes errors (EA) and checks for a pending SError.
 * On detecting a pending SError it either reflects it back to the lower
 * EL (KFH) or handles it in EL3 (FFH), based on the EA routing model.
 */
.macro sync_and_handle_pending_serror
        synchronize_errors
        mrs     x30, ISR_EL1
        tbz     x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
        mrs     x30, scr_el3
        tst     x30, #SCR_EA_BIT
        b.eq    1f
        bl      handle_pending_async_ea
        b       2f
#endif
1:
        /* This function never returns, but needs LR for decision making */
        bl      reflect_pending_async_ea_to_lower_el
2:
.endm
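
/*
 * Illustrative C-style sketch of the routing decision made by the macro
 * above (comment only, not part of the build; the system register accessors
 * are hypothetical stand-ins, the other names mirror the assembly):
 *
 *      synchronize_errors();
 *      if ((read_isr_el1() & (1UL << ISR_A_SHIFT)) != 0UL) {
 *      #if FFH_SUPPORT
 *              if ((read_scr_el3() & SCR_EA_BIT) != 0UL)
 *                      handle_pending_async_ea();              // FFH: handle in EL3
 *              else
 *      #endif
 *                      reflect_pending_async_ea_to_lower_el(); // KFH: reflect to lower EL
 *      }
 */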

/* ---------------------------------------------------------------------
 * This macro handles Synchronous exceptions.
 * Only SMC exceptions are supported.
 * ---------------------------------------------------------------------
 */
.macro handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
        /*
         * Read the timestamp value and store it in per-cpu data. The value
         * will be extracted from per-cpu data by the C level SMC handler and
         * saved to the PMF timestamp region.
         */
        mrs     x30, cntpct_el0
        str     x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
        mrs     x29, tpidr_el3
        str     x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
        ldr     x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

        mrs     x30, esr_el3
        ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

        /* Handle SMC exceptions separately from other synchronous exceptions */
        cmp     x30, #EC_AARCH32_SMC
        b.eq    smc_handler32

        cmp     x30, #EC_AARCH64_SMC
        b.eq    sync_handler64

        cmp     x30, #EC_AARCH64_SYS
        b.eq    sync_handler64

        cmp     x30, #EC_IMP_DEF_EL3
        b.eq    imp_def_el3_handler

        /* If FFH support is enabled, try to handle lower EL EA exceptions. */
#if FFH_SUPPORT
        mrs     x30, scr_el3
        tst     x30, #SCR_EA_BIT
        b.eq    1f
        b       handle_lower_el_sync_ea
#endif
1:
        /* Synchronous exceptions other than the above are unhandled */
        b       report_unhandled_exception
.endm
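
/*
 * Roughly equivalent C sketch of the EC-based dispatch above (comment only,
 * not part of the build). The case targets are the assembly labels/functions;
 * EXTRACT_EC stands for the ESR_EL3.EC field extraction done with ubfx above.
 *
 *      switch (EXTRACT_EC(read_esr_el3())) {
 *      case EC_AARCH32_SMC:    goto smc_handler32;
 *      case EC_AARCH64_SMC:    goto sync_handler64;
 *      case EC_AARCH64_SYS:    goto sync_handler64;
 *      case EC_IMP_DEF_EL3:    goto imp_def_el3_handler;
 *      default:
 *      #if FFH_SUPPORT
 *              if ((read_scr_el3() & SCR_EA_BIT) != 0UL)
 *                      goto handle_lower_el_sync_ea;
 *      #endif
 *              goto report_unhandled_exception;
 *      }
 */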

vector_base runtime_exceptions

/* ---------------------------------------------------------------------
 * Current EL with SP_EL0 : 0x0 - 0x200
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
        stp     x29, x30, [sp, #-16]!

        mrs     x30, esr_el3
        ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

        /* Check for BRK */
        cmp     x30, #EC_BRK
        b.eq    brk_handler

        ldp     x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

        /* We don't expect any synchronous exceptions from EL3 */
        b       report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
        /*
         * EL3 code is non-reentrant. Any asynchronous exception is a serious
         * error. Loop infinitely.
         */
        b       report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
        b       report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
        no_ret  plat_handle_el3_ea
end_vector_entry serror_sp_el0

/* ---------------------------------------------------------------------
 * Current EL with SP_ELx: 0x200 - 0x400
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_sp_elx
        /*
         * This exception will trigger if anything went wrong during a previous
         * exception entry or exit or while handling an earlier unexpected
         * synchronous exception. There is a high probability that SP_EL3 is
         * corrupted.
         */
        b       report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
        b       report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
        b       report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
        /*
         * This will trigger if the exception was taken due to an SError in EL3,
         * or because of pending asynchronous external aborts from a lower EL
         * that were triggered by implicit/explicit synchronization in EL3
         * (SCR_EL3.EA = 1) during EL3 entry. In the former case we continue
         * with "plat_handle_el3_ea". The latter case occurs when the PSTATE.A
         * bit is cleared in "handle_pending_async_ea", which means we are
         * taking a nested exception in EL3. Call the handler for async EAs,
         * which will eret back to the original EL3 handler if this is a nested
         * exception. Also, unmask EAs so that we catch any further EAs that
         * arise while handling this nested exception at EL3.
         */
        save_x30
        ldr     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        cbz     x30, 1f
        /*
         * This is nested exception handling, clear the flag to avoid taking
         * this path for further exceptions caused by EA handling.
         */
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        unmask_async_ea
        b       handle_lower_el_async_ea
1:
        restore_x30
#endif
        no_ret  plat_handle_el3_ea
end_vector_entry serror_sp_elx
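
/*
 * Illustrative C-style sketch of the serror_sp_elx path above when
 * FFH_SUPPORT is enabled (comment only, not part of the build; "ctx" stands
 * for the EL3 state region of the cpu_context addressed via SP_EL3):
 *
 *      if (ctx->nested_ea_flag != 0ULL) {
 *              ctx->nested_ea_flag = 0ULL;     // avoid re-taking this path
 *              unmask_async_ea();
 *              handle_lower_el_async_ea();     // erets back to the EL3 handler
 *      } else {
 *              plat_handle_el3_ea();           // does not return
 *      }
 */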

/* ---------------------------------------------------------------------
 * Lower EL using AArch64 : 0x400 - 0x600
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_aarch64
        /*
         * This exception vector will be the entry point for SMCs and, most
         * commonly, for traps that are unhandled at lower ELs. SP_EL3 should
         * point to a valid cpu context where the general purpose and system
         * register state can be saved.
         */
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        b       handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        b       handle_interrupt_exception
end_vector_entry fiq_aarch64

/*
 * Need to synchronize any outstanding SError since we can get a burst of
 * errors. So reuse the sync mechanism to catch any further errors which are
 * pending.
 */
vector_entry serror_aarch64
#if FFH_SUPPORT
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        b       handle_lower_el_async_ea
#else
        b       report_unhandled_exception
#endif
end_vector_entry serror_aarch64

/* ---------------------------------------------------------------------
 * Lower EL using AArch32 : 0x600 - 0x800
 * ---------------------------------------------------------------------
 */
vector_entry sync_exception_aarch32
        /*
         * This exception vector will be the entry point for SMCs and, most
         * commonly, for traps that are unhandled at lower ELs. SP_EL3 should
         * point to a valid cpu context where the general purpose and system
         * register state can be saved.
         */
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        b       handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        b       handle_interrupt_exception
end_vector_entry fiq_aarch32

/*
 * Need to synchronize any outstanding SError since we can get a burst of
 * errors. So reuse the sync mechanism to catch any further errors which are
 * pending.
 */
vector_entry serror_aarch32
#if FFH_SUPPORT
        save_x30
        apply_at_speculative_wa
        sync_and_handle_pending_serror
        b       handle_lower_el_async_ea
#else
        b       report_unhandled_exception
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
        .section .rodata.brk_string, "aS"
brk_location:
        .asciz "Error at instruction 0x"
brk_message:
        .asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

/* ---------------------------------------------------------------------
 * The following code handles secure monitor calls.
 * Depending upon the execution state from which the SMC was invoked, it
 * frees some general purpose registers to perform the remaining tasks.
 * These involve finding the runtime service handler that is the target
 * of the SMC and switching to the runtime stack (SP_EL0) before calling
 * the handler.
 *
 * Note that x30 has been explicitly saved and can be used here.
 * ---------------------------------------------------------------------
 */
func sync_exception_handler
smc_handler32:
        /* Check whether AArch32 issued an SMC64 */
        tbnz    x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
        /* NOTE: The code below must preserve x0-x4 */

        /*
         * Save general purpose and ARMv8.3-PAuth registers (if enabled).
         * Also save PMCR_EL0 and set the PSTATE to a known state.
         */
        bl      prepare_el3_entry

#if ENABLE_PAUTH
        /* Load and program APIAKey firmware key */
        bl      pauth_load_bl31_apiakey
#endif

        /*
         * Populate the parameters for the SMC handler.
         * We already have x0-x4 in place. x5 will point to a cookie (not used
         * now). x6 will point to the context structure (SP_EL3) and x7 will
         * contain flags we need to pass to the handler.
         */
        mov     x5, xzr
        mov     x6, sp

        /*
         * Restore the saved C runtime stack value which will become the new
         * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
         * structure prior to the last ERET from EL3.
         */
        ldr     x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

        /* Switch to SP_EL0 */
        msr     spsel, #MODE_SP_EL0

        /*
         * Save the SPSR_EL3 and ELR_EL3 in case there is a world
         * switch during SMC handling.
         * TODO: Revisit if all system registers can be saved later.
         */
        mrs     x16, spsr_el3
        mrs     x17, elr_el3
        stp     x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

        /* Load SCR_EL3 */
        mrs     x18, scr_el3

        /* Check for system register traps */
        mrs     x16, esr_el3
        ubfx    x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        cmp     x17, #EC_AARCH64_SYS
        b.eq    sysreg_handler64

        /* Clear flag register */
        mov     x7, xzr

#if ENABLE_RME
        /* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
        ubfx    x7, x18, #SCR_NSE_SHIFT, #1

        /*
         * Shift the copied SCR_EL3.NSE bit by 5 to create space for the
         * SCR_EL3.NS bit. Bit 5 of the flag corresponds to the SCR_EL3.NSE
         * bit.
         */
        lsl     x7, x7, #5
#endif /* ENABLE_RME */

        /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
        bfi     x7, x18, #0, #1
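
        /*
         * Illustrative C-style sketch of the flags word built above (comment
         * only, not part of the build):
         *
         *      uint64_t scr   = read_scr_el3();
         *      uint64_t flags = 0ULL;
         *      // ENABLE_RME only: bit 5 of flags = SCR_EL3.NSE
         *      flags |= ((scr >> SCR_NSE_SHIFT) & 1ULL) << 5;
         *      // bit 0 of flags = SCR_EL3.NS (caller's security state)
         *      flags |= scr & 1ULL;
         */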

        mov     sp, x12

        /*
         * Per the SMCCC documentation, bits [23:17] must be zero for Fast
         * SMCs. Other values are reserved for future use. Ensure that these
         * bits are zero; if not, report it as an unknown SMC.
         */
        tbz     x0, #FUNCID_TYPE_SHIFT, 2f /* Skip the check if it's a Yield Call */
        tst     x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
        b.ne    smc_unknown

        /*
         * Per SMCCC v1.3 a caller can set the SVE hint bit in the SMC FID
         * passed through x0. Copy the SVE hint bit to flags and mask the
         * bit in smc_fid passed to the standard service dispatcher.
         * A service/dispatcher can retrieve the SVE hint bit state from
         * flags using the appropriate helper.
         */
2:
        and     x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
        orr     x7, x7, x16
        bic     x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
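
        /*
         * Roughly equivalent C sketch of the FID handling above (comment
         * only, not part of the build; "smc_fid" stands for the value in x0):
         *
         *      if ((smc_fid & (1U << FUNCID_TYPE_SHIFT)) != 0U) {      // Fast Call
         *              if ((smc_fid & (FUNCID_FC_RESERVED_MASK <<
         *                              FUNCID_FC_RESERVED_SHIFT)) != 0U)
         *                      return SMC_UNK;                 // reserved bits set
         *      }
         *      flags   |= smc_fid & (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT);
         *      smc_fid &= ~(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT);
         */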

        /* Get the unique owning entity number */
        ubfx    x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
        ubfx    x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
        orr     x16, x16, x15, lsl #FUNCID_OEN_WIDTH

        /* Load the descriptor index from the array of indices */
        adrp    x14, rt_svc_descs_indices
        add     x14, x14, :lo12:rt_svc_descs_indices
        ldrb    w15, [x14, x16]

        /* Any index greater than 127 is invalid. Check bit 7. */
        tbnz    w15, 7, smc_unknown

        /*
         * Get the descriptor using the index
         * x11 = (base + off), w15 = index
         *
         * handler = (base + off) + (index << log2(size))
         */
        adr_l   x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
        lsl     w10, w15, #RT_SVC_SIZE_LOG2
        ldr     x15, [x11, w10, uxtw]
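
        /*
         * Illustrative C-style sketch of the handler lookup above (comment
         * only, not part of the build; the rt_svc_desc_t layout is assumed
         * from the RT_SVC_* offsets used by the assembly, and "oen"/"type"
         * are the fields extracted from the FID above):
         *
         *      unsigned int key = oen | (type << FUNCID_OEN_WIDTH);
         *      unsigned int idx = rt_svc_descs_indices[key];
         *      if (idx > 127U)
         *              return SMC_UNK;
         *      handler = ((rt_svc_desc_t *)__RT_SVC_DESCS_START__)[idx].handle;
         */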

        /*
         * Call the Secure Monitor Call handler and then drop directly into
         * el3_exit() which will program any remaining architectural state
         * prior to issuing the ERET to the desired lower EL.
         */
#if DEBUG
        cbz     x15, rt_svc_fw_critical_error
#endif
        blr     x15

        b       el3_exit

sysreg_handler64:
        mov     x0, x16         /* ESR_EL3, containing syndrome information */
        mov     x1, x6          /* lower EL's context */
        mov     x19, x6         /* save context pointer for after the call */
        mov     sp, x12         /* EL3 runtime stack, as loaded above */

        /* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
        bl      handle_sysreg_trap
        /*
         * returns:
         *   -1: unhandled trap, inject UNDEF into the lower EL
         *    0: handled trap, return to the trapping instruction (repeating it)
         *    1: handled trap, return to the next instruction
         */
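
        /*
         * Roughly equivalent C sketch of how the return value is consumed
         * below (comment only, not part of the build):
         *
         *      int ret = handle_sysreg_trap(esr_el3, ctx);
         *      if (ret < 0) {
         *              inject_undef64(ctx);    // AArch64 lower EL only
         *      } else if (ret > 0) {
         *              ctx->elr_el3 += 4;      // skip the trapped instruction
         *      }
         *      el3_exit();
         */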

        tst     w0, w0
        b.mi    2f      /* negative: UNDEF injection */
        b.eq    1f      /* zero: do not change ELR_EL3 */

        /* positive: advance the PC to continue after the instruction */
        ldr     x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
        add     x1, x1, #4
        str     x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
        b       el3_exit
2:
        /*
         * UNDEF injection into the lower EL. This is only supported for a
         * lower EL in AArch64 mode; for AArch32 mode it does elx_panic as
         * before.
         */
        mrs     x0, spsr_el3
        tst     x0, #(SPSR_M_MASK << SPSR_M_SHIFT)
        b.ne    elx_panic

        /* Pass the context pointer as an argument to inject_undef64 */
        mov     x0, x19
        bl      inject_undef64

        b       el3_exit

smc_unknown:
        /*
         * Unknown SMC call. Populate the return value with SMC_UNK and call
         * el3_exit(), which will restore the remaining architectural state,
         * i.e. the SYS, GP and PAuth registers (if any), prior to issuing
         * the ERET to the desired lower EL.
         */
        mov     x0, #SMC_UNK
        str     x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        b       el3_exit

smc_prohibited:
        restore_ptw_el1_sys_regs
        ldp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
        mov     x0, #SMC_UNK
        exception_return

#if DEBUG
rt_svc_fw_critical_error:
        /* Switch to SP_ELx */
        msr     spsel, #MODE_SP_ELX
        no_ret  report_unhandled_exception
#endif
endfunc sync_exception_handler

/* ---------------------------------------------------------------------
 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
 * interrupts.
 *
 * Note that x30 has been explicitly saved and can be used here.
 * ---------------------------------------------------------------------
 */
func handle_interrupt_exception
        /*
         * Save general purpose and ARMv8.3-PAuth registers (if enabled).
         * Also save PMCR_EL0 and set the PSTATE to a known state.
         */
        bl      prepare_el3_entry

#if ENABLE_PAUTH
        /* Load and program APIAKey firmware key */
        bl      pauth_load_bl31_apiakey
#endif

        /* Save the EL3 system registers needed to return from this exception */
        mrs     x0, spsr_el3
        mrs     x1, elr_el3
        stp     x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

        /* Switch to the runtime stack i.e. SP_EL0 */
        ldr     x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
        mov     x20, sp
        msr     spsel, #MODE_SP_EL0
        mov     sp, x2

        /*
         * Find out whether this is a valid interrupt type.
         * If the interrupt controller reports a spurious interrupt then return
         * to where we came from.
         */
        bl      plat_ic_get_pending_interrupt_type
        cmp     x0, #INTR_TYPE_INVAL
        b.eq    interrupt_exit

        /*
         * Get the registered handler for this interrupt type.
         * A NULL return value could be because of the following conditions:
         *
         * a. An interrupt of a type was routed correctly but a handler for its
         *    type was not registered.
         *
         * b. An interrupt of a type was not routed correctly so a handler for
         *    its type was not registered.
         *
         * c. An interrupt of a type was routed correctly to EL3, but was
         *    deasserted before its pending state could be read. Another
         *    interrupt of a different type pended at the same time and its
         *    type was reported as pending instead. However, a handler for this
         *    type was not registered.
         *
         * a. and b. can only happen due to a programming error. The
         * occurrence of c. could be beyond the control of Trusted Firmware.
         * It makes sense to return from this exception instead of reporting an
         * error.
         */
        bl      get_interrupt_type_handler
        cbz     x0, interrupt_exit
        mov     x21, x0

        mov     x0, #INTR_ID_UNAVAILABLE

        /* Set the current security state in the 'flags' parameter */
        mrs     x2, scr_el3
        ubfx    x1, x2, #0, #1

        /* Restore the reference to the 'handle' i.e. SP_EL3 */
        mov     x2, x20

        /* x3 will point to a cookie (not used now) */
        mov     x3, xzr

        /* Call the interrupt type handler */
        blr     x21

interrupt_exit:
        /* Return from exception, possibly in a different security state */
        b       el3_exit
endfunc handle_interrupt_exception
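
/*
 * Roughly equivalent C sketch of handle_interrupt_exception above (comment
 * only, not part of the build). The handler arguments follow the x0-x3 set
 * up by the assembly; "sp_el3_ctx" stands for the cpu_context addressed via
 * SP_EL3.
 *
 *      uint32_t type = plat_ic_get_pending_interrupt_type();
 *      if (type != INTR_TYPE_INVAL) {
 *              interrupt_type_handler_t handler = get_interrupt_type_handler(type);
 *              if (handler != NULL) {
 *                      uint32_t flags = read_scr_el3() & 1U;   // NS bit = security state
 *                      handler(INTR_ID_UNAVAILABLE, flags, sp_el3_ctx, NULL);
 *              }
 *      }
 *      el3_exit();     // possibly in a different security state
 */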

func imp_def_el3_handler
        /* Save GP registers */
        stp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        stp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
        stp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

        /* Get the cpu_ops pointer */
        bl      get_cpu_ops_ptr

        /* Get the cpu_ops exception handler */
        ldr     x0, [x0, #CPU_E_HANDLER_FUNC]

        /*
         * If the reserved function pointer is NULL, this CPU does not have an
         * implementation-defined exception handler function.
         */
        cbz     x0, el3_handler_exit
        mrs     x1, esr_el3
        ubfx    x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        blr     x0
el3_handler_exit:
        ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        ldp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
        ldp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
        restore_x30
        no_ret  report_unhandled_exception
endfunc imp_def_el3_handler

/*
 * Handler for an async EA from a lower EL synchronized at EL3 entry in KFH
 * mode.
 *
 * This scenario may arise when there is an error (EA) in the system which has
 * not yet been signaled to the PE while executing in a lower EL. During entry
 * into EL3 the errors are synchronized, either implicitly or explicitly,
 * causing an async EA to pend at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A), and if the EA routing model is
 * KFH (SCR_EL3.EA = 0), this handler reflects the error back to the lower EL.
 *
 * This function assumes x30 has been saved.
 */
func reflect_pending_async_ea_to_lower_el
        /*
         * As the original exception was not handled we need to ensure that we
         * return back to the instruction which caused the exception. To achieve
         * that, eret to "elr - 4" (label "subtract_elr_el3") for an SMC, or
         * simply eret otherwise (label "skip_smc_check").
         *
         * LIMITATION: The async EA may be masked at the target exception level,
         * or its priority with respect to the EL3/secure interrupt may be lower,
         * which causes a back and forth between the lower EL and EL3. In that
         * case we track the loop count in "CTX_NESTED_EA_FLAG" and leverage the
         * previous ELR in "CTX_SAVED_ELR_EL3" to detect the cycle, and panic to
         * indicate a problem here (label "check_loop_ctr"). While in this cycle
         * the loop counter retains its value, but a normal el3_exit clears the
         * flag. However, setting SCR_EL3.IESB = 1 should give priority to SError
         * handling, as per the AArch64.TakeException pseudocode in the Arm ARM.
         *
         * TODO: In the future, if EL3 gains the capability to inject a virtual
         * SError into lower ELs, we can remove the el3_panic, handle the
         * original exception first, and inject an SError into the lower EL
         * before ereting back.
         */
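
        /*
         * Illustrative C-style sketch of the replay-loop detection below
         * (comment only, not part of the build; "ctx" stands for the EL3
         * state region of the cpu_context addressed via SP_EL3):
         *
         *      if (read_elr_el3() == ctx->saved_elr_el3) {
         *              if (++ctx->nested_ea_flag >= ASYNC_EA_REPLAY_COUNTER)
         *                      el3_panic();    // stuck reflecting the same EA
         *      } else {
         *              ctx->saved_elr_el3 = read_elr_el3();
         *              ctx->nested_ea_flag = 0ULL;
         *      }
         */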

        stp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
        ldr     x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
        mrs     x28, elr_el3
        cmp     x29, x28
        b.eq    check_loop_ctr
        str     x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
        /* Zero the loop counter */
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        b       skip_loop_ctr
check_loop_ctr:
        ldr     x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        add     x29, x29, #1
        str     x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        cmp     x29, #ASYNC_EA_REPLAY_COUNTER
        b.ge    el3_panic

skip_loop_ctr:
        /*
         * Logic to distinguish whether we came from an SMC or any other
         * exception. Use the offset within the vector table to work out which
         * exception we are handling. Within each 0x200 block of the vector
         * table, offsets 0x0-0x80 hold the synchronous exception entry and
         * 0x80-0x200 hold the asynchronous entries. Use the vector base
         * address (vbar_el3) and the exception return address (LR) to
         * calculate whether the address we came from falls within any of
         * "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680".
         */
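
        /*
         * Worked example of the offset arithmetic below (comment only, not
         * part of the build): with x30 holding the return address inside the
         * vector entry,
         *
         *      offset    = (x30 - vbar_el3) & 0x1ff;
         *      from_sync = (offset < 0x80);
         *
         * e.g. a return address of vbar_el3 + 0x42c gives offset 0x2c, which
         * is below 0x80, so we came from a synchronous entry.
         */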

        mrs     x29, vbar_el3
        sub     x30, x30, x29
        and     x30, x30, #0x1ff
        cmp     x30, #0x80
        b.ge    skip_smc_check

        /* It's a synchronous exception. Now check whether it is an SMC or not. */
        mrs     x30, esr_el3
        ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        cmp     x30, #EC_AARCH32_SMC
        b.eq    subtract_elr_el3
        cmp     x30, #EC_AARCH64_SMC
        b.eq    subtract_elr_el3
        b       skip_smc_check
subtract_elr_el3:
        sub     x28, x28, #4
skip_smc_check:
        msr     elr_el3, x28
        ldp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
        exception_return
endfunc reflect_pending_async_ea_to_lower_el

/* ---------------------------------------------------------------------
 * The following code handles exceptions caused by BRK instructions.
 * Following a BRK instruction, the only real valid course of action is
 * to print some information and panic, as the code that caused it is
 * likely in an inconsistent internal state.
 *
 * This is initially intended to be used in conjunction with
 * __builtin_trap.
 * ---------------------------------------------------------------------
 */
#ifdef MONITOR_TRAPS
func brk_handler
        /* Extract the ISS */
        mrs     x10, esr_el3
        ubfx    x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

        /* Ensure the console is initialized */
        bl      plat_crash_console_init

        adr     x4, brk_location
        bl      asm_print_str
        mrs     x4, elr_el3
        bl      asm_print_hex
        bl      asm_print_newline

        adr     x4, brk_message
        bl      asm_print_str
        mov     x4, x10
        mov     x5, #28
        bl      asm_print_hex_bits
        bl      asm_print_newline

        no_ret  plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */