ea_delegate.S

/*
 * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm_macros.S>
#include <assert_macros.S>
#include <bl31/ea_handle.h>
#include <context.h>
#include <cpu_macros.S>
#include <lib/extensions/ras_arch.h>

        .globl  handle_lower_el_sync_ea
        .globl  handle_lower_el_async_ea
        .globl  handle_pending_async_ea
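
/*
 * The helpers below delegate External Aborts (EA) received at EL3 to the
 * platform's error handlers: exception entry points for sync EAs and SErrors
 * from lower ELs, preludes that screen the syndrome for uncontainable
 * errors, and the common delegation path (ea_proceed).
 */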

/*
 * This function handles Synchronous External Aborts from lower EL.
 *
 * It delegates the handling of the EA to the platform handler, and upon
 * successfully handling the EA, exits EL3; otherwise panics.
 *
 * This function assumes x30 has been saved.
 */
func handle_lower_el_sync_ea
        mrs     x30, esr_el3
        ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

        /* Check for I/D aborts from lower EL */
        cmp     x30, #EC_IABORT_LOWER_EL
        b.eq    1f

        cmp     x30, #EC_DABORT_LOWER_EL
        b.eq    1f

        /* EA other than above are unhandled exceptions */
        no_ret  report_unhandled_exception
1:
        /*
         * Save general purpose and ARMv8.3-PAuth registers (if enabled).
         * Also save PMCR_EL0 and set the PSTATE to a known state.
         */
        bl      prepare_el3_entry

#if ENABLE_PAUTH
        /* Load and program APIAKey firmware key */
        bl      pauth_load_bl31_apiakey
#endif

        /* Setup exception class and syndrome arguments for platform handler */
        mov     x0, #ERROR_EA_SYNC
        mrs     x1, esr_el3
        bl      delegate_sync_ea

        /* el3_exit assumes SP_EL0 on entry */
        msr     spsel, #MODE_SP_EL0
        b       el3_exit
endfunc handle_lower_el_sync_ea

/*
 * This function handles SErrors from lower ELs.
 *
 * It delegates the handling of the EA to the platform handler, and upon
 * successfully handling the EA, exits EL3; otherwise panics.
 *
 * This function assumes x30 has been saved.
 */
func handle_lower_el_async_ea
        /*
         * Save general purpose and ARMv8.3-PAuth registers (if enabled).
         * Also save PMCR_EL0 and set the PSTATE to a known state.
         */
        bl      prepare_el3_entry

#if ENABLE_PAUTH
        /* Load and program APIAKey firmware key */
        bl      pauth_load_bl31_apiakey
#endif

        /* Setup exception class and syndrome arguments for platform handler */
        mov     x0, #ERROR_EA_ASYNC
        mrs     x1, esr_el3
        bl      delegate_async_ea

        /* el3_exit assumes SP_EL0 on entry */
        msr     spsel, #MODE_SP_EL0
        b       el3_exit
endfunc handle_lower_el_async_ea

/*
 * Handler for async EA from lower EL synchronized at EL3 entry in FFH mode.
 *
 * This scenario may arise when there is an error (EA) in the system which has
 * not yet been signalled to the PE while executing in a lower EL. During entry
 * into EL3, the error is synchronized, either implicitly or explicitly,
 * causing an async EA to become pending at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A), and if the EA routing model is
 * Firmware First handling (FFH, SCR_EL3.EA = 1), this handler first handles
 * the pending EA and then handles the original exception.
 *
 * This function assumes x30 has been saved.
 */
func handle_pending_async_ea
        /*
         * Prepare for nested handling of EA. Stash sysregs clobbered by the
         * nested exception and its handler.
         */
        str     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
        mrs     x30, esr_el3
        str     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
        mrs     x30, spsr_el3
        str     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
        mrs     x30, elr_el3
        str     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
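
        /*
         * Set the nested-EA flag. The EL3 SError vector entry is expected to
         * check this flag to tell the synchronized, expected EA apart from a
         * genuine SError taken while running in EL3.
         */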
        mov     x30, #1
        str     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
        /*
         * Restore the original x30 saved as part of entering EL3. This is
         * not required by the current function itself, but by the EL3 SError
         * vector entry taken once the PSTATE.A bit is unmasked: we restore
         * x30 here so that the same value is saved again by that vector
         * entry.
         */
        ldr     x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

        /*
         * After clearing the PSTATE.A bit, the pending SError will be taken
         * at the current EL. Issue an explicit synchronization event to
         * ensure the newly unmasked interrupt is taken immediately.
         */
        unmask_async_ea
        /* Restore the original exception information and zero the storage */
        ldr     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
        msr     elr_el3, x30
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
        ldr     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
        msr     spsr_el3, x30
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
        ldr     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
        msr     esr_el3, x30
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]

        /*
         * If the original exception corresponds to an SError from a lower
         * EL, eret back to the lower EL; otherwise return to the vector
         * table for handling of the original exception.
         */
        ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        cmp     x30, #EC_SERROR
        ldr     x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
        b.eq    1f
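        /* Not an SError: return to the vector table for the original exception */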
        ret
1:
        exception_return
endfunc handle_pending_async_ea
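
/*
 * For reference, a minimal sketch of what the unmask_async_ea macro used
 * above is expected to do; the actual macro is defined in the common asm
 * macros, so treat this as illustrative rather than authoritative:
 *
 *      .macro unmask_async_ea
 *      msr     daifclr, #DAIF_ABT_BIT
 *      isb
 *      .endm
 */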

/*
 * Prelude for Synchronous External Abort handling. This function assumes
 * that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_sync_ea
#if ENABLE_FEAT_RAS
        /*
         * Check for Uncontainable error type. If so, route to the platform
         * fatal error handler rather than the generic EA one.
         */
        ubfx    x2, x1, #EABORT_SET_SHIFT, #EABORT_SET_WIDTH
        cmp     x2, #ERROR_STATUS_SET_UC
        b.ne    1f

        /* Check fault status code */
        ubfx    x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
        cmp     x3, #SYNC_EA_FSC
        b.ne    1f

        no_ret  plat_handle_uncontainable_ea
1:
#endif
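        /* Tail call: ea_proceed returns directly to this function's caller */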
        b       ea_proceed
endfunc delegate_sync_ea

/*
 * Prelude for Asynchronous External Abort handling. This function assumes
 * that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_async_ea
#if ENABLE_FEAT_RAS
        /*
         * Check the Exception Class to ensure this is an SError, as this
         * function should only be invoked for SErrors. If that is not the
         * case, which implies either a HW error or a programming error,
         * panic.
         */
        ubfx    x2, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        cmp     x2, #EC_SERROR
        b.ne    el3_panic

        /*
         * Check for Implementation Defined Syndrome. If so, skip checking
         * for the Uncontainable error type in the syndrome, as its format
         * is unknown.
         */
        tbnz    x1, #SERROR_IDS_BIT, 1f

        /* AET is only valid when DFSC is 0x11 (SError) */
        ubfx    x2, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
        cmp     x2, #DFSC_SERROR
        b.ne    1f

        /*
         * Check for Uncontainable error type. If so, route to the platform
         * fatal error handler rather than the generic EA one.
         */
        ubfx    x3, x1, #EABORT_AET_SHIFT, #EABORT_AET_WIDTH
        cmp     x3, #ERROR_STATUS_UET_UC
        b.ne    1f

        no_ret  plat_handle_uncontainable_ea
1:
#endif

        b       ea_proceed
endfunc delegate_async_ea
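
/*
 * For reference, the platform handler invoked from ea_proceed below
 * (plat_ea_handler) is expected to have a C prototype along the following
 * lines, matching the argument setup performed there; treat this as an
 * illustrative sketch rather than the authoritative declaration:
 *
 *      void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome,
 *                           void *cookie, void *handle, uint64_t flags);
 */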

/*
 * Delegate External Abort handling to the platform's EA handler. This
 * function assumes that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func ea_proceed
        /*
         * If the ESR loaded earlier is not zero, we were already processing
         * an EA, and this is a double fault.
         */
        ldr     x5, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
        cbz     x5, 1f
        no_ret  plat_handle_double_fault
1:
        /* Save EL3 state */
        mrs     x2, spsr_el3
        mrs     x3, elr_el3
        stp     x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

        /*
         * Save ESR as handling might involve lower ELs, and returning to
         * EL3 from there would trample the original ESR.
         */
        mrs     x4, scr_el3
        mrs     x5, esr_el3
        stp     x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

        /*
         * Setup rest of arguments, and call platform External Abort
         * handler.
         *
         * x0: EA reason (already in place)
         * x1: Exception syndrome (already in place)
         * x2: Cookie (unused for now)
         * x3: Context pointer
         * x4: Flags (security state from SCR for now)
         */
        mov     x2, xzr
        mov     x3, sp
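        /* Extract SCR_EL3.NS (bit 0) as the security state of the interrupted world */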
        ubfx    x4, x4, #0, #1

        /* Switch to runtime stack */
        ldr     x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
        msr     spsel, #MODE_SP_EL0
        mov     sp, x5
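
        /* Stash the return address in x29 (callee-saved) across the handler call */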
        mov     x29, x30
#if ENABLE_ASSERTIONS
        /* Stash the stack pointer */
        mov     x28, sp
#endif
        bl      plat_ea_handler

#if ENABLE_ASSERTIONS
        /*
         * Error handling flows might involve long jumps; so upon returning
         * from the platform error handler, validate that we've completely
         * unwound the stack.
         */
        mov     x27, sp
        cmp     x28, x27
        ASM_ASSERT(eq)
#endif
        /* Make SP point to context */
        msr     spsel, #MODE_SP_ELX

        /* Restore EL3 state */
        ldp     x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
        msr     spsr_el3, x1
        msr     elr_el3, x2

        /* Restore ESR_EL3 and SCR_EL3 */
        ldp     x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
        msr     scr_el3, x3
        msr     esr_el3, x4
#if ENABLE_ASSERTIONS
        cmp     x4, xzr
        ASM_ASSERT(ne)
#endif

        /* Clear ESR storage */
        str     xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]

        ret     x29
endfunc ea_proceed