bl31_traps.c

/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */
#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
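
/*
 * Top-level dispatcher for traps with exception syndrome EC=0x18
 * (trapped MSR/MRS/system instruction). Recognised opcodes are routed
 * to their platform hooks; everything else is reported back to the
 * caller as TRAP_RET_UNHANDLED.
 */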
int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
	uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;

#if ENABLE_FEAT_RNG_TRAP
	if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
		return plat_handle_rng_trap(esr_el3, ctx);
	}
#endif

#if IMPDEF_SYSREG_TRAP
	if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
		return plat_handle_impdef_trap(esr_el3, ctx);
	}
#endif

	return TRAP_RET_UNHANDLED;
}
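
/*
 * Illustrative sketch only, not part of the upstream file: a platform
 * could satisfy the plat_handle_rng_trap() hook roughly as follows.
 * plat_get_entropy() is a hypothetical platform-specific entropy source;
 * the other names come from sync_handle.h and context.h. The ISS Rt
 * field selects the destination GPR (saved 8 bytes apart in the context),
 * and returning TRAP_RET_CONTINUE asks the dispatcher to step over the
 * trapped instruction.
 *
 *	int plat_handle_rng_trap(uint64_t esr_el3, cpu_context_t *ctx)
 *	{
 *		unsigned int rt = (esr_el3 & ISS_SYSREG_REG_MASK) >>
 *			ISS_SYSREG_REG_SHIFT;
 *
 *		// Writes (direction bit clear) and XZR targets need no data.
 *		if (((esr_el3 & ISS_SYSREG_DIRECTION_MASK) == 0UL) ||
 *		    (rt == 31U)) {
 *			return TRAP_RET_CONTINUE;
 *		}
 *		write_ctx_reg(get_gpregs_ctx(ctx),
 *			      CTX_GPREG_X0 + (rt << 3), plat_get_entropy());
 *		return TRAP_RET_CONTINUE;
 *	}
 */

/* Check whether EL2 is routing EL0/EL1 exceptions to itself via HCR_EL2.TGE. */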
static bool is_tge_enabled(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	return ((is_feat_vhe_present()) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an undef injection is not attempted into a
 * non-existent S-EL2. This could otherwise happen when the trap comes from
 * S-EL{1,0} while the non-secure world is running with the TGE bit set:
 * EL3 does not save/restore EL2 registers if only one world has EL2
 * enabled, so reading hcr_el2.TGE would return the NS world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
	return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}
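
/*
 * Decide which EL receives the injected exception: traps taken above EL1
 * go back to the EL they came from; traps from EL{0,1} are routed to EL2
 * when TGE is in effect (and the trap is not from a world without S-EL2),
 * and to EL1 otherwise.
 */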
static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
	if (from_el > MODE_EL1) {
		return from_el;
	} else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
		return MODE_EL2;
	} else {
		return MODE_EL1;
	}
}
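
/*
 * Compute the vector entry the injected exception should enter: the
 * "current EL" SP0/SPx vectors when source and target EL match, or the
 * "lower EL using AArch64" vectors otherwise.
 */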
static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
	unsigned int outgoing_el = GET_EL(spsr_el3);
	u_register_t elr_el3 = 0;

	if (outgoing_el == target_el) {
		/*
		 * Target EL is either EL1 or EL2; the LSB tells us the SPSel:
		 *  Thread mode  : 0
		 *  Handler mode : 1
		 */
		if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
			elr_el3 = vbar + CURRENT_EL_SPX;
		} else {
			elr_el3 = vbar + CURRENT_EL_SP0;
		}
	} else {
		/* Vector address for lower EL using AArch64 */
		elr_el3 = vbar + LOWER_EL_AARCH64;
	}

	return elr_el3;
}

/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "AArch64.exceptions.takeexception" described in
 * DDI0602 revision 2023-06:
 * "https://developer.arm.com/documentation/ddi0602/2023-06/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release to ensure that
 * we keep up with new architecture features that introduce new SPSR bits.
 *
 * TF-A 2.12 release review:
 * The latest version available is 2024-09, which adds two features that
 * impact the generation of SPSR. Since these features are not implemented
 * in TF-A at the time of release, just log the feature names here, to be
 * taken up when feature support is introduced:
 * - FEAT_PAuth_LR (2023 extension)
 * - FEAT_UINJ (2024 extension)
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
	u_register_t new_spsr = 0;
	u_register_t sctlr;
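
	/*
	 * For each feature-dependent bit below, the incoming value is
	 * preserved first and then overridden whenever the corresponding
	 * feature is implemented, so absent features leave PSTATE untouched.
	 */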

	/* Set M bits for target EL in AArch64 mode, also get sctlr */
	if (target_el == MODE_EL2) {
		sctlr = read_sctlr_el2();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
	} else {
		sctlr = read_sctlr_el1();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
	}

	/* Mask all exceptions, update DAIF bits */
	new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	/* If FEAT_BTI is present, clear BTYPE bits */
	new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	if (is_feat_bti_present()) {
		new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	}

	/* If SSBS is implemented, take the value from SCTLR.DSSBS */
	new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
	if (is_feat_ssbs_present()) {
		if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
			new_spsr |= SPSR_SSBS_BIT_AARCH64;
		} else {
			new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
		}
	}

	/* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
	new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
	if (is_feat_nmi_present()) {
		if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
			new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
		} else {
			new_spsr |= SPSR_ALLINT_BIT_AARCH64;
		}
	}

	/* Clear PSTATE.IL bit explicitly */
	new_spsr &= ~SPSR_IL_BIT;

	/* Clear PSTATE.SS bit explicitly */
	new_spsr &= ~SPSR_SS_BIT;

	/* Update PSTATE.PAN bit */
	new_spsr |= old_spsr & SPSR_PAN_BIT;
	if (is_feat_pan_present() &&
	    ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
	    ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
		new_spsr |= SPSR_PAN_BIT;
	}

	/* Clear UAO bit if FEAT_UAO is present */
	new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
	if (is_feat_uao_present()) {
		new_spsr &= ~SPSR_UAO_BIT_AARCH64;
	}

	/* DIT bits are unchanged */
	new_spsr |= old_spsr & SPSR_DIT_BIT;

	/* If FEAT_MTE2 is implemented, mask tag faults by setting TCO bit */
	new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
	if (is_feat_mte2_present()) {
		new_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* NZCV bits are unchanged */
	new_spsr |= old_spsr & SPSR_NZCV;

	/* If FEAT_EBEP is present, set PM bit */
	new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
	if (is_feat_ebep_present()) {
		new_spsr |= SPSR_PM_BIT_AARCH64;
	}

	/* If FEAT_SEBEP is present, clear PPEND bit */
	new_spsr |= old_spsr & SPSR_PPEND_BIT;
	if (is_feat_sebep_present()) {
		new_spsr &= ~SPSR_PPEND_BIT;
	}

	/* If FEAT_GCS is present, update EXLOCK bit */
	new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
	if (is_feat_gcs_present()) {
		u_register_t gcscr;

		if (target_el == MODE_EL2) {
			gcscr = read_gcscr_el2();
		} else {
			gcscr = read_gcscr_el1();
		}
		new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
	}

	return new_spsr;
}

/*
 * Handler for injecting an Undefined Instruction exception into a lower
 * EL when that EL accesses a system register of which the (older) EL3
 * firmware is unaware.
 *
 * This is a safety net to avoid EL3 panics caused by system register
 * accesses that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
	u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
	u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	u_register_t new_spsr = 0;
	unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);
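
	/*
	 * Populate the target EL's exception registers as if it had taken
	 * the exception directly: the faulting PC goes to ELR_ELx, the
	 * Unknown syndrome to ESR_ELx, and the interrupted PSTATE to
	 * SPSR_ELx.
	 */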
	if (to_el == MODE_EL2) {
		write_elr_el2(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
		write_esr_el2(esr);
		write_spsr_el2(old_spsr);
	} else {
		write_elr_el1(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
		write_esr_el1(esr);
		write_spsr_el1(old_spsr);
	}
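
	/*
	 * Retarget the ERET from EL3: return to the target EL's vector
	 * entry with a freshly constructed PSTATE.
	 */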
	new_spsr = create_spsr(old_spsr, to_el);

	write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
	write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}