sdei_intr_mgmt.c

/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"
/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];
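
/*
 * Mask SDEI events on the calling PE. Returns 1 if the mask status of the PE
 * changed as a result of this call, 0 otherwise.
 */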
int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect on the mask
	 * status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}
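
/*
 * Unmask SDEI events on the calling PE, applying any interrupt enables that
 * were deferred while the PE was masked.
 */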
void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}
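
/*
 * Push a new dispatch context and save into it the interrupted state: x0-x17,
 * ELR_EL3 and SPSR_EL3 from the target CPU context.
 */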
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}
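
/*
 * Restore a saved dispatch context (x0-x17, ELR_EL3, SPSR_EL3 and, where
 * enabled, the CVE-2018-3639 mitigation state) into the target CPU context.
 */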
static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;

	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}
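
/* Save the Secure EL1 system register context before switching worlds */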
static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR register by calling the common create_spsr() function
 */
static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
{
	unsigned int client_el = sdei_client_el();
	u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
			DISABLE_ALL_EXCEPTIONS);

	u_register_t interrupted_pstate = disp_ctx->spsr_el3;

	sdei_spsr = create_spsr(interrupted_pstate, client_el);

	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Setup the elr and spsr register to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;

	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif
	disp_ctx->dispatch_jmp = dispatch_jmp;
}
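
/*
 * Seen from the client, the registered entry point therefore begins executing
 * with these values in x0-x3. A minimal (hypothetical) client-side view of the
 * entry, assuming an assembly veneer that preserves x0-x3:
 *
 *	void sdei_handler(uint64_t ev_num, uint64_t arg,
 *			  uint64_t interrupted_pc, uint64_t interrupted_spsr);
 */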

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave the interrupt pending, and
	 * do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back to pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * Received interrupt number must either correspond to event 0, or must
	 * be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
				map->intr, mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
			mpidr, map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving the SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher to
 * restore the Non-secure context and make it the active one. This call returns
 * only after the client has completed the dispatch. Then, the Non-secure
 * context will be active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}
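
/*
 * Illustrative sketch of an in-EL3 caller (hypothetical, not part of this
 * file): a platform error handler delegating a RAS error to the Non-secure
 * client via an explicit event. The event number and function names below are
 * assumptions for illustration only.
 */
#if 0
#define PLAT_SDEI_RAS_EVENT	5000	/* hypothetical explicit event */

static void plat_handle_ras_error(void)
{
	/* Triage the error record first, then delegate to the NS client */
	int ret = sdei_dispatch_event(PLAT_SDEI_RAS_EVENT);

	/* Returns only after the client signals SDEI_EVENT_COMPLETE */
	if (ret < 0)
		ERROR("SDEI dispatch failed: %d\n", ret);
}
#endif

/* Unwind an outstanding synchronous dispatch via its saved jump buffer */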
static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}
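
/*
 * Complete the outstanding dispatch on this PE (SDEI_EVENT_COMPLETE and
 * SDEI_EVENT_COMPLETE_AND_RESUME). Pops the dispatch context, restores the
 * interrupted Non-secure context and, for complete-and-resume, arranges entry
 * at the supplied resumption point as if a synchronous exception was taken.
 */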
int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
				MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}
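
/*
 * SDEI_EVENT_CONTEXT: return the value of the saved general purpose register
 * 'param' (x0-x17) of the event currently being dispatched on this PE.
 */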
int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);
	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running status as this is the only CPU
	 * which can complete the event.
	 */
	return (int64_t) disp_ctx->x[param];
}