/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
        sdei_ev_map_t *map;
        uint64_t x[SDEI_SAVED_GPREGS];
        jmp_buf *dispatch_jmp;

        /* Exception state registers */
        uint64_t elr_el3;
        uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
        /* CVE-2018-3639 mitigation state */
        uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
        sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
        unsigned short stack_top; /* Empty ascending */
        bool pe_masked;
        bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];
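
/*
 * Mask SDEI event dispatch on the calling PE. The return value indicates
 * whether this call changed the mask status: 1 if the PE was previously
 * unmasked, 0 if it was already masked.
 */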
int64_t sdei_pe_mask(void)
{
        int64_t ret = 0;
        sdei_cpu_state_t *state = sdei_get_this_pe_state();

        /*
         * Return value indicates whether this call had any effect in the mask
         * status of this PE.
         */
        if (!state->pe_masked) {
                state->pe_masked = true;
                ret = 1;
        }

        return ret;
}
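
/*
 * Unmask SDEI event dispatch on the calling PE, and apply any interrupt
 * enables that were deferred while the PE was masked.
 */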
void sdei_pe_unmask(void)
{
        unsigned int i;
        sdei_ev_map_t *map;
        sdei_entry_t *se;
        sdei_cpu_state_t *state = sdei_get_this_pe_state();
        uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

        /*
         * If there are pending enables, iterate through the private mappings
         * and enable those bound maps that are in enabled state. Also, iterate
         * through shared mappings and enable interrupts of events that are
         * targeted to this PE.
         */
        if (state->pending_enables) {
                for_each_private_map(i, map) {
                        se = get_event_entry(map);
                        if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
                                plat_ic_enable_interrupt(map->intr);
                }

                for_each_shared_map(i, map) {
                        se = get_event_entry(map);

                        sdei_map_lock(map);
                        if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
                                        (se->reg_flags == SDEI_REGF_RM_PE) &&
                                        (se->affinity == my_mpidr)) {
                                plat_ic_enable_interrupt(map->intr);
                        }
                        sdei_map_unlock(map);
                }
        }

        state->pending_enables = false;
        state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
        sdei_cpu_state_t *state = sdei_get_this_pe_state();
        sdei_dispatch_context_t *disp_ctx;

        /* Cannot have more than max events */
        assert(state->stack_top < MAX_EVENT_NESTING);

        disp_ctx = &state->dispatch_stack[state->stack_top];
        state->stack_top++;

        return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
        sdei_cpu_state_t *state = sdei_get_this_pe_state();

        if (state->stack_top == 0U)
                return NULL;

        assert(state->stack_top <= MAX_EVENT_NESTING);

        state->stack_top--;

        return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
        sdei_cpu_state_t *state = sdei_get_this_pe_state();

        if (state->stack_top == 0U)
                return NULL;

        assert(state->stack_top <= MAX_EVENT_NESTING);

        return &state->dispatch_stack[state->stack_top - 1U];
}
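
/*
 * Save the interrupted context (x0-x17, ELR_EL3 and SPSR_EL3) from the given
 * CPU context into a new frame pushed on this PE's dispatch stack.
 */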
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
                void *tgt_ctx)
{
        sdei_dispatch_context_t *disp_ctx;
        const gp_regs_t *tgt_gpregs;
        const el3_state_t *tgt_el3;

        assert(tgt_ctx != NULL);
        tgt_gpregs = get_gpregs_ctx(tgt_ctx);
        tgt_el3 = get_el3state_ctx(tgt_ctx);

        disp_ctx = push_dispatch();
        assert(disp_ctx != NULL);
        disp_ctx->map = map;

        /* Save general purpose and exception registers */
        memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
        disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
        disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

        return disp_ctx;
}
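
/*
 * Restore the interrupted context from the given dispatch stack frame back
 * into the target CPU context.
 */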
static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx,
                void *tgt_ctx)
{
        gp_regs_t *tgt_gpregs;
        el3_state_t *tgt_el3;

        assert(tgt_ctx != NULL);
        tgt_gpregs = get_gpregs_ctx(tgt_ctx);
        tgt_el3 = get_el3state_ctx(tgt_ctx);

        CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
                        sdei_saved_gpregs_size_mismatch);

        /* Restore general purpose and exception registers */
        memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
        write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
        write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
        cve_2018_3639_t *tgt_cve_2018_3639;

        tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

        /* Restore CVE-2018-3639 mitigation state */
        write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
                        disp_ctx->disable_cve_2018_3639);
#endif
}
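
/* Save the Secure EL1 system register context before switching to Non-secure */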
static void save_secure_context(void)
{
        cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
        cpu_context_t *ns_ctx;

        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        ns_ctx = cm_get_context(NON_SECURE);
        assert(ns_ctx != NULL);

        return ns_ctx;
}

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR register by calling the common create_spsr() function
 */
static void sdei_set_elr_spsr(sdei_entry_t *se,
                sdei_dispatch_context_t *disp_ctx)
{
        unsigned int client_el = sdei_client_el();
        u_register_t interrupted_pstate = disp_ctx->spsr_el3;
        u_register_t sdei_spsr = create_spsr(interrupted_pstate, client_el);

        cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
                cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
        sdei_dispatch_context_t *disp_ctx;

        /* Push the event and context */
        disp_ctx = save_event_ctx(map, ctx);

        /*
         * Setup handler arguments:
         *
         * - x0: Event number
         * - x1: Handler argument supplied at the time of event registration
         * - x2: Interrupted PC
         * - x3: Interrupted SPSR
         */
        SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
        SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
        SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
        SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

        /* Setup the elr and spsr register to prepare for ERET */
        sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
        cve_2018_3639_t *tgt_cve_2018_3639;

        tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

        /* Save CVE-2018-3639 mitigation state */
        disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
                        CTX_CVE_2018_3639_DISABLE);

        /* Force SDEI handler to execute with mitigation enabled by default */
        write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

        disp_ctx->dispatch_jmp = dispatch_jmp;
}
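
/*
 * From the client's perspective, if the registered entry point is an AAPCS64 C
 * function, it is entered roughly as if it had the following prototype (names
 * here are illustrative only):
 *
 *	void handler(uint64_t ev_num, uint64_t arg, uint64_t interrupted_pc,
 *			uint64_t interrupted_pstate);
 */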

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
                sdei_cpu_state_t *state, unsigned int intr_raw)
{
        uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
        bool disable = false;

        /* Nothing to do for event 0 */
        if (map->ev_num == SDEI_EVENT_0)
                return;

        /*
         * For a private event, or for a shared event specifically routed to
         * this CPU, we disable the interrupt, leave it pending, and do EOI.
         */
        if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
                disable = true;

        if (se->reg_flags == SDEI_REGF_RM_PE)
                assert(se->affinity == my_mpidr);

        if (disable) {
                plat_ic_disable_interrupt(map->intr);
                plat_ic_set_interrupt_pending(map->intr);
                plat_ic_end_of_interrupt(intr_raw);
                state->pending_enables = true;

                return;
        }

        /*
         * We just received a shared event with routing set to ANY PE. The
         * interrupt can't be delegated on this PE as SDEI events are masked.
         * However, because its routing mode is ANY, it is possible that the
         * event can be delegated on any other PE that hasn't masked events.
         * Therefore, we set the interrupt back to pending so as to give other
         * suitable PEs a chance of handling it.
         */
        assert(plat_ic_is_spi(map->intr) != 0);
        plat_ic_set_interrupt_pending(map->intr);

        /*
         * Leaving the same interrupt pending also means that the same interrupt
         * can target this PE again as soon as this PE leaves EL3. Whether and
         * how often that happens depends on the implementation of the GIC.
         *
         * We therefore call a platform handler to resolve this situation.
         */
        plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

        /* This PE is masked. We EOI the interrupt, as it can't be delegated */
        plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
                void *cookie)
{
        sdei_entry_t *se;
        cpu_context_t *ctx;
        sdei_ev_map_t *map;
        const sdei_dispatch_context_t *disp_ctx;
        unsigned int sec_state;
        sdei_cpu_state_t *state;
        uint32_t intr;
        jmp_buf dispatch_jmp;
        const uint64_t mpidr = read_mpidr_el1();

        /*
         * To handle an event, the following conditions must be true:
         *
         * 1. Event must be signalled
         * 2. Event must be enabled
         * 3. This PE must be a target PE for the event
         * 4. PE must be unmasked for SDEI
         * 5. If this is a normal event, no event must be running
         * 6. If this is a critical event, no critical event must be running
         *
         * (1) and (2) are true when this function is running
         * (3) is enforced in GIC by selecting the appropriate routing option
         * (4) is satisfied by client calling PE_UNMASK
         * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
         *    - Normal SDEI events belong to Normal SDE priority class
         *    - Critical SDEI events belong to Critical CSDE priority class
         *
         * The interrupt has already been acknowledged, and therefore is active,
         * so no other PE can handle this event while we are at it.
         *
         * Find if this is an SDEI interrupt. There must be an event mapped to
         * this interrupt.
         */
        intr = plat_ic_get_interrupt_id(intr_raw);
        map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
        if (map == NULL) {
                ERROR("No SDEI map for interrupt %u\n", intr);
                panic();
        }

        /*
         * Received interrupt number must either correspond to event 0, or must
         * be a bound interrupt.
         */
        assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

        se = get_event_entry(map);
        state = sdei_get_this_pe_state();

        if (state->pe_masked) {
                /*
                 * Interrupts received while this PE was masked can't be
                 * dispatched.
                 */
                SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
                                map->intr, mpidr);
                if (is_event_shared(map))
                        sdei_map_lock(map);

                handle_masked_trigger(map, se, state, intr_raw);

                if (is_event_shared(map))
                        sdei_map_unlock(map);

                return 0;
        }

        /* Insert load barrier for signalled SDEI event */
        if (map->ev_num == SDEI_EVENT_0)
                dmbld();

        if (is_event_shared(map))
                sdei_map_lock(map);

        /* Assert that a shared event routed to this PE was configured so */
        if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
                assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
        }

        if (!can_sdei_state_trans(se, DO_DISPATCH)) {
                SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
                                map->ev_num, se->state);

                /*
                 * If the event is registered, leave the interrupt pending so
                 * that it's delivered when the event is enabled.
                 */
                if (GET_EV_STATE(se, REGISTERED))
                        plat_ic_set_interrupt_pending(map->intr);

                /*
                 * The interrupt was disabled or unregistered after the handler
                 * started to execute, which means now the interrupt is already
                 * disabled and we just need to EOI the interrupt.
                 */
                plat_ic_end_of_interrupt(intr_raw);

                if (is_event_shared(map))
                        sdei_map_unlock(map);

                return 0;
        }

        disp_ctx = get_outstanding_dispatch();
        if (is_event_critical(map)) {
                /*
                 * If this event is Critical, and if there's an outstanding
                 * dispatch, assert the latter is a Normal dispatch. Critical
                 * events can preempt an outstanding Normal event dispatch.
                 */
                if (disp_ctx != NULL)
                        assert(is_event_normal(disp_ctx->map));
        } else {
                /*
                 * If this event is Normal, assert that there are no outstanding
                 * dispatches. Normal events can't preempt any outstanding event
                 * dispatches.
                 */
                assert(disp_ctx == NULL);
        }

        sec_state = get_interrupt_src_ss(flags);

        if (is_event_shared(map))
                sdei_map_unlock(map);

        SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
                        mpidr, map->ev_num, sec_state, read_spsr_el3(),
                        read_elr_el3());

        ctx = handle;

        /*
         * Check if we interrupted secure state. Perform a context switch so
         * that we can delegate to NS.
         */
        if (sec_state == SECURE) {
                save_secure_context();
                ctx = restore_and_resume_ns_context();
        }

        /* Synchronously dispatch event */
        setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
        begin_sdei_synchronous_dispatch(&dispatch_jmp);

        /*
         * We reach here when the client completes the event.
         *
         * If the cause of dispatch originally interrupted the Secure world,
         * resume Secure.
         *
         * No need to save the Non-secure context ahead of a world switch: the
         * Non-secure context was fully saved before dispatch, and has been
         * returned to its pre-dispatch state.
         */
        if (sec_state == SECURE)
                restore_and_resume_secure_context();

        /*
         * The event was dispatched after receiving SDEI interrupt. With
         * the event handling completed, EOI the corresponding
         * interrupt.
         */
        if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
                ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
                panic();
        }
        plat_ic_end_of_interrupt(intr_raw);

        return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher to
 * restore the Non-secure context and make it the active one. This call returns
 * only after the client has completed the dispatch. Then, the Non-secure
 * context will be active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
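
/*
 * Illustrative (hypothetical) usage from another EL3 component, e.g. a
 * platform firmware-first error handler, where PLAT_SDEI_EXPLICIT_EVENT is a
 * platform-defined explicit event:
 *
 *	if (sdei_dispatch_event(PLAT_SDEI_EXPLICIT_EVENT) != 0)
 *		ERROR("SDEI dispatch failed\n");
 */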
int sdei_dispatch_event(int ev_num)
{
        sdei_entry_t *se;
        sdei_ev_map_t *map;
        cpu_context_t *ns_ctx;
        sdei_dispatch_context_t *disp_ctx;
        sdei_cpu_state_t *state;
        jmp_buf dispatch_jmp;

        /* Can't dispatch if events are masked on this PE */
        state = sdei_get_this_pe_state();
        if (state->pe_masked)
                return -1;

        /* Event 0 can't be dispatched */
        if (ev_num == SDEI_EVENT_0)
                return -1;

        /* Locate mapping corresponding to this event */
        map = find_event_map(ev_num);
        if (map == NULL)
                return -1;

        /* Only explicit events can be dispatched */
        if (!is_map_explicit(map))
                return -1;

        /* Examine state of dispatch stack */
        disp_ctx = get_outstanding_dispatch();
        if (disp_ctx != NULL) {
                /*
                 * There's an outstanding dispatch. If the outstanding dispatch
                 * is critical, no more dispatches are possible.
                 */
                if (is_event_critical(disp_ctx->map))
                        return -1;

                /*
                 * If the outstanding dispatch is Normal, only critical events
                 * can be dispatched.
                 */
                if (is_event_normal(map))
                        return -1;
        }

        se = get_event_entry(map);
        if (!can_sdei_state_trans(se, DO_DISPATCH))
                return -1;

        /*
         * Prepare for NS dispatch by restoring the Non-secure context and
         * marking that as active.
         */
        ns_ctx = restore_and_resume_ns_context();

        /* Activate the priority corresponding to the event being dispatched */
        ehf_activate_priority(sdei_event_priority(map));

        /* Dispatch event synchronously */
        setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
        begin_sdei_synchronous_dispatch(&dispatch_jmp);

        /*
         * We reach here when the client completes the event.
         *
         * Deactivate the priority level that was activated at the time of
         * explicit dispatch.
         */
        ehf_deactivate_priority(sdei_event_priority(map));

        return 0;
}
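
/*
 * Unwind back to the point where begin_sdei_synchronous_dispatch() was called,
 * thereby ending the synchronous dispatch.
 */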
static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
        longjmp(*buffer, 1);
}
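
/*
 * Complete the outstanding dispatch on this PE. 'resume' selects
 * complete-and-resume semantics, with 'pc' as the Non-secure resumption point.
 */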
int sdei_event_complete(bool resume, uint64_t pc)
{
        sdei_dispatch_context_t *disp_ctx;
        sdei_entry_t *se;
        sdei_ev_map_t *map;
        cpu_context_t *ctx;
        sdei_action_t act;
        unsigned int client_el = sdei_client_el();

        /* Return error if called without an active event */
        disp_ctx = get_outstanding_dispatch();
        if (disp_ctx == NULL)
                return SDEI_EDENY;

        /* Validate resumption point */
        if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
                return SDEI_EDENY;

        map = disp_ctx->map;
        assert(map != NULL);
        se = get_event_entry(map);

        if (is_event_shared(map))
                sdei_map_lock(map);

        act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
        if (!can_sdei_state_trans(se, act)) {
                if (is_event_shared(map))
                        sdei_map_unlock(map);
                return SDEI_EDENY;
        }

        if (is_event_shared(map))
                sdei_map_unlock(map);

        /* Having done sanity checks, pop dispatch */
        (void) pop_dispatch();

        SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
                        map->ev_num, read_spsr_el3(), read_elr_el3());

        /*
         * Restore Non-secure to how it was originally interrupted. Once done,
         * it's up-to-date with the saved copy.
         */
        ctx = cm_get_context(NON_SECURE);
        restore_event_ctx(disp_ctx, ctx);

        if (resume) {
                /*
                 * Complete-and-resume call. Prepare the Non-secure context
                 * (currently active) for complete and resume.
                 */
                cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
                                MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

                /*
                 * Make it look as if a synchronous exception were taken at the
                 * supplied Non-secure resumption point. Populate SPSR and
                 * ELR_ELx so that an ERET from there works as expected.
                 *
                 * The assumption is that the client, if necessary, would have
                 * saved any live content in these registers before making this
                 * call.
                 */
                if (client_el == MODE_EL2) {
                        write_elr_el2(disp_ctx->elr_el3);
                        write_spsr_el2(disp_ctx->spsr_el3);
                } else {
                        /* EL1 */
                        write_elr_el1(disp_ctx->elr_el3);
                        write_spsr_el1(disp_ctx->spsr_el3);
                }
        }

        /* End the outstanding dispatch */
        end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

        return 0;
}
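
/*
 * Return the value of register x<param> as saved from the interrupted context
 * of the outstanding dispatch on this PE (SDEI_EVENT_CONTEXT).
 */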
int64_t sdei_event_context(void *handle, unsigned int param)
{
        sdei_dispatch_context_t *disp_ctx;

        if (param >= SDEI_SAVED_GPREGS)
                return SDEI_EINVAL;

        /* Get outstanding dispatch on this CPU */
        disp_ctx = get_outstanding_dispatch();
        if (disp_ctx == NULL)
                return SDEI_EDENY;

        assert(disp_ctx->map != NULL);
        if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
                return SDEI_EDENY;

        /*
         * No locking is required for the Running status as this is the only
         * CPU which can complete the event.
         */
        return (int64_t) disp_ctx->x[param];
}