  1. /*
  2. * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <arch_helpers.h>
  7. #include <assert.h>
  8. #include <inttypes.h>
  9. #include <stddef.h>
  10. #include <stdint.h>
  11. #include <string.h>
  12. #include <bl31/bl31.h>
  13. #include <bl31/ehf.h>
  14. #include <bl31/interrupt_mgmt.h>
  15. #include <common/bl_common.h>
  16. #include <common/debug.h>
  17. #include <common/runtime_svc.h>
  18. #include <context.h>
  19. #include <lib/cassert.h>
  20. #include <lib/el3_runtime/pubsub.h>
  21. #include <lib/utils.h>
  22. #include <plat/common/platform.h>
  23. #include <services/sdei.h>
  24. #include "sdei_private.h"
/* SDEI implementation version reported by SDEI_VERSION: major.minor, vendor */
#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

/* Pack major (bits 63:48), minor (bits 47:32) and vendor fields together */
#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))

/* GIC priority used when handing a released interrupt back to Non-secure */
#define LOWEST_INTR_PRIORITY	0xff

/* Critical events must have a numerically lower (i.e. higher) EL3 priority */
CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

/* Number of free dynamic bind slots, counted by sdei_class_init() */
static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;
  34. /* Initialise SDEI map entries */
  35. static void init_map(sdei_ev_map_t *map)
  36. {
  37. map->reg_count = 0;
  38. }
  39. /* Convert mapping to SDEI class */
  40. static sdei_class_t map_to_class(sdei_ev_map_t *map)
  41. {
  42. return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
  43. }
  44. /* Clear SDEI event entries except state */
  45. static void clear_event_entries(sdei_entry_t *se)
  46. {
  47. se->ep = 0;
  48. se->arg = 0;
  49. se->affinity = 0;
  50. se->reg_flags = 0;
  51. }
  52. /* Perform CPU-specific state initialisation */
  53. static void *sdei_cpu_on_init(const void *arg)
  54. {
  55. unsigned int i;
  56. sdei_ev_map_t *map;
  57. sdei_entry_t *se;
  58. /* Initialize private mappings on this CPU */
  59. for_each_private_map(i, map) {
  60. se = get_event_entry(map);
  61. clear_event_entries(se);
  62. se->state = 0;
  63. }
  64. SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());
  65. /* All PEs start with SDEI events masked */
  66. (void) sdei_pe_mask();
  67. return NULL;
  68. }
  69. /* CPU initialisation after wakeup from suspend */
  70. static void *sdei_cpu_wakeup_init(const void *arg)
  71. {
  72. SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());
  73. /* All PEs wake up with SDEI events masked */
  74. sdei_pe_mask();
  75. return 0;
  76. }
/*
 * Sanity-check and initialise all mappings belonging to one SDEI class
 * (critical or normal). Called once per class from sdei_init(). With
 * assertions enabled, also validates platform-supplied mapping tables:
 * sorted event numbers, correct shared/private split, and flag legality.
 */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			/* Free dynamic slots carry no interrupt yet */
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	/* Run the per-CPU initialisation for the boot CPU now */
	(void) sdei_cpu_on_init(NULL);
}
  182. /* SDEI dispatcher initialisation */
  183. void sdei_init(void)
  184. {
  185. plat_sdei_setup();
  186. sdei_class_init(SDEI_CRITICAL);
  187. sdei_class_init(SDEI_NORMAL);
  188. /* Register priority level handlers */
  189. ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
  190. sdei_intr_handler);
  191. ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
  192. sdei_intr_handler);
  193. }
  194. /* Populate SDEI event entry */
  195. static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
  196. unsigned int flags, uint64_t affinity)
  197. {
  198. assert(se != NULL);
  199. se->ep = ep;
  200. se->arg = arg;
  201. se->affinity = (affinity & MPIDR_AFFINITY_MASK);
  202. se->reg_flags = flags;
  203. }
  204. static uint64_t sdei_version(void)
  205. {
  206. return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
  207. }
  208. /* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
  209. static int validate_flags(uint64_t flags, uint64_t mpidr)
  210. {
  211. /* Validate flags */
  212. switch (flags) {
  213. case SDEI_REGF_RM_PE:
  214. if (!is_valid_mpidr(mpidr))
  215. return SDEI_EINVAL;
  216. break;
  217. case SDEI_REGF_RM_ANY:
  218. break;
  219. default:
  220. /* Unknown flags */
  221. return SDEI_EINVAL;
  222. }
  223. return 0;
  224. }
/*
 * Set routing of a shared SDEI event (SDEI_EVENT_ROUTING_SET).
 *
 * flags selects SDEI_REGF_RM_ANY or SDEI_REGF_RM_PE; mpidr is the target
 * PE for RM_PE. Returns 0, or SDEI_EINVAL/SDEI_EDENY on failure.
 */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Re-check under the lock: only bound, shared events may be routed */
	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
		INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;
	if (flags == SDEI_REGF_RM_PE) {
		/* Remember the target PE's affinity for later dispatch */
		se->affinity = (mpidr & MPIDR_AFFINITY_MASK);
	}

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}
/*
 * Register handler and argument for an SDEI event (SDEI_EVENT_REGISTER).
 *
 * ep/arg: client handler entry point and opaque argument; flags/mpidr:
 * routing mode and target PE, as for sdei_event_routing_set(). Returns 0,
 * or SDEI_EINVAL/SDEI_EDENY on failure. On any denial after the state
 * transition, the event's previous state is restored (see 'fallback').
 */
static int64_t sdei_event_register(int ev_num,
		uint64_t ep,
		uint64_t arg,
		uint64_t flags,
		uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	/* Reject a null entry point, or one the platform deems invalid */
	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map)) {
		/*
		 * SDEI internally handles private events in the same manner
		 * as public events with routing mode=RM_PE, since the routing
		 * mode flag and affinity fields are not used when registering
		 * a private event, set them here.
		 */
		flags = SDEI_REGF_RM_PE;

		/*
		 * Kernel may pass 0 as mpidr, as we set flags to
		 * SDEI_REGF_RM_PE, so set mpidr also.
		 */
		mpidr = read_mpidr_el1();
	}

	se = get_event_entry(map);

	/*
	 * Even though register operation is per-event (additionally for private
	 * events, registration is required individually), it has to be
	 * serialised with respect to bind/release, which are global operations.
	 * So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	/* Snapshot the state so it can be restored on failure */
	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
				INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
  380. /* Enable SDEI event */
  381. static int64_t sdei_event_enable(int ev_num)
  382. {
  383. sdei_ev_map_t *map;
  384. sdei_entry_t *se;
  385. int ret;
  386. bool before, after;
  387. /* Check if valid event number */
  388. map = find_event_map(ev_num);
  389. if (map == NULL)
  390. return SDEI_EINVAL;
  391. se = get_event_entry(map);
  392. ret = SDEI_EDENY;
  393. if (is_event_shared(map))
  394. sdei_map_lock(map);
  395. before = GET_EV_STATE(se, ENABLED);
  396. if (!can_sdei_state_trans(se, DO_ENABLE))
  397. goto finish;
  398. after = GET_EV_STATE(se, ENABLED);
  399. /*
  400. * Enable interrupt for bound events only if there's a change in enabled
  401. * state.
  402. */
  403. if (is_map_bound(map) && (!before && after))
  404. plat_ic_enable_interrupt(map->intr);
  405. ret = 0;
  406. finish:
  407. if (is_event_shared(map))
  408. sdei_map_unlock(map);
  409. return ret;
  410. }
  411. /* Disable SDEI event */
  412. static int sdei_event_disable(int ev_num)
  413. {
  414. sdei_ev_map_t *map;
  415. sdei_entry_t *se;
  416. int ret;
  417. bool before, after;
  418. /* Check if valid event number */
  419. map = find_event_map(ev_num);
  420. if (map == NULL)
  421. return SDEI_EINVAL;
  422. se = get_event_entry(map);
  423. ret = SDEI_EDENY;
  424. if (is_event_shared(map))
  425. sdei_map_lock(map);
  426. before = GET_EV_STATE(se, ENABLED);
  427. if (!can_sdei_state_trans(se, DO_DISABLE))
  428. goto finish;
  429. after = GET_EV_STATE(se, ENABLED);
  430. /*
  431. * Disable interrupt for bound events only if there's a change in
  432. * enabled state.
  433. */
  434. if (is_map_bound(map) && (before && !after))
  435. plat_ic_disable_interrupt(map->intr);
  436. ret = 0;
  437. finish:
  438. if (is_event_shared(map))
  439. sdei_map_unlock(map);
  440. return ret;
  441. }
  442. /* Query SDEI event information */
  443. static int64_t sdei_event_get_info(int ev_num, int info)
  444. {
  445. sdei_entry_t *se;
  446. sdei_ev_map_t *map;
  447. uint64_t flags;
  448. bool registered;
  449. uint64_t affinity;
  450. /* Check if valid event number */
  451. map = find_event_map(ev_num);
  452. if (map == NULL)
  453. return SDEI_EINVAL;
  454. se = get_event_entry(map);
  455. if (is_event_shared(map))
  456. sdei_map_lock(map);
  457. /* Sample state under lock */
  458. registered = GET_EV_STATE(se, REGISTERED);
  459. flags = se->reg_flags;
  460. affinity = se->affinity;
  461. if (is_event_shared(map))
  462. sdei_map_unlock(map);
  463. switch (info) {
  464. case SDEI_INFO_EV_TYPE:
  465. return is_event_shared(map);
  466. case SDEI_INFO_EV_NOT_SIGNALED:
  467. return !is_event_signalable(map);
  468. case SDEI_INFO_EV_PRIORITY:
  469. return is_event_critical(map);
  470. case SDEI_INFO_EV_ROUTING_MODE:
  471. if (!is_event_shared(map))
  472. return SDEI_EINVAL;
  473. if (!registered)
  474. return SDEI_EDENY;
  475. return (flags == SDEI_REGF_RM_PE);
  476. case SDEI_INFO_EV_ROUTING_AFF:
  477. if (!is_event_shared(map))
  478. return SDEI_EINVAL;
  479. if (!registered)
  480. return SDEI_EDENY;
  481. if (flags != SDEI_REGF_RM_PE)
  482. return SDEI_EINVAL;
  483. return affinity;
  484. default:
  485. return SDEI_EINVAL;
  486. }
  487. }
/*
 * Unregister an SDEI event (SDEI_EVENT_UNREGISTER).
 *
 * Returns 0 on success; SDEI_EPEND if the handler is currently running
 * (unregister stays pending); SDEI_EINVAL/SDEI_EDENY otherwise. For bound
 * events, the backing interrupt is handed back to Non-secure.
 */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though unregister operation is per-event (additionally for
	 * private events, unregistration is required individually), it has to
	 * be serialised with respect to bind/release, which are global
	 * operations. So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, and the handler is running (for
		 * example, having unregistered from a running handler earlier),
		 * return pending error code; otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;
		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Stop the interrupt firing while it is reprogrammed */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Hand the interrupt back to Non-secure, at lowest priority */
		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}
  546. /* Query status of an SDEI event */
  547. static int sdei_event_status(int ev_num)
  548. {
  549. sdei_ev_map_t *map;
  550. sdei_entry_t *se;
  551. sdei_state_t state;
  552. /* Check if valid event number */
  553. map = find_event_map(ev_num);
  554. if (map == NULL)
  555. return SDEI_EINVAL;
  556. se = get_event_entry(map);
  557. if (is_event_shared(map))
  558. sdei_map_lock(map);
  559. /* State value directly maps to the expected return format */
  560. state = se->state;
  561. if (is_event_shared(map))
  562. sdei_map_unlock(map);
  563. return (int) state;
  564. }
/*
 * Bind an interrupt to a dynamic SDEI event (SDEI_INTERRUPT_BIND).
 *
 * Returns the bound event number on success, or a negative SDEI error.
 * The slot search and the binding itself are not atomic, so the body
 * retries under the mapping lock if another bind races us to the slot.
 */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* Interrupt must be either PPI or SPI */
	if (!(plat_ic_is_ppi(intr_num) || plat_ic_is_spi(intr_num)))
		return SDEI_EINVAL;

	/* SPIs bind to shared mappings; PPIs to private ones */
	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		if (!is_map_dynamic(map)) {
			return SDEI_ENOMEM;
		}

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		/* If another bind won the race for this slot, loop again */
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}
/*
 * Release an SDEI event previously bound to an interrupt
 * (SDEI_INTERRUPT_RELEASE). The event must be dynamic and fully
 * unregistered; returns 0 or a negative SDEI error.
 */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Only dynamically-bound events can be released */
	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}
  678. /* Perform reset of private SDEI events */
  679. static int sdei_private_reset(void)
  680. {
  681. sdei_ev_map_t *map;
  682. int ret = 0, final_ret = 0;
  683. unsigned int i;
  684. /* Unregister all private events */
  685. for_each_private_map(i, map) {
  686. /*
  687. * The unregister can fail if the event is not registered, which
  688. * is allowed, and a deny will be returned. But if the event is
  689. * running or unregister pending, the call fails.
  690. */
  691. ret = sdei_event_unregister(map->ev_num);
  692. if ((ret == SDEI_EPEND) && (final_ret == 0))
  693. final_ret = SDEI_EDENY;
  694. }
  695. return final_ret;
  696. }
  697. /* Perform reset of shared SDEI events */
  698. static int sdei_shared_reset(void)
  699. {
  700. const sdei_mapping_t *mapping;
  701. sdei_ev_map_t *map;
  702. int ret = 0, final_ret = 0;
  703. unsigned int i, j;
  704. /* Unregister all shared events */
  705. for_each_shared_map(i, map) {
  706. /*
  707. * The unregister can fail if the event is not registered, which
  708. * is allowed, and a deny will be returned. But if the event is
  709. * running or unregister pending, the call fails.
  710. */
  711. ret = sdei_event_unregister(map->ev_num);
  712. if ((ret == SDEI_EPEND) && (final_ret == 0))
  713. final_ret = SDEI_EDENY;
  714. }
  715. if (final_ret != 0)
  716. return final_ret;
  717. /*
  718. * Loop through both private and shared mappings, and release all
  719. * bindings.
  720. */
  721. for_each_mapping_type(i, mapping) {
  722. iterate_mapping(mapping, j, map) {
  723. /*
  724. * Release bindings for mappings that are dynamic and
  725. * bound.
  726. */
  727. if (is_map_dynamic(map) && is_map_bound(map)) {
  728. /*
  729. * Any failure to release would mean there is at
  730. * least a PE registered for the event.
  731. */
  732. ret = sdei_interrupt_release(map->ev_num);
  733. if ((ret != 0) && (final_ret == 0))
  734. final_ret = ret;
  735. }
  736. }
  737. }
  738. return final_ret;
  739. }
  740. /* Send a signal to another SDEI client PE */
  741. static int sdei_signal(int ev_num, uint64_t target_pe)
  742. {
  743. sdei_ev_map_t *map;
  744. /* Only event 0 can be signalled */
  745. if (ev_num != SDEI_EVENT_0)
  746. return SDEI_EINVAL;
  747. /* Find mapping for event 0 */
  748. map = find_event_map(SDEI_EVENT_0);
  749. if (map == NULL)
  750. return SDEI_EINVAL;
  751. /* The event must be signalable */
  752. if (!is_event_signalable(map))
  753. return SDEI_EINVAL;
  754. /* Validate target */
  755. if (!is_valid_mpidr(target_pe))
  756. return SDEI_EINVAL;
  757. /* Raise SGI. Platform will validate target_pe */
  758. plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);
  759. return 0;
  760. }
  761. /* Query SDEI dispatcher features */
  762. static uint64_t sdei_features(unsigned int feature)
  763. {
  764. if (feature == SDEI_FEATURE_BIND_SLOTS) {
  765. return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
  766. num_dyn_shrd_slots);
  767. }
  768. return (uint64_t) SDEI_EINVAL;
  769. }
/*
 * SDEI top level handler for servicing SMCs.
 *
 * Dispatches each SDEI function ID to its implementation, logging entry
 * and exit. Calls are accepted only from the Non-secure world at the
 * expected client EL; anything else gets SMC_UNK. Each case returns via
 * SMC_RET* macros, which do not fall through.
 */
uint64_t sdei_smc_handler(uint32_t smc_fid,
	uint64_t x1,
	uint64_t x2,
	uint64_t x3,
	uint64_t x4,
	void *cookie,
	void *handle,
	uint64_t flags)
{

	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	/* SDEI is callable only from the Non-secure world */
	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		/* The 5th argument arrives via the saved GP register file */
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%" PRIx64 " a:%" PRIx64 " f:%x m:%" PRIx64 "\n", ev_num,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:0x%x)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context, and restore the
		 * interrupted context to a pristine condition, and therefore
		 * shouldn't be modified. We don't return to the caller in this
		 * case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:0x%x)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:0x%x)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(0x%x)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n", ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}
/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);

/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);