/*
 * Copyright (c) 2013-2024, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>
#include <tools_share/uuid.h>

#include "tspd_private.h"
/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot. NULL until the TSP
 * reports TSP_ENTRY_DONE (see tspd_smc_handler).
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state. Indexed by the linear
 * core position returned by plat_my_core_pos().
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];

/* TSP UID, returned to normal-world callers via the TOS_UID SMC. */
DEFINE_SVC_UUID2(tsp_uuid,
	0xa056305b, 0x9132, 0x7b42, 0x98, 0x11,
	0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

/* Forward declaration: first synchronous entry into the TSP after cold boot. */
int32_t tspd_init(void);
  44. /*
  45. * This helper function handles Secure EL1 preemption. The preemption could be
  46. * due Non Secure interrupts or EL3 interrupts. In both the cases we context
  47. * switch to the normal world and in case of EL3 interrupts, it will again be
  48. * routed to EL3 which will get handled at the exception vectors.
  49. */
  50. uint64_t tspd_handle_sp_preemption(void *handle)
  51. {
  52. cpu_context_t *ns_cpu_context;
  53. assert(handle == cm_get_context(SECURE));
  54. cm_el1_sysregs_context_save(SECURE);
  55. /* Get a reference to the non-secure context */
  56. ns_cpu_context = cm_get_context(NON_SECURE);
  57. assert(ns_cpu_context);
  58. /*
  59. * To allow Secure EL1 interrupt handler to re-enter TSP while TSP
  60. * is preempted, the secure system register context which will get
  61. * overwritten must be additionally saved. This is currently done
  62. * by the TSPD S-EL1 interrupt handler.
  63. */
  64. /*
  65. * Restore non-secure state.
  66. */
  67. cm_el1_sysregs_context_restore(NON_SECURE);
  68. cm_set_next_eret_context(NON_SECURE);
  69. /*
  70. * The TSP was preempted during execution of a Yielding SMC Call.
  71. * Return back to the normal world with SMC_PREEMPTED as error
  72. * code in x0.
  73. */
  74. SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
  75. }
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 * Typically, interrupts for a specific security state get handled in the same
 * security exception level if the execution is in the same security state. For
 * example, if a non-secure interrupt gets fired when CPU is executing in NS-EL2
 * it gets handled in the non-secure world.
 * However, interrupts belonging to the opposite security state typically demand
 * a world(context) switch. This is inline with the security principle which
 * states a secure interrupt has to be handled in the secure world.
 * Hence, the TSPD in EL3 expects the context(handle) for a secure interrupt to
 * be non-secure and vice versa.
 * However, a race condition between non-secure and secure interrupts can lead
 * to a scenario where the above assumptions do not hold true. This is
 * demonstrated below through Note 1.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];

#if TSP_NS_INTR_ASYNC_PREEMPT
	/*
	 * Note 1:
	 * Under the current interrupt routing model, interrupts from other
	 * world are routed to EL3 when TSP_NS_INTR_ASYNC_PREEMPT is enabled.
	 * Consider the following scenario:
	 * 1/ A non-secure payload(like tftf) requests a secure service from
	 *    TSP by invoking a yielding SMC call.
	 * 2/ Later, execution jumps to TSP in S-EL1 with the help of TSP
	 *    Dispatcher in Secure Monitor(EL3).
	 * 3/ While CPU is executing TSP, a Non-secure interrupt gets fired.
	 *    this demands a context switch to the non-secure world through
	 *    secure monitor.
	 * 4/ Consequently, TSP in S-EL1 get asynchronously pre-empted and
	 *    execution switches to secure monitor(EL3).
	 * 5/ EL3 tries to triage the (Non-secure) interrupt based on the
	 *    highest pending interrupt.
	 * 6/ However, while the NS Interrupt was pending, secure timer gets
	 *    fired which makes a S-EL1 interrupt to be pending.
	 * 7/ Hence, execution jumps to this companion handler of S-EL1
	 *    interrupt (i.e., tspd_sel1_interrupt_handler) even though the TSP
	 *    was pre-empted due to non-secure interrupt.
	 * 8/ The above sequence of events explain how TSP was pre-empted by
	 *    S-EL1 interrupt indirectly in an asynchronous way.
	 * 9/ Hence, we track the TSP pre-emption by S-EL1 interrupt using a
	 *    boolean variable per each core.
	 * 10/ This helps us to indicate that SMC call for TSP service was
	 *    pre-empted when execution resumes in non-secure world.
	 */

	/* Check the security state when the exception was generated */
	if (get_interrupt_src_ss(flags) == NON_SECURE) {
		/* Sanity check the pointer to this cpu's context */
		assert(handle == cm_get_context(NON_SECURE));

		/* Save the non-secure context before entering the TSP */
		cm_el1_sysregs_context_save(NON_SECURE);
		tsp_ctx->preempted_by_sel1_intr = false;
	} else {
		/* Sanity check the pointer to this cpu's context */
		assert(handle == cm_get_context(SECURE));

		/* Save the secure context before entering the TSP for S-EL1
		 * interrupt handling
		 */
		cm_el1_sysregs_context_save(SECURE);
		tsp_ctx->preempted_by_sel1_intr = true;
	}
#else
	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);
#endif

	/* The secure context installed on this core must be the TSP's. */
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_yield_smc_active_flag(tsp_ctx->state)) {
		/* Stash ELR/SPSR so the yielding SMC can be resumed later. */
		tsp_ctx->saved_spsr_el3 = (uint32_t)SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		/* Preserve the whole interrupted secure GP register file. */
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

	/* Enter the TSP at its S-EL1 interrupt entry point. */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
			    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle a S-EL1 interrupt synchronously.
	 * Also the instruction in normal world where the interrupt was
	 * generated is passed for debugging purposes. It is safe to retrieve
	 * this address from ELR_EL3 as the secure context will not take effect
	 * until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
}
  189. #if TSP_NS_INTR_ASYNC_PREEMPT
  190. /*******************************************************************************
  191. * This function is the handler registered for Non secure interrupts by the
  192. * TSPD. It validates the interrupt and upon success arranges entry into the
  193. * normal world for handling the interrupt.
  194. ******************************************************************************/
  195. static uint64_t tspd_ns_interrupt_handler(uint32_t id,
  196. uint32_t flags,
  197. void *handle,
  198. void *cookie)
  199. {
  200. /* Check the security state when the exception was generated */
  201. assert(get_interrupt_src_ss(flags) == SECURE);
  202. /*
  203. * Disable the routing of NS interrupts from secure world to EL3 while
  204. * interrupted on this core.
  205. */
  206. disable_intr_rm_local(INTR_TYPE_NS, SECURE);
  207. return tspd_handle_sp_preemption(handle);
  208. }
  209. #endif
  210. /*******************************************************************************
  211. * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
  212. * (aarch32/aarch64) if not already known and initialises the context for entry
  213. * into the SP for its initialisation.
  214. ******************************************************************************/
  215. static int32_t tspd_setup(void)
  216. {
  217. entry_point_info_t *tsp_ep_info;
  218. uint32_t linear_id;
  219. linear_id = plat_my_core_pos();
  220. /*
  221. * Get information about the Secure Payload (BL32) image. Its
  222. * absence is a critical failure. TODO: Add support to
  223. * conditionally include the SPD service
  224. */
  225. tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
  226. if (!tsp_ep_info) {
  227. WARN("No TSP provided by BL2 boot loader, Booting device"
  228. " without TSP initialization. SMC`s destined for TSP"
  229. " will return SMC_UNK\n");
  230. return 1;
  231. }
  232. /*
  233. * If there's no valid entry point for SP, we return a non-zero value
  234. * signalling failure initializing the service. We bail out without
  235. * registering any handlers
  236. */
  237. if (!tsp_ep_info->pc)
  238. return 1;
  239. /*
  240. * We could inspect the SP image and determine its execution
  241. * state i.e whether AArch32 or AArch64. Assuming it's AArch64
  242. * for the time being.
  243. */
  244. tspd_init_tsp_ep_state(tsp_ep_info,
  245. TSP_AARCH64,
  246. tsp_ep_info->pc,
  247. &tspd_sp_context[linear_id]);
  248. #if TSP_INIT_ASYNC
  249. bl31_set_next_image_type(SECURE);
  250. #else
  251. /*
  252. * All TSPD initialization done. Now register our init function with
  253. * BL31 for deferred invocation
  254. */
  255. bl31_register_bl32_init(&tspd_init);
  256. #endif
  257. return 0;
  258. }
  259. /*******************************************************************************
  260. * This function passes control to the Secure Payload image (BL32) for the first
  261. * time on the primary cpu after a cold boot. It assumes that a valid secure
  262. * context has already been created by tspd_setup() which can be directly used.
  263. * It also assumes that a valid non-secure context has been initialised by PSCI
  264. * so it does not need to save and restore any non-secure state. This function
  265. * performs a synchronous entry into the Secure payload. The SP passes control
  266. * back to this routine through a SMC.
  267. ******************************************************************************/
  268. int32_t tspd_init(void)
  269. {
  270. uint32_t linear_id = plat_my_core_pos();
  271. tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
  272. entry_point_info_t *tsp_entry_point;
  273. uint64_t rc;
  274. /*
  275. * Get information about the Secure Payload (BL32) image. Its
  276. * absence is a critical failure.
  277. */
  278. tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
  279. assert(tsp_entry_point);
  280. cm_init_my_context(tsp_entry_point);
  281. /*
  282. * Arrange for an entry into the test secure payload. It will be
  283. * returned via TSP_ENTRY_DONE case
  284. */
  285. rc = tspd_synchronous_sp_entry(tsp_ctx);
  286. assert(rc != 0);
  287. return rc;
  288. }
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
static uintptr_t tspd_smc_handler(uint32_t smc_fid,
				  u_register_t x1,
				  u_register_t x2,
				  u_register_t x3,
				  u_register_t x4,
				  void *cookie,
				  void *handle,
				  u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 *
	 */
	case TSP_PREEMPTED:
		/* Only the TSP itself may report preemption. */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling a S-EL1 interrupt or was preempted by a higher
	 * priority pending EL3 interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_INTR:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which saved to service
		 * this SMC.
		 */
		if (get_yield_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
				TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		/* Refer to Note 1 in function tspd_sel1_interrupt_handler()*/
#if TSP_NS_INTR_ASYNC_PREEMPT
		if (tsp_ctx->preempted_by_sel1_intr) {
			/* Reset the flag */
			tsp_ctx->preempted_by_sel1_intr = false;

			SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
		} else {
			SMC_RET0((uint64_t) ns_cpu_context);
		}
#else
		SMC_RET0((uint64_t) ns_cpu_context);
#endif

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register power
			 * management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();

#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Register an interrupt handler for NS interrupts when
			 * generated during code executing in secure state are
			 * routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
						tspd_ns_interrupt_handler,
						flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}

#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
			GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;
#endif
	/*
	 * This function ID is used only by the SP to indicate it has finished
	 * aborting a preempted Yielding SMC Call.
	 */
	case TSP_ABORT_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;

	/*
	 * Request from non-secure client to perform an
	 * arithmetic operation or response from secure
	 * payload to an earlier request.
	 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
	/*
	 * Request from non-secure client to perform a check
	 * of the DIT PSTATE bit.
	 */
	case TSP_YIELD_FID(TSP_CHECK_DIT):
	/*
	 * Request from non-secure client to modify the EL1
	 * context registers.
	 */
	case TSP_YIELD_FID(TSP_MODIFY_EL1_CTX):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/* Set appropriate entry for SMC.
			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
				/*
				 * With EL3 exception handling, while an SMC is
				 * being processed, Non-secure interrupts can't
				 * preempt Secure execution. However, for
				 * yielding SMCs, we want preemption to happen;
				 * so explicitly allow NS preemption in this
				 * case, and supply the preemption return code
				 * for TSP.
				 */
				ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x3. Copy it
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}
		assert(0); /* Unreachable */

	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);

	/*
	 * Request from non secure world to resume the preempted
	 * Yielding SMC Call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
		/*
		 * Allow the resumed yielding SMC processing to be preempted by
		 * Non-secure interrupts. Also, supply the preemption return
		 * code for TSP.
		 */
		ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif

		/* We just need to return to the preempted point in
		 * TSP and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to non-secure
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		/* Unrecognised function ID: fall through to SMC_UNK below. */
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define a SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,		/* No init: tspd_setup already ran via tspd_fast */
	tspd_smc_handler
);