/* trusty.c — Trusty secure-OS dispatcher (SPD) for BL31 */
  1. /*
  2. * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
  3. * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
  4. *
  5. * SPDX-License-Identifier: BSD-3-Clause
  6. */
  7. #include <assert.h>
  8. #include <inttypes.h>
  9. #include <lib/xlat_tables/xlat_tables_v2.h>
  10. #include <stdbool.h>
  11. #include <stdint.h>
  12. #include <string.h>
  13. #include <arch_helpers.h>
  14. #include <bl31/bl31.h>
  15. #include <bl31/interrupt_mgmt.h>
  16. #include <common/bl_common.h>
  17. #include <common/debug.h>
  18. #include <common/runtime_svc.h>
  19. #include <lib/el3_runtime/context_mgmt.h>
  20. #include <lib/smccc.h>
  21. #include <plat/common/platform.h>
  22. #include <tools_share/uuid.h>
  23. #include "sm_err.h"
  24. #include "smcall.h"
/* Trusty UID: RFC-4122 compliant UUID version 4 */
DEFINE_SVC_UUID2(trusty_uuid,
	0x40ee25f0, 0xa2bc, 0x304c, 0x8c, 0x4c,
	0xa1, 0x73, 0xc5, 0x7d, 0x8a, 0xf1);

/*
 * macro to check if Hypervisor is enabled in the HCR_EL2 register
 * NOTE(review): presumably a combination of HCR_EL2 trap/virtualization
 * bits set by a running hypervisor — confirm against the Armv8 HCR_EL2
 * bit assignments.
 */
#define HYP_ENABLE_FLAG		0x286001U

/* length of Trusty's input parameters (in bytes) */
#define TRUSTY_PARAMS_LEN_BYTES	(4096U * 2)
/*
 * Secure stack used while running Trusty code via the context-switch
 * helpers. `end` sits just past `space`: its address is handed to
 * trusty_init_context_stack() as the initial (highest) stack pointer,
 * since the stack grows downward.
 */
struct trusty_stack {
	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
	uint32_t end;
};
/* Per-CPU Trusty state: EL1 context, saved stack pointer for the world
 * switch, and the registers stashed around NS FIQ handler dispatch. */
struct trusty_cpu_ctx {
	cpu_context_t cpu_ctx;			/* secure-world EL1 context */
	void *saved_sp;				/* SP saved by the switch helper */
	uint32_t saved_security_state;		/* world we switched away from */
	int32_t fiq_handler_active;		/* nonzero while NS FIQ handler runs */
	uint64_t fiq_handler_pc;		/* NS FIQ handler entry point */
	uint64_t fiq_handler_cpsr;		/* SPSR to enter the handler with */
	uint64_t fiq_handler_sp;		/* sp_el1 to run the handler on */
	uint64_t fiq_pc;			/* interrupted NS pc (ELR_EL3) */
	uint64_t fiq_cpsr;			/* interrupted NS cpsr (SPSR_EL3) */
	uint64_t fiq_sp_el1;			/* interrupted NS sp_el1 */
	gp_regs_t fiq_gpregs;			/* interrupted NS GP registers */
	struct trusty_stack secure_stack;	/* stack for Trusty entry */
};
/* Register bank passed to/returned from Trusty across a world switch
 * (x0-x7 of the SMC calling convention; r7 carries the VMID when a
 * hypervisor is present). */
struct smc_args {
	uint64_t r0;
	uint64_t r1;
	uint64_t r2;
	uint64_t r3;
	uint64_t r4;
	uint64_t r5;
	uint64_t r6;
	uint64_t r7;
};
/* Per-CPU Trusty contexts, indexed by plat_my_core_pos(). */
static struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];

/*
 * Low-level context-switch primitives defined outside this file
 * (presumably in assembly, since they swap the stack pointer stored at
 * *sp — confirm in the accompanying helper source).
 */
struct smc_args trusty_init_context_stack(void **sp, void *new_stack);
struct smc_args trusty_context_switch_helper(void **sp, void *smc_params);

/* VMID of the guest whose yielding SMC is in flight; 0 means none.
 * NOTE(review): shared across CPUs with no locking — looks like it
 * assumes one outstanding SMC at a time; verify in multi-core flows. */
static uint32_t current_vmid;
  65. static struct trusty_cpu_ctx *get_trusty_ctx(void)
  66. {
  67. return &trusty_cpu_ctx[plat_my_core_pos()];
  68. }
  69. static bool is_hypervisor_mode(void)
  70. {
  71. uint64_t hcr = read_hcr();
  72. return ((hcr & HYP_ENABLE_FLAG) != 0U) ? true : false;
  73. }
/*
 * Switch this CPU from `security_state` into the other world, passing
 * r0-r3 through to the other side. Returns the register values the
 * other world eventually hands back when it switches to us again.
 *
 * Statement order here is load-bearing: FP/SIMD and EL1 sysregs are
 * saved for the outgoing world before the stack-swapping helper runs,
 * and restored for it only after the helper returns (i.e. after the
 * other world has switched back).
 */
static struct smc_args trusty_context_switch(uint32_t security_state, uint64_t r0,
					     uint64_t r1, uint64_t r2, uint64_t r3)
{
	struct smc_args args, ret_args;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	struct trusty_cpu_ctx *ctx_smc;

	/* We must currently be in `security_state`, not already switched out. */
	assert(ctx->saved_security_state != security_state);

	args.r7 = 0;
	if (is_hypervisor_mode()) {
		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
		ctx_smc = cm_get_context(NON_SECURE);
		assert(ctx_smc != NULL);
		args.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
	}
	/* r4, r5, r6 reserved for future use. */
	args.r6 = 0;
	args.r5 = 0;
	args.r4 = 0;
	args.r3 = r3;
	args.r2 = r2;
	args.r1 = r1;
	args.r0 = r0;

	/*
	 * To avoid the additional overhead in PSCI flow, skip FP context
	 * saving/restoring in case of CPU suspend and resume, assuming that
	 * when it's needed the PSCI caller has preserved FP context before
	 * going here.
	 */
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME) {
		simd_ctx_save(security_state, false);
	}
	cm_el1_sysregs_context_save(security_state);

	ctx->saved_security_state = security_state;
	/* Swap stacks and run the other world; returns on the next switch back. */
	ret_args = trusty_context_switch_helper(&ctx->saved_sp, &args);

	/* By now the other world must have recorded itself as switched-out. */
	assert(ctx->saved_security_state == ((security_state == 0U) ? 1U : 0U));

	cm_el1_sysregs_context_restore(security_state);
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME) {
		simd_ctx_restore(security_state);
	}

	cm_set_next_eret_context(security_state);

	return ret_args;
}
/*
 * EL3 handler for S-EL1 interrupts taken while the non-secure world was
 * running. Trusty is entered first (SMC_FC_FIQ_ENTER); if it reports the
 * interrupt needs a non-secure handler (r0 == 0), the interrupted NS
 * state is stashed in the per-CPU context and the NS world is re-entered
 * at the handler previously registered via trusty_set_fiq_handler().
 */
static uint64_t trusty_fiq_handler(uint32_t id,
				   uint32_t flags,
				   void *handle,
				   void *cookie)
{
	struct smc_args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	assert(!is_caller_secure(flags));

	/* Give Trusty first crack at the FIQ; nonzero r0 = fully handled. */
	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
	if (ret.r0 != 0U) {
		SMC_RET0(handle);
	}

	/* Refuse to nest: the NS handler has not exited via SMC_FC_FIQ_EXIT. */
	if (ctx->fiq_handler_active != 0) {
		INFO("%s: fiq handler already active\n", __func__);
		SMC_RET0(handle);
	}

	ctx->fiq_handler_active = 1;
	/* Snapshot the interrupted NS state for restoration in trusty_fiq_exit(). */
	(void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_sp_el1 = read_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1);

	/* Redirect the NS ERET into the registered handler on its own stack. */
	write_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1, ctx->fiq_handler_sp);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, (uint32_t)ctx->fiq_handler_cpsr);

	SMC_RET0(handle);
}
  141. static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
  142. uint64_t handler, uint64_t stack)
  143. {
  144. struct trusty_cpu_ctx *ctx;
  145. if (cpu >= (uint64_t)PLATFORM_CORE_COUNT) {
  146. ERROR("%s: cpu %" PRId64 " >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
  147. return (uint64_t)SM_ERR_INVALID_PARAMETERS;
  148. }
  149. ctx = &trusty_cpu_ctx[cpu];
  150. ctx->fiq_handler_pc = handler;
  151. ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
  152. ctx->fiq_handler_sp = stack;
  153. SMC_RET1(handle, 0);
  154. }
/*
 * Return (in x0-x3) the NS state captured at FIQ entry on this CPU:
 * pc, cpsr, sp_el0 and sp_el1 — letting the NS handler inspect what it
 * interrupted.
 */
static uint64_t trusty_get_fiq_regs(void *handle)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);

	SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
}
  161. static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
  162. {
  163. struct smc_args ret;
  164. struct trusty_cpu_ctx *ctx = get_trusty_ctx();
  165. if (ctx->fiq_handler_active == 0) {
  166. NOTICE("%s: fiq handler not active\n", __func__);
  167. SMC_RET1(handle, (uint64_t)SM_ERR_INVALID_PARAMETERS);
  168. }
  169. ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
  170. if (ret.r0 != 1U) {
  171. INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %" PRId64 "\n",
  172. __func__, handle, ret.r0);
  173. }
  174. /*
  175. * Restore register state to state recorded on fiq entry.
  176. *
  177. * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
  178. * restore them.
  179. *
  180. * x1-x4 and x8-x17 need to be restored here because smc_handler64
  181. * corrupts them (el1 code also restored them).
  182. */
  183. (void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
  184. ctx->fiq_handler_active = 0;
  185. write_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1, ctx->fiq_sp_el1);
  186. cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
  187. SMC_RET0(handle);
  188. }
  189. static uintptr_t trusty_smc_handler(uint32_t smc_fid,
  190. u_register_t x1,
  191. u_register_t x2,
  192. u_register_t x3,
  193. u_register_t x4,
  194. void *cookie,
  195. void *handle,
  196. u_register_t flags)
  197. {
  198. struct smc_args ret;
  199. uint32_t vmid = 0U;
  200. entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);
  201. /*
  202. * Return success for SET_ROT_PARAMS if Trusty is not present, as
  203. * Verified Boot is not even supported and returning success here
  204. * would not compromise the boot process.
  205. */
  206. if ((ep_info == NULL) && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
  207. SMC_RET1(handle, 0);
  208. } else if (ep_info == NULL) {
  209. SMC_RET1(handle, SMC_UNK);
  210. } else {
  211. ; /* do nothing */
  212. }
  213. if (is_caller_secure(flags)) {
  214. if (smc_fid == SMC_YC_NS_RETURN) {
  215. ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
  216. SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
  217. ret.r4, ret.r5, ret.r6, ret.r7);
  218. }
  219. INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
  220. cpu %d, unknown smc\n",
  221. __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
  222. plat_my_core_pos());
  223. SMC_RET1(handle, SMC_UNK);
  224. } else {
  225. switch (smc_fid) {
  226. case SMC_FC64_GET_UUID:
  227. case SMC_FC_GET_UUID:
  228. /* provide the UUID for the service to the client */
  229. SMC_UUID_RET(handle, trusty_uuid);
  230. break;
  231. case SMC_FC64_SET_FIQ_HANDLER:
  232. return trusty_set_fiq_handler(handle, x1, x2, x3);
  233. case SMC_FC64_GET_FIQ_REGS:
  234. return trusty_get_fiq_regs(handle);
  235. case SMC_FC_FIQ_EXIT:
  236. return trusty_fiq_exit(handle, x1, x2, x3);
  237. default:
  238. /* Not all OENs greater than SMC_ENTITY_SECURE_MONITOR are supported */
  239. if (SMC_ENTITY(smc_fid) > SMC_ENTITY_SECURE_MONITOR) {
  240. VERBOSE("%s: unsupported SMC FID (0x%x)\n", __func__, smc_fid);
  241. SMC_RET1(handle, SMC_UNK);
  242. }
  243. if (is_hypervisor_mode())
  244. vmid = SMC_GET_GP(handle, CTX_GPREG_X7);
  245. if ((current_vmid != 0) && (current_vmid != vmid)) {
  246. /* This message will cause SMC mechanism
  247. * abnormal in multi-guest environment.
  248. * Change it to WARN in case you need it.
  249. */
  250. VERBOSE("Previous SMC not finished.\n");
  251. SMC_RET1(handle, SM_ERR_BUSY);
  252. }
  253. current_vmid = vmid;
  254. ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
  255. x2, x3);
  256. current_vmid = 0;
  257. SMC_RET1(handle, ret.r0);
  258. }
  259. }
  260. }
/*
 * First entry into Trusty on this CPU (registered as the BL32 init hook
 * and also called from the on_finish handler for late cores). Saves the
 * NS context, builds and enters the secure context, runs Trusty until it
 * first yields, then restores the NS world. Returns 1 (BL32-init success
 * convention used by bl31_register_bl32_init callers).
 *
 * The save/init/switch/restore ordering below must not be rearranged.
 */
static int32_t trusty_init(void)
{
	entry_point_info_t *ep_info;
	struct smc_args zero_args = {0};
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint32_t cpu = plat_my_core_pos();
	uint64_t reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
			       CTX_SPSR_EL3));

	/*
	 * Get information about the Trusty image. Its absence is a critical
	 * failure.
	 */
	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	assert(ep_info != NULL);

	simd_ctx_save(NON_SECURE, false);
	cm_el1_sysregs_context_save(NON_SECURE);

	cm_set_context(&ctx->cpu_ctx, SECURE);
	cm_init_my_context(ep_info);

	/*
	 * Adjust secondary cpu entry point for 32 bit images to the
	 * end of exception vectors
	 * (the +32-byte offset presumably matches the image's vector
	 * layout — confirm against the Trusty 32-bit entry code)
	 */
	if ((cpu != 0U) && (reg_width == MODE_RW_32)) {
		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
		     cpu, ep_info->pc + (1U << 5));
		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
	}

	cm_el1_sysregs_context_restore(SECURE);
	simd_ctx_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	ctx->saved_security_state = ~0U; /* initial saved state is invalid */
	/* Seed the secure stack (top at &secure_stack.end; grows down). */
	(void)trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);

	/* Run Trusty until it first switches back to the NS world. */
	(void)trusty_context_switch_helper(&ctx->saved_sp, &zero_args);

	cm_el1_sysregs_context_restore(NON_SECURE);
	simd_ctx_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return 1;
}
  299. static void trusty_cpu_suspend(uint32_t off)
  300. {
  301. struct smc_args ret;
  302. ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, off, 0, 0);
  303. if (ret.r0 != 0U) {
  304. INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %" PRId64 "\n",
  305. __func__, plat_my_core_pos(), ret.r0);
  306. }
  307. }
  308. static void trusty_cpu_resume(uint32_t on)
  309. {
  310. struct smc_args ret;
  311. ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, on, 0, 0);
  312. if (ret.r0 != 0U) {
  313. INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %" PRId64 "\n",
  314. __func__, plat_my_core_pos(), ret.r0);
  315. }
  316. }
  317. static int32_t trusty_cpu_off_handler(u_register_t max_off_lvl)
  318. {
  319. trusty_cpu_suspend(max_off_lvl);
  320. return 0;
  321. }
  322. static void trusty_cpu_on_finish_handler(u_register_t max_off_lvl)
  323. {
  324. struct trusty_cpu_ctx *ctx = get_trusty_ctx();
  325. if (ctx->saved_sp == NULL) {
  326. (void)trusty_init();
  327. } else {
  328. trusty_cpu_resume(max_off_lvl);
  329. }
  330. }
  331. static void trusty_cpu_suspend_handler(u_register_t max_off_lvl)
  332. {
  333. trusty_cpu_suspend(max_off_lvl);
  334. }
  335. static void trusty_cpu_suspend_finish_handler(u_register_t max_off_lvl)
  336. {
  337. trusty_cpu_resume(max_off_lvl);
  338. }
/* Power-management hooks registered with PSCI in trusty_setup(). */
static const spd_pm_ops_t trusty_pm = {
	.svc_off = trusty_cpu_off_handler,
	.svc_suspend = trusty_cpu_suspend_handler,
	.svc_on_finish = trusty_cpu_on_finish_handler,
	.svc_suspend_finish = trusty_cpu_suspend_finish_handler,
};
/*
 * Populate the boot arguments (x0-x3) passed to Trusty at entry.
 * Platforms may override this weak default, which only reports the
 * secure memory size in arg0.
 */
void plat_trusty_set_boot_args(aapcs64_params_t *args);

/* Fall back to BL32_MEM_SIZE when TSP_SEC_MEM_SIZE is not provided. */
#if !defined(TSP_SEC_MEM_SIZE) && defined(BL32_MEM_SIZE)
#define TSP_SEC_MEM_SIZE BL32_MEM_SIZE
#endif

#ifdef TSP_SEC_MEM_SIZE
#pragma weak plat_trusty_set_boot_args
void plat_trusty_set_boot_args(aapcs64_params_t *args)
{
	args->arg0 = TSP_SEC_MEM_SIZE;
}
#endif
  356. static int32_t trusty_setup(void)
  357. {
  358. entry_point_info_t *ep_info;
  359. uint32_t instr;
  360. uint32_t flags;
  361. int32_t ret;
  362. bool aarch32 = false;
  363. /* Get trusty's entry point info */
  364. ep_info = bl31_plat_get_next_image_ep_info(SECURE);
  365. if (ep_info == NULL) {
  366. VERBOSE("Trusty image missing.\n");
  367. return -1;
  368. }
  369. /* memmap first page of trusty's code memory before peeking */
  370. ret = mmap_add_dynamic_region(ep_info->pc, /* PA */
  371. ep_info->pc, /* VA */
  372. PAGE_SIZE, /* size */
  373. MT_SECURE | MT_RW_DATA); /* attrs */
  374. assert(ret == 0);
  375. /* peek into trusty's code to see if we have a 32-bit or 64-bit image */
  376. instr = *(uint32_t *)ep_info->pc;
  377. if (instr >> 24 == 0xeaU) {
  378. INFO("trusty: Found 32 bit image\n");
  379. aarch32 = true;
  380. } else if (instr >> 8 == 0xd53810U || instr >> 16 == 0x9400U) {
  381. INFO("trusty: Found 64 bit image\n");
  382. } else {
  383. ERROR("trusty: Found unknown image, 0x%x\n", instr);
  384. return -1;
  385. }
  386. /* unmap trusty's memory page */
  387. (void)mmap_remove_dynamic_region(ep_info->pc, PAGE_SIZE);
  388. SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
  389. if (!aarch32)
  390. ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
  391. DISABLE_ALL_EXCEPTIONS);
  392. else
  393. ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
  394. SPSR_E_LITTLE,
  395. DAIF_FIQ_BIT |
  396. DAIF_IRQ_BIT |
  397. DAIF_ABT_BIT);
  398. (void)memset(&ep_info->args, 0, sizeof(ep_info->args));
  399. plat_trusty_set_boot_args(&ep_info->args);
  400. /* register init handler */
  401. bl31_register_bl32_init(trusty_init);
  402. /* register power management hooks */
  403. psci_register_spd_pm_hook(&trusty_pm);
  404. /* register interrupt handler */
  405. flags = 0;
  406. set_interrupt_rm_flag(flags, NON_SECURE);
  407. ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
  408. trusty_fiq_handler,
  409. flags);
  410. if (ret != 0) {
  411. VERBOSE("trusty: failed to register fiq handler, ret = %d\n", ret);
  412. }
  413. if (aarch32) {
  414. entry_point_info_t *ns_ep_info;
  415. uint32_t spsr;
  416. ns_ep_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
  417. if (ns_ep_info == NULL) {
  418. NOTICE("Trusty: non-secure image missing.\n");
  419. return -1;
  420. }
  421. spsr = ns_ep_info->spsr;
  422. if (GET_RW(spsr) == MODE_RW_64 && GET_EL(spsr) == MODE_EL2) {
  423. spsr &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
  424. spsr |= MODE_EL1 << MODE_EL_SHIFT;
  425. }
  426. if (GET_RW(spsr) == MODE_RW_32 && GET_M32(spsr) == MODE32_hyp) {
  427. spsr &= ~(MODE32_MASK << MODE32_SHIFT);
  428. spsr |= MODE32_svc << MODE32_SHIFT;
  429. }
  430. if (spsr != ns_ep_info->spsr) {
  431. NOTICE("Trusty: Switch bl33 from EL2 to EL1 (spsr 0x%x -> 0x%x)\n",
  432. ns_ep_info->spsr, spsr);
  433. ns_ep_info->spsr = spsr;
  434. }
  435. }
  436. return 0;
  437. }
/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	trusty_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	trusty_setup,
	trusty_smc_handler
);

/* Define a SPD runtime service descriptor for yielding SMC calls
 * (no setup hook: trusty_setup runs once via the fast descriptor). */
DECLARE_RT_SVC(
	trusty_std,

	OEN_TAP_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_YIELD,
	NULL,
	trusty_smc_handler
);