/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <bpmp_ipc.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/delay_timer.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <memctrl_v2.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern uint32_t __tegra194_cpu_reset_handler_data,
                __tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET        16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK          0xFU

/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK         0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT        4U

/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK         0x180cU
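
/*
 * Per-CPU PSCI data. The alignment to the cache writeback granule
 * ensures that clean_dcache_range() on one core's entry cannot write
 * back or evict another core's data.
 */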
static struct t19x_psci_percpu_data {
        uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

int32_t tegra_soc_validate_power_state(uint32_t power_state,
                                       psci_power_state_t *req_state)
{
        uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
                           TEGRA194_STATE_ID_MASK;
        uint32_t cpu = plat_my_core_pos();
        int32_t ret = PSCI_E_SUCCESS;

        /* save the core wake time (in TSC ticks) */
        t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
                                          << TEGRA194_WAKE_TIME_SHIFT;

        /*
         * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
         * that the correct value is read in tegra_soc_pwr_domain_suspend(),
         * which is called with caches disabled. It is possible to read a stale
         * value from DRAM in that function, because the L2 cache is not flushed
         * unless the cluster is entering CC6/CC7.
         */
        clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
                           sizeof(t19x_percpu_data[cpu]));

        /* Sanity check the requested state id */
        switch (state_id) {
        case PSTATE_ID_CORE_IDLE:
                if (psci_get_pstate_type(power_state) != PSTATE_TYPE_STANDBY) {
                        ret = PSCI_E_INVALID_PARAMS;
                        break;
                }

                /* Core idle request */
                req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
                req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
                break;

        default:
                ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
                ret = PSCI_E_INVALID_PARAMS;
                break;
        }

        return ret;
}
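
/*
 * The wake time stored by tegra_soc_validate_power_state() above is
 * consumed below: it is handed to the MCE as the core sleep time when
 * the core enters the C6 idle state.
 */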
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
        uint32_t cpu = plat_my_core_pos();
        mce_cstate_info_t cstate_info = { 0 };

        /* Program default wake mask */
        cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
        cstate_info.update_wake_mask = 1;
        mce_update_cstate_info(&cstate_info);

        /* Enter CPU idle */
        (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
                                  (uint64_t)TEGRA_NVG_CORE_C6,
                                  t19x_percpu_data[cpu].wake_time,
                                  0U);

        return PSCI_E_SUCCESS;
}
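
/*
 * Handler for the power-down path of CPU_SUSPEND. For the SoC power-down
 * state (SC7), this saves the MC and SE contexts to TZDRAM and then asks
 * the MCE firmware to enter system suspend.
 */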
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
        const plat_local_state_t *pwr_domain_state;
        uint8_t stateid_afflvl2;
        plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
        uint64_t mc_ctx_base;
        uint32_t val;
        mce_cstate_info_t sc7_cstate_info = {
                .cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
                .ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
                .system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
                .system_state_force = 1U,
                .update_wake_mask = 1U,
        };
        int32_t ret = 0;

        /* get the state ID */
        pwr_domain_state = target_state->pwr_domain_state;
        stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
                          TEGRA194_STATE_ID_MASK;

        if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
                /* save 'Secure Boot' Processor Feature Config Register */
                val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
                mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

                /* save MC context */
                mc_ctx_base = params_from_bl2->tzdram_base +
                              tegra194_get_mc_ctx_offset();
                tegra_mc_save_context((uintptr_t)mc_ctx_base);

                /*
                 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
                 * since the VDK does not support atomic SE context save
                 */
                if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
                        ret = tegra_se_suspend();
                        assert(ret == 0);
                }

                /* Prepare for system suspend */
                mce_update_cstate_info(&sc7_cstate_info);
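
                /* Poll until the MCE firmware reports that SC7 entry is allowed */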
                do {
                        val = (uint32_t)mce_command_handler(
                                        (uint32_t)MCE_CMD_IS_SC7_ALLOWED,
                                        (uint32_t)TEGRA_NVG_CORE_C7,
                                        MCE_CORE_SLEEP_TIME_INFINITE,
                                        0U);
                } while (val == 0U);

                /* Instruct the MCE to enter system suspend state */
                ret = mce_command_handler(
                                (uint64_t)MCE_CMD_ENTER_CSTATE,
                                (uint64_t)TEGRA_NVG_CORE_C7,
                                MCE_CORE_SLEEP_TIME_INFINITE,
                                0U);
                assert(ret == 0);

                /* set system suspend state for house-keeping */
                tegra194_set_system_suspend_entry();
        }

        return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
                                         uint32_t ncpu)
{
        plat_local_state_t target;
        bool last_on_cpu = true;
        uint32_t num_cpus = ncpu, pos = 0;

        do {
                target = states[pos];
                if (target != PLAT_MAX_OFF_STATE) {
                        last_on_cpu = false;
                }
                --num_cpus;
                pos++;
        } while (num_cpus != 0U);

        return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
                                                      uint32_t ncpu)
{
        uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
        plat_local_state_t target = states[core_pos];
        mce_cstate_info_t cstate_info = { 0 };

        /* CPU off */
        if (target == PLAT_MAX_OFF_STATE) {
                /* Enable cluster powerdn from last CPU in the cluster */
                if (tegra_last_on_cpu_in_cluster(states, ncpu)) {
                        /* Enable CC6 state and turn off wake mask */
                        cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
                        cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
                        cstate_info.system_state_force = 1;
                        cstate_info.update_wake_mask = 1U;
                        mce_update_cstate_info(&cstate_info);
                } else {
                        /* Turn off wake_mask */
                        cstate_info.update_wake_mask = 1U;
                        mce_update_cstate_info(&cstate_info);
                        target = PSCI_LOCAL_STATE_RUN;
                }
        }

        return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
                                                  const plat_local_state_t *states,
                                                  uint32_t ncpu)
{
        plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
        uint32_t cpu = plat_my_core_pos();

        /* System Suspend */
        if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
                target = PSTATE_ID_SOC_POWERDN;
        }

        /* CPU off, CPU suspend */
        if (lvl == (uint32_t)MPIDR_AFFLVL1) {
                target = tegra_get_afflvl1_pwr_state(states, ncpu);
        }

        /* target cluster/system state */
        return target;
}
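
/*
 * Called just before the final wfi on the power-down path. TZSRAM loses
 * power in SC7, so on the system suspend path BL31 hashes its own image
 * and copies it to TZDRAM, from where it is restored on warm boot.
 */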
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
        const plat_local_state_t *pwr_domain_state =
                target_state->pwr_domain_state;
        plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
        uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
                TEGRA194_STATE_ID_MASK;
        uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
        uint64_t val;
        int32_t ret = PSCI_E_SUCCESS;

        if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
                /* initialise communication channel with BPMP */
                ret = tegra_bpmp_ipc_init();
                assert(ret == 0);

                /* Enable SE clock before SE context save */
                ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
                assert(ret == 0);

                /*
                 * It is very unlikely that the BL31 image would be
                 * bigger than 2^32 bytes
                 */
                assert(src_len_in_bytes < UINT32_MAX);

                if (tegra_se_calculate_save_sha256(BL31_BASE,
                                (uint32_t)src_len_in_bytes) != 0) {
                        ERROR("Hash calculation failed. Reboot\n");
                        (void)tegra_soc_prepare_system_reset();
                }

                /*
                 * The TZRAM loses power when we enter system suspend. To
                 * allow a graceful exit from system suspend, we need to copy
                 * BL3-1 over to TZDRAM.
                 */
                val = params_from_bl2->tzdram_base +
                      tegra194_get_cpu_reset_handler_size();
                memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
                       src_len_in_bytes);

                /* Disable SE clock after SE context save */
                ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
                assert(ret == 0);
        }

        return ret;
}

int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
        return PSCI_E_NOT_SUPPORTED;
}
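
/*
 * Power on a CPU. The MPIDR is converted to the linear core number the
 * MCE expects; with two cores per cluster this is (cluster << 1) + cpu.
 */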
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
        uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
        uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
                        MPIDR_AFFINITY_BITS;
        int32_t ret = 0;

        if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
                ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
                return PSCI_E_NOT_PRESENT;
        }

        /* construct the target CPU # */
        target_cpu += (target_cluster << 1U);

        ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
        if (ret < 0) {
                return PSCI_E_DENIED;
        }

        return PSCI_E_SUCCESS;
}
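
/*
 * Restore per-core state when a CPU comes online. On exit from SC7 this
 * also re-enables strict checking (if configured), re-initialises the
 * SMMU, resumes the SE blocks and reprograms the XUSB stream IDs.
 */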
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
        const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
        uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
        uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
        cpu_context_t *ctx = cm_get_context(NON_SECURE);
        uint64_t actlr_elx;

        /*
         * Reset the power state info when a CPU comes online: we program
         * the deepest power state when offlining a core, but that may not
         * be what the non-secure software controlling idle states has
         * requested. Non-secure software re-initialises this info once the
         * core is online.
         */
        actlr_elx = read_el1_ctx_common((get_el1_sysregs_ctx(ctx)), actlr_el1);
        actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
        actlr_elx |= DENVER_CPU_PMSTATE_C1;
        write_el1_ctx_common((get_el1_sysregs_ctx(ctx)), actlr_el1, actlr_elx);

        /*
         * Check if we are exiting from deep sleep and restore SE
         * context if we are.
         */
        if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
                /*
                 * Enable strict checking after programming the GSC for
                 * enabling TZSRAM and TZDRAM
                 */
                mce_enable_strict_checking();
#endif

                /* Init SMMU */
                tegra_smmu_init();

                /* Resume SE, RNG1 and PKA1 */
                tegra_se_resume();

                /*
                 * Program XUSB STREAMIDs
                 * ======================
                 * T19x XUSB supports XUSB virtualization, with one
                 * physical function (PF) and four virtual functions
                 * (VF).
                 *
                 * Until T186, XUSB had only the two SIDs below:
                 * 1) #define TEGRA_SID_XUSB_HOST 0x1bU
                 * 2) #define TEGRA_SID_XUSB_DEV 0x1cU
                 *
                 * Four new SIDs have been added for the VFs:
                 * 3) #define TEGRA_SID_XUSB_VF0 0x5dU
                 * 4) #define TEGRA_SID_XUSB_VF1 0x5eU
                 * 5) #define TEGRA_SID_XUSB_VF2 0x5fU
                 * 6) #define TEGRA_SID_XUSB_VF3 0x60U
                 *
                 * When virtualization is enabled, the SID override must be
                 * disabled and the SIDs above programmed into the newly
                 * added SID registers in the XUSB PADCTL MMIO space. These
                 * registers are TZ-protected, so this must be done in ATF.
                 *
                 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
                 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0 (0x139cU)
                 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
                 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
                 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
                 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
                 *
                 * This disables the SID override and programs the XUSB SIDs
                 * into the registers above, supporting both virtualization
                 * and non-virtualization platforms.
                 */
                if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
                        mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
                        assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_HOST);
                        mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
                        assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_0) == TEGRA_SID_XUSB_VF0);
                        mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
                        assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_1) == TEGRA_SID_XUSB_VF1);
                        mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
                        assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_2) == TEGRA_SID_XUSB_VF2);
                        mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
                        assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_HOST_AXI_STREAMID_VF_3) == TEGRA_SID_XUSB_VF3);
                        mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
                        assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
                                XUSB_PADCTL_DEV_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_DEV);
                }
        }

        /*
         * Enable dual execution optimized translations for all ELx.
         */
        if (enable_ccplex_lock_step != 0U) {
                actlr_elx = read_actlr_el3();
                actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
                write_actlr_el3(actlr_elx);

                actlr_elx = read_actlr_el2();
                actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
                write_actlr_el2(actlr_elx);

                actlr_elx = read_actlr_el1();
                actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
                write_actlr_el1(actlr_elx);
        }

        return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_off_early(const psci_power_state_t *target_state)
{
        /* Do not power off the boot CPU */
        if (plat_is_my_cpu_primary()) {
                return PSCI_E_DENIED;
        }

        return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
        uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
        int32_t ret = 0;

        (void)target_state;

        /* Disable Denver's DCO operations */
        if (impl == DENVER_IMPL) {
                denver_disable_dco();
        }

        /* Turn off CPU */
        ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
                        (uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
        assert(ret == 0);

        return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
        /* System power off */
        mce_system_shutdown();

        wfi();

        /* wait for the system to power down */
        for (;;) {
                ;
        }
}

int32_t tegra_soc_prepare_system_reset(void)
{
        /* System reboot */
        mce_system_reboot();

        return PSCI_E_SUCCESS;
}