/*
 * Copyright (c) 2019-2020, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* common headers */
#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <errno.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>

/* mediatek platform specific headers */
#include <platform_def.h>
#include <scu.h>
#include <mt_gic_v3.h>
#include <mtk_mcdi.h>
#include <mtk_plat_common.h>
#include <mtgpio.h>
#include <mtspmc.h>
#include <plat_dcm.h>
#include <plat_debug.h>
#include <plat_params.h>
#include <plat_private.h>
#include <power_tracer.h>
#include <pmic.h>
#include <spm.h>
#include <spm_suspend.h>
#include <sspm.h>
#include <rtc.h>

/* Local power state for power domains in Run state. */
#define MTK_LOCAL_STATE_RUN	0
/* Local power state for retention. */
#define MTK_LOCAL_STATE_RET	1
/* Local power state for OFF/power-down. */
#define MTK_LOCAL_STATE_OFF	2

#if PSCI_EXTENDED_STATE_ID
/*
 * Macros used to parse state information from State-ID if it is using the
 * recommended encoding for State-ID.
 */
#define MTK_LOCAL_PSTATE_WIDTH	4
#define MTK_LOCAL_PSTATE_MASK	((1 << MTK_LOCAL_PSTATE_WIDTH) - 1)

/* Macros to construct the composite power state */

/* Make composite power state parameter till power level 0 */
#define mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
	(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))
#else /* !PSCI_EXTENDED_STATE_ID */
#define mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
	(((lvl0_state) << PSTATE_ID_SHIFT) | \
	 ((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
	 ((type) << PSTATE_TYPE_SHIFT))
#endif /* PSCI_EXTENDED_STATE_ID */

/* Make composite power state parameter till power level 1 */
#define mtk_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
	(((lvl1_state) << MTK_LOCAL_PSTATE_WIDTH) | \
	 mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))

/* Make composite power state parameter till power level 2 */
#define mtk_make_pwrstate_lvl2( \
		lvl2_state, lvl1_state, lvl0_state, pwr_lvl, type) \
	(((lvl2_state) << (MTK_LOCAL_PSTATE_WIDTH * 2)) | \
	 mtk_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type))

#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ? \
	(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)

#if PSCI_EXTENDED_STATE_ID
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif

#define CPU_IDX(cluster, cpu)	(((cluster) << 2) + (cpu))

#define ON	true
#define OFF	false

/* Pause MCDI when CPU hotplug */
static bool HP_SSPM_PAUSE;
/* CPU Hotplug by SSPM */
static bool HP_SSPM_CTRL = true;
/* Turn off cluster when CPU hotplug off */
static bool HP_CLUSTER_OFF = true;
/* Turn off cluster when CPU MCDI off */
static bool MCDI_C2 = true;
/* Enable MCDI */
static bool MCDI_SSPM = true;

static uintptr_t secure_entrypoint;
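
/*
 * Program the MCUCFG register at offset 0x2200, assumed (from the function
 * name) to hold the L2 configuration of the big-core (MP1) cluster, before
 * that cluster is powered on.
 */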
static void mp1_L2_desel_config(void)
{
	mmio_write_64(MCUCFG_BASE + 0x2200, 0x2092c820);

	dsb();
}
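
/*
 * Check whether the given CPU is the last powered-on core of its cluster.
 * The status is read from an SPM register (0x10006180, assumed to be the
 * power status register); the per-CPU bits of the two clusters are described
 * by cpu_mask[] and cpu_pwr_bit[].
 */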
static bool clst_single_pwr(int cluster, int cpu)
{
	uint32_t cpu_mask[2] = { 0x00001e00, 0x000f0000 };
	uint32_t cpu_pwr_bit[] = { 9, 10, 11, 12, 16, 17, 18, 19 };
	int my_idx = (cluster << 2) + cpu;
	uint32_t pwr_stat = mmio_read_32(0x10006180);

	return !(pwr_stat & (cpu_mask[cluster] & ~BIT(cpu_pwr_bit[my_idx])));
}
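
/*
 * Check whether the given CPU is the only core of its cluster still marked
 * as available in the MCDI available-CPU mask, i.e. whether hotplugging it
 * off leaves the cluster empty.
 */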
static bool clst_single_on(int cluster, int cpu)
{
	uint32_t cpu_mask[2] = { 0x0f, 0xf0 };
	int my_idx = (cluster << 2) + cpu;
	uint32_t on_stat = mcdi_avail_cpu_mask_read();

	return !(on_stat & (cpu_mask[cluster] & ~BIT(my_idx)));
}

static void plat_cpu_pwrdwn_common(void)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	mt_gic_rdistif_save();
	mt_gic_cpuif_disable();
}

static void plat_cpu_pwron_common(void)
{
	/* Enable the gic cpu interface */
	mt_gic_cpuif_enable();
	mt_gic_rdistif_init();
	mt_gic_rdistif_restore();
}
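
/*
 * Cluster-level power-down/power-up helpers: disable/enable SCU and CCI
 * coherency for this cluster and, for the big-core cluster (cluster 1),
 * apply the extra settings (GIC sync DCM, L2 parity/config and circular
 * buffer setup).
 */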
static void plat_cluster_pwrdwn_common(uint64_t mpidr, int cluster)
{
	if (cluster > 0)
		mt_gic_sync_dcm_enable();

	/* Disable coherency */
	plat_mtk_cci_disable();
	disable_scu(mpidr);
}

static void plat_cluster_pwron_common(uint64_t mpidr, int cluster)
{
	if (cluster > 0) {
		l2c_parity_check_setup();
		circular_buffer_setup();
		mp1_L2_desel_config();
		mt_gic_sync_dcm_disable();
	}

	/* Enable coherency */
	enable_scu(mpidr);
	plat_mtk_cci_enable();
	/* Enable big core dcm */
	plat_dcm_restore_cluster_on(mpidr);
	/* Enable rgu dcm */
	plat_dcm_rgu_enable();
}
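
/*
 * PSCI cpu_standby handler: route physical IRQ/FIQ to EL3 so that they can
 * wake this core from WFI, execute WFI, then restore the original SCR_EL3
 * once execution resumes.
 */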
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);

	isb();
	dsb();
	wfi();

	write_scr_el3(scr);
}
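
/*
 * MCDI control helpers. When the SSPM does not own CPU hotplug but MCDI is
 * enabled, MCDI is paused/unpaused around hotplug transitions. When a CPU
 * (and possibly its cluster) goes down through MCDI, the SSPM is given the
 * secure boot address, the standby-WFI interrupt of that CPU is enabled and
 * the cluster power state is notified. MCDI is paused across system suspend.
 */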
static void mcdi_ctrl_before_hotplug_on(int cluster, int cpu)
{
	if (!HP_SSPM_CTRL && HP_SSPM_PAUSE && MCDI_SSPM) {
		mcdi_pause_clr(cluster, CPU_IDX(cluster, cpu), OFF);
		mcdi_pause_set(cluster, CPU_IDX(cluster, cpu), ON);
	}
}

static void mcdi_ctrl_before_hotplug_off(int cluster, int cpu, bool cluster_off)
{
	if (!HP_SSPM_CTRL && HP_SSPM_PAUSE && MCDI_SSPM)
		mcdi_pause_set(cluster_off ? cluster : -1,
			       CPU_IDX(cluster, cpu), OFF);
}

static void mcdi_ctrl_cluster_cpu_off(int cluster, int cpu, bool cluster_off)
{
	if (MCDI_SSPM) {
		sspm_set_bootaddr(secure_entrypoint);

		sspm_standbywfi_irq_enable(CPU_IDX(cluster, cpu));

		if (cluster_off)
			sspm_cluster_pwr_off_notify(cluster);
		else
			sspm_cluster_pwr_on_notify(cluster);
	}
}

static void mcdi_ctrl_suspend(void)
{
	if (MCDI_SSPM)
		mcdi_pause();
}

static void mcdi_ctrl_resume(void)
{
	if (MCDI_SSPM)
		mcdi_unpause();
}
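
/*
 * Hotplug helpers: forward CPU/cluster power-on and power-off requests to
 * the SSPM when it controls hotplug, otherwise drive the SPM power
 * controller directly, and keep the MCDI available-CPU mask up to date.
 */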
static void hotplug_ctrl_cluster_on(int cluster, int cpu)
{
	if (HP_SSPM_CTRL && MCDI_SSPM) {
		mcdi_hotplug_clr(cluster, CPU_IDX(cluster, cpu), OFF);
		mcdi_hotplug_set(cluster, -1, ON);
		mcdi_hotplug_wait_ack(cluster, -1, ON);
	} else {
		/* power on cluster */
		if (!spm_get_cluster_powerstate(cluster))
			spm_poweron_cluster(cluster);
	}
}

static void hotplug_ctrl_cpu_on(int cluster, int cpu)
{
	if (HP_SSPM_CTRL && MCDI_SSPM)
		mcdi_hotplug_set(cluster, CPU_IDX(cluster, cpu), ON);
	else
		spm_poweron_cpu(cluster, cpu);
}

static void hotplug_ctrl_cpu_on_finish(int cluster, int cpu)
{
	spm_disable_cpu_auto_off(cluster, cpu);

	if (HP_SSPM_CTRL && MCDI_SSPM)
		mcdi_hotplug_clr(cluster, CPU_IDX(cluster, cpu), ON);
	else if (HP_SSPM_PAUSE && MCDI_SSPM)
		mcdi_pause_clr(cluster, CPU_IDX(cluster, cpu), ON);

	mcdi_avail_cpu_mask_set(BIT(CPU_IDX(cluster, cpu)));
}

static void hotplug_ctrl_cluster_cpu_off(int cluster, int cpu, bool cluster_off)
{
	mcdi_avail_cpu_mask_clr(BIT(CPU_IDX(cluster, cpu)));

	if (HP_SSPM_CTRL && MCDI_SSPM) {
		mcdi_hotplug_set(cluster_off ? cluster : -1,
				 CPU_IDX(cluster, cpu), OFF);
	} else {
		spm_enable_cpu_auto_off(cluster, cpu);

		if (cluster_off)
			spm_enable_cluster_auto_off(cluster);

		spm_set_cpu_power_off(cluster, cpu);
	}
}
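
/*
 * PSCI pwr_domain_on handler: power up the target cluster if it is off,
 * program all CPUs of a newly powered cluster to reset into AArch64 at the
 * secure entrypoint, then power on the requested CPU.
 */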
static int plat_mtk_power_domain_on(unsigned long mpidr)
{
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	int clst_pwr = spm_get_cluster_powerstate(cluster);
	unsigned int i;

	mcdi_ctrl_before_hotplug_on(cluster, cpu);
	hotplug_ctrl_cluster_on(cluster, cpu);

	if (clst_pwr == 0) {
		/* init CPU reset arch state of the cluster as AArch64 */
		for (i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER; i++) {
			mcucfg_init_archstate(cluster, i, 1);
			mcucfg_set_bootaddr(cluster, i, secure_entrypoint);
		}
	}

	hotplug_ctrl_cpu_on(cluster, cpu);

	return PSCI_E_SUCCESS;
}
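
/*
 * PSCI pwr_domain_off and pwr_domain_on_finish handlers. On the off path the
 * cluster is also powered down when this is the last CPU online in it (and
 * HP_CLUSTER_OFF allows it); the finish path restores cluster coherency, the
 * GIC CPU interface and the hotplug/MCDI bookkeeping.
 */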
static void plat_mtk_power_domain_off(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl1 = (pds[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF);
	bool cluster_off = (HP_CLUSTER_OFF && afflvl1 &&
			    clst_single_on(cluster, cpu));

	plat_cpu_pwrdwn_common();

	if (cluster_off)
		plat_cluster_pwrdwn_common(mpidr, cluster);

	mcdi_ctrl_before_hotplug_off(cluster, cpu, cluster_off);
	hotplug_ctrl_cluster_cpu_off(cluster, cpu, cluster_off);
}

static void plat_mtk_power_domain_on_finish(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl1 = (pds[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF);

	if (afflvl1)
		plat_cluster_pwron_common(mpidr, cluster);

	plat_cpu_pwron_common();

	hotplug_ctrl_cpu_on_finish(cluster, cpu);
}
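
/*
 * PSCI pwr_domain_suspend handler. For a system-level (afflvl2) suspend the
 * SSPM is notified over IPI, the SPM suspend sequence is run and the GIC
 * distributor context is saved; otherwise only the CPU, and possibly its
 * cluster, is powered down through the MCDI/SPM path.
 */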
static void plat_mtk_power_domain_suspend(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl1 = (pds[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF);
	bool afflvl2 = (pds[MPIDR_AFFLVL2] == MTK_LOCAL_STATE_OFF);
	bool cluster_off = MCDI_C2 && afflvl1 && clst_single_pwr(cluster, cpu);

	plat_cpu_pwrdwn_common();

	plat_dcm_mcsi_a_backup();

	if (cluster_off || afflvl2)
		plat_cluster_pwrdwn_common(mpidr, cluster);

	if (afflvl2) {
		spm_data_t spm_d = { .cmd = SPM_SUSPEND };
		uint32_t *d = (uint32_t *)&spm_d;
		uint32_t l = sizeof(spm_d) / sizeof(uint32_t);

		mcdi_ctrl_suspend();

		spm_set_bootaddr(secure_entrypoint);

		if (MCDI_SSPM)
			sspm_ipi_send_non_blocking(IPI_ID_SUSPEND, d);

		spm_system_suspend();

		if (MCDI_SSPM)
			while (sspm_ipi_recv_non_blocking(IPI_ID_SUSPEND, d, l))
				;

		mt_gic_distif_save();
	} else {
		mcdi_ctrl_cluster_cpu_off(cluster, cpu, cluster_off);
	}
}
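
/*
 * PSCI pwr_domain_suspend_finish handler. On resume from system suspend the
 * GIC is reinitialised and its context restored, the SPM/SSPM resume
 * handshake is completed and MCDI is unpaused; in every case cluster
 * coherency and the MCSI DCM settings are restored afterwards.
 */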
static void plat_mtk_power_domain_suspend_finish(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl2 = (pds[MPIDR_AFFLVL2] == MTK_LOCAL_STATE_OFF);

	if (afflvl2) {
		spm_data_t spm_d = { .cmd = SPM_RESUME };
		uint32_t *d = (uint32_t *)&spm_d;
		uint32_t l = sizeof(spm_d) / sizeof(uint32_t);

		mt_gic_init();
		mt_gic_distif_restore();
		mt_gic_rdistif_restore();

		mmio_write_32(EMI_WFIFO, 0xf);

		if (MCDI_SSPM)
			sspm_ipi_send_non_blocking(IPI_ID_SUSPEND, d);

		spm_system_suspend_finish();

		if (MCDI_SSPM)
			while (sspm_ipi_recv_non_blocking(IPI_ID_SUSPEND, d, l))
				;

		mcdi_ctrl_resume();
	} else {
		plat_cpu_pwron_common();
	}

	plat_cluster_pwron_common(mpidr, cluster);

	plat_dcm_mcsi_a_restore();
}

#if PSCI_EXTENDED_STATE_ID
static int plat_mtk_validate_power_state(unsigned int power_state,
					 psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	if (!MCDI_SSPM)
		return PSCI_E_INVALID_PARAMS;

	/*
	 * Currently we are using a linear search for finding the matching
	 * entry in the idle power state array. This can be made a binary
	 * search if the number of entries justify the additional complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						   MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
#else /* !PSCI_EXTENDED_STATE_ID */
static int plat_mtk_validate_power_state(unsigned int power_state,
					 psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It is only possible to enter standby at power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] = MTK_LOCAL_STATE_RET;
	} else if (!MCDI_SSPM) {
		return PSCI_E_INVALID_PARAMS;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
	}

	return PSCI_E_SUCCESS;
}
#endif /* PSCI_EXTENDED_STATE_ID */

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_mtk_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_power_off_sequence();
	wk_pmic_enable_sdn_delay();
	pmic_power_off();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_mtk_system_reset(void)
{
	struct bl_aux_gpio_info *gpio_reset = plat_get_mtk_gpio_reset();

	INFO("MTK System Reset\n");

	mt_set_gpio_out(gpio_reset->index, gpio_reset->polarity);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

static void plat_mtk_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * MTK platform PSCI operations exported to the generic PSCI layer. They are
 * invoked on CPU standby, power on/off, suspend/resume, system off/reset and
 * power state validation.
 ******************************************************************************/
static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_mtk_power_domain_on,
	.pwr_domain_on_finish		= plat_mtk_power_domain_on_finish,
	.pwr_domain_off			= plat_mtk_power_domain_off,
	.pwr_domain_suspend		= plat_mtk_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_mtk_power_domain_suspend_finish,
	.system_off			= plat_mtk_system_off,
	.system_reset			= plat_mtk_system_reset,
	.validate_power_state		= plat_mtk_validate_power_state,
	.get_sys_suspend_power_state	= plat_mtk_get_sys_suspend_power_state
};
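
/*
 * Export the PSCI operations and record the secure entrypoint. CPUs of
 * cluster 0 are pre-programmed to reset into AArch64 at the secure
 * entrypoint; SSPM-based hotplug control and MCDI are disabled when the
 * MCDI control status check fails.
 */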
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	unsigned int i;

	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;

	/* init CPU reset arch state of cluster 0 as AArch64 */
	for (i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER; i++) {
		mcucfg_init_archstate(0, i, 1);
		mcucfg_set_bootaddr(0, i, secure_entrypoint);
	}

	if (!check_mcdi_ctl_stat()) {
		HP_SSPM_CTRL = false;
		MCDI_SSPM = false;
	}

	return 0;
}