/* pmu.c — PX30 PMU driver (power domains, CPU hotplug, system suspend) */
  1. /*
  2. * Copyright (c) 2019-2024, ARM Limited and Contributors. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <errno.h>
  8. #include <platform_def.h>
  9. #include <arch_helpers.h>
  10. #include <bl31/bl31.h>
  11. #include <common/debug.h>
  12. #include <drivers/console.h>
  13. #include <drivers/delay_timer.h>
  14. #include <lib/bakery_lock.h>
  15. #include <lib/mmio.h>
  16. #include <plat/common/platform.h>
  17. #include <cpus_on_fixed_addr.h>
  18. #include <plat_private.h>
  19. #include <pmu.h>
  20. #include <px30_def.h>
  21. #include <secure.h>
  22. #include <soc.h>
/* Serialises all PMU power-domain register sequences across cores. */
DEFINE_BAKERY_LOCK(rockchip_pd_lock);
#define rockchip_pd_lock_init() bakery_lock_init(&rockchip_pd_lock)
#define rockchip_pd_lock_get() bakery_lock_get(&rockchip_pd_lock)
#define rockchip_pd_lock_rls() bakery_lock_release(&rockchip_pd_lock)

/*
 * Boot configuration block shared with the SRAM resume stub; its pm_flag
 * field selects the warm-boot path on wakeup (see sys suspend/resume below).
 */
static struct psram_data_t *psram_boot_cfg =
	(struct psram_data_t *)&sys_sleep_flag_sram;
/*
 * There are two ways to power a core on or off:
 * 1) Drive its power domain directly via PMU_PWRDN_CON
 *    (core_pwr_pd mode).
 * 2) Arm the per-core power manager in PMU_CORE_PM_CON; the core's
 *    domain is then powered off automatically once it executes wfi
 *    (core_pwr_wfi or core_pwr_wfi_int mode).
 * cores_pd_cfg_info records which method is currently in use per core.
 */
static uint32_t cores_pd_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section(".tzfw_coherent_mem")))
#endif
;
/*
 * Register state saved across system suspend. Everything here is written
 * by the *_suspend/*_config helpers below and replayed by the matching
 * *_restore helpers; it must survive with DDR in self-refresh, hence the
 * optional placement in the coherent (uncached) section.
 */
struct px30_sleep_ddr_data {
	uint32_t clk_sel0;
	uint32_t cru_mode_save;
	uint32_t cru_pmu_mode_save;
	uint32_t ddrc_hwlpctl;
	uint32_t ddrc_pwrctrl;
	uint32_t ddrgrf_con0;
	uint32_t ddrgrf_con1;
	uint32_t ddrstdby_con0;
	uint32_t gpio0b_iomux;
	uint32_t gpio0c_iomux;
	uint32_t pmu_pwrmd_core_l;
	uint32_t pmu_pwrmd_core_h;
	uint32_t pmu_pwrmd_cmm_l;
	uint32_t pmu_pwrmd_cmm_h;
	uint32_t pmu_wkup_cfg2_l;
	uint32_t pmu_cru_clksel_con0;
	uint32_t pmugrf_soc_con0;
	uint32_t pmusgrf_soc_con0;
	uint32_t pmic_slp_iomux;
	uint32_t pgrf_pvtm_con[2];
	/* Clock gate and PLL configuration snapshots */
	uint32_t cru_clk_gate[CRU_CLKGATES_CON_CNT];
	uint32_t cru_pmu_clk_gate[CRU_PMU_CLKGATE_CON_CNT];
	uint32_t cru_plls_con_save[END_PLL_ID][PLL_CON_CNT];
	/* AXI QoS settings, one array per initiator (see qos_save()) */
	uint32_t cpu_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_128m_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_rd_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_wr_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp_m1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vip_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t rga_rd_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t rga_wr_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vop_m0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vop_m1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vpu_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vpu_r128_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t dmac_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t crypto_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t nand_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sfc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_host_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_otg_qos[CPU_AXI_QOS_NUM_REGS];
};

static struct px30_sleep_ddr_data ddr_data
#if USE_COHERENT_MEM
__attribute__ ((section(".tzfw_coherent_mem")))
#endif
;
  97. static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
  98. {
  99. assert(cpu_id < PLATFORM_CORE_COUNT);
  100. return cores_pd_cfg_info[cpu_id];
  101. }
/*
 * Record the power-down mode chosen for @cpu_id so that the matching
 * power-up path knows how the core was parked. Without coherent memory
 * the entry must be flushed so a powering-up core sees it with caches off.
 */
static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	cores_pd_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&cores_pd_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}
  111. static inline uint32_t pmu_power_domain_st(uint32_t pd)
  112. {
  113. return mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd) ?
  114. pmu_pd_off :
  115. pmu_pd_on;
  116. }
  117. static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
  118. {
  119. uint32_t loop = 0;
  120. int ret = 0;
  121. rockchip_pd_lock_get();
  122. mmio_write_32(PMU_BASE + PMU_PWRDN_CON,
  123. BITS_WITH_WMASK(pd_state, 0x1, pd));
  124. dsb();
  125. while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
  126. udelay(1);
  127. loop++;
  128. }
  129. if (pmu_power_domain_st(pd) != pd_state) {
  130. WARN("%s: %d, %d, error!\n", __func__, pd, pd_state);
  131. ret = -EINVAL;
  132. }
  133. rockchip_pd_lock_rls();
  134. return ret;
  135. }
  136. static inline uint32_t pmu_bus_idle_st(uint32_t bus)
  137. {
  138. return !!((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus)) &&
  139. (mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus + 16)));
  140. }
  141. static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
  142. {
  143. uint32_t wait_cnt = 0;
  144. mmio_write_32(PMU_BASE + PMU_BUS_IDLE_REQ,
  145. BITS_WITH_WMASK(state, 0x1, bus));
  146. while (pmu_bus_idle_st(bus) != state &&
  147. wait_cnt < BUS_IDLE_LOOP) {
  148. udelay(1);
  149. wait_cnt++;
  150. }
  151. if (pmu_bus_idle_st(bus) != state)
  152. WARN("%s:idle_st=0x%x, bus_id=%d\n",
  153. __func__, mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), bus);
  154. }
/*
 * Snapshot the AXI QoS registers of every initiator whose power domain
 * is currently on. Domains that are already off lose their QoS state
 * anyway and are skipped; qos_restore() applies the same gating.
 */
static void qos_save(void)
{
	/* scu powerdomain will power off, so cpu qos should be saved */
	SAVE_QOS(ddr_data.cpu_qos, CPU);

	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(ddr_data.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_VI) == pmu_pd_on) {
		SAVE_QOS(ddr_data.isp_128m_qos, ISP_128M);
		SAVE_QOS(ddr_data.isp_rd_qos, ISP_RD);
		SAVE_QOS(ddr_data.isp_wr_qos, ISP_WR);
		SAVE_QOS(ddr_data.isp_m1_qos, ISP_M1);
		SAVE_QOS(ddr_data.vip_qos, VIP);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		SAVE_QOS(ddr_data.rga_rd_qos, RGA_RD);
		SAVE_QOS(ddr_data.rga_wr_qos, RGA_WR);
		SAVE_QOS(ddr_data.vop_m0_qos, VOP_M0);
		SAVE_QOS(ddr_data.vop_m1_qos, VOP_M1);
	}
	if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) {
		SAVE_QOS(ddr_data.vpu_qos, VPU);
		SAVE_QOS(ddr_data.vpu_r128_qos, VPU_R128);
	}
	if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) {
		SAVE_QOS(ddr_data.emmc_qos, EMMC);
		SAVE_QOS(ddr_data.nand_qos, NAND);
		SAVE_QOS(ddr_data.sdio_qos, SDIO);
		SAVE_QOS(ddr_data.sfc_qos, SFC);
	}
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		SAVE_QOS(ddr_data.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on)
		SAVE_QOS(ddr_data.crypto_qos, CRYPTO);
	if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on)
		SAVE_QOS(ddr_data.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_USB) == pmu_pd_on) {
		SAVE_QOS(ddr_data.usb_host_qos, USB_HOST);
		SAVE_QOS(ddr_data.usb_otg_qos, USB_OTG);
	}
}
/*
 * Mirror of qos_save(): re-apply the saved AXI QoS registers for every
 * initiator whose power domain is on after resume.
 */
static void qos_restore(void)
{
	RESTORE_QOS(ddr_data.cpu_qos, CPU);

	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(ddr_data.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_VI) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.isp_128m_qos, ISP_128M);
		RESTORE_QOS(ddr_data.isp_rd_qos, ISP_RD);
		RESTORE_QOS(ddr_data.isp_wr_qos, ISP_WR);
		RESTORE_QOS(ddr_data.isp_m1_qos, ISP_M1);
		RESTORE_QOS(ddr_data.vip_qos, VIP);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.rga_rd_qos, RGA_RD);
		RESTORE_QOS(ddr_data.rga_wr_qos, RGA_WR);
		RESTORE_QOS(ddr_data.vop_m0_qos, VOP_M0);
		RESTORE_QOS(ddr_data.vop_m1_qos, VOP_M1);
	}
	if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.vpu_qos, VPU);
		RESTORE_QOS(ddr_data.vpu_r128_qos, VPU_R128);
	}
	if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.emmc_qos, EMMC);
		RESTORE_QOS(ddr_data.nand_qos, NAND);
		RESTORE_QOS(ddr_data.sdio_qos, SDIO);
		RESTORE_QOS(ddr_data.sfc_qos, SFC);
	}
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		RESTORE_QOS(ddr_data.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on)
		RESTORE_QOS(ddr_data.crypto_qos, CRYPTO);
	if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on)
		RESTORE_QOS(ddr_data.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_USB) == pmu_pd_on) {
		RESTORE_QOS(ddr_data.usb_host_qos, USB_HOST);
		RESTORE_QOS(ddr_data.usb_otg_qos, USB_OTG);
	}
}
  234. static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
  235. {
  236. uint32_t state;
  237. if (pmu_power_domain_st(pd_id) == pd_state)
  238. goto out;
  239. if (pd_state == pmu_pd_on)
  240. pmu_power_domain_ctr(pd_id, pd_state);
  241. state = (pd_state == pmu_pd_off) ? bus_idle : bus_active;
  242. switch (pd_id) {
  243. case PD_GPU:
  244. pmu_bus_idle_req(BUS_ID_GPU, state);
  245. break;
  246. case PD_VI:
  247. pmu_bus_idle_req(BUS_ID_VI, state);
  248. break;
  249. case PD_VO:
  250. pmu_bus_idle_req(BUS_ID_VO, state);
  251. break;
  252. case PD_VPU:
  253. pmu_bus_idle_req(BUS_ID_VPU, state);
  254. break;
  255. case PD_MMC_NAND:
  256. pmu_bus_idle_req(BUS_ID_MMC, state);
  257. break;
  258. case PD_GMAC:
  259. pmu_bus_idle_req(BUS_ID_GMAC, state);
  260. break;
  261. case PD_CRYPTO:
  262. pmu_bus_idle_req(BUS_ID_CRYPTO, state);
  263. break;
  264. case PD_SDCARD:
  265. pmu_bus_idle_req(BUS_ID_SDCARD, state);
  266. break;
  267. case PD_USB:
  268. pmu_bus_idle_req(BUS_ID_USB, state);
  269. break;
  270. default:
  271. break;
  272. }
  273. if (pd_state == pmu_pd_off)
  274. pmu_power_domain_ctr(pd_id, pd_state);
  275. out:
  276. return 0;
  277. }
/* PMU_PWRDN_ST snapshot taken at suspend; consumed by resume below. */
static uint32_t pmu_powerdomain_state;

/*
 * Power down all non-CPU domains for system suspend. Clock gates are
 * temporarily opened so the bus-idle handshakes can complete, then
 * restored. The pre-suspend domain states are recorded so resume only
 * re-enables domains that were actually on.
 */
static void pmu_power_domains_suspend(void)
{
	uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT];

	clk_gate_con_save(clkgt_save);
	clk_gate_con_disable();
	qos_save();
	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_VI, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_VPU, pmu_pd_off);
	pmu_set_power_domain(PD_MMC_NAND, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_CRYPTO, pmu_pd_off);
	pmu_set_power_domain(PD_SDCARD, pmu_pd_off);
	pmu_set_power_domain(PD_USB, pmu_pd_off);
	clk_gate_con_restore(clkgt_save);
}
/*
 * Undo pmu_power_domains_suspend(): re-enable, in reverse order, every
 * domain that was on before suspend (bit clear in the saved PWRDN_ST
 * snapshot means "was powered"), then restore QoS and clock gates.
 */
static void pmu_power_domains_resume(void)
{
	uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT];

	clk_gate_con_save(clkgt_save);
	clk_gate_con_disable();
	if (!(pmu_powerdomain_state & BIT(PD_USB)))
		pmu_set_power_domain(PD_USB, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SDCARD)))
		pmu_set_power_domain(PD_SDCARD, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_CRYPTO)))
		pmu_set_power_domain(PD_CRYPTO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_MMC_NAND)))
		pmu_set_power_domain(PD_MMC_NAND, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VPU)))
		pmu_set_power_domain(PD_VPU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VO)))
		pmu_set_power_domain(PD_VO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VI)))
		pmu_set_power_domain(PD_VI, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
		pmu_set_power_domain(PD_GPU, pmu_pd_on);
	qos_restore();
	clk_gate_con_restore(clkgt_save);
}
  323. static int check_cpu_wfie(uint32_t cpu)
  324. {
  325. uint32_t loop = 0, wfie_msk = CKECK_WFEI_MSK << cpu;
  326. while (!(mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) &&
  327. (loop < WFEI_CHECK_LOOP)) {
  328. udelay(1);
  329. loop++;
  330. }
  331. if ((mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) == 0) {
  332. WARN("%s: %d, %d, error!\n", __func__, cpu, wfie_msk);
  333. return -EINVAL;
  334. }
  335. return 0;
  336. }
/*
 * Power up core @cpu_id. If it was parked in core_pwr_pd mode, toggle
 * its power domain directly (off first if it is still on, so the core
 * resets cleanly). Otherwise it was parked via the auto power manager:
 * wait for the domain to drop, then trigger a software wakeup.
 * Returns 0 on success, -EINVAL if the core never powered down.
 */
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd, apm_value, cfg_info, loop = 0;

	cpu_pd = PD_CPU0 + cpu_id;
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(CORES_PM_DISABLE));
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			/* NOTE(review): duplicate of the write above — looks redundant */
			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
				      WITH_16BITS_WMSK(CORES_PM_DISABLE));
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		/* wait cpu down */
		while (pmu_power_domain_st(cpu_pd) == pmu_pd_on && loop < 100) {
			udelay(2);
			loop++;
		}

		/* return error if can't wait cpu down */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s:can't wait cpu down\n", __func__);
			return -EINVAL;
		}

		/* power up cpu in power down state */
		apm_value = BIT(core_pm_sft_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(apm_value));
	}

	return 0;
}
/*
 * Park core @cpu_id using method @pd_cfg. core_pwr_pd cuts the domain
 * directly (only safe once the core sits in wfi/wfe, hence the check);
 * the wfi modes arm the auto power manager so the domain drops on the
 * core's next wfi. The chosen mode is recorded for cpus_power_domain_on().
 * Returns 0 on success, -EINVAL if the core is not in wfi/wfe yet.
 */
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd, apm_value;

	cpu_pd = PD_CPU0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id))
			return -EINVAL;
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(CORES_PM_DISABLE));
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		apm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
		if (pd_cfg == core_pwr_wfi_int)
			apm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      WITH_16BITS_WMSK(apm_value));
	}

	return 0;
}
  394. static void nonboot_cpus_off(void)
  395. {
  396. uint32_t boot_cpu, cpu;
  397. boot_cpu = plat_my_core_pos();
  398. for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
  399. if (cpu == boot_cpu)
  400. continue;
  401. cpus_power_domain_off(cpu, core_pwr_pd);
  402. }
  403. }
/*
 * PSCI CPU_ON hook: publish the hotplug flag and secure entry point for
 * the target core (dsb so they are visible before power-up), then power
 * the core's domain on.
 */
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr,
				 uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return PSCI_E_SUCCESS;
}
  416. int rockchip_soc_cores_pwr_dm_on_finish(void)
  417. {
  418. uint32_t cpu_id = plat_my_core_pos();
  419. mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
  420. WITH_16BITS_WMSK(CORES_PM_DISABLE));
  421. return PSCI_E_SUCCESS;
  422. }
  423. int rockchip_soc_cores_pwr_dm_off(void)
  424. {
  425. uint32_t cpu_id = plat_my_core_pos();
  426. cpus_power_domain_off(cpu_id, core_pwr_wfi);
  427. return PSCI_E_SUCCESS;
  428. }
/*
 * PSCI CPU suspend hook: publish the auto-power-down flag and resume
 * entry point for this core, then arm wfi power-down with interrupt
 * wakeup enabled.
 */
int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return PSCI_E_SUCCESS;
}
/* PSCI CPU resume hook: disable this core's auto power management. */
int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
		      WITH_16BITS_WMSK(CORES_PM_DISABLE));

	return PSCI_E_SUCCESS;
}
/* Gate (or ungate) the clocks selected by @msk in CRU register @con. */
#define CLK_MSK_GATING(msk, con) \
	mmio_write_32(CRU_BASE + (con), ((msk) << 16) | 0xffff)
#define CLK_MSK_UNGATING(msk, con) \
	mmio_write_32(CRU_BASE + (con), ((~(msk)) << 16) | 0xffff)

/*
 * Per-CLKGATE_CON bitmasks of clocks that must stay ungated during
 * suspend (set bit = keep running). Everything else is gated by
 * clk_gate_suspend().
 */
static uint32_t clk_ungt_msk[CRU_CLKGATES_CON_CNT] = {
	0xe0ff, 0xffff, 0x0000, 0x0000,
	0x0000, 0x0380, 0x0000, 0x0000,
	0x07c0, 0x0000, 0x0000, 0x000f,
	0x0061, 0x1f02, 0x0440, 0x1801,
	0x004b, 0x0000
};

/* Same, for the PMU CRU gate registers. */
static uint32_t clk_pmu_ungt_msk[CRU_PMU_CLKGATE_CON_CNT] = {
	0xf1ff, 0x0310
};
  462. void clk_gate_suspend(void)
  463. {
  464. int i;
  465. for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) {
  466. ddr_data.cru_clk_gate[i] =
  467. mmio_read_32(CRU_BASE + CRU_CLKGATES_CON(i));
  468. mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i),
  469. WITH_16BITS_WMSK(~clk_ungt_msk[i]));
  470. }
  471. for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++) {
  472. ddr_data.cru_pmu_clk_gate[i] =
  473. mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i));
  474. mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i),
  475. WITH_16BITS_WMSK(~clk_pmu_ungt_msk[i]));
  476. }
  477. }
  478. void clk_gate_resume(void)
  479. {
  480. int i;
  481. for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++)
  482. mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i),
  483. WITH_16BITS_WMSK(ddr_data.cru_pmu_clk_gate[i]));
  484. for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
  485. mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i),
  486. WITH_16BITS_WMSK(ddr_data.cru_clk_gate[i]));
  487. }
/*
 * Calibrate the PVTM oscillator against the 24 MHz crystal, derive a
 * divider that yields ~32 kHz, and switch the PMU 32k clock source to
 * PVTM so the crystal can be stopped during suspend. The replaced
 * register values are saved for pvtm_32k_config_restore().
 */
static void pvtm_32k_config(void)
{
	uint32_t pvtm_freq_khz, pvtm_div;

	ddr_data.pmu_cru_clksel_con0 =
		mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0));
	ddr_data.pgrf_pvtm_con[0] =
		mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON0);
	ddr_data.pgrf_pvtm_con[1] =
		mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON1);

	/* Reset the PVTM, then start a calibration run of PVTM_CALC_CNT cycles */
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(0, 0x3, pgrf_pvtm_st));
	dsb();
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_en));
	dsb();
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1, PVTM_CALC_CNT);
	dsb();
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_st));

	/* pmugrf_pvtm_st0 will be cleared after the PVTM starts,
	 * which takes at least about 6 PVTM cycles.
	 * So wait 30 PVTM cycles to be safe.
	 */
	while (mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) < 30)
		;

	dsb();
	while (!(mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST0) & 0x1))
		;

	/* ST1 holds PVTM cycles counted per PVTM_CALC_CNT 24 MHz cycles */
	pvtm_freq_khz =
		(mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) * 24000 +
		 PVTM_CALC_CNT / 2) / PVTM_CALC_CNT;
	pvtm_div = (pvtm_freq_khz + 16) / 32;

	/* pvtm_div = div_factor << 2 + 1,
	 * so div_factor = (pvtm_div - 1) >> 2.
	 * But the ">> 2" already clears the low bits of pvtm_div,
	 * so the "- 1" can be skipped as compensation.
	 */
	pvtm_div = pvtm_div >> 2;
	if (pvtm_div > 0x3f)
		pvtm_div = 0x3f;

	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      BITS_WITH_WMASK(pvtm_div, 0x3f, pgrf_pvtm_div));

	/* select pvtm as 32k source */
	mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0),
		      BITS_WITH_WMASK(1, 0x3U, 14));
}
/* Switch the 32k source back and restore the saved PVTM registers. */
static void pvtm_32k_config_restore(void)
{
	mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0),
		      ddr_data.pmu_cru_clksel_con0 | BITS_WMSK(0x3U, 14));
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
		      WITH_16BITS_WMSK(ddr_data.pgrf_pvtm_con[0]));
	/* CON1 has no write-mask half; restored as a plain 32-bit write */
	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1,
		      ddr_data.pgrf_pvtm_con[1]);
}
/*
 * Hand DDR control over to the PMU for suspend: disable the controller's
 * own low-power features, wait for it to reach the expected operating
 * state, then enable PMU-driven self-refresh and retention. Each replaced
 * register is saved for ddr_sleep_config_restore().
 */
static void ddr_sleep_config(void)
{
	/* disable ddr pd, sr */
	ddr_data.ddrc_pwrctrl = mmio_read_32(DDR_UPCTL_BASE + 0x30);
	mmio_write_32(DDR_UPCTL_BASE + 0x30, BITS_WITH_WMASK(0x0, 0x3, 0));

	/* disable ddr auto gt */
	ddr_data.ddrgrf_con1 = mmio_read_32(DDRGRF_BASE + 0x4);
	mmio_write_32(DDRGRF_BASE + 0x4, BITS_WITH_WMASK(0x0, 0x1f, 0));

	/* disable ddr standby */
	ddr_data.ddrstdby_con0 = mmio_read_32(DDR_STDBY_BASE + 0x0);
	mmio_write_32(DDR_STDBY_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 0));
	/* wait for the controller's operating-mode field to read back 1 */
	while ((mmio_read_32(DDR_UPCTL_BASE + 0x4) & 0x7) != 1)
		;

	/* ddr pmu ctrl */
	ddr_data.ddrgrf_con0 = mmio_read_32(DDRGRF_BASE + 0x0);
	mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 5));
	dsb();
	mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x1, 0x1, 4));

	/* ddr ret sel */
	ddr_data.pmugrf_soc_con0 =
		mmio_read_32(PMUGRF_BASE + PMUGRF_SOC_CON(0));
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0),
		      BITS_WITH_WMASK(0x0, 0x1, 12));
}
/* Undo ddr_sleep_config() in reverse order of the suspend sequence. */
static void ddr_sleep_config_restore(void)
{
	/* restore ddr ret sel */
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0),
		      ddr_data.pmugrf_soc_con0 | BITS_WMSK(0x1, 12));

	/* restore ddr pmu ctrl */
	mmio_write_32(DDRGRF_BASE + 0x0,
		      ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 4));
	dsb();
	mmio_write_32(DDRGRF_BASE + 0x0,
		      ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 5));

	/* restore ddr standby */
	mmio_write_32(DDR_STDBY_BASE + 0x0,
		      ddr_data.ddrstdby_con0 | BITS_WMSK(0x1, 0));

	/* restore ddr auto gt */
	mmio_write_32(DDRGRF_BASE + 0x4,
		      ddr_data.ddrgrf_con1 | BITS_WMSK(0x1f, 0));

	/* restore ddr pd, sr */
	mmio_write_32(DDR_UPCTL_BASE + 0x30,
		      ddr_data.ddrc_pwrctrl | BITS_WMSK(0x3, 0));
}
/*
 * Program the PMU suspend finite-state machine: power-mode control bits,
 * wakeup sources, and the various settle/stabilise counters. Replaced
 * registers are saved for pmu_sleep_restore().
 */
static void pmu_sleep_config(void)
{
	uint32_t pwrmd_core_lo, pwrmd_core_hi, pwrmd_com_lo, pwrmd_com_hi;
	uint32_t pmu_wkup_cfg2_lo;
	uint32_t clk_freq_khz;

	/* save pmic_sleep iomux gpio0_a4 */
	ddr_data.pmic_slp_iomux = mmio_read_32(PMUGRF_BASE + GPIO0A_IOMUX);
	ddr_data.pmu_pwrmd_core_l =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_LO);
	ddr_data.pmu_pwrmd_core_h =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_HI);
	ddr_data.pmu_pwrmd_cmm_l =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO);
	ddr_data.pmu_pwrmd_cmm_h =
		mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI);
	ddr_data.pmu_wkup_cfg2_l = mmio_read_32(PMU_BASE + PMU_WKUP_CFG2_LO);

	/* Core power-mode: power down CPU0/SCU, flush L2, gate sources */
	pwrmd_core_lo = BIT(pmu_global_int_dis) |
			BIT(pmu_core_src_gt) |
			BIT(pmu_cpu0_pd) |
			BIT(pmu_clr_core) |
			BIT(pmu_scu_pd) |
			BIT(pmu_l2_idle) |
			BIT(pmu_l2_flush) |
			BIT(pmu_clr_bus2main) |
			BIT(pmu_clr_peri2msch);

	/* Power down every PLL while suspended */
	pwrmd_core_hi = BIT(pmu_dpll_pd_en) |
			BIT(pmu_apll_pd_en) |
			BIT(pmu_cpll_pd_en) |
			BIT(pmu_gpll_pd_en) |
			BIT(pmu_npll_pd_en);

	/* Common power-mode: DDR self-refresh, oscillator off, idle fabric */
	pwrmd_com_lo = BIT(pmu_mode_en) |
		       BIT(pmu_pll_pd) |
		       BIT(pmu_pmu_use_if) |
		       BIT(pmu_alive_use_if) |
		       BIT(pmu_osc_dis) |
		       BIT(pmu_sref_enter) |
		       BIT(pmu_ddrc_gt) |
		       BIT(pmu_clr_pmu) |
		       BIT(pmu_clr_peri_pmu);

	pwrmd_com_hi = BIT(pmu_clr_bus) |
		       BIT(pmu_clr_msch) |
		       BIT(pmu_wakeup_begin_cfg);

	/* Wake on cluster event, GPIO, or timer */
	pmu_wkup_cfg2_lo = BIT(pmu_cluster_wkup_en) |
			   BIT(pmu_gpio_wkup_en) |
			   BIT(pmu_timer_wkup_en);

	/* set pmic_sleep iomux gpio0_a4 */
	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX,
		      BITS_WITH_WMASK(1, 0x3, 8));

	/* Counters below tick at the ~32 kHz sleep clock */
	clk_freq_khz = 32;

	mmio_write_32(PMU_BASE + PMU_OSC_CNT_LO,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT_HI,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16));

	mmio_write_32(PMU_BASE + PMU_STABLE_CNT_LO,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT_HI,
		      WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16));

	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_LO,
		      WITH_16BITS_WMSK(clk_freq_khz * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_HI,
		      WITH_16BITS_WMSK(clk_freq_khz * 2 >> 16));

	/* Pmu's clk has switched to 24M back When pmu FSM counts
	 * the follow counters, so we should use 24M to calculate
	 * these counters.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 2 >> 16));

	mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 2 >> 16));

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 5 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 5 >> 16));

	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_LO,
		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_HI,
		      WITH_16BITS_WMSK(24000 * 2 >> 16));

	/* Config pmu power mode and pmu wakeup source */
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO,
		      WITH_16BITS_WMSK(pwrmd_core_lo));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI,
		      WITH_16BITS_WMSK(pwrmd_core_hi));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO,
		      WITH_16BITS_WMSK(pwrmd_com_lo));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI,
		      WITH_16BITS_WMSK(pwrmd_com_hi));
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO,
		      WITH_16BITS_WMSK(pmu_wkup_cfg2_lo));
}
/* Restore the PMU power-mode/wakeup registers saved by pmu_sleep_config(). */
static void pmu_sleep_restore(void)
{
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_l));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_h));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_l));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI,
		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_h));
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO,
		      WITH_16BITS_WMSK(ddr_data.pmu_wkup_cfg2_l));

	/* restore pmic_sleep iomux */
	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX,
		      WITH_16BITS_WMSK(ddr_data.pmic_slp_iomux));
}
/*
 * Top-level suspend configuration: PMU FSM, DDR handover, and 32k clock
 * switch-over. GPIO0C iomux is saved here and put back in
 * soc_sleep_restore().
 */
static void soc_sleep_config(void)
{
	ddr_data.gpio0c_iomux = mmio_read_32(PMUGRF_BASE + GPIO0C_IOMUX);

	pmu_sleep_config();
	ddr_sleep_config();
	pvtm_32k_config();
}
/* Reverse of soc_sleep_config(); also re-initialises the secure timer. */
static void soc_sleep_restore(void)
{
	secure_timer_init();

	pvtm_32k_config_restore();
	ddr_sleep_config_restore();
	pmu_sleep_restore();

	mmio_write_32(PMUGRF_BASE + GPIO0C_IOMUX,
		      WITH_16BITS_WMSK(ddr_data.gpio0c_iomux));
}
  713. static inline void pm_pll_wait_lock(uint32_t pll_base, uint32_t pll_id)
  714. {
  715. uint32_t delay = PLL_LOCKED_TIMEOUT;
  716. while (delay > 0) {
  717. if (mmio_read_32(pll_base + PLL_CON(1)) &
  718. PLL_LOCK_MSK)
  719. break;
  720. delay--;
  721. }
  722. if (delay == 0)
  723. ERROR("Can't wait pll:%d lock\n", pll_id);
  724. }
  725. static inline void pll_set_mode(uint32_t pll_id, uint32_t mode)
  726. {
  727. uint32_t val = BITS_WITH_WMASK(mode, 0x3, PLL_MODE_SHIFT(pll_id));
  728. if (pll_id != GPLL_ID)
  729. mmio_write_32(CRU_BASE + CRU_MODE, val);
  730. else
  731. mmio_write_32(PMUCRU_BASE + CRU_PMU_MODE,
  732. BITS_WITH_WMASK(mode, 0x3, 0));
  733. }
  734. static inline void pll_suspend(uint32_t pll_id)
  735. {
  736. int i;
  737. uint32_t pll_base;
  738. if (pll_id != GPLL_ID)
  739. pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0);
  740. else
  741. pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0);
  742. /* save pll con */
  743. for (i = 0; i < PLL_CON_CNT; i++)
  744. ddr_data.cru_plls_con_save[pll_id][i] =
  745. mmio_read_32(pll_base + PLL_CON(i));
  746. /* slow mode */
  747. pll_set_mode(pll_id, SLOW_MODE);
  748. }
/*
 * Bring a PLL back after suspend: if it was locked before suspend, wait
 * for it to relock, then restore its pre-suspend work mode from the
 * saved CRU mode registers.
 */
static inline void pll_resume(uint32_t pll_id)
{
	uint32_t mode, pll_base;

	if (pll_id != GPLL_ID) {
		pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0);
		mode = (ddr_data.cru_mode_save >> PLL_MODE_SHIFT(pll_id)) & 0x3;
	} else {
		pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0);
		mode = ddr_data.cru_pmu_mode_save & 0x3;
	}

	/* if pll locked before suspend, we should wait after resume */
	if (ddr_data.cru_plls_con_save[pll_id][1] & PLL_LOCK_MSK)
		pm_pll_wait_lock(pll_base, pll_id);

	pll_set_mode(pll_id, mode);
}
/*
 * Suspend all PLLs: save the CRU mode and clock-select state, put
 * GPLL/NPLL/CPLL/APLL into slow mode (each saving its CON registers),
 * then select divider 0 for the core and pclk_dbg clocks.
 */
static void pm_plls_suspend(void)
{
	/* saved state is consumed by pm_plls_resume() and pll_resume() */
	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_MODE);
	ddr_data.cru_pmu_mode_save = mmio_read_32(PMUCRU_BASE + CRU_PMU_MODE);
	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(0));

	pll_suspend(GPLL_ID);
	pll_suspend(NPLL_ID);
	pll_suspend(CPLL_ID);
	pll_suspend(APLL_ID);

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      BITS_WITH_WMASK(0, 0xf, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      BITS_WITH_WMASK(0, 0xf, 8));
}
/*
 * Resume all PLLs: restore the saved core/pclk_dbg clock selects, then
 * bring APLL/CPLL/NPLL/GPLL back to their pre-suspend modes (reverse of
 * the pm_plls_suspend() order).
 */
static void pm_plls_resume(void)
{
	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0xf, 8));

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0xf, 0));

	pll_resume(APLL_ID);
	pll_resume(CPLL_ID);
	pll_resume(NPLL_ID);
	pll_resume(GPLL_ID);
}
/*
 * System power-domain suspend hook: power down domains, gate clocks,
 * configure SoC sleep state and PLLs, then clear PM_WARM_BOOT_BIT in
 * the psram boot config (it is set again on resume).  Returns 0.
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	pmu_power_domains_suspend();
	clk_gate_suspend();
	soc_sleep_config();
	pm_plls_suspend();
	psram_boot_cfg->pm_flag &= ~PM_WARM_BOOT_BIT;

	return 0;
}
/*
 * System power-domain resume hook: mirror of
 * rockchip_soc_sys_pwr_dm_suspend() in reverse order, then re-enable
 * the GIC CPU interface.  Returns 0.
 */
int rockchip_soc_sys_pwr_dm_resume(void)
{
	/* flag the next SRAM entry as a warm boot */
	psram_boot_cfg->pm_flag |= PM_WARM_BOOT_BIT;

	pm_plls_resume();
	soc_sleep_restore();
	clk_gate_resume();
	pmu_power_domains_resume();
	plat_rockchip_gic_cpuif_enable();

	return 0;
}
/*
 * Trigger a first-level global software reset via CRU_GLB_SRST_FST.
 * All PLLs are switched to slow mode first.  Never returns.
 */
void __dead2 rockchip_soc_soft_reset(void)
{
	pll_set_mode(GPLL_ID, SLOW_MODE);
	pll_set_mode(CPLL_ID, SLOW_MODE);
	pll_set_mode(NPLL_ID, SLOW_MODE);
	pll_set_mode(APLL_ID, SLOW_MODE);
	dsb();

	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
	dsb();
	/*
	 * The HW may need some time to reset the system, so keep the
	 * core from executing further code while the reset takes effect.
	 */
	psci_power_down_wfi();
}
  827. void __dead2 rockchip_soc_system_off(void)
  828. {
  829. uint32_t val;
  830. /* set pmic_sleep pin(gpio0_a4) to gpio mode */
  831. mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX, BITS_WITH_WMASK(0, 0x3, 8));
  832. /* config output */
  833. val = mmio_read_32(GPIO0_BASE + SWPORTA_DDR);
  834. val |= BIT(4);
  835. mmio_write_32(GPIO0_BASE + SWPORTA_DDR, val);
  836. /* config output high level */
  837. val = mmio_read_32(GPIO0_BASE);
  838. val |= BIT(4);
  839. mmio_write_32(GPIO0_BASE, val);
  840. dsb();
  841. /*
  842. * Maybe the HW needs some times to reset the system,
  843. * so we do not hope the core to execute valid codes.
  844. */
  845. psci_power_down_wfi();
  846. }
/* EL3 MMU setup hook — intentionally empty for now. */
void rockchip_plat_mmu_el3(void)
{
	/* TODO: support the el3 for px30 SoCs */
}
  851. void plat_rockchip_pmu_init(void)
  852. {
  853. uint32_t cpu;
  854. rockchip_pd_lock_init();
  855. for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
  856. cpuson_flags[cpu] = 0;
  857. psram_boot_cfg->ddr_func = (uint64_t)0;
  858. psram_boot_cfg->ddr_data = (uint64_t)0;
  859. psram_boot_cfg->sp = PSRAM_SP_TOP;
  860. psram_boot_cfg->ddr_flag = 0x0;
  861. psram_boot_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
  862. psram_boot_cfg->pm_flag = PM_WARM_BOOT_BIT;
  863. nonboot_cpus_off();
  864. /* Remap pmu_sram's base address to boot address */
  865. mmio_write_32(PMUSGRF_BASE + PMUSGRF_SOC_CON(0),
  866. BITS_WITH_WMASK(1, 0x1, 13));
  867. INFO("%s: pd status %x\n",
  868. __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
  869. }