pmu.c

/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <ddr_rk3368.h>
#include <plat_private.h>
#include <pmu.h>
#include <pmu_com.h>
#include <rk3368_def.h>
#include <soc.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static uint32_t cpu_warm_boot_addr;

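/*
 * Flush the L2 cache of the big cluster: request the flush through
 * PMU_SFT_CON and poll PMU_CORE_PWR_ST until the hardware reports the
 * flush complete, warning periodically if it takes unusually long.
 */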
void rk3368_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
	dsb();
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(clst_b_l2_flsh_done))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s: reg %x, wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
}

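/*
 * Request (idle == 1) or release (idle == 0) a bus idle state for the
 * interface selected by @req, then poll PMU_BUS_IDE_ST until the
 * expected ack/idle pattern appears.  The pattern differs per
 * interface, hence the switch below.
 */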
static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
{
	uint32_t mask = BIT(req);
	uint32_t idle_mask = 0;
	uint32_t idle_target = 0;
	uint32_t val;
	uint32_t wait_cnt = 0;

	switch (req) {
	case bus_ide_req_clst_l:
		idle_mask = BIT(pmu_idle_ack_cluster_l);
		idle_target = (idle << pmu_idle_ack_cluster_l);
		break;
	case bus_ide_req_clst_b:
		idle_mask = BIT(pmu_idle_ack_cluster_b);
		idle_target = (idle << pmu_idle_ack_cluster_b);
		break;
	case bus_ide_req_cxcs:
		idle_mask = BIT(pmu_idle_ack_cxcs);
		idle_target = ((!idle) << pmu_idle_ack_cxcs);
		break;
	case bus_ide_req_cci400:
		idle_mask = BIT(pmu_idle_ack_cci400);
		idle_target = ((!idle) << pmu_idle_ack_cci400);
		break;
	case bus_ide_req_gpu:
		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
		idle_target = (idle << pmu_idle_ack_gpu) |
			      (idle << pmu_idle_gpu);
		break;
	case bus_ide_req_core:
		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
		idle_target = (idle << pmu_idle_ack_core) |
			      (idle << pmu_idle_core);
		break;
	case bus_ide_req_bus:
		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
		idle_target = (idle << pmu_idle_ack_bus) |
			      (idle << pmu_idle_bus);
		break;
	case bus_ide_req_dma:
		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
		idle_target = (idle << pmu_idle_ack_dma) |
			      (idle << pmu_idle_dma);
		break;
	case bus_ide_req_peri:
		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
		idle_target = (idle << pmu_idle_ack_peri) |
			      (idle << pmu_idle_peri);
		break;
	case bus_ide_req_video:
		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
		idle_target = (idle << pmu_idle_ack_video) |
			      (idle << pmu_idle_video);
		break;
	case bus_ide_req_vio:
		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
		idle_target = (idle << pmu_idle_ack_vio) |
			      (idle << pmu_idle_vio);
		break;
	case bus_ide_req_alive:
		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
		idle_target = (idle << pmu_idle_ack_alive) |
			      (idle << pmu_idle_alive);
		break;
	case bus_ide_req_pmu:
		idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
		idle_target = (idle << pmu_idle_ack_pmu) |
			      (idle << pmu_idle_pmu);
		break;
	case bus_ide_req_msch:
		idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
		idle_target = (idle << pmu_idle_ack_msch) |
			      (idle << pmu_idle_msch);
		break;
	case bus_ide_req_cci:
		idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
		idle_target = (idle << pmu_idle_ack_cci) |
			      (idle << pmu_idle_cci);
		break;
	default:
		ERROR("%s: unsupported idle request\n", __func__);
		return -EINVAL;
	}

	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
	if (idle)
		val |= mask;
	else
		val &= ~mask;
	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);

	while ((mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST) &
		idle_mask) != idle_target) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s: st=%x (%x)\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
			     idle_mask);
	}

	return 0;
}

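/*
 * Bring the big cluster's SCU interface back up: deassert ACINACTM
 * and drop the bus idle request for the big cluster.
 */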
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}

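/*
 * Power down the big cluster's SCU interface.  All big-cluster cpus
 * must already be off: flush the L2, assert ACINACTM, wait for the L2
 * to report WFI, then idle the cluster on the bus.
 */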
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all big-cluster cpus are off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s: wait cluster-b l2 (%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}

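/*
 * Program the PMU for system sleep: select the power-mode bits for the
 * core and common domains (cpu/scu power-down, L2 flush and idle,
 * self-refresh entry, power-off), enable wakeup from either cluster,
 * disable GPIO wakeup, and set the PLL lock/reset and stabilisation
 * counters in 24 MHz cycles.
 */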
static void pmu_sleep_mode_config(void)
{
	uint32_t pwrmd_core, pwrmd_com;

	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
		     BIT(pmu_mdcr_scu_l_pd) |
		     BIT(pmu_mdcr_l2_flush) |
		     BIT(pmu_mdcr_l2_idle) |
		     BIT(pmu_mdcr_clr_clst_l) |
		     BIT(pmu_mdcr_clr_core) |
		     BIT(pmu_mdcr_clr_cci) |
		     BIT(pmu_mdcr_core_pd);

	pwrmd_com = BIT(pmu_mode_en) |
		    BIT(pmu_mode_sref_enter) |
		    BIT(pmu_mode_pwr_off);

	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
	dsb();
}

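/*
 * Apply the full sleep configuration: PMU sleep mode, SoC sleep setup,
 * global interrupt disable at the PMU, big-cluster SCU power-down, and
 * both clusters' boot address redirected to pmu_cpuson_entrypoint for
 * the warm-boot path.
 */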
static void pmu_set_sleep_mode(void)
{
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
		       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
		       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
}

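/*
 * Set the power domain of one cpu (selected by cluster/cpu index) to
 * pd_state.  Before powering a core off, check_cpu_wfie() must confirm
 * the core has reached WFI/WFE; otherwise return -EINVAL.
 */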
static int cpus_id_power_domain(uint32_t cluster,
				uint32_t cpu,
				uint32_t pd_state,
				uint32_t wfie_msk)
{
	uint32_t pd;
	uint64_t mpidr;

	if (cluster)
		pd = PD_CPUB0 + cpu;
	else
		pd = PD_CPUL0 + cpu;

	if (pmu_power_domain_st(pd) == pd_state)
		return 0;

	if (pd_state == pmu_pd_off) {
		mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
		if (check_cpu_wfie(mpidr, wfie_msk))
			return -EINVAL;
	}

	return pmu_power_domain_ctr(pd, pd_state);
}

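/*
 * Power off every core except the boot cpu, which is identified from
 * MPIDR affinity levels 0 (cpu) and 1 (cluster).
 */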
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, boot_cluster, cpu;

	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	/* turn off non-boot cpus */
	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
		if (!boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}

	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
		if (boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}
}

void sram_save(void)
{
	/* TODO: support the sram save for rk3368 SoCs */
}

void sram_restore(void)
{
	/* TODO: support the sram restore for rk3368 SoCs */
}

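/*
 * Power on one core: force the target's power domain off first for a
 * clean state, record the entry point for the pmusram warm-boot stub,
 * point the cluster's boot address at pmusram, power the domain on,
 * then restore the cold-boot address.
 */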
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off before powering it up */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuon_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	return 0;
}

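/*
 * System resume: restore the cold-boot address for both clusters,
 * bring the PLLs out of their suspend settings, and power the big
 * cluster's SCU interface back up.
 */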
int rockchip_soc_sys_pwr_dm_resume(void)
{
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	return 0;
}

void rockchip_plat_mmu_el3(void)
{
	/* TODO: support the EL3 MMU mapping for rk3368 SoCs */
}

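/*
 * Platform PMU init: record the warm-boot entry address, clear the
 * per-cpu hotplug flags, and power off all non-boot cpus.
 */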
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	/*
	 * The boot-address register only holds 32 bits, so truncate the
	 * 64-bit warm-boot entry point to 32 bits here.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}