/*
 * Copyright (c) 2022-2023, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdint.h>

#include <lib/spinlock.h>

#include <lib/mtk_init/mtk_init.h>
#include <lib/pm/mtk_pm.h>
#include <lpm/mt_lp_rm.h>
#include "mt_cpu_pm.h"
#include "mt_cpu_pm_cpc.h"
#include "mt_cpu_pm_mbox.h"
#include "mt_smp.h"
#include <mtk_mmap_pool.h>
#include <platform_def.h>

/*
 * The bakery lock must be used when the data cache is off, because
 * spinlocks rely on coherent memory. With the cache enabled,
 * spin_lock gives better performance.
 */
#ifdef MT_CPU_PM_USING_BAKERY_LOCK
DEFINE_BAKERY_LOCK(mt_cpu_pm_lock);
#define plat_cpu_pm_lock_init()	bakery_lock_init(&mt_cpu_pm_lock)
#define plat_cpu_pm_lock()	bakery_lock_get(&mt_cpu_pm_lock)
#define plat_cpu_pm_unlock()	bakery_lock_release(&mt_cpu_pm_lock)
#else
spinlock_t mt_cpu_pm_lock;
#define plat_cpu_pm_lock_init()
#define plat_cpu_pm_lock()	spin_lock(&mt_cpu_pm_lock)
#define plat_cpu_pm_unlock()	spin_unlock(&mt_cpu_pm_lock)
#endif
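
/*
 * Power nodes voted on by CPUs entering low-power states. The counters
 * in mt_pwr_nodes[] below track how many CPUs have requested each
 * system-level state, which decides how deep MCUSYS may go.
 */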
enum mt_pwr_node {
	MT_PWR_NONMCUSYS = 0,
	MT_PWR_MCUSYS_PDN,
	MT_PWR_SUSPEND,
	MT_PWR_SYSTEM_MEM,
	MT_PWR_SYSTEM_PLL,
	MT_PWR_SYSTEM_BUS,
	MT_PWR_MAX,
};
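
/*
 * Bring-up handshake flags: every bit in CPU_PM_LP_READY must be set
 * in cpu_pm_status before low-power states are reported as valid.
 */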
#define CPU_PM_DEPD_INIT	BIT(0)
#define CPU_PM_DEPD_READY	BIT(1)
#define CPU_PM_PLAT_READY	BIT(2)

#ifdef CPU_PM_TINYSYS_SUPPORT
#define CPU_PM_INIT_READY	(CPU_PM_DEPD_INIT | CPU_PM_DEPD_READY)
#define CPU_PM_LP_READY		(CPU_PM_INIT_READY | CPU_PM_PLAT_READY)
#else
#define CPU_PM_LP_READY		(CPU_PM_PLAT_READY)
#endif

#if CONFIG_MTK_PM_SUPPORT

#if CONFIG_MTK_CPU_SUSPEND_EN || CONFIG_MTK_SMP_EN
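/* Clear the CPC on-hint so the core is no longer flagged as waking up. */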
static void cpupm_cpu_resume_common(const struct mtk_cpupm_pwrstate *state)
{
	CPU_PM_ASSERT(state != NULL);
	mtk_cpc_core_on_hint_clr(state->info.cpuid);
}
#endif

#if CONFIG_MTK_SMP_EN
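/*
 * Program the warm-boot entry point, then power on one core through
 * the per-CPU power controls.
 */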
static int cpupm_cpu_pwr_on_prepare(unsigned int cpu, uintptr_t entry)
{
	struct cpu_pwr_ctrl pwr_ctrl;

	PER_CPU_PWR_CTRL(pwr_ctrl, cpu);
	mt_smp_core_bootup_address_set(&pwr_ctrl, entry);
	mt_smp_core_init_arch(0, cpu, 1, &pwr_ctrl);

	return mt_smp_power_core_on(cpu, &pwr_ctrl);
}

static void cpupm_cpu_resume_smp(const struct mtk_cpupm_pwrstate *state)
{
	CPU_PM_ASSERT(state != NULL);

	plat_cpu_pm_lock();
	mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
			GIC_WAKEUP_IGNORE(state->info.cpuid));
	plat_cpu_pm_unlock();
	cpupm_cpu_resume_common(state);
}

static void cpupm_cpu_suspend_smp(const struct mtk_cpupm_pwrstate *state)
{
	struct cpu_pwr_ctrl pwr_ctrl;

	CPU_PM_ASSERT(state != NULL);

	PER_CPU_PWR_CTRL(pwr_ctrl, state->info.cpuid);
	mt_smp_power_core_off(&pwr_ctrl);
	mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
			GIC_WAKEUP_IGNORE(state->info.cpuid));
}
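
/*
 * Hotplug bring-up entry: make sure the CPC does not power cores on at
 * reset by itself, then prepare the entry point and resume the CPU.
 */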
static void cpupm_smp_init(unsigned int cpu, uintptr_t sec_entrypoint)
{
	unsigned int reg;
	struct mtk_cpupm_pwrstate state = {
		.info = {
			.cpuid = cpu,
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = 0,
			.state_id = 0,
		},
	};

	reg = mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG);
	if ((reg & CPC_MCUSYS_CPC_RESET_PWR_ON_EN) != 0) {
		INFO("[%s:%d][CPU_PM] reset pwr on is enabled, clearing it\n",
		     __func__, __LINE__);
		mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
				CPC_MCUSYS_CPC_RESET_PWR_ON_EN);
	}

	cpupm_cpu_pwr_on_prepare(cpu, sec_entrypoint);
	cpupm_cpu_resume_smp(&state);
}

static struct mtk_cpu_smp_ops cpcv3_2_cpu_smp = {
	.init = cpupm_smp_init,
	.cpu_pwr_on_prepare = cpupm_cpu_pwr_on_prepare,
	.cpu_on = cpupm_cpu_resume_smp,
	.cpu_off = cpupm_cpu_suspend_smp,
};
#endif /* CONFIG_MTK_SMP_EN */

#if CONFIG_MTK_CPU_SUSPEND_EN
#define CPUPM_READY_MS			(40000)
/* Convert milliseconds to generic-timer ticks. */
#define CPUPM_ARCH_TIME_MS(ms)		((ms) * 1000 * SYS_COUNTER_FREQ_IN_MHZ)
#define CPUPM_BOOTUP_TIME_THR		CPUPM_ARCH_TIME_MS(CPUPM_READY_MS)

static int mt_pwr_nodes[MT_PWR_MAX];
static int plat_mt_lp_cpu_rc;
static unsigned int cpu_pm_status;
static unsigned int plat_prev_stateid;
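
/*
 * Pick the deepest system state still allowed by the voted power
 * nodes, then ask the low-power resource manager to apply a matching
 * constraint. On any failure, the MCUSYS-off flow is unwound.
 */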
static int mcusys_prepare_suspend(const struct mtk_cpupm_pwrstate *state)
{
	unsigned int stateid = state->pwr.state_id;

	if (mtk_cpc_mcusys_off_prepare() != CPC_SUCCESS) {
		goto mt_pwr_mcusysoff_break;
	}

	if (!IS_PLAT_SUSPEND_ID(stateid)) {
		if (mt_pwr_nodes[MT_PWR_SYSTEM_MEM] != 0) {
			stateid = MT_PLAT_PWR_STATE_SYSTEM_MEM;
		} else if (mt_pwr_nodes[MT_PWR_SYSTEM_PLL] != 0) {
			stateid = MT_PLAT_PWR_STATE_SYSTEM_PLL;
		} else if (mt_pwr_nodes[MT_PWR_SYSTEM_BUS] != 0) {
			stateid = MT_PLAT_PWR_STATE_SYSTEM_BUS;
		} else if (mt_pwr_nodes[MT_PWR_SUSPEND] != 0) {
			stateid = MT_PLAT_PWR_STATE_SUSPEND;
		} else {
			stateid = MT_PLAT_PWR_STATE_MCUSYS;
		}
	}
	plat_prev_stateid = stateid;

	plat_mt_lp_cpu_rc = mt_lp_rm_find_and_run_constraint(0, state->info.cpuid,
							     stateid, NULL);
	if (plat_mt_lp_cpu_rc < 0) {
		goto mt_pwr_mcusysoff_reflect;
	}

#ifdef CPU_PM_TINYSYS_SUPPORT
	mtk_set_cpu_pm_preffered_cpu(state->info.cpuid);
#endif
	return MTK_CPUPM_E_OK;

mt_pwr_mcusysoff_reflect:
	mtk_cpc_mcusys_off_reflect();

mt_pwr_mcusysoff_break:
	plat_mt_lp_cpu_rc = -1;

	return MTK_CPUPM_E_FAIL;
}
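
/*
 * Release the suspend-time constraint and notify the CPC so the
 * MCUSYS-off flow completes its wake-up accounting.
 */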
static int mcusys_prepare_resume(const struct mtk_cpupm_pwrstate *state)
{
	if (plat_mt_lp_cpu_rc < 0) {
		return MTK_CPUPM_E_FAIL;
	}

	mt_lp_rm_reset_constraint(plat_mt_lp_cpu_rc, state->info.cpuid,
				  plat_prev_stateid);
	mtk_cpc_mcusys_off_reflect();

	return MTK_CPUPM_E_OK;
}
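
/*
 * Power-off vote path: record this CPU's requested state, and when no
 * CPU remains in a non-MCUSYS state and the affinity level allows it,
 * prepare to power down MCUSYS as well. Returns the power-domain
 * bitmap the generic layer should act on.
 */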
static unsigned int cpupm_do_pstate_off(const mtk_pstate_type psci_state,
					const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;

	if (!state || (state->pwr.afflv > PLAT_MAX_PWR_LVL)) {
		CPU_PM_ASSERT(0);
	}

	switch (state->pwr.state_id) {
	case MT_PLAT_PWR_STATE_SYSTEM_MEM:
		mt_pwr_nodes[MT_PWR_SYSTEM_MEM] += 1;
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_PLL:
		mt_pwr_nodes[MT_PWR_SYSTEM_PLL] += 1;
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_BUS:
		mt_pwr_nodes[MT_PWR_SYSTEM_BUS] += 1;
		break;
	case MT_PLAT_PWR_STATE_SUSPEND:
		mt_pwr_nodes[MT_PWR_SUSPEND] += 1;
		break;
	default:
		if (!IS_MT_PLAT_PWR_STATE_MCUSYS(state->pwr.state_id) &&
		    !IS_PLAT_SYSTEM_SUSPEND(state->pwr.afflv)) {
			plat_cpu_pm_lock();
			mt_pwr_nodes[MT_PWR_NONMCUSYS] += 1;
			flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_NONMCUSYS],
					   sizeof(mt_pwr_nodes[MT_PWR_NONMCUSYS]));
			plat_cpu_pm_unlock();
		}
		break;
	}

	if ((mt_pwr_nodes[MT_PWR_NONMCUSYS] == 0) &&
	    IS_PLAT_MCUSYSOFF_AFFLV(state->pwr.afflv)) {
		/* Prepare to power down MCUSYS */
		if (mcusys_prepare_suspend(state) == MTK_CPUPM_E_OK) {
			mt_pwr_nodes[MT_PWR_MCUSYS_PDN] += 1;
			flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_MCUSYS_PDN],
					   sizeof(mt_pwr_nodes[MT_PWR_MCUSYS_PDN]));
			pstate |= (MT_CPUPM_PWR_DOMAIN_MCUSYS |
				   MT_CPUPM_PWR_DOMAIN_CLUSTER);
		}
	}

	if (state->pwr.afflv >= PLAT_MT_CPU_SUSPEND_CLUSTER) {
		pstate |= MT_CPUPM_PWR_DOMAIN_CLUSTER;
	}

	if (psci_get_pstate_pwrlvl(psci_state) >= PLAT_MT_CPU_SUSPEND_CLUSTER) {
		pstate |= MT_CPUPM_PWR_DOMAIN_PERCORE_DSU;
	}

	return pstate;
}
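
/*
 * Power-on mirror of the off path: drop this CPU's vote, resume MCUSYS
 * if it was powered down, and sync time after a system-level wake-up.
 */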
static unsigned int cpupm_do_pstate_on(const mtk_pstate_type psci_state,
				       const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;

	CPU_PM_ASSERT(state != NULL);

	if (state->pwr.afflv > PLAT_MAX_PWR_LVL) {
		CPU_PM_ASSERT(0);
	}

	if (mt_pwr_nodes[MT_PWR_MCUSYS_PDN] != 0) {
		mt_pwr_nodes[MT_PWR_MCUSYS_PDN] = 0;
		flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_MCUSYS_PDN],
				   sizeof(mt_pwr_nodes[MT_PWR_MCUSYS_PDN]));
		pstate |= (MT_CPUPM_PWR_DOMAIN_MCUSYS | MT_CPUPM_PWR_DOMAIN_CLUSTER);
		mcusys_prepare_resume(state);
	}

	if (state->pwr.afflv >= PLAT_MT_CPU_SUSPEND_CLUSTER) {
		pstate |= MT_CPUPM_PWR_DOMAIN_CLUSTER;
	}

	switch (state->pwr.state_id) {
	case MT_PLAT_PWR_STATE_SYSTEM_MEM:
		mt_pwr_nodes[MT_PWR_SYSTEM_MEM] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_MEM] >= 0);
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_PLL:
		mt_pwr_nodes[MT_PWR_SYSTEM_PLL] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_PLL] >= 0);
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_BUS:
		mt_pwr_nodes[MT_PWR_SYSTEM_BUS] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_BUS] >= 0);
		break;
	case MT_PLAT_PWR_STATE_SUSPEND:
		mt_pwr_nodes[MT_PWR_SUSPEND] -= 1;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SUSPEND] >= 0);
		break;
	default:
		if (!IS_MT_PLAT_PWR_STATE_MCUSYS(state->pwr.state_id) &&
		    !IS_PLAT_SYSTEM_SUSPEND(state->pwr.afflv)) {
			plat_cpu_pm_lock();
			mt_pwr_nodes[MT_PWR_NONMCUSYS] -= 1;
			flush_dcache_range((uintptr_t)&mt_pwr_nodes[MT_PWR_NONMCUSYS],
					   sizeof(mt_pwr_nodes[MT_PWR_NONMCUSYS]));
			plat_cpu_pm_unlock();
		}
		break;
	}

	if (IS_PLAT_SYSTEM_SUSPEND(state->pwr.afflv) ||
	    (IS_PLAT_SYSTEM_RETENTION(state->pwr.afflv) &&
	     (mt_pwr_nodes[MT_PWR_SUSPEND] > 0))) {
		mtk_cpc_time_sync();
	}

	if (mt_pwr_nodes[MT_PWR_NONMCUSYS] < 0) {
		CPU_PM_ASSERT(0);
	}

	pstate |= MT_CPUPM_PWR_DOMAIN_PERCORE_DSU;

	return pstate;
}

static void cpupm_cpu_resume(const struct mtk_cpupm_pwrstate *state)
{
	cpupm_cpu_resume_common(state);
}

static void cpupm_mcusys_resume(const struct mtk_cpupm_pwrstate *state)
{
	assert(state != NULL);
}

static void cpupm_mcusys_suspend(const struct mtk_cpupm_pwrstate *state)
{
	assert(state != NULL);
}
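
/*
 * Translate a PSCI request into the platform power-domain bitmap.
 * SMP (hotplug) requests only ever affect the core domain.
 */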
static unsigned int cpupm_get_pstate(enum mt_cpupm_pwr_domain domain,
				     const mtk_pstate_type psci_state,
				     const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = 0;

	if (state == NULL) {
		return 0;
	}

	if (state->info.mode == MTK_CPU_PM_SMP) {
		pstate = MT_CPUPM_PWR_DOMAIN_CORE;
	} else {
		if (domain == CPUPM_PWR_OFF) {
			pstate = cpupm_do_pstate_off(psci_state, state);
		} else if (domain == CPUPM_PWR_ON) {
			pstate = cpupm_do_pstate_on(psci_state, state);
		} else {
			INFO("[%s:%d][CPU_PM] unknown pwr domain: %d\n",
			     __func__, __LINE__, domain);
			assert(0);
		}
	}

	return pstate;
}
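
/*
 * When tinysys is present, step the mailbox handshake with the
 * co-processor: wait for device init, then for its task to be ready.
 * Without tinysys support there is nothing to initialize.
 */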
static int cpupm_init(void)
{
	int ret = MTK_CPUPM_E_OK;

#ifdef CPU_PM_TINYSYS_SUPPORT
	int status;

	if ((cpu_pm_status & CPU_PM_INIT_READY) == CPU_PM_INIT_READY) {
		return MTK_CPUPM_E_OK;
	}

	if (!(cpu_pm_status & CPU_PM_DEPD_INIT)) {
		status = mtk_lp_depd_condition(CPUPM_MBOX_WAIT_DEV_INIT);
		if (status == 0) {
			plat_cpu_pm_lock();
			cpu_pm_status |= CPU_PM_DEPD_INIT;
			plat_cpu_pm_unlock();
		}
	}

	if ((cpu_pm_status & CPU_PM_DEPD_INIT) &&
	    !(cpu_pm_status & CPU_PM_DEPD_READY)) {
		status = mtk_lp_depd_condition(CPUPM_MBOX_WAIT_TASK_READY);
		if (status == 0) {
			plat_cpu_pm_lock();
			cpu_pm_status |= CPU_PM_DEPD_READY;
			plat_cpu_pm_unlock();
		}
	}

	ret = ((cpu_pm_status & CPU_PM_INIT_READY) == CPU_PM_INIT_READY) ?
	      MTK_CPUPM_E_OK : MTK_CPUPM_E_FAIL;
#endif
	return ret;
}
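
/*
 * Gate low-power states until the platform is ready: dependencies
 * initialized and enough boot time elapsed (CPUPM_BOOTUP_TIME_THR).
 * System suspend is allowed regardless of the boot-time threshold.
 */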
static int cpupm_pwr_state_valid(unsigned int afflv, unsigned int state)
{
	if (cpu_pm_status == CPU_PM_LP_READY) {
		return MTK_CPUPM_E_OK;
	}

	if (cpupm_init() != MTK_CPUPM_E_OK) {
		return MTK_CPUPM_E_FAIL;
	}

	if (read_cntpct_el0() >= (uint64_t)CPUPM_BOOTUP_TIME_THR) {
		plat_cpu_pm_lock();
		cpu_pm_status |= CPU_PM_PLAT_READY;
		plat_cpu_pm_unlock();
	}

	if (!IS_PLAT_SYSTEM_SUSPEND(afflv) &&
	    ((cpu_pm_status & CPU_PM_PLAT_READY) == 0)) {
		return MTK_CPUPM_E_FAIL;
	}

	return MTK_CPUPM_E_OK;
}

static struct mtk_cpu_pm_ops cpcv3_2_mcdi = {
	.get_pstate = cpupm_get_pstate,
	.pwr_state_valid = cpupm_pwr_state_valid,
	.cpu_resume = cpupm_cpu_resume,
	.mcusys_suspend = cpupm_mcusys_suspend,
	.mcusys_resume = cpupm_mcusys_resume,
};
#endif /* CONFIG_MTK_CPU_SUSPEND_EN */
#endif /* CONFIG_MTK_PM_SUPPORT */

/*
 * Per the MTK PM methodology, PSCI op initialization must run after
 * CPU PM initialization, otherwise it fails.
 */
int mt_plat_cpu_pm_init(void)
{
	plat_cpu_pm_lock_init();
	mtk_cpc_init();
#if CONFIG_MTK_PM_SUPPORT
#if CONFIG_MTK_CPU_SUSPEND_EN
	register_cpu_pm_ops(CPU_PM_FN, &cpcv3_2_mcdi);
#endif /* CONFIG_MTK_CPU_SUSPEND_EN */
#if CONFIG_MTK_SMP_EN
	register_cpu_smp_ops(CPU_PM_FN, &cpcv3_2_cpu_smp);
#endif /* CONFIG_MTK_SMP_EN */
#endif /* CONFIG_MTK_PM_SUPPORT */
	INFO("[%s:%d] - CPU PM INIT finished\n", __func__, __LINE__);

	return 0;
}
MTK_ARCH_INIT(mt_plat_cpu_pm_init);

static const mmap_region_t cpu_pm_mmap[] MTK_MMAP_SECTION = {
#ifdef CPU_PM_TINYSYS_SUPPORT
#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
	MAP_REGION_FLAT(CPU_EB_TCM_BASE, CPU_EB_TCM_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
#endif
#endif
	{0}
};
DECLARE_MTK_MMAP_REGIONS(cpu_pm_mmap);