/*
 * Copyright (c) 2022, Mediatek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <lib/utils.h>
#ifdef MTK_PUBEVENT_ENABLE
#include <vendor_pubsub_events.h>
#endif
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <dfd.h>
#include <lib/mtk_init/mtk_init.h>
#include <lib/pm/mtk_pm.h>
#include <mt_gic_v3.h>
#include <platform_def.h>

#define IS_AFFLV_PUBEVENT(_pstate) \
	((_pstate & (MT_CPUPM_PWR_DOMAIN_MCUSYS | MT_CPUPM_PWR_DOMAIN_CLUSTER)) != 0)

#ifdef MTK_PUBEVENT_ENABLE
#define MT_CPUPM_EVENT_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_on, (const void *)(x)); })

#define MT_CPUPM_EVENT_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_off, (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_on, (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_off, (const void *)(x)); })

#else
#define MT_CPUPM_EVENT_PWR_ON(x) ({ (void)x; })
#define MT_CPUPM_EVENT_PWR_OFF(x) ({ (void)x; })
#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ (void)x; })
#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ (void)x; })
#endif

/*
 * The CPU's request for the cluster power status:
 * [0] : The CPU requests cluster power down
 * [1] : The CPU requests cluster power on
 */
#define coordinate_cluster(onoff)	write_clusterpwrdn_el1(onoff)
#define coordinate_cluster_pwron()	coordinate_cluster(1)
#define coordinate_cluster_pwroff()	coordinate_cluster(0)

/* All CPU power-management callbacks are disabled by default */
#define MTK_CPUPM_FN_MASK_DEFAULT	(0)
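
/*
 * Book-keeping for the registered vendor callbacks: fn_mask records which
 * MTK_CPUPM_FN_* hooks are enabled, while ops and smp hold the low-power
 * (idle/suspend) and SMP (hotplug) callback tables respectively.
 */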
struct mtk_cpu_pwr_ctrl {
	unsigned int fn_mask;
	struct mtk_cpu_pm_ops *ops;
	struct mtk_cpu_smp_ops *smp;
};

static struct mtk_cpu_pwr_ctrl mtk_cpu_pwr = {
	.fn_mask = MTK_CPUPM_FN_MASK_DEFAULT,
	.ops = NULL,
};
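
/* A hook fires only if its ops table is registered and the matching MTK_CPUPM_FN_* bit is set. */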
#define IS_CPUIDLE_FN_ENABLE(x)	((mtk_cpu_pwr.ops != NULL) && ((mtk_cpu_pwr.fn_mask & x) != 0))
#define IS_CPUSMP_FN_ENABLE(x)	((mtk_cpu_pwr.smp != NULL) && ((mtk_cpu_pwr.fn_mask & x) != 0))

/* per-cpu power state */
static unsigned int armv8_2_power_state[PLATFORM_CORE_COUNT];

#define armv8_2_get_pwr_stateid(cpu) psci_get_pstate_id(armv8_2_power_state[cpu])
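
/*
 * Ask the vendor driver to translate the PSCI request into the MediaTek
 * composite power state (a mask of MT_CPUPM_PWR_DOMAIN_* bits). Returns 0
 * when no callback is registered, i.e. no extra power domain is touched.
 */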
static unsigned int get_mediatek_pstate(unsigned int domain, unsigned int psci_state,
					struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_CPUPM_GET_PWR_STATE)) {
		return mtk_cpu_pwr.ops->get_pstate(domain, psci_state, state);
	}

	return 0;
}
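
/*
 * Return the highest affinity level that this request asks to take out of the
 * RUN state, or PSCI_INVALID_PWR_LVL if every level remains running.
 */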
unsigned int armv8_2_get_pwr_afflv(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int)PLAT_MAX_PWR_LVL; i >= (int)PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0) {
			return (unsigned int) i;
		}
	}

	return PSCI_INVALID_PWR_LVL;
}

/* MediaTek mcusys power on control interface */
static void armv8_2_mcusys_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	gicv3_distif_init();
	mt_gic_distif_restore();
	gic_sgi_restore_all();

	dfd_resume();

	/* Add code here that must run when mcusys resumes from the off state */
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_MCUSYS)) {
		mtk_cpu_pwr.ops->mcusys_resume(state);
	}
}

/* MediaTek mcusys power down control interface */
static void armv8_2_mcusys_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
	mt_gic_distif_save();
	gic_sgi_save_all();

	/* Add code here that must run before mcusys enters the off state */
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_MCUSYS)) {
		mtk_cpu_pwr.ops->mcusys_suspend(state);
	}
}

/* MediaTek Cluster power on control interface */
static void armv8_2_cluster_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	/* Add code here that must run when the cluster resumes from the off state */
#if defined(MTK_CM_MGR) && !defined(MTK_FPGA_EARLY_PORTING)
	/* init cpu stall counter */
	init_cpu_stall_counter_all();
#endif

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CLUSTER)) {
		mtk_cpu_pwr.ops->cluster_resume(state);
	}
}

/* MediaTek Cluster power down control interface */
static void armv8_2_cluster_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CLUSTER)) {
		mtk_cpu_pwr.ops->cluster_suspend(state);
	}
}

/* MediaTek CPU power on control interface */
static void armv8_2_cpu_pwr_on_common(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	coordinate_cluster_pwron();

	gicv3_rdistif_init(plat_my_core_pos());
	gicv3_cpuif_enable(plat_my_core_pos());

	/* If MCUSYS has been powered down then restore GIC redistributor for all CPUs. */
	if (IS_PLAT_SYSTEM_RETENTION(state->pwr.afflv)) {
		mt_gic_rdistif_restore_all();
	} else {
		mt_gic_rdistif_restore();
	}
}

/* MediaTek CPU power down control interface */
static void armv8_2_cpu_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if ((pstate & MT_CPUPM_PWR_DOMAIN_PERCORE_DSU) != 0) {
		coordinate_cluster_pwroff();
	}

	mt_gic_rdistif_save();
	gicv3_cpuif_disable(plat_my_core_pos());
	gicv3_rdistif_off(plat_my_core_pos());
}
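
/*
 * cpuidle/suspend wrappers: on resume the vendor hook runs after the common
 * CPU power-on sequence; on suspend it runs before the common power-down
 * sequence.
 */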
static void armv8_2_cpu_pwr_resume(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	armv8_2_cpu_pwr_on_common(state, pstate);
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CORE)) {
		mtk_cpu_pwr.ops->cpu_resume(state);
	}
}

static void armv8_2_cpu_pwr_suspend(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CORE)) {
		mtk_cpu_pwr.ops->cpu_suspend(state);
	}
	armv8_2_cpu_pwr_dwn_common(state, pstate);
}
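
/* SMP (hotplug) wrappers used by the PSCI CPU_ON and CPU_OFF paths. */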
static void armv8_2_cpu_pwr_on(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	armv8_2_cpu_pwr_on_common(state, pstate);

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_ON)) {
		mtk_cpu_pwr.smp->cpu_on(state);
	}
}

static void armv8_2_cpu_pwr_off(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_OFF)) {
		mtk_cpu_pwr.smp->cpu_off(state);
	}
	armv8_2_cpu_pwr_dwn_common(state, pstate);
}
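
/*
 * PSCI platform hooks. The SMP hooks (on/off/on_finish) build a pm_state in
 * MTK_CPU_PM_SMP mode with a fixed CORE | PERCORE_DSU pstate, while the idle
 * hooks (suspend/suspend_finish) derive the pstate from the power_state that
 * was cached by validate_power_state, via the vendor get_pstate callback.
 */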
/* MediaTek PSCI power domain */
static int armv8_2_power_domain_on(u_register_t mpidr)
{
	int ret = PSCI_E_SUCCESS;
	int cpu = plat_core_pos_by_mpidr(mpidr);
	uintptr_t entry = plat_pm_get_warm_entry();

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_PWR_ON_CORE_PREPARE)) {
		if (mtk_cpu_pwr.smp->cpu_pwr_on_prepare(cpu, entry) != 0) {
			ret = PSCI_E_DENIED;
		}
	}
	INFO("CPU %d power domain prepare on\n", cpu);
	return ret;
}

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_on_finish(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE | MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = armv8_2_get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};

	armv8_2_cpu_pwr_on(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);

	INFO("CPU %u power domain on finished\n", pm_state.info.cpuid);
}

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_off(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE | MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = armv8_2_get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};

	armv8_2_cpu_pwr_off(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	INFO("CPU %u power domain off\n", pm_state.info.cpuid);
}
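
/*
 * Suspend path: tear down the CPU first, then the cluster and mcusys domains
 * if the composite pstate requests them, and finally publish the power-off
 * events (plus the affinity-level event for cluster/mcusys transitions).
 */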
/* MediaTek PSCI power domain */
static void armv8_2_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = armv8_2_get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = armv8_2_get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = get_mediatek_pstate(CPUPM_PWR_OFF,
				     armv8_2_power_state[pm_state.info.cpuid], &pm_state);

	armv8_2_cpu_pwr_suspend(&pm_state, pstate);

	if ((pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER) != 0) {
		armv8_2_cluster_pwr_dwn_common(&pm_state);
	}

	if ((pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS) != 0) {
		armv8_2_mcusys_pwr_dwn_common(&pm_state);
	}

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	if (IS_AFFLV_PUBEVENT(pstate)) {
		MT_CPUPM_EVENT_AFFLV_PWR_OFF(&nb);
	}
}
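
/*
 * Resume path: bring the domains back in the reverse order, mcusys first,
 * then the cluster, then the CPU itself, before publishing the power-on
 * events.
 */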
/* MediaTek PSCI power domain */
static void armv8_2_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = armv8_2_get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = armv8_2_get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = get_mediatek_pstate(CPUPM_PWR_ON,
				     armv8_2_power_state[pm_state.info.cpuid], &pm_state);

	if ((pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS) != 0) {
		armv8_2_mcusys_pwr_on_common(&pm_state);
	}

	if ((pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER) != 0) {
		armv8_2_cluster_pwr_on_common(&pm_state);
	}

	armv8_2_cpu_pwr_resume(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);

	if (IS_AFFLV_PUBEVENT(pstate)) {
		MT_CPUPM_EVENT_AFFLV_PWR_ON(&nb);
	}
}
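
/*
 * Validate the requested power_state against the vendor pwr_state_valid hook,
 * fill in req_state for PSCI, and cache the raw power_state per CPU so the
 * suspend path can later hand it to get_pstate.
 */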
/* MediaTek PSCI power domain */
static int armv8_2_validate_power_state(unsigned int power_state, psci_power_state_t *req_state)
{
	unsigned int i;
	unsigned int pstate = psci_get_pstate_type(power_state);
	unsigned int aff_lvl = psci_get_pstate_pwrlvl(power_state);
	unsigned int my_core_pos = plat_my_core_pos();

	if (mtk_cpu_pwr.ops == NULL) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_STATE_VALID)) {
		if (mtk_cpu_pwr.ops->pwr_state_valid(aff_lvl, pstate) != 0) {
			return PSCI_E_INVALID_PARAMS;
		}
	}

	if (pstate == PSTATE_TYPE_STANDBY) {
		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
	} else {
		for (i = PSCI_CPU_PWR_LVL; i <= aff_lvl; i++) {
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
		}
	}

	armv8_2_power_state[my_core_pos] = power_state;

	return PSCI_E_SUCCESS;
}
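
/*
 * System-suspend request: if the vendor driver reports that a full power-down
 * is not currently possible, fall back to CPU retention with all higher
 * levels kept running; otherwise request PLAT_MAX_OFF_STATE at every level.
 */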
/* MediaTek PSCI power domain */
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
static void armv8_2_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;
	int ret;
	unsigned int power_state;
	unsigned int my_core_pos = plat_my_core_pos();

	ret = mtk_cpu_pwr.ops->pwr_state_valid(PLAT_MAX_PWR_LVL,
					       PSTATE_TYPE_POWERDOWN);

	if (ret != MTK_CPUPM_E_OK) {
		/* Avoid suspending when the platform is not ready. */
		req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
						PLAT_MAX_RET_STATE;
		for (i = PSCI_CPU_PWR_LVL + 1; i <= PLAT_MAX_PWR_LVL; i++) {
			req_state->pwr_domain_state[i] = PSCI_LOCAL_STATE_RUN;
		}
		power_state = psci_make_powerstate(0, PSTATE_TYPE_STANDBY, PSCI_CPU_PWR_LVL);
	} else {
		for (i = PSCI_CPU_PWR_LVL; i <= PLAT_MAX_PWR_LVL; i++) {
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
		}
		power_state = psci_make_powerstate(MT_PLAT_PWR_STATE_SUSPEND,
						   PSTATE_TYPE_POWERDOWN, PLAT_MAX_PWR_LVL);
	}

	armv8_2_power_state[my_core_pos] = power_state;
	flush_dcache_range((uintptr_t)&armv8_2_power_state[my_core_pos],
			   sizeof(armv8_2_power_state[my_core_pos]));
}
#endif

static void armv8_2_pm_smp_init(unsigned int cpu_id, uintptr_t entry_point)
{
	if (entry_point == 0) {
		ERROR("%s, warm_entry_point is null\n", __func__);
		panic();
	}

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_INIT)) {
		mtk_cpu_pwr.smp->init(cpu_id, entry_point);
	}

	INFO("[%s:%d] - Initialize finished\n", __func__, __LINE__);
}
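
/*
 * Hook tables handed to the MediaTek platform PM layer. The power (idle and
 * suspend) hooks and the SMP (hotplug) hooks are installed separately, by
 * register_cpu_pm_ops() and register_cpu_smp_ops() respectively.
 */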
static struct plat_pm_pwr_ctrl armv8_2_pwr_ops = {
	.pwr_domain_suspend = armv8_2_power_domain_suspend,
	.pwr_domain_suspend_finish = armv8_2_power_domain_suspend_finish,
	.validate_power_state = armv8_2_validate_power_state,
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
	.get_sys_suspend_power_state = armv8_2_get_sys_suspend_power_state,
#endif
};

struct plat_pm_smp_ctrl armv8_2_smp_ops = {
	.init = armv8_2_pm_smp_init,
	.pwr_domain_on = armv8_2_power_domain_on,
	.pwr_domain_off = armv8_2_power_domain_off,
	.pwr_domain_on_finish = armv8_2_power_domain_on_finish,
};
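
/*
 * For each MTK_CPUPM_FN_* flag requested in _fns, CPM_PM_FN_CHECK() verifies
 * that the matching callback in _ops is non-NULL and accumulates the flag
 * into _flag; a missing callback clears _result and asserts.
 */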
#define ISSUE_CPU_PM_REG_FAIL(_success) ({ _success = false; assert(0); })

#define CPM_PM_FN_CHECK(_fns, _ops, _id, _func, _result, _flag) ({ \
	if ((_fns & _id)) { \
		if (_ops->_func) \
			_flag |= _id; \
		else { \
			ISSUE_CPU_PM_REG_FAIL(_result); \
		} \
	} })
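
/*
 * Registration sketch (illustrative only; the vendor_* names below are
 * hypothetical, not part of this file). A platform low-power driver would
 * typically register its callbacks once at boot:
 *
 *	static struct mtk_cpu_pm_ops vendor_pm_ops = {
 *		.get_pstate = vendor_get_pstate,
 *		.pwr_state_valid = vendor_pwr_state_valid,
 *		.cpu_resume = vendor_cpu_resume,
 *		.mcusys_suspend = vendor_mcusys_suspend,
 *		.mcusys_resume = vendor_mcusys_resume,
 *	};
 *
 *	register_cpu_pm_ops(MTK_CPUPM_FN_CPUPM_GET_PWR_STATE |
 *			    MTK_CPUPM_FN_PWR_STATE_VALID |
 *			    MTK_CPUPM_FN_RESUME_CORE |
 *			    MTK_CPUPM_FN_SUSPEND_MCUSYS |
 *			    MTK_CPUPM_FN_RESUME_MCUSYS, &vendor_pm_ops);
 *
 * Every flag passed in fn_flags must map to a non-NULL callback, and the
 * ops table can only be registered once.
 */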
int register_cpu_pm_ops(unsigned int fn_flags, struct mtk_cpu_pm_ops *ops)
{
	bool success = true;
	unsigned int fns = 0;

	if ((ops == NULL) || (mtk_cpu_pwr.ops != NULL)) {
		ERROR("[%s:%d] register cpu_pm fail !!\n", __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CORE,
			cpu_resume, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CORE,
			cpu_suspend, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CLUSTER,
			cluster_resume, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CLUSTER,
			cluster_suspend, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_MCUSYS,
			mcusys_resume, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_MCUSYS,
			mcusys_suspend, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_CPUPM_GET_PWR_STATE,
			get_pstate, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_STATE_VALID,
			pwr_state_valid, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_INIT,
			init, success, fns);

	if (success) {
		mtk_cpu_pwr.ops = ops;
		mtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_pwr(&armv8_2_pwr_ops);
		INFO("[%s:%d] CPU pwr ops register success, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_pm ops fail !, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
	}

	return MTK_CPUPM_E_OK;
}
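
/*
 * Same pattern for the hotplug callbacks; a successful registration installs
 * the PSCI on/off/on_finish hooks via plat_pm_ops_setup_smp().
 */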
int register_cpu_smp_ops(unsigned int fn_flags, struct mtk_cpu_smp_ops *ops)
{
	bool success = true;
	unsigned int fns = 0;

	if ((ops == NULL) || (mtk_cpu_pwr.smp != NULL)) {
		ERROR("[%s:%d] register cpu_smp fail !!\n", __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_INIT,
			init, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_ON_CORE_PREPARE,
			cpu_pwr_on_prepare, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_ON,
			cpu_on, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_OFF,
			cpu_off, success, fns);

	if (success == true) {
		mtk_cpu_pwr.smp = ops;
		mtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_smp(&armv8_2_smp_ops);
		INFO("[%s:%d] CPU smp ops register success, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_smp ops fail !, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
	}

	return MTK_CPUPM_E_OK;
}