gpc.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448
  1. /*
  2. * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <stdlib.h>
  7. #include <stdint.h>
  8. #include <stdbool.h>
  9. #include <arch_helpers.h>
  10. #include <common/debug.h>
  11. #include <drivers/delay_timer.h>
  12. #include <lib/mmio.h>
  13. #include <lib/psci/psci.h>
  14. #include <lib/smccc.h>
  15. #include <lib/spinlock.h>
  16. #include <plat/common/platform.h>
  17. #include <services/std_svc.h>
  18. #include <gpc.h>
  19. #include <platform_def.h>
/* FSL SiP sub-function IDs for the GPC service (x1 of imx_gpc_handler) */
#define FSL_SIP_CONFIG_GPC_MASK U(0x00)
#define FSL_SIP_CONFIG_GPC_UNMASK U(0x01)
#define FSL_SIP_CONFIG_GPC_SET_WAKE U(0x02)
#define FSL_SIP_CONFIG_GPC_PM_DOMAIN U(0x03)
#define FSL_SIP_CONFIG_GPC_SET_AFF U(0x04)
#define FSL_SIP_CONFIG_GPC_CORE_WAKE U(0x05)

/* 128 hardware IRQs, spread across MAX_IMR_NUM 32-bit IMR registers per bank */
#define MAX_HW_IRQ_NUM U(128)
#define MAX_IMR_NUM U(4)

/* IMR contents saved on suspend entry; indexed [core_id + imr_idx * 4] */
static uint32_t gpc_saved_imrs[16];
/* configured wakeup sources, one 32-bit word per IMR register */
static uint32_t gpc_wake_irqs[4];
/*
 * Base address of the IMR1..IMR4 bank for each A53 core, plus the
 * M4 bank (5th entry, only touched by the init-time mask-all loop).
 */
static uint32_t gpc_imr_offset[] = {
	IMX_GPC_BASE + IMR1_CORE0_A53,
	IMX_GPC_BASE + IMR1_CORE1_A53,
	IMX_GPC_BASE + IMR1_CORE2_A53,
	IMX_GPC_BASE + IMR1_CORE3_A53,
	IMX_GPC_BASE + IMR1_CORE0_M4,
};

/* one lock per A53 core's IMR bank */
spinlock_t gpc_imr_lock[4];
  38. static void gpc_imr_core_spin_lock(unsigned int core_id)
  39. {
  40. spin_lock(&gpc_imr_lock[core_id]);
  41. }
  42. static void gpc_imr_core_spin_unlock(unsigned int core_id)
  43. {
  44. spin_unlock(&gpc_imr_lock[core_id]);
  45. }
  46. static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx)
  47. {
  48. uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
  49. gpc_imr_core_spin_lock(core_id);
  50. gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg);
  51. mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]);
  52. gpc_imr_core_spin_unlock(core_id);
  53. }
  54. static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx)
  55. {
  56. uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
  57. uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4];
  58. gpc_imr_core_spin_lock(core_id);
  59. mmio_write_32(reg, val);
  60. gpc_imr_core_spin_unlock(core_id);
  61. }
/*
 * On i.MX8MQ, the A53 cluster can enter LPM mode and shut down the
 * A53 PLAT power domain only in system suspend, so LPM wakeup is used
 * solely for system suspend. When the system suspends, any A53 core
 * may be the last one to suspend it, but LPM wakeup can only use C0's
 * IMRs to wake the A53 cluster from LPM — therefore save C0's IMRs
 * before suspend and restore them after resume.
 */
  71. void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
  72. {
  73. unsigned int imr, core;
  74. if (pdn) {
  75. for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
  76. for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
  77. gpc_save_imr_lpm(core, imr);
  78. }
  79. }
  80. } else {
  81. for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
  82. for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
  83. gpc_restore_imr_lpm(core, imr);
  84. }
  85. }
  86. }
  87. }
  88. static void imx_gpc_hwirq_mask(unsigned int hwirq)
  89. {
  90. uintptr_t reg;
  91. unsigned int val;
  92. if (hwirq >= MAX_HW_IRQ_NUM) {
  93. return;
  94. }
  95. gpc_imr_core_spin_lock(0);
  96. reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
  97. val = mmio_read_32(reg);
  98. val |= 1 << hwirq % 32;
  99. mmio_write_32(reg, val);
  100. gpc_imr_core_spin_unlock(0);
  101. }
  102. static void imx_gpc_hwirq_unmask(unsigned int hwirq)
  103. {
  104. uintptr_t reg;
  105. unsigned int val;
  106. if (hwirq >= MAX_HW_IRQ_NUM) {
  107. return;
  108. }
  109. gpc_imr_core_spin_lock(0);
  110. reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
  111. val = mmio_read_32(reg);
  112. val &= ~(1 << hwirq % 32);
  113. mmio_write_32(reg, val);
  114. gpc_imr_core_spin_unlock(0);
  115. }
  116. static void imx_gpc_set_wake(uint32_t hwirq, bool on)
  117. {
  118. uint32_t mask, idx;
  119. if (hwirq >= MAX_HW_IRQ_NUM) {
  120. return;
  121. }
  122. mask = 1 << hwirq % 32;
  123. idx = hwirq / 32;
  124. gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
  125. gpc_wake_irqs[idx] & ~mask;
  126. }
  127. static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask)
  128. {
  129. gpc_imr_core_spin_lock(core_id);
  130. if (mask) {
  131. mmio_setbits_32(gpc_imr_offset[core_id], 1);
  132. } else {
  133. mmio_clrbits_32(gpc_imr_offset[core_id], 1);
  134. }
  135. dsb();
  136. gpc_imr_core_spin_unlock(core_id);
  137. }
  138. void imx_gpc_core_wake(uint32_t cpumask)
  139. {
  140. for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
  141. if (cpumask & (1 << i)) {
  142. imx_gpc_mask_irq0(i, false);
  143. }
  144. }
  145. }
/* Mask IRQ0 for 'core_id' again once the woken core is running. */
void imx_gpc_set_a53_core_awake(uint32_t core_id)
{
	imx_gpc_mask_irq0(core_id, true);
}
  150. static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx)
  151. {
  152. uintptr_t reg;
  153. unsigned int val;
  154. if (hwirq >= MAX_HW_IRQ_NUM || cpu_idx >= 4) {
  155. return;
  156. }
  157. /*
  158. * using the mask/unmask bit as affinity function.unmask the
  159. * IMR bit to enable IRQ wakeup for this core.
  160. */
  161. gpc_imr_core_spin_lock(cpu_idx);
  162. reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4;
  163. val = mmio_read_32(reg);
  164. val &= ~(1 << hwirq % 32);
  165. mmio_write_32(reg, val);
  166. gpc_imr_core_spin_unlock(cpu_idx);
  167. /* clear affinity of other core */
  168. for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
  169. if (cpu_idx != i) {
  170. gpc_imr_core_spin_lock(i);
  171. reg = gpc_imr_offset[i] + (hwirq / 32) * 4;
  172. val = mmio_read_32(reg);
  173. val |= (1 << hwirq % 32);
  174. mmio_write_32(reg, val);
  175. gpc_imr_core_spin_unlock(i);
  176. }
  177. }
  178. }
  179. /* use wfi power down the core */
  180. void imx_set_cpu_pwr_off(unsigned int core_id)
  181. {
  182. bakery_lock_get(&gpc_lock);
  183. /* enable the wfi power down of the core */
  184. mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
  185. (1 << (core_id + 20)));
  186. bakery_lock_release(&gpc_lock);
  187. /* assert the pcg pcr bit of the core */
  188. mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
  189. };
/*
 * Configure 'core_id' for low-power mode entry (pdn == true) or undo
 * that configuration on LPM exit (pdn == false); serialised by gpc_lock.
 */
void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
{
	bakery_lock_get(&gpc_lock);

	if (pdn) {
		/* enable the core WFI PDN & IRQ PUP */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				(1 << (core_id + 20)) | COREx_IRQ_WUP(core_id));
		/* assert the pcg pcr bit of the core */
		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	} else {
		/* disable CORE WFI PDN & IRQ PUP */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
				COREx_IRQ_WUP(core_id));
		/*
		 * NOTE(review): the original comment said "deassert the pcg
		 * pcr bit", yet this write SETS bit 0 — same as the pdn path.
		 * Confirm against the GPC reference manual before changing.
		 */
		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
	}

	bakery_lock_release(&gpc_lock);
}
  209. void imx_pup_pdn_slot_config(int last_core, bool pdn)
  210. {
  211. if (pdn) {
  212. /* SLOT0 for A53 PLAT power down */
  213. mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(0), SLT_PLAT_PDN);
  214. /* SLOT1 for A53 PLAT power up */
  215. mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(1), SLT_PLAT_PUP);
  216. /* SLOT2 for A53 primary core power up */
  217. mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(2), SLT_COREx_PUP(last_core));
  218. /* ACK setting: PLAT ACK for PDN, CORE ACK for PUP */
  219. mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
  220. A53_PLAT_PDN_ACK | SLT_COREx_PUP_ACK(last_core));
  221. } else {
  222. mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), 0xFFFFFFFF);
  223. mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(1), 0xFFFFFFFF);
  224. mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(2), 0xFFFFFFFF);
  225. mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
  226. A53_DUMMY_PDN_ACK | A53_DUMMY_PUP_ACK);
  227. }
  228. }
/*
 * Prepare the A53 cluster (PLAT/SCU domain) for power down when
 * 'power_state' is local-state-off, or undo that configuration
 * otherwise; 'last_core' is the core that remains to be woken up.
 */
void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
{
	uint32_t val;

	if (is_local_state_off(power_state)) {
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val |= A53_LPM_STOP; /* enable C0-C1's STOP mode */
		val &= ~CPU_CLOCK_ON_LPM; /* disable CPU clock in LPM mode */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* enable C2-3's STOP mode */
		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_STOP);

		/* enable PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val &= ~EN_L2_WFI_PDN;
		val |= L2PGE | EN_PLAT_PDN;
		val &= ~COREx_IRQ_WUP(last_core); /* disable IRQ PUP for last core */
		val |= COREx_LPM_PUP(last_core); /* enable LPM PUP for last core */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);

		imx_pup_pdn_slot_config(last_core, true);

		/* enable PLAT PGC */
		mmio_setbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);
	} else {
		/* clear PLAT PGC */
		mmio_clrbits_32(IMX_GPC_BASE + A53_PLAT_PGC, 0x1);

		/* clear the slot and ack for cluster power down */
		imx_pup_pdn_slot_config(last_core, false);

		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
		val &= ~A53_LPM_MASK; /* clear the C0~1 LPM */
		val |= CPU_CLOCK_ON_LPM; /* keep the CPU clock on in LPM (bit is SET here) */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

		/* set A53 LPM to RUN mode */
		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, A53_LPM_MASK);

		/* clear PLAT/SCU power down */
		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
		val |= EN_L2_WFI_PDN;
		val &= ~(L2PGE | EN_PLAT_PDN);
		val &= ~COREx_LPM_PUP(last_core); /* disable the last core's LPM PUP */
		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);
	}
}
#define MAX_PLL_NUM U(12)

/*
 * ANAMIX PLL control registers (offsets relative to IMX_ANAMIX_BASE)
 * and the override bits toggled around DSM entry/exit by
 * imx_anamix_override(). Which PLL each entry addresses is not
 * visible here — confirm against the i.MX8MQ reference manual.
 */
static const struct pll_override imx8mq_pll[MAX_PLL_NUM] = {
	{.reg = 0x0, .override_mask = 0x140000, },
	{.reg = 0x8, .override_mask = 0x140000, },
	{.reg = 0x10, .override_mask = 0x140000, },
	{.reg = 0x18, .override_mask = 0x140000, },
	{.reg = 0x20, .override_mask = 0x140000, },
	{.reg = 0x28, .override_mask = 0x140000, },
	{.reg = 0x30, .override_mask = 0x1555540, },
	{.reg = 0x3c, .override_mask = 0x1555540, },
	{.reg = 0x48, .override_mask = 0x140, },
	{.reg = 0x54, .override_mask = 0x140, },
	{.reg = 0x60, .override_mask = 0x140, },
	{.reg = 0x70, .override_mask = 0xa, },
};
  283. void imx_anamix_override(bool enter)
  284. {
  285. unsigned int i;
  286. /* enable the pll override bit before entering DSM mode */
  287. for (i = 0; i < MAX_PLL_NUM; i++) {
  288. if (enter) {
  289. mmio_setbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
  290. imx8mq_pll[i].override_mask);
  291. } else {
  292. mmio_clrbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
  293. imx8mq_pll[i].override_mask);
  294. }
  295. }
  296. }
  297. int imx_gpc_handler(uint32_t smc_fid,
  298. u_register_t x1,
  299. u_register_t x2,
  300. u_register_t x3)
  301. {
  302. switch (x1) {
  303. case FSL_SIP_CONFIG_GPC_CORE_WAKE:
  304. imx_gpc_core_wake(x2);
  305. break;
  306. case FSL_SIP_CONFIG_GPC_SET_WAKE:
  307. imx_gpc_set_wake(x2, x3);
  308. break;
  309. case FSL_SIP_CONFIG_GPC_MASK:
  310. imx_gpc_hwirq_mask(x2);
  311. break;
  312. case FSL_SIP_CONFIG_GPC_UNMASK:
  313. imx_gpc_hwirq_unmask(x2);
  314. break;
  315. case FSL_SIP_CONFIG_GPC_SET_AFF:
  316. imx_gpc_set_affinity(x2, x3);
  317. break;
  318. default:
  319. return SMC_UNK;
  320. }
  321. return 0;
  322. }
/* One-time GPC setup: mask all IRQs, configure LPM wakeup and ACK defaults. */
void imx_gpc_init(void)
{
	uint32_t val;
	unsigned int i, j;

	/*
	 * mask all the interrupt by default. Note: here 'i' indexes the
	 * four IMR registers within bank 'j' — PLATFORM_CORE_COUNT
	 * happens to equal MAX_IMR_NUM (4) on this platform.
	 */
	for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
		for (j = 0U; j < ARRAY_SIZE(gpc_imr_offset); j++) {
			mmio_write_32(gpc_imr_offset[j] + i * 4, ~0x0);
		}
	}
	/* Due to the hardware design requirement, need to make
	 * sure GPR interrupt(#32) is unmasked during RUN mode to
	 * avoid entering DSM mode by mistake.
	 */
	for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
		mmio_write_32(gpc_imr_offset[i], ~0x1);
	}

	/* leave the IOMUX_GPC bit 12 on for core wakeup */
	mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12);

	/* use external IRQs to wakeup C0~C3 from LPM */
	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
	val |= IRQ_SRC_A53_WUP;
	/* clear the MASTER0 LPM handshake */
	val &= ~MASTER0_LPM_HSK;
	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

	/* mask M4 DSM trigger if M4 is NOT enabled */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_M4, DSM_MODE_MASK);

	/* set all mix/PU in A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xfffd);

	/*
	 * set SCU timing. NOTE(review): the field encoding of these
	 * constants is not visible here — confirm against the GPC
	 * chapter of the i.MX8MQ reference manual.
	 */
	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
		      (0x59 << 10) | 0x5B | (0x2 << 20));

	/* set DUMMY PDN/PUP ACK by default for A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_DUMMY_PUP_ACK |
		      A53_DUMMY_PDN_ACK);

	/* disable DSM mode by default */
	mmio_clrbits_32(IMX_GPC_BASE + SLPCR, DSM_MODE_MASK);

	/*
	 * USB PHY power up needs to make sure RESET bit in SRC is clear,
	 * otherwise, the PU power up bit in GPC will NOT self-cleared.
	 * only need to do it once.
	 */
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);

	/*
	 * for USB OTG, the limitation are:
	 * 1. before system clock config, the IPG clock run at 12.5MHz, delay time
	 * should be longer than 82us.
	 * 2. after system clock config, ipg clock run at 66.5MHz, delay time
	 * be longer that 15.3 us.
	 * Add 100us to make sure the USB OTG SRC is clear safely.
	 */
	udelay(100);
}