/*
 * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
  6. #include <stdlib.h>
  7. #include <stdint.h>
  8. #include <stdbool.h>
  9. #include <common/debug.h>
  10. #include <drivers/delay_timer.h>
  11. #include <lib/mmio.h>
  12. #include <lib/psci/psci.h>
  13. #include <lib/smccc.h>
  14. #include <platform_def.h>
  15. #include <services/std_svc.h>
  16. #include <gpc.h>
  17. #include <imx_sip_svc.h>
/* CCM clock gating register for clock slice x */
#define CCGR(x)	(0x4000 + (x) * 16)

/*
 * Power-up (PU) domain identifiers. The order must match the
 * pu_domains[] table, which is indexed by these values in
 * imx_gpc_pm_domain_enable().
 */
enum pu_domain_id {
	HSIOMIX,
	PCIE,
	OTG1,
	OTG2,
	GPUMIX,
	VPUMIX,
	VPU_G1,
	VPU_G2,
	VPU_H1,
	DISPMIX,
	MIPI,
	/* below two domain only for ATF internal use */
	GPU2D,
	GPU3D,
	MAX_DOMAINS,
};
/* PU domain table, indexed by enum pu_domain_id */
static struct imx_pwr_domain pu_domains[] = {
	IMX_MIX_DOMAIN(HSIOMIX, false),
	IMX_PD_DOMAIN(PCIE, false),
	IMX_PD_DOMAIN(OTG1, true),
	IMX_PD_DOMAIN(OTG2, true),
	IMX_MIX_DOMAIN(GPUMIX, false),
	IMX_MIX_DOMAIN(VPUMIX, false),
	IMX_PD_DOMAIN(VPU_G1, false),
	IMX_PD_DOMAIN(VPU_G2, false),
	IMX_PD_DOMAIN(VPU_H1, false),
	IMX_MIX_DOMAIN(DISPMIX, false),
	IMX_PD_DOMAIN(MIPI, false),
	/* below two domain only for ATF internal use */
	IMX_MIX_DOMAIN(GPU2D, false),
	IMX_MIX_DOMAIN(GPU3D, false),
};

/* bitmask of currently powered-on domains (bit n == domain id n) */
static unsigned int pu_domain_status;

/* SRC reset control registers for the GPU/VPU mixes */
#define GPU_RCR		0x40
#define VPU_RCR		0x44

/* VPU blk-ctl: per-core soft reset (active low) bits */
#define VPU_CTL_BASE		0x38330000
#define BLK_SFT_RSTN_CSR	0x0
#define H1_SFT_RSTN		BIT(2)
#define G1_SFT_RSTN		BIT(1)
#define G2_SFT_RSTN		BIT(0)

/* DISPMIX blk-ctl base address */
#define DISP_CTL_BASE		0x32e28000
  62. void vpu_sft_reset_assert(uint32_t domain_id)
  63. {
  64. uint32_t val;
  65. val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);
  66. switch (domain_id) {
  67. case VPU_G1:
  68. val &= ~G1_SFT_RSTN;
  69. mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
  70. break;
  71. case VPU_G2:
  72. val &= ~G2_SFT_RSTN;
  73. mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
  74. break;
  75. case VPU_H1:
  76. val &= ~H1_SFT_RSTN;
  77. mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
  78. break;
  79. default:
  80. break;
  81. }
  82. }
  83. void vpu_sft_reset_deassert(uint32_t domain_id)
  84. {
  85. uint32_t val;
  86. val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);
  87. switch (domain_id) {
  88. case VPU_G1:
  89. val |= G1_SFT_RSTN;
  90. mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
  91. break;
  92. case VPU_G2:
  93. val |= G2_SFT_RSTN;
  94. mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
  95. break;
  96. case VPU_H1:
  97. val |= H1_SFT_RSTN;
  98. mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
  99. break;
  100. default:
  101. break;
  102. }
  103. }
/*
 * Power one PU domain up (on == true) or down (on == false).
 *
 * The generic sequence (PGC bit, PU_PGC_UP/DN_TRG request, ADB400
 * handshake) is driven from the pu_domains[] table; GPUMIX, VPUMIX,
 * DISPMIX and the VPU sub-cores need extra, order-sensitive steps
 * that are handled inline below. Out-of-range ids are ignored.
 */
void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
{
	if (domain_id >= MAX_DOMAINS) {
		return;
	}

	struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id];

	if (on) {
		/* bookkeeping: mark this domain as powered on */
		pu_domain_status |= (1 << domain_id);

		/* hold the VPU core in soft reset while its power ramps up */
		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1) {
			vpu_sft_reset_assert(domain_id);
		}

		/* HSIOMIX has no PU bit, so skip for it */
		if (domain_id != HSIOMIX) {
			/* clear the PGC bit */
			mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);
			/* power up the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req);
			/* wait for power request done (trigger bit self-clears) */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) {
				;
			}
		}

		/* power is stable: release the VPU core soft reset */
		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1) {
			vpu_sft_reset_deassert(domain_id);
			/* delay for a while to make sure reset done */
			udelay(100);
		}

		if (domain_id == GPUMIX) {
			/* assert reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x1);

			/* power up GPU2D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU2D_PWR_REQ);
			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU2D_PWR_REQ) {
				;
			}

			udelay(1);

			/* power up GPU3D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU3D_PWR_REQ);
			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU3D_PWR_REQ) {
				;
			}

			udelay(10);
			/* release the gpumix reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x0);
			udelay(10);
		}

		/* pulse the VPUMIX reset in SRC, then ungate the VPU clocks */
		if (domain_id == VPUMIX) {
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x1);
			udelay(5);
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x0);
			udelay(5);

			/* enable all clock */
			mmio_write_32(VPU_CTL_BASE + 0x4, 0x7);
		}

		if (domain_id == DISPMIX) {
			/* special blk-ctl setting for DISPMIX */
			mmio_write_32(DISP_CTL_BASE + 0x4, 0x1fff);
			mmio_write_32(DISP_CTL_BASE, 0x7f);
			mmio_write_32(DISP_CTL_BASE + 0x8, 0x30000);
		}

		/* handle the ADB400 sync */
		if (pwr_domain->need_sync) {
			/* clear adb power down request */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);
			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
				;
			}
		}

		if (domain_id == GPUMIX) {
			/* power up GPU2D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);
			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
				;
			}

			/* power up GPU3D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);
			/* wait for adb power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
				;
			}
		}
	} else {
		/* bookkeeping: mark this domain as powered off */
		pu_domain_status &= ~(1 << domain_id);

		/*
		 * OTG1/OTG2 are never actually powered down here; the
		 * table marks them 'true' — presumably always-on once
		 * enabled (TODO confirm against IMX_PD_DOMAIN flag).
		 */
		if (domain_id == OTG1 || domain_id == OTG2) {
			return;
		}

		/* GPU2D & GPU3D ADB power down */
		if (domain_id == GPUMIX) {
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);
			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
				;
			}

			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);
			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
				;
			}
		}

		/* handle the ADB400 sync */
		if (pwr_domain->need_sync) {
			/* set adb power down request */
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);
			/* wait for adb power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
				;
			}
		}

		if (domain_id == GPUMIX) {
			/* power down GPU2D */
			mmio_setbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU2D_PWR_REQ);
			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU2D_PWR_REQ) {
				;
			}

			/* power down GPU3D */
			mmio_setbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU3D_PWR_REQ);
			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU3D_PWR_REQ) {
				;
			}
		}

		/* HSIOMIX has no PU bit, so skip for it */
		if (domain_id != HSIOMIX) {
			/* set the PGC bit */
			mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);
			/* power down the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req);
			/* wait for power request done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req) {
				;
			}
		}
	}
}
  250. void imx_gpc_init(void)
  251. {
  252. unsigned int val;
  253. int i;
  254. /* mask all the wakeup irq by default */
  255. for (i = 0; i < 4; i++) {
  256. mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0);
  257. mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0);
  258. mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0);
  259. mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0);
  260. mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
  261. }
  262. val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
  263. /* use GIC wake_request to wakeup C0~C3 from LPM */
  264. val |= 0x30c00000;
  265. /* clear the MASTER0 LPM handshake */
  266. val &= ~(1 << 6);
  267. mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);
  268. /* clear MASTER1 & MASTER2 mapping in CPU0(A53) */
  269. mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, (MASTER1_MAPPING |
  270. MASTER2_MAPPING));
  271. /* set all mix/PU in A53 domain */
  272. mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xffff);
  273. /*
  274. * Set the CORE & SCU power up timing:
  275. * SW = 0x1, SW2ISO = 0x1;
  276. * the CPU CORE and SCU power up timing counter
  277. * is drived by 32K OSC, each domain's power up
  278. * latency is (SW + SW2ISO) / 32768
  279. */
  280. mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x81);
  281. mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x81);
  282. mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x81);
  283. mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x81);
  284. mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x81);
  285. mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
  286. (0x59 << 10) | 0x5B | (0x2 << 20));
  287. /* set DUMMY PDN/PUP ACK by default for A53 domain */
  288. mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53,
  289. A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK);
  290. /* clear DSM by default */
  291. val = mmio_read_32(IMX_GPC_BASE + SLPCR);
  292. val &= ~SLPCR_EN_DSM;
  293. /* enable the fast wakeup wait mode */
  294. val |= SLPCR_A53_FASTWUP_WAIT_MODE;
  295. /* clear the RBC */
  296. val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT);
  297. /* set the STBY_COUNT to 0x5, (128 * 30)us */
  298. val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT);
  299. val |= (0x5 << SLPCR_STBY_COUNT_SHFT);
  300. mmio_write_32(IMX_GPC_BASE + SLPCR, val);
  301. /*
  302. * USB PHY power up needs to make sure RESET bit in SRC is clear,
  303. * otherwise, the PU power up bit in GPC will NOT self-cleared.
  304. * only need to do it once.
  305. */
  306. mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
  307. mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
  308. }