pmu.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626
  1. /*
  2. * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <errno.h>
  8. #include <string.h>
  9. #include <platform_def.h>
  10. #include <arch_helpers.h>
  11. #include <bl31/bl31.h>
  12. #include <common/debug.h>
  13. #include <drivers/arm/gicv3.h>
  14. #include <drivers/delay_timer.h>
  15. #include <drivers/gpio.h>
  16. #include <lib/bakery_lock.h>
  17. #include <lib/mmio.h>
  18. #include <plat/common/platform.h>
  19. #include <dfs.h>
  20. #include <m0_ctl.h>
  21. #include <plat_params.h>
  22. #include <plat_private.h>
  23. #include <pmu.h>
  24. #include <pmu_com.h>
  25. #include <pwm.h>
  26. #include <rk3399_def.h>
  27. #include <secure.h>
  28. #include <soc.h>
  29. #include <suspend.h>
/* Serializes power-domain register accesses between cores. */
DEFINE_BAKERY_LOCK(rockchip_pd_lock);

/* Warm-boot entry address for cores; presumably set at init — confirm setter. */
static uint32_t cpu_warm_boot_addr;
/* Buffer used to preserve the secure SRAM contents across suspend. */
static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT];
/* CRU register snapshot, covering offsets 0 .. CRU_SDIO0_CON1 inclusive. */
static uint32_t store_cru[CRU_SDIO0_CON1 / 4 + 1];
/* USB PHY register snapshots (7 words per PHY). */
static uint32_t store_usbphy0[7];
static uint32_t store_usbphy1[7];
/* Selected GRF registers preserved across system suspend. */
static uint32_t store_grf_io_vsel;
static uint32_t store_grf_soc_con0;
static uint32_t store_grf_soc_con1;
static uint32_t store_grf_soc_con2;
static uint32_t store_grf_soc_con3;
static uint32_t store_grf_soc_con4;
static uint32_t store_grf_soc_con7;
static uint32_t store_grf_ddrc_con[4];
/* Watchdog 0/1 register snapshots (2 words each). */
static uint32_t store_wdt0[2];
static uint32_t store_wdt1[2];
/* GICv3 distributor/redistributor contexts saved for suspend/resume. */
static gicv3_dist_ctx_t dist_ctx;
static gicv3_redist_ctx_t rdist_ctx;

/*
 * There are two ways of powering a core on or off:
 * 1) Switch its power domain on or off via the PMU_PWRDN_CON reg;
 *    this is the core_pwr_pd mode.
 * 2) Enable core power management in the PMU_CORE_PM_CON reg; the
 *    core's power domain is then powered off automatically when it
 *    enters WFI. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * core_pm_cfg_info records, per core, which method is currently in use.
 */
static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section(".tzfw_coherent_mem")))
#endif
; /* coherent */
  62. static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
  63. {
  64. uint32_t bus_id = BIT(bus);
  65. uint32_t bus_req;
  66. uint32_t wait_cnt = 0;
  67. uint32_t bus_state, bus_ack;
  68. if (state)
  69. bus_req = BIT(bus);
  70. else
  71. bus_req = 0;
  72. mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);
  73. do {
  74. bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
  75. bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
  76. if (bus_state == bus_req && bus_ack == bus_req)
  77. break;
  78. wait_cnt++;
  79. udelay(1);
  80. } while (wait_cnt < MAX_WAIT_COUNT);
  81. if (bus_state != bus_req || bus_ack != bus_req) {
  82. INFO("%s:st=%x(%x)\n", __func__,
  83. mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
  84. bus_state);
  85. INFO("%s:st=%x(%x)\n", __func__,
  86. mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
  87. bus_ack);
  88. }
  89. }
/* Saved QoS register state for all masters, filled in by qos_save(). */
struct pmu_slpdata_s pmu_slpdata;

/*
 * Restore the QoS settings captured by qos_save() for every power
 * domain that is currently powered on. Domains that are off are
 * skipped; their QoS registers are inaccessible while unpowered.
 */
static void qos_restore(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
		RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}
/*
 * Capture the QoS settings of every master whose power domain is
 * currently on into pmu_slpdata, so qos_restore() can reinstate them
 * after resume. Domains that are off are skipped; their QoS registers
 * are inaccessible while unpowered.
 */
static void qos_save(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
		SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}
  221. static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
  222. {
  223. uint32_t state;
  224. if (pmu_power_domain_st(pd_id) == pd_state)
  225. goto out;
  226. if (pd_state == pmu_pd_on)
  227. pmu_power_domain_ctr(pd_id, pd_state);
  228. state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;
  229. switch (pd_id) {
  230. case PD_GPU:
  231. pmu_bus_idle_req(BUS_ID_GPU, state);
  232. break;
  233. case PD_VIO:
  234. pmu_bus_idle_req(BUS_ID_VIO, state);
  235. break;
  236. case PD_ISP0:
  237. pmu_bus_idle_req(BUS_ID_ISP0, state);
  238. break;
  239. case PD_ISP1:
  240. pmu_bus_idle_req(BUS_ID_ISP1, state);
  241. break;
  242. case PD_VO:
  243. pmu_bus_idle_req(BUS_ID_VOPB, state);
  244. pmu_bus_idle_req(BUS_ID_VOPL, state);
  245. break;
  246. case PD_HDCP:
  247. pmu_bus_idle_req(BUS_ID_HDCP, state);
  248. break;
  249. case PD_TCPD0:
  250. break;
  251. case PD_TCPD1:
  252. break;
  253. case PD_GMAC:
  254. pmu_bus_idle_req(BUS_ID_GMAC, state);
  255. break;
  256. case PD_CCI:
  257. pmu_bus_idle_req(BUS_ID_CCIM0, state);
  258. pmu_bus_idle_req(BUS_ID_CCIM1, state);
  259. break;
  260. case PD_SD:
  261. pmu_bus_idle_req(BUS_ID_SD, state);
  262. break;
  263. case PD_EMMC:
  264. pmu_bus_idle_req(BUS_ID_EMMC, state);
  265. break;
  266. case PD_EDP:
  267. pmu_bus_idle_req(BUS_ID_EDP, state);
  268. break;
  269. case PD_SDIOAUDIO:
  270. pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
  271. break;
  272. case PD_GIC:
  273. pmu_bus_idle_req(BUS_ID_GIC, state);
  274. break;
  275. case PD_RGA:
  276. pmu_bus_idle_req(BUS_ID_RGA, state);
  277. break;
  278. case PD_VCODEC:
  279. pmu_bus_idle_req(BUS_ID_VCODEC, state);
  280. break;
  281. case PD_VDU:
  282. pmu_bus_idle_req(BUS_ID_VDU, state);
  283. break;
  284. case PD_IEP:
  285. pmu_bus_idle_req(BUS_ID_IEP, state);
  286. break;
  287. case PD_USB3:
  288. pmu_bus_idle_req(BUS_ID_USB3, state);
  289. break;
  290. case PD_PERIHP:
  291. pmu_bus_idle_req(BUS_ID_PERIHP, state);
  292. break;
  293. default:
  294. /* Do nothing in default case */
  295. break;
  296. }
  297. if (pd_state == pmu_pd_off)
  298. pmu_power_domain_ctr(pd_id, pd_state);
  299. out:
  300. return 0;
  301. }
  302. static uint32_t pmu_powerdomain_state;
  303. static void pmu_power_domains_suspend(void)
  304. {
  305. clk_gate_con_save();
  306. clk_gate_con_disable();
  307. qos_save();
  308. pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
  309. pmu_set_power_domain(PD_GPU, pmu_pd_off);
  310. pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
  311. pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
  312. pmu_set_power_domain(PD_VO, pmu_pd_off);
  313. pmu_set_power_domain(PD_ISP0, pmu_pd_off);
  314. pmu_set_power_domain(PD_ISP1, pmu_pd_off);
  315. pmu_set_power_domain(PD_HDCP, pmu_pd_off);
  316. pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
  317. pmu_set_power_domain(PD_GMAC, pmu_pd_off);
  318. pmu_set_power_domain(PD_EDP, pmu_pd_off);
  319. pmu_set_power_domain(PD_IEP, pmu_pd_off);
  320. pmu_set_power_domain(PD_RGA, pmu_pd_off);
  321. pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
  322. pmu_set_power_domain(PD_VDU, pmu_pd_off);
  323. pmu_set_power_domain(PD_USB3, pmu_pd_off);
  324. pmu_set_power_domain(PD_EMMC, pmu_pd_off);
  325. pmu_set_power_domain(PD_VIO, pmu_pd_off);
  326. pmu_set_power_domain(PD_SD, pmu_pd_off);
  327. pmu_set_power_domain(PD_PERIHP, pmu_pd_off);
  328. clk_gate_con_restore();
  329. }
  330. static void pmu_power_domains_resume(void)
  331. {
  332. clk_gate_con_save();
  333. clk_gate_con_disable();
  334. if (!(pmu_powerdomain_state & BIT(PD_VDU)))
  335. pmu_set_power_domain(PD_VDU, pmu_pd_on);
  336. if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
  337. pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
  338. if (!(pmu_powerdomain_state & BIT(PD_RGA)))
  339. pmu_set_power_domain(PD_RGA, pmu_pd_on);
  340. if (!(pmu_powerdomain_state & BIT(PD_IEP)))
  341. pmu_set_power_domain(PD_IEP, pmu_pd_on);
  342. if (!(pmu_powerdomain_state & BIT(PD_EDP)))
  343. pmu_set_power_domain(PD_EDP, pmu_pd_on);
  344. if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
  345. pmu_set_power_domain(PD_GMAC, pmu_pd_on);
  346. if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
  347. pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
  348. if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
  349. pmu_set_power_domain(PD_HDCP, pmu_pd_on);
  350. if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
  351. pmu_set_power_domain(PD_ISP1, pmu_pd_on);
  352. if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
  353. pmu_set_power_domain(PD_ISP0, pmu_pd_on);
  354. if (!(pmu_powerdomain_state & BIT(PD_VO)))
  355. pmu_set_power_domain(PD_VO, pmu_pd_on);
  356. if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
  357. pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
  358. if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
  359. pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
  360. if (!(pmu_powerdomain_state & BIT(PD_GPU)))
  361. pmu_set_power_domain(PD_GPU, pmu_pd_on);
  362. if (!(pmu_powerdomain_state & BIT(PD_USB3)))
  363. pmu_set_power_domain(PD_USB3, pmu_pd_on);
  364. if (!(pmu_powerdomain_state & BIT(PD_EMMC)))
  365. pmu_set_power_domain(PD_EMMC, pmu_pd_on);
  366. if (!(pmu_powerdomain_state & BIT(PD_VIO)))
  367. pmu_set_power_domain(PD_VIO, pmu_pd_on);
  368. if (!(pmu_powerdomain_state & BIT(PD_SD)))
  369. pmu_set_power_domain(PD_SD, pmu_pd_on);
  370. if (!(pmu_powerdomain_state & BIT(PD_PERIHP)))
  371. pmu_set_power_domain(PD_PERIHP, pmu_pd_on);
  372. qos_restore();
  373. clk_gate_con_restore();
  374. }
  375. void pmu_power_domains_on(void)
  376. {
  377. clk_gate_con_disable();
  378. pmu_set_power_domain(PD_VDU, pmu_pd_on);
  379. pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
  380. pmu_set_power_domain(PD_RGA, pmu_pd_on);
  381. pmu_set_power_domain(PD_IEP, pmu_pd_on);
  382. pmu_set_power_domain(PD_EDP, pmu_pd_on);
  383. pmu_set_power_domain(PD_GMAC, pmu_pd_on);
  384. pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
  385. pmu_set_power_domain(PD_HDCP, pmu_pd_on);
  386. pmu_set_power_domain(PD_ISP1, pmu_pd_on);
  387. pmu_set_power_domain(PD_ISP0, pmu_pd_on);
  388. pmu_set_power_domain(PD_VO, pmu_pd_on);
  389. pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
  390. pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
  391. pmu_set_power_domain(PD_GPU, pmu_pd_on);
  392. }
/*
 * Flush the big (A72) cluster's L2 cache via the PMU hardware flush
 * request, polling PMU_CORE_PWR_ST until the flush-done bit is set.
 * The request bit is cleared again before returning.
 */
void rk3399_flush_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	/* Ensure the request is visible before we start polling. */
	dsb();

	/*
	 * The Big cluster flush L2 cache took ~4ms by default, give 10ms for
	 * the enough margin.
	 */
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		udelay(10);
		/* Warn once past the 10 ms budget but keep waiting. */
		if (wait_cnt == 10000 / 10)
			WARN("L2 cache flush on suspend took longer than 10ms\n");
	}

	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}
/*
 * Prepare the big (A72) cluster SCU for power-down: verify both A72
 * cores are already off, flush the cluster's L2, assert ACINACTM to
 * detach the cluster from the interconnect, then wait for the cluster
 * to report STANDBY_BY_WFIL2.
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	/* Refuse to proceed unless both A72 cores report powered down. */
	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3399_flush_l2_b();

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	/* Poll for WFIL2 standby; log (but keep polling) past the budget. */
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		udelay(1);
		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}
/* Reattach the big cluster to the interconnect by releasing ACINACTM. */
static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}
/* Return the recorded power-down mode (core_pwr_*) for the given core. */
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}
/*
 * Record the power-down mode (core_pwr_*) for the given core.
 * When the array does not live in coherent memory, flush it so a
 * core coming out of reset with caches off sees the fresh value.
 */
static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}
/*
 * Power on a secondary core's power domain.
 *
 * Returns 0 on success, -EINVAL if the core was configured for
 * WFI-triggered power-down but its domain is unexpectedly still on.
 */
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * There are two ways to powering on or off on core.
	 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg
	 * 2) Enable the core power manage in PMU_CORE_PM_CON reg,
	 *     then, if the core enter into wfi, it power domain will be
	 *     powered off automatically.
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);
	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the cores have been on, power them off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		/* WFI-managed core: domain must be off before sft wakeup. */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		/* Ensure the wakeup write lands before returning. */
		dsb();
	}

	return 0;
}
/*
 * Power off a core, either immediately via its power domain
 * (pd_cfg == core_pwr_pd, requires the core to be in WFI/WFE) or by
 * arming auto power-down on the core's next WFI (core_pwr_wfi /
 * core_pwr_wfi_int, the latter also enabling interrupt wakeup).
 *
 * Returns 0 on success (or if already off), -EINVAL when the core is
 * not in WFI/WFE for a direct power-domain cut.
 */
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		/* Direct cut is only safe once the core sits in WFI/WFE. */
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		/* Ensure the PM config is written before entering WFI. */
		dsb();
	}

	return 0;
}
/*
 * Cluster-level suspend hook: when this core is the last one alive in
 * its cluster (all sibling PWRDN_ST bits set), drop the cluster PLL to
 * slow mode and flag the cluster for warm-boot restore. Re-reads the
 * status afterwards to catch the race where a sibling came back up in
 * the meantime, and undoes the change if so.
 */
static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		/* Pick the PLL and status mask of this core's cluster. */
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				       PLATFORM_CLUSTER0_CORE_COUNT;
		}

		/* All siblings down except the current core. */
		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;

			/*
			 * Another cpu in this cluster came up again while we
			 * were switching modes; revert the config at once.
			 */
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_NOMAL_MODE);
			clst_warmboot_data[pll_id] = 0;
		}
	}
}
  543. static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
  544. {
  545. uint32_t cpu_id = plat_my_core_pos();
  546. uint32_t pll_id, pll_st;
  547. assert(cpu_id < PLATFORM_CORE_COUNT);
  548. if (lvl_state == PLAT_MAX_OFF_STATE) {
  549. if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
  550. pll_id = ALPLL_ID;
  551. else
  552. pll_id = ABPLL_ID;
  553. pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
  554. PLL_MODE_SHIFT;
  555. if (pll_st != NORMAL_MODE) {
  556. WARN("%s: clst (%d) is in error mode (%d)\n",
  557. __func__, pll_id, pll_st);
  558. return -1;
  559. }
  560. }
  561. return 0;
  562. }
  563. static void nonboot_cpus_off(void)
  564. {
  565. uint32_t boot_cpu, cpu;
  566. boot_cpu = plat_my_core_pos();
  567. /* turn off noboot cpus */
  568. for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
  569. if (cpu == boot_cpu)
  570. continue;
  571. cpus_power_domain_off(cpu, core_pwr_pd);
  572. }
  573. }
/*
 * PSCI CPU_ON hook: record the hotplug flag and entry point for the
 * target core, publish them (dsb) before the core can observe them,
 * then power the core's domain on.
 */
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	/* Make flag/entry visible before releasing the core. */
	dsb();

	cpus_power_domain_on(cpu_id);

	return PSCI_E_SUCCESS;
}
  585. int rockchip_soc_cores_pwr_dm_off(void)
  586. {
  587. uint32_t cpu_id = plat_my_core_pos();
  588. cpus_power_domain_off(cpu_id, core_pwr_wfi);
  589. return PSCI_E_SUCCESS;
  590. }
  591. int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
  592. plat_local_state_t lvl_state)
  593. {
  594. if (lvl == MPIDR_AFFLVL1) {
  595. clst_pwr_domain_suspend(lvl_state);
  596. }
  597. return PSCI_E_SUCCESS;
  598. }
/*
 * PSCI core suspend hook: record the auto power-down flag and the
 * secure warm-entry point, publish them (dsb), then arm WFI-triggered
 * power-down with interrupt wakeup enabled.
 */
int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	/* Make flag/entry visible before the core powers down. */
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return PSCI_E_SUCCESS;
}
  610. int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state)
  611. {
  612. if (lvl == MPIDR_AFFLVL1) {
  613. clst_pwr_domain_suspend(lvl_state);
  614. }
  615. return PSCI_E_SUCCESS;
  616. }
  617. int rockchip_soc_cores_pwr_dm_on_finish(void)
  618. {
  619. uint32_t cpu_id = plat_my_core_pos();
  620. mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
  621. CORES_PM_DISABLE);
  622. return PSCI_E_SUCCESS;
  623. }
  624. int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
  625. plat_local_state_t lvl_state)
  626. {
  627. if (lvl == MPIDR_AFFLVL1) {
  628. clst_pwr_domain_resume(lvl_state);
  629. }
  630. return PSCI_E_SUCCESS;
  631. }
  632. int rockchip_soc_cores_pwr_dm_resume(void)
  633. {
  634. uint32_t cpu_id = plat_my_core_pos();
  635. /* Disable core_pm */
  636. mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);
  637. return PSCI_E_SUCCESS;
  638. }
  639. int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state)
  640. {
  641. if (lvl == MPIDR_AFFLVL1) {
  642. clst_pwr_domain_resume(lvl_state);
  643. }
  644. return PSCI_E_SUCCESS;
  645. }
  646. /**
  647. * init_pmu_counts - Init timing counts in the PMU register area
  648. *
  649. * At various points when we power up or down parts of the system we need
  650. * a delay to wait for power / clocks to become stable. The PMU has counters
  651. * to help software do the delay properly. Basically, it works like this:
  652. * - Software sets up counter values
  653. * - When software turns on something in the PMU, the counter kicks off
  654. * - The hardware sets a bit automatically when the counter has finished and
  655. * software knows that the initialization is done.
  656. *
  657. * It's software's job to setup these counters. The hardware power on default
  658. * for these settings is conservative, setting everything to 0x5dc0
  659. * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
  660. *
  661. * Note that some of these counters are only really used at suspend/resume
  662. * time (for instance, that's the only time we turn off/on the oscillator) and
  663. * others are used during normal runtime (like turning on/off a CPU or GPU) but
  664. * it doesn't hurt to init everything at boot.
  665. *
  666. * Also note that these counters can run off the 32 kHz clock or the 24 MHz
  667. * clock. While the 24 MHz clock can give us more precision, it's not always
  668. * available (like when we turn the oscillator off at sleep time). The
  669. * pmu_use_lf (lf: low freq) is available in power mode. Current understanding
  670. * is that counts work like this:
  671. * IF (pmu_use_lf == 0) || (power_mode_en == 0)
  672. * use the 24M OSC for counts
  673. * ELSE
  674. * use the 32K OSC for counts
  675. *
  676. * Notes:
  677. * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment
  678. * we always keep that 0. This apparently choose between using the PLL as
  679. * the source for the PMU vs. the 24M clock. If we ever set it to 1 we
  680. * should consider how it affects these counts (if at all).
  681. * - The power_mode_en is documented to auto-clear automatically when we leave
  682. * "power mode". That's why most clocks are on 24M. Only timings used when
  683. * in "power mode" are 32k.
  684. * - In some cases the kernel may override these counts.
  685. *
  686. * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs
  687. * in power mode, we need to ensure that they are available.
  688. */
  689. static void init_pmu_counts(void)
  690. {
  691. /* COUNTS FOR INSIDE POWER MODE */
  692. /*
  693. * From limited testing, need PMU stable >= 2ms, but go overkill
  694. * and choose 30 ms to match testing on past SoCs. Also let
  695. * OSC have 30 ms for stabilization.
  696. */
  697. mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
  698. mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));
  699. /* Unclear what these should be; try 3 ms */
  700. mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));
  701. /* Unclear what this should be, but set the default explicitly */
  702. mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);
  703. /* COUNTS FOR OUTSIDE POWER MODE */
  704. /* Put something sorta conservative here until we know better */
  705. mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
  706. mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
  707. mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
  708. mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));
  709. /*
  710. * when we enable PMU_CLR_PERILP, it will shut down the SRAM, but
  711. * M0 code run in SRAM, and we need it to check whether cpu enter
  712. * FSM status, so we must wait M0 finish their code and enter WFI,
  713. * then we can shutdown SRAM, according FSM order:
  714. * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN
  715. * we can add delay when shutdown ST_SCU_L_PWRDN to guarantee M0 get
  716. * the FSM status and enter WFI, then enable PMU_CLR_PERILP.
  717. */
  718. mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5));
  719. mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));
  720. /*
  721. * Set CPU/GPU to 1 us.
  722. *
  723. * NOTE: Even though ATF doesn't configure the GPU we'll still setup
  724. * counts here. After all ATF controls all these other bits and also
  725. * chooses which clock these counters use.
  726. */
  727. mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
  728. mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
  729. mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
  730. mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
  731. }
/* Saved copy of CRU_CLKGATE_CON(3); put back by the resume path. */
static uint32_t clk_ddrc_save;

/*
 * Program everything the PMU power-mode FSM needs for system sleep:
 * the DDR controller clock source, hardware idle control for the CCI
 * and the little-cluster ADB400 bridges, the PMU_PWRMODE_CON bit set
 * describing what to power down/retain, and the external 32 kHz clock
 * routing used while the 24 MHz oscillator is off.
 */
static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	/* keep enabling clk_ddrc_bpll_src_en gate for DDRC */
	clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));

	/* Switch the DDR controller over to ABPLL for the sleep period. */
	prepare_abpll_for_ddrctrl();
	sram_func_set_ddrctl_pll(ABPLL_ID);

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);

	/* Hand CCI500 idle/gating control to hardware for power mode. */
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	/* Hand the little-cluster/GIC ADB400 bridges to hardware control. */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	/* Everything the FSM should power down, gate or retain in sleep. */
	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_WKUP_RST_EN) |
		       BIT(PMU_INPUT_CLAMP_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO0_RET_DE_REQ) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO1_RET_DE_REQ) |
		       BIT(PMU_CENTER_PD_EN) |
		       BIT(PMU_PERILP_PD_EN) |
		       BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);

	/* Allow GPIO wakeup, then arm the power mode. */
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	/* PLLs powered down by hardware; run from the external 32 kHz. */
	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}
  782. static void set_hw_idle(uint32_t hw_idle)
  783. {
  784. mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
  785. }
  786. static void clr_hw_idle(uint32_t hw_idle)
  787. {
  788. mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
  789. }
/* Saved iomux/pull/direction/clock-gate state for gpio2 ~ gpio4. */
static uint32_t iomux_status[12];
static uint32_t pull_mode_status[12];
static uint32_t gpio_direction[3];
static uint32_t gpio_2_4_clk_gate;

/*
 * Park the AP I/O banks (gpio2 ~ gpio4) for system sleep: save their
 * iomux, pull and direction state, then switch the banks requested via
 * the bl_aux apio info to GPIO function, no pull, input direction.
 * resume_apio() reverses this.
 */
static void suspend_apio(void)
{
	struct bl_aux_rk_apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	/* save gpio2 ~ gpio4 iomux and pull mode */
	for (i = 0; i < 12; i++) {
		iomux_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_IOMUX + i * 4);
		pull_mode_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_P + i * 4);
	}

	/* store gpio2 ~ gpio4 clock gate state */
	gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
				PCLK_GPIO2_GATE_SHIFT) & 0x07;

	/* make sure gpio2 ~ gpio4 pclks run (write 0 to the gate field) */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));

	/* save gpio2 ~ gpio4 direction (GPIO_SWPORTA_DDR at offset 0x04) */
	gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
	gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
	gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);

	/* apio1 charge gpio3a0 ~ gpio3c7 */
	if (suspend_apio->apio1) {
		/* set gpio3a0 ~ gpio3c7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3a0 ~ gpio3c7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);

		/* set gpio3a0 ~ gpio3c7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
	}

	/* apio2 charge gpio2a0 ~ gpio2b4 */
	if (suspend_apio->apio2) {
		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2a0 ~ gpio2b4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);

		/* set gpio2a0 ~ gpio2b4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
	}

	/* apio3 charge gpio2c0 ~ gpio2d4 */
	if (suspend_apio->apio3) {
		/* set gpio2c0 ~ gpio2d4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2c0 ~ gpio2d4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);

		/* set gpio2c0 ~ gpio2d4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
	}

	/* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
	if (suspend_apio->apio4) {
		/* set gpio4c0 ~ gpio4d6 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
	}

	/* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
	if (suspend_apio->apio5) {
		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

		/* set gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
	}
}
  888. static void resume_apio(void)
  889. {
  890. struct bl_aux_rk_apio_info *suspend_apio;
  891. int i;
  892. suspend_apio = plat_get_rockchip_suspend_apio();
  893. if (!suspend_apio)
  894. return;
  895. for (i = 0; i < 12; i++) {
  896. mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
  897. REG_SOC_WMSK | pull_mode_status[i]);
  898. mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
  899. REG_SOC_WMSK | iomux_status[i]);
  900. }
  901. /* set gpio2 ~ gpio4 direction back to store value */
  902. mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
  903. mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
  904. mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);
  905. /* set gpio2 ~ gpio4 clock gate back to store value */
  906. mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
  907. BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
  908. PCLK_GPIO2_GATE_SHIFT));
  909. }
  910. static void suspend_gpio(void)
  911. {
  912. struct bl_aux_gpio_info *suspend_gpio;
  913. uint32_t count;
  914. int i;
  915. suspend_gpio = plat_get_rockchip_suspend_gpio(&count);
  916. for (i = 0; i < count; i++) {
  917. gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
  918. gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
  919. udelay(1);
  920. }
  921. }
  922. static void resume_gpio(void)
  923. {
  924. struct bl_aux_gpio_info *suspend_gpio;
  925. uint32_t count;
  926. int i;
  927. suspend_gpio = plat_get_rockchip_suspend_gpio(&count);
  928. for (i = count - 1; i >= 0; i--) {
  929. gpio_set_value(suspend_gpio[i].index,
  930. !suspend_gpio[i].polarity);
  931. gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
  932. udelay(1);
  933. }
  934. }
  935. void sram_save(void)
  936. {
  937. size_t text_size = (char *)&__bl31_sram_text_real_end -
  938. (char *)&__bl31_sram_text_start;
  939. size_t data_size = (char *)&__bl31_sram_data_real_end -
  940. (char *)&__bl31_sram_data_start;
  941. size_t incbin_size = (char *)&__sram_incbin_real_end -
  942. (char *)&__sram_incbin_start;
  943. memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
  944. memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
  945. memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
  946. incbin_size);
  947. }
  948. void sram_restore(void)
  949. {
  950. size_t text_size = (char *)&__bl31_sram_text_real_end -
  951. (char *)&__bl31_sram_text_start;
  952. size_t data_size = (char *)&__bl31_sram_data_real_end -
  953. (char *)&__bl31_sram_data_start;
  954. size_t incbin_size = (char *)&__sram_incbin_real_end -
  955. (char *)&__sram_incbin_start;
  956. memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
  957. memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
  958. memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
  959. incbin_size);
  960. }
/* UART state saved across suspend by suspend_uart()/resume_uart(). */
struct uart_debug {
	uint32_t uart_dll;	/* divisor latch, low byte */
	uint32_t uart_dlh;	/* divisor latch, high byte */
	uint32_t uart_ier;	/* interrupt enable */
	uint32_t uart_fcr;	/* FIFO control (write-only; never read back) */
	uint32_t uart_mcr;	/* modem control */
	uint32_t uart_lcr;	/* line control */
};

/*
 * Register offsets.  Note DLL/IER share 0x00/0x04 with the divisor
 * latches: the DLAB bit in LCR selects which pair is visible.
 */
#define UART_DLL	0x00
#define UART_DLH	0x04
#define UART_IER	0x04
#define UART_FCR	0x08
#define UART_LCR	0x0c
#define UART_MCR	0x10
#define UARTSRR		0x88	/* software reset register */

#define UART_RESET	BIT(0)
#define UARTFCR_FIFOEN	BIT(0)
#define RCVR_FIFO_RESET	BIT(1)
#define XMIT_FIFO_RESET	BIT(2)
#define DIAGNOSTIC_MODE	BIT(4)	/* MCR bit 4 (loopback on DW UARTs) */
#define UARTLCR_DLAB	BIT(7)	/* expose divisor latches at 0x00/0x04 */

static struct uart_debug uart_save;
  983. void suspend_uart(void)
  984. {
  985. uint32_t uart_base = rockchip_get_uart_base();
  986. if (uart_base == 0)
  987. return;
  988. uart_save.uart_lcr = mmio_read_32(uart_base + UART_LCR);
  989. uart_save.uart_ier = mmio_read_32(uart_base + UART_IER);
  990. uart_save.uart_mcr = mmio_read_32(uart_base + UART_MCR);
  991. mmio_write_32(uart_base + UART_LCR,
  992. uart_save.uart_lcr | UARTLCR_DLAB);
  993. uart_save.uart_dll = mmio_read_32(uart_base + UART_DLL);
  994. uart_save.uart_dlh = mmio_read_32(uart_base + UART_DLH);
  995. mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr);
  996. }
/*
 * Re-initialize the debug UART after resume from the state captured by
 * suspend_uart().  The programming order matters: reset first, then set
 * the divisors under DLAB before re-enabling interrupts and FIFOs.
 */
void resume_uart(void)
{
	uint32_t uart_base = rockchip_get_uart_base();
	uint32_t uart_lcr;

	if (uart_base == 0)
		return;

	/* Software-reset the UART and flush both FIFOs. */
	mmio_write_32(uart_base + UARTSRR,
		      XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET);

	uart_lcr = mmio_read_32(uart_base + UART_LCR);
	/* Diagnostic (loopback) mode while the divisors are reprogrammed. */
	mmio_write_32(uart_base + UART_MCR, DIAGNOSTIC_MODE);

	/* DLAB=1 exposes the divisor latches at offsets 0x00/0x04. */
	mmio_write_32(uart_base + UART_LCR, uart_lcr | UARTLCR_DLAB);
	mmio_write_32(uart_base + UART_DLL, uart_save.uart_dll);
	mmio_write_32(uart_base + UART_DLH, uart_save.uart_dlh);

	/* Restore the saved LCR (normally with DLAB clear again). */
	mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr);
	mmio_write_32(uart_base + UART_IER, uart_save.uart_ier);
	mmio_write_32(uart_base + UART_FCR, UARTFCR_FIFOEN);
	mmio_write_32(uart_base + UART_MCR, uart_save.uart_mcr);
}
  1015. void save_usbphy(void)
  1016. {
  1017. store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0);
  1018. store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2);
  1019. store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3);
  1020. store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12);
  1021. store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13);
  1022. store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15);
  1023. store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16);
  1024. store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0);
  1025. store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2);
  1026. store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3);
  1027. store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12);
  1028. store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13);
  1029. store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15);
  1030. store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16);
  1031. }
  1032. void restore_usbphy(void)
  1033. {
  1034. mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0,
  1035. REG_SOC_WMSK | store_usbphy0[0]);
  1036. mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2,
  1037. REG_SOC_WMSK | store_usbphy0[1]);
  1038. mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3,
  1039. REG_SOC_WMSK | store_usbphy0[2]);
  1040. mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12,
  1041. REG_SOC_WMSK | store_usbphy0[3]);
  1042. mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13,
  1043. REG_SOC_WMSK | store_usbphy0[4]);
  1044. mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15,
  1045. REG_SOC_WMSK | store_usbphy0[5]);
  1046. mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16,
  1047. REG_SOC_WMSK | store_usbphy0[6]);
  1048. mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0,
  1049. REG_SOC_WMSK | store_usbphy1[0]);
  1050. mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2,
  1051. REG_SOC_WMSK | store_usbphy1[1]);
  1052. mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3,
  1053. REG_SOC_WMSK | store_usbphy1[2]);
  1054. mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12,
  1055. REG_SOC_WMSK | store_usbphy1[3]);
  1056. mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13,
  1057. REG_SOC_WMSK | store_usbphy1[4]);
  1058. mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15,
  1059. REG_SOC_WMSK | store_usbphy1[5]);
  1060. mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16,
  1061. REG_SOC_WMSK | store_usbphy1[6]);
  1062. }
  1063. void grf_register_save(void)
  1064. {
  1065. int i;
  1066. store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0));
  1067. store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1));
  1068. store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2));
  1069. store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3));
  1070. store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4));
  1071. store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7));
  1072. for (i = 0; i < 4; i++)
  1073. store_grf_ddrc_con[i] =
  1074. mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4);
  1075. store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL);
  1076. }
  1077. void grf_register_restore(void)
  1078. {
  1079. int i;
  1080. mmio_write_32(GRF_BASE + GRF_SOC_CON(0),
  1081. REG_SOC_WMSK | store_grf_soc_con0);
  1082. mmio_write_32(GRF_BASE + GRF_SOC_CON(1),
  1083. REG_SOC_WMSK | store_grf_soc_con1);
  1084. mmio_write_32(GRF_BASE + GRF_SOC_CON(2),
  1085. REG_SOC_WMSK | store_grf_soc_con2);
  1086. mmio_write_32(GRF_BASE + GRF_SOC_CON(3),
  1087. REG_SOC_WMSK | store_grf_soc_con3);
  1088. mmio_write_32(GRF_BASE + GRF_SOC_CON(4),
  1089. REG_SOC_WMSK | store_grf_soc_con4);
  1090. mmio_write_32(GRF_BASE + GRF_SOC_CON(7),
  1091. REG_SOC_WMSK | store_grf_soc_con7);
  1092. for (i = 0; i < 4; i++)
  1093. mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4,
  1094. REG_SOC_WMSK | store_grf_ddrc_con[i]);
  1095. mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel);
  1096. }
  1097. void cru_register_save(void)
  1098. {
  1099. int i;
  1100. for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4)
  1101. store_cru[i / 4] = mmio_read_32(CRU_BASE + i);
  1102. }
/*
 * Write back the CRU registers captured by cru_register_save(),
 * skipping registers the DDR resume path owns and applying the
 * high-16-bit write-enable mask only where the hardware needs it.
 */
void cru_register_restore(void)
{
	int i;

	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) {
		/*
		 * Since DPLL and CRU_CLKSEL_CON6 have already been
		 * restored in dmc_resume, and ABPLL will be restored
		 * later, skip them here.
		 */
		if ((i == CRU_CLKSEL_CON6) ||
		    (i >= CRU_PLL_CON(ABPLL_ID, 0) &&
		     i <= CRU_PLL_CON(DPLL_ID, 5)))
			continue;

		/* PLL_CON(x, 2) registers are written without the mask. */
		if ((i == CRU_PLL_CON(ALPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(CPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(GPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(NPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(VPLL_ID, 2)))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		/*
		 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107
		 * do not need the high-16-bit write mask either.
		 */
		else if ((i > 0x27c && i < 0x2b0) || (i == 0x508))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		else
			mmio_write_32(CRU_BASE + i,
				      REG_SOC_WMSK | store_cru[i / 4]);
	}
}
  1132. void wdt_register_save(void)
  1133. {
  1134. int i;
  1135. for (i = 0; i < 2; i++) {
  1136. store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4);
  1137. store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4);
  1138. }
  1139. pmu_enable_watchdog0 = (uint8_t) store_wdt0[0] & 0x1;
  1140. }
  1141. void wdt_register_restore(void)
  1142. {
  1143. int i;
  1144. for (i = 1; i >= 0; i--) {
  1145. mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]);
  1146. mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]);
  1147. }
  1148. /* write 0x76 to cnt_restart to keep watchdog alive */
  1149. mmio_write_32(WDT0_BASE + 0x0c, 0x76);
  1150. mmio_write_32(WDT1_BASE + 0x0c, 0x76);
  1151. }
/*
 * System-wide suspend entry: quiesce DDR, the GIC, power domains, bus
 * bridges, PLLs and peripherals, and stage everything the warm-boot
 * path needs.  Mirrored (in reverse) by rockchip_soc_sys_pwr_dm_resume.
 * Returns 0 on success (panics on ADB400 handshake timeout).
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	ddr_prepare_for_sys_suspend();
	dmc_suspend();
	pmu_scu_b_pwrdn();

	/* Save GIC redistributor/distributor state for restore on resume. */
	gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
	gicv3_distif_save(&dist_ctx);

	/* need to save usbphy before shutdown PERIHP PD */
	save_usbphy();

	pmu_power_domains_suspend();

	/* Request hardware idle on all the bus ports listed here. */
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	set_pmu_rsthold();
	sys_slp_config();

	/* Hand the suspend/resume helper code to the M0 and start it. */
	m0_configure_execute_addr(M0PMU_BINCODE_BASE);
	m0_start();

	pmu_sgrf_rst_hld();

	/* Warm-boot vector: wake up through pmu_cpuson_entrypoint. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
			CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);

	/* Software power-down requests for the big-cluster ADB400 bridges. */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();

	/* Poll until all three bridge power-down requests are acknowledged. */
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
		udelay(1);
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	wdt_register_save();
	secure_watchdog_gate();

	/*
	 * Disabling PLLs/PWM/DVFS is approaching WFI which is
	 * the last steps in suspend.
	 */
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	suspend_apio();
	suspend_gpio();
	suspend_uart();
	grf_register_save();
	cru_register_save();
	sram_save();
	plat_rockchip_save_gpio();

	return 0;
}
/*
 * System-wide resume: undo rockchip_soc_sys_pwr_dm_suspend() in the
 * reverse order — registers first, then power rails/PLLs, then the
 * interconnect bridges, power domains, GIC and the M0.  Returns 0 on
 * success (panics on ADB400 handshake timeout).
 */
int rockchip_soc_sys_pwr_dm_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	/* Put saved register state back before anything else runs. */
	plat_rockchip_restore_gpio();
	cru_register_restore();
	grf_register_restore();
	wdt_register_restore();
	resume_uart();
	resume_apio();
	resume_gpio();

	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300us to be safe. */
	udelay(300);
	enable_dvfs_plls();

	secure_sgrf_init();
	secure_sgrf_ddr_rgn_init();

	/* restore clk_ddrc_bpll_src_en gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));

	/*
	 * The wakeup status is not cleared by itself, we need to clear it
	 * manually. Otherwise we will alway query some interrupt next time.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash it
	 * somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	/* Warm-boot vector back to the normal per-CPU entry point. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	/* Take CCI500 idle/gating back from hardware control. */
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();

	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	/* Release all ADB400 bridges (big-cluster SW and little-cluster HW). */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	/* Poll until the big-cluster bridge power-down requests deassert. */
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	while ((mmio_read_32(PMU_BASE +
	   PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
		udelay(1);
	}

	pmu_scu_b_pwrup();
	pmu_power_domains_resume();

	restore_abpll();

	/* Drop the hardware idle requests asserted on suspend. */
	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
				BIT(PMU_CLR_ALIVE) |
				BIT(PMU_CLR_MSCH0) |
				BIT(PMU_CLR_MSCH1) |
				BIT(PMU_CLR_CCIM0) |
				BIT(PMU_CLR_CCIM1) |
				BIT(PMU_CLR_CENTER) |
				BIT(PMU_CLR_PERILP) |
				BIT(PMU_CLR_PERILPM0) |
				BIT(PMU_CLR_GIC));

	gicv3_distif_init_restore(&dist_ctx);
	gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
	plat_rockchip_gic_cpuif_enable();
	m0_stop();

	restore_usbphy();

	ddr_prepare_for_sys_resume();

	return 0;
}
  1300. void __dead2 rockchip_soc_soft_reset(void)
  1301. {
  1302. struct bl_aux_gpio_info *rst_gpio;
  1303. rst_gpio = plat_get_rockchip_gpio_reset();
  1304. if (rst_gpio) {
  1305. gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
  1306. gpio_set_value(rst_gpio->index, rst_gpio->polarity);
  1307. } else {
  1308. soc_global_soft_reset();
  1309. }
  1310. while (1)
  1311. ;
  1312. }
  1313. void __dead2 rockchip_soc_system_off(void)
  1314. {
  1315. struct bl_aux_gpio_info *poweroff_gpio;
  1316. poweroff_gpio = plat_get_rockchip_gpio_poweroff();
  1317. if (poweroff_gpio) {
  1318. /*
  1319. * if use tsadc over temp pin(GPIO1A6) as shutdown gpio,
  1320. * need to set this pin iomux back to gpio function
  1321. */
  1322. if (poweroff_gpio->index == TSADC_INT_PIN) {
  1323. mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
  1324. GPIO1A6_IOMUX);
  1325. }
  1326. gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
  1327. gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
  1328. } else {
  1329. WARN("Do nothing when system off\n");
  1330. }
  1331. while (1)
  1332. ;
  1333. }
  1334. void rockchip_plat_mmu_el3(void)
  1335. {
  1336. size_t sram_size;
  1337. /* sram.text size */
  1338. sram_size = (char *)&__bl31_sram_text_end -
  1339. (char *)&__bl31_sram_text_start;
  1340. mmap_add_region((unsigned long)&__bl31_sram_text_start,
  1341. (unsigned long)&__bl31_sram_text_start,
  1342. sram_size, MT_MEMORY | MT_RO | MT_SECURE);
  1343. /* sram.data size */
  1344. sram_size = (char *)&__bl31_sram_data_end -
  1345. (char *)&__bl31_sram_data_start;
  1346. mmap_add_region((unsigned long)&__bl31_sram_data_start,
  1347. (unsigned long)&__bl31_sram_data_start,
  1348. sram_size, MT_MEMORY | MT_RW | MT_SECURE);
  1349. sram_size = (char *)&__bl31_sram_stack_end -
  1350. (char *)&__bl31_sram_stack_start;
  1351. mmap_add_region((unsigned long)&__bl31_sram_stack_start,
  1352. (unsigned long)&__bl31_sram_stack_start,
  1353. sram_size, MT_MEMORY | MT_RW | MT_SECURE);
  1354. sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
  1355. mmap_add_region((unsigned long)&__sram_incbin_start,
  1356. (unsigned long)&__sram_incbin_start,
  1357. sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
  1358. }
  1359. void plat_rockchip_pmu_init(void)
  1360. {
  1361. uint32_t cpu;
  1362. rockchip_pd_lock_init();
  1363. /* register requires 32bits mode, switch it to 32 bits */
  1364. cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
  1365. for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
  1366. cpuson_flags[cpu] = 0;
  1367. for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
  1368. clst_warmboot_data[cpu] = 0;
  1369. /* config cpu's warm boot address */
  1370. mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
  1371. (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
  1372. CPU_BOOT_ADDR_WMASK);
  1373. mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);
  1374. /*
  1375. * Enable Schmitt trigger for better 32 kHz input signal, which is
  1376. * important for suspend/resume reliability among other things.
  1377. */
  1378. mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);
  1379. init_pmu_counts();
  1380. nonboot_cpus_off();
  1381. INFO("%s(%d): pd status %x\n", __func__, __LINE__,
  1382. mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
  1383. }