suspend.c

/*
 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <dram.h>
#include <plat_private.h>
#include <pmu.h>
#include <pmu_bits.h>
#include <pmu_regs.h>
#include <rk3399_def.h>
#include <secure.h>
#include <soc.h>
#include <suspend.h>

#define PMUGRF_OS_REG0                  0x300
#define PMUGRF_OS_REG1                  0x304
#define PMUGRF_OS_REG2                  0x308
#define PMUGRF_OS_REG3                  0x30c

#define CRU_SFTRST_DDR_CTRL(ch, n)      ((0x1 << (8 + 16 + (ch) * 4)) | \
                                         ((n) << (8 + (ch) * 4)))
#define CRU_SFTRST_DDR_PHY(ch, n)       ((0x1 << (9 + 16 + (ch) * 4)) | \
                                         ((n) << (9 + (ch) * 4)))

#define FBDIV_ENC(n)                    ((n) << 16)
#define FBDIV_DEC(n)                    (((n) >> 16) & 0xfff)
#define POSTDIV2_ENC(n)                 ((n) << 12)
#define POSTDIV2_DEC(n)                 (((n) >> 12) & 0x7)
#define POSTDIV1_ENC(n)                 ((n) << 8)
#define POSTDIV1_DEC(n)                 (((n) >> 8) & 0x7)
#define REFDIV_ENC(n)                   (n)
#define REFDIV_DEC(n)                   ((n) & 0x3f)

/* PMU CRU */
#define PMUCRU_RSTNHOLD_CON0            0x120
#define PMUCRU_RSTNHOLD_CON1            0x124

#define PRESET_GPIO0_HOLD(n)            (((n) << 7) | WMSK_BIT(7))
#define PRESET_GPIO1_HOLD(n)            (((n) << 8) | WMSK_BIT(8))

#define SYS_COUNTER_FREQ_IN_MHZ         (SYS_COUNTER_FREQ_IN_TICKS / 1000000)

__pmusramdata uint32_t dpll_data[PLL_CON_COUNT];
__pmusramdata uint32_t cru_clksel_con6;
__pmusramdata uint8_t pmu_enable_watchdog0;
/*
 * Copy @num registers from @src to @dst
 */
static __pmusramfunc void sram_regcpy(uintptr_t dst, uintptr_t src,
                                      uint32_t num)
{
        while (num--) {
                mmio_write_32(dst, mmio_read_32(src));
                dst += sizeof(uint32_t);
                src += sizeof(uint32_t);
        }
}
/*
 * Copy @num registers from @src to @dst
 * This is intentionally a copy of the sram_regcpy function. PMUSRAM functions
 * cannot be called from code running in DRAM.
 */
static void dram_regcpy(uintptr_t dst, uintptr_t src, uint32_t num)
{
        while (num--) {
                mmio_write_32(dst, mmio_read_32(src));
                dst += sizeof(uint32_t);
                src += sizeof(uint32_t);
        }
}
static __pmusramfunc uint32_t sram_get_timer_value(void)
{
        /*
         * Generic delay timer implementation expects the timer to be a down
         * counter. We apply bitwise NOT operator to the tick values returned
         * by read_cntpct_el0() to simulate the down counter.
         */
        return (uint32_t)(~read_cntpct_el0());
}
static __pmusramfunc void sram_udelay(uint32_t usec)
{
        uint32_t start, cnt, delta, total_ticks;

        /* counter is decreasing */
        start = sram_get_timer_value();
        total_ticks = usec * SYS_COUNTER_FREQ_IN_MHZ;
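        /*
         * Busy-wait until at least total_ticks have elapsed. The wrap-around
         * branch below covers a single overflow of the 32-bit down-counter.
         */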
        do {
                cnt = sram_get_timer_value();
                if (cnt > start) {
                        delta = UINT32_MAX - cnt;
                        delta += start;
                } else
                        delta = start - cnt;
        } while (delta <= total_ticks);
}
static __pmusramfunc void configure_sgrf(void)
{
        /*
         * SGRF_DDR_RGN_DPLL_CLK and SGRF_DDR_RGN_RTC_CLK:
         * IC ECO bug, need to set this register.
         *
         * SGRF_DDR_RGN_BYPS:
         * After the PD_CENTER suspend/resume, the DDR region
         * related registers in the SGRF will be reset, we
         * need to re-initialize them.
         */
        mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
                      SGRF_DDR_RGN_DPLL_CLK |
                      SGRF_DDR_RGN_RTC_CLK |
                      SGRF_DDR_RGN_BYPS);
}
static __pmusramfunc void rkclk_ddr_reset(uint32_t channel, uint32_t ctl,
                                          uint32_t phy)
{
        channel &= 0x1;
        ctl &= 0x1;
        phy &= 0x1;
        mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(4),
                      CRU_SFTRST_DDR_CTRL(channel, ctl) |
                      CRU_SFTRST_DDR_PHY(channel, phy));
}

static __pmusramfunc void phy_pctrl_reset(uint32_t ch)
{
        rkclk_ddr_reset(ch, 1, 1);
        sram_udelay(10);
        rkclk_ddr_reset(ch, 1, 0);
        sram_udelay(10);
        rkclk_ddr_reset(ch, 0, 0);
        sram_udelay(10);
}
static __pmusramfunc void set_cs_training_index(uint32_t ch, uint32_t rank)
{
        uint32_t byte;

        /* PHY_8/136/264/392 phy_per_cs_training_index_X 1bit offset_24 */
        for (byte = 0; byte < 4; byte++)
                mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 24,
                                   rank << 24);
}

static __pmusramfunc void select_per_cs_training_index(uint32_t ch,
                                                       uint32_t rank)
{
        /* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */
        if ((mmio_read_32(PHY_REG(ch, 84)) >> 16) & 1)
                set_cs_training_index(ch, rank);
}

static __pmusramfunc void override_write_leveling_value(uint32_t ch)
{
        uint32_t byte;

        for (byte = 0; byte < 4; byte++) {
                /*
                 * PHY_8/136/264/392
                 * phy_per_cs_training_multicast_en_X 1bit offset_16
                 */
                mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 16,
                                   1 << 16);
                mmio_clrsetbits_32(PHY_REG(ch, 63 + (128 * byte)),
                                   0xffffu << 16,
                                   0x200 << 16);
        }

        /* CTL_200 ctrlupd_req 1bit offset_8 */
        mmio_clrsetbits_32(CTL_REG(ch, 200), 0x1 << 8, 0x1 << 8);
}
static __pmusramfunc int data_training(uint32_t ch,
                                       struct rk3399_sdram_params *sdram_params,
                                       uint32_t training_flag)
{
        uint32_t obs_0, obs_1, obs_2, obs_3, obs_err = 0;
        uint32_t rank = sdram_params->ch[ch].rank;
        uint32_t rank_mask;
        uint32_t i, tmp;

        if (sdram_params->dramtype == LPDDR4)
                rank_mask = (rank == 1) ? 0x5 : 0xf;
        else
                rank_mask = (rank == 1) ? 0x1 : 0x3;

        /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
        mmio_setbits_32(PHY_REG(ch, 927), (1 << 22));

        if (training_flag == PI_FULL_TRAINING) {
                if (sdram_params->dramtype == LPDDR4) {
                        training_flag = PI_WRITE_LEVELING |
                                        PI_READ_GATE_TRAINING |
                                        PI_READ_LEVELING |
                                        PI_WDQ_LEVELING;
                } else if (sdram_params->dramtype == LPDDR3) {
                        training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING |
                                        PI_READ_GATE_TRAINING;
                } else if (sdram_params->dramtype == DDR3) {
                        training_flag = PI_WRITE_LEVELING |
                                        PI_READ_GATE_TRAINING |
                                        PI_READ_LEVELING;
                }
        }

        /* CA training (LPDDR4, LPDDR3 support) */
        if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) {
                for (i = 0; i < 4; i++) {
                        if (!(rank_mask & (1 << i)))
                                continue;

                        select_per_cs_training_index(ch, i);
                        /* PI_100 PI_CALVL_EN:RW:8:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 100), 0x3 << 8, 0x2 << 8);
                        /* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 92),
                                           (0x1 << 16) | (0x3 << 24),
                                           (0x1 << 16) | (i << 24));
                        while (1) {
                                /* PI_174 PI_INT_STATUS:RD:8:18 */
                                tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

                                /*
                                 * check status obs
                                 * PHY_532/660/788 phy_adr_calvl_obs1_:0:32
                                 */
                                obs_0 = mmio_read_32(PHY_REG(ch, 532));
                                obs_1 = mmio_read_32(PHY_REG(ch, 660));
                                obs_2 = mmio_read_32(PHY_REG(ch, 788));
                                if (((obs_0 >> 30) & 0x3) ||
                                    ((obs_1 >> 30) & 0x3) ||
                                    ((obs_2 >> 30) & 0x3))
                                        obs_err = 1;
                                if ((((tmp >> 11) & 0x1) == 0x1) &&
                                    (((tmp >> 13) & 0x1) == 0x1) &&
                                    (((tmp >> 5) & 0x1) == 0x0) &&
                                    (obs_err == 0))
                                        break;
                                else if ((((tmp >> 5) & 0x1) == 0x1) ||
                                         (obs_err == 1))
                                        return -1;
                        }
                        /* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
                        mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
                }
                mmio_clrbits_32(PI_REG(ch, 100), 0x3 << 8);
        }
        /* write leveling (LPDDR4, LPDDR3, DDR3 support) */
        if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) {
                for (i = 0; i < rank; i++) {
                        select_per_cs_training_index(ch, i);
                        /* PI_60 PI_WRLVL_EN:RW:8:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 60), 0x3 << 8, 0x2 << 8);
                        /* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 59),
                                           (0x1 << 8) | (0x3 << 16),
                                           (0x1 << 8) | (i << 16));
                        while (1) {
                                /* PI_174 PI_INT_STATUS:RD:8:18 */
                                tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

                                /*
                                 * Check the status obs; if it reports an
                                 * error, leveling may never complete.
                                 * PHY_40/168/296/424
                                 * phy_wrlvl_status_obs_X:0:13
                                 */
                                obs_0 = mmio_read_32(PHY_REG(ch, 40));
                                obs_1 = mmio_read_32(PHY_REG(ch, 168));
                                obs_2 = mmio_read_32(PHY_REG(ch, 296));
                                obs_3 = mmio_read_32(PHY_REG(ch, 424));
                                if (((obs_0 >> 12) & 0x1) ||
                                    ((obs_1 >> 12) & 0x1) ||
                                    ((obs_2 >> 12) & 0x1) ||
                                    ((obs_3 >> 12) & 0x1))
                                        obs_err = 1;
                                if ((((tmp >> 10) & 0x1) == 0x1) &&
                                    (((tmp >> 13) & 0x1) == 0x1) &&
                                    (((tmp >> 4) & 0x1) == 0x0) &&
                                    (obs_err == 0))
                                        break;
                                else if ((((tmp >> 4) & 0x1) == 0x1) ||
                                         (obs_err == 1))
                                        return -1;
                        }
                        /* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
                        mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
                }
                override_write_leveling_value(ch);
                mmio_clrbits_32(PI_REG(ch, 60), 0x3 << 8);
        }
        /* read gate training (LPDDR4, LPDDR3, DDR3 support) */
        if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) {
                for (i = 0; i < rank; i++) {
                        select_per_cs_training_index(ch, i);
                        /* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 24,
                                           0x2 << 24);
                        /*
                         * PI_74 PI_RDLVL_GATE_REQ:WR:16:1
                         * PI_RDLVL_CS:RW:24:2
                         */
                        mmio_clrsetbits_32(PI_REG(ch, 74),
                                           (0x1 << 16) | (0x3 << 24),
                                           (0x1 << 16) | (i << 24));
                        while (1) {
                                /* PI_174 PI_INT_STATUS:RD:8:18 */
                                tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

                                /*
                                 * check status obs
                                 * PHY_43/171/299/427
                                 * PHY_GTLVL_STATUS_OBS_x:16:8
                                 */
                                obs_0 = mmio_read_32(PHY_REG(ch, 43));
                                obs_1 = mmio_read_32(PHY_REG(ch, 171));
                                obs_2 = mmio_read_32(PHY_REG(ch, 299));
                                obs_3 = mmio_read_32(PHY_REG(ch, 427));
                                if (((obs_0 >> (16 + 6)) & 0x3) ||
                                    ((obs_1 >> (16 + 6)) & 0x3) ||
                                    ((obs_2 >> (16 + 6)) & 0x3) ||
                                    ((obs_3 >> (16 + 6)) & 0x3))
                                        obs_err = 1;
                                if ((((tmp >> 9) & 0x1) == 0x1) &&
                                    (((tmp >> 13) & 0x1) == 0x1) &&
                                    (((tmp >> 3) & 0x1) == 0x0) &&
                                    (obs_err == 0))
                                        break;
                                else if ((((tmp >> 3) & 0x1) == 0x1) ||
                                         (obs_err == 1))
                                        return -1;
                        }
                        /* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
                        mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
                }
                mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 24);
        }
        /* read leveling (LPDDR4, LPDDR3, DDR3 support) */
        if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) {
                for (i = 0; i < rank; i++) {
                        select_per_cs_training_index(ch, i);
                        /* PI_80 PI_RDLVL_EN:RW:16:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 16,
                                           0x2 << 16);
                        /* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 74),
                                           (0x1 << 8) | (0x3 << 24),
                                           (0x1 << 8) | (i << 24));
                        while (1) {
                                /* PI_174 PI_INT_STATUS:RD:8:18 */
                                tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

                                /*
                                 * make sure the status obs does not report
                                 * an error bit
                                 * PHY_46/174/302/430
                                 * phy_rdlvl_status_obs_X:16:8
                                 */
                                if ((((tmp >> 8) & 0x1) == 0x1) &&
                                    (((tmp >> 13) & 0x1) == 0x1) &&
                                    (((tmp >> 2) & 0x1) == 0x0))
                                        break;
                                else if (((tmp >> 2) & 0x1) == 0x1)
                                        return -1;
                        }
                        /* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
                        mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
                }
                mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 16);
        }
        /* wdq leveling (LPDDR4 support) */
        if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) {
                for (i = 0; i < 4; i++) {
                        if (!(rank_mask & (1 << i)))
                                continue;

                        select_per_cs_training_index(ch, i);
                        /*
                         * disable PI_WDQLVL_VREF_EN before wdq leveling?
                         * PI_181 PI_WDQLVL_VREF_EN:RW:8:1
                         */
                        mmio_clrbits_32(PI_REG(ch, 181), 0x1 << 8);
                        /* PI_124 PI_WDQLVL_EN:RW:16:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 124), 0x3 << 16,
                                           0x2 << 16);
                        /* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */
                        mmio_clrsetbits_32(PI_REG(ch, 121),
                                           (0x1 << 8) | (0x3 << 16),
                                           (0x1 << 8) | (i << 16));
                        while (1) {
                                /* PI_174 PI_INT_STATUS:RD:8:18 */
                                tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
                                if ((((tmp >> 12) & 0x1) == 0x1) &&
                                    (((tmp >> 13) & 0x1) == 0x1) &&
                                    (((tmp >> 6) & 0x1) == 0x0))
                                        break;
                                else if (((tmp >> 6) & 0x1) == 0x1)
                                        return -1;
                        }
                        /* clear interrupt, PI_175 PI_INT_ACK:WR:0:17 */
                        mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
                }
                mmio_clrbits_32(PI_REG(ch, 124), 0x3 << 16);
        }

        /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
        mmio_clrbits_32(PHY_REG(ch, 927), (1 << 22));

        return 0;
}
static __pmusramfunc void set_ddrconfig(
                struct rk3399_sdram_params *sdram_params,
                unsigned char channel, uint32_t ddrconfig)
{
        /* only need to set ddrconfig */
        struct rk3399_sdram_channel *ch = &sdram_params->ch[channel];
        unsigned int cs0_cap = 0;
        unsigned int cs1_cap = 0;
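        /*
         * cs0_cap below is the rank-0 capacity in MB: cs0_row + col + bk + bw
         * sum to log2 of the rank's byte capacity, and subtracting 20
         * converts bytes to MB. MSCH_DEVICESIZE is then programmed further
         * down in units of 32MB (cap / 32).
         */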
        cs0_cap = (1 << (ch->cs0_row + ch->col + ch->bk + ch->bw - 20));
        if (ch->rank > 1)
                cs1_cap = cs0_cap >> (ch->cs0_row - ch->cs1_row);
        if (ch->row_3_4) {
                cs0_cap = cs0_cap * 3 / 4;
                cs1_cap = cs1_cap * 3 / 4;
        }

        mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICECONF,
                      ddrconfig | (ddrconfig << 6));
        mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICESIZE,
                      ((cs0_cap / 32) & 0xff) | (((cs1_cap / 32) & 0xff) << 8));
}
static __pmusramfunc void dram_all_config(
                struct rk3399_sdram_params *sdram_params)
{
        unsigned int i;

        for (i = 0; i < 2; i++) {
                struct rk3399_sdram_channel *info = &sdram_params->ch[i];
                struct rk3399_msch_timings *noc = &info->noc_timings;

                if (sdram_params->ch[i].col == 0)
                        continue;

                mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGA0,
                              noc->ddrtiminga0.d32);
                mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGB0,
                              noc->ddrtimingb0.d32);
                mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGC0,
                              noc->ddrtimingc0.d32);
                mmio_write_32(MSCH_BASE(i) + MSCH_DEVTODEV0,
                              noc->devtodev0.d32);
                mmio_write_32(MSCH_BASE(i) + MSCH_DDRMODE, noc->ddrmode.d32);

                /* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */
                if (sdram_params->ch[i].rank == 1)
                        mmio_setbits_32(CTL_REG(i, 276), 1 << 17);
        }

        DDR_STRIDE(sdram_params->stride);

        /* reboot hold register set */
        mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
                      CRU_PMU_SGRF_RST_RLS |
                      PRESET_GPIO0_HOLD(1) |
                      PRESET_GPIO1_HOLD(1));
        mmio_clrsetbits_32(CRU_BASE + CRU_GLB_RST_CON, 0x3, 0x3);
}
static __pmusramfunc void pctl_cfg(uint32_t ch,
                                   struct rk3399_sdram_params *sdram_params)
{
        const uint32_t *params_ctl = sdram_params->pctl_regs.denali_ctl;
        const uint32_t *params_pi = sdram_params->pi_regs.denali_pi;
        const struct rk3399_ddr_publ_regs *phy_regs = &sdram_params->phy_regs;
        uint32_t tmp, tmp1, tmp2, i;

        /*
         * Workaround controller bug:
         * Do not program DRAM_CLASS until NO_PHY_IND_TRAIN_INT is programmed
         */
        sram_regcpy(CTL_REG(ch, 1), (uintptr_t)&params_ctl[1],
                    CTL_REG_NUM - 1);
        mmio_write_32(CTL_REG(ch, 0), params_ctl[0]);
        sram_regcpy(PI_REG(ch, 0), (uintptr_t)&params_pi[0],
                    PI_REG_NUM);

        sram_regcpy(PHY_REG(ch, 910), (uintptr_t)&phy_regs->phy896[910 - 896],
                    3);

        mmio_clrsetbits_32(CTL_REG(ch, 68), PWRUP_SREFRESH_EXIT,
                           PWRUP_SREFRESH_EXIT);

        /* PHY_DLL_RST_EN */
        mmio_clrsetbits_32(PHY_REG(ch, 957), 0x3 << 24, 1 << 24);
        dmbst();

        mmio_setbits_32(PI_REG(ch, 0), START);
        mmio_setbits_32(CTL_REG(ch, 0), START);

        /* wait for lock */
        while (1) {
                tmp = mmio_read_32(PHY_REG(ch, 920));
                tmp1 = mmio_read_32(PHY_REG(ch, 921));
                tmp2 = mmio_read_32(PHY_REG(ch, 922));
                if ((((tmp >> 16) & 0x1) == 0x1) &&
                    (((tmp1 >> 16) & 0x1) == 0x1) &&
                    (((tmp1 >> 0) & 0x1) == 0x1) &&
                    (((tmp2 >> 0) & 0x1) == 0x1))
                        break;

                /* if the PLL is bypassed, there is no need to wait for lock */
                if (mmio_read_32(PHY_REG(ch, 911)) & 0x1)
                        break;
        }

        sram_regcpy(PHY_REG(ch, 896), (uintptr_t)&phy_regs->phy896[0], 63);

        for (i = 0; i < 4; i++)
                sram_regcpy(PHY_REG(ch, 128 * i),
                            (uintptr_t)&phy_regs->phy0[0], 91);

        for (i = 0; i < 3; i++)
                sram_regcpy(PHY_REG(ch, 512 + 128 * i),
                            (uintptr_t)&phy_regs->phy512[i][0], 38);
}
static __pmusramfunc int dram_switch_to_next_index(
                struct rk3399_sdram_params *sdram_params)
{
        uint32_t ch, ch_count;
        uint32_t fn = ((mmio_read_32(CTL_REG(0, 111)) >> 16) + 1) & 0x1;
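        /*
         * fn is the frequency index to switch to: the current index is read
         * from CTL_111 (bit offset 16) and incremented modulo 2, i.e. toggled
         * to the other of the two indices.
         */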
        mmio_write_32(CIC_BASE + CIC_CTRL0,
                      (((0x3 << 4) | (1 << 2) | 1) << 16) |
                      (fn << 4) | (1 << 2) | 1);
        while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)))
                ;

        mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
        while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)))
                ;

        ch_count = sdram_params->num_channels;

        /* LPDDR4 f2 cannot do training; all training would fail */
        for (ch = 0; ch < ch_count; ch++) {
                /*
                 * Unless this is skipped for LPDDR4, we end up writing 0's
                 * in place of real data in an interesting pattern.
                 */
                if (sdram_params->dramtype != LPDDR4) {
                        mmio_clrsetbits_32(PHY_REG(ch, 896), (0x3 << 8) | 1,
                                           fn << 8);
                }

                /* data_training failed */
                if (data_training(ch, sdram_params, PI_FULL_TRAINING))
                        return -1;
        }

        return 0;
}
/*
 * Needs to be done for both channels at once in case of a shared reset signal
 * between channels.
 */
static __pmusramfunc int pctl_start(uint32_t channel_mask,
                                    struct rk3399_sdram_params *sdram_params)
{
        uint32_t count;
        uint32_t byte;

        mmio_setbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
        mmio_setbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);

        /* need to release I/O retention before the controller START */
        if (channel_mask & (1 << 0))
                mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 19));
        if (channel_mask & (1 << 1))
                mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 23));

        /* PHY_DLL_RST_EN */
        if (channel_mask & (1 << 0))
                mmio_clrsetbits_32(PHY_REG(0, 957), 0x3 << 24,
                                   0x2 << 24);
        if (channel_mask & (1 << 1))
                mmio_clrsetbits_32(PHY_REG(1, 957), 0x3 << 24,
                                   0x2 << 24);

        /* check ERROR bit */
        if (channel_mask & (1 << 0)) {
                count = 0;
                while (!(mmio_read_32(CTL_REG(0, 203)) & (1 << 3))) {
                        /* CKE is low, loop for at most 10ms */
                        if (count > 100)
                                return -1;
                        sram_udelay(100);
                        count++;
                }

                mmio_clrbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);

                /* Restore the PHY_RX_CAL_DQS value */
                for (byte = 0; byte < 4; byte++)
                        mmio_clrsetbits_32(PHY_REG(0, 57 + 128 * byte),
                                           0xfff << 16,
                                           sdram_params->rx_cal_dqs[0][byte]);
        }
        if (channel_mask & (1 << 1)) {
                count = 0;
                while (!(mmio_read_32(CTL_REG(1, 203)) & (1 << 3))) {
                        /* CKE is low, loop for at most 10ms */
                        if (count > 100)
                                return -1;
                        sram_udelay(100);
                        count++;
                }

                mmio_clrbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);

                /* Restore the PHY_RX_CAL_DQS value */
                for (byte = 0; byte < 4; byte++)
                        mmio_clrsetbits_32(PHY_REG(1, 57 + 128 * byte),
                                           0xfff << 16,
                                           sdram_params->rx_cal_dqs[1][byte]);
        }

        return 0;
}
__pmusramfunc static void pmusram_restore_pll(int pll_id, uint32_t *src)
{
        mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);

        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK);
        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK);
        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]);
        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK);
        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK);

        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK);
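        /*
         * The final CON3 write restores the saved mode bits (CON3 was forced
         * to slow mode above); then poll the lock flag in PLL_CON2 (bit 31)
         * until the PLL reports lock.
         */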
        while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) &
                (1U << 31)) == 0x0)
                ;
}
__pmusramfunc static void pmusram_enable_watchdog(void)
{
        /* Make the watchdog use the first global reset. */
        mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, 1 << 1);

        /*
         * This gives the system ~8 seconds before reset. The pclk for the
         * watchdog is 4MHz on reset. The value of 0x9 in WDT_TORR means that
         * the watchdog will wait for 0x1ffffff cycles before resetting.
         */
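        /* 0x1ffffff cycles at 4MHz is roughly 33.6M / 4M, i.e. ~8.4 seconds. */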
        mmio_write_32(WDT0_BASE + 4, 0x9);

        /* Enable the watchdog */
        mmio_setbits_32(WDT0_BASE, 0x1);

        /* Magic reset the watchdog timer value for WDT_CRR. */
        mmio_write_32(WDT0_BASE + 0xc, 0x76);

        secure_watchdog_ungate();

        /* The watchdog is in PD_ALIVE, so deidle it. */
        mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, PMU_CLR_ALIVE);
}
void dmc_suspend(void)
{
        struct rk3399_sdram_params *sdram_params = &sdram_config;
        struct rk3399_ddr_publ_regs *phy_regs;
        uint32_t *params_ctl;
        uint32_t *params_pi;
        uint32_t refdiv, postdiv2, postdiv1, fbdiv;
        uint32_t ch, byte, i;

        phy_regs = &sdram_params->phy_regs;
        params_ctl = sdram_params->pctl_regs.denali_ctl;
        params_pi = sdram_params->pi_regs.denali_pi;

        /* save the dpll and ddr clock register values to pmusram */
        cru_clksel_con6 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON6);
        for (i = 0; i < PLL_CON_COUNT; i++)
                dpll_data[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, i));

        fbdiv = dpll_data[0] & 0xfff;
        postdiv2 = POSTDIV2_DEC(dpll_data[1]);
        postdiv1 = POSTDIV1_DEC(dpll_data[1]);
        refdiv = REFDIV_DEC(dpll_data[1]);
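        /*
         * DPLL output frequency = 24MHz reference * fbdiv /
         * (refdiv * postdiv1 * postdiv2); the constant 24 below is the
         * 24MHz crystal oscillator feeding the PLL.
         */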
        sdram_params->ddr_freq = ((fbdiv * 24) /
                                  (refdiv * postdiv1 * postdiv2)) * MHz;
        INFO("sdram_params->ddr_freq = %d\n", sdram_params->ddr_freq);
        sdram_params->odt = (((mmio_read_32(PHY_REG(0, 5)) >> 16) &
                              0x7) != 0) ? 1 : 0;

        /* copy the CTL, PI and PHY registers */
        dram_regcpy((uintptr_t)&params_ctl[0], CTL_REG(0, 0), CTL_REG_NUM);
        /* mask DENALI_CTL_00_DATA.START, only copy here, will trigger later */
        params_ctl[0] &= ~(0x1 << 0);
        dram_regcpy((uintptr_t)&params_pi[0], PI_REG(0, 0),
                    PI_REG_NUM);
        /* mask DENALI_PI_00_DATA.START, only copy here, will trigger later */
        params_pi[0] &= ~(0x1 << 0);

        dram_regcpy((uintptr_t)&phy_regs->phy0[0],
                    PHY_REG(0, 0), 91);
        for (i = 0; i < 3; i++)
                dram_regcpy((uintptr_t)&phy_regs->phy512[i][0],
                            PHY_REG(0, 512 + 128 * i), 38);
        dram_regcpy((uintptr_t)&phy_regs->phy896[0], PHY_REG(0, 896), 63);

        for (ch = 0; ch < sdram_params->num_channels; ch++) {
                for (byte = 0; byte < 4; byte++)
                        sdram_params->rx_cal_dqs[ch][byte] = (0xfff << 16) &
                                mmio_read_32(PHY_REG(ch, 57 + byte * 128));
        }

        /* set DENALI_PHY_957_DATA.PHY_DLL_RST_EN = 0x1 */
        phy_regs->phy896[957 - 896] &= ~(0x3 << 24);
        phy_regs->phy896[957 - 896] |= 1 << 24;
        phy_regs->phy896[0] |= 1;
        phy_regs->phy896[0] &= ~(0x3 << 8);
}
__pmusramfunc void phy_dll_bypass_set(uint32_t ch, uint32_t freq)
{
        if (freq <= (125 * 1000 * 1000)) {
                /* Set master mode to SW for slices */
                mmio_setbits_32(PHY_REG(ch, 86), 3 << 10);
                mmio_setbits_32(PHY_REG(ch, 214), 3 << 10);
                mmio_setbits_32(PHY_REG(ch, 342), 3 << 10);
                mmio_setbits_32(PHY_REG(ch, 470), 3 << 10);
                /* Set master mode to SW for address slices */
                mmio_setbits_32(PHY_REG(ch, 547), 3 << 18);
                mmio_setbits_32(PHY_REG(ch, 675), 3 << 18);
                mmio_setbits_32(PHY_REG(ch, 803), 3 << 18);
        } else {
                /* Clear SW master mode for slices */
                mmio_clrbits_32(PHY_REG(ch, 86), 3 << 10);
                mmio_clrbits_32(PHY_REG(ch, 214), 3 << 10);
                mmio_clrbits_32(PHY_REG(ch, 342), 3 << 10);
                mmio_clrbits_32(PHY_REG(ch, 470), 3 << 10);
                /* Clear SW master mode for address slices */
                mmio_clrbits_32(PHY_REG(ch, 547), 3 << 18);
                mmio_clrbits_32(PHY_REG(ch, 675), 3 << 18);
                mmio_clrbits_32(PHY_REG(ch, 803), 3 << 18);
        }
}
__pmusramfunc void dmc_resume(void)
{
        struct rk3399_sdram_params *sdram_params = &sdram_config;
        uint32_t channel_mask = 0;
        uint32_t channel;

        /*
         * We can't turn off the watchdog, so if we have not turned it on
         * before we should not turn it on here.
         */
        if ((pmu_enable_watchdog0 & 0x1) == 0x1) {
                pmusram_enable_watchdog();
        }
        pmu_sgrf_rst_hld_release();
        restore_pmu_rsthold();
        sram_secure_timer_init();

        /*
         * The DDR clock was switched to ABPLL during suspend;
         * switch it back to DPLL here.
         */
        mmio_write_32(CRU_BASE + CRU_CLKSEL_CON6,
                      cru_clksel_con6 | REG_SOC_WMSK);
        pmusram_restore_pll(DPLL_ID, dpll_data);

        configure_sgrf();

retry:
        for (channel = 0; channel < sdram_params->num_channels; channel++) {
                phy_pctrl_reset(channel);
                /*
                 * Without this, LPDDR4 will write 0's in place of real data
                 * in a strange pattern.
                 */
                if (sdram_params->dramtype == LPDDR4) {
                        phy_dll_bypass_set(channel, sdram_params->ddr_freq);
                }
                pctl_cfg(channel, sdram_params);
        }

        for (channel = 0; channel < 2; channel++) {
                if (sdram_params->ch[channel].col)
                        channel_mask |= 1 << channel;
        }

        if (pctl_start(channel_mask, sdram_params) < 0)
                goto retry;

        for (channel = 0; channel < sdram_params->num_channels; channel++) {
                /* LPDDR2/LPDDR3 need to wait for DAI to complete, max 10us */
                if (sdram_params->dramtype == LPDDR3)
                        sram_udelay(10);

                /*
                 * Training here will always fail for LPDDR4, so skip it.
                 * If training fails, retry it.
                 */
                if (sdram_params->dramtype != LPDDR4 &&
                    data_training(channel, sdram_params, PI_FULL_TRAINING))
                        goto retry;

                set_ddrconfig(sdram_params, channel,
                              sdram_params->ch[channel].ddrconfig);
        }

        dram_all_config(sdram_params);

        /* Switch to index 1 and prepare for DDR frequency switch. */
        dram_switch_to_next_index(sdram_params);
}