dram.c
/*
 * Copyright 2019-2023 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <string.h>

#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>

#include <dram.h>
#include <gpc.h>

#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT		0x10
#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO		0x11

struct dram_info dram_info;

/* lock used for DDR DVFS */
spinlock_t dfs_lock;

#if defined(PLAT_imx8mq)
/* OCRAM buffer used to hold a copy of the DRAM timing info */
static uint8_t dram_timing_saved[13 * 1024] __aligned(8);
#endif

static volatile uint32_t wfe_done;
static volatile bool wait_ddrc_hwffc_done = true;
static unsigned int dev_fsp = 0x1;

static uint32_t fsp_init_reg[3][4] = {
        { DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
        { DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
        { DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
};

#if defined(PLAT_imx8mq)
static inline struct dram_cfg_param *get_cfg_ptr(void *ptr,
                void *old_base, void *new_base)
{
        uintptr_t offset = (uintptr_t)ptr & ~((uintptr_t)old_base);

        return (struct dram_cfg_param *)(offset + new_base);
}

/* copy the dram timing info from DRAM to OCRAM */
void imx8mq_dram_timing_copy(struct dram_timing_info *from)
{
        struct dram_timing_info *info = (struct dram_timing_info *)dram_timing_saved;

        /* copy the whole 13KB content used for the dram timing info */
        memcpy(dram_timing_saved, from, sizeof(dram_timing_saved));

        /* fix up the header pointers after the copy into OCRAM */
        info->ddrc_cfg = get_cfg_ptr(info->ddrc_cfg, from, dram_timing_saved);
        info->ddrphy_cfg = get_cfg_ptr(info->ddrphy_cfg, from, dram_timing_saved);
        info->ddrphy_trained_csr = get_cfg_ptr(info->ddrphy_trained_csr, from, dram_timing_saved);
        info->ddrphy_pie = get_cfg_ptr(info->ddrphy_pie, from, dram_timing_saved);
}
#endif
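
/*
 * Illustrative note (not part of the original driver): get_cfg_ptr()
 * recovers a pointer's byte offset by masking off the bits of the old
 * base address, which only works if the source timing blob sits at an
 * address aligned beyond its own size. A hedged, alignment-independent
 * sketch of the same fix-up would be:
 *
 *	uintptr_t offset = (uintptr_t)ptr - (uintptr_t)old_base;
 *
 *	return (struct dram_cfg_param *)((uintptr_t)new_base + offset);
 */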
#if defined(PLAT_imx8mp)
static uint32_t lpddr4_mr_read(unsigned int mr_rank, unsigned int mr_addr)
{
        unsigned int tmp, drate_byte;

        tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
        mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), tmp | 0x1);

        /* wait until the DDRC has finished any previous MR access */
        do {
                tmp = mmio_read_32(DDRC_MRSTAT(0));
        } while (tmp & 0x1);

        /* issue the MR read to the selected rank */
        mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | 0x1);
        mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8));
        mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | BIT(31) | 0x1);

        /* Workaround for SNPS STAR 9001549457 */
        do {
                tmp = mmio_read_32(DDRC_MRSTAT(0));
        } while (tmp & 0x1);

        /* wait for the MR read data to become valid */
        do {
                tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
        } while (!(tmp & 0x8));

        tmp = mmio_read_32(DRC_PERF_MON_MRR1_DAT(0));
        drate_byte = (mmio_read_32(DDRC_DERATEEN(0)) >> 4) & 0xff;
        tmp = (tmp >> (drate_byte * 8)) & 0xff;
        mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), 0x4);

        return tmp;
}
#endif
static void get_mr_values(uint32_t (*mr_value)[8])
{
        uint32_t init_val;
        unsigned int i, fsp_index;

        for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
                for (i = 0U; i < 4U; i++) {
                        /* each INIT register packs two mode registers per 32-bit word */
                        init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
                        mr_value[fsp_index][2 * i] = init_val >> 16;
                        mr_value[fsp_index][2 * i + 1] = init_val & 0xFFFF;
                }

#if defined(PLAT_imx8mp)
                if (dram_info.dram_type == DDRC_LPDDR4) {
                        /* MR12/MR14 are adjusted by training, so read the live values back from the DRAM */
                        mr_value[fsp_index][5] = lpddr4_mr_read(1, MR12);
                        mr_value[fsp_index][7] = lpddr4_mr_read(1, MR14);
                }
#endif
        }
}
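
/*
 * Worked example (illustrative, value hypothetical): if DDRC_INIT3(0)
 * reads back as 0x00740054, the loop above stores
 * mr_value[fsp][0] = 0x0074 (upper half) and mr_value[fsp][1] = 0x0054
 * (lower half); INIT4/INIT6/INIT7 fill entries 2..7 the same way.
 */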
static void save_rank_setting(void)
{
        uint32_t i, offset;
        uint32_t pstate_num = dram_info.num_fsp;

        /* only a maximum of 3 setpoints is supported */
        pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num;

        for (i = 0U; i < pstate_num; i++) {
                /* the FREQ1/FREQ2 register copies live at the 0x2000/0x3000 offsets */
                offset = i ? (i + 1) * 0x1000 : 0U;
                dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset);
                if (dram_info.dram_type != DDRC_LPDDR4) {
                        dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset);
                }
#if !defined(PLAT_imx8mq)
                dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset);
#endif
        }

#if defined(PLAT_imx8mq)
        dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0));
#endif
}
/* Restore the ddrc configs */
void dram_umctl2_init(struct dram_timing_info *timing)
{
        struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
        unsigned int i;

        for (i = 0U; i < timing->ddrc_cfg_num; i++) {
                mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
                ddrc_cfg++;
        }

        /* set the default fsp to P0 */
        mmio_write_32(DDRC_MSTR2(0), 0x0);
}
/* Restore the dram PHY config */
void dram_phy_init(struct dram_timing_info *timing)
{
        struct dram_cfg_param *cfg = timing->ddrphy_cfg;
        unsigned int i;

        /* Restore the PHY init config */
        for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
                dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
                cfg++;
        }

        /* Restore the DDR PHY CSRs */
        cfg = timing->ddrphy_trained_csr;
        for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
                dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
                cfg++;
        }

        /* Load the PIE image */
        cfg = timing->ddrphy_pie;
        for (i = 0U; i < timing->ddrphy_pie_num; i++) {
                dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
                cfg++;
        }
}
/* EL3 SGI-8 IPI handler for DDR dynamic frequency scaling */
static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
                void *handle, void *cookie)
{
        uint64_t mpidr = read_mpidr_el1();
        unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
        uint32_t irq;

        irq = plat_ic_acknowledge_interrupt();
        if (irq < 1022U) {
                plat_ic_end_of_interrupt(irq);
        }

        /* set the WFE done status */
        spin_lock(&dfs_lock);
        wfe_done |= (1 << (cpu_id * 8));
        dsb();
        spin_unlock(&dfs_lock);

        while (1) {
                /* ddr frequency change done */
                if (!wait_ddrc_hwffc_done)
                        break;

                wfe();
        }

        return 0;
}
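
/*
 * Summary of the handshake (descriptive note, not in the original source):
 * the core driving the frequency switch sets wait_ddrc_hwffc_done and sends
 * SGI-8 so each other online core enters this handler; each of them marks
 * its byte in the wfe_done bitmap and parks in WFE. Once wfe_done matches
 * the online-core mask, the initiator reprograms the DDRC, clears
 * wait_ddrc_hwffc_done and issues sev() to release the parked cores.
 */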
void dram_info_init(unsigned long dram_timing_base)
{
        uint32_t ddrc_mstr, current_fsp;
        unsigned int idx = 0;
        uint32_t flags = 0;
        uint32_t rc;
        unsigned int i;

        /* Get the dram type & rank */
        ddrc_mstr = mmio_read_32(DDRC_MSTR(0));
        dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
        dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ?
                DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK;

        /* Get the current fsp info */
        current_fsp = mmio_read_32(DDRC_DFIMISC(0));
        current_fsp = (current_fsp >> 8) & 0xf;
        dram_info.boot_fsp = current_fsp;
        dram_info.current_fsp = current_fsp;

#if defined(PLAT_imx8mq)
        imx8mq_dram_timing_copy((struct dram_timing_info *)dram_timing_base);
        dram_timing_base = (unsigned long)dram_timing_saved;
#endif

        get_mr_values(dram_info.mr_table);

        dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;

        /* get the number of supported fsp */
        for (i = 0U; i < 4U; ++i) {
                if (!dram_info.timing_info->fsp_table[i]) {
                        break;
                }
                idx = i;
        }

        /* only a maximum of 3 setpoints is supported */
        dram_info.num_fsp = (i > MAX_FSP_NUM) ? MAX_FSP_NUM : i;

        /* no valid fsp table, return directly */
        if (i == 0U) {
                return;
        }

        /* save the DRAMTMG2/9 for the rank-to-rank workaround */
        save_rank_setting();

        /* bypass mode is supported if the lowest setpoint is below 666MTS */
        if (dram_info.timing_info->fsp_table[idx] < 666) {
                dram_info.bypass_mode = true;
        } else {
                dram_info.bypass_mode = false;
        }

        /* Register the EL3 handler for DDR DVFS */
        set_interrupt_rm_flag(flags, NON_SECURE);
        rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
        if (rc != 0) {
                panic();
        }

        /* if not booted from fsp P0, switch back to P0 so the runtime state is known */
        if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) {
                /* flush the L1/L2 cache */
                dcsw_op_all(DCCSW);
                lpddr4_swffc(&dram_info, dev_fsp, 0x0);
                dev_fsp = (~dev_fsp) & 0x1;
        } else if (current_fsp != 0x0) {
                /* flush the L1/L2 cache */
                dcsw_op_all(DCCSW);
                ddr4_swffc(&dram_info, 0x0);
        }
}
/*
 * For each freq return the following info:
 *
 * r1: data rate
 * r2: 1 + dram_core parent index
 * r3: 1 + dram_alt parent index
 * r4: 1 + dram_apb parent index
 *
 * The parent indices can be used by an OS that manages the source clocks
 * to enable them ahead of the switch.
 *
 * A parent value of "0" means "don't care".
 *
 * The current implementation of the freq switch is hardcoded in
 * plat/imx/common/imx8m/clock.c, but in theory this can be enhanced to
 * support a wide variety of rates.
 */
int dram_dvfs_get_freq_info(void *handle, u_register_t index)
{
        switch (index) {
        case 0:
                SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
                        1, 0, 5);
        case 1:
                if (!dram_info.bypass_mode) {
                        SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
                                1, 0, 0);
                }
                SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
                        2, 2, 4);
        case 2:
                if (!dram_info.bypass_mode) {
                        SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
                                1, 0, 0);
                }
                SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
                        2, 3, 3);
        case 3:
                SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
                        1, 0, 0);
        default:
                SMC_RET1(handle, -3);
        }
}
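
/*
 * Minimal caller sketch (illustrative, not part of this file), assuming
 * the Linux arm_smccc interface and the IMX_SIP_DDR_DVFS function ID from
 * imx_sip_svc.h; it queries one setpoint and unpacks the r1..r4 values
 * documented above (SMC_RET4 maps them to res.a0..a3):
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
 *		      index, 0, 0, 0, 0, 0, &res);
 *	data_rate   = res.a0;	// r1: data rate of this setpoint
 *	core_parent = res.a1;	// r2: 1 + dram_core parent index, 0 = don't care
 *	alt_parent  = res.a2;	// r3: 1 + dram_alt parent index
 *	apb_parent  = res.a3;	// r4: 1 + dram_apb parent index
 */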
int dram_dvfs_handler(uint32_t smc_fid, void *handle,
        u_register_t x1, u_register_t x2, u_register_t x3)
{
        uint64_t mpidr = read_mpidr_el1();
        unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
        unsigned int fsp_index = x1;
        uint32_t online_cores = x2;

        if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
                SMC_RET1(handle, dram_info.num_fsp);
        } else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
                return dram_dvfs_get_freq_info(handle, x2);
        } else if (x1 < 3U) {
                wait_ddrc_hwffc_done = true;
                dsb();

                /* trigger an SGI IPI to inform the other online cores */
                for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
                        if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
                                plat_ic_raise_el3_sgi(0x8, i);
                        }
                }

#if defined(PLAT_imx8mq)
                for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
                        if (i != cpu_id && online_cores & (1 << (i * 8))) {
                                imx_gpc_core_wake(1 << i);
                        }
                }
#endif

                /* wait until all the other online cores are in WFE */
                online_cores &= ~(0x1 << (cpu_id * 8));
                while (1) {
                        if (online_cores == wfe_done) {
                                break;
                        }
                }

                /* flush the L1/L2 cache */
                dcsw_op_all(DCCSW);

                if (dram_info.dram_type == DDRC_LPDDR4) {
                        lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
                        dev_fsp = (~dev_fsp) & 0x1;
                } else {
                        ddr4_swffc(&dram_info, fsp_index);
                }

                dram_info.current_fsp = fsp_index;
                wait_ddrc_hwffc_done = false;
                wfe_done = 0;
                dsb();
                sev();
                isb();
        }

        SMC_RET1(handle, 0);
}
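
/*
 * Usage sketch (illustrative, not part of this file): a non-secure caller
 * requests a switch by passing the target setpoint in x1 and an online-core
 * bitmap in x2, where each online core i contributes bit (i * 8), matching
 * the per-core byte layout of wfe_done above. Assuming the Linux arm_smccc
 * interface and IMX_SIP_DDR_DVFS from imx_sip_svc.h:
 *
 *	struct arm_smccc_res res;
 *	unsigned long online_cores = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		online_cores |= 1UL << (cpu * 8);
 *
 *	arm_smccc_smc(IMX_SIP_DDR_DVFS, fsp_index, online_cores,
 *		      0, 0, 0, 0, 0, &res);
 */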