cdns_nand.c

/*
 * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/cadence/cdns_nand.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/utils.h>

#include <platform_def.h>

/* NAND flash device information struct */
static cnf_dev_info_t dev_info;

/*
 * Scratch buffer for read and write operations. The DMA engine of the
 * Cadence NAND controller expects the buffer address written to its
 * register to be 8-byte aligned.
 */
static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE] __aligned(8);

/* Wait for controller to be in idle state */
static inline void cdns_nand_wait_idle(void)
{
	uint32_t reg = 0U;

	do {
		udelay(CNF_DEF_DELAY_US);
		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
	} while (CNF_GET_CTRL_BUSY(reg) != 0U);
}

/* Wait for given thread to be in ready state */
static inline void cdns_nand_wait_thread_ready(uint8_t thread_id)
{
	uint32_t reg = 0U;

	do {
		udelay(CNF_DEF_DELAY_US);
		reg = mmio_read_32(CNF_CMDREG(TRD_STATUS));
		reg &= (1U << (uint32_t)thread_id);
	} while (reg != 0U);
}

/* Check if the last operation/command in the selected thread is complete */
static int cdns_nand_last_opr_status(uint8_t thread_id)
{
	uint8_t nthreads = 0U;
	uint32_t reg = 0U;

	/* Get number of threads */
	reg = mmio_read_32(CNF_CTRLPARAM(FEATURE));
	nthreads = CNF_GET_NTHREADS(reg);

	/* Thread IDs are zero-based, so nthreads itself is out of range */
	if (thread_id >= nthreads) {
		ERROR("%s: Invalid thread ID\n", __func__);
		return -EINVAL;
	}

	/* Select thread */
	mmio_write_32(CNF_CMDREG(CMD_STAT_PTR), (uint32_t)thread_id);

	uint32_t err_mask = CNF_ECMD | CNF_EECC | CNF_EDEV | CNF_EDQS | CNF_EFAIL |
			    CNF_EBUS | CNF_EDI | CNF_EPAR | CNF_ECTX | CNF_EPRO;

	do {
		udelay(CNF_DEF_DELAY_US * 2);
		reg = mmio_read_32(CNF_CMDREG(CMD_STAT));
	} while ((reg & CNF_CMPLT) == 0U);

	/* The last operation is complete; make sure no error bits are set */
	if ((reg & err_mask) != 0U) {
		ERROR("%s: CMD_STATUS: 0x%x\n", __func__, reg);
		return -EIO;
	}

	return 0;
}

/* Set feature command */
int cdns_nand_set_feature(uint8_t feat_addr, uint8_t feat_val, uint8_t thread_id)
{
	/* Wait for thread to be ready */
	cdns_nand_wait_thread_ready(thread_id);

	/* Set feature address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), (uint32_t)feat_addr);
	/* Set feature value */
	mmio_write_32(CNF_CMDREG(CMD_REG2), (uint32_t)feat_val);

	/* Issue the set-feature command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (thread_id << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_SET_FEATURE << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	return cdns_nand_last_opr_status(thread_id);
}
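
/*
 * Usage sketch (hypothetical values): per the ONFI specification, feature
 * address 0x01 selects the timing mode, so a caller could request timing
 * mode 0 on the default thread with:
 *
 *	(void)cdns_nand_set_feature(0x01U, 0x00U, CNF_DEF_TRD);
 */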

/* Reset command to the selected device */
int cdns_nand_reset(uint8_t thread_id)
{
	/* Operation is executed in the selected thread */
	cdns_nand_wait_thread_ready(thread_id);

	/* Select memory */
	mmio_write_32(CNF_CMDREG(CMD_REG4),
		      (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Issue reset command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (thread_id << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_RESET_ASYNC << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	return cdns_nand_last_opr_status(thread_id);
}

/* Set operation work mode */
static void cdns_nand_set_opr_mode(uint8_t opr_mode)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Hold the DLL PHY in reset while reprogramming timings */
	uint32_t reg = mmio_read_32(CNF_MINICTRL(DLL_PHY_CTRL));

	reg &= ~(1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);

	if (opr_mode == CNF_OPR_WORK_MODE_SDR) {
		/* Combo PHY Control Timing Block register settings */
		mmio_write_32(CP_CTB(CTRL_REG), CP_CTRL_REG_SDR);
		mmio_write_32(CP_CTB(TSEL_REG), CP_TSEL_REG_SDR);

		/* Combo PHY DLL register settings */
		mmio_write_32(CP_DLL(DQ_TIMING_REG), CP_DQ_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(DQS_TIMING_REG), CP_DQS_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(GATE_LPBK_CTRL_REG), CP_GATE_LPBK_CTRL_REG_SDR);
		mmio_write_32(CP_DLL(MASTER_CTRL_REG), CP_DLL_MASTER_CTRL_REG_SDR);

		/* Async mode timing settings */
		mmio_write_32(CNF_MINICTRL(ASYNC_TOGGLE_TIMINGS),
			      (2 << CNF_ASYNC_TIMINGS_TRH) |
			      (4 << CNF_ASYNC_TIMINGS_TRP) |
			      (2 << CNF_ASYNC_TIMINGS_TWH) |
			      (4 << CNF_ASYNC_TIMINGS_TWP));

		/* Set extended read and write mode */
		reg |= (1 << CNF_DLL_PHY_EXT_RD_MODE);
		reg |= (1 << CNF_DLL_PHY_EXT_WR_MODE);

		/* Set operation work mode in common settings */
		mmio_clrsetbits_32(CNF_MINICTRL(CMN_SETTINGS),
				   CNF_CMN_SETTINGS_OPR_MASK,
				   CNF_OPR_WORK_MODE_SDR);
	} else if (opr_mode == CNF_OPR_WORK_MODE_NVDDR) {
		; /* TODO: add DDR mode settings once available on Simics */
	} else {
		;
	}

	/* Release the DLL PHY reset */
	reg |= (1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);
}

/* Data transfer configuration */
static void cdns_nand_transfer_config(void)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Configure data transfer parameters */
	mmio_write_32(CNF_CTRLCFG(TRANS_CFG0), 1);

	/* Disable ECC */
	mmio_write_32(CNF_CTRLCFG(ECC_CFG0), 0);

	/* DMA burst select */
	mmio_write_32(CNF_CTRLCFG(DMA_SETTINGS),
		      (CNF_DMA_BURST_SIZE_MAX << CNF_DMA_SETTINGS_BURST) |
		      (1 << CNF_DMA_SETTINGS_OTE));

	/* Enable pre-fetching for 1K */
	mmio_write_32(CNF_CTRLCFG(FIFO_TLEVEL),
		      (CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_POS) |
		      (CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_DMA_SIZE));

	/* Select access type: disable multi-plane and cache operations */
	mmio_write_32(CNF_CTRLCFG(MULTIPLANE_CFG), 0);
	mmio_write_32(CNF_CTRLCFG(CACHE_CFG), 0);
}

/* Update the NAND flash device info */
static int cdns_nand_update_dev_info(void)
{
	uint32_t reg = 0U;

	/* Read the device type and number of LUNs */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_PARAMS0));
	dev_info.type = CNF_GET_DEV_TYPE(reg);
	if (dev_info.type == CNF_DT_UNKNOWN) {
		ERROR("%s: device type unknown\n", __func__);
		return -ENXIO;
	}
	dev_info.nluns = CNF_GET_NLUNS(reg);

	/* Pages per block */
	reg = mmio_read_32(CNF_CTRLCFG(DEV_LAYOUT));
	dev_info.npages_per_block = CNF_GET_NPAGES_PER_BLOCK(reg);

	/* Sector size and last sector size */
	reg = mmio_read_32(CNF_CTRLCFG(TRANS_CFG1));
	dev_info.sector_size = CNF_GET_SCTR_SIZE(reg);
	dev_info.last_sector_size = CNF_GET_LAST_SCTR_SIZE(reg);

	/* Page size and spare size */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_AREA));
	dev_info.page_size = CNF_GET_PAGE_SIZE(reg);
	dev_info.spare_size = CNF_GET_SPARE_SIZE(reg);

	/* Device blocks per LUN */
	dev_info.nblocks_per_lun = mmio_read_32(CNF_CTRLPARAM(DEV_BLOCKS_PLUN));

	/* Calculate block size and total device size */
	dev_info.block_size = (dev_info.npages_per_block * dev_info.page_size);
	dev_info.total_size = ((unsigned long long)dev_info.block_size *
			       (unsigned long long)dev_info.nblocks_per_lun *
			       dev_info.nluns);

	VERBOSE("CNF params: page_size %d, spare_size %d, block_size %u, total_size %llu\n",
		dev_info.page_size, dev_info.spare_size,
		dev_info.block_size, dev_info.total_size);

	return 0;
}
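
/*
 * Worked example (hypothetical geometry): a device reporting 2 KB pages,
 * 64 pages per block, 4096 blocks per LUN and a single LUN yields
 * block_size = 64 * 2048 = 128 KB and total_size = 128 KB * 4096 = 512 MB.
 */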

/* NAND Flash Controller/Host initialization */
int cdns_nand_host_init(void)
{
	uint32_t reg = 0U;
	int ret = 0;

	do {
		/* Poll the controller status register for init complete */
		reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
	} while (CNF_GET_INIT_COMP(reg) == 0);

	ret = cdns_nand_update_dev_info();
	if (ret != 0) {
		return ret;
	}

	INFO("CNF: device discovery complete, device type %d\n",
	     dev_info.type);

	/* Enable data integrity: CRC and parity */
	reg = mmio_read_32(CNF_DI(CONTROL));
	reg |= (1 << CNF_DI_PAR_EN);
	reg |= (1 << CNF_DI_CRC_EN);
	mmio_write_32(CNF_DI(CONTROL), reg);

	/* Select status polling mode in the device control and status register */
	cdns_nand_wait_idle();
	reg = mmio_read_32(CNF_CTRLCFG(DEV_STAT));
	reg = reg & ~1;
	mmio_write_32(CNF_CTRLCFG(DEV_STAT), reg);

	/* Set operation work mode */
	cdns_nand_set_opr_mode(CNF_OPR_WORK_MODE_SDR);

	/* Set data transfer configuration parameters */
	cdns_nand_transfer_config();

	return 0;
}
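
/*
 * Bring-up sketch (the exact ordering is platform policy, not mandated by
 * this driver): a platform typically initializes the host once, then resets
 * the device on the default thread before issuing erase or read commands:
 *
 *	if (cdns_nand_host_init() == 0) {
 *		(void)cdns_nand_reset(CNF_DEF_TRD);
 *	}
 */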

/* Block erase command; offset and size are expressed in blocks */
int cdns_nand_erase(uint32_t offset, uint32_t size)
{
	/* Determine the starting row address from the block offset */
	uint32_t row_address = dev_info.npages_per_block * offset;

	/* Wait for thread to be in ready state */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);

	/* Operation bank number */
	mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Block erase command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_ERASE << CNF_CMDREG0_CMD);
	/* Number of blocks to erase, encoded as count - 1 */
	reg |= (((size - 1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for erase operation to complete */
	return cdns_nand_last_opr_status(CNF_DEF_TRD);
}
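
/*
 * Usage sketch (hypothetical values): since offset and size are in blocks,
 * erasing the single block at index 10 would be:
 *
 *	(void)cdns_nand_erase(10U, 1U);
 */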

/* io mtd functions */
int cdns_nand_init_mtd(unsigned long long *size, unsigned int *erase_size)
{
	*size = dev_info.total_size;
	*erase_size = dev_info.block_size;

	return 0;
}
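
/*
 * Wiring note (an assumption about the surrounding platform code, not part
 * of this driver): cdns_nand_init_mtd() and cdns_nand_read() match the
 * init/read callback signatures of TF-A's io_mtd layer, so a platform can
 * plug them into its io_mtd device to expose the device size and the
 * block-sized erase granularity computed above.
 */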

static uint32_t cdns_nand_get_row_address(uint32_t page, uint32_t block)
{
	uint32_t row_address = 0U;
	uint32_t req_bits = 0U;

	/* The device info is not populated yet */
	if (dev_info.npages_per_block == 0U) {
		return 0;
	}

	/* Find the number of bits needed to encode the page index */
	for (uint32_t i = 0U; i < sizeof(uint32_t) * 8; i++) {
		if ((1U << i) & dev_info.npages_per_block) {
			req_bits = i;
		}
	}

	/* Pack the page index into the low bits and the block above it */
	row_address = ((page & GENMASK_32((req_bits - 1), 0)) |
		       (block << req_bits));

	return row_address;
}
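
/*
 * Worked example (hypothetical geometry): with npages_per_block = 64,
 * req_bits = 6, so cdns_nand_get_row_address(5, 3) returns
 * (3 << 6) | 5 = 197.
 */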

/* NAND Flash page read */
static int cdns_nand_read_page(uint32_t block, uint32_t page, uintptr_t buffer)
{
	/* Wait for thread to be ready */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Select device */
	mmio_write_32(CNF_CMDREG(CMD_REG4),
		      (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Set host memory address for DMA transfers */
	mmio_write_32(CNF_CMDREG(CMD_REG2), (buffer & UINT32_MAX));
	mmio_write_32(CNF_CMDREG(CMD_REG3), ((buffer >> 32) & UINT32_MAX));

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1),
		      cdns_nand_get_row_address(page, block));

	/* Page read command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_DMA_MASTER_SEL << CNF_CMDREG0_DMA);
	reg |= (CNF_CT_PAGE_READ << CNF_CMDREG0_CMD);
	reg |= (((CNF_READ_SINGLE_PAGE - 1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for read operation to complete */
	if (cdns_nand_last_opr_status(CNF_DEF_TRD) != 0) {
		ERROR("%s: Page read failed\n", __func__);
		return -EIO;
	}

	return 0;
}

int cdns_nand_read(unsigned int offset, uintptr_t buffer, size_t length,
		   size_t *out_length)
{
	uint32_t block = offset / dev_info.block_size;
	uint32_t end_block = (offset + length - 1U) / dev_info.block_size;
	uint32_t page_start = (offset % dev_info.block_size) / dev_info.page_size;
	uint32_t start_offset = offset % dev_info.page_size;
	uint32_t nb_pages = dev_info.block_size / dev_info.page_size;
	uint32_t bytes_read = 0U;
	uint32_t page = 0U;
	int result = 0;

	INFO("CNF: %s: block %u-%u, page_start %u, len %zu, offset %u\n",
	     __func__, block, end_block, page_start, length, offset);

	if ((offset >= dev_info.total_size) ||
	    (offset + length - 1 >= dev_info.total_size) ||
	    (length == 0U)) {
		ERROR("CNF: Invalid read parameters\n");
		return -EINVAL;
	}

	*out_length = 0UL;

	while (block <= end_block) {
		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) || (length < dev_info.page_size)) {
				/* Partial page read */
				result = cdns_nand_read_page(block, page,
							     (uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = MIN((size_t)(dev_info.page_size - start_offset),
						 length);
				memcpy((uint8_t *)buffer, scratch_buff + start_offset,
				       bytes_read);
				start_offset = 0U;
			} else {
				/* Full page read */
				result = cdns_nand_read_page(block, page,
							     (uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = dev_info.page_size;
				memcpy((uint8_t *)buffer, scratch_buff, bytes_read);
			}

			length -= bytes_read;
			buffer += bytes_read;
			*out_length += bytes_read;

			/* All requested bytes have been read */
			if (length == 0U) {
				break;
			}

			udelay(CNF_READ_INT_DELAY_US);
		} /* for */

		page_start = 0U;
		block++;
	} /* while */

	return 0;
}
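
/*
 * Usage sketch (hypothetical values; dest is a caller-provided buffer):
 * with 2 KB pages, reading 6144 bytes starting at byte 512 touches pages
 * 0-3 of block 0. Every page lands in scratch_buff first; the first copy
 * skips start_offset = 512 bytes and the last copies only the final 512:
 *
 *	size_t got = 0U;
 *	int ret = cdns_nand_read(512U, (uintptr_t)dest, 6144U, &got);
 */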