/* cdns_nand.c */
  1. /*
  2. * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <errno.h>
  8. #include <stdbool.h>
  9. #include <string.h>
  10. #include <arch_helpers.h>
  11. #include <common/debug.h>
  12. #include <drivers/cadence/cdns_nand.h>
  13. #include <drivers/delay_timer.h>
  14. #include <lib/mmio.h>
  15. #include <lib/utils.h>
  16. #include <platform_def.h>
  17. /* NAND flash device information struct */
  18. static cnf_dev_info_t dev_info;
  19. /* Scratch buffers for read and write operations */
  20. static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];
  21. /* Wait for controller to be in idle state */
  22. static inline void cdns_nand_wait_idle(void)
  23. {
  24. uint32_t reg = 0U;
  25. do {
  26. udelay(CNF_DEF_DELAY_US);
  27. reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
  28. } while (CNF_GET_CTRL_BUSY(reg) != 0U);
  29. }
  30. /* Wait for given thread to be in ready state */
  31. static inline void cdns_nand_wait_thread_ready(uint8_t thread_id)
  32. {
  33. uint32_t reg = 0U;
  34. do {
  35. udelay(CNF_DEF_DELAY_US);
  36. reg = mmio_read_32(CNF_CMDREG(TRD_STATUS));
  37. reg &= (1U << (uint32_t)thread_id);
  38. } while (reg != 0U);
  39. }
  40. /* Check if the last operation/command in selected thread is completed */
  41. static int cdns_nand_last_opr_status(uint8_t thread_id)
  42. {
  43. uint8_t nthreads = 0U;
  44. uint32_t reg = 0U;
  45. /* Get number of threads */
  46. reg = mmio_read_32(CNF_CTRLPARAM(FEATURE));
  47. nthreads = CNF_GET_NTHREADS(reg);
  48. if (thread_id > nthreads) {
  49. ERROR("%s: Invalid thread ID\n", __func__);
  50. return -EINVAL;
  51. }
  52. /* Select thread */
  53. mmio_write_32(CNF_CMDREG(CMD_STAT_PTR), (uint32_t)thread_id);
  54. uint32_t err_mask = CNF_ECMD | CNF_EECC | CNF_EDEV | CNF_EDQS | CNF_EFAIL |
  55. CNF_EBUS | CNF_EDI | CNF_EPAR | CNF_ECTX | CNF_EPRO;
  56. do {
  57. udelay(CNF_DEF_DELAY_US * 2);
  58. reg = mmio_read_32(CNF_CMDREG(CMD_STAT));
  59. } while ((reg & CNF_CMPLT) == 0U);
  60. /* last operation is completed, make sure no other error bits are set */
  61. if ((reg & err_mask) == 1U) {
  62. ERROR("%s, CMD_STATUS:0x%x\n", __func__, reg);
  63. return -EIO;
  64. }
  65. return 0;
  66. }
  67. /* Set feature command */
  68. int cdns_nand_set_feature(uint8_t feat_addr, uint8_t feat_val, uint8_t thread_id)
  69. {
  70. /* Wait for thread to be ready */
  71. cdns_nand_wait_thread_ready(thread_id);
  72. /* Set feature address */
  73. mmio_write_32(CNF_CMDREG(CMD_REG1), (uint32_t)feat_addr);
  74. /* Set feature volume */
  75. mmio_write_32(CNF_CMDREG(CMD_REG2), (uint32_t)feat_val);
  76. /* Set feature command */
  77. uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
  78. reg |= (thread_id << CNF_CMDREG0_TRD);
  79. reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
  80. reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
  81. reg |= (CNF_CT_SET_FEATURE << CNF_CMDREG0_CMD);
  82. mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
  83. return cdns_nand_last_opr_status(thread_id);
  84. }
  85. /* Reset command to the selected device */
  86. int cdns_nand_reset(uint8_t thread_id)
  87. {
  88. /* Operation is executed in selected thread */
  89. cdns_nand_wait_thread_ready(thread_id);
  90. /* Select memory */
  91. mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));
  92. /* Issue reset command */
  93. uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
  94. reg |= (thread_id << CNF_CMDREG0_TRD);
  95. reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
  96. reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
  97. reg |= (CNF_CT_RESET_ASYNC << CNF_CMDREG0_CMD);
  98. mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
  99. return cdns_nand_last_opr_status(thread_id);
  100. }
/*
 * Set the operation work mode (SDR or NVDDR) by reprogramming the combo
 * PHY and mini-controller timing registers. The DLL PHY is held in reset
 * while the timings are changed and released at the end.
 */
static void cdns_nand_set_opr_mode(uint8_t opr_mode)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Hold the DLL PHY in reset while timings are reprogrammed */
	uint32_t reg = mmio_read_32(CNF_MINICTRL(DLL_PHY_CTRL));
	reg &= ~(1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);

	if (opr_mode == CNF_OPR_WORK_MODE_SDR) {
		/* Combo PHY Control Timing Block register settings */
		mmio_write_32(CP_CTB(CTRL_REG), CP_CTRL_REG_SDR);
		mmio_write_32(CP_CTB(TSEL_REG), CP_TSEL_REG_SDR);

		/* Combo PHY DLL register settings */
		mmio_write_32(CP_DLL(DQ_TIMING_REG), CP_DQ_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(DQS_TIMING_REG), CP_DQS_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(GATE_LPBK_CTRL_REG), CP_GATE_LPBK_CTRL_REG_SDR);
		mmio_write_32(CP_DLL(MASTER_CTRL_REG), CP_DLL_MASTER_CTRL_REG_SDR);

		/* Async mode timing settings (cycle counts for TRH/TRP/TWH/TWP) */
		mmio_write_32(CNF_MINICTRL(ASYNC_TOGGLE_TIMINGS),
			      (2 << CNF_ASYNC_TIMINGS_TRH) |
			      (4 << CNF_ASYNC_TIMINGS_TRP) |
			      (2 << CNF_ASYNC_TIMINGS_TWH) |
			      (4 << CNF_ASYNC_TIMINGS_TWP));

		/* Set extended read and write mode (applied on PHY release below) */
		reg |= (1 << CNF_DLL_PHY_EXT_RD_MODE);
		reg |= (1 << CNF_DLL_PHY_EXT_WR_MODE);

		/* Set operation work mode in common settings */
		uint32_t data = mmio_read_32(CNF_MINICTRL(CMN_SETTINGS));
		data |= (CNF_OPR_WORK_MODE_SDR << CNF_CMN_SETTINGS_OPR);
		mmio_write_32(CNF_MINICTRL(CMN_SETTINGS), data);
	} else if (opr_mode == CNF_OPR_WORK_MODE_NVDDR) {
		; /* ToDo: add DDR mode settings also once available on SIMICS */
	} else {
		; /* unsupported mode: leave timings untouched */
	}

	/* Release the DLL PHY from reset with the (possibly updated) mode flags */
	reg |= (1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);
}
/*
 * Configure the controller's data-transfer path: transfer parameters,
 * ECC off, DMA burst size, FIFO pre-fetch level, and access type.
 */
static void cdns_nand_transfer_config(void)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Configure data transfer parameters */
	mmio_write_32(CNF_CTRLCFG(TRANS_CFG0), 1);

	/* ECC is disabled */
	mmio_write_32(CNF_CTRLCFG(ECC_CFG0), 0);

	/* DMA burst select: maximum burst, on-the-fly endianness bit set */
	mmio_write_32(CNF_CTRLCFG(DMA_SETTINGS),
		      (CNF_DMA_BURST_SIZE_MAX << CNF_DMA_SETTINGS_BURST) |
		      (1 << CNF_DMA_SETTINGS_OTE));

	/* Enable pre-fetching for 1K */
	mmio_write_32(CNF_CTRLCFG(FIFO_TLEVEL),
		      (CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_POS) |
		      (CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_DMA_SIZE));

	/* Select access type: multi-plane and cache modes disabled */
	mmio_write_32(CNF_CTRLCFG(MULTIPLANE_CFG), 0);
	mmio_write_32(CNF_CTRLCFG(CACHE_CFG), 0);
}
/*
 * Read the discovered NAND geometry from the controller parameter/config
 * registers and populate the static dev_info struct.
 *
 * Returns 0 on success, -ENXIO if the device type could not be identified.
 */
static int cdns_nand_update_dev_info(void)
{
	uint32_t reg = 0U;

	/* Read the device type and number of LUNs */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_PARAMS0));
	dev_info.type = CNF_GET_DEV_TYPE(reg);
	if (dev_info.type == CNF_DT_UNKNOWN) {
		ERROR("%s: device type unknown\n", __func__);
		return -ENXIO;
	}
	dev_info.nluns = CNF_GET_NLUNS(reg);

	/* Pages per block */
	reg = mmio_read_32(CNF_CTRLCFG(DEV_LAYOUT));
	dev_info.npages_per_block = CNF_GET_NPAGES_PER_BLOCK(reg);

	/* Sector size and last sector size */
	reg = mmio_read_32(CNF_CTRLCFG(TRANS_CFG1));
	dev_info.sector_size = CNF_GET_SCTR_SIZE(reg);
	dev_info.last_sector_size = CNF_GET_LAST_SCTR_SIZE(reg);

	/* Page size and spare size */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_AREA));
	dev_info.page_size = CNF_GET_PAGE_SIZE(reg);
	dev_info.spare_size = CNF_GET_SPARE_SIZE(reg);

	/* Device blocks per LUN */
	dev_info.nblocks_per_lun = mmio_read_32(CNF_CTRLPARAM(DEV_BLOCKS_PLUN));

	/* Derive block size (bytes) and total device size from the above */
	dev_info.block_size = (dev_info.npages_per_block * dev_info.page_size);
	dev_info.total_size = (dev_info.block_size * dev_info.nblocks_per_lun *
			       dev_info.nluns);

	VERBOSE("CNF params: page %d, spare %d, block %d, total %lld\n",
		dev_info.page_size, dev_info.spare_size,
		dev_info.block_size, dev_info.total_size);

	return 0;
}
  195. /* NAND Flash Controller/Host initialization */
  196. int cdns_nand_host_init(void)
  197. {
  198. uint32_t reg = 0U;
  199. int ret = 0;
  200. do {
  201. /* Read controller status register for init complete */
  202. reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
  203. } while (CNF_GET_INIT_COMP(reg) == 0);
  204. ret = cdns_nand_update_dev_info();
  205. if (ret != 0) {
  206. return ret;
  207. }
  208. INFO("CNF: device discovery process completed and device type %d\n",
  209. dev_info.type);
  210. /* Enable data integrity, enable CRC and parity */
  211. reg = mmio_read_32(CNF_DI(CONTROL));
  212. reg |= (1 << CNF_DI_PAR_EN);
  213. reg |= (1 << CNF_DI_CRC_EN);
  214. mmio_write_32(CNF_DI(CONTROL), reg);
  215. /* Status polling mode, device control and status register */
  216. cdns_nand_wait_idle();
  217. reg = mmio_read_32(CNF_CTRLCFG(DEV_STAT));
  218. reg = reg & ~1;
  219. mmio_write_32(CNF_CTRLCFG(DEV_STAT), reg);
  220. /* Set operation work mode */
  221. cdns_nand_set_opr_mode(CNF_OPR_WORK_MODE_SDR);
  222. /* Set data transfer configuration parameters */
  223. cdns_nand_transfer_config();
  224. return 0;
  225. }
/*
 * Block erase command.
 *
 * 'offset' is the starting block index (converted to a row address using
 * pages-per-block); 'size' is the number of blocks to erase.
 * Returns the completion status of the erase operation.
 */
int cdns_nand_erase(uint32_t offset, uint32_t size)
{
	/* Determine the starting block offset i.e row address */
	uint32_t row_address = dev_info.npages_per_block * offset;

	/* Wait for thread to be in ready state */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);

	/* Operation bank number */
	mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Block erase command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_ERASE << CNF_CMDREG0_CMD);
	/* NOTE(review): the erase count below is shifted by CNF_CMDREG0_CMD,
	 * the same field as the command opcode above — verify against the
	 * controller spec whether a dedicated count field/shift is intended. */
	reg |= (((size-1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for erase operation to complete */
	return cdns_nand_last_opr_status(CNF_DEF_TRD);
}
  248. /* io mtd functions */
  249. int cdns_nand_init_mtd(unsigned long long *size, unsigned int *erase_size)
  250. {
  251. *size = dev_info.total_size;
  252. *erase_size = dev_info.block_size;
  253. return 0;
  254. }
  255. /* NAND Flash page read */
  256. static int cdns_nand_read_page(uint32_t block, uint32_t page, uintptr_t buffer)
  257. {
  258. /* Wait for thread to be ready */
  259. cdns_nand_wait_thread_ready(CNF_DEF_TRD);
  260. /* Select device */
  261. mmio_write_32(CNF_CMDREG(CMD_REG4),
  262. (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));
  263. /* Set host memory address for DMA transfers */
  264. mmio_write_32(CNF_CMDREG(CMD_REG2), (buffer & 0xFFFF));
  265. mmio_write_32(CNF_CMDREG(CMD_REG3), ((buffer >> 32) & 0xFFFF));
  266. /* Set row address */
  267. uint32_t row_address = 0U;
  268. row_address |= ((page & 0x3F) | (block << 6));
  269. mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);
  270. /* Page read command */
  271. uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
  272. reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
  273. reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
  274. reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
  275. reg |= (CNF_DMA_MASTER_SEL << CNF_CMDREG0_DMA);
  276. reg |= (CNF_CT_PAGE_READ << CNF_CMDREG0_CMD);
  277. reg |= (((CNF_READ_SINGLE_PAGE-1) & 0xFF) << CNF_CMDREG0_CMD);
  278. mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
  279. /* Wait for read operation to complete */
  280. if (cdns_nand_last_opr_status(CNF_DEF_TRD)) {
  281. ERROR("%s: Page read failed\n", __func__);
  282. return -EIO;
  283. }
  284. return 0;
  285. }
/*
 * Read 'length' bytes starting at device byte 'offset' into 'buffer'.
 * All data is staged through scratch_buff one page at a time; partial
 * first pages (and short tails) are copied out from the staging buffer
 * at the appropriate offset. The number of bytes delivered is returned
 * via out_length.
 *
 * Returns 0 on success, -EINVAL for out-of-range/empty requests, or the
 * error from a failed page read.
 */
int cdns_nand_read(unsigned int offset, uintptr_t buffer, size_t length,
		   size_t *out_length)
{
	uint32_t block = offset / dev_info.block_size;
	uint32_t end_block = (offset + length - 1U) / dev_info.block_size;
	uint32_t page_start = (offset % dev_info.block_size) / dev_info.page_size;
	uint32_t start_offset = offset % dev_info.page_size;
	uint32_t nb_pages = dev_info.block_size / dev_info.page_size;
	uint32_t bytes_read = 0U;
	uint32_t page = 0U;
	int result = 0;

	VERBOSE("CNF: block %u-%u, page_start %u, len %zu, offset %u\n",
		block, end_block, page_start, length, offset);

	/* Reject reads that start or end beyond the device, or empty reads */
	if ((offset >= dev_info.total_size) ||
	    (offset + length-1 >= dev_info.total_size) ||
	    (length == 0U)) {
		ERROR("CNF: Invalid read parameters\n");
		return -EINVAL;
	}

	*out_length = 0UL;
	while (block <= end_block) {
		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) || (length < dev_info.page_size)) {
				/* Partial page read: copy the wanted slice
				 * out of the staging buffer */
				result = cdns_nand_read_page(block, page,
							     (uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = MIN((size_t)(dev_info.page_size - start_offset),
						 length);
				memcpy((uint8_t *)buffer, scratch_buff + start_offset,
				       bytes_read);
				/* Only the first page can start mid-page */
				start_offset = 0U;
			} else {
				/* Full page read */
				result = cdns_nand_read_page(block, page,
							     (uintptr_t)scratch_buff);
				if (result != 0) {
					return result;
				}

				bytes_read = dev_info.page_size;
				memcpy((uint8_t *)buffer, scratch_buff, bytes_read);
			}

			length -= bytes_read;
			buffer += bytes_read;
			*out_length += bytes_read;

			/* All requested bytes have been read */
			if (length == 0U) {
				break;
			}

			udelay(CNF_READ_INT_DELAY_US);
		} /* for */

		/* Subsequent blocks are read from their first page */
		page_start = 0U;
		block++;
	} /* while */

	return 0;
}
  343. }