/* spi_nand.c (extraction artifact removed; original file begins below) */
  1. /*
  2. * Copyright (c) 2019-2023, STMicroelectronics - All Rights Reserved
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <errno.h>
  8. #include <stddef.h>
  9. #include <common/debug.h>
  10. #include <drivers/delay_timer.h>
  11. #include <drivers/spi_nand.h>
  12. #include <lib/utils.h>
  13. #include <platform_def.h>
#define SPI_NAND_MAX_ID_LEN	4U		/* READ_ID transfers up to 4 ID bytes */
#define DELAY_US_400MS		400000U		/* Ready-poll timeout: 400 ms, in microseconds */

/* Driver-wide device state: a single SPI-NAND instance is supported. */
static struct spinand_device spinand_dev;
#pragma weak plat_get_spi_nand_data
/*
 * Default (weak) platform hook called during spi_nand_init() so the platform
 * can fill in device geometry, flags and the read-cache op in @device.
 *
 * Returns 0 on success; a platform override returning non-zero aborts init.
 */
int plat_get_spi_nand_data(struct spinand_device *device)
{
	return 0;
}
  22. static int spi_nand_reg(bool read_reg, uint8_t reg, uint8_t *val,
  23. enum spi_mem_data_dir dir)
  24. {
  25. struct spi_mem_op op;
  26. zeromem(&op, sizeof(struct spi_mem_op));
  27. if (read_reg) {
  28. op.cmd.opcode = SPI_NAND_OP_GET_FEATURE;
  29. } else {
  30. op.cmd.opcode = SPI_NAND_OP_SET_FEATURE;
  31. }
  32. op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
  33. op.addr.val = reg;
  34. op.addr.nbytes = 1U;
  35. op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
  36. op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
  37. op.data.dir = dir;
  38. op.data.nbytes = 1U;
  39. op.data.buf = val;
  40. return spi_mem_exec_op(&op);
  41. }
  42. static int spi_nand_read_reg(uint8_t reg, uint8_t *val)
  43. {
  44. return spi_nand_reg(true, reg, val, SPI_MEM_DATA_IN);
  45. }
  46. static int spi_nand_write_reg(uint8_t reg, uint8_t val)
  47. {
  48. return spi_nand_reg(false, reg, &val, SPI_MEM_DATA_OUT);
  49. }
  50. static int spi_nand_update_cfg(uint8_t mask, uint8_t val)
  51. {
  52. int ret;
  53. uint8_t cfg = spinand_dev.cfg_cache;
  54. cfg &= ~mask;
  55. cfg |= val;
  56. if (cfg == spinand_dev.cfg_cache) {
  57. return 0;
  58. }
  59. ret = spi_nand_write_reg(SPI_NAND_REG_CFG, cfg);
  60. if (ret == 0) {
  61. spinand_dev.cfg_cache = cfg;
  62. }
  63. return ret;
  64. }
  65. static int spi_nand_ecc_enable(bool enable)
  66. {
  67. return spi_nand_update_cfg(SPI_NAND_CFG_ECC_EN,
  68. enable ? SPI_NAND_CFG_ECC_EN : 0U);
  69. }
  70. static int spi_nand_quad_enable(uint8_t manufacturer_id)
  71. {
  72. bool enable = false;
  73. if ((spinand_dev.flags & SPI_NAND_HAS_QE_BIT) == 0U) {
  74. return 0;
  75. }
  76. if (spinand_dev.spi_read_cache_op.data.buswidth ==
  77. SPI_MEM_BUSWIDTH_4_LINE) {
  78. enable = true;
  79. }
  80. return spi_nand_update_cfg(SPI_NAND_CFG_QE,
  81. enable ? SPI_NAND_CFG_QE : 0U);
  82. }
  83. static int spi_nand_wait_ready(uint8_t *status)
  84. {
  85. int ret;
  86. uint64_t timeout = timeout_init_us(DELAY_US_400MS);
  87. while (!timeout_elapsed(timeout)) {
  88. ret = spi_nand_read_reg(SPI_NAND_REG_STATUS, status);
  89. if (ret != 0) {
  90. return ret;
  91. }
  92. VERBOSE("%s Status %x\n", __func__, *status);
  93. if ((*status & SPI_NAND_STATUS_BUSY) == 0U) {
  94. return 0;
  95. }
  96. }
  97. return -ETIMEDOUT;
  98. }
  99. static int spi_nand_reset(void)
  100. {
  101. struct spi_mem_op op;
  102. uint8_t status;
  103. int ret;
  104. zeromem(&op, sizeof(struct spi_mem_op));
  105. op.cmd.opcode = SPI_NAND_OP_RESET;
  106. op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
  107. ret = spi_mem_exec_op(&op);
  108. if (ret != 0) {
  109. return ret;
  110. }
  111. return spi_nand_wait_ready(&status);
  112. }
  113. static int spi_nand_read_id(uint8_t *id)
  114. {
  115. struct spi_mem_op op;
  116. zeromem(&op, sizeof(struct spi_mem_op));
  117. op.cmd.opcode = SPI_NAND_OP_READ_ID;
  118. op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
  119. op.data.dir = SPI_MEM_DATA_IN;
  120. op.data.nbytes = SPI_NAND_MAX_ID_LEN;
  121. op.data.buf = id;
  122. op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
  123. return spi_mem_exec_op(&op);
  124. }
/*
 * Issue PAGE READ (load page into the device's internal cache) for page
 * index @page and return the spi_mem_exec_op() result. The caller must
 * poll spi_nand_wait_ready() before reading the cache.
 */
static int spi_nand_load_page(unsigned int page)
{
	struct spi_mem_op op;
	/*
	 * NOTE(review): @page is a page index but is divided by the block
	 * size in bytes, and page_nb subtracts block_nb * page_size. For
	 * typical geometries block_size >> page count, so block_nb is 0 and
	 * addr.val degenerates to the raw page index — which is what the
	 * PAGE READ row address expects. Confirm this address construction
	 * against the device datasheet before touching it.
	 */
	uint32_t block_nb = page / spinand_dev.nand_dev->block_size;
	uint32_t page_nb = page - (block_nb * spinand_dev.nand_dev->page_size);
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	/* Shift for the block number within the 3-byte row address. */
	uint32_t block_sh = __builtin_ctz(nbpages_per_block) + 1U;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_LOAD_PAGE;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	/* 3-byte row address: block number above the in-block page number. */
	op.addr.val = (block_nb << block_sh) | page_nb;
	op.addr.nbytes = 3U;
	op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	return spi_mem_exec_op(&op);
}
/*
 * Read @len bytes at @offset from the device's page cache into @buffer,
 * using the pre-configured spi_read_cache_op (opcode/buswidths/dummy set
 * at init or by the platform hook).
 */
static int spi_nand_read_from_cache(unsigned int page, unsigned int offset,
				    uint8_t *buffer, unsigned int len)
{
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	uint32_t block_nb = page / nbpages_per_block;
	/*
	 * NOTE(review): bit position of the plane-select bit in the column
	 * address — one above the page-size bits. Device-specific; confirm
	 * against the datasheet for multi-plane parts.
	 */
	uint32_t page_sh = __builtin_ctz(spinand_dev.nand_dev->page_size) + 1U;

	spinand_dev.spi_read_cache_op.addr.val = offset;

	/* On 2-plane devices, odd-numbered blocks live on the second plane. */
	if ((spinand_dev.nand_dev->nb_planes > 1U) && ((block_nb % 2U) == 1U)) {
		spinand_dev.spi_read_cache_op.addr.val |= 1U << page_sh;
	}

	spinand_dev.spi_read_cache_op.data.buf = buffer;
	spinand_dev.spi_read_cache_op.data.nbytes = len;

	return spi_mem_exec_op(&spinand_dev.spi_read_cache_op);
}
  156. static int spi_nand_read_page(unsigned int page, unsigned int offset,
  157. uint8_t *buffer, unsigned int len,
  158. bool ecc_enabled)
  159. {
  160. uint8_t status;
  161. int ret;
  162. ret = spi_nand_ecc_enable(ecc_enabled);
  163. if (ret != 0) {
  164. return ret;
  165. }
  166. ret = spi_nand_load_page(page);
  167. if (ret != 0) {
  168. return ret;
  169. }
  170. ret = spi_nand_wait_ready(&status);
  171. if (ret != 0) {
  172. return ret;
  173. }
  174. ret = spi_nand_read_from_cache(page, offset, buffer, len);
  175. if (ret != 0) {
  176. return ret;
  177. }
  178. if (ecc_enabled && ((status & SPI_NAND_STATUS_ECC_UNCOR) != 0U)) {
  179. return -EBADMSG;
  180. }
  181. return 0;
  182. }
  183. static int spi_nand_mtd_block_is_bad(unsigned int block)
  184. {
  185. unsigned int nbpages_per_block = spinand_dev.nand_dev->block_size /
  186. spinand_dev.nand_dev->page_size;
  187. uint8_t bbm_marker[2];
  188. int ret;
  189. ret = spi_nand_read_page(block * nbpages_per_block,
  190. spinand_dev.nand_dev->page_size,
  191. bbm_marker, sizeof(bbm_marker), false);
  192. if (ret != 0) {
  193. return ret;
  194. }
  195. if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
  196. (bbm_marker[1] != GENMASK_32(7, 0))) {
  197. WARN("Block %u is bad\n", block);
  198. return 1;
  199. }
  200. return 0;
  201. }
  202. static int spi_nand_mtd_read_page(struct nand_device *nand, unsigned int page,
  203. uintptr_t buffer)
  204. {
  205. return spi_nand_read_page(page, 0, (uint8_t *)buffer,
  206. spinand_dev.nand_dev->page_size, true);
  207. }
/*
 * Probe and initialize the SPI-NAND device.
 *
 * Fills the generic NAND device callbacks, configures the default 1-line
 * read-from-cache operation, lets the platform override geometry/flags via
 * plat_get_spi_nand_data(), then resets the part, reads its ID, caches the
 * configuration register and programs the Quad Enable bit if applicable.
 *
 * @size:       out, total device size in bytes.
 * @erase_size: out, erase-block size in bytes.
 *
 * Returns 0 on success or a negative error code.
 */
int spi_nand_init(unsigned long long *size, unsigned int *erase_size)
{
	uint8_t id[SPI_NAND_MAX_ID_LEN];
	int ret;

	spinand_dev.nand_dev = get_nand_device();
	if (spinand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	/* Hook this driver into the generic NAND framework. */
	spinand_dev.nand_dev->mtd_block_is_bad = spi_nand_mtd_block_is_bad;
	spinand_dev.nand_dev->mtd_read_page = spi_nand_mtd_read_page;
	spinand_dev.nand_dev->nb_planes = 1;

	/* Default READ_FROM_CACHE op: 1-line bus, 2 address bytes, 1 dummy. */
	spinand_dev.spi_read_cache_op.cmd.opcode = SPI_NAND_OP_READ_FROM_CACHE;
	spinand_dev.spi_read_cache_op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.addr.nbytes = 2U;
	spinand_dev.spi_read_cache_op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.dummy.nbytes = 1U;
	spinand_dev.spi_read_cache_op.dummy.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	/* Platform hook may override geometry, flags and the read op. */
	if (plat_get_spi_nand_data(&spinand_dev) != 0) {
		return -EINVAL;
	}

	/* Geometry must be known by now (platform data or defaults). */
	assert((spinand_dev.nand_dev->page_size != 0U) &&
	       (spinand_dev.nand_dev->block_size != 0U) &&
	       (spinand_dev.nand_dev->size != 0U));

	ret = spi_nand_reset();
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_id(id);
	if (ret != 0) {
		return ret;
	}

	/* Cache the configuration register for read-modify-write updates. */
	ret = spi_nand_read_reg(SPI_NAND_REG_CFG, &spinand_dev.cfg_cache);
	if (ret != 0) {
		return ret;
	}

	/* id[1] is presumably the manufacturer ID byte — confirm per device. */
	ret = spi_nand_quad_enable(id[1]);
	if (ret != 0) {
		return ret;
	}

	VERBOSE("SPI_NAND Detected ID 0x%x\n", id[1]);
	VERBOSE("Page size %u, Block size %u, size %llu\n",
		spinand_dev.nand_dev->page_size,
		spinand_dev.nand_dev->block_size,
		spinand_dev.nand_dev->size);

	*size = spinand_dev.nand_dev->size;
	*erase_size = spinand_dev.nand_dev->block_size;

	return 0;
}
  256. }