  1. /*
  2. * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <errno.h>
  8. #include <stddef.h>
  9. #include <common/debug.h>
  10. #include <drivers/delay_timer.h>
  11. #include <drivers/nand.h>
  12. #include <lib/utils.h>
  13. #include <platform_def.h>
/*
 * Define a single nand_device used by specific NAND frameworks.
 * Filled in by the framework driver; handed out via get_nand_device().
 */
static struct nand_device nand_dev;
  18. #pragma weak plat_get_scratch_buffer
  19. void plat_get_scratch_buffer(void **buffer_addr, size_t *buf_size)
  20. {
  21. static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];
  22. assert(buffer_addr != NULL);
  23. assert(buf_size != NULL);
  24. *buffer_addr = (void *)scratch_buff;
  25. *buf_size = sizeof(scratch_buff);
  26. }
  27. int nand_read(unsigned int offset, uintptr_t buffer, size_t length,
  28. size_t *length_read)
  29. {
  30. unsigned int block = offset / nand_dev.block_size;
  31. unsigned int end_block = (offset + length - 1U) / nand_dev.block_size;
  32. unsigned int page_start =
  33. (offset % nand_dev.block_size) / nand_dev.page_size;
  34. unsigned int nb_pages = nand_dev.block_size / nand_dev.page_size;
  35. unsigned int start_offset = offset % nand_dev.page_size;
  36. unsigned int page;
  37. unsigned int bytes_read;
  38. int is_bad;
  39. int ret;
  40. uint8_t *scratch_buff;
  41. size_t scratch_buff_size;
  42. plat_get_scratch_buffer((void **)&scratch_buff, &scratch_buff_size);
  43. assert(scratch_buff != NULL);
  44. VERBOSE("Block %u - %u, page_start %u, nb %u, length %zu, offset %u\n",
  45. block, end_block, page_start, nb_pages, length, offset);
  46. *length_read = 0UL;
  47. if (((start_offset != 0U) || (length % nand_dev.page_size) != 0U) &&
  48. (scratch_buff_size < nand_dev.page_size)) {
  49. return -EINVAL;
  50. }
  51. while (block <= end_block) {
  52. is_bad = nand_dev.mtd_block_is_bad(block);
  53. if (is_bad < 0) {
  54. return is_bad;
  55. }
  56. if (is_bad == 1) {
  57. /* Skip the block */
  58. uint32_t max_block =
  59. nand_dev.size / nand_dev.block_size;
  60. block++;
  61. end_block++;
  62. if ((block < max_block) && (end_block < max_block)) {
  63. continue;
  64. }
  65. return -EIO;
  66. }
  67. for (page = page_start; page < nb_pages; page++) {
  68. if ((start_offset != 0U) ||
  69. (length < nand_dev.page_size)) {
  70. ret = nand_dev.mtd_read_page(
  71. &nand_dev,
  72. (block * nb_pages) + page,
  73. (uintptr_t)scratch_buff);
  74. if (ret != 0) {
  75. return ret;
  76. }
  77. bytes_read = MIN((size_t)(nand_dev.page_size -
  78. start_offset),
  79. length);
  80. memcpy((uint8_t *)buffer,
  81. scratch_buff + start_offset,
  82. bytes_read);
  83. start_offset = 0U;
  84. } else {
  85. ret = nand_dev.mtd_read_page(&nand_dev,
  86. (block * nb_pages) + page,
  87. buffer);
  88. if (ret != 0) {
  89. return ret;
  90. }
  91. bytes_read = nand_dev.page_size;
  92. }
  93. length -= bytes_read;
  94. buffer += bytes_read;
  95. *length_read += bytes_read;
  96. if (length == 0U) {
  97. break;
  98. }
  99. }
  100. page_start = 0U;
  101. block++;
  102. }
  103. return 0;
  104. }
  105. int nand_seek_bb(uintptr_t base, unsigned int offset, size_t *extra_offset)
  106. {
  107. unsigned int block;
  108. unsigned int offset_block;
  109. unsigned int max_block;
  110. int is_bad;
  111. size_t count_bb = 0U;
  112. block = base / nand_dev.block_size;
  113. if (offset != 0U) {
  114. offset_block = (base + offset - 1U) / nand_dev.block_size;
  115. } else {
  116. offset_block = block;
  117. }
  118. max_block = nand_dev.size / nand_dev.block_size;
  119. while (block <= offset_block) {
  120. if (offset_block >= max_block) {
  121. return -EIO;
  122. }
  123. is_bad = nand_dev.mtd_block_is_bad(block);
  124. if (is_bad < 0) {
  125. return is_bad;
  126. }
  127. if (is_bad == 1) {
  128. count_bb++;
  129. offset_block++;
  130. }
  131. block++;
  132. }
  133. *extra_offset = count_bb * nand_dev.block_size;
  134. return 0;
  135. }
/*
 * Return a pointer to the single NAND device instance shared by the
 * specific NAND framework drivers.
 */
struct nand_device *get_nand_device(void)
{
	return &nand_dev;
}