401-partial_eraseblock_write.patch 4.4 KB

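The diff below lets small MTD partitions that do not start or end on an erase-block boundary stay writable: allocate_partition() marks them with a new MTD_ERASE_PARTIAL flag (only misaligned partitions larger than one eraseblock are still forced read-only), part_erase() reads the enclosing master eraseblock into a temporary erase_buf before erasing it, and mtd_erase_callback() writes the preserved head or tail of that buffer back afterwards. As a rough illustration of the read/erase/restore idea, here is a minimal stand-alone user-space sketch; flash[], ERASE_SIZE, FLASH_SIZE and partial_erase() are invented names for this example and are not part of the patch.

/*
 * Toy user-space model of the partial-eraseblock write-back scheme;
 * illustrative only, not kernel code. Assumes the requested range lies
 * within FLASH_SIZE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ERASE_SIZE	16			/* one simulated eraseblock   */
#define FLASH_SIZE	(4 * ERASE_SIZE)	/* four simulated eraseblocks */

static unsigned char flash[FLASH_SIZE];

/*
 * Erase [addr, addr + len) even when it does not cover whole eraseblocks:
 * save the affected blocks, wipe them to 0xff, then restore the bytes that
 * lie outside the requested range (the head before addr and the tail after
 * addr + len).
 */
static int partial_erase(size_t addr, size_t len)
{
	size_t start = addr - addr % ERASE_SIZE;	/* round down to a block boundary */
	size_t end = addr + len;
	size_t blocks_end = (end + ERASE_SIZE - 1) / ERASE_SIZE * ERASE_SIZE;
	unsigned char *buf = malloc(blocks_end - start);

	if (!buf)
		return -1;

	memcpy(buf, flash + start, blocks_end - start);			/* read back     */
	memset(flash + start, 0xff, blocks_end - start);		/* "erase"       */
	memcpy(flash + start, buf, addr - start);			/* restore head  */
	memcpy(flash + end, buf + (end - start), blocks_end - end);	/* restore tail  */
	free(buf);
	return 0;
}

int main(void)
{
	memset(flash, 0xaa, sizeof(flash));	/* pretend the flash holds data    */
	partial_erase(10, 20);			/* range not aligned to ERASE_SIZE */
	/* prints "aa ff aa": byte 9 preserved, byte 10 erased, byte 30 preserved */
	printf("%02x %02x %02x\n", flash[9], flash[10], flash[30]);
	return 0;
}

In the kernel patch the same roles are played by erase_buf and erase_buf_ofs (the saved eraseblock and the offset of the preserved bytes) and partial_start (whether the preserved data is the head or the tail of the block).
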
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -35,6 +35,8 @@
 #include "mtdcore.h"
 
+#define MTD_ERASE_PARTIAL	0x8000 /* partition only covers parts of an erase block */
+
 /* Our partition linked list */
 static LIST_HEAD(mtd_partitions);
 static DEFINE_MUTEX(mtd_partitions_mutex);
 
@@ -230,13 +232,61 @@ static int part_erase(struct mtd_info *m
 	struct mtd_part *part = PART(mtd);
 	int ret;
+
+	instr->partial_start = false;
+	if (mtd->flags & MTD_ERASE_PARTIAL) {
+		size_t readlen = 0;
+		u64 mtd_ofs;
+
+		instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
+		if (!instr->erase_buf)
+			return -ENOMEM;
+
+		mtd_ofs = part->offset + instr->addr;
+		instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
+
+		if (instr->erase_buf_ofs > 0) {
+			instr->addr -= instr->erase_buf_ofs;
+			ret = mtd_read(part->master,
+				instr->addr + part->offset,
+				part->master->erasesize,
+				&readlen, instr->erase_buf);
+
+			instr->len += instr->erase_buf_ofs;
+			instr->partial_start = true;
+		} else {
+			mtd_ofs = part->offset + part->mtd.size;
+			instr->erase_buf_ofs = part->master->erasesize -
+				do_div(mtd_ofs, part->master->erasesize);
+
+			if (instr->erase_buf_ofs > 0) {
+				instr->len += instr->erase_buf_ofs;
+				ret = mtd_read(part->master,
+					part->offset + instr->addr +
+					instr->len - part->master->erasesize,
+					part->master->erasesize, &readlen,
+					instr->erase_buf);
+			} else {
+				ret = 0;
+			}
+		}
+		if (ret < 0) {
+			kfree(instr->erase_buf);
+			return ret;
+		}
+
+	}
+
 	instr->addr += part->offset;
 	ret = part->master->_erase(part->master, instr);
 	if (ret) {
 		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
 		instr->addr -= part->offset;
+		if (mtd->flags & MTD_ERASE_PARTIAL)
+			kfree(instr->erase_buf);
 	}
+
 	return ret;
 }
 
@@ -244,7 +294,25 @@ void mtd_erase_callback(struct erase_inf
 {
 	if (instr->mtd->_erase == part_erase) {
 		struct mtd_part *part = PART(instr->mtd);
+		size_t wrlen = 0;
 
+		if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
+			if (instr->partial_start) {
+				part->master->_write(part->master,
+					instr->addr, instr->erase_buf_ofs,
+					&wrlen, instr->erase_buf);
+				instr->addr += instr->erase_buf_ofs;
+			} else {
+				instr->len -= instr->erase_buf_ofs;
+				part->master->_write(part->master,
+					instr->addr + instr->len,
+					instr->erase_buf_ofs, &wrlen,
+					instr->erase_buf +
+					part->master->erasesize -
+					instr->erase_buf_ofs);
+			}
+			kfree(instr->erase_buf);
+		}
 		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
 		instr->addr -= part->offset;
@@ -504,18 +572,24 @@ static struct mtd_part *allocate_partiti
 	if ((slave->mtd.flags & MTD_WRITEABLE) &&
 	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
 		/* Doesn't start on a boundary of major erase size */
-		/* FIXME: Let it be writable if it is on a boundary of
-		 * _minor_ erase size though */
-		slave->mtd.flags &= ~MTD_WRITEABLE;
-		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
-			part->name);
+		slave->mtd.flags |= MTD_ERASE_PARTIAL;
+		if (((u32) slave->mtd.size) > master->erasesize)
+			slave->mtd.flags &= ~MTD_WRITEABLE;
+		else
+			slave->mtd.erasesize = slave->mtd.size;
 	}
 	if ((slave->mtd.flags & MTD_WRITEABLE) &&
-	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
-		slave->mtd.flags &= ~MTD_WRITEABLE;
-		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
-			part->name);
+	    mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
+		slave->mtd.flags |= MTD_ERASE_PARTIAL;
+
+		if ((u32) slave->mtd.size > master->erasesize)
+			slave->mtd.flags &= ~MTD_WRITEABLE;
+		else
+			slave->mtd.erasesize = slave->mtd.size;
 	}
+	if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
+		printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
+			part->name);
 
 	slave->mtd.ecclayout = master->ecclayout;
 	slave->mtd.ecc_strength = master->ecc_strength;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -58,6 +58,10 @@ struct erase_info {
 	u_long priv;
 	u_char state;
 	struct erase_info *next;
+
+	u8 *erase_buf;
+	u32 erase_buf_ofs;
+	bool partial_start;
 };
 
 struct mtd_erase_region_info {