/* cache.c — block cache implementation */
/*
 * cache.c
 *
 * Copyright (C) 2016 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <device.h>
#include <cache.h>
#include <heap.h>
  22. static void flush_cache_internal(cache_descriptor_t *cache, avl_node_t *node, void *context)
  23. {
  24. if (node == NULL) return;
  25. cache_entry_t *entry = CONTAINER_OF(node, cache_entry_t, node);
  26. if (entry->dirty)
  27. {
  28. dword_t written;
  29. dword_t ret = cache->write_proc(context, entry->data, entry->address * cache->block_size, cache->block_size, &written);
  30. if ((ret == ERR_SUCCESS) || (written == cache->block_size)) entry->dirty = FALSE;
  31. }
  32. flush_cache_internal(cache, node->left, context);
  33. flush_cache_internal(cache, node->right, context);
  34. }
  35. static void cleanup_cache_internal(avl_node_t *node)
  36. {
  37. if (node == NULL) return;
  38. cleanup_cache_internal(node->left);
  39. cleanup_cache_internal(node->right);
  40. cache_entry_t *entry = CONTAINER_OF(node, cache_entry_t, node);
  41. heap_free(&evictable_heap, entry);
  42. }
  43. static int compare_address(const void *key1, const void *key2)
  44. {
  45. const qword_t first = *(const qword_t*)key1;
  46. const qword_t second = *(const qword_t*)key2;
  47. if (first < second) return -1;
  48. else if (first == second) return 0;
  49. else return 1;
  50. }
  51. void init_cache(cache_descriptor_t *cache,
  52. dword_t flags,
  53. dword_t block_size,
  54. read_write_buffer_proc_t read_proc,
  55. read_write_buffer_proc_t write_proc)
  56. {
  57. cache->enabled = TRUE;
  58. lock_init(&cache->lock);
  59. cache->flags = flags;
  60. cache->block_size = block_size;
  61. cache->read_proc = read_proc;
  62. cache->write_proc = write_proc;
  63. AVL_TREE_INIT(&cache->entries, cache_entry_t, node, address, compare_address);
  64. }
  65. void cleanup_cache(cache_descriptor_t *cache)
  66. {
  67. cleanup_cache_internal(cache->entries.root);
  68. cache->entries.root = NULL;
  69. }
  70. dword_t read_cache(cache_descriptor_t *cache, void *context, byte_t *buffer, qword_t offset, dword_t length, dword_t *bytes_read)
  71. {
  72. dword_t ret = ERR_SUCCESS;
  73. qword_t i;
  74. qword_t first_block = offset / (qword_t)cache->block_size;
  75. qword_t last_block = (offset + length - 1) / (qword_t)cache->block_size;
  76. bool_t exclusive = FALSE;
  77. lock_acquire_shared(&cache->lock);
  78. *bytes_read = 0;
  79. for (i = first_block; i <= last_block; i++)
  80. {
  81. dword_t start_offset = 0, bytes_to_copy = cache->block_size;
  82. avl_node_t *element = avl_tree_lookup(&cache->entries, &i);
  83. if (element == NULL && !exclusive)
  84. {
  85. lock_release(&cache->lock);
  86. lock_acquire(&cache->lock);
  87. exclusive = TRUE;
  88. element = avl_tree_lookup(&cache->entries, &i);
  89. }
  90. if (element == NULL)
  91. {
  92. cache_entry_t *new_entry = (cache_entry_t*)heap_alloc(&evictable_heap, sizeof(cache_entry_t) + cache->block_size);
  93. if (new_entry != NULL)
  94. {
  95. new_entry->address = i;
  96. new_entry->dirty = FALSE;
  97. }
  98. ret = cache->read_proc(context, new_entry->data, i * (qword_t)cache->block_size, cache->block_size, NULL);
  99. if (ret != ERR_SUCCESS)
  100. {
  101. heap_free(&evictable_heap, new_entry);
  102. break;
  103. }
  104. avl_tree_insert(&cache->entries, &new_entry->node);
  105. element = &new_entry->node;
  106. }
  107. cache_entry_t *entry = CONTAINER_OF(element, cache_entry_t, node);
  108. if (first_block == last_block)
  109. {
  110. start_offset = (dword_t)(offset % (qword_t)cache->block_size);
  111. bytes_to_copy = length;
  112. }
  113. else if (i == first_block)
  114. {
  115. start_offset = (dword_t)(offset % (qword_t)cache->block_size);
  116. bytes_to_copy -= start_offset;
  117. }
  118. else if (i == last_block)
  119. {
  120. bytes_to_copy = length - *bytes_read;
  121. }
  122. memcpy(&buffer[*bytes_read], &entry->data[start_offset], bytes_to_copy);
  123. *bytes_read += bytes_to_copy;
  124. }
  125. lock_release(&cache->lock);
  126. return ret;
  127. }
  128. dword_t write_cache(cache_descriptor_t *cache, void *context, const byte_t *buffer, qword_t offset, dword_t length, dword_t *bytes_written)
  129. {
  130. dword_t ret = ERR_SUCCESS;
  131. qword_t i;
  132. qword_t first_block = offset / (qword_t)cache->block_size;
  133. qword_t last_block = (offset + length - 1) / (qword_t)cache->block_size;
  134. lock_acquire(&cache->lock);
  135. *bytes_written = 0;
  136. for (i = first_block; i <= last_block; i++)
  137. {
  138. dword_t start_offset = 0, bytes_to_copy = cache->block_size;
  139. avl_node_t *element = avl_tree_lookup(&cache->entries, &i);
  140. if (element == NULL)
  141. {
  142. cache_entry_t *new_entry = (cache_entry_t*)heap_alloc(&evictable_heap, sizeof(cache_entry_t) + cache->block_size);
  143. if (new_entry == NULL)
  144. {
  145. ret = ERR_NOMEMORY;
  146. break;
  147. }
  148. new_entry->address = i;
  149. new_entry->dirty = FALSE;
  150. ret = cache->read_proc(context, new_entry->data, i * (qword_t)cache->block_size, cache->block_size, NULL);
  151. if (ret != ERR_SUCCESS)
  152. {
  153. heap_free(&evictable_heap, new_entry);
  154. break;
  155. }
  156. avl_tree_insert(&cache->entries, &new_entry->node);
  157. element = &new_entry->node;
  158. }
  159. cache_entry_t *entry = CONTAINER_OF(element, cache_entry_t, node);
  160. if (first_block == last_block)
  161. {
  162. start_offset = (dword_t)(offset % (qword_t)cache->block_size);
  163. bytes_to_copy = length;
  164. }
  165. else if (i == first_block)
  166. {
  167. start_offset = (dword_t)(offset % (qword_t)cache->block_size);
  168. bytes_to_copy -= start_offset;
  169. }
  170. else if (i == last_block)
  171. {
  172. bytes_to_copy = length - *bytes_written;
  173. }
  174. memcpy(&entry->data[start_offset], &buffer[*bytes_written], bytes_to_copy);
  175. *bytes_written += bytes_to_copy;
  176. if (cache->flags & CACHE_WRITE_THROUGH)
  177. {
  178. dword_t written;
  179. ret = cache->write_proc(context, entry->data, i * (qword_t)cache->block_size, cache->block_size, &written);
  180. if ((ret != ERR_SUCCESS) || (written != cache->block_size)) entry->dirty = TRUE;
  181. }
  182. else
  183. {
  184. entry->dirty = TRUE;
  185. }
  186. }
  187. lock_release(&cache->lock);
  188. return ret;
  189. }
  190. dword_t flush_cache(cache_descriptor_t *cache, void *context)
  191. {
  192. flush_cache_internal(cache, cache->entries.root, context);
  193. return ERR_SUCCESS;
  194. }