090-MIPS-c-r4k-Use-IPI-calls-for-CM-indexed-cache-ops.patch

From: James Hogan <james.hogan@imgtec.com>
Date: Mon, 25 Jan 2016 21:30:00 +0000
Subject: [PATCH] MIPS: c-r4k: Use IPI calls for CM indexed cache ops

The Coherence Manager (CM) can propagate address-based ("hit") cache
operations to other cores in the coherent system, relieving software of
the need to use IPI calls. Indexed cache operations, however, are not
propagated, since doing so makes no sense for separate caches.

r4k_on_each_cpu() previously had a special case for CONFIG_MIPS_MT_SMP,
intended to avoid IPIs when the only other CPUs in the system were other
VPEs in the same core, hence sharing the same caches. This was changed
by commit cccf34e9411c ("MIPS: c-r4k: Fix cache flushing for MT cores")
to apparently handle multi-core multi-VPE systems, but it focused mainly
on hit cache ops, so the IPI calls were still disabled entirely for CM
systems.

This doesn't normally cause problems, but tests can be written to hit
these corner cases by using multiple threads, or by changing task
affinities to force the process to migrate cores. For example, the
failure of mprotect() RW->RX to globally sync icaches (via
flush_cache_range) can be detected by modifying and mprotecting a code
page on one core, then migrating to a different core to execute from it.

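A minimal userspace reproducer along these lines might look like the
following sketch. The MIPS32 "jr $ra; nop" encoding, the CPU numbers,
and the deliberate reliance on mprotect() alone (no cacheflush()) are
illustrative assumptions, not part of this patch; error checking is
omitted for brevity:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  /* MIPS32 encoding of "jr $ra; nop": return immediately. */
  static const uint32_t ret_code[2] = { 0x03e00008, 0x00000000 };

  static void pin_to_cpu(int cpu)
  {
  	cpu_set_t set;

  	CPU_ZERO(&set);
  	CPU_SET(cpu, &set);
  	sched_setaffinity(0, sizeof(set), &set);	/* 0 = this thread */
  }

  int main(void)
  {
  	long page = sysconf(_SC_PAGESIZE);
  	void *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
  			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  	pin_to_cpu(0);				/* write the code on core 0 */
  	memcpy(buf, ret_code, sizeof(ret_code));

  	/* RW->RX: expected to sync icaches globally (flush_cache_range) */
  	mprotect(buf, page, PROT_READ | PROT_EXEC);

  	pin_to_cpu(1);				/* ...then execute on core 1 */
  	((void (*)(void))buf)();		/* stale icache => crash here */

  	puts("executed OK");
  	return 0;
  }

If the flush is not propagated to the second core (no hardware
globalization and no IPI), its icache can still hold stale lines for the
page, and the indirect call executes stale code or crashes.
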
Most of the functions called by r4k_on_each_cpu() perform cache
operations exclusively with a single addressing type (virtual address
vs. indexed), so add a type argument and modify the callers to pass in
R4K_USER (user virtual addressing), R4K_KERN (global kernel virtual
addressing) or R4K_INDEX (index into cache).

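The decision this type argument feeds can be modelled in isolation. The
following standalone sketch mirrors the patch's r4k_op_needs_ipi() logic
under the assumed scenario of an SMP system with a CM present (the two
const flags stand in for mips_cm_present() under CONFIG_SMP):

  #include <stdbool.h>
  #include <stdio.h>

  #define R4K_USER  (1 << 0)	/* user virtual addressing   */
  #define R4K_KERN  (1 << 1)	/* kernel virtual addressing */
  #define R4K_INDEX (1 << 2)	/* cache index addressing    */

  /* Assumed: SMP system with a Coherence Manager present. */
  static const bool hit_globalized = true;	/* CM propagates hit ops  */
  static const bool index_globalized = false;	/* ...but not indexed ops */

  /* Mirrors the patch's r4k_op_needs_ipi(). */
  static bool op_needs_ipi(unsigned int type)
  {
  	return (type & R4K_KERN && !hit_globalized) ||
  	       (type & R4K_INDEX && !index_globalized);
  }

  int main(void)
  {
  	printf("R4K_KERN:  %d\n", op_needs_ipi(R4K_KERN));	/* 0: no IPI */
  	printf("R4K_INDEX: %d\n", op_needs_ipi(R4K_INDEX));	/* 1: IPI    */
  	return 0;
  }

Note that R4K_USER never forces an IPI: user-address hit ops would be
ineffective on CPUs where the address is not mapped, which is exactly
the corner case acknowledged by the FIXME in r4k_flush_cache_sigtramp()
below.
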
local_r4k_flush_icache_range() is split up to allow it to be called from
the rest of the kernel, or from r4k_flush_icache_range(), where it will
choose either indexed or hit cache operations based on the size of the
range and the cache sizes.

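That size heuristic reduces to a comparison against the cache sizes. A
standalone model follows; the 32 KB cache sizes are assumed example
values, whereas the kernel uses the icache_size/dcache_size it probed at
boot:

  #include <stdbool.h>
  #include <stdio.h>

  #define R4K_KERN  (1 << 1)
  #define R4K_INDEX (1 << 2)

  /* Example geometry, standing in for the probed values in c-r4k.c. */
  static const unsigned long icache_size = 32 << 10;
  static const unsigned long dcache_size = 32 << 10;
  static const bool cpu_has_ic_fills_f_dc = false;

  /* Keep indexed (IPI-requiring) ops only when hit ops would cost more. */
  static unsigned int flush_type(unsigned long start, unsigned long end)
  {
  	unsigned int type = R4K_KERN | R4K_INDEX;
  	unsigned long size = end - start;
  	unsigned long cache_size = icache_size;

  	if (!cpu_has_ic_fills_f_dc) {
  		/* The dcache must be flushed too, doubling the hit work. */
  		size *= 2;
  		cache_size += dcache_size;
  	}
  	if (size <= cache_size)
  		type &= ~R4K_INDEX;	/* small flush: hit ops suffice */
  	return type;
  }

  int main(void)
  {
  	printf("%x\n", flush_type(0, 24 << 10));	/* 2: R4K_KERN only */
  	printf("%x\n", flush_type(0, 40 << 10));	/* 6: keeps R4K_INDEX */
  	return 0;
  }

With these numbers, a 24 KB range doubles to 48 KB, stays under the
64 KB combined cache size, and drops R4K_INDEX (so no IPI is needed),
while a 40 KB range doubles to 80 KB and keeps it.
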
local_r4k_flush_kernel_vmap_range() is split into two functions, each of
which uses cache operations with a single addressing type, with
r4k_flush_kernel_vmap_range() making the decision whether to use indexed
cache ops or not.

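The resulting dispatch follows the same pattern; a condensed model,
where cpu_has_safe_index_cacheops and dcache_size are assumed values and
the helper name is hypothetical:

  #include <stdbool.h>
  #include <stdio.h>

  #define R4K_KERN  (1 << 1)
  #define R4K_INDEX (1 << 2)

  static const bool cpu_has_safe_index_cacheops = true;	/* assumed */
  static const unsigned long dcache_size = 32 << 10;	/* example value */

  /* Models the choice r4k_flush_kernel_vmap_range() makes between the
   * two new single-addressing-type local functions. */
  static unsigned int vmap_flush_type(unsigned long size)
  {
  	if (cpu_has_safe_index_cacheops && size >= dcache_size)
  		return R4K_INDEX;	/* blast whole dcache by index */
  	return R4K_KERN;		/* ranged hit ops, CM-globalized */
  }

  int main(void)
  {
  	printf("%x\n", vmap_flush_type(4 << 10));	/* 2: hit ops   */
  	printf("%x\n", vmap_flush_type(64 << 10));	/* 4: index ops */
  	return 0;
  }
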
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com>
Cc: linux-mips@linux-mips.org
---
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -40,6 +40,50 @@
 #include <asm/mips-cm.h>
 
 /*
+ * Bits describing what cache ops an IPI callback function may perform.
+ *
+ * R4K_USER -	Virtual user address based cache operations.
+ *		Ineffective on other CPUs.
+ * R4K_KERN -	Virtual kernel address based cache operations (including kmap).
+ *		Effective on other CPUs.
+ * R4K_INDEX -	Index based cache operations.
+ *		Effective on other CPUs.
+ */
+
+#define R4K_USER	BIT(0)
+#define R4K_KERN	BIT(1)
+#define R4K_INDEX	BIT(2)
+
+#ifdef CONFIG_SMP
+/* The Coherence Manager propagates address-based cache ops to other cores */
+#define r4k_hit_globalized	mips_cm_present()
+#define r4k_index_globalized	0
+#else
+/* If there's only 1 CPU, then all cache ops are globalized to that 1 CPU */
+#define r4k_hit_globalized	1
+#define r4k_index_globalized	1
+#endif
+
+/**
+ * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
+ * @type:	Type of cache operations (R4K_USER, R4K_KERN or R4K_INDEX).
+ *
+ * Returns:	1 if the cache operation @type should be done on every core in
+ *		the system.
+ *		0 if the cache operation @type is globalized and only needs to
+ *		be performed on a single CPU.
+ */
+static inline bool r4k_op_needs_ipi(unsigned int type)
+{
+	/*
+	 * If hardware doesn't globalize the required cache ops we must use IPIs
+	 * to do so.
+	 */
+	return (type & R4K_KERN && !r4k_hit_globalized) ||
+	       (type & R4K_INDEX && !r4k_index_globalized);
+}
+
+/*
  * Special Variant of smp_call_function for use by cache functions:
  *
  * o No return value
@@ -48,19 +92,11 @@
  *   primary cache.
  * o doesn't disable interrupts on the local CPU
  */
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
+static inline void r4k_on_each_cpu(unsigned int type,
+				   void (*func) (void *info), void *info)
 {
 	preempt_disable();
-
-	/*
-	 * The Coherent Manager propagates address-based cache ops to other
-	 * cores but not index-based ops. However, r4k_on_each_cpu is used
-	 * in both cases so there is no easy way to tell what kind of op is
-	 * executed to the other cores. The best we can probably do is
-	 * to restrict that call when a CM is not present because both
-	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
-	 */
-	if (!mips_cm_present())
+	if (r4k_op_needs_ipi(type))
 		smp_call_function_many(&cpu_foreign_map, func, info, 1);
 	func(info);
 	preempt_enable();
@@ -456,7 +492,7 @@ static inline void local_r4k___flush_cac
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
+	r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -503,7 +539,7 @@ static void r4k_flush_cache_range(struct
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
+		r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -535,7 +571,7 @@ static void r4k_flush_cache_mm(struct mm
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
+	r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
 }
 
 struct flush_cache_page_args {
@@ -629,7 +665,7 @@ static void r4k_flush_cache_page(struct
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
+	r4k_on_each_cpu(R4K_KERN, local_r4k_flush_cache_page, &args);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -642,18 +678,23 @@ static void r4k_flush_data_cache_page(un
 	if (in_atomic())
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
-		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
+		r4k_on_each_cpu(R4K_KERN, local_r4k_flush_data_cache_page,
+				(void *) addr);
 }
 
 struct flush_icache_range_args {
 	unsigned long start;
 	unsigned long end;
+	unsigned int type;
 };
 
-static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
+static inline void __local_r4k_flush_icache_range(unsigned long start,
+						  unsigned long end,
+						  unsigned int type)
 {
 	if (!cpu_has_ic_fills_f_dc) {
-		if (end - start >= dcache_size) {
+		if (type == R4K_INDEX ||
+		    (type & R4K_INDEX && end - start >= dcache_size)) {
 			r4k_blast_dcache();
 		} else {
 			R4600_HIT_CACHEOP_WAR_IMPL;
@@ -661,7 +702,8 @@ static inline void local_r4k_flush_icach
 		}
 	}
 
-	if (end - start > icache_size)
+	if (type == R4K_INDEX ||
+	    (type & R4K_INDEX && end - start > icache_size))
 		r4k_blast_icache();
 	else {
 		switch (boot_cpu_type()) {
@@ -687,23 +729,59 @@ static inline void local_r4k_flush_icach
 #endif
 }
 
+static inline void local_r4k_flush_icache_range(unsigned long start,
+						unsigned long end)
+{
+	__local_r4k_flush_icache_range(start, end, R4K_KERN | R4K_INDEX);
+}
+
 static inline void local_r4k_flush_icache_range_ipi(void *args)
 {
 	struct flush_icache_range_args *fir_args = args;
 	unsigned long start = fir_args->start;
 	unsigned long end = fir_args->end;
+	unsigned int type = fir_args->type;
 
-	local_r4k_flush_icache_range(start, end);
+	__local_r4k_flush_icache_range(start, end, type);
 }
 
 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
 	struct flush_icache_range_args args;
+	unsigned long size, cache_size;
 
 	args.start = start;
 	args.end = end;
+	args.type = R4K_KERN | R4K_INDEX;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
+	if (in_atomic()) {
+		/*
+		 * We can't do blocking IPI calls from atomic context, so fall
+		 * back to pure address-based cache ops if they globalize.
+		 */
+		if (!r4k_index_globalized && r4k_hit_globalized) {
+			args.type &= ~R4K_INDEX;
+		} else {
+			/* Just do it locally instead. */
+			local_r4k_flush_icache_range(start, end);
+			instruction_hazard();
+			return;
+		}
+	} else if (!r4k_index_globalized && r4k_hit_globalized) {
+		/*
+		 * If address-based cache ops are globalized, then we may be
+		 * able to avoid the IPI for small flushes.
+		 */
+		size = end - start;
+		cache_size = icache_size;
+		if (!cpu_has_ic_fills_f_dc) {
+			size *= 2;
+			cache_size += dcache_size;
+		}
+		if (size <= cache_size)
+			args.type &= ~R4K_INDEX;
+	}
+	r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
 	instruction_hazard();
 }
 
@@ -823,7 +901,12 @@ static void local_r4k_flush_cache_sigtra
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
+	/*
+	 * FIXME this is a bit broken when !r4k_hit_globalized, since the user
+	 * code probably won't be mapped on other CPUs, so if the process is
+	 * migrated, it could end up hitting stale icache lines.
+	 */
+	r4k_on_each_cpu(R4K_USER, local_r4k_flush_cache_sigtramp, (void *)addr);
 }
 
 static void r4k_flush_icache_all(void)
@@ -837,6 +920,15 @@ struct flush_kernel_vmap_range_args {
 	int size;
 };
 
+static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
+{
+	/*
+	 * Aliases only affect the primary caches so don't bother with
+	 * S-caches or T-caches.
+	 */
+	r4k_blast_dcache();
+}
+
 static inline void local_r4k_flush_kernel_vmap_range(void *args)
 {
 	struct flush_kernel_vmap_range_args *vmra = args;
@@ -847,12 +939,8 @@ static inline void local_r4k_flush_kerne
 	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
 	 */
-	if (cpu_has_safe_index_cacheops && size >= dcache_size)
-		r4k_blast_dcache();
-	else {
-		R4600_HIT_CACHEOP_WAR_IMPL;
-		blast_dcache_range(vaddr, vaddr + size);
-	}
+	R4600_HIT_CACHEOP_WAR_IMPL;
+	blast_dcache_range(vaddr, vaddr + size);
 }
 
 static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
@@ -862,7 +950,12 @@ static void r4k_flush_kernel_vmap_range(
 	args.vaddr = (unsigned long) vaddr;
 	args.size = size;
 
-	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+	if (cpu_has_safe_index_cacheops && size >= dcache_size)
+		r4k_on_each_cpu(R4K_INDEX,
+				local_r4k_flush_kernel_vmap_range_index, NULL);
+	else
+		r4k_on_each_cpu(R4K_KERN, local_r4k_flush_kernel_vmap_range,
+				&args);
 }
 
 static inline void rm7k_erratum31(void)