mtrr.c
/*
 * memory-type range registers (MTRRs).
 *
 * due to the possibility of extended addresses (for PAE)
 * as large as 36 bits coming from the e820 memory map and the like,
 * we'll use vlongs to hold addresses and lengths, even though we don't
 * implement PAE in Plan 9.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"

enum {
	/*
	 * MTRR Physical base/mask are indexed by
	 *	MTRRPhys{Base|Mask}N = MTRRPhys{Base|Mask}0 + 2*N
	 */
	MTRRPhysBase0 = 0x200,
	MTRRPhysMask0 = 0x201,
	MTRRDefaultType = 0x2FF,
	MTRRCap = 0xFE,
	Nmtrr = 8,

	/* cpuid extended function codes */
	Exthighfunc = 1ul << 31,
	Extprocsigamd,
	Extprocname0,
	Extprocname1,
	Extprocname2,
	Exttlbl1,
	Extl2,
	Extapm,
	Extaddrsz,

	Paerange = 1LL << 36,
};
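/*
 * Illustrative note (added commentary, not in the original source):
 * with the indexing scheme above, variable-range MTRR 3, for example,
 * lives in MSRs MTRRPhysBase0 + 2*3 = 0x206 and
 * MTRRPhysMask0 + 2*3 = 0x207.
 */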
enum {
	CR4PageGlobalEnable = 1 << 7,
	CR0CacheDisable = 1 << 30,
};

enum {
	Uncacheable = 0,
	Writecomb = 1,
	Unknown1 = 2,
	Unknown2 = 3,
	Writethru = 4,
	Writeprot = 5,
	Writeback = 6,
};

enum {
	Capvcnt = 0xff,		/* mask: # of variable-range MTRRs we have */
	Capwc = 1<<8,		/* flag: have write combining? */
	Capfix = 1<<10,		/* flag: have fixed MTRRs? */
	Deftype = 0xff,		/* default MTRR type */
	Deffixena = 1<<10,	/* fixed-range MTRR enable */
	Defena = 1<<11,		/* MTRR enable */
};
typedef struct Mtrreg Mtrreg;
typedef struct Mtrrop Mtrrop;

struct Mtrreg {
	vlong	base;
	vlong	mask;
};
struct Mtrrop {
	Mtrreg	*reg;
	int	slot;
};

static char *types[] = {
[Uncacheable]	"uc",
[Writecomb]	"wc",
[Unknown1]	"uk1",
[Unknown2]	"uk2",
[Writethru]	"wt",
[Writeprot]	"wp",
[Writeback]	"wb",
		nil
};

static Mtrrop *postedop;
static Rendez oprend;
static char *
type2str(int type)
{
	if(type < 0 || type >= nelem(types))
		return nil;
	return types[type];
}

static int
str2type(char *str)
{
	char **p;

	for(p = types; *p != nil; p++)
		if (strcmp(str, *p) == 0)
			return p - types;
	return -1;
}

static uvlong
physmask(void)
{
	ulong regs[4];
	static vlong mask = -1;

	if (mask != -1)
		return mask;

	cpuid(Exthighfunc, regs);
	if(regs[0] >= Extaddrsz) {			/* ax */
		cpuid(Extaddrsz, regs);
		mask = (1LL << (regs[0] & 0xFF)) - 1;	/* ax */
	}
	mask &= Paerange - 1;				/* x86 sanity */
	return mask;
}
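/*
 * Illustrative example (an assumption, not from the original source):
 * on a CPU whose Extaddrsz cpuid leaf reports 36 physical address bits
 * (0x24 in the low byte of ax), physmask() computes (1LL<<36)-1 =
 * 0xFFFFFFFFF; the cap at Paerange-1 leaves that value unchanged, and
 * it is cached in the static mask for later calls.
 */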
/* limit physical addresses to 36 bits on the x86 */
static void
sanity(Mtrreg *mtrr)
{
	mtrr->base &= Paerange - 1;
	mtrr->mask &= Paerange - 1;
}

static int
ispow2(uvlong ul)
{
	return (ul & (ul - 1)) == 0;
}

/* true if mtrr is valid */
static int
mtrrdec(Mtrreg *mtrr, uvlong *ptr, uvlong *size, int *type)
{
	sanity(mtrr);
	*ptr = mtrr->base & ~(BY2PG-1);
	*type = mtrr->base & 0xff;
	*size = (physmask() ^ (mtrr->mask & ~(BY2PG-1))) + 1;
	return (mtrr->mask >> 11) & 1;
}

static void
mtrrenc(Mtrreg *mtrr, uvlong ptr, uvlong size, int type, int ok)
{
	mtrr->base = ptr | (type & 0xff);
	mtrr->mask = (physmask() & ~(size - 1)) | (ok? 1<<11: 0);
	sanity(mtrr);
}
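/*
 * Worked example (illustrative only, not part of the original source):
 * encoding a 256MB write-combining range at 0xf0000000 on a machine
 * with a 36-bit physmask() of 0xFFFFFFFFF gives
 *	base = 0xf0000000 | Writecomb             = 0xf0000001
 *	mask = (0xFFFFFFFFF & ~0xFFFFFFF) | 1<<11 = 0xFF0000800
 * and mtrrdec() reverses it:
 *	size = (0xFFFFFFFFF ^ 0xFF0000000) + 1 = 0x10000000 (256MB)
 * with the valid bit read back from bit 11 of the mask.
 */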
/*
 * i is the index of the MTRR, and is multiplied by 2 because
 * mask and base offsets are interleaved.
 */
static void
mtrrget(Mtrreg *mtrr, uint i)
{
	if (i >= Nmtrr)
		error("mtrr index out of range");
	rdmsr(MTRRPhysBase0 + 2*i, &mtrr->base);
	rdmsr(MTRRPhysMask0 + 2*i, &mtrr->mask);
	sanity(mtrr);
}

static void
mtrrput(Mtrreg *mtrr, uint i)
{
	if (i >= Nmtrr)
		error("mtrr index out of range");
	sanity(mtrr);
	wrmsr(MTRRPhysBase0 + 2*i, mtrr->base);
	wrmsr(MTRRPhysMask0 + 2*i, mtrr->mask);
}
static void
mtrrop(Mtrrop **op)
{
	int s;
	ulong cr0, cr4;
	vlong def;
	static long bar1, bar2;

	s = splhi();		/* avoid race with mtrrclock */

	/*
	 * wait for all CPUs to sync here, so that the MTRR setup gets
	 * done at roughly the same time on all processors.
	 */
	_xinc(&bar1);
	while(bar1 < conf.nmach)
		microdelay(10);

	cr4 = getcr4();
	putcr4(cr4 & ~CR4PageGlobalEnable);
	cr0 = getcr0();
	wbinvd();
	putcr0(cr0 | CR0CacheDisable);
	wbinvd();
	rdmsr(MTRRDefaultType, &def);
	wrmsr(MTRRDefaultType, def & ~(vlong)Defena);

	mtrrput((*op)->reg, (*op)->slot);

	wbinvd();
	wrmsr(MTRRDefaultType, def);
	putcr0(cr0);
	putcr4(cr4);

	/*
	 * wait for all CPUs to sync up again, so that we don't continue
	 * executing while the MTRRs are still being set up.
	 */
	_xinc(&bar2);
	while(bar2 < conf.nmach)
		microdelay(10);

	*op = nil;
	_xdec(&bar1);
	while(bar1 > 0)
		microdelay(10);
	_xdec(&bar2);
	wakeup(&oprend);
	splx(s);
}

void
mtrrclock(void)			/* called from clock interrupt */
{
	if(postedop != nil)
		mtrrop(&postedop);
}
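/*
 * Descriptive note (added commentary, not in the original source): the
 * CPU that calls mtrr() below runs mtrrop() directly, while every
 * other CPU picks up the posted operation from its clock interrupt via
 * mtrrclock(), so all processors meet at the bar1/bar2 barriers and
 * reprogram their MTRRs together, keeping MTRR state consistent across
 * processors.
 */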
/* if there's an operation still pending, keep sleeping */
static int
opavail(void *)
{
	return postedop == nil;
}

int
mtrr(uvlong base, uvlong size, char *tstr)
{
	int i, vcnt, slot, type, mtype, mok;
	vlong def, cap;
	uvlong mp, msize;
	Mtrreg entry, mtrr;
	Mtrrop op;
	static int tickreg;
	static QLock mtrrlk;

	if(!(m->cpuiddx & Mtrr))
		error("mtrrs not supported");
	if(base & (BY2PG-1) || size & (BY2PG-1) || size == 0)
		error("mtrr base or size not 4k aligned or zero size");
	if(base + size >= Paerange)
		error("mtrr range exceeds 36 bits");
	if(!ispow2(size))
		error("mtrr size not power of 2");
	if(base & (size - 1))
		error("mtrr base not naturally aligned");
	if((type = str2type(tstr)) == -1)
		error("mtrr bad type");

	rdmsr(MTRRCap, &cap);
	rdmsr(MTRRDefaultType, &def);

	switch(type){
	default:
		error("mtrr unknown type");
		break;
	case Writecomb:
		if(!(cap & Capwc))
			error("mtrr type wc (write combining) unsupported");
		/* fallthrough */
	case Uncacheable:
	case Writethru:
	case Writeprot:
	case Writeback:
		break;
	}

	qlock(&mtrrlk);
	if(waserror()){			/* don't leak mtrrlk if error() is raised below */
		qunlock(&mtrrlk);
		nexterror();
	}
	slot = -1;
	vcnt = cap & Capvcnt;
	for(i = 0; i < vcnt; i++){
		mtrrget(&mtrr, i);
		mok = mtrrdec(&mtrr, &mp, &msize, &mtype);
		/* reuse any entry for addresses above 4GB */
		if(!mok || (mp == base && msize == size) || mp >= (1LL<<32)){
			slot = i;
			break;
		}
	}
	if(slot == -1)
		error("no free mtrr slots");

	while(postedop != nil)
		sleep(&oprend, opavail, 0);
	mtrrenc(&entry, base, size, type, 1);
	op.reg = &entry;
	op.slot = slot;
	postedop = &op;
	mtrrop(&postedop);
	poperror();
	qunlock(&mtrrlk);

	return 0;
}
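/*
 * Illustrative usage (an assumption, not part of this file): a device
 * driver that wants a write-combining frame buffer could call, e.g.,
 *	mtrr(fbpa, fbsize, "wc");
 * where fbpa and fbsize are hypothetical names for the frame buffer's
 * physical base and its power-of-two aperture size.  Since failures
 * are reported through error(), a caller in process context would
 * normally bracket the call with waserror()/poperror().
 */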
int
mtrrprint(char *buf, long bufsize)
{
	int i, vcnt, type;
	long n;
	uvlong base, size;
	vlong cap, def;
	Mtrreg mtrr;

	n = 0;
	if(!(m->cpuiddx & Mtrr))
		return 0;
	rdmsr(MTRRCap, &cap);
	rdmsr(MTRRDefaultType, &def);
	n += snprint(buf+n, bufsize-n, "cache default %s\n",
		type2str(def & Deftype));
	vcnt = cap & Capvcnt;
	for(i = 0; i < vcnt; i++){
		mtrrget(&mtrr, i);
		if (mtrrdec(&mtrr, &base, &size, &type))
			n += snprint(buf+n, bufsize-n,
				"cache 0x%llux %llud %s\n",
				base, size, type2str(type));
	}
	return n;
}
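/*
 * Illustrative output (assumed, not from the original source): with a
 * write-back default and one write-combining range programmed as in
 * the earlier example, mtrrprint() would emit lines of the form
 *	cache default wb
 *	cache 0xf0000000 268435456 wc
 * (268435456 being 0x10000000, the 256MB size, printed in decimal).
 */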