/* coproc.c */
/*
 * arm co-processors
 * mainly to cope with arm hard-wiring register numbers into instructions.
 *
 * CP15 (system control) is the one that gets used the most in practice.
 * these routines must be callable from KZERO space or the 0 segment.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "arm.h"
  15. enum {
  16. /* alternates: 0xe12fff1e BX (R14); last e is R14 */
  17. /* 0xe28ef000 B 0(R14); second e is R14 (ken) */
  18. Retinst = 0xe1a0f00e, /* MOV R14, R15 */
  19. Opmask = MASK(3),
  20. Regmask = MASK(4),
  21. };
  22. typedef ulong (*Pufv)(void);
  23. typedef void (*Pvfu)(ulong);
  24. static void
  25. setupcpop(ulong instr[2], ulong opcode, int cp, int op1, int crn, int crm,
  26. int op2)
  27. {
  28. ulong instrsz[2];
  29. op1 &= Opmask;
  30. op2 &= Opmask;
  31. crn &= Regmask;
  32. crm &= Regmask;
  33. cp &= Regmask;
  34. instr[0] = opcode | op1 << 21 | crn << 16 | cp << 8 | op2 << 5 | crm;
  35. instr[1] = Retinst;
  36. cachedwbse(instr, sizeof instrsz);
  37. cacheiinv();
  38. }
  39. ulong
  40. cprd(int cp, int op1, int crn, int crm, int op2)
  41. {
  42. int s, r;
  43. volatile ulong instr[2];
  44. Pufv fp;
  45. s = splhi();
  46. /*
  47. * MRC. return value will be in R0, which is convenient.
  48. * Rt will be R0.
  49. */
  50. setupcpop(instr, 0xee100010, cp, op1, crn, crm, op2);
  51. fp = (Pufv)instr;
  52. r = fp();
  53. splx(s);
  54. return r;
  55. }
  56. void
  57. cpwr(int cp, int op1, int crn, int crm, int op2, ulong val)
  58. {
  59. int s;
  60. volatile ulong instr[2];
  61. Pvfu fp;
  62. s = splhi();
  63. setupcpop(instr, 0xee000010, cp, op1, crn, crm, op2); /* MCR, Rt is R0 */
  64. fp = (Pvfu)instr;
  65. fp(val);
  66. coherence();
  67. splx(s);
  68. }
  69. ulong
  70. cprdsc(int op1, int crn, int crm, int op2)
  71. {
  72. return cprd(CpSC, op1, crn, crm, op2);
  73. }
  74. void
  75. cpwrsc(int op1, int crn, int crm, int op2, ulong val)
  76. {
  77. cpwr(CpSC, op1, crn, crm, op2, val);
  78. }
/* floating point */

/* fp coproc control */
  81. static void
  82. setupfpctlop(ulong instr[2], int opcode, int fpctlreg)
  83. {
  84. ulong instrsz[2];
  85. fpctlreg &= Nfpctlregs - 1;
  86. instr[0] = opcode | fpctlreg << 16 | 0 << 12 | CpFP << 8;
  87. instr[1] = Retinst;
  88. cachedwbse(instr, sizeof instrsz);
  89. cacheiinv();
  90. }
  91. ulong
  92. fprd(int fpreg)
  93. {
  94. int s, r;
  95. volatile ulong instr[2];
  96. Pufv fp;
  97. if (!m->fpon) {
  98. dumpstack();
  99. panic("fprd: cpu%d fpu off", m->machno);
  100. }
  101. s = splhi();
  102. /*
  103. * VMRS. return value will be in R0, which is convenient.
  104. * Rt will be R0.
  105. */
  106. setupfpctlop(instr, 0xeef00010, fpreg);
  107. fp = (Pufv)instr;
  108. r = fp();
  109. splx(s);
  110. return r;
  111. }
  112. void
  113. fpwr(int fpreg, ulong val)
  114. {
  115. int s;
  116. volatile ulong instr[2];
  117. Pvfu fp;
  118. /* fpu might be off and this VMSR might enable it */
  119. s = splhi();
  120. setupfpctlop(instr, 0xeee00010, fpreg); /* VMSR, Rt is R0 */
  121. fp = (Pvfu)instr;
  122. fp(val);
  123. coherence();
  124. splx(s);
  125. }
/* fp register access; don't bother with single precision */
  127. static void
  128. setupfpop(ulong instr[2], int opcode, int fpreg)
  129. {
  130. ulong instrsz[2];
  131. instr[0] = opcode | 0 << 16 | (fpreg & (16 - 1)) << 12;
  132. if (fpreg >= 16)
  133. instr[0] |= 1 << 22; /* high bit of dfp reg # */
  134. instr[1] = Retinst;
  135. cachedwbse(instr, sizeof instrsz);
  136. cacheiinv();
  137. }
  138. ulong
  139. fpsavereg(int fpreg, uvlong *fpp)
  140. {
  141. int s, r;
  142. volatile ulong instr[2];
  143. ulong (*fp)(uvlong *);
  144. if (!m->fpon)
  145. panic("fpsavereg: cpu%d fpu off", m->machno);
  146. s = splhi();
  147. /*
  148. * VSTR. pointer will be in R0, which is convenient.
  149. * Rt will be R0.
  150. */
  151. setupfpop(instr, 0xed000000 | CpDFP << 8, fpreg);
  152. fp = (ulong (*)(uvlong *))instr;
  153. r = fp(fpp);
  154. splx(s);
  155. coherence();
  156. return r; /* not too meaningful */
  157. }
  158. void
  159. fprestreg(int fpreg, uvlong val)
  160. {
  161. int s;
  162. volatile ulong instr[2];
  163. void (*fp)(uvlong *);
  164. if (!m->fpon)
  165. panic("fprestreg: cpu%d fpu off", m->machno);
  166. s = splhi();
  167. setupfpop(instr, 0xed100000 | CpDFP << 8, fpreg); /* VLDR, Rt is R0 */
  168. fp = (void (*)(uvlong *))instr;
  169. fp(&val);
  170. coherence();
  171. splx(s);
  172. }