l.s

/*
 * Memory and machine-specific definitions. Used in C and assembler.
 */

/*
 * Sizes
 */
#define	BI2BY		8		/* bits per byte */
#define	BI2WD		32		/* bits per word */
#define	BY2WD		4		/* bytes per word */
#define	BY2PG		4096		/* bytes per page */
#define	WD2PG		(BY2PG/BY2WD)	/* words per page */
#define	PGSHIFT		12		/* log(BY2PG) */
#define	PGROUND(s)	(((s)+(BY2PG-1))&~(BY2PG-1))
#define	MAXMACH		1		/* max # cpus system can run */
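/*
 * PGROUND rounds a byte count up to a page boundary; for example,
 * with BY2PG 4096, PGROUND(0x1001) == 0x2000 and PGROUND(0x1000) == 0x1000:
 * adding BY2PG-1 and masking with ~(BY2PG-1) clears the low PGSHIFT bits.
 */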
/*
 * Time
 */
#define	HZ		20				/* clock frequency */
#define	MS2HZ		(1000/HZ)			/* milliseconds per clock tick */
#define	TK2SEC(t)	((t)/HZ)			/* ticks to seconds */
#define	TK2MS(t)	((((ulong)(t))*1000)/HZ)	/* ticks to milliseconds */
#define	MS2TK(t)	((((ulong)(t))*HZ)/1000)	/* milliseconds to ticks */
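/*
 * Worked example: with HZ 20 a tick is MS2HZ == 50 ms,
 * so TK2MS(40) == 2000 and MS2TK(2000) == 40.
 */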
/*
 * PSR bits
 */
#define	PSREC		0x00002000
#define	PSREF		0x00001000
#define	PSRSUPER	0x00000080
#define	PSRPSUPER	0x00000040
#define	PSRET		0x00000020
#define	SPL(n)		((n)<<8)
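/*
 * These match the SPARC V8 PSR layout: ET is bit 5, PS bit 6, S bit 7,
 * PIL bits 8-11 (hence SPL(n)), EF bit 12, EC bit 13, and CWP bits 0-4
 * (masked off with ~0x1F below to force window 0 in saved copies).
 */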
/*
 * Magic registers
 */
#define	MACH		6	/* R6 is m-> */
#define	USER		5	/* R5 is u-> */
/*
 * Fundamental addresses
 */
#define	USERADDR	0xE0000000
#define	UREGADDR	(USERADDR+BY2PG-((32+6)*BY2WD))
#define	BOOTSTACK	(KTZERO-0*BY2PG)
#define	TRAPS		(KTZERO-2*BY2PG)
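/*
 * UREGADDR places a Ureg at the very top of the page mapped at
 * USERADDR: 32+6 words, i.e. R0-R31 plus Y, TBR, PSR, nPC, PC and a
 * pad word, matching the 4*(32+6) frame built in traplink below.
 */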
/*
 * MMU
 */
#define	VAMASK		0x3FFFFFFF
#define	NPMEG		(1<<12)
#define	BY2SEGM		(1<<18)
#define	PG2SEGM		(1<<6)
#define	NTLBPID		(1+NCONTEXT)	/* TLBPID 0 is unallocated */
#define	NCONTEXT	8
#define	CONTEXT		0x30000000	/* in ASI 2 */
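/*
 * Consistency check on the segment geometry: BY2SEGM is 1<<18 (256K)
 * and PG2SEGM is 1<<6, and indeed 64 pages * 4096 bytes == 256K.
 */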
/*
 * MMU regions
 */
#define	INVALIDSEGM	0xFFFC0000	/* highest seg of VA reserved as invalid */
#define	INVALIDPMEG	0x7F
#define	SCREENSEGM	0xFFF80000
#define	SCREENPMEG	0x7E
#define	ROMSEGM		0xFFE80000
#define	ROMEND		0xFFEA0000
#define	PG2ROM		((ROMEND-ROMSEGM)/BY2PG)
#define	IOSEGM0		ROMSEGM		/* see mmuinit() */
#define	NIOSEGM		((SCREENSEGM-ROMSEGM)/BY2SEGM)
#define	IOPMEG0		(SCREENPMEG-NIOSEGM)
#define	IOSEGM		ROMEND
#define	IOEND		SCREENSEGM
#define	TOPPMEG		IOPMEG0
/*
 * MMU entries
 */
#define	PTEVALID	(1<<31)
#define	PTERONLY	(0<<30)
#define	PTEWRITE	(1<<30)
#define	PTEKERNEL	(1<<29)
#define	PTENOCACHE	(1<<28)
#define	PTEMAINMEM	(0<<26)
#define	PTEIO		(1<<26)
#define	PTEACCESS	(1<<25)
#define	PTEMODIFY	(1<<24)
#define	PTEUNCACHED	0
#define	PTEMAPMEM	(1024*1024)
#define	PTEPERTAB	(PTEMAPMEM/BY2PG)
#define	SEGMAPSIZE	16
#define	INVALIDPTE	0
#define	PPN(pa)		(((pa)>>12)&0xFFFF)
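/*
 * PPN extracts a 16-bit physical page number, e.g.
 * PPN(0x00FED000) == 0x0FED.
 */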
/*
 * Weird addresses in various ASI's
 */
#define	CACHETAGS	0x80000000	/* ASI 2 */
#define	CACHEDATA	0x90000000	/* ASI 2 */
#define	SER		0x60000000	/* ASI 2 */
#define	SEVAR		0x60000004	/* ASI 2 */
#define	ASER		0x60000008	/* ASI 2 */
#define	ASEVAR		0x6000000C	/* ASI 2 */
#define	ENAB		0x40000000	/* ASI 2 */
#define	ENABCACHE	0x10
#define	ENABRESET	0x04
/*
 * Virtual addresses
 */
#define	VTAG(va)	(((va)>>22)&0x03F)
#define	VPN(va)		(((va)>>13)&0x1FF)
#define	PARAM		((char*)0x40500000)
#define	TLBFLUSH_	0x01
/*
 * Address spaces
 */
#define	UZERO		0x00000000		/* base of user address space */
#define	UTZERO		(UZERO+BY2PG)		/* first address in user text */
#define	TSTKTOP		0x10000000		/* end of new stack in sysexec */
#define	TSTKSIZ		32
#define	USTKTOP		(TSTKTOP-TSTKSIZ*BY2PG)	/* byte just beyond user stack */
#define	KZERO		0xE0000000		/* base of kernel address space */
#define	KTZERO		(KZERO+4*BY2PG)		/* first address in kernel text */
#define	USTKSIZE	(4*1024*1024)		/* size of user stack */
#define	MACHSIZE	4096

#define	isphys(x)	(((ulong)(x)&0xF0000000) == KZERO)
#define	SYSPSR		(SPL(0x0)|PSREF|PSRSUPER|0)
#define	NOOP		OR R0, R0; OR R0, R0; OR R0, R0
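/*
 * NOOP expands to three OR R0,R0 instructions: SPARC allows up to
 * three delay instructions after a write to the PSR before the new
 * value is guaranteed visible, so every MOVW ...,PSR below is
 * followed by NOOP before anything depends on the change.
 */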
TEXT	start(SB), $-4

	/* get virtual, fast */
	/* we are executing in segment 0, mapped to pmeg 0. stack is there too */
	/* get virtual by mapping segment(KZERO) to pmeg 0, and the next segment to pmeg 1 */
	MOVW	$KZERO, R7
	MOVB	R0, (R7, 3)		/* segment map entry via ASI 3 */
	MOVW	$(KZERO+BY2SEGM), R7
	MOVW	$1, R8
	MOVB	R8, (R7, 3)

	/* now mapped correctly. jmpl to where we want to be */
	MOVW	$setSB(SB), R2
	MOVW	$startvirt(SB), R7
	JMPL	(R7)
	MOVW	$_mul(SB), R0	/* touch _mul etc.; doesn't need to execute */
	RETURN			/* can't get here */
TEXT	startvirt(SB), $-4

	MOVW	$BOOTSTACK, R1
	MOVW	$(SPL(0xF)|PSREF|PSRSUPER), R7
	MOVW	R7, PSR
	MOVW	$(0x35<<22), R7	/* NVM OFM DZM AU */
	MOVW	R7, fsr+0(SB)
	MOVW	fsr+0(SB), FSR
	FMOVD	$0.5, F26	/* 0.5 -> F26 */
	FSUBD	F26, F26, F24	/* 0.0 -> F24 */
	FADDD	F26, F26, F28	/* 1.0 -> F28 */
	FADDD	F28, F28, F30	/* 2.0 -> F30 */
	FMOVD	F24, F0
	FMOVD	F24, F2
	FMOVD	F24, F4
	FMOVD	F24, F6
	FMOVD	F24, F8
	FMOVD	F24, F10
	FMOVD	F24, F12
	FMOVD	F24, F14
	FMOVD	F24, F16
	FMOVD	F24, F18
	FMOVD	F24, F20
	FMOVD	F24, F22
	MOVW	$mach0(SB), R(MACH)
	/* MOVW	$0x8, R7 /**/
	MOVW	R0, WIM
	JMPL	main(SB)
	MOVW	(R0), R0
	RETURN
TEXT	swap1(SB), $0

	TAS	(R7), R7	/* LDSTUB, thank you ken */
	RETURN

TEXT	swap1_should_work(SB), $0

	MOVW	R7, R8
	MOVW	$1, R7
	SWAP	(R8), R7
	RETURN

TEXT	swap1x(SB), $0

	MOVW	PSR, R9
	MOVW	R9, R10
	AND	$~PSRET, R10	/* BUG: book says this is buggy */
	MOVW	R10, PSR
	NOOP
	MOVW	(R7), R7
	CMP	R7, R0
	BNE	was1
	MOVW	$1, R10
	MOVW	R10, (R8)
was1:
	MOVW	R9, PSR
	RETURN
TEXT	spllo(SB), $0

	MOVW	PSR, R7
	MOVW	R7, R10
	OR	$PSRET, R10
	MOVW	R10, PSR
	NOOP
	RETURN

TEXT	splhi(SB), $0

	MOVW	R15, 4(R(MACH))	/* save PC in m->splpc */
	MOVW	PSR, R7
	MOVW	R7, R10
	AND	$~PSRET, R10	/* BUG: book says this is buggy */
	MOVW	R10, PSR
	NOOP
	RETURN

TEXT	splx(SB), $0

	MOVW	R15, 4(R(MACH))	/* save PC in m->splpc */
	MOVW	R7, PSR		/* BUG: book says this is buggy */
	NOOP
	RETURN

TEXT	spldone(SB), $0

	RETURN
TEXT	touser(SB), $0

	MOVW	$(SYSPSR&~PSREF), R8
	MOVW	R8, PSR
	NOOP
	MOVW	R7, R1
	SAVE	R0, R0			/* RETT is implicit RESTORE */
	MOVW	$(UTZERO+32), R7	/* PC; header appears in text */
	MOVW	$(UTZERO+32+4), R8	/* nPC */
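	/*
	 * UTZERO+32 skips the 32-byte Plan 9 a.out header mapped at the
	 * front of the text segment; nPC is the following instruction.
	 */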
	RETT	R7, R8

TEXT	rfnote(SB), $0

	MOVW	R7, R1		/* 1st arg is &uregpointer */
	ADD	$4, R1		/* point at ureg */
	JMP	restore
TEXT	traplink(SB), $-4

	/* R8 to R23 are free to play with */
	/* R17 contains PC, R18 contains nPC */
	/* R19 has PSR loaded from vector code */
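	/*
	 * Ureg frame layout built here and in syslink: 38 words, offsets
	 * 4*0..4*31 for R0-R31, then 4*(32+0) Y, 4*(32+1) TBR, 4*(32+2) PSR,
	 * 4*(32+3) nPC, 4*(32+4) PC, and one pad word; SP ends up pointing
	 * at the base, which is also the Ureg* handed to trap() and syscall().
	 */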
	ANDCC	$PSRPSUPER, R19, R0
	BE	usertrap

kerneltrap:
	/*
	 * Interrupt or fault from kernel
	 */
	ANDN	$7, R1, R20			/* doubleword align */
	MOVW	R1, (0-(4*(32+6))+(4*1))(R20)	/* save R1=SP */
	/* really clumsy: store these in Ureg so can be restored below */
	MOVW	R2, (0-(4*(32+6))+(4*2))(R20)	/* SB */
	MOVW	R5, (0-(4*(32+6))+(4*5))(R20)	/* USER */
	MOVW	R6, (0-(4*(32+6))+(4*6))(R20)	/* MACH */
	SUB	$(4*(32+6)), R20, R1

trap1:
	MOVW	Y, R20
	MOVW	R20, (4*(32+0))(R1)	/* Y */
	MOVW	TBR, R20
	MOVW	R20, (4*(32+1))(R1)	/* TBR */
	AND	$~0x1F, R19		/* force CWP=0 */
	MOVW	R19, (4*(32+2))(R1)	/* PSR */
	MOVW	R18, (4*(32+3))(R1)	/* nPC */
	MOVW	R17, (4*(32+4))(R1)	/* PC */
	MOVW	R0, (4*0)(R1)
	MOVW	R3, (4*3)(R1)
	MOVW	R4, (4*4)(R1)
	MOVW	R7, (4*7)(R1)
	RESTORE	R0, R0
	/* now our registers R8-R31 are same as before trap */
	/* save registers two at a time */
	MOVD	R8, (4*8)(R1)
	MOVD	R10, (4*10)(R1)
	MOVD	R12, (4*12)(R1)
	MOVD	R14, (4*14)(R1)
	MOVD	R16, (4*16)(R1)
	MOVD	R18, (4*18)(R1)
	MOVD	R20, (4*20)(R1)
	MOVD	R22, (4*22)(R1)
	MOVD	R24, (4*24)(R1)
	MOVD	R26, (4*26)(R1)
	MOVD	R28, (4*28)(R1)
	MOVD	R30, (4*30)(R1)
	/* SP and SB and u and m are already set; away we go */
	MOVW	R1, R7		/* pointer to Ureg */
	SUB	$8, R1
	MOVW	$SYSPSR, R8
	MOVW	R8, PSR
	NOOP
	JMPL	trap(SB)
	ADD	$8, R1

restore:
	MOVW	(4*(32+2))(R1), R8	/* PSR */
	MOVW	R8, PSR
	NOOP
	MOVD	(4*30)(R1), R30
	MOVD	(4*28)(R1), R28
	MOVD	(4*26)(R1), R26
	MOVD	(4*24)(R1), R24
	MOVD	(4*22)(R1), R22
	MOVD	(4*20)(R1), R20
	MOVD	(4*18)(R1), R18
	MOVD	(4*16)(R1), R16
	MOVD	(4*14)(R1), R14
	MOVD	(4*12)(R1), R12
	MOVD	(4*10)(R1), R10
	MOVD	(4*8)(R1), R8
	SAVE	R0, R0
	MOVD	(4*6)(R1), R6
	MOVD	(4*4)(R1), R4
	MOVD	(4*2)(R1), R2
	MOVW	(4*(32+0))(R1), R20	/* Y */
	MOVW	R20, Y
	MOVW	(4*(32+4))(R1), R17	/* PC */
	MOVW	(4*(32+3))(R1), R18	/* nPC */
	MOVW	(4*1)(R1), R1		/* restore R1=SP */
	RETT	R17, R18
usertrap:
	/*
	 * Interrupt or fault from user
	 */
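	/*
	 * Traps from user mode switch to the per-process kernel stack in
	 * the page at USERADDR, saving the user's SP/SB/USER/MACH in the
	 * Ureg before reloading R(USER) and R(MACH) for the kernel.
	 */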
	MOVW	R1, R8
	MOVW	R2, R9
	MOVW	$setSB(SB), R2
	MOVW	$(USERADDR+BY2PG), R1
	MOVW	R8, (0-(4*(32+6))+(4*1))(R1)	/* save R1=SP */
	MOVW	R9, (0-(4*(32+6))+(4*2))(R1)	/* save R2=SB */
	MOVW	R5, (0-(4*(32+6))+(4*5))(R1)	/* save R5=USER */
	MOVW	R6, (0-(4*(32+6))+(4*6))(R1)	/* save R6=MACH */
	MOVW	$USERADDR, R(USER)
	MOVW	$mach0(SB), R(MACH)
	SUB	$(4*(32+6)), R1
	JMP	trap1
TEXT	syslink(SB), $-4

	/* R8 to R23 are free to play with */
	/* R17 contains PC, R18 contains nPC */
	/* R19 has PSR loaded from vector code */
	/* assume user did it; syscall checks */
	MOVW	R1, R8
	MOVW	R2, R9
	MOVW	$setSB(SB), R2
	MOVW	$(USERADDR+BY2PG), R1
	MOVW	R8, (0-(4*(32+6))+4)(R1)	/* save R1=SP */
	SUB	$(4*(32+6)), R1
	MOVW	R9, (4*2)(R1)	/* save R2=SB */
	MOVW	R3, (4*3)(R1)	/* global register */
	MOVD	R4, (4*4)(R1)	/* global register, R5=USER */
	MOVD	R6, (4*6)(R1)	/* save R6=MACH, R7=syscall# */
	MOVW	$USERADDR, R(USER)
	MOVW	$mach0(SB), R(MACH)
	MOVW	TBR, R20
	MOVW	R20, (4*(32+1))(R1)	/* TBR */
	AND	$~0x1F, R19
	MOVW	R19, (4*(32+2))(R1)	/* PSR */
	MOVW	R18, (4*(32+3))(R1)	/* nPC */
	MOVW	R17, (4*(32+4))(R1)	/* PC */
	RESTORE	R0, R0
	/* now our registers R8-R31 are same as before trap */
	MOVW	R15, (4*15)(R1)
	/* SP and SB and u and m are already set; away we go */
	MOVW	R1, R7	/* pointer to Ureg */
	SUB	$8, R1
	MOVW	$SYSPSR, R8
	MOVW	R8, PSR
	JMPL	syscall(SB)
	/* R7 contains return value from syscall */
	ADD	$8, R1
	MOVW	(4*(32+2))(R1), R8	/* PSR */
	MOVW	R8, PSR
	NOOP
	MOVW	(4*15)(R1), R15
	SAVE	R0, R0
	MOVW	(4*6)(R1), R6
	MOVD	(4*4)(R1), R4
	MOVD	(4*2)(R1), R2
	MOVW	(4*(32+4))(R1), R17	/* PC */
	MOVW	(4*(32+3))(R1), R18	/* nPC */
	MOVW	(4*1)(R1), R1		/* restore R1=SP */
	RETT	R17, R18
TEXT	puttbr(SB), $0

	MOVW	R7, TBR
	NOOP
	RETURN

TEXT	gettbr(SB), $0

	MOVW	TBR, R7
	RETURN

TEXT	r1(SB), $0

	MOVW	R1, R7
	RETURN

TEXT	getwim(SB), $0

	MOVW	WIM, R7
	RETURN

TEXT	setlabel(SB), $0

	MOVW	R1, (R7)
	MOVW	R15, 4(R7)
	MOVW	$0, R7
	RETURN

TEXT	gotolabel(SB), $0

	MOVW	(R7), R1
	MOVW	4(R7), R15
	MOVW	$1, R7
	RETURN

TEXT	putcxsegm(SB), $0

	MOVW	R7, R8		/* context */
	MOVW	4(FP), R9	/* segment addr */
	MOVW	8(FP), R10	/* segment value */
	MOVW	$0xFFE80118, R7
	JMPL	(R7)
	RETURN

TEXT	getpsr(SB), $0

	MOVW	PSR, R7
	RETURN
/*
 * Alternate-space accessors: the suffix is the ASI, e.g. getw2/putw2
 * read or write a word in ASI 2, putw4 in ASI 4, and putwC/putwD/putwE
 * in ASIs 0xC-0xE.
 */
TEXT	putcxreg(SB), $0

	MOVW	$CONTEXT, R8
	MOVB	R7, (R8, 2)
	RETURN

TEXT	putb2(SB), $0

	MOVW	4(FP), R8
	MOVB	R8, (R7, 2)
	RETURN

TEXT	getb2(SB), $0

	MOVB	(R7, 2), R7
	RETURN

TEXT	getw2(SB), $0

	MOVW	(R7, 2), R7
	RETURN

TEXT	putw2(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 2)
	RETURN

TEXT	putw4(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 4)
	RETURN

TEXT	getw4(SB), $0

	MOVW	(R7, 4), R7
	RETURN

TEXT	putwC(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xC)
	RETURN

TEXT	putwD(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xD)
	RETURN
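/*
 * putwD16 and putwE16 below are 16-fold unrolled forms of putwD and
 * putwE: the same alternate-space store repeated at 16-byte strides,
 * presumably covering 16 cache lines per call.
 */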
TEXT	putwD16(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xD)
	RETURN

TEXT	putwE(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xE)
	RETURN

TEXT	putwE16(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	ADD	$(1<<4), R7
	MOVW	R8, (R7, 0xE)
	RETURN

TEXT	putsegm(SB), $0

	MOVW	4(FP), R8
	MOVW	R8, (R7, 3)
	RETURN
/*
 * in savefpregs and restfpregs, incoming R7 points to doubleword
 * below where F0 will go; doubleword align in and backfill FSR
 */
TEXT	savefpregs(SB), $0

	ADD	$8, R7
	ANDN	$7, R7		/* now MOVD-aligned */
	MOVW	FSR, -4(R7)
	MOVD	F0, (0*4)(R7)
	MOVD	F2, (2*4)(R7)
	MOVD	F4, (4*4)(R7)
	MOVD	F6, (6*4)(R7)
	MOVD	F8, (8*4)(R7)
	MOVD	F10, (10*4)(R7)
	MOVD	F12, (12*4)(R7)
	MOVD	F14, (14*4)(R7)
	MOVD	F16, (16*4)(R7)
	MOVD	F18, (18*4)(R7)
	MOVD	F20, (20*4)(R7)
	MOVD	F22, (22*4)(R7)
	MOVD	F24, (24*4)(R7)
	MOVD	F26, (26*4)(R7)
	MOVD	F28, (28*4)(R7)
	MOVD	F30, (30*4)(R7)
	MOVW	PSR, R8
	ANDN	$PSREF, R8	/* disable FPU */
	MOVW	R8, PSR
	RETURN

TEXT	restfpregs(SB), $0

	MOVW	PSR, R8
	OR	$PSREF, R8	/* enable FPU */
	MOVW	R8, PSR
	ADD	$8, R7
	ANDN	$7, R7		/* now MOVD-aligned */
	OR	R0, R0
	MOVW	-4(R7), FSR
	MOVD	(0*4)(R7), F0
	MOVD	(2*4)(R7), F2
	MOVD	(4*4)(R7), F4
	MOVD	(6*4)(R7), F6
	MOVD	(8*4)(R7), F8
	MOVD	(10*4)(R7), F10
	MOVD	(12*4)(R7), F12
	MOVD	(14*4)(R7), F14
	MOVD	(16*4)(R7), F16
	MOVD	(18*4)(R7), F18
	MOVD	(20*4)(R7), F20
	MOVD	(22*4)(R7), F22
	MOVD	(24*4)(R7), F24
	MOVD	(26*4)(R7), F26
	MOVD	(28*4)(R7), F28
	MOVD	(30*4)(R7), F30
	ANDN	$PSREF, R8
	MOVW	R8, PSR
	RETURN
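/*
 * clearfpintr drains the floating-point deferred-trap queue: each
 * MOVD from FQ pops one queued address/instruction pair, and bit 13
 * of the FSR (qne) stays set while entries remain.
 */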
TEXT	clearfpintr(SB), $0

	MOVW	$fpq+BY2WD(SB), R7
	ANDN	$0x7, R7	/* must be D aligned */
	MOVW	$fsr+0(SB), R9
clrq:
	MOVD	FQ, (R7)
	MOVW	FSR, (R9)
	MOVW	(R9), R8
	AND	$(1<<13), R8	/* queue not empty? */
	BNE	clrq
	RETURN

TEXT	getfsr(SB), $0

	MOVW	$fsr+0(SB), R7
	MOVW	FSR, (R7)
	MOVW	(R7), R7
	RETURN
GLOBL	mach0+0(SB), $MACHSIZE
GLOBL	fpq+0(SB), $(3*BY2WD)
GLOBL	fsr+0(SB), $BY2WD