
/*
 * tegra 2 SoC machine assist
 * dual arm cortex-a9 processors
 *
 * ARM v7 arch. ref. man. §B1.3.3 says that we don't need barriers
 * around writes to CPSR.
 *
 * LDREX/STREX use an exclusive monitor, which is part of the data cache unit
 * for the L1 cache, so they won't work right if the L1 cache is disabled.
 */

#include "arm.s"

#define LDREX(fp,t)	WORD $(0xe<<28|0x01900f9f | (fp)<<16 | (t)<<12)
/* `The order of operands is from left to right in dataflow order' - asm man */
#define STREX(f,tp,r)	WORD $(0xe<<28|0x01800f90 | (tp)<<16 | (r)<<12 | (f)<<0)
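/*
 * e.g. LDREX(5,7) assembles to 0xe<<28 | 0x01900f9f | 5<<16 | 7<<12
 * = 0xe1957f9f, the encoding of LDREX R7, [R5]; STREX(2,5,4) yields
 * 0xe1854f92, i.e. STREX R4, R2, [R5], with the status bit landing in R4.
 */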
#define MAXMB	(KiB-1)			/* last MB has vectors */
#define TMPSTACK (DRAMSIZE - 64*MiB)	/* used only during cpu startup */
/* tas/cas strex debugging limits; started at 10000 */
#define MAXSC	100000

GLOBL	testmem(SB), $4

/*
 * Entered here from Das U-Boot or another Plan 9 kernel with MMU disabled.
 * Until the MMU is enabled it is OK to call functions provided
 * they are within ±32MiB relative and do not require any
 * local variables or more than one argument (i.e. there is
 * no stack).
 */
TEXT _start(SB), 1, $-4
	CPSMODE(PsrMsvc)
	CPSID				/* interrupts off */
	CPSAE
	SETEND(0)			/* little-endian */
	BARRIERS
	CLREX
	SETZSB

	MOVW	CPSR, R0
	ORR	$PsrDfiq, R0
	MOVW	R0, CPSR

	/* invalidate i-cache and branch-target cache */
	MTCP	CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
	BARRIERS

	/* put cpus other than 0 to sleep until cpu 0 is ready */
	CPUID(R1)
	BEQ	cpuinit

	/* not cpu 0 */
	PUTC('Z')
	PUTC('Z')
	BARRIERS
dowfi:
	WFI
	MOVW	cpus_proceed(SB), R1
	CMP	$0, R1
	BEQ	dowfi
	BL	cpureset(SB)
	B	dowfi

cpuinit:
	DELAY(printloopret, 1)
	PUTC('\r')
	DELAY(printloopnl, 1)
	PUTC('\n')
	DELAY(printloops, 1)
	PUTC('P')
	/* disable the PL310 L2 cache on cpu0 */
	MOVW	$(PHYSL2BAG+0x100), R1	/* pl310 control reg. */
	MOVW	$0, R2
	MOVW	R2, (R1)
	BARRIERS
	/* invalidate it; the invalidate-by-way reg. is at pl310 base + 0x77c */
	MOVW	$((1<<16)-1), R2
	MOVW	R2, 0x67c(R1)		/* 0x77c - 0x100: R1 is base+0x100 */
	BARRIERS
	/*
	 * disable my MMU & caches
	 */
	MFCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	ORR	$CpCsbo, R1
	BIC	$(CpCsbz|CpCmmu|CpCdcache|CpCicache|CpCpredict), R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	BARRIERS

	/* cortex-a9 model-specific initial configuration */
	MOVW	$0, R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	BARRIERS

	PUTC('l')
	DELAY(printloop3, 1)

	MOVW	$testmem-KZERO(SB), R0
	BL	memdiag(SB)

	PUTC('a')
	/* clear Mach for cpu 0 */
	MOVW	$PADDR(MACHADDR), R4	/* address of Mach for cpu 0 */
	MOVW	$0, R0
_machZ:
	MOVW	R0, (R4)
	ADD	$4, R4
	CMP.S	$PADDR(L1+L1X(0)), R4	/* end at top-level page table */
	BNE	_machZ

	/*
	 * set up the MMU page table for cpu 0
	 */
	PUTC('n')
	/* clear all PTEs first, to provide a default */
//	MOVW	$PADDR(L1+L1X(0)), R4	/* address of PTE for 0 */
_ptenv0:
	ZEROPTE()
	CMP.S	$PADDR(L1+16*KiB), R4
	BNE	_ptenv0

	DELAY(printloop4, 2)
	PUTC(' ')
	/*
	 * set up double map of PHYSDRAM, KZERO to PHYSDRAM for first few MBs,
	 * but only if KZERO and PHYSDRAM differ.
	 */
	MOVW	$PTEDRAM, R2			/* PTE bits */
	MOVW	$PHYSDRAM, R3			/* pa */
	CMP	$KZERO, R3
	BEQ	no2map
	MOVW	$PADDR(L1+L1X(PHYSDRAM)), R4	/* address of PTE for PHYSDRAM */
	MOVW	$DOUBLEMAPMBS, R5
_ptdbl:
	FILLPTE()
	SUB.S	$1, R5
	BNE	_ptdbl
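
/*
 * roughly, in C (a sketch; assumes FILLPTE() stores a section
 * descriptor at (R4) and advances R3 and R4, as its use here implies):
 *
 *	ulong *pte = &l1[L1X(PHYSDRAM)];
 *	ulong pa = PHYSDRAM;
 *	int n;
 *
 *	for(n = DOUBLEMAPMBS; n > 0; n--){
 *		*pte++ = pa | PTEDRAM;		// 1MB section descriptor
 *		pa += MiB;
 *	}
 */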
no2map:

	/*
	 * back up and fill in PTEs for memory at KZERO.
	 * trimslice has 1 bank of 1GB at PHYSDRAM.
	 * Map the maximum.
	 */
	PUTC('9')
	MOVW	$PTEDRAM, R2			/* PTE bits */
	MOVW	$PHYSDRAM, R3
	MOVW	$PADDR(L1+L1X(KZERO)), R4	/* start with PTE for KZERO */
	MOVW	$MAXMB, R5			/* inner loop count (MBs) */
_ptekrw:					/* set PTEs */
	FILLPTE()
	SUB.S	$1, R5				/* decrement inner loop count */
	BNE	_ptekrw

	/*
	 * back up and fill in PTEs for MMIO
	 */
	PUTC(' ')
	MOVW	$PTEIO, R2			/* PTE bits */
	MOVW	$PHYSIO, R3
	MOVW	$PADDR(L1+L1X(VIRTIO)), R4	/* start with PTE for VIRTIO */
_ptenv2:
	FILLPTE()
	CMP.S	$PADDR(L1+L1X(PHYSIOEND)), R4
	BNE	_ptenv2

	/* mmu.c sets up the trap vectors later */

	MOVW	$(PHYSDRAM | TMPSTACK), SP

	/*
	 * learn l1 cache characteristics (on cpu 0 only).
	 */
	MOVW	$(1-1), R0		/* l1 */
	SLL	$1, R0			/* R0 = (cache - 1) << 1 */
	MTCP	CpSC, CpIDcssel, R0, C(CpID), C(CpIDid), 0 /* select l1 cache */
	BARRIERS
	MFCP	CpSC, CpIDcsize, R0, C(CpID), C(CpIDid), 0 /* get sets & ways */
	MOVW	$CACHECONF, R8

	/* get log2linelen into l1setsh */
	MOVW	R0, R1
	AND	$3, R1
	ADD	$4, R1
	/* l1 & l2 must have same cache line size, thus same set shift */
	MOVW	R1, 4(R8)		/* +4 = l1setsh */
	MOVW	R1, 12(R8)		/* +12 = l2setsh */

	/* get nways in R1 */
	SRA	$3, R0, R1
	AND	$((1<<10)-1), R1
	ADD	$1, R1

	/* get log2(nways) in R2 (assume nways is 2^n) */
	MOVW	$(BI2BY*BY2WD - 1), R2
	CLZ(1, 1)
	SUB.S	R1, R2			/* R2 = 31 - clz(nways) */
	ADD.EQ	$1, R2
//	MOVW	R2, R3			/* print log2(nways): 2 */
	MOVW	$32, R1
	SUB	R2, R1			/* R1 = 32 - log2(nways) */
	MOVW	R1, 0(R8)		/* +0 = l1waysh */
	BARRIERS
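
/*
 * worked example (assuming a typical a9 l1: 32-byte lines, 4 ways):
 * the CCSIDR linelen field is 1, so log2linelen = 1+4 = 5 (32 bytes);
 * the ways field is 3, so nways = 4, log2(nways) = 31 - clz(4) = 2,
 * and l1waysh = 32 - 2 = 30 (the way number occupies the top
 * log2(nways) bits of set/way cache-op operands).
 */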
	MOVW	$testmem-KZERO(SB), R0
	BL	memdiag(SB)

	/*
	 * the mpcore manual says invalidate d-cache, scu, pl310 in that order,
	 * but says nothing about when to disable them.
	 *
	 * invalidate my caches before enabling
	 */
	BL	cachedinv(SB)
	MTCP	CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
	BARRIERS

	PUTC('f')
	/*
	 * the mpcore manual says enable scu, d-cache, pl310, smp mode
	 * in that order.  we have to reverse the last two; see main().
	 */
	BL	scuon(SB)

	/*
	 * turn my L1 cache on; need it for tas below.
	 */
	MFCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	ORR	$(CpCdcache|CpCicache|CpCalign|CpCpredict), R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	BARRIERS

	/* cortex-a9 model-specific configuration */
	MOVW	$CpACl1pref, R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	BARRIERS

	/* we're supposed to wait until l1 & l2 are on before calling smpon */

	PUTC('r')
	/* set the domain access control */
	MOVW	$Client, R0
	BL	dacput(SB)

	DELAY(printloop5, 2)
	PUTC('o')
	BL	mmuinvalidate(SB)

	MOVW	$0, R0
	BL	pidput(SB)

	/* set the translation table base */
	MOVW	$PADDR(L1), R0
	BL	ttbput(SB)

	PUTC('m')
	/*
	 * the little dance to turn the MMU on
	 */
	BL	cacheuwbinv(SB)
	BL	mmuinvalidate(SB)
	BL	mmuenable(SB)

	PUTC(' ')
	/* warp the PC into the virtual map */
	MOVW	$KZERO, R0
	BL	_r15warp(SB)

	/*
	 * cpu 0 is now running at KZERO+something!
	 */
	BARRIERS
	MOVW	$setR12(SB), R12	/* reload kernel SB */
	MOVW	$(KZERO | TMPSTACK), SP
	BL	cacheuwbinv(SB)

	PUTC('B')
	MOVW	$PHYSDRAM, R3		/* pa */
	CMP	$KZERO, R3
	BEQ	no2unmap
	/* undo double map of PHYSDRAM, KZERO & first few MBs */
	MOVW	$(L1+L1X(PHYSDRAM)), R4	/* addr. of PTE for PHYSDRAM */
	MOVW	$0, R0
	MOVW	$DOUBLEMAPMBS, R5
_ptudbl:
	ZEROPTE()
	SUB.S	$1, R5
	BNE	_ptudbl
no2unmap:

	BL	cachedwb(SB)
	BL	mmuinvalidate(SB)

	/*
	 * call main in C
	 * pass Mach to main and set up the stack in it
	 */
	MOVW	$MACHADDR, R0		/* cpu 0 Mach */
	MOVW	R0, R(MACH)		/* m = MACHADDR */
	ADD	$(MACHSIZE-4), R0, SP	/* leave space for link register */
	PUTC('e')
	BL	main(SB)		/* main(m) */
limbo:
	BL	idlehands(SB)
	B	limbo

	BL	_div(SB)		/* hack to load _div, etc. */
/*
 * called on cpu(s) other than 0, to start them, from _vrst
 * (reset vector) in lexception.s, with interrupts disabled
 * and in SVC mode, running in the zero segment (pc is in lower 256MB).
 * SB is set for the zero segment.
 */
TEXT cpureset(SB), 1, $-4
	CLREX
	MOVW	CPSR, R0
	ORR	$PsrDfiq, R0
	MOVW	R0, CPSR

	MOVW	$(PHYSDRAM | TMPSTACK), SP	/* stack for cache ops */

	/* paranoia: turn my mmu and caches off. */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	ORR	$CpCsbo, R0
	BIC	$(CpCsbz|CpCmmu|CpCdcache|CpCicache|CpCpredict), R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS

	/* cortex-a9 model-specific initial configuration */
	MOVW	$0, R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	ISB

	/* invalidate my caches before enabling */
	BL	cachedinv(SB)
	MTCP	CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
	BARRIERS

	/*
	 * turn my L1 cache on; need it (and mmu) for tas below.
	 * need branch prediction to make delay() timing right.
	 */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	ORR	$(CpCdcache|CpCicache|CpCalign|CpCpredict), R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS

	/* enable l1 caches coherency, at minimum for ldrex/strex. */
	BL	smpon(SB)
	BARRIERS

	/*
	 * we used to write to PHYSEVP here; now we do it in C, which offers
	 * more assurance that we're up and won't go off the rails.
	 */

	/* set the domain access control */
	MOVW	$Client, R0
	BL	dacput(SB)

	BL	setmach(SB)

	/*
	 * redo double map of PHYSDRAM, KZERO in this cpu's ptes.
	 * mmuinit will undo this later.
	 */
	MOVW	$PHYSDRAM, R3
	CMP	$KZERO, R3
	BEQ	noun2map

	/* launchinit set m->mmul1 to a copy of cpu0's l1 page table */
	MOVW	12(R(MACH)), R0		/* m->mmul1 (virtual addr) */
	BL	k2paddr(SB)		/* R0 = PADDR(m->mmul1) */
	ADD	$L1X(PHYSDRAM), R0, R4	/* R4 = address of PHYSDRAM's PTE */

	MOVW	$PTEDRAM, R2		/* PTE bits */
	MOVW	$DOUBLEMAPMBS, R5
_ptrdbl:
	ORR	R3, R2, R1		/* first identity-map 0 to 0, etc. */
	MOVW	R1, (R4)
	ADD	$4, R4			/* bump PTE address */
	ADD	$MiB, R3		/* bump pa */
	SUB.S	$1, R5
	BNE	_ptrdbl
noun2map:

	MOVW	$0, R0
	BL	pidput(SB)

	/* set the translation table base to PADDR(m->mmul1) */
	MOVW	12(R(MACH)), R0		/* m->mmul1 */
	BL	k2paddr(SB)		/* R0 = PADDR(m->mmul1) */
	BL	ttbput(SB)

	/*
	 * the little dance to turn the MMU on
	 */
	BL	cacheuwbinv(SB)
	BL	mmuinvalidate(SB)
	BL	mmuenable(SB)

	/*
	 * mmu is now on, with l1 pt at m->mmul1.
	 */

	/* warp the PC into the virtual map */
	MOVW	$KZERO, R0
	BL	_r15warp(SB)

	/*
	 * now running at KZERO+something!
	 */
	BARRIERS
	MOVW	$setR12(SB), R12	/* reload kernel's SB */
	MOVW	$(KZERO | TMPSTACK), SP	/* stack for cache ops */
	BL	setmach(SB)
	ADD	$(MACHSIZE-4), R(MACH), SP /* leave space for link register */
	BL	cpustart(SB)
/*
 * converts virtual address in R0 to a physical address.
 */
TEXT k2paddr(SB), 1, $-4
	BIC	$KSEGM, R0
	ADD	$PHYSDRAM, R0
	RET

/*
 * converts physical address in R0 to a virtual address.
 */
TEXT p2kaddr(SB), 1, $-4
	BIC	$KSEGM, R0
	ORR	$KZERO, R0
	RET
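
/*
 * e.g., assuming the usual trimslice values (KSEGM 0xf0000000,
 * KZERO 0xc0000000, PHYSDRAM 0): k2paddr(0xc0012345) = 0x00012345
 * and p2kaddr(0x00012345) = 0xc0012345.
 */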
/*
 * converts address in R0 to the current segment, as defined by the PC.
 * clobbers R1.
 */
TEXT addr2pcseg(SB), 1, $-4
	BIC	$KSEGM, R0
	MOVW	PC, R1
	AND	$KSEGM, R1		/* segment PC is in */
	ORR	R1, R0
	RET

/* sets R(MACH), preserves other registers */
TEXT setmach(SB), 1, $-4
	MOVM.DB.W [R14], (R13)
	MOVM.DB.W [R0-R2], (R13)

	CPUID(R2)
	SLL	$2, R2			/* convert to word index */

	MOVW	$machaddr(SB), R0
	BL	addr2pcseg(SB)
	ADD	R2, R0			/* R0 = &machaddr[cpuid] */
	MOVW	(R0), R0		/* R0 = machaddr[cpuid] */
	CMP	$0, R0
	MOVW.EQ	$MACHADDR, R0		/* paranoia: use MACHADDR if 0 */
	BL	addr2pcseg(SB)
	MOVW	R0, R(MACH)		/* m = machaddr[cpuid] */

	MOVM.IA.W (R13), [R0-R2]
	MOVM.IA.W (R13), [R14]
	RET
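
/*
 * roughly, in C (a sketch; assumes machaddr[] is the per-cpu Mach*
 * array and cpuid() yields the number CPUID() reads):
 *
 *	Mach **mp = addr2pcseg(&machaddr[cpuid()]);
 *	Mach *m = *mp;
 *	if(m == 0)
 *		m = (Mach *)MACHADDR;	// paranoia
 *	m = addr2pcseg(m);		// stay in PC's segment
 */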
/*
 * memory diagnostic
 * tests word at (R0); modifies R7 and R8
 */
TEXT memdiag(SB), 1, $-4
	MOVW	$0xabcdef89, R7
	MOVW	R7, (R0)
	MOVW	(R0), R8
	CMP	R7, R8
	BNE	mbuggery		/* broken memory */

	BARRIERS
	MOVW	(R0), R8
	CMP	R7, R8
	BNE	mbuggery		/* broken memory */

	MOVW	$0, R7
	MOVW	R7, (R0)
	BARRIERS
	RET
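
/*
 * roughly, in C (a sketch):
 *
 *	*p = 0xabcdef89;
 *	if(*p != 0xabcdef89)
 *		mbuggery();
 *	coherence();		// BARRIERS: re-read past any buffering
 *	if(*p != 0xabcdef89)
 *		mbuggery();
 *	*p = 0;
 */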
/* modifies R0, R3-R6 */
TEXT printhex(SB), 1, $-4
	MOVW	R0, R3
	PUTC('0')
	PUTC('x')
	MOVW	$(32-4), R5		/* bits to shift right */
nextdig:
	SRA	R5, R3, R4
	AND	$0xf, R4
	ADD	$'0', R4
	CMP.S	$'9', R4
	BLE	nothex			/* if R4 <= 9, jump */
	ADD	$('a'-('9'+1)), R4
nothex:
	PUTC(R4)
	SUB.S	$4, R5
	BGE	nextdig

	PUTC('\r')
	PUTC('\n')
	DELAY(proct, 50)
	RET
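
/*
 * the digit arithmetic above, worked for R4 = 0xa: '0'+10 is ':',
 * which is > '9', so 'a'-('9'+1) = 0x27 is added, giving 'a'.
 * digits 0-9 take the BLE and print unchanged.
 */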
mbuggery:
	PUTC('?')
	PUTC('m')
mtopanic:
	MOVW	$membmsg(SB), R0
	MOVW	R14, R1			/* get R14's segment ... */
	AND	$KSEGM, R1
	BIC	$KSEGM, R0		/* strip segment from address */
	ORR	R1, R0			/* combine them */
	BL	panic(SB)
mbugloop:
	WFI
	B	mbugloop

DATA	membmsg+0(SB)/8,$"memory b"
DATA	membmsg+8(SB)/6,$"roken\z"
GLOBL	membmsg(SB), $14

TEXT _r15warp(SB), 1, $-4
	BIC	$KSEGM, R14		/* link reg, will become PC */
	ORR	R0, R14
	BIC	$KSEGM, SP
	ORR	R0, SP
	RET
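
/*
 * e.g., assuming KZERO 0xc0000000 and KSEGM 0xf0000000: called with
 * R0 = KZERO from physical address 0x104a0c (a made-up example),
 * _r15warp turns R14 into 0xc0104a0c, so the RET resumes at the same
 * offset in the virtual map; SP is warped the same way.
 */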
/*
 * `single-element' cache operations.
 * in arm arch v7, they operate on all architected cache levels, so separate
 * l2 functions are usually unnecessary.
 */

TEXT cachedwbse(SB), $-4		/* D writeback SE */
	MOVW	R0, R2

	MOVW	CPSR, R3
	CPSID				/* splhi */

	BARRIERS			/* force outstanding stores to cache */
	MOVW	R2, R0
	MOVW	4(FP), R1
	ADD	R0, R1			/* R1 is end address */
	BIC	$(CACHELINESZ-1), R0	/* cache line start */
_dwbse:
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEse
	ADD	$CACHELINESZ, R0
	CMP.S	R0, R1
	BGT	_dwbse
	B	_wait

TEXT cachedwbinvse(SB), $-4		/* D writeback+invalidate SE */
	MOVW	R0, R2

	MOVW	CPSR, R3
	CPSID				/* splhi */

	BARRIERS			/* force outstanding stores to cache */
	MOVW	R2, R0
	MOVW	4(FP), R1
	ADD	R0, R1			/* R1 is end address */
	BIC	$(CACHELINESZ-1), R0	/* cache line start */
_dwbinvse:
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEse
	ADD	$CACHELINESZ, R0
	CMP.S	R0, R1
	BGT	_dwbinvse
_wait:					/* drain write buffer */
	BARRIERS

	MOVW	R3, CPSR		/* splx */
	RET

TEXT cachedinvse(SB), $-4		/* D invalidate SE */
	MOVW	R0, R2

	MOVW	CPSR, R3
	CPSID				/* splhi */

	BARRIERS			/* force outstanding stores to cache */
	MOVW	R2, R0
	MOVW	4(FP), R1
	ADD	R0, R1			/* R1 is end address */

	/*
	 * if start & end addresses are not on cache-line boundaries,
	 * flush first & last cache lines before invalidating.
	 */
	AND.S	$(CACHELINESZ-1), R0, R4
	BEQ	stok
	BIC	$(CACHELINESZ-1), R0, R4 /* cache line start */
	MTCP	CpSC, 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse
stok:
	AND.S	$(CACHELINESZ-1), R1, R4
	BEQ	endok
	BIC	$(CACHELINESZ-1), R1, R4 /* cache line start */
	MTCP	CpSC, 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse
endok:
	BIC	$(CACHELINESZ-1), R0	/* cache line start */
_dinvse:
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEse
	ADD	$CACHELINESZ, R0
	CMP.S	R0, R1
	BGT	_dinvse
	B	_wait
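
/*
 * the partial-line write-backs in cachedinvse matter because
 * invalidation discards whole lines: with CACHELINESZ 32,
 * cachedinvse(0x1010, len) would otherwise throw away dirty data at
 * 0x1000-0x100f belonging to some neighbouring object.
 */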
/*
 * enable mmu and high vectors
 */
TEXT mmuenable(SB), 1, $-4
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	ORR	$CpCmmu, R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS
	RET

TEXT mmudisable(SB), 1, $-4
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BIC	$CpCmmu, R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS
	RET

/*
 * If one of these MCR instructions crashes or hangs the machine,
 * check your Level 1 page table (at TTB) closely.
 */
TEXT mmuinvalidate(SB), $-4		/* invalidate all */
	MOVW	CPSR, R2
	CPSID				/* interrupts off */

	BARRIERS
	MTCP	CpSC, 0, PC, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	MOVW	R2, CPSR		/* interrupts restored */
	RET

TEXT mmuinvalidateaddr(SB), $-4		/* invalidate single entry */
	MTCP	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse
	BARRIERS
	RET

TEXT cpidget(SB), 1, $-4		/* main ID */
	MFCP	CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDid
	RET

TEXT cpctget(SB), 1, $-4		/* cache type */
	MFCP	CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDct
	RET

TEXT controlget(SB), 1, $-4		/* system control (sctlr) */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	RET

TEXT ttbget(SB), 1, $-4			/* translation table base */
	MFCP	CpSC, 0, R0, C(CpTTB), C(0), CpTTB0
	RET

TEXT ttbput(SB), 1, $-4			/* translation table base */
	MOVW	CPSR, R2
	CPSID

	MOVW	R0, R1
	BARRIERS		/* finish prior accesses before changing ttb */
	MTCP	CpSC, 0, R1, C(CpTTB), C(0), CpTTB0
	MTCP	CpSC, 0, R1, C(CpTTB), C(0), CpTTB1	/* non-secure too */
	MOVW	$0, R0
	MTCP	CpSC, 0, R0, C(CpTTB), C(0), CpTTBctl
	BARRIERS

	MOVW	R2, CPSR
	RET

TEXT dacget(SB), 1, $-4			/* domain access control */
	MFCP	CpSC, 0, R0, C(CpDAC), C(0)
	RET

TEXT dacput(SB), 1, $-4			/* domain access control */
	MOVW	R0, R1
	BARRIERS
	MTCP	CpSC, 0, R1, C(CpDAC), C(0)
	ISB
	RET

TEXT fsrget(SB), 1, $-4			/* fault status */
	MFCP	CpSC, 0, R0, C(CpFSR), C(0), CpDFSR
	RET

TEXT farget(SB), 1, $-4			/* fault address */
	MFCP	CpSC, 0, R0, C(CpFAR), C(0), CpDFAR
	RET

TEXT getpsr(SB), 1, $-4
	MOVW	CPSR, R0
	RET

TEXT getscr(SB), 1, $-4			/* secure configuration */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(CpCONTROLscr), CpSCRscr
	RET

TEXT pidget(SB), 1, $-4			/* address translation pid */
	MFCP	CpSC, 0, R0, C(CpPID), C(0x0)
	RET

TEXT pidput(SB), 1, $-4			/* address translation pid */
	MTCP	CpSC, 0, R0, C(CpPID), C(0), 0	/* pid, v7a deprecated */
	MTCP	CpSC, 0, R0, C(CpPID), C(0), 1	/* context id, errata 754322 */
	ISB
	RET
/*
 * access to yet more coprocessor registers
 */

TEXT getauxctl(SB), 1, $-4		/* get cortex-a9 aux. ctl. */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpAuxctl
	RET

TEXT putauxctl(SB), 1, $-4		/* put cortex-a9 aux. ctl. */
	BARRIERS
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpAuxctl
	BARRIERS
	RET

TEXT getclvlid(SB), 1, $-4
	MFCP	CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), CpIDclvlid
	RET

TEXT getcyc(SB), 1, $-4
	MFCP	CpSC, 0, R0, C(CpCLD), C(CpCLDcyc), 0
	RET

TEXT getdebug(SB), 1, $-4		/* get cortex-a9 debug enable register */
	MFCP	CpSC, 0, R0, C(1), C(1), 1
	RET

TEXT getpc(SB), 1, $-4
	MOVW	PC, R0
	RET

TEXT getsb(SB), 1, $-4
	MOVW	R12, R0
	RET

TEXT setsp(SB), 1, $-4
	MOVW	R0, SP
	RET

TEXT splhi(SB), 1, $-4
	MOVW	CPSR, R0		/* return old CPSR */
	CPSID				/* turn off interrupts */
	CMP.S	$0, R(MACH)
	MOVW.NE	R14, 4(R(MACH))		/* save caller pc in m->splpc */
	RET

TEXT spllo(SB), 1, $-4			/* start marker for devkprof.c */
	MOVW	CPSR, R0		/* return old CPSR */
	MOVW	$0, R1
	CMP.S	R1, R(MACH)
	MOVW.NE	R1, 4(R(MACH))		/* clear m->splpc */
	CPSIE
	RET

TEXT splx(SB), 1, $-4
	MOVW	CPSR, R3		/* must return old CPSR */
	CPSID

	CMP.S	$0, R(MACH)
	MOVW.NE	R14, 4(R(MACH))		/* save caller pc in m->splpc */

	MOVW	R0, CPSR		/* reset interrupt level */
	MOVW	R3, R0			/* must return old CPSR */
	RET

TEXT spldone(SB), 1, $0			/* end marker for devkprof.c */
	RET
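
/*
 * typical use from C (a sketch):
 *
 *	s = splhi();		// mask interrupts, save old state
 *	...critical section...
 *	splx(s);		// restore previous state
 */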
TEXT islo(SB), 1, $-4
	MOVW	CPSR, R0
	AND	$(PsrDirq), R0
	EOR	$(PsrDirq), R0
	RET

TEXT clz(SB), $-4
	CLZ(0, 0)			/* 0 is R0 */
	RET

TEXT setlabel(SB), 1, $-4
	MOVW	SP, 0(R0)
	MOVW	R14, 4(R0)		/* pc */
	MOVW	$0, R0
	RET

TEXT gotolabel(SB), 1, $-4
	MOVW	0(R0), SP
	MOVW	4(R0), R14		/* pc */
	MOVW	$1, R0
	RET

TEXT getcallerpc(SB), 1, $-4
	MOVW	0(SP), R0
	RET

TEXT wfi(SB), $-4
	MOVW	CPSR, R1
	/*
	 * an interrupt should break us out of wfi.  masking interrupts
	 * slows interrupt response slightly but prevents recursion.
	 */
//	CPSIE
	CPSID

	BARRIERS
	WFI

	MOVW	R1, CPSR
	RET

TEXT coherence(SB), $-4
	BARRIERS
	RET

GLOBL	cpus_proceed+0(SB), $4

#include "cache.v7.s"
TEXT tas(SB), $-4			/* _tas(ulong *) */
	/* returns old (R0) after modifying (R0) */
	MOVW	R0, R5
	DMB

	MOVW	$1, R2			/* new value of (R0) */
	MOVW	$MAXSC, R8
tas1:
	LDREX(5,7)			/* LDREX 0(R5),R7 */
	CMP.S	$0, R7			/* old value non-zero (lock taken)? */
	BNE	lockbusy		/* we lose */
	SUB.S	$1, R8
	BEQ	lockloop2
	STREX(2,5,4)			/* STREX R2,(R5),R4 */
	CMP.S	$0, R4
	BNE	tas1			/* strex failed? try again */
	DMB
	B	tas0
lockloop2:
	PUTC('?')
	PUTC('l')
	PUTC('t')
	BL	abort(SB)
lockbusy:
	CLREX
tas0:
	MOVW	R7, R0			/* return old value */
	RET
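
/*
 * roughly, in C (a sketch; ldrex, strex and clrex stand for the
 * instructions above, not real functions):
 *
 *	ulong
 *	tas(ulong *p)
 *	{
 *		ulong old;
 *		int n;
 *
 *		coherence();			// DMB
 *		for(n = MAXSC; ; ){
 *			old = ldrex(p);
 *			if(old != 0){		// lock already held
 *				clrex();
 *				break;
 *			}
 *			if(--n == 0)
 *				abort();	// livelocked: prints "?lt"
 *			if(strex(1, p) == 0){	// 0 means store succeeded
 *				coherence();	// DMB
 *				break;
 *			}
 *		}
 *		return old;
 *	}
 */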