ls1028a.S

/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

.section .text, "ax"

#include <asm_macros.S>
#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>

#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>

.global soc_init_lowlevel
.global soc_init_percpu
.global _set_platform_security
.global _soc_set_start_addr
.global _soc_core_release
.global _soc_ck_disabled
.global _soc_core_restart
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_sys_reset
.global _soc_sys_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn

.equ TZPCDECPROT_0_SET_BASE, 0x02200804
.equ TZPCDECPROT_1_SET_BASE, 0x02200810
.equ TZPCDECPROT_2_SET_BASE, 0x0220081C

.equ TZASC_REGION_ATTRIBUTES_0_0, 0x01100110
/*
 * This function initializes the SoC.
 * in:  void
 * out: void
 * uses x0 - x11
 */
func soc_init_lowlevel
        /*
         * Called from C, so save the non-volatile regs.
         * Save these as pairs of registers to maintain the
         * required 16-byte alignment on the stack.
         */
        stp  x4,  x5,  [sp, #-16]!
        stp  x6,  x7,  [sp, #-16]!
        stp  x8,  x9,  [sp, #-16]!
        stp  x10, x11, [sp, #-16]!
        stp  x12, x13, [sp, #-16]!
        stp  x18, x30, [sp, #-16]!

        /*
         * Make sure the personality has been established by releasing
         * cores that are marked "to-be-disabled" from reset.
         */
        bl   release_disabled           /* 0-8 */

        /* Set SCRATCHRW7 to 0x0 */
        ldr  x0, =DCFG_SCRATCHRW7_OFFSET
        mov  x1, xzr
        bl   _write_reg_dcfg

        /* Restore the aarch32/64 non-volatile registers */
        ldp  x18, x30, [sp], #16
        ldp  x12, x13, [sp], #16
        ldp  x10, x11, [sp], #16
        ldp  x8,  x9,  [sp], #16
        ldp  x6,  x7,  [sp], #16
        ldp  x4,  x5,  [sp], #16
        ret
endfunc soc_init_lowlevel

/*
 * void soc_init_percpu(void)
 *
 * This function performs any SoC-specific initialization that is needed
 * on a per-core basis.
 * in:  none
 * out: none
 * uses x0 - x3
 */
func soc_init_percpu
        stp  x4, x30, [sp, #-16]!

        bl   plat_my_core_mask
        mov  x2, x0                     /* x2 = core mask */

        /* See if this core is marked for prefetch disable */
        mov  x0, #PREFETCH_DIS_OFFSET
        bl   _get_global_data           /* 0-1 */
        tst  x0, x2
        b.eq 1f
        bl   _disable_ldstr_pfetch_A72  /* 0 */
1:
        mov  x0, #NXP_PMU_ADDR
        bl   enable_timer_base_to_cluster

        ldp  x4, x30, [sp], #16
        ret
endfunc soc_init_percpu

/*
 * This function determines if a core is disabled via COREDISABLEDSR
 * in:  w0 = core_mask_lsb
 * out: w0 == 0, core not disabled
 *      w0 != 0, core disabled
 * uses x0, x1
 */
func _soc_ck_disabled
        /* Get base addr of dcfg block */
        ldr  x1, =NXP_DCFG_ADDR

        /* Read COREDISABLEDSR */
        ldr  w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]

        /* Test the core bit */
        and  w0, w1, w0
        ret
endfunc _soc_ck_disabled

/*
 * This function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 */
func _set_platform_security
        mov  x3, x30

#if (!SUPPRESS_TZC)
        /* Initialize the tzpc */
        bl   init_tzpc
#endif

#if (!SUPPRESS_SEC)
        /* Initialize secmon */
        bl   initSecMon
#endif

        mov  x30, x3
        ret
endfunc _set_platform_security

/*
 * Part of CPU_ON
 *
 * This function releases a secondary core from reset
 * in:   x0 = core_mask_lsb
 * out:  none
 * uses: x0 - x3
 */
_soc_core_release:
        mov  x3, x30

        /*
         * Write to CORE_HOLD to tell the bootrom that we want this
         * core to run.
         */
        ldr  x1, =NXP_SEC_REGFILE_ADDR
        str  w0, [x1, #CORE_HOLD_OFFSET]

        /* Read-modify-write BRRL to release the core */
        mov  x1, #NXP_RESET_ADDR
        ldr  w2, [x1, #BRR_OFFSET]
        orr  w2, w2, w0
        str  w2, [x1, #BRR_OFFSET]
        dsb  sy
        isb

        /* Send event */
        sev
        isb

        mov  x30, x3
        ret

/*
 * This function writes a 64-bit address to bootlocptrh/l
 * in:  x0 = 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2
 */
func _soc_set_start_addr
        /* Get the 64-bit base address of the dcfg block */
        ldr  x2, =NXP_DCFG_ADDR

        /* Write the 32-bit BOOTLOCPTRL register */
        mov  x1, x0
        str  w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]

        /* Write the 32-bit BOOTLOCPTRH register */
        lsr  x1, x0, #32
        str  w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
        ret
endfunc _soc_set_start_addr

/*
 * Part of CPU_ON
 *
 * This function restarts a core shut down via _soc_core_entr_off
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *      x0 != 0, on failure
 * uses x0 - x6
 */
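/*
 * Informational sketch of the wake handshake implemented below and in
 * _soc_core_entr_off: the target core parks in wfi, polling
 * ICC_IAR0_EL1 for SGI 15. Per the GICv3 architecture, ICC_SGI0R_EL1
 * encodes the target list in bits [15:0], Aff1 in bits [23:16] and the
 * INTID in bits [27:24], so this routine derives those fields from the
 * target core's MPIDR and fires SGI 15 at it.
 */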
_soc_core_restart:
        mov  x6, x30
        mov  x4, x0

        /* pgm GICD_CTLR - enable secure grp0 */
        mov  x5, #NXP_GICD_ADDR
        ldr  w2, [x5, #GICD_CTLR_OFFSET]
        orr  w2, w2, #GICD_CTLR_EN_GRP_0
        str  w2, [x5, #GICD_CTLR_OFFSET]
        dsb  sy
        isb

        /* Poll on RWP til the write completes */
4:
        ldr  w2, [x5, #GICD_CTLR_OFFSET]
        tst  w2, #GICD_CTLR_RWP
        b.ne 4b

        /*
         * x4 = core mask lsb
         * x5 = gicd base addr
         */
        mov  x0, x4
        bl   get_mpidr_value

        /* Generate the target list bit */
        and  x1, x0, #MPIDR_AFFINITY0_MASK
        mov  x2, #1
        lsl  x2, x2, x1

        /* Get the affinity1 field */
        and  x1, x0, #MPIDR_AFFINITY1_MASK
        lsl  x1, x1, #8
        orr  x2, x2, x1

        /* Insert the INTID for SGI 15 */
        orr  x2, x2, #ICC_SGI0R_EL1_INTID

        /* Fire the SGI */
        msr  ICC_SGI0R_EL1, x2
        dsb  sy
        isb

        /* Load '0' on success */
        mov  x0, xzr

        mov  x30, x6
        ret

/*
 * Part of CPU_OFF
 *
 * This function programs SoC & GIC registers in preparation for shutting
 * down the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0 - x8
 */
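/*
 * Overview of the sequence below, as a reading aid: configure retention
 * and prefetch behavior in CPUECTLR_EL1, make SGI 15 the wake source
 * (route it to Group 0 at the redistributor and give it the highest
 * priority), lock the debug interface via OSDLR_EL1, and arm the core
 * timer (with its interrupt masked) before the core enters wfi in
 * _soc_core_entr_off.
 */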
_soc_core_prep_off:
        mov  x8, x30
        mov  x7, x0                     /* x7 = core mask lsb */

        mrs  x1, CPUECTLR_EL1
        /* Set smp and disable L2 snoops in cpuectlr */
        orr  x1, x1, #CPUECTLR_SMPEN_EN
        orr  x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
        bic  x1, x1, #CPUECTLR_INS_PREFETCH_MASK
        bic  x1, x1, #CPUECTLR_DAT_PREFETCH_MASK
        /* Set retention control in cpuectlr */
        bic  x1, x1, #CPUECTLR_TIMER_MASK
        orr  x1, x1, #CPUECTLR_TIMER_2TICKS
        msr  CPUECTLR_EL1, x1

        /* Get redistributor rd base addr for this core */
        mov  x0, x7
        bl   get_gic_rd_base
        mov  x6, x0

        /* Get redistributor sgi base addr for this core */
        mov  x0, x7
        bl   get_gic_sgi_base
        mov  x5, x0

        /*
         * x5 = gicr sgi base addr
         * x6 = gicr rd base addr
         * x7 = core mask lsb
         */

        /* Disable SGI 15 at redistributor - GICR_ICENABLER0 */
        mov  w3, #GICR_ICENABLER0_SGI15
        str  w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
        /* Poll on rwp bit in GICR_CTLR */
        ldr  w4, [x6, #GICR_CTLR_OFFSET]
        tst  w4, #GICR_CTLR_RWP
        b.ne 2b

        /* Disable GRP1 interrupts at cpu interface */
        msr  ICC_IGRPEN1_EL3, xzr

        /* Disable GRP0 ints at cpu interface */
        msr  ICC_IGRPEN0_EL1, xzr

        /* Program the redistributor - poll on GICR_CTLR.RWP as needed */

        /* Define SGI 15 as Grp0 - GICR_IGROUPR0 */
        ldr  w4, [x5, #GICR_IGROUPR0_OFFSET]
        bic  w4, w4, #GICR_IGROUPR0_SGI15
        str  w4, [x5, #GICR_IGROUPR0_OFFSET]

        /* Define SGI 15 as Grp0 - GICR_IGRPMODR0 */
        ldr  w3, [x5, #GICR_IGRPMODR0_OFFSET]
        bic  w3, w3, #GICR_IGRPMODR0_SGI15
        str  w3, [x5, #GICR_IGRPMODR0_OFFSET]

        /* Set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
        ldr  w4, [x5, #GICR_IPRIORITYR3_OFFSET]
        bic  w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
        str  w4, [x5, #GICR_IPRIORITYR3_OFFSET]

        /* Enable SGI 15 at redistributor - GICR_ISENABLER0 */
        mov  w3, #GICR_ISENABLER0_SGI15
        str  w3, [x5, #GICR_ISENABLER0_OFFSET]
        dsb  sy
        isb
3:
        /* Poll on rwp bit in GICR_CTLR */
        ldr  w4, [x6, #GICR_CTLR_OFFSET]
        tst  w4, #GICR_CTLR_RWP
        b.ne 3b

        /* Quiesce the debug interfaces */
        mrs  x3, osdlr_el1
        orr  x3, x3, #OSDLR_EL1_DLK_LOCK
        msr  osdlr_el1, x3
        isb

        /* Enable grp0 ints */
        mov  x3, #ICC_IGRPEN0_EL1_EN
        msr  ICC_IGRPEN0_EL1, x3

        /*
         * x5 = gicr sgi base addr
         * x6 = gicr rd base addr
         * x7 = core mask lsb
         */

        /* Clear any pending interrupts */
        mvn  w1, wzr
        str  w1, [x5, #GICR_ICPENDR0_OFFSET]

        /* Make sure the system counter is enabled */
        ldr  x3, =NXP_TIMER_ADDR
        ldr  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
        tst  w0, #SYS_COUNTER_CNTCR_EN
        b.ne 4f
        orr  w0, w0, #SYS_COUNTER_CNTCR_EN
        str  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
        /* Enable the core timer and mask the timer interrupt */
        mov  x1, #CNTP_CTL_EL0_EN
        orr  x1, x1, #CNTP_CTL_EL0_IMASK
        msr  cntp_ctl_el0, x1

        isb
        mov  x30, x8
        ret

/*
 * Part of CPU_OFF
 *
 * This function performs the final steps to shut down the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0 - x5
 */
_soc_core_entr_off:
        mov  x5, x30
        mov  x4, x0                     /* x4 = core mask */
1:
        /* Enter the low-power state by executing wfi */
        wfi

        /* See if SGI 15 woke us up */
        mrs  x2, ICC_IAR0_EL1
        mov  x3, #ICC_IAR0_EL1_SGI15
        cmp  x2, x3
        b.ne 1b
        /* Deactivate the int */
        msr  ICC_EOIR0_EL1, x2

        /* x4 = core mask */
2:
        /* Check if the core has been turned on */
        mov  x0, x4
        bl   _getCoreState              /* x0 = core state */
        cmp  x0, #CORE_WAKEUP
        b.ne 1b

        /* If we get here, then we have exited the wfi */
        mov  x30, x5
        ret

/*
 * Part of CPU_OFF
 *
 * This function starts the process of starting a core back up
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
_soc_core_exit_off:
        mov  x6, x30
        mov  x5, x0

        /* Disable forwarding of GRP0 ints at cpu interface */
        msr  ICC_IGRPEN0_EL1, xzr

        /* Get redistributor sgi base addr for this core */
        mov  x0, x5
        bl   get_gic_sgi_base
        mov  x4, x0

        /*
         * x4 = gicr sgi base addr
         * x5 = core mask
         */

        /* Disable SGI 15 at redistributor - GICR_ICENABLER0 */
        mov  w1, #GICR_ICENABLER0_SGI15
        str  w1, [x4, #GICR_ICENABLER0_OFFSET]

        /* Get redistributor rd base addr for this core */
        mov  x0, x5
        bl   get_gic_rd_base
        mov  x4, x0                     /* x4 = gicr rd base addr */
2:
        /* Poll on rwp bit in GICR_CTLR */
        ldr  w2, [x4, #GICR_CTLR_OFFSET]
        tst  w2, #GICR_CTLR_RWP
        b.ne 2b

        /* x4 = gicr rd base addr */

        /* Unlock the debug interfaces */
        mrs  x3, osdlr_el1
        bic  x3, x3, #OSDLR_EL1_DLK_LOCK
        msr  osdlr_el1, x3
        isb

        dsb  sy
        isb
        mov  x30, x6
        ret

/*
 * This function requests a reset of the entire SOC
 * in:   none
 * out:  none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
_soc_sys_reset:
        mov  x3, x30

        /* Make sure the mask is cleared in the reset request mask register */
        mov  x0, #RST_RSTRQMR1_OFFSET
        mov  w1, wzr
        bl   _write_reg_reset

        /* Set the reset request */
        mov  x4, #RST_RSTCR_OFFSET
        mov  x0, x4
        mov  w1, #RSTCR_RESET_REQ
        bl   _write_reg_reset

        /* x4 = RST_RSTCR_OFFSET */

        /*
         * Just in case this address range is mapped as cacheable,
         * flush the write out of the dcaches.
         */
        mov  x2, #NXP_RESET_ADDR
        add  x2, x2, x4
        dc   cvac, x2
        dsb  st
        isb

        /* This function does not return */
1:
        wfi
        b    1b

/*
 * Part of SYSTEM_OFF
 *
 * This function turns off the SoC clocks
 * Note: this function is not intended to return, and the only allowable
 * recovery is POR
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
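/*
 * Shape of the LPM20 entry sequence below, noted as a reading aid:
 * quiesce peripherals (SEC, SPI/FlexSPI), disable caches/MMU at EL1 and
 * the CCI snoop domain, wait in the PMU for all cores to go idle, flush
 * and deactivate the cluster L2, mask exceptions, then write POWMGTCSR
 * to request the low-power state and park in wfe.
 */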
_soc_sys_off:
        /*
         * Disable sec, spi and flexspi
         * TBD - Check if eNETC needs to be disabled
         */
        ldr  x2, =NXP_DCFG_ADDR
        ldr  x0, =DCFG_DEVDISR1_OFFSET
        ldr  w1, =DCFG_DEVDISR1_SEC
        str  w1, [x2, x0]
        ldr  x0, =DCFG_DEVDISR4_OFFSET
        ldr  w1, =DCFG_DEVDISR4_SPI_QSPI
        str  w1, [x2, x0]

        /* Set TPMWAKEMR0 */
        ldr  x0, =TPMWAKEMR0_ADDR
        mov  w1, #0x1
        str  w1, [x0]

        /* Disable icache, dcache, mmu @ EL1 */
        mov  x1, #SCTLR_I_C_M_MASK
        mrs  x0, sctlr_el1
        bic  x0, x0, x1
        msr  sctlr_el1, x0

        /* Disable L2 prefetches */
        mrs  x0, CPUECTLR_EL1
        orr  x0, x0, #CPUECTLR_SMPEN_EN
        bic  x0, x0, #CPUECTLR_TIMER_MASK
        orr  x0, x0, #CPUECTLR_TIMER_2TICKS
        msr  CPUECTLR_EL1, x0
        dsb  sy
        isb

        /* Disable CCI snoop domain */
        ldr  x0, =NXP_CCI_ADDR
        mov  w1, #0x1
        str  w1, [x0]

        bl   get_pmu_idle_core_mask

        /* x3 = pmu base addr */
        mov  x3, #NXP_PMU_ADDR
4:
        ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
        cmp  w1, w0
        b.ne 4b

        bl   get_pmu_idle_cluster_mask
        mov  x3, #NXP_PMU_ADDR
        str  w0, [x3, #PMU_CLAINACTSETR_OFFSET]

        bl   get_pmu_idle_core_mask
        mov  x3, #NXP_PMU_ADDR
1:
        ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
        cmp  w1, w0
        b.ne 1b

        bl   get_pmu_flush_cluster_mask
        mov  x3, #NXP_PMU_ADDR
        str  w0, [x3, #PMU_CLL2FLUSHSETR_OFFSET]
2:
        ldr  w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
        cmp  w1, w0
        b.ne 2b

        str  w0, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]
        str  w0, [x3, #PMU_CLSINACTSETR_OFFSET]

        mov  x2, #DAIF_SET_MASK
        mrs  x1, spsr_el1
        orr  x1, x1, x2
        msr  spsr_el1, x1

        mrs  x1, spsr_el2
        orr  x1, x1, x2
        msr  spsr_el2, x1

        /* Force the debug interface to be quiescent */
        mrs  x0, osdlr_el1
        orr  x0, x0, #0x1
        msr  osdlr_el1, x0

        /* Invalidate all TLB entries at all 3 exception levels */
        tlbi alle1
        tlbi alle2
        tlbi alle3

        /* x3 = pmu base addr */

        /* Request lpm20 */
        ldr  x0, =PMU_POWMGTCSR_OFFSET
        ldr  w1, =PMU_POWMGTCSR_VAL
        str  w1, [x3, x0]
        isb
        dsb  sy

        /* This function does not return; park in a wfe loop */
5:
        wfe
        b    5b

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
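/*
 * Note on CPUECTLR_EL1[2:0]: on the Cortex-A72 this field is the CPU
 * retention control; clearing it (as done below) disables the per-core
 * retention circuit, while the non-zero encodings used elsewhere in
 * this file (e.g. CPUECTLR_TIMER_2TICKS) select the retention entry
 * delay.
 */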
_soc_core_prep_stdby:
        /* Clear CPUECTLR_EL1[2:0] */
        mrs  x1, CPUECTLR_EL1
        bic  x1, x1, #CPUECTLR_TIMER_MASK
        msr  CPUECTLR_EL1, x1
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function puts the calling core into standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_stdby:
        /* x0 = core mask lsb */
        dsb  sy
        isb
        wfi
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_core_exit_stdby:
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
_soc_core_prep_pwrdn:
        /* Make sure the system counter is enabled */
        ldr  x2, =NXP_TIMER_ADDR
        ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
        tst  w0, #SYS_COUNTER_CNTCR_EN
        b.ne 1f
        orr  w0, w0, #SYS_COUNTER_CNTCR_EN
        str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
        /*
         * Enable dynamic retention control (CPUECTLR[2:0])
         * Set the SMPEN bit (CPUECTLR[6])
         */
        mrs  x1, CPUECTLR_EL1
        bic  x1, x1, #CPUECTLR_RET_MASK
        orr  x1, x1, #CPUECTLR_TIMER_2TICKS
        orr  x1, x1, #CPUECTLR_SMPEN_EN
        msr  CPUECTLR_EL1, x1

        isb
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function puts the calling core into a power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
_soc_core_entr_pwrdn:
        /* x0 = core mask lsb */
        dsb  sy
        isb
        wfi
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_core_exit_pwrdn:
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_clstr_prep_stdby:
        /* Clear CPUECTLR_EL1[2:0] */
        mrs  x1, CPUECTLR_EL1
        bic  x1, x1, #CPUECTLR_TIMER_MASK
        msr  CPUECTLR_EL1, x1
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_clstr_exit_stdby:
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
_soc_clstr_prep_pwrdn:
        /* Make sure the system counter is enabled */
        ldr  x2, =NXP_TIMER_ADDR
        ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
        tst  w0, #SYS_COUNTER_CNTCR_EN
        b.ne 1f
        orr  w0, w0, #SYS_COUNTER_CNTCR_EN
        str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
        /*
         * Enable dynamic retention control (CPUECTLR[2:0])
         * Set the SMPEN bit (CPUECTLR[6])
         */
        mrs  x1, CPUECTLR_EL1
        bic  x1, x1, #CPUECTLR_RET_MASK
        orr  x1, x1, #CPUECTLR_TIMER_2TICKS
        orr  x1, x1, #CPUECTLR_SMPEN_EN
        msr  CPUECTLR_EL1, x1

        isb
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_clstr_exit_pwrdn:
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_prep_stdby:
        /* Clear CPUECTLR_EL1[2:0] */
        mrs  x1, CPUECTLR_EL1
        bic  x1, x1, #CPUECTLR_TIMER_MASK
        msr  CPUECTLR_EL1, x1
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
_soc_sys_exit_stdby:
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4
 */
_soc_sys_prep_pwrdn:
        /* Set retention control */
        mrs  x0, CPUECTLR_EL1
        bic  x0, x0, #CPUECTLR_TIMER_MASK
        orr  x0, x0, #CPUECTLR_TIMER_2TICKS
        orr  x0, x0, #CPUECTLR_SMPEN_EN
        msr  CPUECTLR_EL1, x0
        dsb  sy
        isb
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function puts the calling core, and potentially the SoC, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *      x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x13, x14, x15,
 *      x16, x17, x18
 */
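/*
 * Reading aid for the long sequence below: the EPU event registers and
 * the GICD interrupt-routing registers that get rewritten are first
 * saved on the stack; an EPU event is armed to catch the wake signal
 * from the PMU; the wake-capable interrupts are re-routed to cluster 1;
 * the PMU idles the cores, flushes the cluster L2 and deactivates the
 * cluster; the redistributors are put to sleep via GICR_WAKER; LPM20 is
 * requested through POWMGTCSR; and on wake everything is restored in
 * reverse order from the stack.
 */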
_soc_sys_pwrdn_wfi:
        mov  x18, x30

        mov  x3, #NXP_PMU_ADDR          /* x3 = pmu base addr */

        /* Backup the epu registers to the stack */
        ldr  x2, =NXP_EPU_ADDR
        ldr  w4, [x2, #EPU_EPIMCR10_OFFSET]
        ldr  w5, [x2, #EPU_EPCCR10_OFFSET]
        ldr  w6, [x2, #EPU_EPCTR10_OFFSET]
        ldr  w7, [x2, #EPU_EPGCR_OFFSET]
        stp  x4, x5, [sp, #-16]!
        stp  x6, x7, [sp, #-16]!

        /*
         * x2 = epu base addr
         * x3 = pmu base addr
         */

        /* Set up the EPU event to receive the wake signal from the PMU */
        mov  w4, #EPU_EPIMCR10_VAL
        mov  w5, #EPU_EPCCR10_VAL
        mov  w6, #EPU_EPCTR10_VAL
        mov  w7, #EPU_EPGCR_VAL
        str  w4, [x2, #EPU_EPIMCR10_OFFSET]
        str  w5, [x2, #EPU_EPCCR10_OFFSET]
        str  w6, [x2, #EPU_EPCTR10_OFFSET]
        str  w7, [x2, #EPU_EPGCR_OFFSET]

        ldr  x2, =NXP_GICD_ADDR

        /*
         * x2 = gicd base addr
         * x3 = pmu base addr
         */

        /* Backup the flextimer/mmc/usb interrupt router */
        ldr  x0, =GICD_IROUTER60_OFFSET
        ldr  x1, =GICD_IROUTER76_OFFSET
        ldr  w4, [x2, x0]
        ldr  w5, [x2, x1]
        ldr  x0, =GICD_IROUTER112_OFFSET
        ldr  x1, =GICD_IROUTER113_OFFSET
        ldr  w6, [x2, x0]
        ldr  w7, [x2, x1]
        stp  x4, x5, [sp, #-16]!
        stp  x6, x7, [sp, #-16]!

        /*
         * x2 = gicd base addr
         * x3 = pmu base addr
         * x0 = GICD_IROUTER112_OFFSET
         * x1 = GICD_IROUTER113_OFFSET
         */

        /* Re-route the interrupts to cluster 1 */
        ldr  w4, =GICD_IROUTER_VALUE
        str  w4, [x2, x0]
        str  w4, [x2, x1]
        ldr  x0, =GICD_IROUTER60_OFFSET
        ldr  x1, =GICD_IROUTER76_OFFSET
        str  w4, [x2, x0]
        str  w4, [x2, x1]
        dsb  sy
        isb

        /* x3 = pmu base addr */

        /*
         * Disable sec, spi and qspi
         * TBD - Check if eNETC needs to be disabled
         */
        ldr  x2, =NXP_DCFG_ADDR
        ldr  x0, =DCFG_DEVDISR1_OFFSET
        ldr  w1, =DCFG_DEVDISR1_SEC
        str  w1, [x2, x0]
        ldr  x0, =DCFG_DEVDISR4_OFFSET
        ldr  w1, =DCFG_DEVDISR4_SPI_QSPI
        str  w1, [x2, x0]

        /* x3 = pmu base addr */

        /* Set TPMWAKEMR0 */
        ldr  x0, =TPMWAKEMR0_ADDR
        mov  w1, #0x1
        str  w1, [x0]

        /* Disable the CCI snoop domain */
        ldr  x0, =NXP_CCI_ADDR
        mov  w1, #0x1
        str  w1, [x0]

        /* Set up retention control */
        mrs  x0, CPUECTLR_EL1
        orr  x0, x0, #CPUECTLR_SMPEN_EN
        orr  x0, x0, #CPUECTLR_TIMER_2TICKS
        msr  CPUECTLR_EL1, x0
        dsb  sy
        isb

        bl   get_pmu_idle_core_mask
        mov  x3, #NXP_PMU_ADDR
8:
        ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
        cmp  w1, w0
        b.ne 8b

        /* x3 = NXP_PMU_ADDR */
        /* 1-cluster SoC */
        bl   get_pmu_idle_cluster_mask
        mov  x3, #NXP_PMU_ADDR
        str  w0, [x3, #PMU_CLAINACTSETR_OFFSET]

        bl   get_pmu_idle_core_mask
        mov  x3, #NXP_PMU_ADDR          /* x3 = NXP_PMU_ADDR */
1:
        ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
        cmp  w1, w0
        b.ne 1b

        /* x3 = NXP_PMU_ADDR */
        bl   get_pmu_flush_cluster_mask
        mov  x3, #NXP_PMU_ADDR
        str  w0, [x3, #PMU_CLL2FLUSHSETR_OFFSET]

        /* x3 = NXP_PMU_ADDR */
2:
        ldr  w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
        cmp  w1, w0
        b.ne 2b

        /* x3 = NXP_PMU_ADDR */
        str  w0, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]
        str  w0, [x3, #PMU_CLSINACTSETR_OFFSET]

        /* Force the debug interface to be quiescent */
        mrs  x0, osdlr_el1
        orr  x0, x0, #0x1
        msr  osdlr_el1, x0
        /*
         * Enable the WakeRequest signal (set GICR_WAKER.ProcessorSleep
         * on each redistributor); x3 holds the core mask, walking from
         * the last core down to core 0.
         */
        bl   get_tot_num_cores
        sub  x0, x0, #1
        mov  x3, #0x1
        lsl  x3, x3, x0
2:
        mov  x0, x3
        bl   get_gic_rd_base            /* 0-2 */
        ldr  w1, [x0, #GICR_WAKER_OFFSET]
        orr  w1, w1, #GICR_WAKER_SLEEP_BIT
        str  w1, [x0, #GICR_WAKER_OFFSET]
1:
        ldr  w1, [x0, #GICR_WAKER_OFFSET]
        cmp  w1, #GICR_WAKER_ASLEEP
        b.ne 1b

        lsr  x3, x3, #1
        cbnz x3, 2b

        /* Invalidate all TLB entries at all 3 exception levels */
        tlbi alle1
        tlbi alle2
        tlbi alle3

        /* Request lpm20 */
        mov  x3, #NXP_PMU_ADDR
        ldr  x0, =PMU_POWMGTCSR_OFFSET
        ldr  w1, =PMU_POWMGTCSR_VAL
        str  w1, [x3, x0]

        ldr  x5, =NXP_EPU_ADDR
4:
        wfe
        ldr  w1, [x5, #EPU_EPCTR10_OFFSET]
        cmp  w1, #0
        b.eq 4b

        /* x3 = NXP_PMU_ADDR */
        bl   get_pmu_idle_cluster_mask
        mov  x3, #NXP_PMU_ADDR

        /* Re-enable the GPP ACP */
        str  w0, [x3, #PMU_CLAINACTCLRR_OFFSET]
        str  w0, [x3, #PMU_CLSINACTCLRR_OFFSET]

        /* x3 = NXP_PMU_ADDR */
3:
        ldr  w1, [x3, #PMU_CLAINACTSETR_OFFSET]
        cbnz w1, 3b
4:
        ldr  w1, [x3, #PMU_CLSINACTSETR_OFFSET]
        cbnz w1, 4b

        /*
         * Disable the WakeRequest signal (clear
         * GICR_WAKER.ProcessorSleep on each redistributor); x3 holds
         * the core mask, walking from the last core down to core 0.
         */
        bl   get_tot_num_cores
        sub  x0, x0, #1
        mov  x3, #0x1
        lsl  x3, x3, x0
2:
        mov  x0, x3
        bl   get_gic_rd_base            /* 0-2 */
        ldr  w1, [x0, #GICR_WAKER_OFFSET]
        bic  w1, w1, #GICR_WAKER_SLEEP_BIT
        str  w1, [x0, #GICR_WAKER_OFFSET]
1:
        ldr  w1, [x0, #GICR_WAKER_OFFSET]
        cbnz w1, 1b

        lsr  x3, x3, #1
        cbnz x3, 2b

        /* Enable the CCI snoop domain */
        ldr  x0, =NXP_CCI_ADDR
        str  wzr, [x0]
        dsb  sy
        isb

        ldr  x3, =NXP_EPU_ADDR          /* x3 = epu base addr */

        /* Enable sec, enetc, spi and qspi */
        ldr  x2, =NXP_DCFG_ADDR
        str  wzr, [x2, #DCFG_DEVDISR1_OFFSET]
        str  wzr, [x2, #DCFG_DEVDISR2_OFFSET]
        str  wzr, [x2, #DCFG_DEVDISR4_OFFSET]

        /* Restore the flextimer/mmc/usb interrupt router */
        ldr  x3, =NXP_GICD_ADDR
        ldp  x0, x2, [sp], #16
        ldr  x1, =GICD_IROUTER113_OFFSET
        str  w2, [x3, x1]
        ldr  x1, =GICD_IROUTER112_OFFSET
        str  w0, [x3, x1]
        ldp  x0, x2, [sp], #16
        ldr  x1, =GICD_IROUTER76_OFFSET
        str  w2, [x3, x1]
        ldr  x1, =GICD_IROUTER60_OFFSET
        str  w0, [x3, x1]

        /* Restore the EPU registers */
        ldr  x3, =NXP_EPU_ADDR
        ldp  x0, x2, [sp], #16
        str  w2, [x3, #EPU_EPGCR_OFFSET]
        str  w0, [x3, #EPU_EPCTR10_OFFSET]
        ldp  x2, x1, [sp], #16
        str  w1, [x3, #EPU_EPCCR10_OFFSET]
        str  w2, [x3, #EPU_EPIMCR10_OFFSET]

        dsb  sy
        isb
        mov  x30, x18
        ret

/*
 * Part of CPU_SUSPEND
 *
 * This function performs any SoC-specific cleanup after power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
_soc_sys_exit_pwrdn:
        /* Enable stack alignment checking */
        mrs  x1, SCTLR_EL1
        orr  x1, x1, #0x4
        msr  SCTLR_EL1, x1

        /* Enable the debug interface */
        mrs  x1, osdlr_el1
        bic  x1, x1, #OSDLR_EL1_DLK_LOCK
        msr  osdlr_el1, x1

        /* Enable i-cache */
        mrs  x1, SCTLR_EL3
        orr  x1, x1, #SCTLR_I_MASK
        msr  SCTLR_EL3, x1

        isb
        ret

/*
 * This function sets up the TrustZone Protection Controller (TZPC)
 * in:  none
 * out: none
 * uses x0, x1
 */
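/*
 * Background, for readers unfamiliar with the block: in a TZPC each
 * DECPROT "set" register exposes one decode-protection bit per
 * peripheral, and writing a 1 marks that peripheral as accessible to
 * the Non-Secure world; the 0xFF writes below therefore open up all
 * eight peripherals controlled by each of the three registers.
 */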
init_tzpc:
        /* Set Non-Secure access for all devices protected via TZPC */
        ldr  x1, =TZPCDECPROT_0_SET_BASE   /* decode Protection-0 Set Reg */
        mov  w0, #0xFF                     /* set decode region to NS, Bits[7:0] */
        str  w0, [x1]

        ldr  x1, =TZPCDECPROT_1_SET_BASE   /* decode Protection-1 Set Reg */
        mov  w0, #0xFF                     /* set decode region to NS, Bits[7:0] */
        str  w0, [x1]

        ldr  x1, =TZPCDECPROT_2_SET_BASE   /* decode Protection-2 Set Reg */
        mov  w0, #0xFF                     /* set decode region to NS, Bits[7:0] */
        str  w0, [x1]

        /* Mark the entire SRAM as NS */
        ldr  x1, =NXP_OCRAM_TZPC_ADDR      /* secure RAM region size Reg */
        mov  w0, #0x00000000               /* 0x00000000 = no secure region */
        str  w0, [x1]

        ret

/*
 * This function performs any needed initialization on SecMon for
 * boot services
 */
initSecMon:
        /* Read the register hpcomr */
        ldr  x1, =NXP_SNVS_ADDR
        ldr  w0, [x1, #SECMON_HPCOMR_OFFSET]
        /* Turn off secure access for the privileged registers */
        orr  w0, w0, #SECMON_HPCOMR_NPSWAEN
        /* Write back */
        str  w0, [x1, #SECMON_HPCOMR_OFFSET]
        ret

/*
 * This function checks to see if cores which are to be disabled have been
 * released from reset - if not, it releases them
 * in:  none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
release_disabled:
        stp  x18, x30, [sp, #-16]!

        /*
         * Get the number of cpus on this device. This calls into C,
         * but registers x9-x15 hold no live values at this point, so
         * nothing beyond the pair saved above needs to be preserved
         * around the call.
         */
        bl   get_tot_num_cores
        mov  x6, x0

        /* Read COREDISABLEDSR */
        mov  x0, #NXP_DCFG_ADDR
        ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]

        mov  x0, #NXP_RESET_ADDR
        ldr  w5, [x0, #BRR_OFFSET]

        /* Load the core mask for the first core */
        mov  x7, #1

        /*
         * x4 = COREDISABLEDSR
         * x5 = BRR
         * x6 = loop count
         * x7 = core mask bit
         */
2:
        /* Check if the core is to be disabled */
        tst  x4, x7
        b.eq 1f

        /* See if the disabled core has already been released from reset */
        tst  x5, x7
        b.ne 1f

        /* If the core has not been released, release it (0-3) */
        mov  x0, x7
        bl   _soc_core_release

        /* Record the core state in the data area (0-3) */
        mov  x0, x7
        mov  x1, #CORE_DISABLED
        bl   _setCoreState
1:
        /* Decrement the counter */
        subs x6, x6, #1
        b.le 3f

        /* Shift the core mask to the next core */
        lsl  x7, x7, #1
        /* Continue */
        b    2b
3:
        ldp  x18, x30, [sp], #16
        ret

/*
 * Write a register in the DCFG block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
_write_reg_dcfg:
        ldr  x2, =NXP_DCFG_ADDR
        str  w1, [x2, x0]
        ret

/*
 * Read a register in the DCFG block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
_read_reg_dcfg:
        ldr  x2, =NXP_DCFG_ADDR
        ldr  w1, [x2, x0]
        mov  w0, w1
        ret

/*
 * This function returns an mpidr value for a core, given a core_mask_lsb
 * in:  x0 = core mask lsb
 * out: x0 = affinity2:affinity1:affinity0, where each affinity field is
 *      8 bits
 * uses x0, x1
 */
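/*
 * Mapping note: this assumes two cores per cluster, so SoC core number
 * n maps to MPIDR fields Aff0 = n & 1 (core within the cluster) and
 * Aff1 = n >> 1 (cluster number), which is what the bit test and shift
 * below compute.
 */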
get_mpidr_value:
        /* Convert the core mask to an SoC core number */
        clz  w0, w0
        mov  w1, #31
        sub  w0, w1, w0

        /* Get the mpidr core number from the SoC core number */
        mov  w1, wzr
        tst  x0, #1
        b.eq 1f
        orr  w1, w1, #1
1:
        /* Extract the cluster number */
        lsr  w0, w0, #1
        orr  w0, w1, w0, lsl #8
        ret

/*
 * This function returns the redistributor base address for the core
 * specified in x0
 * in:  x0 = core mask lsb of the specified core
 * out: x0 = redistributor rd base address for the specified core
 * uses x0, x1, x2
 */
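/*
 * Layout assumption behind the loop below: the redistributor frames
 * are contiguous in the memory map, one GIC_RD_OFFSET-sized stride per
 * core, so the rd base for core n is NXP_GICR_ADDR + n * GIC_RD_OFFSET.
 */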
get_gic_rd_base:
        /* Get the 0-based core number */
        clz  w1, w0
        mov  w2, #0x20
        sub  w2, w2, w1
        sub  w2, w2, #1

        /* x2 = core number / loop counter */
        ldr  x0, =NXP_GICR_ADDR
        mov  x1, #GIC_RD_OFFSET
2:
        cbz  x2, 1f
        add  x0, x0, x1
        sub  x2, x2, #1
        b    2b
1:
        ret

/*
 * This function returns the redistributor sgi base address for the core
 * specified in x0
 * in:  x0 = core mask lsb of the specified core
 * out: x0 = redistributor sgi base address for the specified core
 * uses x0, x1, x2
 */
get_gic_sgi_base:
        /* Get the 0-based core number */
        clz  w1, w0
        mov  w2, #0x20
        sub  w2, w2, w1
        sub  w2, w2, #1

        /* x2 = core number / loop counter */
        ldr  x0, =NXP_GICR_SGI_ADDR
        mov  x1, #GIC_SGI_OFFSET
2:
        cbz  x2, 1f
        add  x0, x0, x1
        sub  x2, x2, #1
        b    2b
1:
        ret

/*
 * Write a register in the RESET block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
_write_reg_reset:
        ldr  x2, =NXP_RESET_ADDR
        str  w1, [x2, x0]
        ret

/*
 * Read a register in the RESET block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
_read_reg_reset:
        ldr  x1, =NXP_RESET_ADDR
        ldr  w0, [x1, x0]
        ret