
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
.section .text, "ax"
#include <asm_macros.S>
#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>
#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>
.global soc_init_start
.global soc_init_percpu
.global soc_init_finish
.global _set_platform_security
.global _soc_set_start_addr
.global _soc_core_release
.global _soc_ck_disabled
.global _soc_core_restart
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_sys_reset
.global _soc_sys_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn
.equ TZPC_BASE, 0x02200000
.equ TZPCDECPROT_0_SET_BASE, 0x02200804
.equ TZPCDECPROT_1_SET_BASE, 0x02200810
.equ TZPCDECPROT_2_SET_BASE, 0x0220081C
#define CLUSTER_3_CORES_MASK 0xC0
#define CLUSTER_3_IN_RESET 1
#define CLUSTER_3_NORMAL 0
/* cluster 3 handling no longer based on frequency, but rather on RCW[850],
* which is bit 18 of RCWSR27
*/
#define CLUSTER_3_RCW_BIT 0x40000
/* retry count for clock-stop acks */
.equ CLOCK_RETRY_CNT, 800
/* disable prefetching in the A72 core */
#define CPUACTLR_DIS_LS_HW_PRE 0x100000000000000
#define CPUACTLR_DIS_L2_TLB_PRE 0x200000
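/* From the values above: CPUACTLR_DIS_LS_HW_PRE is bit 56 and
* CPUACTLR_DIS_L2_TLB_PRE is bit 21 of CPUACTLR_EL1; both are set (and
* later restored from a saved copy) in _soc_sys_pwrdn_wfi below.
*/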
/* Function starts the initialization tasks of the soc,
* using secondary cores if they are available
*
* Called from C, saving the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*
* in:
* out:
* uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
*/
func soc_init_start
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x18, x30, [sp, #-16]!
/* make sure the personality has been
* established by releasing cores that
* are marked "to-be-disabled" from reset
*/
bl release_disabled /* 0-9 */
/* init the task flags */
bl _init_task_flags /* 0-1 */
/* set SCRATCHRW7 to 0x0 */
ldr x0, =DCFG_SCRATCHRW7_OFFSET
mov x1, xzr
bl _write_reg_dcfg
1:
/* restore the aarch32/64 non-volatile registers */
ldp x18, x30, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
ret
endfunc soc_init_start
/* Function performs any soc-specific initialization that is needed on
* a per-core basis.
* in: none
* out: none
* uses x0, x1, x2, x3
*/
func soc_init_percpu
stp x4, x30, [sp, #-16]!
bl plat_my_core_mask
mov x2, x0 /* x2 = core mask */
/* Check if this core is marked for prefetch disable
*/
mov x0, #PREFETCH_DIS_OFFSET
bl _get_global_data /* 0-1 */
tst x0, x2
b.eq 1f
bl _disable_ldstr_pfetch_A72 /* 0 */
1:
mov x0, #NXP_PMU_ADDR
bl enable_timer_base_to_cluster
ldp x4, x30, [sp], #16
ret
endfunc soc_init_percpu
/* Function completes the initialization tasks of the soc
* in:
* out:
* uses x0, x1, x2, x3, x4
*/
func soc_init_finish
stp x4, x30, [sp, #-16]!
ldp x4, x30, [sp], #16
ret
endfunc soc_init_finish
/* Function sets the security mechanisms in the SoC to implement the
* Platform Security Policy
*/
func _set_platform_security
mov x8, x30
#if (!SUPPRESS_TZC)
/* initialize the tzpc */
bl init_tzpc
#endif
#if (!SUPPRESS_SEC)
/* initialize secmon */
#ifdef NXP_SNVS_ENABLED
mov x0, #NXP_SNVS_ADDR
bl init_sec_mon
#endif
#endif
mov x30, x8
ret
endfunc _set_platform_security
/* Function writes a 64-bit address to bootlocptrh/l
* in: x0, 64-bit address to write to BOOTLOCPTRL/H
* uses x0, x1, x2
*/
func _soc_set_start_addr
/* Get the 64-bit base address of the dcfg block */
ldr x2, =NXP_DCFG_ADDR
/* write the 32-bit BOOTLOCPTRL register */
mov x1, x0
str w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]
/* write the 32-bit BOOTLOCPTRH register */
lsr x1, x0, #32
str w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
ret
endfunc _soc_set_start_addr
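/* Example (hypothetical address): if x0 = 0xFBE00000 on entry, the code
* above stores 0xFBE00000 to BOOTLOCPTRL and 0x00000000 to BOOTLOCPTRH,
* i.e. the low and high 32-bit halves of the 64-bit start address.
*/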
/* Function releases a secondary core from reset
* in: x0 = core_mask_lsb
* out: none
* uses: x0, x1, x2, x3
*/
func _soc_core_release
mov x3, x30
ldr x1, =NXP_SEC_REGFILE_ADDR
/* write to CORE_HOLD to tell
* the bootrom that this core is
* expected to run.
*/
str w0, [x1, #CORE_HOLD_OFFSET]
/* read-modify-write BRRL to release core */
mov x1, #NXP_RESET_ADDR
ldr w2, [x1, #BRR_OFFSET]
/* x0 = core mask */
orr w2, w2, w0
str w2, [x1, #BRR_OFFSET]
dsb sy
isb
/* send event */
sev
isb
mov x30, x3
ret
endfunc _soc_core_release
/* Function determines if a core is disabled via COREDISABLEDSR
* in: w0 = core_mask_lsb
* out: w0 = 0, core not disabled
* w0 != 0, core disabled
* uses x0, x1
*/
func _soc_ck_disabled
/* get base addr of dcfg block */
ldr x1, =NXP_DCFG_ADDR
/* read COREDISABLEDSR */
ldr w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]
/* test core bit */
and w0, w1, w0
ret
endfunc _soc_ck_disabled
/* Part of CPU_ON
* Function restarts a core shutdown via _soc_core_entr_off
* in: x0 = core mask lsb (of the target cpu)
* out: x0 == 0, on success
* x0 != 0, on failure
* uses x0, x1, x2, x3, x4, x5, x6
*/
func _soc_core_restart
mov x6, x30
mov x4, x0
/* pgm GICD_CTLR - enable secure grp0 */
mov x5, #NXP_GICD_ADDR
ldr w2, [x5, #GICD_CTLR_OFFSET]
orr w2, w2, #GICD_CTLR_EN_GRP_0
str w2, [x5, #GICD_CTLR_OFFSET]
dsb sy
isb
/* poll on RWP til write completes */
4:
ldr w2, [x5, #GICD_CTLR_OFFSET]
tst w2, #GICD_CTLR_RWP
b.ne 4b
/* x4 = core mask lsb
* x5 = gicd base addr
*/
mov x0, x4
bl get_mpidr_value
/* x0 = mpidr of target core
* x4 = core mask lsb of target core
* x5 = gicd base addr
*/
/* generate target list bit */
and x1, x0, #MPIDR_AFFINITY0_MASK
mov x2, #1
lsl x2, x2, x1
/* get the affinity1 field */
and x1, x0, #MPIDR_AFFINITY1_MASK
lsl x1, x1, #8
orr x2, x2, x1
/* insert the INTID for SGI15 */
orr x2, x2, #ICC_SGI0R_EL1_INTID
/* fire the SGI */
msr ICC_SGI0R_EL1, x2
dsb sy
isb
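/* The value built above follows the GICv3 ICC_SGI0R_EL1 layout:
* TargetList in bits [15:0], Aff1 in bits [23:16] and the interrupt ID
* (SGI 15, via ICC_SGI0R_EL1_INTID) in bits [27:24], so the SGI is
* routed to just the core being restarted.
*/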
/* load '0' on success */
mov x0, xzr
mov x30, x6
ret
endfunc _soc_core_restart
/* Part of CPU_OFF
* Function programs SoC & GIC registers in preparation for shutting down
* the core
* in: x0 = core mask lsb
* out: none
* uses x0, x1, x2, x3, x4, x5, x6, x7
*/
func _soc_core_prep_off
mov x8, x30
mov x7, x0 /* x7 = core mask lsb */
mrs x1, CORTEX_A72_ECTLR_EL1
/* set smp and disable L2 snoops in cpuectlr */
orr x1, x1, #CPUECTLR_SMPEN_EN
orr x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
bic x1, x1, #CPUECTLR_INS_PREFETCH_MASK
bic x1, x1, #CPUECTLR_DAT_PREFETCH_MASK
/* set retention control in cpuectlr */
bic x1, x1, #CPUECTLR_TIMER_MASK
orr x1, x1, #CPUECTLR_TIMER_8TICKS
msr CORTEX_A72_ECTLR_EL1, x1
/* get redistributor rd base addr for this core */
mov x0, x7
bl get_gic_rd_base
mov x6, x0
/* get redistributor sgi base addr for this core */
mov x0, x7
bl get_gic_sgi_base
mov x5, x0
/* x5 = gicr sgi base addr
* x6 = gicr rd base addr
* x7 = core mask lsb
*/
/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
mov w3, #GICR_ICENABLER0_SGI15
str w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
/* poll on rwp bit in GICR_CTLR */
ldr w4, [x6, #GICR_CTLR_OFFSET]
tst w4, #GICR_CTLR_RWP
b.ne 2b
/* disable GRP1 interrupts at cpu interface */
msr ICC_IGRPEN1_EL3, xzr
/* disable GRP0 ints at cpu interface */
msr ICC_IGRPEN0_EL1, xzr
/* program the redistributor - poll on GICR_CTLR.RWP as needed */
/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
ldr w4, [x5, #GICR_IGROUPR0_OFFSET]
bic w4, w4, #GICR_IGROUPR0_SGI15
str w4, [x5, #GICR_IGROUPR0_OFFSET]
/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
ldr w3, [x5, #GICR_IGRPMODR0_OFFSET]
bic w3, w3, #GICR_IGRPMODR0_SGI15
str w3, [x5, #GICR_IGRPMODR0_OFFSET]
/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
ldr w4, [x5, #GICR_IPRIORITYR3_OFFSET]
bic w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
str w4, [x5, #GICR_IPRIORITYR3_OFFSET]
/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
mov w3, #GICR_ISENABLER0_SGI15
str w3, [x5, #GICR_ISENABLER0_OFFSET]
dsb sy
isb
3:
/* poll on rwp bit in GICR_CTLR */
ldr w4, [x6, #GICR_CTLR_OFFSET]
tst w4, #GICR_CTLR_RWP
b.ne 3b
/* quiesce the debug interfaces */
mrs x3, osdlr_el1
orr x3, x3, #OSDLR_EL1_DLK_LOCK
msr osdlr_el1, x3
isb
/* enable grp0 ints */
mov x3, #ICC_IGRPEN0_EL1_EN
msr ICC_IGRPEN0_EL1, x3
/* x5 = gicr sgi base addr
* x6 = gicr rd base addr
* x7 = core mask lsb
*/
/* clear any pending interrupts */
mvn w1, wzr
str w1, [x5, #GICR_ICPENDR0_OFFSET]
/* make sure system counter is enabled */
ldr x3, =NXP_TIMER_ADDR
ldr w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
tst w0, #SYS_COUNTER_CNTCR_EN
b.ne 4f
orr w0, w0, #SYS_COUNTER_CNTCR_EN
str w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
/* enable the core timer and mask timer interrupt */
mov x1, #CNTP_CTL_EL0_EN
orr x1, x1, #CNTP_CTL_EL0_IMASK
msr cntp_ctl_el0, x1
isb
mov x30, x8
ret
endfunc _soc_core_prep_off
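/* Summary of the off/on handshake implemented by the functions above and
* below: _soc_core_prep_off configures SGI 15 as a secure group-0 wake
* source for this core, _soc_core_entr_off then sits in a wfi loop until
* _soc_core_restart (run by another core during CPU_ON) fires SGI 15 and
* _getCoreState reports CORE_WAKEUP.
*/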
/* Part of CPU_OFF:
* Function performs the final steps to shut down the core
* in: x0 = core mask lsb
* out: none
* uses x0, x1, x2, x3, x4, x5
*/
func _soc_core_entr_off
mov x5, x30
mov x4, x0
1:
/* enter low-power state by executing wfi */
wfi
/* see if SGI15 woke us up */
mrs x2, ICC_IAR0_EL1
mov x3, #ICC_IAR0_EL1_SGI15
cmp x2, x3
b.ne 2f
/* deactivate the interrupt */
msr ICC_EOIR0_EL1, x2
2:
/* check if core is turned ON */
mov x0, x4
/* fetch the core state into x0 */
bl _getCoreState
cmp x0, #CORE_WAKEUP
b.ne 1b
/* reached here, so we have exited the wfi loop */
mov x30, x5
ret
endfunc _soc_core_entr_off
/* Part of CPU_OFF:
* Function starts the process of starting a core back up
* in: x0 = core mask lsb
* out: none
* uses x0, x1, x2, x3, x4, x5, x6
*/
func _soc_core_exit_off
mov x6, x30
mov x5, x0
/* disable forwarding of GRP0 ints at cpu interface */
msr ICC_IGRPEN0_EL1, xzr
/* get redistributor sgi base addr for this core */
mov x0, x5
bl get_gic_sgi_base
mov x4, x0
/* x4 = gicr sgi base addr
* x5 = core mask
*/
/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
mov w1, #GICR_ICENABLER0_SGI15
str w1, [x4, #GICR_ICENABLER0_OFFSET]
/* get redistributor rd base addr for this core */
mov x0, x5
bl get_gic_rd_base
mov x4, x0
2:
/* poll on rwp bit in GICR_CTLR */
ldr w2, [x4, #GICR_CTLR_OFFSET]
tst w2, #GICR_CTLR_RWP
b.ne 2b
/* unlock the debug interfaces */
mrs x3, osdlr_el1
bic x3, x3, #OSDLR_EL1_DLK_LOCK
msr osdlr_el1, x3
isb
dsb sy
isb
mov x30, x6
ret
endfunc _soc_core_exit_off
/* Function requests a reset of the entire SOC
* in: none
* out: none
* uses: x0, x1, x2, x3, x4, x5, x6
*/
func _soc_sys_reset
mov x6, x30
ldr x2, =NXP_RST_ADDR
/* clear the RST_REQ_MSK and SW_RST_REQ */
mov w0, #0x00000000
str w0, [x2, #RSTCNTL_OFFSET]
/* initiate the sw reset request */
mov w0, #SW_RST_REQ_INIT
str w0, [x2, #RSTCNTL_OFFSET]
/* In case this address range is mapped as cacheable,
* flush the write out of the dcaches.
*/
add x2, x2, #RSTCNTL_OFFSET
dc cvac, x2
dsb st
isb
/* Function does not return */
b .
endfunc _soc_sys_reset
/* Part of SYSTEM_OFF:
* Function turns off the SoC clocks
* Note: Function is not intended to return, and the only allowable
* recovery is POR
* in: none
* out: none
* uses x0, x1, x2, x3
*/
func _soc_sys_off
/* disable sec, QBman, spi and qspi */
ldr x2, =NXP_DCFG_ADDR
ldr x0, =DCFG_DEVDISR1_OFFSET
ldr w1, =DCFG_DEVDISR1_SEC
str w1, [x2, x0]
ldr x0, =DCFG_DEVDISR3_OFFSET
ldr w1, =DCFG_DEVDISR3_QBMAIN
str w1, [x2, x0]
ldr x0, =DCFG_DEVDISR4_OFFSET
ldr w1, =DCFG_DEVDISR4_SPI_QSPI
str w1, [x2, x0]
/* set TPMWAKEMR0 */
ldr x0, =TPMWAKEMR0_ADDR
mov w1, #0x1
str w1, [x0]
/* disable icache, dcache, mmu @ EL1 */
mov x1, #SCTLR_I_C_M_MASK
mrs x0, sctlr_el1
bic x0, x0, x1
msr sctlr_el1, x0
/* disable L2 prefetches */
mrs x0, CORTEX_A72_ECTLR_EL1
bic x0, x0, #CPUECTLR_TIMER_MASK
orr x0, x0, #CPUECTLR_SMPEN_EN
orr x0, x0, #CPUECTLR_TIMER_8TICKS
msr CORTEX_A72_ECTLR_EL1, x0
isb
/* disable CCN snoop domain */
mov x1, #NXP_CCN_HN_F_0_ADDR
ldr x0, =CCN_HN_F_SNP_DMN_CTL_MASK
str x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
3:
ldr w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
cmp w2, #0x2
b.ne 3b
mov x3, #NXP_PMU_ADDR
4:
ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
cmp w1, #PMU_IDLE_CORE_MASK
b.ne 4b
mov w1, #PMU_IDLE_CLUSTER_MASK
str w1, [x3, #PMU_CLAINACTSETR_OFFSET]
1:
ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
cmp w1, #PMU_IDLE_CORE_MASK
b.ne 1b
mov w1, #PMU_FLUSH_CLUSTER_MASK
str w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET]
2:
ldr w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
cmp w1, #PMU_FLUSH_CLUSTER_MASK
b.ne 2b
mov w1, #PMU_FLUSH_CLUSTER_MASK
str w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]
mov w1, #PMU_FLUSH_CLUSTER_MASK
str w1, [x3, #PMU_CLSINACTSETR_OFFSET]
mov x2, #DAIF_SET_MASK
mrs x1, spsr_el1
orr x1, x1, x2
msr spsr_el1, x1
mrs x1, spsr_el2
orr x1, x1, x2
msr spsr_el2, x1
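/* The two writes above set the DAIF bits in SPSR_EL1 and SPSR_EL2, so any
* later exception return through those levels would resume with all
* interrupt types masked (presumably to keep the SoC quiescent during the
* power-off sequence).
*/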
/* force the debug interface to be quiescent */
mrs x0, osdlr_el1
orr x0, x0, #0x1
msr osdlr_el1, x0
/* invalidate all TLB entries at all 3 exception levels */
tlbi alle1
tlbi alle2
tlbi alle3
/* x3 = pmu base addr */
/* request lpm20 */
ldr x0, =PMU_POWMGTCSR_OFFSET
ldr w1, =PMU_POWMGTCSR_VAL
str w1, [x3, x0]
5:
wfe
b.eq 5b
endfunc _soc_sys_off
/* Part of CPU_SUSPEND
* Function puts the calling core into standby state
* in: x0 = core mask lsb
* out: none
* uses x0
*/
func _soc_core_entr_stdby
dsb sy
isb
wfi
ret
endfunc _soc_core_entr_stdby
/* Part of CPU_SUSPEND
* Function performs SoC-specific programming prior to standby
* in: x0 = core mask lsb
* out: none
* uses x0, x1
*/
func _soc_core_prep_stdby
/* clear CORTEX_A72_ECTLR_EL1[2:0] */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_TIMER_MASK
msr CORTEX_A72_ECTLR_EL1, x1
ret
endfunc _soc_core_prep_stdby
/* Part of CPU_SUSPEND
* Function performs any SoC-specific cleanup after standby state
* in: x0 = core mask lsb
* out: none
* uses none
*/
func _soc_core_exit_stdby
ret
endfunc _soc_core_exit_stdby
/* Part of CPU_SUSPEND
* Function performs SoC-specific programming prior to power-down
* in: x0 = core mask lsb
* out: none
* uses none
*/
func _soc_core_prep_pwrdn
/* make sure system counter is enabled */
ldr x2, =NXP_TIMER_ADDR
ldr w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
tst w0, #SYS_COUNTER_CNTCR_EN
b.ne 1f
orr w0, w0, #SYS_COUNTER_CNTCR_EN
str w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
/* enable dynamic retention control (CPUECTLR[2:0])
* set the SMPEN bit (CPUECTLR[6])
*/
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_RET_MASK
orr x1, x1, #CPUECTLR_TIMER_8TICKS
orr x1, x1, #CPUECTLR_SMPEN_EN
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_core_prep_pwrdn
/* Part of CPU_SUSPEND
* Function puts the calling core into a power-down state
* in: x0 = core mask lsb
* out: none
* uses x0
*/
func _soc_core_entr_pwrdn
/* X0 = core mask lsb */
dsb sy
isb
wfi
ret
endfunc _soc_core_entr_pwrdn
/* Part of CPU_SUSPEND
* Function performs any SoC-specific cleanup after power-down state
* in: x0 = core mask lsb
* out: none
* uses none
*/
func _soc_core_exit_pwrdn
ret
endfunc _soc_core_exit_pwrdn
/* Part of CPU_SUSPEND
* Function performs SoC-specific programming prior to standby
* in: x0 = core mask lsb
* out: none
* uses x0, x1
*/
func _soc_clstr_prep_stdby
/* clear CORTEX_A72_ECTLR_EL1[2:0] */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_TIMER_MASK
msr CORTEX_A72_ECTLR_EL1, x1
ret
endfunc _soc_clstr_prep_stdby
/* Part of CPU_SUSPEND
* Function performs any SoC-specific cleanup after standby state
* in: x0 = core mask lsb
* out: none
* uses none
*/
func _soc_clstr_exit_stdby
ret
endfunc _soc_clstr_exit_stdby
/* Part of CPU_SUSPEND
* Function performs SoC-specific programming prior to power-down
* in: x0 = core mask lsb
* out: none
* uses none
*/
func _soc_clstr_prep_pwrdn
/* make sure system counter is enabled */
ldr x2, =NXP_TIMER_ADDR
ldr w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
tst w0, #SYS_COUNTER_CNTCR_EN
b.ne 1f
orr w0, w0, #SYS_COUNTER_CNTCR_EN
str w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
/* enable dynamic retention control (CPUECTLR[2:0])
* set the SMPEN bit (CPUECTLR[6])
*/
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_RET_MASK
orr x1, x1, #CPUECTLR_TIMER_8TICKS
orr x1, x1, #CPUECTLR_SMPEN_EN
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_clstr_prep_pwrdn
/* Part of CPU_SUSPEND
* Function performs any SoC-specific cleanup after power-down state
* in: x0 = core mask lsb
* out: none
* uses none
*/
func _soc_clstr_exit_pwrdn
ret
endfunc _soc_clstr_exit_pwrdn
/* Part of CPU_SUSPEND
* Function performs SoC-specific programming prior to standby
* in: x0 = core mask lsb
* out: none
* uses x0, x1
*/
func _soc_sys_prep_stdby
/* clear CORTEX_A72_ECTLR_EL1[2:0] */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_TIMER_MASK
msr CORTEX_A72_ECTLR_EL1, x1
ret
endfunc _soc_sys_prep_stdby
/* Part of CPU_SUSPEND
* Function performs any SoC-specific cleanup after standby state
* in: x0 = core mask lsb
* out: none
* uses none
*/
func _soc_sys_exit_stdby
ret
endfunc _soc_sys_exit_stdby
/* Part of CPU_SUSPEND
* Function performs SoC-specific programming prior to
* suspend-to-power-down
* in: x0 = core mask lsb
* out: none
* uses x0, x1
*/
func _soc_sys_prep_pwrdn
mrs x1, CORTEX_A72_ECTLR_EL1
/* make sure the smp bit is set */
orr x1, x1, #CPUECTLR_SMPEN_MASK
/* set the retention control */
orr x1, x1, #CPUECTLR_RET_8CLK
/* disable tablewalk prefetch */
orr x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_sys_prep_pwrdn
/* Part of CPU_SUSPEND
* Function puts the calling core, and potentially the soc, into a
* low-power state
* in: x0 = core mask lsb
* out: x0 = 0, success
* x0 < 0, failure
* uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
* x15, x16, x17, x18, x19, x20, x21, x28
*/
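/* Rough flow of the function below: disable snooping in the CCN-508 HN-F
* nodes, enable the PMU stop-request override, disable A72 prefetching,
* stop the IP clocks via IPSTPCRn (honouring the IPPDEXPCRn exclusions),
* gate the IP blocks via DEVDISRn, run the cache-resident final_pwrdown
* sequence, then undo each step in reverse order once the core wakes.
*/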
func _soc_sys_pwrdn_wfi
mov x28, x30
/* disable cluster snooping in the CCN-508 */
ldr x1, =NXP_CCN_HN_F_0_ADDR
ldr x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
mov x6, #CCN_HNF_NODE_COUNT
1:
str x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
sub x6, x6, #1
add x1, x1, #CCN_HNF_OFFSET
cbnz x6, 1b
/* x0 = core mask
* x7 = hnf sdcr
*/
ldr x1, =NXP_PMU_CCSR_ADDR
ldr x2, =NXP_PMU_DCSR_ADDR
/* enable the stop-request-override */
mov x3, #PMU_POWMGTDCR0_OFFSET
mov x4, #POWMGTDCR_STP_OV_EN
str w4, [x2, x3]
/* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
* x7 = hnf sdcr
*/
/* disable prefetching in the A72 core */
mrs x8, CORTEX_A72_CPUACTLR_EL1
tst x8, #CPUACTLR_DIS_LS_HW_PRE
b.ne 2f
dsb sy
isb
/* disable data prefetch */
orr x16, x8, #CPUACTLR_DIS_LS_HW_PRE
/* disable tlb prefetch */
orr x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
msr CORTEX_A72_CPUACTLR_EL1, x16
isb
/* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
* x7 = hnf sdcr
* x8 = cpuactlr
*/
2:
/* save hnf-sdcr and cpuactlr to stack */
stp x7, x8, [sp, #-16]!
/* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
*/
/* save the IPSTPCRn registers to stack */
mov x15, #PMU_IPSTPCR0_OFFSET
ldr w9, [x1, x15]
mov x16, #PMU_IPSTPCR1_OFFSET
ldr w10, [x1, x16]
mov x17, #PMU_IPSTPCR2_OFFSET
ldr w11, [x1, x17]
mov x18, #PMU_IPSTPCR3_OFFSET
ldr w12, [x1, x18]
mov x19, #PMU_IPSTPCR4_OFFSET
ldr w13, [x1, x19]
mov x20, #PMU_IPSTPCR5_OFFSET
ldr w14, [x1, x20]
stp x9, x10, [sp, #-16]!
stp x11, x12, [sp, #-16]!
stp x13, x14, [sp, #-16]!
/* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
* x15 = PMU_IPSTPCR0_OFFSET
* x16 = PMU_IPSTPCR1_OFFSET
* x17 = PMU_IPSTPCR2_OFFSET
* x18 = PMU_IPSTPCR3_OFFSET
* x19 = PMU_IPSTPCR4_OFFSET
* x20 = PMU_IPSTPCR5_OFFSET
*/
/* load the full clock mask for IPSTPCR0 */
ldr x3, =DEVDISR1_MASK
/* get the exclusions */
mov x21, #PMU_IPPDEXPCR0_OFFSET
ldr w4, [x1, x21]
/* apply the exclusions to the mask */
bic w7, w3, w4
/* stop the clocks in IPSTPCR0 */
str w7, [x1, x15]
/* use same procedure for IPSTPCR1-IPSTPCR5 */
/* stop the clocks in IPSTPCR1 */
ldr x5, =DEVDISR2_MASK
mov x21, #PMU_IPPDEXPCR1_OFFSET
ldr w6, [x1, x21]
bic w8, w5, w6
str w8, [x1, x16]
/* stop the clocks in IPSTPCR2 */
ldr x3, =DEVDISR3_MASK
mov x21, #PMU_IPPDEXPCR2_OFFSET
ldr w4, [x1, x21]
bic w9, w3, w4
str w9, [x1, x17]
/* stop the clocks in IPSTPCR3 */
ldr x5, =DEVDISR4_MASK
mov x21, #PMU_IPPDEXPCR3_OFFSET
ldr w6, [x1, x21]
bic w10, w5, w6
str w10, [x1, x18]
/* stop the clocks in IPSTPCR4
* - exclude the ddr clocks as we are currently executing
* out of *some* memory, might be ddr
* - exclude the OCRAM clk so that we retain any code/data in
* OCRAM
* - may need to exclude the debug clock if we are testing
*/
ldr x3, =DEVDISR5_MASK
mov w6, #DEVDISR5_MASK_ALL_MEM
bic w3, w3, w6
mov w5, #POLICY_DEBUG_ENABLE
cbz w5, 3f
mov w6, #DEVDISR5_MASK_DBG
bic w3, w3, w6
3:
mov x21, #PMU_IPPDEXPCR4_OFFSET
ldr w4, [x1, x21]
bic w11, w3, w4
str w11, [x1, x19]
/* stop the clocks in IPSTPCR5 */
ldr x5, =DEVDISR6_MASK
mov x21, #PMU_IPPDEXPCR5_OFFSET
ldr w6, [x1, x21]
bic w12, w5, w6
str w12, [x1, x20]
/* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
* x7 = IPSTPCR0
* x8 = IPSTPCR1
* x9 = IPSTPCR2
* x10 = IPSTPCR3
* x11 = IPSTPCR4
* x12 = IPSTPCR5
*/
/* poll until the clocks are stopped in IPSTPACKSR0 */
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR0_OFFSET
4:
ldr w5, [x1, x21]
cmp w5, w7
b.eq 5f
sub w4, w4, #1
cbnz w4, 4b
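/* Note: this ack poll, and the five that follow, retry at most
* CLOCK_RETRY_CNT times; if the ack never matches, the code simply falls
* through to the next register rather than flagging an error.
*/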
/* poll until the clocks are stopped in IPSTPACKSR1 */
5:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR1_OFFSET
6:
ldr w5, [x1, x21]
cmp w5, w8
b.eq 7f
sub w4, w4, #1
cbnz w4, 6b
/* poll until the clocks are stopped in IPSTPACKSR2 */
7:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR2_OFFSET
8:
ldr w5, [x1, x21]
cmp w5, w9
b.eq 9f
sub w4, w4, #1
cbnz w4, 8b
/* poll until the clocks are stopped in IPSTPACKSR3 */
9:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR3_OFFSET
10:
ldr w5, [x1, x21]
cmp w5, w10
b.eq 11f
sub w4, w4, #1
cbnz w4, 10b
/* poll until the clocks are stopped in IPSTPACKSR4 */
11:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR4_OFFSET
12:
ldr w5, [x1, x21]
cmp w5, w11
b.eq 13f
sub w4, w4, #1
cbnz w4, 12b
/* poll until the clocks are stopped in IPSTPACKSR5 */
13:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR5_OFFSET
14:
ldr w5, [x1, x21]
cmp w5, w12
b.eq 15f
sub w4, w4, #1
cbnz w4, 14b
/* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
* x7 = IPSTPCR0
* x8 = IPSTPCR1
* x9 = IPSTPCR2
* x10 = IPSTPCR3
* x11 = IPSTPCR4
* x12 = IPSTPCR5
*/
15:
mov x3, #NXP_DCFG_ADDR
/* save the devdisr registers to stack */
ldr w13, [x3, #DCFG_DEVDISR1_OFFSET]
ldr w14, [x3, #DCFG_DEVDISR2_OFFSET]
ldr w15, [x3, #DCFG_DEVDISR3_OFFSET]
ldr w16, [x3, #DCFG_DEVDISR4_OFFSET]
ldr w17, [x3, #DCFG_DEVDISR5_OFFSET]
ldr w18, [x3, #DCFG_DEVDISR6_OFFSET]
stp x13, x14, [sp, #-16]!
stp x15, x16, [sp, #-16]!
stp x17, x18, [sp, #-16]!
/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
str w7, [x3, #DCFG_DEVDISR1_OFFSET]
/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
str w8, [x3, #DCFG_DEVDISR2_OFFSET]
/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
str w9, [x3, #DCFG_DEVDISR3_OFFSET]
/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
str w10, [x3, #DCFG_DEVDISR4_OFFSET]
/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
str w11, [x3, #DCFG_DEVDISR5_OFFSET]
/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
str w12, [x3, #DCFG_DEVDISR6_OFFSET]
/* setup register values for the cache-only sequence */
mov x4, #NXP_DDR_ADDR
mov x5, #NXP_DDR2_ADDR
mov x6, x11
mov x7, x17
ldr x12, =PMU_CLAINACTSETR_OFFSET
ldr x13, =PMU_CLSINACTSETR_OFFSET
ldr x14, =PMU_CLAINACTCLRR_OFFSET
ldr x15, =PMU_CLSINACTCLRR_OFFSET
/* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
* x3 = NXP_DCFG_ADDR
* x4 = NXP_DDR_ADDR
* x5 = NXP_DDR2_ADDR
* w6 = IPSTPCR4
* w7 = DEVDISR5
* x12 = PMU_CLAINACTSETR_OFFSET
* x13 = PMU_CLSINACTSETR_OFFSET
* x14 = PMU_CLAINACTCLRR_OFFSET
* x15 = PMU_CLSINACTCLRR_OFFSET
*/
mov x8, #POLICY_DEBUG_ENABLE
cbnz x8, 29f
/* force the debug interface to be quiescent */
mrs x9, OSDLR_EL1
orr x9, x9, #0x1
msr OSDLR_EL1, x9
/* enter the cache-only sequence */
29:
bl final_pwrdown
/* when we are here, the core has come out of wfi and the
* ddr is back up
*/
mov x8, #POLICY_DEBUG_ENABLE
cbnz x8, 30f
/* restart the debug interface */
mrs x9, OSDLR_EL1
mov x10, #1
bic x9, x9, x10
msr OSDLR_EL1, x9
/* get saved DEVDISR regs off stack */
30:
ldp x17, x18, [sp], #16
ldp x15, x16, [sp], #16
ldp x13, x14, [sp], #16
/* restore DEVDISR regs */
str w18, [x3, #DCFG_DEVDISR6_OFFSET]
str w17, [x3, #DCFG_DEVDISR5_OFFSET]
str w16, [x3, #DCFG_DEVDISR4_OFFSET]
str w15, [x3, #DCFG_DEVDISR3_OFFSET]
str w14, [x3, #DCFG_DEVDISR2_OFFSET]
str w13, [x3, #DCFG_DEVDISR1_OFFSET]
isb
/* get saved IPSTPCRn regs off stack */
ldp x13, x14, [sp], #16
ldp x11, x12, [sp], #16
ldp x9, x10, [sp], #16
/* restore IPSTPCRn regs */
mov x15, #PMU_IPSTPCR5_OFFSET
str w14, [x1, x15]
mov x16, #PMU_IPSTPCR4_OFFSET
str w13, [x1, x16]
mov x17, #PMU_IPSTPCR3_OFFSET
str w12, [x1, x17]
mov x18, #PMU_IPSTPCR2_OFFSET
str w11, [x1, x18]
mov x19, #PMU_IPSTPCR1_OFFSET
str w10, [x1, x19]
mov x20, #PMU_IPSTPCR0_OFFSET
str w9, [x1, x20]
isb
/* poll on IPSTPACKSRn regs until IP clocks are restarted */
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR5_OFFSET
16:
ldr w5, [x1, x15]
and w5, w5, w14
cbz w5, 17f
sub w4, w4, #1
cbnz w4, 16b
17:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR4_OFFSET
18:
ldr w5, [x1, x15]
and w5, w5, w13
cbz w5, 19f
sub w4, w4, #1
cbnz w4, 18b
19:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR3_OFFSET
20:
ldr w5, [x1, x15]
and w5, w5, w12
cbz w5, 21f
sub w4, w4, #1
cbnz w4, 20b
21:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR2_OFFSET
22:
ldr w5, [x1, x15]
and w5, w5, w11
cbz w5, 23f
sub w4, w4, #1
cbnz w4, 22b
23:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR1_OFFSET
24:
ldr w5, [x1, x15]
and w5, w5, w10
cbz w5, 25f
sub w4, w4, #1
cbnz w4, 24b
25:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR0_OFFSET
26:
ldr w5, [x1, x15]
and w5, w5, w9
cbz w5, 27f
sub w4, w4, #1
cbnz w4, 26b
27:
/* disable the stop-request-override */
mov x8, #PMU_POWMGTDCR0_OFFSET
mov w9, #POWMGTDCR_STP_OV_EN
str w9, [x2, x8]
isb
/* get hnf-sdcr and cpuactlr off stack */
ldp x7, x8, [sp], #16
/* restore cpuactlr */
msr CORTEX_A72_CPUACTLR_EL1, x8
isb
/* restore snooping in the hnf nodes */
ldr x9, =NXP_CCN_HN_F_0_ADDR
mov x6, #CCN_HNF_NODE_COUNT
28:
str x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
sub x6, x6, #1
add x9, x9, #CCN_HNF_OFFSET
cbnz x6, 28b
isb
mov x30, x28
ret
endfunc _soc_sys_pwrdn_wfi
/* Part of CPU_SUSPEND
* Function performs any SoC-specific cleanup after power-down
* in: x0 = core mask lsb
* out: none
* uses x0, x1, x2
*/
func _soc_sys_exit_pwrdn
mrs x1, CORTEX_A72_ECTLR_EL1
/* make sure the smp bit is set */
orr x1, x1, #CPUECTLR_SMPEN_MASK
/* clear the retention control */
mov x2, #CPUECTLR_RET_8CLK
bic x1, x1, x2
/* enable tablewalk prefetch */
mov x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
bic x1, x1, x2
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_sys_exit_pwrdn
/* Function will pwrdown ddr and the final core - it will do this
* by loading itself into the icache and then executing from there
* in:
* x0 = core mask
* x1 = NXP_PMU_CCSR_ADDR
* x2 = NXP_PMU_DCSR_ADDR
* x3 = NXP_DCFG_ADDR
* x4 = NXP_DDR_ADDR
* x5 = NXP_DDR2_ADDR
* w6 = IPSTPCR4
* w7 = DEVDISR5
* x12 = PMU_CLAINACTSETR_OFFSET
* x13 = PMU_CLSINACTSETR_OFFSET
* x14 = PMU_CLAINACTCLRR_OFFSET
* x15 = PMU_CLSINACTCLRR_OFFSET
* out: none
* uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16,
* x17, x18
*/
/* 4Kb aligned */
.align 12
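/* How the cache-only sequence below works: on the first pass x0 is 0, so
* each "touch_line_N: cbz x0, ..." skips the real work and merely fetches
* that block of code into the instruction cache; the final cbz then
* branches back to start_line_0, which sets x0 to 1 and runs the whole
* sequence from the icache while the DDR controllers are in self-refresh
* with their clocks stopped.
*/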
func final_pwrdown
mov x0, xzr
b touch_line_0
start_line_0:
mov x0, #1
/* put ddr controller 1 into self-refresh */
ldr w8, [x4, #DDR_CFG_2_OFFSET]
orr w8, w8, #CFG_2_FORCE_REFRESH
str w8, [x4, #DDR_CFG_2_OFFSET]
/* put ddr controller 2 into self-refresh */
ldr w8, [x5, #DDR_CFG_2_OFFSET]
orr w8, w8, #CFG_2_FORCE_REFRESH
str w8, [x5, #DDR_CFG_2_OFFSET]
/* stop the clocks in both ddr controllers */
mov w10, #DEVDISR5_MASK_DDR
mov x16, #PMU_IPSTPCR4_OFFSET
orr w9, w6, w10
str w9, [x1, x16]
isb
mov x17, #PMU_IPSTPACKSR4_OFFSET
touch_line_0:
cbz x0, touch_line_1
start_line_1:
/* poll IPSTPACKSR4 until
* ddr controller clocks are stopped.
*/
1:
ldr w8, [x1, x17]
and w8, w8, w10
cmp w8, w10
b.ne 1b
/* shut down power to the ddr controllers */
orr w9, w7, #DEVDISR5_MASK_DDR
str w9, [x3, #DCFG_DEVDISR5_OFFSET]
/* disable cluster acp ports */
mov w8, #CLAINACT_DISABLE_ACP
str w8, [x1, x12]
/* disable skyros ports */
mov w9, #CLSINACT_DISABLE_SKY
str w9, [x1, x13]
isb
touch_line_1:
cbz x0, touch_line_2
start_line_2:
isb
3:
wfi
/* if we are here then we are awake
* - bring this device back up
*/
/* enable skyros ports */
mov w9, #CLSINACT_DISABLE_SKY
str w9, [x1, x15]
/* enable acp ports */
mov w8, #CLAINACT_DISABLE_ACP
str w8, [x1, x14]
isb
/* bring up the ddr controllers */
str w7, [x3, #DCFG_DEVDISR5_OFFSET]
isb
str w6, [x1, x16]
isb
nop
touch_line_2:
cbz x0, touch_line_3
start_line_3:
/* poll IPSTPACKSR4 until
* ddr controller clocks are running
*/
mov w10, #DEVDISR5_MASK_DDR
2:
ldr w8, [x1, x17]
and w8, w8, w10
cbnz w8, 2b
/* take ddr controller 2 out of self-refresh */
mov w8, #CFG_2_FORCE_REFRESH
ldr w9, [x5, #DDR_CFG_2_OFFSET]
bic w9, w9, w8
str w9, [x5, #DDR_CFG_2_OFFSET]
/* take ddr controller 1 out of self-refresh */
ldr w9, [x4, #DDR_CFG_2_OFFSET]
bic w9, w9, w8
str w9, [x4, #DDR_CFG_2_OFFSET]
isb
nop
nop
nop
touch_line_3:
cbz x0, start_line_0
/* execute here after ddr is back up */
ret
endfunc final_pwrdown
/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are
* to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores
* are to be held in reset
* in: none
* out: x0 = #CLUSTER_3_NORMAL, cluster 3 treated normal
* x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset
* uses x0, x1, x2
*/
func cluster3InReset
/* default return is treat cores normal */
mov x0, #CLUSTER_3_NORMAL
/* read RCW_SR27 register */
mov x1, #NXP_DCFG_ADDR
ldr w2, [x1, #RCW_SR27_OFFSET]
/* test the cluster 3 bit */
tst w2, #CLUSTER_3_RCW_BIT
b.eq 1f
/* if we are here, then the bit was set */
mov x0, #CLUSTER_3_IN_RESET
1:
ret
endfunc cluster3InReset
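/* For reference: CLUSTER_3_RCW_BIT is 0x40000, i.e. bit 18 of RCWSR27
* (RCW[850] as noted at the top of this file), so the tst above simply
* checks whether that RCW bit was set at reset.
*/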
/* Function checks to see if cores which are to be disabled have been
* released from reset - if not, it releases them
* Note: cluster 3 cores may need special handling, depending upon
* RCW[850] (see cluster3InReset above)
* in: none
* out: none
* uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
*/
func release_disabled
mov x9, x30
/* check if we need to keep cluster 3 cores in reset */
bl cluster3InReset /* 0-2 */
mov x8, x0
/* x8 = cluster 3 handling */
/* read COREDISABLESR */
mov x0, #NXP_DCFG_ADDR
ldr w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
cmp x8, #CLUSTER_3_IN_RESET
b.ne 4f
/* the cluster 3 cores are to be held in reset, so remove
* them from the disable mask
*/
bic x4, x4, #CLUSTER_3_CORES_MASK
4:
/* get the number of cpus on this device */
mov x6, #PLATFORM_CORE_COUNT
mov x0, #NXP_RESET_ADDR
ldr w5, [x0, #BRR_OFFSET]
/* load the core mask for the first core */
mov x7, #1
/* x4 = COREDISABLESR
* x5 = BRR
* x6 = loop count
* x7 = core mask bit
*/
2:
/* check if the core is to be disabled */
tst x4, x7
b.eq 1f
/* see if disabled cores have already been released from reset */
tst x5, x7
b.ne 5f
/* if core has not been released, then release it (0-3) */
mov x0, x7
bl _soc_core_release
/* record the core state in the data area (0-3) */
mov x0, x7
mov x1, #CORE_STATE_DATA
mov x2, #CORE_DISABLED
bl _setCoreData
1:
/* see if this is a cluster 3 core */
mov x3, #CLUSTER_3_CORES_MASK
tst x3, x7
b.eq 5f
/* this is a cluster 3 core - see if it needs to be held in reset */
cmp x8, #CLUSTER_3_IN_RESET
b.ne 5f
/* record the core state as disabled in the data area (0-3) */
mov x0, x7
mov x1, #CORE_STATE_DATA
mov x2, #CORE_DISABLED
bl _setCoreData
5:
/* decrement the counter */
subs x6, x6, #1
b.le 3f
/* shift the core mask to the next core */
lsl x7, x7, #1
/* continue */
b 2b
3:
cmp x8, #CLUSTER_3_IN_RESET
b.ne 6f
/* we need to hold the cluster 3 cores in reset,
* so mark them in the COREDISR and COREDISABLEDSR registers as
* "disabled", and the rest of the sw stack will leave them alone
* thinking that they have been disabled
*/
mov x0, #NXP_DCFG_ADDR
ldr w1, [x0, #DCFG_COREDISR_OFFSET]
orr w1, w1, #CLUSTER_3_CORES_MASK
str w1, [x0, #DCFG_COREDISR_OFFSET]
ldr w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
orr w2, w2, #CLUSTER_3_CORES_MASK
str w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
dsb sy
isb
#if (PSCI_TEST)
/* x0 = NXP_DCFG_ADDR : read COREDISABLESR */
ldr w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
/* read COREDISR */
ldr w3, [x0, #DCFG_COREDISR_OFFSET]
#endif
6:
mov x30, x9
ret
endfunc release_disabled
/* Function sets up the TrustZone Protection Controller (TZPC)
* in: none
* out: none
* uses x0, x1
*/
func init_tzpc
/* set Non Secure access for all devices protected via TZPC */
/* decode Protection-0 Set Reg */
ldr x1, =TZPCDECPROT_0_SET_BASE
/* set decode region to NS, Bits[7:0] */
mov w0, #0xFF
str w0, [x1]
/* decode Protection-1 Set Reg */
ldr x1, =TZPCDECPROT_1_SET_BASE
/* set decode region to NS, Bits[7:0] */
mov w0, #0xFF
str w0, [x1]
/* decode Protection-2 Set Reg */
ldr x1, =TZPCDECPROT_2_SET_BASE
/* set decode region to NS, Bits[7:0] */
mov w0, #0xFF
str w0, [x1]
/* entire SRAM as NS */
/* secure RAM region size Reg */
ldr x1, =TZPC_BASE
/* 0x00000000 = no secure region */
mov w0, #0x00000000
str w0, [x1]
ret
endfunc init_tzpc
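/* Note on the addresses used above: the TZPCDECPROT_n_SET_BASE values
* defined at the top of the file are TZPC_BASE (0x02200000) plus offsets
* 0x804, 0x810 and 0x81C, which appear to be the standard BP147 TZPC
* DECPROTnSet register offsets, and the write of 0 to TZPC_BASE targets
* the secure RAM region size register at offset 0x0.
*/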
/* write a register in the DCFG block
* in: x0 = offset
* in: w1 = value to write
* uses x0, x1, x2
*/
func _write_reg_dcfg
ldr x2, =NXP_DCFG_ADDR
str w1, [x2, x0]
ret
endfunc _write_reg_dcfg
/* read a register in the DCFG block
* in: x0 = offset
* out: w0 = value read
* uses x0, x1, x2
*/
func _read_reg_dcfg
ldr x2, =NXP_DCFG_ADDR
ldr w1, [x2, x0]
mov w0, w1
ret
endfunc _read_reg_dcfg
/* Function returns an mpidr value for a core, given a core_mask_lsb
* in: x0 = core mask lsb
* out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
* uses x0, x1
*/
func get_mpidr_value
/* convert a core mask to an SoC core number */
clz w0, w0
mov w1, #31
sub w0, w1, w0
/* get the mpidr core number from the SoC core number */
mov w1, wzr
tst x0, #1
b.eq 1f
orr w1, w1, #1
1:
/* extract the cluster number */
lsr w0, w0, #1
orr w0, w1, w0, lsl #8
ret
endfunc get_mpidr_value
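/* Worked example for get_mpidr_value: a core mask of 0x20 (bit 5) is SoC
* core 5; with two cores per cluster that is cluster 2, core 1, so the
* function returns 0x201 (Aff1 = 2, Aff0 = 1).
*/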
/* Function returns the redistributor base address for the core specified
* in x0
* in: x0 - core mask lsb of specified core
* out: x0 = redistributor rd base address for specified core
* uses x0, x1, x2
*/
func get_gic_rd_base
clz w1, w0
mov w2, #0x20
sub w2, w2, w1
sub w2, w2, #1
ldr x0, =NXP_GICR_ADDR
mov x1, #GIC_RD_OFFSET
/* x2 = core number
* loop counter
*/
2:
cbz x2, 1f
add x0, x0, x1
sub x2, x2, #1
b 2b
1:
ret
endfunc get_gic_rd_base
/* Function returns the redistributor base address for the core specified
* in x0
* in: x0 - core mask lsb of specified core
* out: x0 = redistributor sgi base address for specified core
* uses x0, x1, x2
*/
func get_gic_sgi_base
clz w1, w0
mov w2, #0x20
sub w2, w2, w1
sub w2, w2, #1
ldr x0, =NXP_GICR_SGI_ADDR
mov x1, #GIC_SGI_OFFSET
/* loop counter */
2:
cbz x2, 1f /* x2 = core number */
add x0, x0, x1
sub x2, x2, #1
b 2b
1:
ret
endfunc get_gic_sgi_base
/* Function writes a register in the RESET block
* in: x0 = offset
* in: w1 = value to write
* uses x0, x1, x2
*/
func _write_reg_reset
ldr x2, =NXP_RESET_ADDR
str w1, [x2, x0]
ret
endfunc _write_reg_reset
/* Function reads a register in the RESET block
* in: x0 = offset
* out: w0 = value read
* uses x0, x1
*/
func _read_reg_reset
ldr x1, =NXP_RESET_ADDR
ldr w0, [x1, x0]
ret
endfunc _read_reg_reset