ls1046a.S 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937
  1. /*
  2. * Copyright 2020-2022 NXP
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. */
  7. #include <asm_macros.S>
  8. #include <dcfg_lsch2.h>
  9. #include <nxp_timer.h>
  10. #include <plat_gic.h>
  11. #include <scfg.h>
  12. #include <bl31_data.h>
  13. #include <plat_psci.h>
  14. #include <platform_def.h>
  15. #define DAIF_DATA AUX_01_DATA
  16. #define TIMER_CNTRL_DATA AUX_02_DATA
  17. .global soc_init_lowlevel
  18. .global soc_init_percpu
  19. .global _soc_core_release
  20. .global _soc_core_restart
  21. .global _soc_ck_disabled
  22. .global _soc_sys_reset
  23. .global _soc_sys_off
  24. .global _soc_set_start_addr
  25. .global _getGICC_BaseAddr
  26. .global _getGICD_BaseAddr
  27. .global _soc_core_prep_off
  28. .global _soc_core_entr_off
  29. .global _soc_core_exit_off
  30. .global _soc_core_prep_stdby
  31. .global _soc_core_entr_stdby
  32. .global _soc_core_exit_stdby
  33. .global _soc_core_prep_pwrdn
  34. .global _soc_core_entr_pwrdn
  35. .global _soc_core_exit_pwrdn
  36. .global _soc_clstr_prep_stdby
  37. .global _soc_clstr_exit_stdby
  38. .global _soc_clstr_prep_pwrdn
  39. .global _soc_clstr_exit_pwrdn
  40. .global _soc_sys_prep_stdby
  41. .global _soc_sys_exit_stdby
  42. .global _soc_sys_prep_pwrdn
  43. .global _soc_sys_pwrdn_wfi
  44. .global _soc_sys_exit_pwrdn
/*
 * void soc_init_lowlevel(void)
 *
 * Hook for early, one-time SoC initialization. No such initialization
 * is required on this SoC, so this is intentionally a no-op.
 * in:  none
 * out: none
 * uses none
 */
func soc_init_lowlevel
	ret
endfunc soc_init_lowlevel
/*
 * void soc_init_percpu(void)
 *
 * Performs any soc-specific initialization that is needed on
 * a per-core basis: if this core's bit is set in the global
 * prefetch-disable mask, load/store prefetch is disabled on this
 * A72 core.
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func soc_init_percpu
	mov	x3, x30			/* preserve LR across the calls below */
	bl	plat_my_core_mask	/* x0 = core mask (lsb) of this core */
	mov	x2, x0
	/* see if this core is marked for prefetch disable */
	mov	x0, #PREFETCH_DIS_OFFSET
	bl	_get_global_data	/* x0 = prefetch-disable bitmask */
	tst	x0, x2
	b.eq	1f			/* bit clear - leave prefetch enabled */
	bl	_disable_ldstr_pfetch_A72
1:
	mov	x30, x3
	ret
endfunc soc_init_percpu
/*
 * Part of CPU_ON
 * this function releases a secondary core from reset
 * in:  x0 = core_mask_lsb
 * out: none
 * uses x0, x1, x2, x3
 */
func _soc_core_release
#if (TEST_BL31)
	rbit	w2, w0
	/* x2 = core mask msb */
#else
	mov	x2, x0
#endif
	/*
	 * write COREBCR (boot release control); SCFG registers are
	 * big-endian, hence the byte swap before the store
	 */
	mov	x1, #NXP_SCFG_ADDR
	rev	w3, w2
	str	w3, [x1, #SCFG_COREBCR_OFFSET]
	isb
	/* read-modify-write BRR to set the target core's release bit */
	mov	x1, #NXP_DCFG_ADDR
	ldr	w2, [x1, #DCFG_BRR_OFFSET]
	rev	w3, w2			/* BE -> LE */
	orr	w3, w3, w0		/* set this core's bit */
	rev	w2, w3			/* LE -> BE */
	str	w2, [x1, #DCFG_BRR_OFFSET]
	isb
	/* send event to wake the released core from wfe */
	sev
	isb
	ret
endfunc _soc_core_release
/*
 * Part of CPU_ON
 * this function restarts a core shutdown via _soc_core_entr_off
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *      x0 != 0, on failure
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_restart
	mov	x5, x30			/* preserve LR */
	mov	x3, x0			/* x3 = target core mask lsb */
	/*
	 * unset ph20 request in RCPM_PCPH20CLEARR
	 * this is an lsb-0 register
	 */
	ldr	x1, =NXP_RCPM_ADDR
	rev	w2, w3			/* byte swap for the BE register */
	str	w2, [x1, #RCPM_PCPH20CLRR_OFFSET]
	dsb	sy
	isb
	bl	_getGICD_BaseAddr
	mov	x4, x0
	/* enable forwarding of group 0 interrupts by setting GICD_CTLR[0] = 1 */
	ldr	w1, [x4, #GICD_CTLR_OFFSET]
	orr	w1, w1, #GICD_CTLR_EN_GRP0
	str	w1, [x4, #GICD_CTLR_OFFSET]
	dsb	sy
	isb
	/*
	 * fire SGI by writing to GICD_SGIR the following values:
	 * [25:24] = 0x0 (forward interrupt to the CPU interfaces
	 *           specified in CPUTargetList field)
	 * [23:16] = core mask lsb[7:0] (forward interrupt to target cpu)
	 * [15]    = 0 (forward SGI only if it is configured as group 0 interrupt)
	 * [3:0]   = 0xF (interrupt ID = 15)
	 */
	lsl	w1, w3, #16
	orr	w1, w1, #0xF
	str	w1, [x4, #GICD_SGIR_OFFSET]
	dsb	sy
	isb
	/* load '0' on success */
	mov	x0, xzr
	mov	x30, x5
	ret
endfunc _soc_core_restart
  149. /*
  150. * This function determines if a core is disabled via COREDISR
  151. * in: w0 = core_mask_lsb
  152. * out: w0 = 0, core not disabled
  153. * w0 != 0, core disabled
  154. * uses x0, x1, x2
  155. */
  156. func _soc_ck_disabled
  157. /* get base addr of dcfg block */
  158. mov x1, #NXP_DCFG_ADDR
  159. /* read COREDISR */
  160. ldr w1, [x1, #DCFG_COREDISR_OFFSET]
  161. rev w2, w1
  162. /* test core bit */
  163. and w0, w2, w0
  164. ret
  165. endfunc _soc_ck_disabled
/*
 * This function resets the system via SoC-specific methods
 * Note: this function does not return
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func _soc_sys_reset
	ldr	x2, =NXP_DCFG_ADDR
	/* make sure the mask is cleared in the reset request mask register */
	mov	w1, wzr
	str	w1, [x2, #DCFG_RSTRQMR1_OFFSET]
	/* set the reset request (RSTCR is big-endian, hence the rev) */
	ldr	w1, =RSTCR_RESET_REQ
	ldr	x3, =DCFG_RSTCR_OFFSET
	rev	w0, w1
	str	w0, [x2, x3]
	/*
	 * just in case this address range is mapped as cacheable,
	 * flush the write out of the dcaches
	 */
	add	x3, x2, x3
	dc	cvac, x3
	dsb	st
	isb
	/* spin until the reset takes effect */
1:
	wfi
	b	1b
endfunc _soc_sys_reset
/*
 * Part of SYSTEM_OFF
 * this function turns off the SoC clocks
 * Note: this function is not intended to return, and the only allowable
 * recovery is POR
 * in:  none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */
func _soc_sys_off
	/* mask interrupts at the core */
	mrs	x1, DAIF
	mov	x0, #DAIF_SET_MASK
	orr	x0, x1, x0
	msr	DAIF, x0
	/* disable icache, dcache, mmu @ EL1 */
	mov	x1, #SCTLR_I_C_M_MASK
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0
	/* disable dcache for EL3 */
	mrs	x1, SCTLR_EL3
	bic	x1, x1, #SCTLR_C_MASK
	/* make sure icache is enabled */
	orr	x1, x1, #SCTLR_I_MASK
	msr	SCTLR_EL3, x1
	isb
	/* Enable dynamic retention ctrl (CPUECTLR[2:0]) and SMP (CPUECTLR[6]) */
	mrs	x0, CORTEX_A72_ECTLR_EL1
	orr	x0, x0, #CPUECTLR_TIMER_8TICKS
	orr	x0, x0, #CPUECTLR_SMPEN_EN
	msr	CORTEX_A72_ECTLR_EL1, x0
	/* set WFIL2EN in SCFG_CLUSTERPMCR */
	ldr	x0, =SCFG_COREPMCR_OFFSET
	ldr	x1, =COREPMCR_WFIL2
	bl	write_reg_scfg
	/* request LPM20 (read-modify-write of RCPM_POWMGTCSR) */
	mov	x0, #RCPM_POWMGTCSR_OFFSET
	bl	read_reg_rcpm
	orr	x1, x0, #RCPM_POWMGTCSR_LPM20_REQ
	mov	x0, #RCPM_POWMGTCSR_OFFSET
	bl	write_reg_rcpm
	dsb	sy
	isb
	/* spin here until power is removed */
1:
	wfi
	b	1b
endfunc _soc_sys_off
  243. /*
  244. * Write a register in the RCPM block
  245. * in: x0 = offset
  246. * in: w1 = value to write
  247. * uses x0, x1, x2, x3
  248. */
  249. func write_reg_rcpm
  250. ldr x2, =NXP_RCPM_ADDR
  251. /* swap for BE */
  252. rev w3, w1
  253. str w3, [x2, x0]
  254. ret
  255. endfunc write_reg_rcpm
  256. /*
  257. * Read a register in the RCPM block
  258. * in: x0 = offset
  259. * out: w0 = value read
  260. * uses x0, x1, x2
  261. */
  262. func read_reg_rcpm
  263. ldr x2, =NXP_RCPM_ADDR
  264. ldr w1, [x2, x0]
  265. /* swap for BE */
  266. rev w0, w1
  267. ret
  268. endfunc read_reg_rcpm
  269. /*
  270. * Write a register in the SCFG block
  271. * in: x0 = offset
  272. * in: w1 = value to write
  273. * uses x0, x1, x2, x3
  274. */
  275. func write_reg_scfg
  276. mov x2, #NXP_SCFG_ADDR
  277. /* swap for BE */
  278. rev w3, w1
  279. str w3, [x2, x0]
  280. ret
  281. endfunc write_reg_scfg
  282. /*
  283. * Read a register in the SCFG block
  284. * in: x0 = offset
  285. * out: w0 = value read
  286. * uses x0, x1, x2
  287. */
  288. func read_reg_scfg
  289. mov x2, #NXP_SCFG_ADDR
  290. ldr w1, [x2, x0]
  291. /* swap for BE */
  292. rev w0, w1
  293. ret
  294. endfunc read_reg_scfg
/*
 * Part of CPU_OFF
 * this function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7
 */
func _soc_core_prep_off
	mov	x7, x30			/* preserve LR */
	mov	x6, x0			/* x6 = core mask lsb */
	/* Set retention control in CPUECTLR; make sure smpen bit is set */
	mrs	x4, CORTEX_A72_ECTLR_EL1
	bic	x4, x4, #CPUECTLR_RET_MASK
	orr	x4, x4, #CPUECTLR_TIMER_8TICKS
	orr	x4, x4, #CPUECTLR_SMPEN_EN
	msr	CORTEX_A72_ECTLR_EL1, x4
	/*
	 * save timer control current value - it is restored in
	 * _soc_core_exit_off
	 */
	mov	x5, #NXP_TIMER_ADDR
	ldr	w4, [x5, #SYS_COUNTER_CNTCR_OFFSET]
	mov	w2, w4			/* w2 = value to store (arg to _setCoreData) */
	mov	x0, x6
	mov	x1, #TIMER_CNTRL_DATA
	bl	_setCoreData
	/* enable the timer */
	orr	w4, w4, #CNTCR_EN_MASK
	str	w4, [x5, #SYS_COUNTER_CNTCR_OFFSET]
	bl	_getGICC_BaseAddr
	mov	x5, x0			/* x5 = GICC base */
	/* disable signaling of ints while we reprogram the interface */
	ldr	w3, [x5, #GICC_CTLR_OFFSET]
	bic	w3, w3, #GICC_CTLR_EN_GRP0
	bic	w3, w3, #GICC_CTLR_EN_GRP1
	str	w3, [x5, #GICC_CTLR_OFFSET]
	dsb	sy
	isb
	/*
	 * set retention control in SCFG_RETREQCR
	 * Note: this register is msb 0, hence the rbit on the core mask
	 */
	ldr	x4, =SCFG_RETREQCR_OFFSET
	mov	x0, x4
	bl	read_reg_scfg
	rbit	w1, w6
	orr	w1, w0, w1
	mov	x0, x4
	bl	write_reg_scfg
	/* set the priority filter so SGI 15 can be signaled */
	ldr	w2, [x5, #GICC_PMR_OFFSET]
	orr	w2, w2, #GICC_PMR_FILTER
	str	w2, [x5, #GICC_PMR_OFFSET]
	/* setup GICC_CTLR (w3 still holds the CTLR value read above) */
	bic	w3, w3, #GICC_CTLR_ACKCTL_MASK
	orr	w3, w3, #GICC_CTLR_FIQ_EN_MASK
	orr	w3, w3, #GICC_CTLR_EOImodeS_MASK
	orr	w3, w3, #GICC_CTLR_CBPR_MASK
	str	w3, [x5, #GICC_CTLR_OFFSET]
	/* setup the banked-per-core GICD registers */
	bl	_getGICD_BaseAddr
	mov	x5, x0			/* x5 = GICD base */
	/* define SGI15 as Grp0 */
	ldr	w2, [x5, #GICD_IGROUPR0_OFFSET]
	bic	w2, w2, #GICD_IGROUP0_SGI15
	str	w2, [x5, #GICD_IGROUPR0_OFFSET]
	/* set priority of SGI 15 to highest (priority field = 0) */
	ldr	w2, [x5, #GICD_IPRIORITYR3_OFFSET]
	bic	w2, w2, #GICD_IPRIORITY_SGI15_MASK
	str	w2, [x5, #GICD_IPRIORITYR3_OFFSET]
	/* enable SGI 15 */
	ldr	w2, [x5, #GICD_ISENABLER0_OFFSET]
	orr	w2, w2, #GICD_ISENABLE0_SGI15
	str	w2, [x5, #GICD_ISENABLER0_OFFSET]
	/* enable the cpu interface for Grp0 so SGI 15 can wake us */
	bl	_getGICC_BaseAddr
	mov	x2, x0
	orr	w3, w3, #GICC_CTLR_EN_GRP0
	str	w3, [x2, #GICC_CTLR_OFFSET]
	/* clear any pending SGIs */
	ldr	x2, =GICD_CPENDSGIR_CLR_MASK
	add	x0, x5, #GICD_CPENDSGIR3_OFFSET
	str	w2, [x0]
	/*
	 * Set the PC_PH20_REQ bit in RCPM_PCPH20SETR
	 * this is an lsb-0 register
	 */
	mov	x1, x6
	mov	x0, #RCPM_PCPH20SETR_OFFSET
	bl	write_reg_rcpm
	dsb	sy
	isb
	mov	x30, x7
	ret
endfunc _soc_core_prep_off
/*
 * Part of CPU_OFF
 * this function performs the final steps to shutdown the core; it loops
 * in wfi until the core is woken by SGI 15 and marked for wakeup
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_entr_off
	mov	x5, x30			/* preserve LR */
	mov	x4, x0			/* x4 = core mask lsb */
	bl	_getGICD_BaseAddr
	mov	x3, x0			/* x3 = GICD base */
3:
	/* enter low-power state by executing wfi */
	wfi
	/* see if we got hit by SGI 15 */
	add	x0, x3, #GICD_SPENDSGIR3_OFFSET
	ldr	w2, [x0]
	and	w2, w2, #GICD_SPENDSGIR3_SGI15_MASK
	cbz	w2, 4f			/* spurious wakeup - no SGI to clear */
	/* clear the pending SGI */
	ldr	x2, =GICD_CPENDSGIR_CLR_MASK
	add	x0, x3, #GICD_CPENDSGIR3_OFFSET
	str	w2, [x0]
4:
	/* check if core has been turned on; if not, keep waiting */
	mov	x0, x4
	bl	_getCoreState
	cmp	x0, #CORE_WAKEUP
	b.ne	3b
	/* if we get here, then we have exited the wfi */
	dsb	sy
	isb
	mov	x30, x5
	ret
endfunc _soc_core_entr_off
/*
 * Part of CPU_OFF
 * this function starts the process of starting a core back up
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_exit_off
	mov	x6, x30			/* preserve LR */
	mov	x5, x0			/* x5 = core mask lsb */
	/*
	 * Clear ph20 request in RCPM_PCPH20CLRR - no need
	 * to do that here, it has been done in _soc_core_restart
	 */
	bl	_getGICC_BaseAddr
	mov	x1, x0
	/* read GICC_IAR - acknowledge the waking interrupt */
	ldr	w0, [x1, #GICC_IAR_OFFSET]
	/* write GICC_EOIR - signal end-of-interrupt */
	str	w0, [x1, #GICC_EOIR_OFFSET]
	/* write GICC_DIR - deactivate the interrupt */
	str	w0, [x1, #GICC_DIR_OFFSET]
	/* disable signaling of grp0 ints */
	ldr	w3, [x1, #GICC_CTLR_OFFSET]
	bic	w3, w3, #GICC_CTLR_EN_GRP0
	str	w3, [x1, #GICC_CTLR_OFFSET]
	/*
	 * Unset retention request in SCFG_RETREQCR
	 * Note: this register is msb-0, hence the rbit on the core mask
	 */
	ldr	x4, =SCFG_RETREQCR_OFFSET
	mov	x0, x4
	bl	read_reg_scfg
	rbit	w1, w5
	bic	w1, w0, w1
	mov	x0, x4
	bl	write_reg_scfg
	/* restore timer ctrl (saved in _soc_core_prep_off) */
	mov	x0, x5
	mov	x1, #TIMER_CNTRL_DATA
	bl	_getCoreData
	/* w0 = timer ctrl saved value */
	mov	x2, #NXP_TIMER_ADDR
	str	w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	dsb	sy
	isb
	mov	x30, x6
	ret
endfunc _soc_core_exit_off
  473. /*
  474. * Function loads a 64-bit execution address of the core in the soc registers
  475. * BOOTLOCPTRL/H
  476. * in: x0, 64-bit address to write to BOOTLOCPTRL/H
  477. * uses x0, x1, x2, x3
  478. */
  479. func _soc_set_start_addr
  480. /* get the 64-bit base address of the scfg block */
  481. ldr x2, =NXP_SCFG_ADDR
  482. /* write the 32-bit BOOTLOCPTRL register */
  483. mov x1, x0
  484. rev w3, w1
  485. str w3, [x2, #SCFG_BOOTLOCPTRL_OFFSET]
  486. /* write the 32-bit BOOTLOCPTRH register */
  487. lsr x1, x0, #32
  488. rev w3, w1
  489. str w3, [x2, #SCFG_BOOTLOCPTRH_OFFSET]
  490. ret
  491. endfunc _soc_set_start_addr
/*
 * This function returns the base address of the gic distributor
 * in:  none
 * out: x0 = base address of gic distributor
 * uses x0
 */
func _getGICD_BaseAddr
#if (TEST_BL31)
	/* defect in simulator - gic base addresses are on 4Kb boundary */
	ldr	x0, =NXP_GICD_4K_ADDR
#else
	ldr	x0, =NXP_GICD_64K_ADDR
#endif
	ret
endfunc _getGICD_BaseAddr
/*
 * This function returns the base address of the gic cpu interface
 * in:  none
 * out: x0 = base address of gic cpu interface
 * uses x0
 */
func _getGICC_BaseAddr
#if (TEST_BL31)
	/* defect in simulator - gic base addresses are on 4Kb boundary */
	ldr	x0, =NXP_GICC_4K_ADDR
#else
	ldr	x0, =NXP_GICC_64K_ADDR
#endif
	ret
endfunc _getGICC_BaseAddr
/*
 * Part of CPU_SUSPEND
 * this function puts the calling core into standby state; the barriers
 * ensure all prior accesses complete before the core enters wfi
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_entr_stdby
	dsb	sy
	isb
	wfi
	ret
endfunc _soc_core_entr_stdby
/*
 * Part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby:
 * it disables the CPUECTLR retention timer so the core stays in
 * standby (no retention entry) during wfi
 * in:  x0 = core mask lsb
 * out: none
 * uses x1
 */
func _soc_core_prep_stdby
	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_core_prep_stdby
/*
 * Part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state;
 * nothing is required on this SoC
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_stdby
	ret
endfunc _soc_core_exit_stdby
/*
 * Part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_prep_pwrdn
	mov	x5, x30			/* preserve LR */
	mov	x4, x0			/* x4 = core mask lsb */
	/*
	 * enable CPU retention + set smp
	 * NOTE(review): the raw #0x1 is the CPUECTLR retention-timer field
	 * value; other functions in this file use the named
	 * CPUECTLR_TIMER_* constants - confirm against the A72 TRM whether
	 * a named constant exists for this setting
	 */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	orr	x1, x1, #0x1
	orr	x1, x1, #CPUECTLR_SMPEN_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1
	/*
	 * set the retention request in SCFG_RETREQCR
	 * this is an msb-0 register, hence the rbit
	 */
	ldr	x3, =SCFG_RETREQCR_OFFSET
	mov	x0, x3
	bl	read_reg_scfg
	rbit	w1, w4
	orr	w1, w0, w1
	mov	x0, x3
	bl	write_reg_scfg
	/*
	 * Set the PC_PH20_REQ bit in RCPM_PCPH20SETR
	 * this is an lsb-0 register
	 */
	mov	x1, x4
	mov	x0, #RCPM_PCPH20SETR_OFFSET
	bl	write_reg_rcpm
	mov	x30, x5
	ret
endfunc _soc_core_prep_pwrdn
/*
 * Part of CPU_SUSPEND
 * this function puts the calling core into a power-down state; the
 * barriers ensure all prior accesses complete before the core enters wfi
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_entr_pwrdn
	dsb	sy
	isb
	wfi
	ret
endfunc _soc_core_entr_pwrdn
/*
 * Part of CPU_SUSPEND
 * this function cleans up after a core exits power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_exit_pwrdn
	mov	x5, x30			/* preserve LR */
	mov	x4, x0			/* x4 = core mask lsb */
	/*
	 * Clear the PC_PH20_REQ bit via RCPM_PCPH20CLRR
	 * this is an lsb-0 register
	 */
	mov	x1, x4
	mov	x0, #RCPM_PCPH20CLRR_OFFSET
	bl	write_reg_rcpm
	/*
	 * Unset the retention request in SCFG_RETREQCR
	 * this is an msb-0 register, hence the rbit
	 */
	ldr	x3, =SCFG_RETREQCR_OFFSET
	mov	x0, x3
	bl	read_reg_scfg
	rbit	w1, w4
	bic	w1, w0, w1
	mov	x0, x3
	bl	write_reg_scfg
	mov	x30, x5
	ret
endfunc _soc_core_exit_pwrdn
/*
 * Part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby:
 * it disables the CPUECTLR retention timer (same as the core variant)
 * in:  x0 = core mask lsb
 * out: none
 * uses x1
 */
func _soc_clstr_prep_stdby
	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_clstr_prep_stdby
/*
 * Part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state;
 * nothing is required on this SoC
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_stdby
	ret
endfunc _soc_clstr_exit_stdby
/*
 * Part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_clstr_prep_pwrdn
	mov	x5, x30			/* preserve LR */
	mov	x4, x0			/* x4 = core mask lsb */
	/*
	 * enable CPU retention + set smp
	 * NOTE(review): the raw #0x1 is the CPUECTLR retention-timer field
	 * value; other functions in this file use the named
	 * CPUECTLR_TIMER_* constants - confirm against the A72 TRM whether
	 * a named constant exists for this setting
	 */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	orr	x1, x1, #0x1
	orr	x1, x1, #CPUECTLR_SMPEN_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1
	/*
	 * Set the retention request in SCFG_RETREQCR
	 * this is an msb-0 register, hence the rbit.
	 */
	ldr	x3, =SCFG_RETREQCR_OFFSET
	mov	x0, x3
	bl	read_reg_scfg
	rbit	w1, w4
	orr	w1, w0, w1
	mov	x0, x3
	bl	write_reg_scfg
	/*
	 * Set the PC_PH20_REQ bit in RCPM_PCPH20SETR
	 * this is an lsb-0 register.
	 */
	mov	x1, x4
	mov	x0, #RCPM_PCPH20SETR_OFFSET
	bl	write_reg_rcpm
	mov	x30, x5
	ret
endfunc _soc_clstr_prep_pwrdn
/*
 * Part of CPU_SUSPEND
 * this function cleans up after a core exits power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_clstr_exit_pwrdn
	mov	x5, x30			/* preserve LR */
	mov	x4, x0			/* x4 = core mask lsb */
	/*
	 * Clear the PC_PH20_REQ bit via RCPM_PCPH20CLRR
	 * this is an lsb-0 register.
	 */
	mov	x1, x4
	mov	x0, #RCPM_PCPH20CLRR_OFFSET
	bl	write_reg_rcpm
	/*
	 * Unset the retention request in SCFG_RETREQCR
	 * this is an msb-0 register, hence the rbit.
	 */
	ldr	x3, =SCFG_RETREQCR_OFFSET
	mov	x0, x3
	bl	read_reg_scfg
	rbit	w1, w4
	bic	w1, w0, w1
	mov	x0, x3
	bl	write_reg_scfg
	mov	x30, x5
	ret
endfunc _soc_clstr_exit_pwrdn
/*
 * Part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to standby:
 * it disables the CPUECTLR retention timer (same as the core variant)
 * in:  x0 = core mask lsb
 * out: none
 * uses x1
 */
func _soc_sys_prep_stdby
	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs	x1, CORTEX_A72_ECTLR_EL1
	bic	x1, x1, #CPUECTLR_TIMER_MASK
	msr	CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_sys_prep_stdby
/*
 * Part of CPU_SUSPEND
 * this function performs any SoC-specific cleanup after standby state;
 * nothing is required on this SoC
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_sys_exit_stdby
	ret
endfunc _soc_sys_exit_stdby
/*
 * Part of CPU_SUSPEND
 * this function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4
 */
func _soc_sys_prep_pwrdn
	mov	x4, x30			/* preserve LR */
	/* Enable dynamic retention control (CPUECTLR[2:0]) and SMP (CPUECTLR[6]) */
	mrs	x0, CORTEX_A72_ECTLR_EL1
	bic	x0, x0, #CPUECTLR_TIMER_MASK
	orr	x0, x0, #CPUECTLR_TIMER_8TICKS
	orr	x0, x0, #CPUECTLR_SMPEN_EN
	msr	CORTEX_A72_ECTLR_EL1, x0
	/* Set WFIL2EN in SCFG_CLUSTERPMCR */
	ldr	x0, =SCFG_COREPMCR_OFFSET
	ldr	x1, =COREPMCR_WFIL2
	bl	write_reg_scfg
	isb
	mov	x30, x4
	ret
endfunc _soc_sys_prep_pwrdn
/*
 * Part of CPU_SUSPEND
 * this function puts the calling core, and potentially the soc, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *      x0 < 0, failure
 * NOTE(review): no instruction on this path clears x0 before returning -
 * on exit x0 still holds the offset passed to write_reg_rcpm; confirm
 * callers ignore the return value or that the stated contract is stale
 * uses x0, x1, x2, x3, x4
 */
func _soc_sys_pwrdn_wfi
	mov	x4, x30			/* preserve LR */
	/* request LPM20 (read-modify-write of RCPM_POWMGTCSR) */
	mov	x0, #RCPM_POWMGTCSR_OFFSET
	bl	read_reg_rcpm
	orr	x1, x0, #RCPM_POWMGTCSR_LPM20_REQ
	mov	x0, #RCPM_POWMGTCSR_OFFSET
	bl	write_reg_rcpm
	dsb	sy
	isb
	wfi
	mov	x30, x4
	ret
endfunc _soc_sys_pwrdn_wfi
  800. /*
  801. * Part of CPU_SUSPEND
  802. * this function performs any SoC-specific cleanup after power-down
  803. * in: x0 = core mask lsb
  804. * out: none
  805. * uses x0, x1
  806. */
  807. func _soc_sys_exit_pwrdn
  808. /* clear WFIL2_EN in SCFG_COREPMCR */
  809. mov x1, #NXP_SCFG_ADDR
  810. str wzr, [x1, #SCFG_COREPMCR_OFFSET]
  811. ret
  812. endfunc _soc_sys_exit_pwrdn