/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <asm_macros.S>
#include <assert_macros.S>

#include <lib/psci/psci.h>

#include <bl31_data.h>
#include <plat_psci.h>

#define RESET_RETRY_CNT		800
#define PSCI_ABORT_CNT		100
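
/* Poll-loop bounds: PSCI_ABORT_CNT caps how many times _psci_cpu_on polls
 * for a pending CPU_OFF to be aborted; RESET_RETRY_CNT is assumed to bound
 * analogous reset polling in the SoC-specific code (it is not used in this
 * file).
 */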

#if (SOC_CORE_RELEASE)

.global _psci_cpu_on

/*
 * int _psci_cpu_on(u_register_t core_mask)
 * x0 = target cpu core mask
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 */
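
/*
 * Dispatch on the core state recorded in the per-core data area
 * (read via _getCoreState):
 *   CORE_DISABLED    -> return PSCI_E_DISABLED
 *   CORE_PENDING     -> return PSCI_E_ON_PENDING
 *   CORE_RELEASED    -> return PSCI_E_ALREADY_ON
 *   CORE_WFE         -> wake the core with sev
 *   CORE_IN_RESET    -> release the core from reset
 *   CORE_OFF         -> restart the powered-down core
 *   CORE_OFF_PENDING -> request an abort of the in-flight off, then poll
 */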

func _psci_cpu_on
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0

	/* x0 = core mask (lsb)
	 * x6 = core mask (lsb)
	 */

	/* check if core disabled */
	bl   _soc_ck_disabled		/* 0-2 */
	cbnz w0, psci_disabled

	/* check core data area to see if core cannot be turned on
	 * read the core state
	 */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	mov  x9, x0

	/* x6 = core mask (lsb)
	 * x9 = core state (from data area)
	 */

	cmp  x9, #CORE_DISABLED
	mov  x0, #PSCI_E_DISABLED
	b.eq cpu_on_done

	cmp  x9, #CORE_PENDING
	mov  x0, #PSCI_E_ON_PENDING
	b.eq cpu_on_done

	cmp  x9, #CORE_RELEASED
	mov  x0, #PSCI_E_ALREADY_ON
	b.eq cpu_on_done

8:
	/* x6 = core mask (lsb)
	 * x9 = core state (from data area)
	 */
	cmp  x9, #CORE_WFE
	b.eq core_in_wfe
	cmp  x9, #CORE_IN_RESET
	b.eq core_in_reset
	cmp  x9, #CORE_OFF
	b.eq core_is_off
	cmp  x9, #CORE_OFF_PENDING

	/* if state == CORE_OFF_PENDING, set abort */
	mov  x0, x6
	mov  x1, #ABORT_FLAG_DATA
	mov  x2, #CORE_ABORT_OP
	bl   _setCoreData		/* 0-3, [13-15] */

	ldr  x3, =PSCI_ABORT_CNT
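
	/* Abort handshake (as implied by the flags above): the target core,
	 * on its way into OFF, is assumed to check CORE_ABORT_OP and, if set,
	 * re-enter service; we poll below until its state settles to either
	 * CORE_OFF (abort lost the race) or CORE_PENDING (abort took effect).
	 */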
7:
	/* watch for abort to take effect */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	cmp  x0, #CORE_OFF
	b.eq core_is_off
	cmp  x0, #CORE_PENDING
	mov  x0, #PSCI_E_SUCCESS
	b.eq cpu_on_done

	/* loop until finished */
	sub  x3, x3, #1
	cbnz x3, 7b

	/* if we didn't see either CORE_OFF or CORE_PENDING, then this
	 * core is in CORE_OFF_PENDING - exit with success, as the core will
	 * respond to the abort request
	 */
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

	/* this is where we start up a core out of reset */
core_in_reset:
	/* see if the soc-specific module supports this op */
	ldr  x7, =SOC_CORE_RELEASE
	cbnz x7, 3f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6 = core mask (lsb) */
3:
	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState		/* 0-3, [13-15] */

	/* release the core from reset */
	mov  x0, x6
	bl   _soc_core_release		/* 0-3 */
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

	/* Start up the core that has been powered-down via CPU_OFF
	 */
core_is_off:
	/* see if the soc-specific module supports this op
	 */
	ldr  x7, =SOC_CORE_RESTART
	cbnz x7, 2f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6 = core mask (lsb) */
2:
	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_WAKEUP
	bl   _setCoreState		/* 0-3, [13-15] */

	/* put the core back into service */
	mov  x0, x6
#if (SOC_CORE_RESTART)
	bl   _soc_core_restart		/* 0-5 */
#endif
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

	/* this is where we release a core that is being held in wfe */
core_in_wfe:
	/* x6 = core mask (lsb) */

	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState		/* 0-3, [13-15] */
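
	/* make the CORE_PENDING write visible system-wide before waking
	 * the target core out of wfe
	 */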
	dsb  sy
	isb

	/* put the core back into service */
	sev
	sev
	isb
	mov  x0, #PSCI_E_SUCCESS

cpu_on_done:
	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_on

#endif

#if (SOC_CORE_OFF)

.global _psci_cpu_prep_off
.global _psci_cpu_off_wfi

/*
 * void _psci_cpu_prep_off(u_register_t core_mask)
 * this function performs the SoC-specific programming prior
 * to shutting the core down
 * x0 = core_mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_cpu_prep_off
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x10, x0			/* x10 = core_mask */

	/* the core does not return from cpu_off, so no need
	 * to save/restore non-volatile registers
	 */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* read cpuectlr and save current value */
	mrs  x4, CPUECTLR_EL1
	mov  x1, #CPUECTLR_DATA
	mov  x2, x4
	mov  x0, x10
	bl   _setCoreData

	/* remove the core from coherency */
	bic  x4, x4, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x4
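
	/* clearing CPUECTLR_EL1.SMPEN takes this core out of hardware
	 * coherency, a prerequisite for powering down its caches
	 */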

	/* save scr_el3 */
	mov  x0, x10
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* x4 = scr_el3 */

	/* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
	orr  x4, x4, #SCR_FIQ_MASK
	msr  SCR_EL3, x4

	/* x10 = core_mask */

	/* prep the core for shutdown */
	mov  x0, x10
	bl   _soc_core_prep_off

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_prep_off

/*
 * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
 *  - this function shuts down the core
 *  - this function does not return!!
 */
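
/* Note: resume_addr is parked in x29 (callee-saved) on the assumption that
 * _soc_core_entr_off preserves it; the br below is only reached if the off
 * is aborted (see the CORE_OFF_PENDING handling in _psci_cpu_on).
 */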
func _psci_cpu_off_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core_mask */

	/* shutdown the core */
	bl   _soc_core_entr_off

	/* branch to resume execution */
	br   x29
endfunc _psci_cpu_off_wfi

#endif

#if (SOC_CORE_RESTART)

.global _psci_wakeup

/*
 * void _psci_wakeup(u_register_t core_mask)
 * this function performs the SoC-specific programming
 * after a core wakes up from OFF
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_wakeup
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore CPUECTLR */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* start the core back up */
	mov  x0, x4
	bl   _soc_core_exit_off

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_wakeup

#endif

#if (SOC_SYSTEM_RESET)

.global _psci_system_reset

func _psci_system_reset
	/* system reset is mandatory
	 * system reset is soc-specific
	 * Note: under no circumstances do we return from this call
	 */
	bl   _soc_sys_reset
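	/* _soc_sys_reset does not return, so the link register saved
	 * by the bl above is never consumed
	 */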
endfunc _psci_system_reset

#endif

#if (SOC_SYSTEM_OFF)

.global _psci_system_off

func _psci_system_off
	/* system off is mandatory
	 * system off is soc-specific
	 * Note: under no circumstances do we return from this call
	 */
	b    _soc_sys_off
endfunc _psci_system_off

#endif

#if (SOC_CORE_STANDBY)

.global _psci_core_entr_stdby
.global _psci_core_prep_stdby
.global _psci_core_exit_stdby

/*
 * void _psci_core_entr_stdby(u_register_t core_mask) - this
 * is the fast-path for simple core standby
 */

func _psci_core_entr_stdby
	stp  x4, x5,  [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* x4 = SCR_EL3
	 * x5 = core mask
	 */

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4
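
	/* with SCR_EL3.IRQ/FIQ set, a pending interrupt is routed to EL3
	 * and can therefore wake this core from the standby entered below
	 */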

	/* x5 = core mask */

	/* put the core into standby */
	mov  x0, x5
	bl   _soc_core_entr_stdby

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	ldp  x6, x30, [sp], #16
	ldp  x4, x5,  [sp], #16
	isb
	ret
endfunc _psci_core_entr_stdby

/*
 * void _psci_core_prep_stdby(u_register_t core_mask) - this
 * sets up the core to enter standby state thru the normal path
 */

func _psci_core_prep_stdby
	stp  x4, x5,  [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0
	/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_core_prep_stdby

	ldp  x6, x30, [sp], #16
	ldp  x4, x5,  [sp], #16
	isb
	ret
endfunc _psci_core_prep_stdby

/*
 * void _psci_core_exit_stdby(u_register_t core_mask) - this
 * exits the core from standby state thru the normal path
 */

func _psci_core_exit_stdby
	stp  x4, x5,  [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0
	/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov  x0, x5
	bl   _soc_core_exit_stdby

	ldp  x6, x30, [sp], #16
	ldp  x4, x5,  [sp], #16
	isb
	ret
endfunc _psci_core_exit_stdby

#endif

#if (SOC_CORE_PWR_DWN)

.global _psci_core_prep_pwrdn
.global _psci_cpu_pwrdn_wfi
.global _psci_core_exit_pwrdn

/*
 * void _psci_core_prep_pwrdn(u_register_t core_mask)
 * this function prepares the core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0
	/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	bl   _setCoreData

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl   _soc_core_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_prep_pwrdn

/*
 * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the core
 */

func _psci_cpu_pwrdn_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core mask */

	/* shutdown the core */
	bl   _soc_core_entr_pwrdn

	/* branch to resume execution */
	br   x29
endfunc _psci_cpu_pwrdn_wfi

/*
 * void _psci_core_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a core power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* restore cpuectlr */
	mov  x0, x5
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x5 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x5
	bl   _soc_core_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_exit_pwrdn

#endif

#if (SOC_CLUSTER_STANDBY)

.global _psci_clstr_prep_stdby
.global _psci_clstr_exit_stdby

/*
 * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
 * sets up the clstr to enter standby state thru the normal path
 */

func _psci_clstr_prep_stdby
	stp  x4, x5,  [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0
	/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_clstr_prep_stdby

	ldp  x6, x30, [sp], #16
	ldp  x4, x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_prep_stdby

/*
 * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
 * exits the clstr from standby state thru the normal path
 */

func _psci_clstr_exit_stdby
	stp  x4, x5,  [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov  x0, x5
	bl   _soc_clstr_exit_stdby

	ldp  x6, x30, [sp], #16
	ldp  x4, x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_exit_stdby

#endif

#if (SOC_CLUSTER_PWR_DWN)

.global _psci_clstr_prep_pwrdn
.global _psci_clstr_exit_pwrdn

/*
 * void _psci_clstr_prep_pwrdn(u_register_t core_mask)
 * this function prepares the cluster+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	mov  x4, x2
	bl   _setCoreData

	/* remove core from coherency */
	bic  x4, x4, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x4
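
	/* note: unlike _psci_core_prep_pwrdn, the cluster path drops the
	 * core out of coherency (SMPEN cleared) before power-down
	 */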

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl   _soc_clstr_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_clstr_prep_pwrdn

/*
 * void _psci_clstr_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a cluster power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_clstr_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_clstr_exit_pwrdn

#endif

#if (SOC_SYSTEM_STANDBY)

.global _psci_sys_prep_stdby
.global _psci_sys_exit_stdby

/*
 * void _psci_sys_prep_stdby(u_register_t core_mask) - this
 * sets up the system to enter standby state thru the normal path
 */

func _psci_sys_prep_stdby
	stp  x4, x5,  [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_sys_prep_stdby

	ldp  x6, x30, [sp], #16
	ldp  x4, x5,  [sp], #16
	isb
	ret
endfunc _psci_sys_prep_stdby

/*
 * void _psci_sys_exit_stdby(u_register_t core_mask) - this
 * exits the system from standby state thru the normal path
 */

func _psci_sys_exit_stdby
	stp  x4, x5,  [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0
	/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov  x0, x5
	bl   _soc_sys_exit_stdby

	ldp  x6, x30, [sp], #16
	ldp  x4, x5,  [sp], #16
	isb
	ret
endfunc _psci_sys_exit_stdby

#endif

#if (SOC_SYSTEM_PWR_DWN)

.global _psci_sys_prep_pwrdn
.global _psci_sys_pwrdn_wfi
.global _psci_sys_exit_pwrdn

/*
 * void _psci_sys_prep_pwrdn(u_register_t core_mask)
 * this function prepares the system+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	mov  x4, x2
	bl   _setCoreData

	/* remove core from coherency */
	bic  x4, x4, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl   _soc_sys_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_sys_prep_pwrdn

/*
 * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the system
 */

func _psci_sys_pwrdn_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core mask */

	/* shutdown the system */
	bl   _soc_sys_pwrdn_wfi

	/* branch to resume execution */
	br   x29
endfunc _psci_sys_pwrdn_wfi

/*
 * void _psci_sys_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a system power-down
 * x0 = core mask
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_sys_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_sys_exit_pwrdn

#endif

/* psci std returns */
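/* Each helper below loads a PSCI status code into w0/x0 and funnels
 * through psci_completed (psci_success by fall-through), which returns
 * to the caller.
 */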

func psci_disabled
	ldr  w0, =PSCI_E_DISABLED
	b    psci_completed
endfunc psci_disabled

func psci_not_present
	ldr  w0, =PSCI_E_NOT_PRESENT
	b    psci_completed
endfunc psci_not_present

func psci_on_pending
	ldr  w0, =PSCI_E_ON_PENDING
	b    psci_completed
endfunc psci_on_pending

func psci_already_on
	ldr  w0, =PSCI_E_ALREADY_ON
	b    psci_completed
endfunc psci_already_on

func psci_failure
	ldr  w0, =PSCI_E_INTERN_FAIL
	b    psci_completed
endfunc psci_failure

func psci_unimplemented
	ldr  w0, =PSCI_E_NOT_SUPPORTED
	b    psci_completed
endfunc psci_unimplemented

func psci_denied
	ldr  w0, =PSCI_E_DENIED
	b    psci_completed
endfunc psci_denied

func psci_invalid
	ldr  w0, =PSCI_E_INVALID_PARAMS
	b    psci_completed
endfunc psci_invalid

func psci_success
	mov  x0, #PSCI_E_SUCCESS
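	/* falls through to psci_completed immediately below */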
endfunc psci_success

func psci_completed
	/* x0 = status code */
	ret
endfunc psci_completed