/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef PSCI_PRIVATE_H
#define PSCI_PRIVATE_H

#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <lib/bakery_lock.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/psci/psci.h>
#include <lib/spinlock.h>

/*
 * The PSCI capabilities which are provided by the generic code and do not
 * depend on the platform or SPD capabilities.
 */
#define PSCI_GENERIC_CAP	\
	(define_psci_cap(PSCI_VERSION) |		\
	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
	define_psci_cap(PSCI_FEATURES))

/*
 * The PSCI capabilities mask for 64 bit functions.
 */
#define PSCI_CAP_64BIT_MASK	\
	(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
	define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
	define_psci_cap(PSCI_MIG_AARCH64) |		\
	define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
	define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
	define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
	define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
	define_psci_cap(PSCI_STAT_COUNT_AARCH64) |	\
	define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) |	\
	define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
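
/*
 * Illustrative sketch (not part of the upstream header): a caller can test
 * whether a given call is advertised by checking the psci_caps bitmap
 * (declared later in this header) against the same define_psci_cap()
 * encoding used above, e.g. CPU_ON (AArch64) is implemented when
 *
 *	(psci_caps & define_psci_cap(PSCI_CPU_ON_AARCH64)) != 0U
 *
 * evaluates to true.
 */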

/*
 * Internally, PSCI uses a uint16_t for various CPU indexes, so define a limit
 * on the number of CPUs that can be initialised.
 */
#define PSCI_MAX_CPUS_INDEX	0xFFFFU

/* Invalid parent */
#define PSCI_PARENT_NODE_INVALID	0xFFFFFFFFU

/*
 * Helper functions to get/set the fields of PSCI per-cpu data.
 */
static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
{
	set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
}

static inline aff_info_state_t psci_get_aff_info_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.aff_info_state);
}

static inline aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx)
{
	return get_cpu_data_by_index(idx,
				     psci_svc_cpu_data.aff_info_state);
}

static inline void psci_set_aff_info_state_by_idx(unsigned int idx,
						   aff_info_state_t aff_state)
{
	set_cpu_data_by_index(idx,
			      psci_svc_cpu_data.aff_info_state, aff_state);
}

static inline unsigned int psci_get_suspend_pwrlvl(void)
{
	return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
}

static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
{
	set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
}

static inline void psci_set_cpu_local_state(plat_local_state_t state)
{
	set_cpu_data(psci_svc_cpu_data.local_state, state);
}

static inline plat_local_state_t psci_get_cpu_local_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.local_state);
}

static inline plat_local_state_t psci_get_cpu_local_state_by_idx(
		unsigned int idx)
{
	return get_cpu_data_by_index(idx,
				     psci_svc_cpu_data.local_state);
}
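
/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * power-up path typically uses these accessors to update the per-cpu PSCI
 * bookkeeping once a CPU is running again, along the lines of:
 *
 *	psci_set_aff_info_state(AFF_STATE_ON);
 *	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
 *
 * where AFF_STATE_ON and PSCI_LOCAL_STATE_RUN come from lib/psci/psci.h.
 */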

/* Helper function to identify a CPU standby request in PSCI Suspend call */
static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
				      unsigned int retn_lvl)
{
	return (is_power_down_state == 0U) && (retn_lvl == 0U);
}
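
/*
 * Illustrative sketch (an assumption about the calling code, not part of this
 * header): CPU_SUSPEND derives both arguments from the power_state parameter,
 * roughly:
 *
 *	is_power_down_state = psci_get_pstate_type(power_state);
 *	target_lvl = psci_find_target_suspend_lvl(&state_info);
 *	if (is_cpu_standby_req(is_power_down_state, target_lvl)) {
 *		... enter standby without powering down ...
 *	}
 */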

/*******************************************************************************
 * The following two data structures implement the power domain tree. The tree
 * is used to track the state of all the nodes i.e. power domain instances
 * described by the platform. The tree consists of nodes that describe CPU power
 * domains i.e. leaf nodes and all other power domains which are parents of a
 * CPU power domain i.e. non-leaf nodes.
 ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	plat_local_state_t local_state;

	unsigned char level;

	/* For indexing the psci_lock array */
	uint16_t lock_index;
} non_cpu_pd_node_t;

typedef struct cpu_pwr_domain_node {
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
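
/*
 * Illustrative sketch (not part of the upstream header): the tree is walked
 * from a leaf towards the root by following parent_node indices, which is
 * broadly what psci_get_parent_pwr_domain_nodes() below does, e.g.
 *
 *	unsigned int parent = psci_cpu_pd_nodes[cpu_idx].parent_node;
 *
 *	for (unsigned int lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_lvl; lvl++) {
 *		node_index[lvl - 1U] = parent;
 *		parent = psci_non_cpu_pd_nodes[parent].parent_node;
 *	}
 */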

#if PSCI_OS_INIT_MODE
/*******************************************************************************
 * The supported power state coordination modes that can be used in CPU_SUSPEND.
 ******************************************************************************/
typedef enum suspend_mode {
	PLAT_COORD = 0,
	OS_INIT = 1
} suspend_mode_t;
#endif

/*******************************************************************************
 * The following are helpers and declarations of locks.
 ******************************************************************************/
#if HW_ASSISTED_COHERENCY
/*
 * On systems where participant CPUs are cache-coherent, we can use spinlocks
 * instead of bakery locks.
 */
#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)

/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

/*
 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
 * as PSCI participants are cache-coherent, and there's no need for explicit
 * cache maintenance operations or barriers to coordinate their state.
 */
static inline void psci_flush_dcache_range(uintptr_t __unused addr,
					   size_t __unused size)
{
	/* Empty */
}

#define psci_flush_cpu_data(member)
#define psci_inv_cpu_data(member)

static inline void psci_dsbish(void)
{
	/* Empty */
}

static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_lock(&psci_locks[non_cpu_pd_node->lock_index]);
}

static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_unlock(&psci_locks[non_cpu_pd_node->lock_index]);
}

#else /* if HW_ASSISTED_COHERENCY == 0 */
/*
 * Use bakery locks for state coordination as not all PSCI participants are
 * cache coherent.
 */
#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)

/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

/*
 * If not all PSCI participants are cache-coherent, perform cache maintenance
 * and issue barriers wherever required to coordinate state.
 */
static inline void psci_flush_dcache_range(uintptr_t addr, size_t size)
{
	flush_dcache_range(addr, size);
}

#define psci_flush_cpu_data(member)	flush_cpu_data(member)
#define psci_inv_cpu_data(member)	inv_cpu_data(member)

static inline void psci_dsbish(void)
{
	dsbish();
}

static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_get(&psci_locks[non_cpu_pd_node->lock_index]);
}

static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_release(&psci_locks[non_cpu_pd_node->lock_index]);
}

#endif /* HW_ASSISTED_COHERENCY */

static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node,
				  uint16_t idx)
{
	non_cpu_pd_node[idx].lock_index = idx;
}
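
/*
 * Usage sketch (illustrative, not part of the upstream header): state
 * coordination first collects the calling CPU's parent node indices and then
 * takes the per-node locks from the lowest affected power level upwards,
 * releasing them in the reverse order once the state update is done:
 *
 *	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
 *
 *	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
 *	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
 *	... read and update the requested/target local states ...
 *	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
 */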

/*******************************************************************************
 * Data prototypes
 ******************************************************************************/
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_caps;
extern unsigned int psci_plat_core_count;
#if PSCI_OS_INIT_MODE
extern suspend_mode_t psci_suspend_mode;
#endif

/*******************************************************************************
 * SPD's power management hooks registered with PSCI
 ******************************************************************************/
extern const spd_pm_ops_t *psci_spd_pm;

/*******************************************************************************
 * Function prototypes
 ******************************************************************************/
/* Private exported functions from psci_common.c */
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
void psci_init_req_local_pwr_states(void);
#if PSCI_OS_INIT_MODE
void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
				      unsigned int cpu_idx,
				      psci_power_state_t *state_info,
				      plat_local_state_t *prev);
void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
				       plat_local_state_t *prev);
#endif
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state);
void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
				      const psci_power_state_t *target_state);
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int *node_index);
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info);
#if PSCI_OS_INIT_MODE
int psci_validate_state_coordination(unsigned int end_pwrlvl,
				     psci_power_state_t *state_info);
#endif
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
bool psci_is_last_on_cpu(void);
int psci_spd_migrate_info(u_register_t *mpidr);

/*
 * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
 * available. Otherwise, this needs post-call stack maintenance, which is
 * handled in assembly.
 */
void prepare_cpu_pwr_dwn(unsigned int power_level);

/* This function applies various CPU errata during power down. */
void apply_cpu_pwr_dwn_errata(void);

/* Private exported functions from psci_on.c */
int psci_cpu_on_start(u_register_t target_cpu,
		      const entry_point_info_t *ep);
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);

/* Private exported functions from psci_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);

/* Private exported functions from psci_suspend.c */
int psci_cpu_suspend_start(const entry_point_info_t *ep,
			   unsigned int end_pwrlvl,
			   psci_power_state_t *state_info,
			   unsigned int is_power_down_state);
void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);

/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);

/* Private exported functions from psci_system_off.c */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);
u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie);

/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
				const psci_power_state_t *state_info);
void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
			      const psci_power_state_t *state_info);
u_register_t psci_stat_residency(u_register_t target_cpu,
				 unsigned int power_state);
u_register_t psci_stat_count(u_register_t target_cpu,
			     unsigned int power_state);

/* Private exported functions from psci_mem_protect.c */
u_register_t psci_mem_protect(unsigned int enable);
u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length);

#endif /* PSCI_PRIVATE_H */