psci_off.c

/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <plat/common/platform.h>

#include "psci_private.h"
/******************************************************************************
 * Construct the psci_power_state to request power OFF at all power levels.
 ******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
        unsigned int lvl;

        for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
                state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs the generic, architectural and platform
 * setup and state management required to turn OFF that power domain and the
 * domains below it. e.g. For a cpu that's to be powered OFF, it could mean
 * programming the power controller, whereas for a cluster that's to be
 * powered off, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster
 * and also program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
        int rc = PSCI_E_SUCCESS;
        unsigned int idx = plat_my_core_pos();
        psci_power_state_t state_info;
        unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

        /*
         * This function must only be called on platforms where the
         * CPU_OFF platform hooks have been implemented.
         */
        assert(psci_plat_pm_ops->pwr_domain_off != NULL);

        /* Construct the psci_power_state for CPU_OFF */
        psci_set_power_off_state(&state_info);

        /*
         * Call the platform provided early CPU_OFF handler to allow
         * platforms to perform any housekeeping activities before
         * actually powering the CPU off. PSCI_E_DENIED indicates that
         * the CPU off sequence should be aborted at this time.
         */
        if (psci_plat_pm_ops->pwr_domain_off_early != NULL) {
                rc = psci_plat_pm_ops->pwr_domain_off_early(&state_info);
                if (rc == PSCI_E_DENIED) {
                        return rc;
                }
        }
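
        /*
         * Illustrative sketch: one way a platform might implement the
         * optional pwr_domain_off_early hook called above. The my_plat_*
         * names and the pending-wakeup check are hypothetical; the hook is
         * assumed to take the same psci_power_state_t pointer passed here,
         * to veto the CPU_OFF request by returning PSCI_E_DENIED, and to
         * return PSCI_E_SUCCESS to let the sequence continue.
         *
         *      static int my_plat_pwr_domain_off_early(const psci_power_state_t *target_state)
         *      {
         *              // Abort CPU_OFF if a wake-up is already pending for
         *              // this core; the caller then returns PSCI_E_DENIED.
         *              if (my_plat_wakeup_pending(plat_my_core_pos()))
         *                      return PSCI_E_DENIED;
         *
         *              return PSCI_E_SUCCESS;
         *      }
         */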
        /*
         * Get the parent nodes here, this is important to do before we
         * initiate the power down sequence as after that point the core may
         * have exited coherency and its cache may be disabled; any access to
         * shared memory after that point (such as the parent node lookup in
         * psci_cpu_pd_nodes) can cause coherency issues on some platforms.
         */
        psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);

        /*
         * This function acquires the lock corresponding to each power
         * level so that by the time all locks are taken, the system topology
         * has been snapshotted and state management can be done safely.
         */
        psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

        /*
         * Call the cpu off handler registered by the Secure Payload Dispatcher
         * to let it do any bookkeeping. Assume that the SPD always reports an
         * E_DENIED error if the SP refuses to power down.
         */
        if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL)) {
                rc = psci_spd_pm->svc_off(0);
                if (rc != 0)
                        goto exit;
        }
        /*
         * This function is passed the requested state info and
         * it returns the negotiated state info for each power level up to
         * the end level specified.
         */
        psci_do_state_coordination(end_pwrlvl, &state_info);

        /* Update the target state in the power domain nodes */
        psci_set_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
        /* Update the last cpu for each level till end_pwrlvl */
        psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
        /*
         * Flush cache line so that even if CPU power down happens
         * the timestamp update is reflected in memory.
         */
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                RT_INSTR_ENTER_CFLUSH,
                PMF_CACHE_MAINT);
#endif

        /*
         * Arch. management. Initiate power down sequence.
         */
        psci_pwrdown_cpu(psci_find_max_off_lvl(&state_info));

#if ENABLE_RUNTIME_INSTRUMENTATION
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                RT_INSTR_EXIT_CFLUSH,
                PMF_NO_CACHE_MAINT);
#endif

        /*
         * Plat. management: Perform platform specific actions to turn this
         * cpu off e.g. exit cpu coherency, program the power controller etc.
         */
        psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
        plat_psci_stat_accounting_start(&state_info);
#endif
exit:
        /*
         * Release the locks corresponding to each power level in the
         * reverse order to which they were acquired.
         */
        psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

        /*
         * Check if all actions needed to safely power down this cpu have
         * successfully completed.
         */
        if (rc == PSCI_E_SUCCESS) {
                /*
                 * Set the affinity info state to OFF. When caches are disabled,
                 * this writes directly to main memory, so cache maintenance is
                 * required to ensure that later cached reads of aff_info_state
                 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
                 * update to the affinity info state prior to cache line
                 * invalidation.
                 */
                psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
                psci_set_aff_info_state(AFF_STATE_OFF);
                psci_dsbish();
                psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION
                /*
                 * Update the timestamp with cache off. We assume this
                 * timestamp can only be read from the current CPU and the
                 * timestamp cache line will be flushed before return to
                 * normal world on wakeup.
                 */
                PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                        RT_INSTR_ENTER_HW_LOW_PWR,
                        PMF_NO_CACHE_MAINT);
#endif
                if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) {
                        /* This function must not return */
                        psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
                } else {
                        /*
                         * Enter a wfi loop which will allow the power
                         * controller to physically power down this cpu.
                         */
                        psci_power_down_wfi();
                }
        }

        return rc;
}
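
/*
 * Illustrative sketch: minimal platform-side wiring for the hooks invoked in
 * psci_do_cpu_off() above, assuming the plat_psci_ops_t layout from
 * include/lib/psci/psci.h. The my_plat_/my_soc_ helpers are hypothetical
 * stand-ins for a real power-controller driver.
 *
 *      static void my_plat_pwr_domain_off(const psci_power_state_t *target_state)
 *      {
 *              // Take this core out of coherency and ask the power
 *              // controller to remove power once the core executes wfi.
 *              my_soc_disable_coherency(plat_my_core_pos());
 *              my_soc_core_power_off(plat_my_core_pos());
 *
 *              // If the cluster level was also negotiated to the OFF state,
 *              // quiesce the cluster as well.
 *              if (target_state->pwr_domain_state[MPIDR_AFFLVL1] == PLAT_MAX_OFF_STATE)
 *                      my_soc_cluster_power_off();
 *      }
 *
 *      static const plat_psci_ops_t my_plat_psci_ops = {
 *              .pwr_domain_off_early   = my_plat_pwr_domain_off_early,
 *              .pwr_domain_off         = my_plat_pwr_domain_off,
 *              // .pwr_domain_pwr_down_wfi is optional; when left NULL the
 *              // generic psci_power_down_wfi() loop above is used instead.
 *      };
 */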