plat_psci.c

/*
 * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
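
/*
 * PSCI platform ops for AMD-Xilinx platforms running without PMC/PMU
 * firmware: cores are brought up directly through the APU PCLI and CRF
 * registers, and a small set of PM SMC calls is served locally.
 */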

#include <assert.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <plat_arm.h>
#include <plat_private.h>
#include <pm_defs.h>

#define PM_RET_ERROR_NOFEATURE U(19)
#define ALWAYSTRUE true
#define LINEAR_MODE BIT(1)

/* EL3 warm entry point, programmed into each core's RVBAR on power-on. */
static uintptr_t _sec_entry;
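
/* Place the calling core in standby until an interrupt or wake event arrives. */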
static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	dsb();
	wfi();
}

#define MPIDR_MT_BIT	(24)
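
/*
 * Power on one APU core: enable its cluster clock, request the cluster
 * power state through PCLI, cycle the core reset with RVBAR pointing at
 * the EL3 warm entry point, then request the core power-state change.
 */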
static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
{
	int32_t cpu_pos = plat_core_pos_by_mpidr(mpidr);
	uint32_t cpu_id, cpu, cluster;
	uintptr_t apu_cluster_base, apu_pcli_base, apu_pcli_cluster;
	uintptr_t rst_apu_cluster;

	/* plat_core_pos_by_mpidr() returns -1 for an invalid MPIDR. */
	if (cpu_pos < 0) {
		return PSCI_E_INTERN_FAIL;
	}

	cpu_id = (uint32_t)cpu_pos & ~BIT(MPIDR_MT_BIT);
	cpu = cpu_id % PLATFORM_CORE_COUNT_PER_CLUSTER;
	cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;
	rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + ((uint64_t)cluster * 0x4U);

	VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
		__func__, mpidr, cpu_id, cpu, cluster);

	if (cluster > 3U) {
		panic();
	}

	apu_pcli_cluster = APU_PCLI + APU_PCLI_CLUSTER_OFFSET +
			   ((uint64_t)cluster * APU_PCLI_CLUSTER_STEP);
	apu_cluster_base = APU_CLUSTER0 + ((uint64_t)cluster * APU_CLUSTER_STEP);

	/* Enable clock */
	mmio_setbits_32(PSX_CRF + ACPU0_CLK_CTRL + ((uint64_t)cluster * 0x4U),
			ACPU_CLK_CTRL_CLKACT);

	/* Enable cluster states */
	mmio_setbits_32(apu_pcli_cluster + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_SET);
	mmio_setbits_32(apu_pcli_cluster + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	/* Assert core reset */
	mmio_setbits_32(rst_apu_cluster,
			((RST_APU_COLD_RESET | RST_APU_WARN_RESET) << cpu));

	/* Program RVBAR with the EL3 warm entry point */
	mmio_write_32(apu_cluster_base + APU_RVBAR_L_0 + (cpu << 3),
		      (uint32_t)_sec_entry);
	mmio_write_32(apu_cluster_base + APU_RVBAR_H_0 + (cpu << 3),
		      (uint32_t)(_sec_entry >> 32));

	/* De-assert core reset */
	mmio_clrbits_32(rst_apu_cluster,
			((RST_APU_COLD_RESET | RST_APU_WARN_RESET) << cpu));

	/* Clear cluster resets */
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_WARM_RESET);
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_COLD_RESET);

	apu_pcli_base = APU_PCLI + (APU_PCLI_CPU_STEP * cpu) +
			(APU_PCLI_CLUSTER_CPU_STEP * cluster);
	mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
	mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	return PSCI_E_SUCCESS;
}

static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
	/* Stop interrupts from waking the core that is going down. */
	plat_gic_cpuif_disable();
}

static void __dead2 zynqmp_nopmu_system_reset(void)
{
	/* With no PM firmware to drive the reset, park the calling core. */
	while (ALWAYSTRUE) {
		wfi();
	}
}

static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
{
	VERBOSE("Validate ns_entry point %lx\n", ns_entrypoint);

	if (ns_entrypoint != 0U) {
		return PSCI_E_SUCCESS;
	} else {
		return PSCI_E_INVALID_ADDRESS;
	}
}
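
/* Finish power-on: set up the per-CPU GIC state and enable its CPU interface. */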
static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	plat_gic_pcpu_init();
	plat_gic_cpuif_enable();
}

static void __dead2 zynqmp_system_off(void)
{
	while (ALWAYSTRUE) {
		wfi();
	}
}

static int32_t zynqmp_validate_power_state(uint32_t power_state, psci_power_state_t *req_state)
{
	/* Accept every requested power state in this no-PM configuration. */
	return PSCI_E_SUCCESS;
}
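
/* PSCI operations served locally, with no PM firmware in the loop. */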
static const struct plat_psci_ops _nopmc_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off = zynqmp_nopmu_pwr_domain_off,
	.system_reset = zynqmp_nopmu_system_reset,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.system_off = zynqmp_system_off,
	.validate_power_state = zynqmp_validate_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops.
 ******************************************************************************/
int32_t plat_setup_psci_ops(uintptr_t sec_entrypoint,
			    const struct plat_psci_ops **psci_ops)
{
	_sec_entry = sec_entrypoint;

	VERBOSE("Setting up entry point %lx\n", _sec_entry);

	*psci_ops = &_nopmc_psci_ops;

	return 0;
}
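
/* Nothing to initialize for the SiP service when PM firmware is absent. */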
int sip_svc_setup_init(void)
{
	return 0;
}
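
/*
 * Serve the small set of IOCTLs that can be satisfied with direct register
 * access; unknown IOCTL IDs report PM_RET_ERROR_NOFEATURE.
 */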
static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
			   uint32_t arg1, uint32_t arg2)
{
	int32_t ret = 0;

	VERBOSE("%s: ioctl_id: %x, arg1: %x\n", __func__, ioctl_id, arg1);

	switch (ioctl_id) {
	case IOCTL_OSPI_MUX_SELECT:
		/* arg1 selects the OSPI AXI mux mode: 1 = linear, 0 = DMA. */
		if ((arg1 == 0U) || (arg1 == 1U)) {
			mmio_clrsetbits_32(SLCR_OSPI_QSPI_IOU_AXI_MUX_SEL,
					   LINEAR_MODE,
					   (arg1 != 0U) ? LINEAR_MODE : 0U);
		} else {
			ret = PM_RET_ERROR_ARGS;
		}
		break;
	case IOCTL_UFS_TXRX_CFGRDY_GET:
		ret = (int32_t)mmio_read_32(PMXC_IOU_SLCR_TX_RX_CONFIG_RDY);
		break;
	case IOCTL_UFS_SRAM_CSR_SEL:
		/* arg1 == 1 reads the SRAM CSR, arg1 == 0 writes arg2 to it. */
		if (arg1 == 1U) {
			ret = (int32_t)mmio_read_32(PMXC_IOU_SLCR_SRAM_CSR);
		} else if (arg1 == 0U) {
			mmio_write_32(PMXC_IOU_SLCR_SRAM_CSR, arg2);
		}
		break;
	case IOCTL_USB_SET_STATE:
		/* Nothing to do without PM firmware. */
		break;
	default:
		ret = PM_RET_ERROR_NOFEATURE;
		break;
	}

	return ret;
}
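
/*
 * Handle PM SMC calls locally. 32-bit arguments arrive packed in pairs:
 * x1 carries arg[0]/arg[1] and x2 carries arg[2]/arg[3]. Results are
 * packed the same way on return.
 */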
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			      uint64_t x4, void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;
	uint32_t arg[4], api_id;

	arg[0] = (uint32_t)x1;
	arg[1] = (uint32_t)(x1 >> 32);
	arg[2] = (uint32_t)x2;
	arg[3] = (uint32_t)(x2 >> 32);

	api_id = smc_fid & FUNCID_NUM_MASK;
	VERBOSE("%s: smc_fid: %x, api_id=0x%x\n", __func__, smc_fid, api_id);

	switch (api_id) {
	case PM_IOCTL:
	{
		ret = no_pm_ioctl(arg[0], arg[1], arg[2], arg[3]);
		/*
		 * The firmware driver expects the return code in the upper
		 * 32 bits and the status in the lower 32 bits. The status is
		 * always SUCCESS (0) for these low-level mmio register r/w
		 * calls, and the return code is whatever no_pm_ioctl()
		 * returned.
		 */
		SMC_RET1(handle, ((uint64_t)ret << 32));
	}
	case PM_GET_CHIPID:
	{
		uint32_t idcode, version_type;

		idcode = mmio_read_32(PMC_TAP);
		version_type = mmio_read_32(PMC_TAP_VERSION);
		SMC_RET2(handle, ((uint64_t)idcode << 32), version_type);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
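
/* Top-level SiP SMC entry: every call is served by the local no-PM handler. */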
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     void *cookie, void *handle, uint64_t flags)
{
	return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}