mt_cpu_pm.h

/*
 * Copyright (c) 2022, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef MT_CPU_PM_H
#define MT_CPU_PM_H

#include <assert.h>

#include <common/debug.h>
#include <lib/utils_def.h>

#include <mcucfg.h>
#include <platform_def.h>

/*
 * From Armv8.2 onwards, the caches are turned off automatically when a CPU
 * is powered down, so it is safe to use a plain spinlock here. Without
 * hardware-assisted coherency, fall back to a bakery lock instead.
 */
#if !HW_ASSISTED_COHERENCY
#define MT_CPU_PM_USING_BAKERY_LOCK
#endif
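
/*
 * Example (illustrative sketch only, not part of this header): a consuming
 * .c file might select its lock implementation based on
 * MT_CPU_PM_USING_BAKERY_LOCK. The plat_cpu_pm_lock*() helper names are
 * hypothetical; DEFINE_BAKERY_LOCK()/bakery_lock_*() and spinlock_t/spin_*()
 * are the standard TF-A locking primitives.
 *
 *	#ifdef MT_CPU_PM_USING_BAKERY_LOCK
 *	DEFINE_BAKERY_LOCK(mt_cpu_pm_lock);
 *	#define plat_cpu_pm_lock()	bakery_lock_get(&mt_cpu_pm_lock)
 *	#define plat_cpu_pm_unlock()	bakery_lock_release(&mt_cpu_pm_lock)
 *	#else
 *	static spinlock_t mt_cpu_pm_lock;
 *	#define plat_cpu_pm_lock()	spin_lock(&mt_cpu_pm_lock)
 *	#define plat_cpu_pm_unlock()	spin_unlock(&mt_cpu_pm_lock)
 *	#endif
 */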

#define CPU_PM_FN (MTK_CPUPM_FN_CPUPM_GET_PWR_STATE | \
		   MTK_CPUPM_FN_PWR_STATE_VALID | \
		   MTK_CPUPM_FN_PWR_ON_CORE_PREPARE | \
		   MTK_CPUPM_FN_RESUME_CORE | \
		   MTK_CPUPM_FN_SUSPEND_MCUSYS | \
		   MTK_CPUPM_FN_RESUME_MCUSYS | \
		   MTK_CPUPM_FN_SMP_INIT | \
		   MTK_CPUPM_FN_SMP_CORE_ON | \
		   MTK_CPUPM_FN_SMP_CORE_OFF)

#define CPU_PM_ASSERT(_cond) ({ \
	if (!(_cond)) { \
		INFO("[%s:%d] - %s\n", __func__, __LINE__, #_cond); \
		panic(); \
	} })
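
/*
 * Usage sketch (illustrative only, not part of this header): unlike a bare
 * assert(), CPU_PM_ASSERT() logs the failing condition before panicking.
 * PLATFORM_CORE_COUNT is assumed to come from platform_def.h.
 *
 *	CPU_PM_ASSERT(cpu < PLATFORM_CORE_COUNT);
 */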

#define CPC_PWR_MASK_MCUSYS_MP0		(0xC001)

#define PER_CPU_PWR_DATA(ctrl, cluster, core) \
	do { \
		ctrl.rvbaraddr_l = CORE_RVBRADDR_##cluster##_##core##_L; \
		ctrl.arch_addr = MCUCFG_MP0_CLUSTER_CFG5; \
		ctrl.pwpr = SPM_MP##cluster##_CPU##core##_PWR_CON; \
	} while (0)

#define PER_CPU_PWR_CTRL(ctrl, cpu) ({ \
	switch (cpu) { \
	case 0: \
		PER_CPU_PWR_DATA(ctrl, 0, 0); \
		break; \
	case 1: \
		PER_CPU_PWR_DATA(ctrl, 0, 1); \
		break; \
	case 2: \
		PER_CPU_PWR_DATA(ctrl, 0, 2); \
		break; \
	case 3: \
		PER_CPU_PWR_DATA(ctrl, 0, 3); \
		break; \
	case 4: \
		PER_CPU_PWR_DATA(ctrl, 0, 4); \
		break; \
	case 5: \
		PER_CPU_PWR_DATA(ctrl, 0, 5); \
		break; \
	case 6: \
		PER_CPU_PWR_DATA(ctrl, 0, 6); \
		break; \
	case 7: \
		PER_CPU_PWR_DATA(ctrl, 0, 7); \
		break; \
	default: \
		assert(0); \
		break; \
	} })

/* MCUSYS DREQ BIG VPROC ISO control */
#define DREQ20_BIG_VPROC_ISO		(MCUCFG_BASE + 0xad8c)

/* Boot-up (reset vector) address registers, one per core: CORE_RVBRADDR_<clusterid>_<cpuid>_L */
#define CORE_RVBRADDR_0_0_L		(MCUCFG_BASE + 0xc900)
#define CORE_RVBRADDR_0_1_L		(MCUCFG_BASE + 0xc908)
#define CORE_RVBRADDR_0_2_L		(MCUCFG_BASE + 0xc910)
#define CORE_RVBRADDR_0_3_L		(MCUCFG_BASE + 0xc918)
#define CORE_RVBRADDR_0_4_L		(MCUCFG_BASE + 0xc920)
#define CORE_RVBRADDR_0_5_L		(MCUCFG_BASE + 0xc928)
#define CORE_RVBRADDR_0_6_L		(MCUCFG_BASE + 0xc930)
#define CORE_RVBRADDR_0_7_L		(MCUCFG_BASE + 0xc938)

#define MCUCFG_MP0_CLUSTER_CFG5		(MCUCFG_BASE + 0xc8e4)

struct cpu_pwr_ctrl {
	unsigned int rvbaraddr_l;
	unsigned int arch_addr;
	unsigned int pwpr;
};
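
/*
 * Usage sketch (illustrative only, not part of this header): a power-on
 * prepare hook might fill a cpu_pwr_ctrl from the CPU index and program the
 * core's warm-boot address. mmio_write_32() is the standard TF-A MMIO
 * accessor; the function and variable names below are hypothetical.
 *
 *	static void set_core_bootaddr(unsigned int cpu, uintptr_t entry)
 *	{
 *		struct cpu_pwr_ctrl ctrl = { 0 };
 *
 *		PER_CPU_PWR_CTRL(ctrl, cpu);
 *		CPU_PM_ASSERT(ctrl.rvbaraddr_l != 0U);
 *		mmio_write_32(ctrl.rvbaraddr_l, (uint32_t)entry);
 *	}
 */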

#define MCUSYS_STATUS_PDN		BIT(0)
#define MCUSYS_STATUS_CPUSYS_PROTECT	BIT(8)
#define MCUSYS_STATUS_MCUSYS_PROTECT	BIT(9)
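
/*
 * Minimal sketch (an assumption, not taken from this header): the driver is
 * expected to track these flags in a software status word, e.g.
 *
 *	static unsigned int mcusys_status;		// hypothetical bookkeeping word
 *
 *	mcusys_status |= MCUSYS_STATUS_PDN;		// MCUSYS may power down
 *	mcusys_status &= ~MCUSYS_STATUS_PDN;		// cleared on resume
 */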

/* cpu_pm function ID */
enum mt_cpu_pm_user_id {
	MCUSYS_STATUS,
	CPC_COMMAND,
};

/* cpu_pm lp function ID */
enum mt_cpu_pm_lp_smc_id {
	LP_CPC_COMMAND,
	IRQS_REMAIN_ALLOC,
	IRQS_REMAIN_CTRL,
	IRQS_REMAIN_IRQ,
	IRQS_REMAIN_WAKEUP_CAT,
	IRQS_REMAIN_WAKEUP_SRC,
};
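
/*
 * Dispatch sketch (illustrative only; the handler and helper names are
 * hypothetical, as is its registration with the SMC/SiP layer): a low-power
 * service entry point would typically switch on mt_cpu_pm_lp_smc_id to
 * route requests.
 *
 *	static uint64_t cpu_pm_lp_smc_handler(uint64_t act, uint64_t arg1,
 *					      uint64_t arg2)
 *	{
 *		uint64_t ret = 0;
 *
 *		switch (act) {
 *		case LP_CPC_COMMAND:
 *			ret = handle_cpc_cmd(arg1, arg2);
 *			break;
 *		case IRQS_REMAIN_ALLOC:
 *			ret = handle_irqs_remain_alloc();
 *			break;
 *		default:
 *			break;
 *		}
 *		return ret;
 *	}
 */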

#endif /* MT_CPU_PM_H */