amu.c

/*
 * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <limits.h> /* CHAR_BIT, used by the CASSERTs below */
#include <stdbool.h>

#include "../amu_private.h"
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>

#include <plat/common/platform.h>
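
/*
 * Per-core AMU context: the counter values and enable bitmaps saved across a
 * PSCI power-down suspend and restored on resume (see amu_context_save() and
 * amu_context_restore() below).
 */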
struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	uint16_t group0_enable;

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint16_t group1_enable;
#endif
};

static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

/*
 * The enable bitmaps are 16 bits wide; make sure they can represent every
 * counter permitted by the AMU_GROUP*_MAX_COUNTERS bounds.
 */
CASSERT(AMU_GROUP0_MAX_COUNTERS <= (sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT),
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT(AMU_GROUP1_MAX_COUNTERS <= (sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT),
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif

static inline __unused void write_hcptr_tam(uint32_t value)
{
	write_hcptr((read_hcptr() & ~TAM_BIT) |
		((value << TAM_SHIFT) & TAM_BIT));
}

static inline __unused void write_amcr_cg1rz(uint32_t value)
{
	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}

static inline __unused uint32_t read_amcfgr_ncg(void)
{
	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
		AMCFGR_NCG_MASK;
}

static inline __unused uint32_t read_amcgcr_cg0nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
		AMCGCR_CG0NC_MASK;
}

static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
}

static inline __unused uint32_t read_amcntenset0_px(void)
{
	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;
}

static inline __unused uint32_t read_amcntenset1_px(void)
{
	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;
}

static inline __unused void write_amcntenset0_px(uint32_t px)
{
	uint32_t value = read_amcntenset0();

	value &= ~AMCNTENSET0_Pn_MASK;
	value |= (px << AMCNTENSET0_Pn_SHIFT) &
		AMCNTENSET0_Pn_MASK;

	write_amcntenset0(value);
}

static inline __unused void write_amcntenset1_px(uint32_t px)
{
	uint32_t value = read_amcntenset1();

	value &= ~AMCNTENSET1_Pn_MASK;
	value |= (px << AMCNTENSET1_Pn_SHIFT) &
		AMCNTENSET1_Pn_MASK;

	write_amcntenset1(value);
}

static inline __unused void write_amcntenclr0_px(uint32_t px)
{
	uint32_t value = read_amcntenclr0();

	value &= ~AMCNTENCLR0_Pn_MASK;
	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;

	write_amcntenclr0(value);
}

static inline __unused void write_amcntenclr1_px(uint32_t px)
{
	uint32_t value = read_amcntenclr1();

	value &= ~AMCNTENCLR1_Pn_MASK;
	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;

	write_amcntenclr1(value);
}
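
/*
 * The accessors above share one read-modify-write pattern: read the register,
 * clear the target field, then OR in the new value shifted and masked into
 * place, leaving every other field untouched. For example,
 * write_amcr_cg1rz(1U) reduces to:
 *
 *	write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
 *
 * and write_amcr_cg1rz(0U) clears only that bit.
 */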

#if ENABLE_AMU_AUXILIARY_COUNTERS
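/*
 * AMCFGR.NCG holds the number of implemented counter groups minus one, so a
 * non-zero value indicates that the auxiliary (group 1) counters exist.
 */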
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() > 0U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked by the context
 * management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	uint32_t amcfgr_ncg;		/* Number of counter groups */
	uint32_t amcgcr_cg0nc;		/* Number of group 0 counters */

	uint32_t amcntenset0_px = 0x0;	/* Group 0 enable mask */
	uint32_t amcntenset1_px = 0x0;	/* Group 1 enable mask */

	if (el2_unused) {
		/*
		 * HCPTR.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_cg0nc = read_amcgcr_cg0nc();
	amcntenset0_px = (UINT32_C(1) << (amcgcr_cg0nc)) - 1U;

	assert(amcgcr_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		amcntenset1_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_px(amcntenset0_px);

	amcfgr_ncg = read_amcfgr_ncg();
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(amcntenset1_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Bail out if FEAT_AMUv1p1 features are not present. */
	if (!is_feat_amuv1p1_supported()) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}
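
/*
 * Illustrative sketch only (not part of this file): a platform opting in to
 * the auxiliary counters supplies a per-core enable bitmap through
 * plat_amu_topology(). The `cores[].enable` field follows the usage above;
 * the instance name and the bitmap value 0x3 (counters 0 and 1) are
 * assumptions made up for this example.
 *
 *	static const struct amu_topology plat_topology_ = {
 *		.cores = {
 *			[0 ... PLATFORM_CORE_COUNT - 1] = { .enable = 0x3 },
 *		},
 *	};
 *
 *	const struct amu_topology *plat_amu_topology(void)
 *	{
 *		return &plat_topology_;
 *	}
 */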

/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(idx < read_amcgcr_cg0nc());

	amu_group0_cnt_write_internal(idx, val);
	isb(); /* Ensure the counter write has taken effect */
}

#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(is_feat_amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	amu_group1_cnt_write_internal(idx, val);
	isb(); /* Ensure the counter write has taken effect */
}
#endif
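
/*
 * The save handler below disables every enabled counter before reading it, so
 * the saved values form a stable snapshot, and records which counters were
 * enabled; the restore handler writes the values back and re-enables exactly
 * that set.
 */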
static void *amu_context_save(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcfgr_ncg;	/* Number of counter groups */
	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	ctx->group0_enable = read_amcntenset0_px();
	write_amcntenclr0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_px();
		write_amcntenclr1_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t amcfgr_ncg;	/* Number of counter groups */
	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
#endif

	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_px() == 0U);

	if (amcfgr_ncg > 0U) {
		assert(read_amcntenset1_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}
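
/*
 * Register the handlers with the EL3 publish-subscribe framework so that the
 * AMU context is saved just before a core powers down for suspend and
 * restored once it comes back up.
 */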
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);