  1. /*
  2. * Copyright (c) 2020-2022, MediaTek Inc. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <arch_helpers.h>
  7. #include <common/debug.h>
  8. #include <drivers/arm/gic_common.h>
  9. #include <lib/mmio.h>
  10. #include <mt_cirq.h>
  11. #include <mt_gic_v3.h>
/*
 * Book-keeping for the registered CIRQ wakeup events; spi_start anchors
 * the GIC SPI number that the first CIRQ line maps to.
 */
static struct cirq_events cirq_all_events = {
	.spi_start = CIRQ_SPI_START,
};
/* Non-zero once the wakeup-event table has been built (see cirq_fast_clone). */
static uint32_t already_cloned;
  16. /*
  17. * mt_irq_mask_restore: restore all interrupts
  18. * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
  19. * Return 0 for success; return negative values for failure.
  20. * (This is ONLY used for the idle current measurement by the factory mode.)
  21. */
  22. int mt_irq_mask_restore(struct mtk_irq_mask *mask)
  23. {
  24. if (mask == NULL) {
  25. return -1;
  26. }
  27. if (mask->header != IRQ_MASK_HEADER) {
  28. return -1;
  29. }
  30. if (mask->footer != IRQ_MASK_FOOTER) {
  31. return -1;
  32. }
  33. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x4),
  34. mask->mask1);
  35. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x8),
  36. mask->mask2);
  37. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0xc),
  38. mask->mask3);
  39. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x10),
  40. mask->mask4);
  41. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x14),
  42. mask->mask5);
  43. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x18),
  44. mask->mask6);
  45. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x1c),
  46. mask->mask7);
  47. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x20),
  48. mask->mask8);
  49. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x24),
  50. mask->mask9);
  51. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x28),
  52. mask->mask10);
  53. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x2c),
  54. mask->mask11);
  55. mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x30),
  56. mask->mask12);
  57. /* make sure dist changes happen */
  58. dsb();
  59. return 0;
  60. }
  61. /*
  62. * mt_irq_mask_all: disable all interrupts
  63. * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
  64. * Return 0 for success; return negative values for failure.
  65. * (This is ONLY used for the idle current measurement by the factory mode.)
  66. */
  67. int mt_irq_mask_all(struct mtk_irq_mask *mask)
  68. {
  69. if (mask != NULL) {
  70. /* for SPI */
  71. mask->mask1 = mmio_read_32((BASE_GICD_BASE +
  72. GICD_ISENABLER + 0x4));
  73. mask->mask2 = mmio_read_32((BASE_GICD_BASE +
  74. GICD_ISENABLER + 0x8));
  75. mask->mask3 = mmio_read_32((BASE_GICD_BASE +
  76. GICD_ISENABLER + 0xc));
  77. mask->mask4 = mmio_read_32((BASE_GICD_BASE +
  78. GICD_ISENABLER + 0x10));
  79. mask->mask5 = mmio_read_32((BASE_GICD_BASE +
  80. GICD_ISENABLER + 0x14));
  81. mask->mask6 = mmio_read_32((BASE_GICD_BASE +
  82. GICD_ISENABLER + 0x18));
  83. mask->mask7 = mmio_read_32((BASE_GICD_BASE +
  84. GICD_ISENABLER + 0x1c));
  85. mask->mask8 = mmio_read_32((BASE_GICD_BASE +
  86. GICD_ISENABLER + 0x20));
  87. mask->mask9 = mmio_read_32((BASE_GICD_BASE +
  88. GICD_ISENABLER + 0x24));
  89. mask->mask10 = mmio_read_32((BASE_GICD_BASE +
  90. GICD_ISENABLER + 0x28));
  91. mask->mask11 = mmio_read_32((BASE_GICD_BASE +
  92. GICD_ISENABLER + 0x2c));
  93. mask->mask12 = mmio_read_32((BASE_GICD_BASE +
  94. GICD_ISENABLER + 0x30));
  95. /* for SPI */
  96. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x4),
  97. 0xFFFFFFFF);
  98. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x8),
  99. 0xFFFFFFFF);
  100. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0xC),
  101. 0xFFFFFFFF);
  102. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x10),
  103. 0xFFFFFFFF);
  104. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x14),
  105. 0xFFFFFFFF);
  106. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x18),
  107. 0xFFFFFFFF);
  108. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x1C),
  109. 0xFFFFFFFF);
  110. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x20),
  111. 0xFFFFFFFF);
  112. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x24),
  113. 0xFFFFFFFF);
  114. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x28),
  115. 0xFFFFFFFF);
  116. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x2c),
  117. 0xFFFFFFFF);
  118. mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x30),
  119. 0xFFFFFFFF);
  120. /* make sure distributor changes happen */
  121. dsb();
  122. mask->header = IRQ_MASK_HEADER;
  123. mask->footer = IRQ_MASK_FOOTER;
  124. return 0;
  125. } else {
  126. return -1;
  127. }
  128. }
/*
 * mt_irq_get_pol: read the polarity configuration word covering @irq.
 * @irq: GIC interrupt number (SPIs start at 32).
 * Return the 32-bit INT_POL_CTL word that contains this IRQ's polarity
 * bit, or 0 when polarity control is compiled out or @irq is not an SPI.
 */
static uint32_t mt_irq_get_pol(uint32_t irq)
{
#ifdef CIRQ_WITH_POLARITY
	uint32_t reg;
	uint32_t base = INT_POL_CTL0;

	/* SGIs/PPIs (irq < 32) have no polarity control. */
	if (irq < 32U) {
		return 0;
	}
	/* One polarity word per 32 SPIs. */
	reg = ((irq - 32U) / 32U);
	return mmio_read_32(base + reg * 4U);
#else
	return 0;
#endif
}
  143. unsigned int mt_irq_get_sens(unsigned int irq)
  144. {
  145. unsigned int config;
  146. /*
  147. * 2'b10 edge
  148. * 2'b01 level
  149. */
  150. config = mmio_read_32(MT_GIC_BASE + GICD_ICFGR + (irq / 16U) * 4U);
  151. config = (config >> (irq % 16U) * 2U) & 0x3;
  152. return config;
  153. }
/*
 * collect_all_wakeup_events: translate the GIC wakeup IRQ list registered
 * via set_wakeup_sources() into per-register mask/polarity/sensitivity
 * entries in cirq_all_events.table, ready for __cirq_fast_clone().
 */
static void collect_all_wakeup_events(void)
{
	unsigned int i;
	uint32_t gic_irq;
	uint32_t cirq;
	uint32_t cirq_reg;
	uint32_t cirq_offset;
	uint32_t mask;
	uint32_t pol_mask;
	uint32_t irq_offset;
	uint32_t irq_mask;

	/* Nothing to do until a wakeup list has been registered. */
	if ((cirq_all_events.wakeup_events == NULL) ||
	    cirq_all_events.num_of_events == 0U) {
		return;
	}
	for (i = 0U; i < cirq_all_events.num_of_events; i++) {
		if (cirq_all_events.wakeup_events[i] > 0U) {
			gic_irq = cirq_all_events.wakeup_events[i];
			/* GIC SPI number -> CIRQ line number. */
			cirq = gic_irq - cirq_all_events.spi_start - 32U;
			cirq_reg = cirq / 32U;
			cirq_offset = cirq % 32U;
			mask = 0x1 << cirq_offset;
			irq_offset = gic_irq % 32U;
			irq_mask = 0x1 << irq_offset;
			/*
			 * CIRQ default masks all
			 */
			cirq_all_events.table[cirq_reg].mask |= mask;
			/*
			 * CIRQ default pol is low
			 */
			pol_mask = mt_irq_get_pol(
					cirq_all_events.wakeup_events[i])
					& irq_mask;
			/*
			 * 0 means rising
			 */
			if (pol_mask == 0U) {
				cirq_all_events.table[cirq_reg].pol |= mask;
			}
			/*
			 * CIRQ could monitor edge/level trigger
			 * cirq register (0: edge, 1: level)
			 */
			if (mt_irq_get_sens(cirq_all_events.wakeup_events[i])
					== SENS_EDGE) {
				cirq_all_events.table[cirq_reg].sen |= mask;
			}
			cirq_all_events.table[cirq_reg].used = 1U;
			cirq_all_events.table[cirq_reg].reg_num = cirq_reg;
		}
	}
}
  207. /*
  208. * mt_cirq_set_pol: Set the polarity for the specified SYS_CIRQ number.
  209. * @cirq_num: the SYS_CIRQ number to set
  210. * @pol: polarity to set
  211. * @return:
  212. * 0: set pol success
  213. * -1: cirq num is out of range
  214. */
  215. #ifdef CIRQ_WITH_POLARITY
  216. static int mt_cirq_set_pol(uint32_t cirq_num, uint32_t pol)
  217. {
  218. uint32_t base;
  219. uint32_t bit = 1U << (cirq_num % 32U);
  220. if (cirq_num >= CIRQ_IRQ_NUM) {
  221. return -1;
  222. }
  223. if (pol == MT_CIRQ_POL_NEG) {
  224. base = (cirq_num / 32U) * 4U + CIRQ_POL_CLR_BASE;
  225. } else if (pol == MT_CIRQ_POL_POS) {
  226. base = (cirq_num / 32U) * 4U + CIRQ_POL_SET_BASE;
  227. } else {
  228. return -1;
  229. }
  230. mmio_write_32(base, bit);
  231. return 0;
  232. }
  233. #endif
  234. /*
  235. * mt_cirq_mask: Mask the specified SYS_CIRQ.
  236. * @cirq_num: the SYS_CIRQ number to mask
  237. * @return:
  238. * 0: mask success
  239. * -1: cirq num is out of range
  240. */
  241. static int mt_cirq_mask(uint32_t cirq_num)
  242. {
  243. uint32_t bit = 1U << (cirq_num % 32U);
  244. if (cirq_num >= CIRQ_IRQ_NUM) {
  245. return -1;
  246. }
  247. mmio_write_32((cirq_num / 32U) * 4U + CIRQ_MASK_SET_BASE, bit);
  248. return 0;
  249. }
  250. /*
  251. * mt_cirq_unmask: Unmask the specified SYS_CIRQ.
  252. * @cirq_num: the SYS_CIRQ number to unmask
  253. * @return:
  254. * 0: umask success
  255. * -1: cirq num is out of range
  256. */
  257. static int mt_cirq_unmask(uint32_t cirq_num)
  258. {
  259. uint32_t bit = 1U << (cirq_num % 32U);
  260. if (cirq_num >= CIRQ_IRQ_NUM) {
  261. return -1;
  262. }
  263. mmio_write_32((cirq_num / 32U) * 4U + CIRQ_MASK_CLR_BASE, bit);
  264. return 0;
  265. }
  266. uint32_t mt_irq_get_en(uint32_t irq)
  267. {
  268. uint32_t addr, st, val;
  269. addr = BASE_GICD_BASE + GICD_ISENABLER + (irq / 32U) * 4U;
  270. st = mmio_read_32(addr);
  271. val = (st >> (irq % 32U)) & 1U;
  272. return val;
  273. }
/*
 * __cirq_fast_clone: program SYS_CIRQ from the table built by
 * collect_all_wakeup_events(): sensitivity, (optionally) polarity, and a
 * mask state that mirrors each IRQ's current GIC enable bit.
 */
static void __cirq_fast_clone(void)
{
	struct cirq_reg *reg;
	unsigned int i;

	for (i = 0U; i < CIRQ_REG_NUM ; ++i) {
		uint32_t cirq_bit;

		reg = &cirq_all_events.table[i];
		/* Skip register words with no registered wakeup lines. */
		if (reg->used == 0U) {
			continue;
		}
		/* Clearing a sen bit selects edge trigger (0: edge). */
		mmio_write_32(CIRQ_SENS_CLR_BASE + (reg->reg_num * 4U),
			      reg->sen);
		for (cirq_bit = 0U; cirq_bit < 32U; ++cirq_bit) {
			uint32_t val, cirq_id;
			uint32_t gic_id;
#ifdef CIRQ_WITH_POLARITY
			uint32_t gic_bit, pol;
#endif
			uint32_t en;

			/* Only touch lines listed in this word's mask. */
			val = ((1U << cirq_bit) & reg->mask);
			if (val == 0U) {
				continue;
			}
			cirq_id = (reg->reg_num << 5U) + cirq_bit;
			gic_id = CIRQ_TO_IRQ_NUM(cirq_id);
#ifdef CIRQ_WITH_POLARITY
			/* Copy this SPI's polarity bit over to the CIRQ. */
			gic_bit = (0x1U << ((gic_id - 32U) % 32U));
			pol = mt_irq_get_pol(gic_id) & gic_bit;
			if (pol != 0U) {
				mt_cirq_set_pol(cirq_id, MT_CIRQ_POL_NEG);
			} else {
				mt_cirq_set_pol(cirq_id, MT_CIRQ_POL_POS);
			}
#endif
			/* Mirror the GIC enable state into the CIRQ mask. */
			en = mt_irq_get_en(gic_id);
			if (en == 1U) {
				mt_cirq_unmask(cirq_id);
			} else {
				mt_cirq_mask(cirq_id);
			}
		}
	}
}
  317. static void cirq_fast_clone(void)
  318. {
  319. if (already_cloned == 0U) {
  320. collect_all_wakeup_events();
  321. already_cloned = 1U;
  322. }
  323. __cirq_fast_clone();
  324. }
  325. void set_wakeup_sources(uint32_t *list, uint32_t num_of_events)
  326. {
  327. cirq_all_events.num_of_events = num_of_events;
  328. cirq_all_events.wakeup_events = list;
  329. }
/*
 * mt_cirq_clone_gic: Copy the setting from GIC to SYS_CIRQ
 * (mask and sensitivity; polarity too when CIRQ_WITH_POLARITY is set).
 */
void mt_cirq_clone_gic(void)
{
	cirq_fast_clone();
}
  337. uint32_t mt_irq_get_pending_vec(uint32_t start_irq)
  338. {
  339. uint32_t base = 0U;
  340. uint32_t pending_vec = 0U;
  341. uint32_t reg = start_irq / 32U;
  342. uint32_t LSB_num, MSB_num;
  343. uint32_t LSB_vec, MSB_vec;
  344. base = BASE_GICD_BASE;
  345. /* if start_irq is not aligned 32, do some assembling */
  346. MSB_num = start_irq % 32U;
  347. if (MSB_num != 0U) {
  348. LSB_num = 32U - MSB_num;
  349. LSB_vec = mmio_read_32(base + GICD_ISPENDR +
  350. reg * 4U) >> MSB_num;
  351. MSB_vec = mmio_read_32(base + GICD_ISPENDR +
  352. (reg + 1U) * 4U) << LSB_num;
  353. pending_vec = MSB_vec | LSB_vec;
  354. } else {
  355. pending_vec = mmio_read_32(base + GICD_ISPENDR + reg * 4);
  356. }
  357. return pending_vec;
  358. }
  359. static int mt_cirq_get_mask_vec(unsigned int i)
  360. {
  361. return mmio_read_32((i * 4U) + CIRQ_MASK_BASE);
  362. }
/*
 * mt_cirq_ack_all: Ack all the interrupt on SYS_CIRQ
 * Every line that is NOT both pending and unmasked gets acked, so genuine
 * wakeup events stay latched for the flush path.
 */
void mt_cirq_ack_all(void)
{
	uint32_t ack_vec, pend_vec, mask_vec;
	unsigned int i;

	for (i = 0; i < CIRQ_CTRL_REG_NUM; i++) {
		/*
		 * if a irq is pending & not masked, don't ack it
		 * , since cirq start irq might not be 32 aligned with gic,
		 * need an exotic API to get proper vector of pending irq
		 */
		/* CIRQ word i covers GIC IRQs from SPI_START + (i+1)*32. */
		pend_vec = mt_irq_get_pending_vec(CIRQ_SPI_START
				+ (i + 1U) * 32U);
		mask_vec = mt_cirq_get_mask_vec(i);
		/* those should be acked are: "not (pending & not masked)",
		 */
		ack_vec = (~pend_vec) | mask_vec;
		mmio_write_32(CIRQ_ACK_BASE + (i * 4U), ack_vec);
	}
	/*
	 * make sure all cirq setting take effect
	 * before doing other things
	 */
	dsb();
}
  390. /*
  391. * mt_cirq_enable: Enable SYS_CIRQ
  392. */
  393. void mt_cirq_enable(void)
  394. {
  395. uint32_t st;
  396. /* level only */
  397. mt_cirq_ack_all();
  398. st = mmio_read_32(CIRQ_CON);
  399. /*
  400. * CIRQ could monitor edge/level trigger
  401. */
  402. st |= (CIRQ_CON_EN << CIRQ_CON_EN_BITS);
  403. mmio_write_32(CIRQ_CON, (st & CIRQ_CON_BITS_MASK));
  404. }
  405. /*
  406. * mt_cirq_disable: Disable SYS_CIRQ
  407. */
  408. void mt_cirq_disable(void)
  409. {
  410. uint32_t st;
  411. st = mmio_read_32(CIRQ_CON);
  412. st &= ~(CIRQ_CON_EN << CIRQ_CON_EN_BITS);
  413. mmio_write_32(CIRQ_CON, (st & CIRQ_CON_BITS_MASK));
  414. }
  415. void mt_irq_unmask_for_sleep_ex(uint32_t irq)
  416. {
  417. uint32_t mask;
  418. mask = 1U << (irq % 32U);
  419. mmio_write_32(BASE_GICD_BASE + GICD_ISENABLER +
  420. ((irq / 32U) * 4U), mask);
  421. }
  422. void mt_cirq_mask_all(void)
  423. {
  424. unsigned int i;
  425. for (i = 0U; i < CIRQ_CTRL_REG_NUM; i++) {
  426. mmio_write_32(CIRQ_MASK_SET_BASE + (i * 4U), 0xFFFFFFFF);
  427. }
  428. dsb();
  429. }
/*
 * cirq_fast_sw_flush: re-post each pending, unmasked CIRQ event to the
 * GIC as a pending interrupt so normal interrupt handling resumes.
 */
static void cirq_fast_sw_flush(void)
{
	struct cirq_reg *reg;
	unsigned int i;

	for (i = 0U; i < CIRQ_REG_NUM ; ++i) {
		uint32_t cirq_bit;

		reg = &cirq_all_events.table[i];
		/* Skip register words with no registered wakeup lines. */
		if (reg->used == 0U) {
			continue;
		}
		/* Latched status, restricted to the lines we monitor. */
		reg->pending = mmio_read_32(CIRQ_STA_BASE +
				(reg->reg_num << 2U));
		reg->pending &= reg->mask;
		for (cirq_bit = 0U; cirq_bit < 32U; ++cirq_bit) {
			uint32_t val, cirq_id;

			val = (1U << cirq_bit) & reg->pending;
			if (val == 0U) {
				continue;
			}
			cirq_id = (reg->reg_num << 5U) + cirq_bit;
			/* Hand the event back to the GIC as pending. */
			mt_irq_set_pending(CIRQ_TO_IRQ_NUM(cirq_id));
			if (CIRQ_TO_IRQ_NUM(cirq_id) == MD_WDT_IRQ_BIT_ID) {
				INFO("Set MD_WDT_IRQ pending in %s\n",
				     __func__);
			}
		}
	}
}
/*
 * mt_cirq_flush: Flush interrupt from SYS_CIRQ to GIC
 * (re-posts latched CIRQ events as GIC pending bits, then masks and acks
 * every CIRQ line).
 */
void mt_cirq_flush(void)
{
	cirq_fast_sw_flush();
	mt_cirq_mask_all();
	mt_cirq_ack_all();
}
  467. void mt_cirq_sw_reset(void)
  468. {
  469. uint32_t st;
  470. st = mmio_read_32(CIRQ_CON);
  471. st |= (CIRQ_SW_RESET << CIRQ_CON_SW_RST_BITS);
  472. mmio_write_32(CIRQ_CON, st);
  473. }