/*
 * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include "clk-stm32-core.h"
#include <common/debug.h>
#include <common/fdt_wrappers.h>
#include <drivers/clk.h>
#include <drivers/delay_timer.h>
#include <drivers/st/stm32mp_clkfunc.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>

static struct spinlock reg_lock;
static struct spinlock refcount_lock;

static struct stm32_clk_priv *stm32_clock_data;

const struct stm32_clk_ops clk_mux_ops;

struct stm32_clk_priv *clk_stm32_get_priv(void)
{
	return stm32_clock_data;
}

static void stm32mp1_clk_lock(struct spinlock *lock)
{
	if (stm32mp_lock_available()) {
		/* Assume interrupts are masked */
		spin_lock(lock);
	}
}

static void stm32mp1_clk_unlock(struct spinlock *lock)
{
	if (stm32mp_lock_available()) {
		spin_unlock(lock);
	}
}

void stm32mp1_clk_rcc_regs_lock(void)
{
	stm32mp1_clk_lock(&reg_lock);
}

void stm32mp1_clk_rcc_regs_unlock(void)
{
	stm32mp1_clk_unlock(&reg_lock);
}

#define TIMEOUT_US_1S	U(1000000)
#define OSCRDY_TIMEOUT	TIMEOUT_US_1S
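
/*
 * Resolve an oscillator clock to its oscillator descriptor in
 * priv->osci_data, using the osc_id stored in the clock configuration.
 */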
struct clk_oscillator_data *clk_oscillator_get_data(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct stm32_osc_cfg *osc_cfg = clk->clock_cfg;
	int osc_id = osc_cfg->osc_id;

	return &priv->osci_data[osc_id];
}

void clk_oscillator_set_bypass(struct stm32_clk_priv *priv, int id, bool digbyp, bool bypass)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);
	struct stm32_clk_bypass *bypass_data = osc_data->bypass;
	uintptr_t address;

	if (bypass_data == NULL) {
		return;
	}

	address = priv->base + bypass_data->offset;

	if (digbyp) {
		mmio_setbits_32(address, BIT(bypass_data->bit_digbyp));
	}

	if (bypass || digbyp) {
		mmio_setbits_32(address, BIT(bypass_data->bit_byp));
	}
}

void clk_oscillator_set_css(struct stm32_clk_priv *priv, int id, bool css)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);
	struct stm32_clk_css *css_data = osc_data->css;
	uintptr_t address;

	if (css_data == NULL) {
		return;
	}

	address = priv->base + css_data->offset;

	if (css) {
		mmio_setbits_32(address, BIT(css_data->bit_css));
	}
}
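
/*
 * Adjust the oscillator driving capability. Since switching directly
 * between extreme drive levels is not recommended, the current value is
 * stepped one level at a time until it reaches the requested one.
 */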
void clk_oscillator_set_drive(struct stm32_clk_priv *priv, int id, uint8_t lsedrv)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);
	struct stm32_clk_drive *drive_data = osc_data->drive;
	uintptr_t address;
	uint32_t mask;
	uint32_t value;

	if (drive_data == NULL) {
		return;
	}

	address = priv->base + drive_data->offset;

	mask = (BIT(drive_data->drv_width) - 1U) << drive_data->drv_shift;

	/*
	 * Warning: not recommended to switch directly from "high drive"
	 * to "medium low drive", and vice-versa.
	 */
	value = (mmio_read_32(address) & mask) >> drive_data->drv_shift;

	while (value != lsedrv) {
		if (value > lsedrv) {
			value--;
		} else {
			value++;
		}

		mmio_clrsetbits_32(address, mask, value << drive_data->drv_shift);
	}
}

int clk_oscillator_wait_ready(struct stm32_clk_priv *priv, int id, bool ready_on)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);

	return _clk_stm32_gate_wait_ready(priv, osc_data->gate_rdy_id, ready_on);
}

int clk_oscillator_wait_ready_on(struct stm32_clk_priv *priv, int id)
{
	return clk_oscillator_wait_ready(priv, id, true);
}

int clk_oscillator_wait_ready_off(struct stm32_clk_priv *priv, int id)
{
	return clk_oscillator_wait_ready(priv, id, false);
}

static int clk_gate_enable(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_gate_cfg *cfg = clk->clock_cfg;

	mmio_setbits_32(priv->base + cfg->offset, BIT(cfg->bit_idx));

	return 0;
}

static void clk_gate_disable(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_gate_cfg *cfg = clk->clock_cfg;

	mmio_clrbits_32(priv->base + cfg->offset, BIT(cfg->bit_idx));
}

static bool clk_gate_is_enabled(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_gate_cfg *cfg = clk->clock_cfg;

	return ((mmio_read_32(priv->base + cfg->offset) & BIT(cfg->bit_idx)) != 0U);
}

const struct stm32_clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
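
/*
 * Gate helpers addressed by gate index. Gates flagged "set_clr" are driven
 * through set/clear registers: enabling writes the bit to the enable
 * register itself, disabling writes it to the matching clear register at
 * RCC_MP_ENCLRR_OFFSET. Other gates are plain read-modify-write bits.
 */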
void _clk_stm32_gate_disable(struct stm32_clk_priv *priv, uint16_t gate_id)
{
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (gate->set_clr != 0U) {
		mmio_write_32(addr + RCC_MP_ENCLRR_OFFSET, BIT(gate->bit_idx));
	} else {
		mmio_clrbits_32(addr, BIT(gate->bit_idx));
	}
}

int _clk_stm32_gate_enable(struct stm32_clk_priv *priv, uint16_t gate_id)
{
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t addr = priv->base + gate->offset;

	if (gate->set_clr != 0U) {
		mmio_write_32(addr, BIT(gate->bit_idx));
	} else {
		mmio_setbits_32(addr, BIT(gate->bit_idx));
	}

	return 0;
}

const struct clk_stm32 *_clk_get(struct stm32_clk_priv *priv, int id)
{
	if ((unsigned int)id < priv->num) {
		return &priv->clks[id];
	}

	return NULL;
}

#define clk_div_mask(_width) GENMASK(((_width) - 1U), 0U)

static unsigned int _get_table_div(const struct clk_div_table *table,
				   unsigned int val)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->val == val) {
			return clkt->div;
		}
	}

	return 0;
}

static unsigned int _get_div(const struct clk_div_table *table,
			     unsigned int val, unsigned long flags,
			     uint8_t width)
{
	if ((flags & CLK_DIVIDER_ONE_BASED) != 0UL) {
		return val;
	}

	if ((flags & CLK_DIVIDER_POWER_OF_TWO) != 0UL) {
		return BIT(val);
	}

	if ((flags & CLK_DIVIDER_MAX_AT_ZERO) != 0UL) {
		return (val != 0U) ? val : BIT(width);
	}

	if (table != NULL) {
		return _get_table_div(table, val);
	}

	return val + 1U;
}

#define TIMEOUT_US_200MS	U(200000)
#define CLKSRC_TIMEOUT		TIMEOUT_US_200MS
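
/*
 * Program a mux selector and, when the mux exposes a ready bit, poll it
 * until the new clock source is taken into account or CLKSRC_TIMEOUT
 * expires.
 */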
int clk_mux_set_parent(struct stm32_clk_priv *priv, uint16_t pid, uint8_t sel)
{
	const struct parent_cfg *parents = &priv->parents[pid & MUX_PARENT_MASK];
	const struct mux_cfg *mux = parents->mux;
	uintptr_t address = priv->base + mux->offset;
	uint32_t mask;
	uint64_t timeout;

	mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);

	mmio_clrsetbits_32(address, mask, (sel << mux->shift) & mask);

	if (mux->bitrdy == MUX_NO_BIT_RDY) {
		return 0;
	}

	timeout = timeout_init_us(CLKSRC_TIMEOUT);

	mask = BIT(mux->bitrdy);

	while ((mmio_read_32(address) & mask) == 0U) {
		if (timeout_elapsed(timeout)) {
			return -ETIMEDOUT;
		}
	}

	return 0;
}
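
/*
 * Re-parent a clock. To keep the mux glitch-free, both the old and the new
 * parents are enabled around the switch; the parent that is no longer
 * needed is then released, depending on whether the clock itself was
 * enabled.
 */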
int _clk_stm32_set_parent(struct stm32_clk_priv *priv, int clk, int clkp)
{
	const struct parent_cfg *parents;
	uint16_t pid;
	uint8_t sel;
	int old_parent;

	pid = priv->clks[clk].parent;

	if ((pid == CLK_IS_ROOT) || (pid < MUX_MAX_PARENTS)) {
		return -EINVAL;
	}

	old_parent = _clk_stm32_get_parent(priv, clk);
	if (old_parent < 0) {
		return old_parent;
	}

	if (old_parent == clkp) {
		return 0;
	}

	parents = &priv->parents[pid & MUX_PARENT_MASK];

	for (sel = 0; sel < parents->num_parents; sel++) {
		if (parents->id_parents[sel] == (uint16_t)clkp) {
			bool clk_was_enabled = _clk_stm32_is_enabled(priv, clk);
			int err = 0;

			/* Enable the parents (for glitch free mux) */
			_clk_stm32_enable(priv, clkp);
			_clk_stm32_enable(priv, old_parent);

			err = clk_mux_set_parent(priv, pid, sel);

			_clk_stm32_disable(priv, old_parent);

			if (clk_was_enabled) {
				_clk_stm32_disable(priv, old_parent);
			} else {
				_clk_stm32_disable(priv, clkp);
			}

			return err;
		}
	}

	return -EINVAL;
}

int clk_mux_get_parent(struct stm32_clk_priv *priv, uint32_t mux_id)
{
	const struct parent_cfg *parent;
	const struct mux_cfg *mux;
	uint32_t mask;

	if (mux_id >= priv->nb_parents) {
		panic();
	}

	parent = &priv->parents[mux_id];
	mux = parent->mux;

	mask = MASK_WIDTH_SHIFT(mux->width, mux->shift);

	return (mmio_read_32(priv->base + mux->offset) & mask) >> mux->shift;
}

int _clk_stm32_set_parent_by_index(struct stm32_clk_priv *priv, int clk, int sel)
{
	uint16_t pid;

	pid = priv->clks[clk].parent;

	if ((pid == CLK_IS_ROOT) || (pid < MUX_MAX_PARENTS)) {
		return -EINVAL;
	}

	return clk_mux_set_parent(priv, pid, sel);
}

int _clk_stm32_get_parent(struct stm32_clk_priv *priv, int clk_id)
{
	const struct clk_stm32 *clk = _clk_get(priv, clk_id);
	const struct parent_cfg *parent;
	uint16_t mux_id;
	int sel;

	mux_id = priv->clks[clk_id].parent;
	if (mux_id == CLK_IS_ROOT) {
		return CLK_IS_ROOT;
	}

	if (mux_id < MUX_MAX_PARENTS) {
		return mux_id & MUX_PARENT_MASK;
	}

	mux_id &= MUX_PARENT_MASK;
	parent = &priv->parents[mux_id];

	if (clk->ops->get_parent != NULL) {
		sel = clk->ops->get_parent(priv, clk_id);
	} else {
		sel = clk_mux_get_parent(priv, mux_id);
	}

	if ((sel >= 0) && (sel < parent->num_parents)) {
		return parent->id_parents[sel];
	}

	return -EINVAL;
}

int _clk_stm32_get_parent_index(struct stm32_clk_priv *priv, int clk_id)
{
	uint16_t mux_id;

	mux_id = priv->clks[clk_id].parent;
	if (mux_id == CLK_IS_ROOT) {
		return CLK_IS_ROOT;
	}

	if (mux_id < MUX_MAX_PARENTS) {
		return mux_id & MUX_PARENT_MASK;
	}

	mux_id &= MUX_PARENT_MASK;

	return clk_mux_get_parent(priv, mux_id);
}

int _clk_stm32_get_parent_by_index(struct stm32_clk_priv *priv, int clk_id, int idx)
{
	const struct parent_cfg *parent;
	uint16_t mux_id;

	mux_id = priv->clks[clk_id].parent;
	if (mux_id == CLK_IS_ROOT) {
		return CLK_IS_ROOT;
	}

	if (mux_id < MUX_MAX_PARENTS) {
		return mux_id & MUX_PARENT_MASK;
	}

	mux_id &= MUX_PARENT_MASK;
	parent = &priv->parents[mux_id];

	if (idx < parent->num_parents) {
		return parent->id_parents[idx];
	}

	return -EINVAL;
}

int clk_get_index(struct stm32_clk_priv *priv, unsigned long binding_id)
{
	unsigned int i;

	for (i = 0U; i < priv->num; i++) {
		if (binding_id == priv->clks[i].binding) {
			return (int)i;
		}
	}

	return -EINVAL;
}
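
/*
 * Compute a clock rate by walking up the parent tree: clocks providing a
 * recalc_rate() hook derive their rate from the parent rate, the others
 * simply inherit it. A root clock without a recalc hook is a configuration
 * error.
 */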
unsigned long _clk_stm32_get_rate(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	int parent;

	if ((unsigned int)id >= priv->num) {
		return 0UL;
	}

	parent = _clk_stm32_get_parent(priv, id);
	if (parent < 0) {
		return 0UL;
	}

	if (clk->ops->recalc_rate != NULL) {
		unsigned long prate = 0UL;

		if (parent != CLK_IS_ROOT) {
			prate = _clk_stm32_get_rate(priv, parent);
		}

		return clk->ops->recalc_rate(priv, id, prate);
	}

	if (parent == CLK_IS_ROOT) {
		panic();
	}

	return _clk_stm32_get_rate(priv, parent);
}

unsigned long _clk_stm32_get_parent_rate(struct stm32_clk_priv *priv, int id)
{
	int parent_id = _clk_stm32_get_parent(priv, id);

	if (parent_id < 0) {
		return 0UL;
	}

	return _clk_stm32_get_rate(priv, parent_id);
}

static uint8_t _stm32_clk_get_flags(struct stm32_clk_priv *priv, int id)
{
	return priv->clks[id].flags;
}

bool _stm32_clk_is_flags(struct stm32_clk_priv *priv, int id, uint8_t flag)
{
	if ((_stm32_clk_get_flags(priv, id) & flag) != 0U) {
		return true;
	}

	return false;
}

int clk_stm32_enable_call_ops(struct stm32_clk_priv *priv, uint16_t id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);

	if (clk->ops->enable != NULL) {
		clk->ops->enable(priv, id);
	}

	return 0;
}
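
/*
 * Reference-counted enable: on the first enable of a clock, its parent
 * chain is enabled recursively before the clock's own enable() hook is
 * called. Callers must hold refcount_lock, see _clk_stm32_enable().
 */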
static int _clk_stm32_enable_core(struct stm32_clk_priv *priv, int id)
{
	int parent;
	int ret = 0;

	if (priv->gate_refcounts[id] == 0U) {
		parent = _clk_stm32_get_parent(priv, id);
		if (parent < 0) {
			return parent;
		}

		if (parent != CLK_IS_ROOT) {
			ret = _clk_stm32_enable_core(priv, parent);
			if (ret != 0) {
				return ret;
			}
		}

		clk_stm32_enable_call_ops(priv, id);
	}

	priv->gate_refcounts[id]++;

	if (priv->gate_refcounts[id] == UINT_MAX) {
		ERROR("%s: %d max enable count !", __func__, id);
		panic();
	}

	return 0;
}

int _clk_stm32_enable(struct stm32_clk_priv *priv, int id)
{
	int ret;

	stm32mp1_clk_lock(&refcount_lock);
	ret = _clk_stm32_enable_core(priv, id);
	stm32mp1_clk_unlock(&refcount_lock);

	return ret;
}

void clk_stm32_disable_call_ops(struct stm32_clk_priv *priv, uint16_t id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);

	if (clk->ops->disable != NULL) {
		clk->ops->disable(priv, id);
	}
}

static void _clk_stm32_disable_core(struct stm32_clk_priv *priv, int id)
{
	int parent;

	if ((priv->gate_refcounts[id] == 1U) && _stm32_clk_is_flags(priv, id, CLK_IS_CRITICAL)) {
		return;
	}

	if (priv->gate_refcounts[id] == 0U) {
		/* case of clock ignore unused */
		if (_clk_stm32_is_enabled(priv, id)) {
			clk_stm32_disable_call_ops(priv, id);
			return;
		}
		VERBOSE("%s: %d already disabled !\n\n", __func__, id);
		return;
	}

	if (--priv->gate_refcounts[id] > 0U) {
		return;
	}

	clk_stm32_disable_call_ops(priv, id);

	parent = _clk_stm32_get_parent(priv, id);
	if ((parent >= 0) && (parent != CLK_IS_ROOT)) {
		_clk_stm32_disable_core(priv, parent);
	}
}

void _clk_stm32_disable(struct stm32_clk_priv *priv, int id)
{
	stm32mp1_clk_lock(&refcount_lock);

	_clk_stm32_disable_core(priv, id);

	stm32mp1_clk_unlock(&refcount_lock);
}

bool _clk_stm32_is_enabled(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);

	if (clk->ops->is_enabled != NULL) {
		return clk->ops->is_enabled(priv, id);
	}

	return priv->gate_refcounts[id];
}
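
/*
 * Generic clock framework glue: the callbacks below translate the clock
 * identifiers exposed to consumers ("binding" IDs, typically the RCC clock
 * defines from the device-tree bindings) into internal clock indices and
 * forward to the _clk_stm32_*() helpers.
 *
 * Illustrative use from another driver once clk_stm32_init() has registered
 * these ops (CK_EXAMPLE is a hypothetical binding ID):
 *
 *	clk_enable(CK_EXAMPLE);
 *	rate = clk_get_rate(CK_EXAMPLE);
 *	clk_disable(CK_EXAMPLE);
 */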
static int clk_stm32_enable(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id;

	id = clk_get_index(priv, binding_id);
	if (id == -EINVAL) {
		return id;
	}

	return _clk_stm32_enable(priv, id);
}

static void clk_stm32_disable(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id;

	id = clk_get_index(priv, binding_id);
	if (id != -EINVAL) {
		_clk_stm32_disable(priv, id);
	}
}

static bool clk_stm32_is_enabled(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id;

	id = clk_get_index(priv, binding_id);
	if (id == -EINVAL) {
		return false;
	}

	return _clk_stm32_is_enabled(priv, id);
}

static unsigned long clk_stm32_get_rate(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id;

	id = clk_get_index(priv, binding_id);
	if (id == -EINVAL) {
		return 0UL;
	}

	return _clk_stm32_get_rate(priv, id);
}

static int clk_stm32_get_parent(unsigned long binding_id)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	int id;

	id = clk_get_index(priv, binding_id);
	if (id == -EINVAL) {
		return id;
	}

	return _clk_stm32_get_parent(priv, id);
}

static const struct clk_ops stm32mp_clk_ops = {
	.enable = clk_stm32_enable,
	.disable = clk_stm32_disable,
	.is_enabled = clk_stm32_is_enabled,
	.get_rate = clk_stm32_get_rate,
	.get_parent = clk_stm32_get_parent,
};

void clk_stm32_enable_critical_clocks(void)
{
	struct stm32_clk_priv *priv = clk_stm32_get_priv();
	unsigned int i;

	for (i = 0U; i < priv->num; i++) {
		if (_stm32_clk_is_flags(priv, i, CLK_IS_CRITICAL)) {
			_clk_stm32_enable(priv, i);
		}
	}
}

static void stm32_clk_register(void)
{
	clk_register(&stm32mp_clk_ops);
}

uint32_t clk_stm32_div_get_value(struct stm32_clk_priv *priv, int div_id)
{
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = 0;

	val = mmio_read_32(priv->base + divider->offset) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return val;
}

unsigned long _clk_stm32_divider_recalc(struct stm32_clk_priv *priv,
					int div_id,
					unsigned long prate)
{
	const struct div_cfg *divider = &priv->div[div_id];
	uint32_t val = clk_stm32_div_get_value(priv, div_id);
	unsigned int div = 0U;

	div = _get_div(divider->table, val, divider->flags, divider->width);
	if (div == 0U) {
		return prate;
	}

	return div_round_up((uint64_t)prate, div);
}

unsigned long clk_stm32_divider_recalc(struct stm32_clk_priv *priv, int id,
				       unsigned long prate)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_stm32_div_cfg *div_cfg = clk->clock_cfg;

	return _clk_stm32_divider_recalc(priv, div_cfg->id, prate);
}

const struct stm32_clk_ops clk_stm32_divider_ops = {
	.recalc_rate = clk_stm32_divider_recalc,
};
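
/*
 * Program a divider identified by its index in priv->div and, when the
 * divider exposes a ready bit, poll it until the new ratio is applied or
 * CLKSRC_TIMEOUT expires.
 */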
int clk_stm32_set_div(struct stm32_clk_priv *priv, uint32_t div_id, uint32_t value)
{
	const struct div_cfg *divider;
	uintptr_t address;
	uint64_t timeout;
	uint32_t mask;

	if (div_id >= priv->nb_div) {
		panic();
	}

	divider = &priv->div[div_id];
	address = priv->base + divider->offset;

	mask = MASK_WIDTH_SHIFT(divider->width, divider->shift);
	mmio_clrsetbits_32(address, mask, (value << divider->shift) & mask);

	if (divider->bitrdy == DIV_NO_BIT_RDY) {
		return 0;
	}

	timeout = timeout_init_us(CLKSRC_TIMEOUT);
	mask = BIT(divider->bitrdy);

	while ((mmio_read_32(address) & mask) == 0U) {
		if (timeout_elapsed(timeout)) {
			return -ETIMEDOUT;
		}
	}

	return 0;
}

int _clk_stm32_gate_wait_ready(struct stm32_clk_priv *priv, uint16_t gate_id,
			       bool ready_on)
{
	const struct gate_cfg *gate = &priv->gates[gate_id];
	uintptr_t address = priv->base + gate->offset;
	uint32_t mask_rdy = BIT(gate->bit_idx);
	uint64_t timeout;
	uint32_t mask_test;

	if (ready_on) {
		mask_test = BIT(gate->bit_idx);
	} else {
		mask_test = 0U;
	}

	timeout = timeout_init_us(OSCRDY_TIMEOUT);

	while ((mmio_read_32(address) & mask_rdy) != mask_test) {
		if (timeout_elapsed(timeout)) {
			break;
		}
	}

	if ((mmio_read_32(address) & mask_rdy) != mask_test) {
		return -ETIMEDOUT;
	}

	return 0;
}

int clk_stm32_gate_enable(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_stm32_gate_cfg *cfg = clk->clock_cfg;
	const struct gate_cfg *gate = &priv->gates[cfg->id];
	uintptr_t addr = priv->base + gate->offset;

	if (gate->set_clr != 0U) {
		mmio_write_32(addr, BIT(gate->bit_idx));
	} else {
		mmio_setbits_32(addr, BIT(gate->bit_idx));
	}

	return 0;
}

void clk_stm32_gate_disable(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_stm32_gate_cfg *cfg = clk->clock_cfg;
	const struct gate_cfg *gate = &priv->gates[cfg->id];
	uintptr_t addr = priv->base + gate->offset;

	if (gate->set_clr != 0U) {
		mmio_write_32(addr + RCC_MP_ENCLRR_OFFSET, BIT(gate->bit_idx));
	} else {
		mmio_clrbits_32(addr, BIT(gate->bit_idx));
	}
}

bool _clk_stm32_gate_is_enabled(struct stm32_clk_priv *priv, int gate_id)
{
	const struct gate_cfg *gate;
	uintptr_t addr;

	gate = &priv->gates[gate_id];
	addr = priv->base + gate->offset;

	return ((mmio_read_32(addr) & BIT(gate->bit_idx)) != 0U);
}

bool clk_stm32_gate_is_enabled(struct stm32_clk_priv *priv, int id)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_stm32_gate_cfg *cfg = clk->clock_cfg;

	return _clk_stm32_gate_is_enabled(priv, cfg->id);
}

const struct stm32_clk_ops clk_stm32_gate_ops = {
	.enable = clk_stm32_gate_enable,
	.disable = clk_stm32_gate_disable,
	.is_enabled = clk_stm32_gate_is_enabled,
};

const struct stm32_clk_ops clk_fixed_factor_ops = {
	.recalc_rate = fixed_factor_recalc_rate,
};

unsigned long fixed_factor_recalc_rate(struct stm32_clk_priv *priv,
				       int id, unsigned long prate)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	const struct fixed_factor_cfg *cfg = clk->clock_cfg;
	unsigned long long rate;

	rate = (unsigned long long)prate * cfg->mult;

	if (cfg->div == 0U) {
		ERROR("division by zero\n");
		panic();
	}

	return (unsigned long)(rate / cfg->div);
}

#define APB_DIV_MASK	GENMASK(2, 0)
#define TIM_PRE_MASK	BIT(0)

static unsigned long timer_recalc_rate(struct stm32_clk_priv *priv,
				       int id, unsigned long prate)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	const struct clk_timer_cfg *cfg = clk->clock_cfg;
	uint32_t prescaler, timpre;
	uintptr_t rcc_base = priv->base;

	prescaler = mmio_read_32(rcc_base + cfg->apbdiv) & APB_DIV_MASK;
	timpre = mmio_read_32(rcc_base + cfg->timpre) & TIM_PRE_MASK;

	if (prescaler == 0U) {
		return prate;
	}

	return prate * (timpre + 1U) * 2U;
}

const struct stm32_clk_ops clk_timer_ops = {
	.recalc_rate = timer_recalc_rate,
};

static unsigned long clk_fixed_rate_recalc(struct stm32_clk_priv *priv, int id,
					   unsigned long prate)
{
	const struct clk_stm32 *clk = _clk_get(priv, id);
	struct clk_stm32_fixed_rate_cfg *cfg = clk->clock_cfg;

	return cfg->rate;
}

const struct stm32_clk_ops clk_stm32_fixed_rate_ops = {
	.recalc_rate = clk_fixed_rate_recalc,
};

static unsigned long clk_stm32_osc_recalc_rate(struct stm32_clk_priv *priv,
					       int id, unsigned long prate)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);

	return osc_data->frequency;
}

bool clk_stm32_osc_gate_is_enabled(struct stm32_clk_priv *priv, int id)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);

	return _clk_stm32_gate_is_enabled(priv, osc_data->gate_id);
}

int clk_stm32_osc_gate_enable(struct stm32_clk_priv *priv, int id)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);

	_clk_stm32_gate_enable(priv, osc_data->gate_id);

	if (_clk_stm32_gate_wait_ready(priv, osc_data->gate_rdy_id, true) != 0U) {
		ERROR("%s: %s (%d)\n", __func__, osc_data->name, __LINE__);
		panic();
	}

	return 0;
}

void clk_stm32_osc_gate_disable(struct stm32_clk_priv *priv, int id)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);

	_clk_stm32_gate_disable(priv, osc_data->gate_id);

	if (_clk_stm32_gate_wait_ready(priv, osc_data->gate_rdy_id, false) != 0U) {
		ERROR("%s: %s (%d)\n", __func__, osc_data->name, __LINE__);
		panic();
	}
}
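
/*
 * Retrieve the frequency of a fixed oscillator from the device tree: walk
 * the subnodes of "/clocks" looking for an enabled node whose name matches,
 * and return its "clock-frequency" property, or 0 when the node or the
 * property is missing. Expected device-tree shape (illustrative values):
 *
 *	clocks {
 *		clk_hse: clk-hse {
 *			compatible = "fixed-clock";
 *			clock-frequency = <24000000>;
 *		};
 *	};
 */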
static unsigned long clk_stm32_get_dt_oscillator_frequency(const char *name)
{
	void *fdt = NULL;
	int node = 0;
	int subnode = 0;

	if (fdt_get_address(&fdt) == 0) {
		panic();
	}

	node = fdt_path_offset(fdt, "/clocks");
	if (node < 0) {
		return 0UL;
	}

	fdt_for_each_subnode(subnode, fdt, node) {
		const char *cchar = NULL;
		const fdt32_t *cuint = NULL;
		int ret = 0;

		cchar = fdt_get_name(fdt, subnode, &ret);
		if (cchar == NULL) {
			continue;
		}

		if (strncmp(cchar, name, (size_t)ret) ||
		    fdt_get_status(subnode) == DT_DISABLED) {
			continue;
		}

		cuint = fdt_getprop(fdt, subnode, "clock-frequency", &ret);
		if (cuint == NULL) {
			return 0UL;
		}

		return fdt32_to_cpu(*cuint);
	}

	return 0UL;
}

void clk_stm32_osc_init(struct stm32_clk_priv *priv, int id)
{
	struct clk_oscillator_data *osc_data = clk_oscillator_get_data(priv, id);
	const char *name = osc_data->name;

	osc_data->frequency = clk_stm32_get_dt_oscillator_frequency(name);
}

const struct stm32_clk_ops clk_stm32_osc_ops = {
	.recalc_rate = clk_stm32_osc_recalc_rate,
	.is_enabled = clk_stm32_osc_gate_is_enabled,
	.enable = clk_stm32_osc_gate_enable,
	.disable = clk_stm32_osc_gate_disable,
	.init = clk_stm32_osc_init,
};

const struct stm32_clk_ops clk_stm32_osc_nogate_ops = {
	.recalc_rate = clk_stm32_osc_recalc_rate,
	.init = clk_stm32_osc_init,
};

int stm32_clk_parse_fdt_by_name(void *fdt, int node, const char *name, uint32_t *tab, uint32_t *nb)
{
	const fdt32_t *cell;
	int len = 0;
	uint32_t i;

	cell = fdt_getprop(fdt, node, name, &len);
	if (cell == NULL) {
		*nb = 0U;
		return 0;
	}

	for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++) {
		uint32_t val = fdt32_to_cpu(cell[i]);

		tab[i] = val;
	}

	*nb = (uint32_t)len / sizeof(uint32_t);

	return 0;
}
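
/*
 * Entry point for platform clock drivers: record the clock tree description
 * and the RCC base address, run each clock's optional init() hook, then
 * register the driver with the generic clock framework.
 *
 * Minimal sketch of a platform call site (names are illustrative):
 *
 *	clk_stm32_init(&stm32mp_clock_pdata, RCC_BASE);
 */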
int clk_stm32_init(struct stm32_clk_priv *priv, uintptr_t base)
{
	unsigned int i;

	stm32_clock_data = priv;
	priv->base = base;

	for (i = 0U; i < priv->num; i++) {
		const struct clk_stm32 *clk = _clk_get(priv, i);

		assert(clk->ops != NULL);

		if (clk->ops->init != NULL) {
			clk->ops->init(priv, i);
		}
	}

	stm32_clk_register();

	return 0;
}