  1. /*
  2. * Copyright 2024 NXP
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <errno.h>
  7. #include <common/debug.h>
  8. #include <drivers/clk.h>
  9. #include <lib/mmio.h>
  10. #include <s32cc-clk-ids.h>
  11. #include <s32cc-clk-modules.h>
  12. #include <s32cc-clk-regs.h>
  13. #include <s32cc-clk-utils.h>
  14. #include <s32cc-mc-me.h>
  15. #define MAX_STACK_DEPTH (40U)
  16. /* This is used for floating-point precision calculations. */
  17. #define FP_PRECISION (100000000UL)
/*
 * Driver state: base addresses of every hardware block touched by this
 * clock driver. Populated once from the platform memory map in get_drv().
 */
struct s32cc_clk_drv {
	uintptr_t fxosc_base;		/* Fast external crystal oscillator */
	uintptr_t armpll_base;		/* ARM PLL (PLLDIG instance) */
	uintptr_t periphpll_base;	/* Peripheral PLL (PLLDIG instance) */
	uintptr_t armdfs_base;		/* ARM DFS (digital frequency synthesizer) */
	uintptr_t cgm0_base;		/* Clock generation module 0 */
	uintptr_t cgm1_base;		/* Clock generation module 1 */
	uintptr_t cgm5_base;		/* Clock generation module 5 */
	uintptr_t ddrpll_base;		/* DDR PLL (PLLDIG instance) */
	uintptr_t mc_me;		/* Mode entry module */
	uintptr_t mc_rgm;		/* Reset generation module */
	uintptr_t rdc;			/* Reset domain controller */
};
  31. static int update_stack_depth(unsigned int *depth)
  32. {
  33. if (*depth == 0U) {
  34. return -ENOMEM;
  35. }
  36. (*depth)--;
  37. return 0;
  38. }
  39. static struct s32cc_clk_drv *get_drv(void)
  40. {
  41. static struct s32cc_clk_drv driver = {
  42. .fxosc_base = FXOSC_BASE_ADDR,
  43. .armpll_base = ARMPLL_BASE_ADDR,
  44. .periphpll_base = PERIPHPLL_BASE_ADDR,
  45. .armdfs_base = ARM_DFS_BASE_ADDR,
  46. .cgm0_base = CGM0_BASE_ADDR,
  47. .cgm1_base = CGM1_BASE_ADDR,
  48. .cgm5_base = MC_CGM5_BASE_ADDR,
  49. .ddrpll_base = DDRPLL_BASE_ADDR,
  50. .mc_me = MC_ME_BASE_ADDR,
  51. .mc_rgm = MC_RGM_BASE_ADDR,
  52. .rdc = RDC_BASE_ADDR,
  53. };
  54. return &driver;
  55. }
  56. static int enable_module(struct s32cc_clk_obj *module,
  57. const struct s32cc_clk_drv *drv,
  58. unsigned int depth);
  59. static struct s32cc_clk_obj *get_clk_parent(const struct s32cc_clk_obj *module)
  60. {
  61. const struct s32cc_clk *clk = s32cc_obj2clk(module);
  62. if (clk->module != NULL) {
  63. return clk->module;
  64. }
  65. if (clk->pclock != NULL) {
  66. return &clk->pclock->desc;
  67. }
  68. return NULL;
  69. }
  70. static int get_base_addr(enum s32cc_clk_source id, const struct s32cc_clk_drv *drv,
  71. uintptr_t *base)
  72. {
  73. int ret = 0;
  74. switch (id) {
  75. case S32CC_FXOSC:
  76. *base = drv->fxosc_base;
  77. break;
  78. case S32CC_ARM_PLL:
  79. *base = drv->armpll_base;
  80. break;
  81. case S32CC_PERIPH_PLL:
  82. *base = drv->periphpll_base;
  83. break;
  84. case S32CC_DDR_PLL:
  85. *base = drv->ddrpll_base;
  86. break;
  87. case S32CC_ARM_DFS:
  88. *base = drv->armdfs_base;
  89. break;
  90. case S32CC_CGM0:
  91. *base = drv->cgm0_base;
  92. break;
  93. case S32CC_CGM1:
  94. *base = drv->cgm1_base;
  95. break;
  96. case S32CC_CGM5:
  97. *base = drv->cgm5_base;
  98. break;
  99. case S32CC_FIRC:
  100. break;
  101. case S32CC_SIRC:
  102. break;
  103. default:
  104. ret = -EINVAL;
  105. break;
  106. }
  107. if (ret != 0) {
  108. ERROR("Unknown clock source id: %u\n", id);
  109. }
  110. return ret;
  111. }
/*
 * Power up the fast external crystal oscillator (FXOSC).
 *
 * If the oscillator is already on (OSCON set) the function returns
 * immediately. Otherwise it programs the control register, switches the
 * oscillator on, and busy-waits until the hardware reports a stable clock.
 * The write sequence (configure first, then set OSCON) is intentional.
 */
static void enable_fxosc(const struct s32cc_clk_drv *drv)
{
	uintptr_t fxosc_base = drv->fxosc_base;
	uint32_t ctrl;

	ctrl = mmio_read_32(FXOSC_CTRL(fxosc_base));
	if ((ctrl & FXOSC_CTRL_OSCON) != U(0)) {
		/* Already running - nothing to do. */
		return;
	}

	/* Build the control word from scratch: comparator enabled,
	 * bypass cleared, end-of-count and transconductance values set.
	 */
	ctrl = FXOSC_CTRL_COMP_EN;
	ctrl &= ~FXOSC_CTRL_OSC_BYP;
	ctrl |= FXOSC_CTRL_EOCV(0x1);
	ctrl |= FXOSC_CTRL_GM_SEL(0x7);
	mmio_write_32(FXOSC_CTRL(fxosc_base), ctrl);

	/* Switch ON the crystal oscillator. */
	mmio_setbits_32(FXOSC_CTRL(fxosc_base), FXOSC_CTRL_OSCON);

	/* Wait until the clock is stable. */
	while ((mmio_read_32(FXOSC_STAT(fxosc_base)) & FXOSC_STAT_OSC_STAT) == U(0)) {
	}
}
  131. static int enable_osc(struct s32cc_clk_obj *module,
  132. const struct s32cc_clk_drv *drv,
  133. unsigned int depth)
  134. {
  135. const struct s32cc_osc *osc = s32cc_obj2osc(module);
  136. unsigned int ldepth = depth;
  137. int ret = 0;
  138. ret = update_stack_depth(&ldepth);
  139. if (ret != 0) {
  140. return ret;
  141. }
  142. switch (osc->source) {
  143. case S32CC_FXOSC:
  144. enable_fxosc(drv);
  145. break;
  146. /* FIRC and SIRC oscillators are enabled by default */
  147. case S32CC_FIRC:
  148. break;
  149. case S32CC_SIRC:
  150. break;
  151. default:
  152. ERROR("Invalid oscillator %d\n", osc->source);
  153. ret = -EINVAL;
  154. break;
  155. };
  156. return ret;
  157. }
  158. static struct s32cc_clk_obj *get_pll_parent(const struct s32cc_clk_obj *module)
  159. {
  160. const struct s32cc_pll *pll = s32cc_obj2pll(module);
  161. if (pll->source == NULL) {
  162. ERROR("Failed to identify PLL's parent\n");
  163. }
  164. return pll->source;
  165. }
  166. static int get_pll_mfi_mfn(unsigned long pll_vco, unsigned long ref_freq,
  167. uint32_t *mfi, uint32_t *mfn)
  168. {
  169. unsigned long vco;
  170. unsigned long mfn64;
  171. /* FRAC-N mode */
  172. *mfi = (uint32_t)(pll_vco / ref_freq);
  173. /* MFN formula : (double)(pll_vco % ref_freq) / ref_freq * 18432.0 */
  174. mfn64 = pll_vco % ref_freq;
  175. mfn64 *= FP_PRECISION;
  176. mfn64 /= ref_freq;
  177. mfn64 *= 18432UL;
  178. mfn64 /= FP_PRECISION;
  179. if (mfn64 > UINT32_MAX) {
  180. return -EINVAL;
  181. }
  182. *mfn = (uint32_t)mfn64;
  183. vco = ((unsigned long)*mfn * FP_PRECISION) / 18432UL;
  184. vco += (unsigned long)*mfi * FP_PRECISION;
  185. vco *= ref_freq;
  186. vco /= FP_PRECISION;
  187. if (vco != pll_vco) {
  188. ERROR("Failed to find MFI and MFN settings for PLL freq %lu. Nearest freq = %lu\n",
  189. pll_vco, vco);
  190. return -EINVAL;
  191. }
  192. return 0;
  193. }
  194. static struct s32cc_clkmux *get_pll_mux(const struct s32cc_pll *pll)
  195. {
  196. const struct s32cc_clk_obj *source = pll->source;
  197. const struct s32cc_clk *clk;
  198. if (source == NULL) {
  199. ERROR("Failed to identify PLL's parent\n");
  200. return NULL;
  201. }
  202. if (source->type != s32cc_clk_t) {
  203. ERROR("The parent of the PLL isn't a clock\n");
  204. return NULL;
  205. }
  206. clk = s32cc_obj2clk(source);
  207. if (clk->module == NULL) {
  208. ERROR("The clock isn't connected to a module\n");
  209. return NULL;
  210. }
  211. source = clk->module;
  212. if ((source->type != s32cc_clkmux_t) &&
  213. (source->type != s32cc_shared_clkmux_t)) {
  214. ERROR("The parent of the PLL isn't a MUX\n");
  215. return NULL;
  216. }
  217. return s32cc_obj2clkmux(source);
  218. }
/* Gate one PLL output divider by clearing its divider-enable bit. */
static void disable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_clrbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

/* Ungate one PLL output divider by setting its divider-enable bit. */
static void enable_odiv(uintptr_t pll_addr, uint32_t div_index)
{
	mmio_setbits_32(PLLDIG_PLLODIV(pll_addr, div_index), PLLDIG_PLLODIV_DE);
}

/* Gate all ndivs output dividers of a PLL, in index order. */
static void disable_odivs(uintptr_t pll_addr, uint32_t ndivs)
{
	uint32_t i;

	for (i = 0; i < ndivs; i++) {
		disable_odiv(pll_addr, i);
	}
}

/* Power on a PLL and busy-wait until it reports lock. */
static void enable_pll_hw(uintptr_t pll_addr)
{
	/* Enable the PLL. */
	mmio_write_32(PLLDIG_PLLCR(pll_addr), 0x0);

	/* Poll until PLL acquires lock. */
	while ((mmio_read_32(PLLDIG_PLLSR(pll_addr)) & PLLDIG_PLLSR_LOCK) == 0U) {
	}
}

/* Power down a PLL. */
static void disable_pll_hw(uintptr_t pll_addr)
{
	mmio_write_32(PLLDIG_PLLCR(pll_addr), PLLDIG_PLLCR_PLLPD);
}
/*
 * Program a PLL to its configured VCO frequency.
 *
 * Sequence: compute MFI/MFN from the source clock frequency, gate all
 * output dividers, power the PLL down, select the source clock, program
 * the dividers (RDIV fixed to 1, sigma-delta modulation enabled), then
 * power the PLL back up and wait for lock. The order matters: the PLL
 * must be down while PLLCLKMUX/PLLDV/PLLFD are written.
 *
 * NOTE(review): 'drv' is currently unused here - presumably kept for
 * signature symmetry with the other enable paths.
 *
 * Returns 0 on success, -EINVAL when no exact MFI/MFN setting exists.
 */
static int program_pll(const struct s32cc_pll *pll, uintptr_t pll_addr,
		       const struct s32cc_clk_drv *drv, uint32_t sclk_id,
		       unsigned long sclk_freq)
{
	uint32_t rdiv = 1, mfi, mfn;
	int ret;

	ret = get_pll_mfi_mfn(pll->vco_freq, sclk_freq, &mfi, &mfn);
	if (ret != 0) {
		return -EINVAL;
	}

	/* Disable ODIVs*/
	disable_odivs(pll_addr, pll->ndividers);

	/* Disable PLL */
	disable_pll_hw(pll_addr);

	/* Program PLLCLKMUX */
	mmio_write_32(PLLDIG_PLLCLKMUX(pll_addr), sclk_id);

	/* Program VCO */
	mmio_clrsetbits_32(PLLDIG_PLLDV(pll_addr),
			   PLLDIG_PLLDV_RDIV_MASK | PLLDIG_PLLDV_MFI_MASK,
			   PLLDIG_PLLDV_RDIV_SET(rdiv) | PLLDIG_PLLDV_MFI(mfi));

	mmio_write_32(PLLDIG_PLLFD(pll_addr),
		      PLLDIG_PLLFD_MFN_SET(mfn) | PLLDIG_PLLFD_SMDEN);

	enable_pll_hw(pll_addr);

	return ret;
}
  271. static int enable_pll(struct s32cc_clk_obj *module,
  272. const struct s32cc_clk_drv *drv,
  273. unsigned int depth)
  274. {
  275. const struct s32cc_pll *pll = s32cc_obj2pll(module);
  276. const struct s32cc_clkmux *mux;
  277. uintptr_t pll_addr = UL(0x0);
  278. unsigned int ldepth = depth;
  279. unsigned long sclk_freq;
  280. uint32_t sclk_id;
  281. int ret;
  282. ret = update_stack_depth(&ldepth);
  283. if (ret != 0) {
  284. return ret;
  285. }
  286. mux = get_pll_mux(pll);
  287. if (mux == NULL) {
  288. return -EINVAL;
  289. }
  290. if (pll->instance != mux->module) {
  291. ERROR("MUX type is not in sync with PLL ID\n");
  292. return -EINVAL;
  293. }
  294. ret = get_base_addr(pll->instance, drv, &pll_addr);
  295. if (ret != 0) {
  296. ERROR("Failed to detect PLL instance\n");
  297. return ret;
  298. }
  299. switch (mux->source_id) {
  300. case S32CC_CLK_FIRC:
  301. sclk_freq = 48U * MHZ;
  302. sclk_id = 0;
  303. break;
  304. case S32CC_CLK_FXOSC:
  305. sclk_freq = 40U * MHZ;
  306. sclk_id = 1;
  307. break;
  308. default:
  309. ERROR("Invalid source selection for PLL 0x%lx\n",
  310. pll_addr);
  311. return -EINVAL;
  312. };
  313. return program_pll(pll, pll_addr, drv, sclk_id, sclk_freq);
  314. }
  315. static inline struct s32cc_pll *get_div_pll(const struct s32cc_pll_out_div *pdiv)
  316. {
  317. const struct s32cc_clk_obj *parent;
  318. parent = pdiv->parent;
  319. if (parent == NULL) {
  320. ERROR("Failed to identify PLL divider's parent\n");
  321. return NULL;
  322. }
  323. if (parent->type != s32cc_pll_t) {
  324. ERROR("The parent of the divider is not a PLL instance\n");
  325. return NULL;
  326. }
  327. return s32cc_obj2pll(parent);
  328. }
/*
 * Program one PLL output divider to divide by 'dc'.
 *
 * Skips the hardware access when the divider is already enabled with the
 * requested ratio. Otherwise the divider is disabled before the new value
 * is written (the register stores dc - 1), then re-enabled.
 */
static void config_pll_out_div(uintptr_t pll_addr, uint32_t div_index, uint32_t dc)
{
	uint32_t pllodiv;
	uint32_t pdiv;

	pllodiv = mmio_read_32(PLLDIG_PLLODIV(pll_addr, div_index));
	pdiv = PLLDIG_PLLODIV_DIV(pllodiv);

	/* Already configured with the requested ratio and enabled. */
	if (((pdiv + 1U) == dc) && ((pllodiv & PLLDIG_PLLODIV_DE) != 0U)) {
		return;
	}

	/* Disable before reprogramming the ratio. */
	if ((pllodiv & PLLDIG_PLLODIV_DE) != 0U) {
		disable_odiv(pll_addr, div_index);
	}

	pllodiv = PLLDIG_PLLODIV_DIV_SET(dc - 1U);
	mmio_write_32(PLLDIG_PLLODIV(pll_addr, div_index), pllodiv);

	enable_odiv(pll_addr, div_index);
}
  345. static struct s32cc_clk_obj *get_pll_div_parent(const struct s32cc_clk_obj *module)
  346. {
  347. const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
  348. if (pdiv->parent == NULL) {
  349. ERROR("Failed to identify PLL DIV's parent\n");
  350. }
  351. return pdiv->parent;
  352. }
  353. static int enable_pll_div(struct s32cc_clk_obj *module,
  354. const struct s32cc_clk_drv *drv,
  355. unsigned int depth)
  356. {
  357. const struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
  358. uintptr_t pll_addr = 0x0ULL;
  359. unsigned int ldepth = depth;
  360. const struct s32cc_pll *pll;
  361. uint32_t dc;
  362. int ret;
  363. ret = update_stack_depth(&ldepth);
  364. if (ret != 0) {
  365. return ret;
  366. }
  367. pll = get_div_pll(pdiv);
  368. if (pll == NULL) {
  369. ERROR("The parent of the PLL DIV is invalid\n");
  370. return 0;
  371. }
  372. ret = get_base_addr(pll->instance, drv, &pll_addr);
  373. if (ret != 0) {
  374. ERROR("Failed to detect PLL instance\n");
  375. return -EINVAL;
  376. }
  377. dc = (uint32_t)(pll->vco_freq / pdiv->freq);
  378. config_pll_out_div(pll_addr, pdiv->index, dc);
  379. return 0;
  380. }
/*
 * Switch one MC_CGM mux to a new source (or to the safe clock).
 *
 * The hardware handshake is: wait out any in-progress switch, write the
 * new selector with the switch-trigger bit (or the safe-switch bit),
 * wait for the trigger bit to auto-clear, wait for the switch-in-progress
 * flag to drop, then read back the status register and verify both the
 * switch trigger cause and the selected source.
 *
 * Returns 0 when the switch succeeded, -EINVAL otherwise.
 */
static int cgm_mux_clk_config(uintptr_t cgm_addr, uint32_t mux, uint32_t source,
			      bool safe_clk)
{
	uint32_t css, csc;

	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));

	/* Already configured */
	if ((MC_CGM_MUXn_CSS_SELSTAT(css) == source) &&
	    (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
	    ((css & MC_CGM_MUXn_CSS_SWIP) == 0U) && !safe_clk) {
		return 0;
	}

	/* Ongoing clock switch? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	csc = mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux));

	/* Clear previous source. */
	csc &= ~(MC_CGM_MUXn_CSC_SELCTL_MASK);

	if (!safe_clk) {
		/* Select the clock source and trigger the clock switch. */
		csc |= MC_CGM_MUXn_CSC_SELCTL(source) | MC_CGM_MUXn_CSC_CLK_SW;
	} else {
		/* Switch to safe clock */
		csc |= MC_CGM_MUXn_CSC_SAFE_SW;
	}

	mmio_write_32(CGM_MUXn_CSC(cgm_addr, mux), csc);

	/* Wait for configuration bit to auto-clear. */
	while ((mmio_read_32(CGM_MUXn_CSC(cgm_addr, mux)) &
		MC_CGM_MUXn_CSC_CLK_SW) != 0U) {
	}

	/* Is the clock switch completed? */
	while ((mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux)) &
		MC_CGM_MUXn_CSS_SWIP) != 0U) {
	}

	/*
	 * Check if the switch succeeded.
	 * Check switch trigger cause and the source.
	 */
	css = mmio_read_32(CGM_MUXn_CSS(cgm_addr, mux));
	if (!safe_clk) {
		if ((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SUCCESS) &&
		    (MC_CGM_MUXn_CSS_SELSTAT(css) == source)) {
			return 0;
		}

		ERROR("Failed to change the source of mux %" PRIu32 " to %" PRIu32 " (CGM=%lu)\n",
		      mux, source, cgm_addr);
	} else {
		if (((MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK) ||
		     (MC_CGM_MUXn_CSS_SWTRG(css) == MC_CGM_MUXn_CSS_SWTRG_SAFE_CLK_INACTIVE)) &&
		    ((MC_CGM_MUXn_CSS_SAFE_SW & css) != 0U)) {
			return 0;
		}

		ERROR("The switch of mux %" PRIu32 " (CGM=%lu) to safe clock failed\n",
		      mux, cgm_addr);
	}

	return -EINVAL;
}
  438. static int enable_cgm_mux(const struct s32cc_clkmux *mux,
  439. const struct s32cc_clk_drv *drv)
  440. {
  441. uintptr_t cgm_addr = UL(0x0);
  442. uint32_t mux_hw_clk;
  443. int ret;
  444. ret = get_base_addr(mux->module, drv, &cgm_addr);
  445. if (ret != 0) {
  446. return ret;
  447. }
  448. mux_hw_clk = (uint32_t)S32CC_CLK_ID(mux->source_id);
  449. return cgm_mux_clk_config(cgm_addr, mux->index,
  450. mux_hw_clk, false);
  451. }
  452. static struct s32cc_clk_obj *get_mux_parent(const struct s32cc_clk_obj *module)
  453. {
  454. const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
  455. struct s32cc_clk *clk;
  456. if (mux == NULL) {
  457. return NULL;
  458. }
  459. clk = s32cc_get_arch_clk(mux->source_id);
  460. if (clk == NULL) {
  461. ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
  462. mux->source_id, mux->index);
  463. return NULL;
  464. }
  465. return &clk->desc;
  466. }
  467. static int enable_mux(struct s32cc_clk_obj *module,
  468. const struct s32cc_clk_drv *drv,
  469. unsigned int depth)
  470. {
  471. const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
  472. unsigned int ldepth = depth;
  473. const struct s32cc_clk *clk;
  474. int ret = 0;
  475. ret = update_stack_depth(&ldepth);
  476. if (ret != 0) {
  477. return ret;
  478. }
  479. if (mux == NULL) {
  480. return -EINVAL;
  481. }
  482. clk = s32cc_get_arch_clk(mux->source_id);
  483. if (clk == NULL) {
  484. ERROR("Invalid parent (%lu) for mux %" PRIu8 "\n",
  485. mux->source_id, mux->index);
  486. return -EINVAL;
  487. }
  488. switch (mux->module) {
  489. /* PLL mux will be enabled by PLL setup */
  490. case S32CC_ARM_PLL:
  491. case S32CC_PERIPH_PLL:
  492. case S32CC_DDR_PLL:
  493. break;
  494. case S32CC_CGM1:
  495. ret = enable_cgm_mux(mux, drv);
  496. break;
  497. case S32CC_CGM0:
  498. ret = enable_cgm_mux(mux, drv);
  499. break;
  500. case S32CC_CGM5:
  501. ret = enable_cgm_mux(mux, drv);
  502. break;
  503. default:
  504. ERROR("Unknown mux parent type: %d\n", mux->module);
  505. ret = -EINVAL;
  506. break;
  507. };
  508. return ret;
  509. }
  510. static struct s32cc_clk_obj *get_dfs_parent(const struct s32cc_clk_obj *module)
  511. {
  512. const struct s32cc_dfs *dfs = s32cc_obj2dfs(module);
  513. if (dfs->parent == NULL) {
  514. ERROR("Failed to identify DFS's parent\n");
  515. }
  516. return dfs->parent;
  517. }
  518. static int enable_dfs(struct s32cc_clk_obj *module,
  519. const struct s32cc_clk_drv *drv,
  520. unsigned int depth)
  521. {
  522. unsigned int ldepth = depth;
  523. int ret = 0;
  524. ret = update_stack_depth(&ldepth);
  525. if (ret != 0) {
  526. return ret;
  527. }
  528. return 0;
  529. }
  530. static struct s32cc_dfs *get_div_dfs(const struct s32cc_dfs_div *dfs_div)
  531. {
  532. const struct s32cc_clk_obj *parent = dfs_div->parent;
  533. if (parent->type != s32cc_dfs_t) {
  534. ERROR("DFS DIV doesn't have a DFS as parent\n");
  535. return NULL;
  536. }
  537. return s32cc_obj2dfs(parent);
  538. }
  539. static struct s32cc_pll *dfsdiv2pll(const struct s32cc_dfs_div *dfs_div)
  540. {
  541. const struct s32cc_clk_obj *parent;
  542. const struct s32cc_dfs *dfs;
  543. dfs = get_div_dfs(dfs_div);
  544. if (dfs == NULL) {
  545. return NULL;
  546. }
  547. parent = dfs->parent;
  548. if (parent->type != s32cc_pll_t) {
  549. return NULL;
  550. }
  551. return s32cc_obj2pll(parent);
  552. }
  553. static int get_dfs_mfi_mfn(unsigned long dfs_freq, const struct s32cc_dfs_div *dfs_div,
  554. uint32_t *mfi, uint32_t *mfn)
  555. {
  556. uint64_t factor64, tmp64, ofreq;
  557. uint32_t factor32;
  558. unsigned long in = dfs_freq;
  559. unsigned long out = dfs_div->freq;
  560. /**
  561. * factor = (IN / OUT) / 2
  562. * MFI = integer(factor)
  563. * MFN = (factor - MFI) * 36
  564. */
  565. factor64 = ((((uint64_t)in) * FP_PRECISION) / ((uint64_t)out)) / 2ULL;
  566. tmp64 = factor64 / FP_PRECISION;
  567. if (tmp64 > UINT32_MAX) {
  568. return -EINVAL;
  569. }
  570. factor32 = (uint32_t)tmp64;
  571. *mfi = factor32;
  572. tmp64 = ((factor64 - ((uint64_t)*mfi * FP_PRECISION)) * 36UL) / FP_PRECISION;
  573. if (tmp64 > UINT32_MAX) {
  574. return -EINVAL;
  575. }
  576. *mfn = (uint32_t)tmp64;
  577. /* div_freq = in / (2 * (*mfi + *mfn / 36.0)) */
  578. factor64 = (((uint64_t)*mfn) * FP_PRECISION) / 36ULL;
  579. factor64 += ((uint64_t)*mfi) * FP_PRECISION;
  580. factor64 *= 2ULL;
  581. ofreq = (((uint64_t)in) * FP_PRECISION) / factor64;
  582. if (ofreq != dfs_div->freq) {
  583. ERROR("Failed to find MFI and MFN settings for DFS DIV freq %lu\n",
  584. dfs_div->freq);
  585. ERROR("Nearest freq = %" PRIx64 "\n", ofreq);
  586. return -EINVAL;
  587. }
  588. return 0;
  589. }
/*
 * Program one DFS output port with the given MFI/MFN divider values.
 *
 * If the port is already running (PORTSR bit set), not in loss-of-lock
 * (PORTOLSR clear), and already holds the requested MFI/MFN, nothing is
 * done. Otherwise the port (or, when no port is active yet, all ports)
 * is put in reset, the divider is written, the DFS block is taken out of
 * reset on first use, the port reset is released, and the code waits for
 * the port to come up and checks for loss-of-lock.
 *
 * Returns 0 on success, -EINVAL when the port fails to lock.
 */
static int init_dfs_port(uintptr_t dfs_addr, uint32_t port,
			 uint32_t mfi, uint32_t mfn)
{
	uint32_t portsr, portolsr;
	uint32_t mask, old_mfi, old_mfn;
	uint32_t dvport;
	bool init_dfs;

	dvport = mmio_read_32(DFS_DVPORTn(dfs_addr, port));

	old_mfi = DFS_DVPORTn_MFI(dvport);
	old_mfn = DFS_DVPORTn_MFN(dvport);

	portsr = mmio_read_32(DFS_PORTSR(dfs_addr));
	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));

	/* Skip configuration if it's not needed */
	if (((portsr & BIT_32(port)) != 0U) &&
	    ((portolsr & BIT_32(port)) == 0U) &&
	    (mfi == old_mfi) && (mfn == old_mfn)) {
		return 0;
	}

	/* No port active yet => full DFS initialization is required. */
	init_dfs = (portsr == 0U);

	if (init_dfs) {
		mask = DFS_PORTRESET_MASK;
	} else {
		mask = DFS_PORTRESET_SET(BIT_32(port));
	}

	/* Clear loss-of-lock status and assert the port reset(s). */
	mmio_write_32(DFS_PORTOLSR(dfs_addr), mask);
	mmio_write_32(DFS_PORTRESET(dfs_addr), mask);

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & mask) != 0U) {
	}

	if (init_dfs) {
		mmio_write_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	mmio_write_32(DFS_DVPORTn(dfs_addr, port),
		      DFS_DVPORTn_MFI_SET(mfi) | DFS_DVPORTn_MFN_SET(mfn));

	if (init_dfs) {
		/* DFS clk enable programming */
		mmio_clrbits_32(DFS_CTL(dfs_addr), DFS_CTL_RESET);
	}

	/* Release the port reset and wait for the port to come up. */
	mmio_clrbits_32(DFS_PORTRESET(dfs_addr), BIT_32(port));

	while ((mmio_read_32(DFS_PORTSR(dfs_addr)) & BIT_32(port)) != BIT_32(port)) {
	}

	portolsr = mmio_read_32(DFS_PORTOLSR(dfs_addr));
	if ((portolsr & DFS_PORTOLSR_LOL(port)) != 0U) {
		ERROR("Failed to lock DFS divider\n");
		return -EINVAL;
	}

	return 0;
}
  637. static struct s32cc_clk_obj *
  638. get_dfs_div_parent(const struct s32cc_clk_obj *module)
  639. {
  640. const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
  641. if (dfs_div->parent == NULL) {
  642. ERROR("Failed to identify DFS divider's parent\n");
  643. }
  644. return dfs_div->parent;
  645. }
  646. static int enable_dfs_div(struct s32cc_clk_obj *module,
  647. const struct s32cc_clk_drv *drv,
  648. unsigned int depth)
  649. {
  650. const struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
  651. unsigned int ldepth = depth;
  652. const struct s32cc_pll *pll;
  653. const struct s32cc_dfs *dfs;
  654. uintptr_t dfs_addr = 0UL;
  655. uint32_t mfi, mfn;
  656. int ret = 0;
  657. ret = update_stack_depth(&ldepth);
  658. if (ret != 0) {
  659. return ret;
  660. }
  661. dfs = get_div_dfs(dfs_div);
  662. if (dfs == NULL) {
  663. return -EINVAL;
  664. }
  665. pll = dfsdiv2pll(dfs_div);
  666. if (pll == NULL) {
  667. ERROR("Failed to identify DFS divider's parent\n");
  668. return -EINVAL;
  669. }
  670. ret = get_base_addr(dfs->instance, drv, &dfs_addr);
  671. if ((ret != 0) || (dfs_addr == 0UL)) {
  672. return -EINVAL;
  673. }
  674. ret = get_dfs_mfi_mfn(pll->vco_freq, dfs_div, &mfi, &mfn);
  675. if (ret != 0) {
  676. return -EINVAL;
  677. }
  678. return init_dfs_port(dfs_addr, dfs_div->index, mfi, mfn);
  679. }
  680. typedef int (*enable_clk_t)(struct s32cc_clk_obj *module,
  681. const struct s32cc_clk_drv *drv,
  682. unsigned int depth);
  683. static int enable_part(struct s32cc_clk_obj *module,
  684. const struct s32cc_clk_drv *drv,
  685. unsigned int depth)
  686. {
  687. const struct s32cc_part *part = s32cc_obj2part(module);
  688. uint32_t part_no = part->partition_id;
  689. if ((drv->mc_me == 0UL) || (drv->mc_rgm == 0UL) || (drv->rdc == 0UL)) {
  690. return -EINVAL;
  691. }
  692. return mc_me_enable_partition(drv->mc_me, drv->mc_rgm, drv->rdc, part_no);
  693. }
  694. static int enable_part_block(struct s32cc_clk_obj *module,
  695. const struct s32cc_clk_drv *drv,
  696. unsigned int depth)
  697. {
  698. const struct s32cc_part_block *block = s32cc_obj2partblock(module);
  699. const struct s32cc_part *part = block->part;
  700. uint32_t part_no = part->partition_id;
  701. unsigned int ldepth = depth;
  702. uint32_t cofb;
  703. int ret;
  704. ret = update_stack_depth(&ldepth);
  705. if (ret != 0) {
  706. return ret;
  707. }
  708. if ((block->block >= s32cc_part_block0) &&
  709. (block->block <= s32cc_part_block15)) {
  710. cofb = (uint32_t)block->block - (uint32_t)s32cc_part_block0;
  711. mc_me_enable_part_cofb(drv->mc_me, part_no, cofb, block->status);
  712. } else {
  713. ERROR("Unknown partition block type: %d\n", block->block);
  714. return -EINVAL;
  715. }
  716. return 0;
  717. }
  718. static struct s32cc_clk_obj *
  719. get_part_block_parent(const struct s32cc_clk_obj *module)
  720. {
  721. const struct s32cc_part_block *block = s32cc_obj2partblock(module);
  722. return &block->part->desc;
  723. }
  724. static int enable_module_with_refcount(struct s32cc_clk_obj *module,
  725. const struct s32cc_clk_drv *drv,
  726. unsigned int depth);
  727. static int enable_part_block_link(struct s32cc_clk_obj *module,
  728. const struct s32cc_clk_drv *drv,
  729. unsigned int depth)
  730. {
  731. const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);
  732. struct s32cc_part_block *block = link->block;
  733. unsigned int ldepth = depth;
  734. int ret;
  735. ret = update_stack_depth(&ldepth);
  736. if (ret != 0) {
  737. return ret;
  738. }
  739. /* Move the enablement algorithm to partition tree */
  740. return enable_module_with_refcount(&block->desc, drv, ldepth);
  741. }
  742. static struct s32cc_clk_obj *
  743. get_part_block_link_parent(const struct s32cc_clk_obj *module)
  744. {
  745. const struct s32cc_part_block_link *link = s32cc_obj2partblocklink(module);
  746. return link->parent;
  747. }
/*
 * No-op enable callback for object types that need no hardware
 * programming of their own (plain clocks). Always succeeds.
 */
static int no_enable(struct s32cc_clk_obj *module,
		     const struct s32cc_clk_drv *drv,
		     unsigned int depth)
{
	return 0;
}
  754. static int exec_cb_with_refcount(enable_clk_t en_cb, struct s32cc_clk_obj *mod,
  755. const struct s32cc_clk_drv *drv, bool leaf_node,
  756. unsigned int depth)
  757. {
  758. unsigned int ldepth = depth;
  759. int ret = 0;
  760. if (mod == NULL) {
  761. return 0;
  762. }
  763. ret = update_stack_depth(&ldepth);
  764. if (ret != 0) {
  765. return ret;
  766. }
  767. /* Refcount will be updated as part of the recursivity */
  768. if (leaf_node) {
  769. return en_cb(mod, drv, ldepth);
  770. }
  771. if (mod->refcount == 0U) {
  772. ret = en_cb(mod, drv, ldepth);
  773. }
  774. if (ret == 0) {
  775. mod->refcount++;
  776. }
  777. return ret;
  778. }
  779. static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module);
  780. static int enable_module(struct s32cc_clk_obj *module,
  781. const struct s32cc_clk_drv *drv,
  782. unsigned int depth)
  783. {
  784. struct s32cc_clk_obj *parent = get_module_parent(module);
  785. static const enable_clk_t enable_clbs[12] = {
  786. [s32cc_clk_t] = no_enable,
  787. [s32cc_osc_t] = enable_osc,
  788. [s32cc_pll_t] = enable_pll,
  789. [s32cc_pll_out_div_t] = enable_pll_div,
  790. [s32cc_clkmux_t] = enable_mux,
  791. [s32cc_shared_clkmux_t] = enable_mux,
  792. [s32cc_dfs_t] = enable_dfs,
  793. [s32cc_dfs_div_t] = enable_dfs_div,
  794. [s32cc_part_t] = enable_part,
  795. [s32cc_part_block_t] = enable_part_block,
  796. [s32cc_part_block_link_t] = enable_part_block_link,
  797. };
  798. unsigned int ldepth = depth;
  799. uint32_t index;
  800. int ret = 0;
  801. ret = update_stack_depth(&ldepth);
  802. if (ret != 0) {
  803. return ret;
  804. }
  805. if (drv == NULL) {
  806. return -EINVAL;
  807. }
  808. index = (uint32_t)module->type;
  809. if (index >= ARRAY_SIZE(enable_clbs)) {
  810. ERROR("Undefined module type: %d\n", module->type);
  811. return -EINVAL;
  812. }
  813. if (enable_clbs[index] == NULL) {
  814. ERROR("Undefined callback for the clock type: %d\n",
  815. module->type);
  816. return -EINVAL;
  817. }
  818. parent = get_module_parent(module);
  819. ret = exec_cb_with_refcount(enable_module, parent, drv,
  820. false, ldepth);
  821. if (ret != 0) {
  822. return ret;
  823. }
  824. ret = exec_cb_with_refcount(enable_clbs[index], module, drv,
  825. true, ldepth);
  826. if (ret != 0) {
  827. return ret;
  828. }
  829. return ret;
  830. }
  831. static int enable_module_with_refcount(struct s32cc_clk_obj *module,
  832. const struct s32cc_clk_drv *drv,
  833. unsigned int depth)
  834. {
  835. return exec_cb_with_refcount(enable_module, module, drv, false, depth);
  836. }
  837. static int s32cc_clk_enable(unsigned long id)
  838. {
  839. const struct s32cc_clk_drv *drv = get_drv();
  840. unsigned int depth = MAX_STACK_DEPTH;
  841. struct s32cc_clk *clk;
  842. clk = s32cc_get_arch_clk(id);
  843. if (clk == NULL) {
  844. return -EINVAL;
  845. }
  846. return enable_module_with_refcount(&clk->desc, drv, depth);
  847. }
/* clk_ops.disable hook. TODO(review): clock gating not implemented yet; this is a deliberate no-op. */
static void s32cc_clk_disable(unsigned long id)
{
}
/* clk_ops.is_enabled hook. TODO(review): state tracking not implemented; always reports 'disabled'. */
static bool s32cc_clk_is_enabled(unsigned long id)
{
return false;
}
/* clk_ops.get_rate hook. TODO(review): rate readback not implemented; always returns 0 Hz. */
static unsigned long s32cc_clk_get_rate(unsigned long id)
{
return 0;
}
  859. static int set_module_rate(const struct s32cc_clk_obj *module,
  860. unsigned long rate, unsigned long *orate,
  861. unsigned int *depth);
  862. static int set_osc_freq(const struct s32cc_clk_obj *module, unsigned long rate,
  863. unsigned long *orate, unsigned int *depth)
  864. {
  865. struct s32cc_osc *osc = s32cc_obj2osc(module);
  866. int ret;
  867. ret = update_stack_depth(depth);
  868. if (ret != 0) {
  869. return ret;
  870. }
  871. if ((osc->freq != 0UL) && (rate != osc->freq)) {
  872. ERROR("Already initialized oscillator. freq = %lu\n",
  873. osc->freq);
  874. return -EINVAL;
  875. }
  876. osc->freq = rate;
  877. *orate = osc->freq;
  878. return 0;
  879. }
  880. static int set_clk_freq(const struct s32cc_clk_obj *module, unsigned long rate,
  881. unsigned long *orate, unsigned int *depth)
  882. {
  883. const struct s32cc_clk *clk = s32cc_obj2clk(module);
  884. int ret;
  885. ret = update_stack_depth(depth);
  886. if (ret != 0) {
  887. return ret;
  888. }
  889. if ((clk->min_freq != 0UL) && (clk->max_freq != 0UL) &&
  890. ((rate < clk->min_freq) || (rate > clk->max_freq))) {
  891. ERROR("%lu frequency is out of the allowed range: [%lu:%lu]\n",
  892. rate, clk->min_freq, clk->max_freq);
  893. return -EINVAL;
  894. }
  895. if (clk->module != NULL) {
  896. return set_module_rate(clk->module, rate, orate, depth);
  897. }
  898. if (clk->pclock != NULL) {
  899. return set_clk_freq(&clk->pclock->desc, rate, orate, depth);
  900. }
  901. return -EINVAL;
  902. }
  903. static int set_pll_freq(const struct s32cc_clk_obj *module, unsigned long rate,
  904. unsigned long *orate, unsigned int *depth)
  905. {
  906. struct s32cc_pll *pll = s32cc_obj2pll(module);
  907. int ret;
  908. ret = update_stack_depth(depth);
  909. if (ret != 0) {
  910. return ret;
  911. }
  912. if ((pll->vco_freq != 0UL) && (pll->vco_freq != rate)) {
  913. ERROR("PLL frequency was already set\n");
  914. return -EINVAL;
  915. }
  916. pll->vco_freq = rate;
  917. *orate = pll->vco_freq;
  918. return 0;
  919. }
  920. static int set_pll_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
  921. unsigned long *orate, unsigned int *depth)
  922. {
  923. struct s32cc_pll_out_div *pdiv = s32cc_obj2plldiv(module);
  924. const struct s32cc_pll *pll;
  925. unsigned long prate, dc;
  926. int ret;
  927. ret = update_stack_depth(depth);
  928. if (ret != 0) {
  929. return ret;
  930. }
  931. if (pdiv->parent == NULL) {
  932. ERROR("Failed to identify PLL divider's parent\n");
  933. return -EINVAL;
  934. }
  935. pll = s32cc_obj2pll(pdiv->parent);
  936. if (pll == NULL) {
  937. ERROR("The parent of the PLL DIV is invalid\n");
  938. return -EINVAL;
  939. }
  940. prate = pll->vco_freq;
  941. /**
  942. * The PLL is not initialized yet, so let's take a risk
  943. * and accept the proposed rate.
  944. */
  945. if (prate == 0UL) {
  946. pdiv->freq = rate;
  947. *orate = rate;
  948. return 0;
  949. }
  950. /* Decline in case the rate cannot fit PLL's requirements. */
  951. dc = prate / rate;
  952. if ((prate / dc) != rate) {
  953. return -EINVAL;
  954. }
  955. pdiv->freq = rate;
  956. *orate = pdiv->freq;
  957. return 0;
  958. }
  959. static int set_fixed_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
  960. unsigned long *orate, unsigned int *depth)
  961. {
  962. const struct s32cc_fixed_div *fdiv = s32cc_obj2fixeddiv(module);
  963. int ret;
  964. ret = update_stack_depth(depth);
  965. if (ret != 0) {
  966. return ret;
  967. }
  968. if (fdiv->parent == NULL) {
  969. ERROR("The divider doesn't have a valid parent\b");
  970. return -EINVAL;
  971. }
  972. ret = set_module_rate(fdiv->parent, rate * fdiv->rate_div, orate, depth);
  973. /* Update the output rate based on the parent's rate */
  974. *orate /= fdiv->rate_div;
  975. return ret;
  976. }
  977. static int set_mux_freq(const struct s32cc_clk_obj *module, unsigned long rate,
  978. unsigned long *orate, unsigned int *depth)
  979. {
  980. const struct s32cc_clkmux *mux = s32cc_obj2clkmux(module);
  981. const struct s32cc_clk *clk = s32cc_get_arch_clk(mux->source_id);
  982. int ret;
  983. ret = update_stack_depth(depth);
  984. if (ret != 0) {
  985. return ret;
  986. }
  987. if (clk == NULL) {
  988. ERROR("Mux (id:%" PRIu8 ") without a valid source (%lu)\n",
  989. mux->index, mux->source_id);
  990. return -EINVAL;
  991. }
  992. return set_module_rate(&clk->desc, rate, orate, depth);
  993. }
  994. static int set_dfs_div_freq(const struct s32cc_clk_obj *module, unsigned long rate,
  995. unsigned long *orate, unsigned int *depth)
  996. {
  997. struct s32cc_dfs_div *dfs_div = s32cc_obj2dfsdiv(module);
  998. const struct s32cc_dfs *dfs;
  999. int ret;
  1000. ret = update_stack_depth(depth);
  1001. if (ret != 0) {
  1002. return ret;
  1003. }
  1004. if (dfs_div->parent == NULL) {
  1005. ERROR("Failed to identify DFS divider's parent\n");
  1006. return -EINVAL;
  1007. }
  1008. /* Sanity check */
  1009. dfs = s32cc_obj2dfs(dfs_div->parent);
  1010. if (dfs->parent == NULL) {
  1011. ERROR("Failed to identify DFS's parent\n");
  1012. return -EINVAL;
  1013. }
  1014. if ((dfs_div->freq != 0U) && (dfs_div->freq != rate)) {
  1015. ERROR("DFS DIV frequency was already set to %lu\n",
  1016. dfs_div->freq);
  1017. return -EINVAL;
  1018. }
  1019. dfs_div->freq = rate;
  1020. *orate = rate;
  1021. return ret;
  1022. }
  1023. static int set_module_rate(const struct s32cc_clk_obj *module,
  1024. unsigned long rate, unsigned long *orate,
  1025. unsigned int *depth)
  1026. {
  1027. int ret = 0;
  1028. ret = update_stack_depth(depth);
  1029. if (ret != 0) {
  1030. return ret;
  1031. }
  1032. ret = -EINVAL;
  1033. switch (module->type) {
  1034. case s32cc_clk_t:
  1035. ret = set_clk_freq(module, rate, orate, depth);
  1036. break;
  1037. case s32cc_osc_t:
  1038. ret = set_osc_freq(module, rate, orate, depth);
  1039. break;
  1040. case s32cc_pll_t:
  1041. ret = set_pll_freq(module, rate, orate, depth);
  1042. break;
  1043. case s32cc_pll_out_div_t:
  1044. ret = set_pll_div_freq(module, rate, orate, depth);
  1045. break;
  1046. case s32cc_fixed_div_t:
  1047. ret = set_fixed_div_freq(module, rate, orate, depth);
  1048. break;
  1049. case s32cc_clkmux_t:
  1050. ret = set_mux_freq(module, rate, orate, depth);
  1051. break;
  1052. case s32cc_shared_clkmux_t:
  1053. ret = set_mux_freq(module, rate, orate, depth);
  1054. break;
  1055. case s32cc_dfs_t:
  1056. ERROR("Setting the frequency of a DFS is not allowed!");
  1057. break;
  1058. case s32cc_dfs_div_t:
  1059. ret = set_dfs_div_freq(module, rate, orate, depth);
  1060. break;
  1061. default:
  1062. break;
  1063. }
  1064. return ret;
  1065. }
  1066. static int s32cc_clk_set_rate(unsigned long id, unsigned long rate,
  1067. unsigned long *orate)
  1068. {
  1069. unsigned int depth = MAX_STACK_DEPTH;
  1070. const struct s32cc_clk *clk;
  1071. int ret;
  1072. clk = s32cc_get_arch_clk(id);
  1073. if (clk == NULL) {
  1074. return -EINVAL;
  1075. }
  1076. ret = set_module_rate(&clk->desc, rate, orate, &depth);
  1077. if (ret != 0) {
  1078. ERROR("Failed to set frequency (%lu MHz) for clock %lu\n",
  1079. rate, id);
  1080. }
  1081. return ret;
  1082. }
  1083. static struct s32cc_clk_obj *get_no_parent(const struct s32cc_clk_obj *module)
  1084. {
  1085. return NULL;
  1086. }
  1087. typedef struct s32cc_clk_obj *(*get_parent_clb_t)(const struct s32cc_clk_obj *clk_obj);
  1088. static struct s32cc_clk_obj *get_module_parent(const struct s32cc_clk_obj *module)
  1089. {
  1090. static const get_parent_clb_t parents_clbs[12] = {
  1091. [s32cc_clk_t] = get_clk_parent,
  1092. [s32cc_osc_t] = get_no_parent,
  1093. [s32cc_pll_t] = get_pll_parent,
  1094. [s32cc_pll_out_div_t] = get_pll_div_parent,
  1095. [s32cc_clkmux_t] = get_mux_parent,
  1096. [s32cc_shared_clkmux_t] = get_mux_parent,
  1097. [s32cc_dfs_t] = get_dfs_parent,
  1098. [s32cc_dfs_div_t] = get_dfs_div_parent,
  1099. [s32cc_part_t] = get_no_parent,
  1100. [s32cc_part_block_t] = get_part_block_parent,
  1101. [s32cc_part_block_link_t] = get_part_block_link_parent,
  1102. };
  1103. uint32_t index;
  1104. if (module == NULL) {
  1105. return NULL;
  1106. }
  1107. index = (uint32_t)module->type;
  1108. if (index >= ARRAY_SIZE(parents_clbs)) {
  1109. ERROR("Undefined module type: %d\n", module->type);
  1110. return NULL;
  1111. }
  1112. if (parents_clbs[index] == NULL) {
  1113. ERROR("Undefined parent getter for type: %d\n", module->type);
  1114. return NULL;
  1115. }
  1116. return parents_clbs[index](module);
  1117. }
  1118. static int s32cc_clk_get_parent(unsigned long id)
  1119. {
  1120. struct s32cc_clk *parent_clk;
  1121. const struct s32cc_clk_obj *parent;
  1122. const struct s32cc_clk *clk;
  1123. unsigned long parent_id;
  1124. int ret;
  1125. clk = s32cc_get_arch_clk(id);
  1126. if (clk == NULL) {
  1127. return -EINVAL;
  1128. }
  1129. parent = get_module_parent(clk->module);
  1130. if (parent == NULL) {
  1131. return -EINVAL;
  1132. }
  1133. parent_clk = s32cc_obj2clk(parent);
  1134. if (parent_clk == NULL) {
  1135. return -EINVAL;
  1136. }
  1137. ret = s32cc_get_clk_id(parent_clk, &parent_id);
  1138. if (ret != 0) {
  1139. return ret;
  1140. }
  1141. if (parent_id > (unsigned long)INT_MAX) {
  1142. return -E2BIG;
  1143. }
  1144. return (int)parent_id;
  1145. }
  1146. static int s32cc_clk_set_parent(unsigned long id, unsigned long parent_id)
  1147. {
  1148. const struct s32cc_clk *parent;
  1149. const struct s32cc_clk *clk;
  1150. bool valid_source = false;
  1151. struct s32cc_clkmux *mux;
  1152. uint8_t i;
  1153. clk = s32cc_get_arch_clk(id);
  1154. if (clk == NULL) {
  1155. return -EINVAL;
  1156. }
  1157. parent = s32cc_get_arch_clk(parent_id);
  1158. if (parent == NULL) {
  1159. return -EINVAL;
  1160. }
  1161. if (!is_s32cc_clk_mux(clk)) {
  1162. ERROR("Clock %lu is not a mux\n", id);
  1163. return -EINVAL;
  1164. }
  1165. mux = s32cc_clk2mux(clk);
  1166. if (mux == NULL) {
  1167. ERROR("Failed to cast clock %lu to clock mux\n", id);
  1168. return -EINVAL;
  1169. }
  1170. for (i = 0; i < mux->nclks; i++) {
  1171. if (mux->clkids[i] == parent_id) {
  1172. valid_source = true;
  1173. break;
  1174. }
  1175. }
  1176. if (!valid_source) {
  1177. ERROR("Clock %lu is not a valid clock for mux %lu\n",
  1178. parent_id, id);
  1179. return -EINVAL;
  1180. }
  1181. mux->source_id = parent_id;
  1182. return 0;
  1183. }
  1184. void s32cc_clk_register_drv(void)
  1185. {
  1186. static const struct clk_ops s32cc_clk_ops = {
  1187. .enable = s32cc_clk_enable,
  1188. .disable = s32cc_clk_disable,
  1189. .is_enabled = s32cc_clk_is_enabled,
  1190. .get_rate = s32cc_clk_get_rate,
  1191. .set_rate = s32cc_clk_set_rate,
  1192. .get_parent = s32cc_clk_get_parent,
  1193. .set_parent = s32cc_clk_set_parent,
  1194. };
  1195. clk_register(&s32cc_clk_ops);
  1196. }