
/*
 * Marvell 88E61xx switch driver
 *
 * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
 * Copyright (c) 2014 Nikita Nazarenko <nnazarenko@radiofid.com>
 *
 * Based on code (c) 2008 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License v2 as published by the
 * Free Software Foundation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/switch.h>
#include <linux/device.h>
#include <linux/platform_device.h>

#include "mvsw61xx.h"

MODULE_DESCRIPTION("Marvell 88E61xx switch driver");
MODULE_AUTHOR("Claudio Leite <leitec@staticky.com>");
MODULE_AUTHOR("Nikita Nazarenko <nnazarenko@radiofid.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mvsw61xx");

/*
 * Register access is done through direct or indirect addressing,
 * depending on how the switch is physically connected.
 *
 * Direct addressing: all port and global registers directly
 *   accessible via an address/register pair
 *
 * Indirect addressing: switch is mapped at a single address,
 *   port and global registers accessible via a single command/data
 *   register pair
 */
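
/*
 * Poll a raw MII register until the masked value matches, giving up
 * after 100 reads. Used to wait for the indirect-access engine to
 * become idle.
 */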
static int
mvsw61xx_wait_mask_raw(struct mii_bus *bus, int addr,
		int reg, u16 mask, u16 val)
{
	int i = 100;
	u16 r;

	do {
		r = bus->read(bus, addr, reg);
		if ((r & mask) == val)
			return 0;
	} while (--i > 0);

	return -ETIMEDOUT;
}

static u16
r16(struct mii_bus *bus, bool indirect, int base_addr, int addr, int reg)
{
	u16 ind_addr;

	if (!indirect)
		return bus->read(bus, addr, reg);

	/* Indirect read: First, make sure switch is free */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Load address and request read */
	ind_addr = MV_INDIRECT_READ | (addr << MV_INDIRECT_ADDR_S) | reg;
	bus->write(bus, base_addr, MV_INDIRECT_REG_CMD,
			ind_addr);

	/* Wait until it's ready */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Read the requested data */
	return bus->read(bus, base_addr, MV_INDIRECT_REG_DATA);
}

static void
w16(struct mii_bus *bus, bool indirect, int base_addr, int addr,
		int reg, u16 val)
{
	u16 ind_addr;

	if (!indirect) {
		bus->write(bus, addr, reg, val);
		return;
	}

	/* Indirect write: First, make sure switch is free */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Load the data to be written */
	bus->write(bus, base_addr, MV_INDIRECT_REG_DATA, val);

	/* Wait again for switch to be free */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Load address, and issue write command */
	ind_addr = MV_INDIRECT_WRITE | (addr << MV_INDIRECT_ADDR_S) | reg;
	bus->write(bus, base_addr, MV_INDIRECT_REG_CMD,
			ind_addr);
}

/* swconfig support */

static inline u16
sr16(struct switch_dev *dev, int addr, int reg)
{
	struct mvsw61xx_state *state = get_state(dev);

	return r16(state->bus, state->is_indirect, state->base_addr, addr, reg);
}

static inline void
sw16(struct switch_dev *dev, int addr, int reg, u16 val)
{
	struct mvsw61xx_state *state = get_state(dev);

	w16(state->bus, state->is_indirect, state->base_addr, addr, reg, val);
}

static int
mvsw61xx_wait_mask_s(struct switch_dev *dev, int addr,
		int reg, u16 mask, u16 val)
{
	int i = 100;
	u16 r;

	do {
		r = sr16(dev, addr, reg) & mask;
		if (r == val)
			return 0;
	} while (--i > 0);

	return -ETIMEDOUT;
}
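
/*
 * Access the switch's internal PHY registers through the Global2
 * SMI command/data registers.
 */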
static int
mvsw61xx_mdio_read(struct switch_dev *dev, int addr, int reg)
{
	sw16(dev, MV_GLOBAL2REG(SMI_OP),
			MV_INDIRECT_READ | (addr << MV_INDIRECT_ADDR_S) | reg);

	if (mvsw61xx_wait_mask_s(dev, MV_GLOBAL2REG(SMI_OP),
				MV_INDIRECT_INPROGRESS, 0) < 0)
		return -ETIMEDOUT;

	return sr16(dev, MV_GLOBAL2REG(SMI_DATA));
}

static int
mvsw61xx_mdio_write(struct switch_dev *dev, int addr, int reg, u16 val)
{
	sw16(dev, MV_GLOBAL2REG(SMI_DATA), val);
	sw16(dev, MV_GLOBAL2REG(SMI_OP),
			MV_INDIRECT_WRITE | (addr << MV_INDIRECT_ADDR_S) | reg);

	return mvsw61xx_wait_mask_s(dev, MV_GLOBAL2REG(SMI_OP),
			MV_INDIRECT_INPROGRESS, 0) < 0;
}
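
/*
 * Paged PHY register access: select the page via MII_MV_PAGE, do the
 * access, then switch back to page 0.
 */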
static int
mvsw61xx_mdio_page_read(struct switch_dev *dev, int port, int page, int reg)
{
	int ret;

	mvsw61xx_mdio_write(dev, port, MII_MV_PAGE, page);
	ret = mvsw61xx_mdio_read(dev, port, reg);
	mvsw61xx_mdio_write(dev, port, MII_MV_PAGE, 0);

	return ret;
}

static void
mvsw61xx_mdio_page_write(struct switch_dev *dev, int port, int page, int reg,
		u16 val)
{
	mvsw61xx_mdio_write(dev, port, MII_MV_PAGE, page);
	mvsw61xx_mdio_write(dev, port, reg, val);
	mvsw61xx_mdio_write(dev, port, MII_MV_PAGE, 0);
}
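
/*
 * Report a port's port-based VLAN map (its VLANMAP register) as a
 * human-readable string; the port itself is shown in parentheses.
 */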
static int
mvsw61xx_get_port_mask(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	char *buf = state->buf;
	int port, len, i;
	u16 reg;

	port = val->port_vlan;
	reg = sr16(dev, MV_PORTREG(VLANMAP, port)) & MV_PORTS_MASK;

	len = sprintf(buf, "0x%04x: ", reg);

	for (i = 0; i < MV_PORTS; i++) {
		if (reg & (1 << i))
			len += sprintf(buf + len, "%d ", i);
		else if (i == port)
			len += sprintf(buf + len, "(%d) ", i);
	}

	val->value.s = buf;

	return 0;
}

static int
mvsw61xx_get_port_qmode(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->ports[val->port_vlan].qmode;

	return 0;
}

static int
mvsw61xx_set_port_qmode(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->ports[val->port_vlan].qmode = val->value.i;

	return 0;
}

static int
mvsw61xx_get_port_pvid(struct switch_dev *dev, int port, int *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	*val = state->ports[port].pvid;

	return 0;
}

static int
mvsw61xx_set_port_pvid(struct switch_dev *dev, int port, int val)
{
	struct mvsw61xx_state *state = get_state(dev);

	if (val < 0 || val >= MV_VLANS)
		return -EINVAL;

	state->ports[port].pvid = (u16)val;

	return 0;
}

static int
mvsw61xx_get_port_link(struct switch_dev *dev, int port,
		struct switch_port_link *link)
{
	u16 status, speed;

	status = sr16(dev, MV_PORTREG(STATUS, port));

	link->link = status & MV_PORT_STATUS_LINK;
	if (!link->link)
		return 0;

	link->duplex = status & MV_PORT_STATUS_FDX;

	speed = (status & MV_PORT_STATUS_SPEED_MASK) >>
			MV_PORT_STATUS_SPEED_SHIFT;

	switch (speed) {
	case MV_PORT_STATUS_SPEED_10:
		link->speed = SWITCH_PORT_SPEED_10;
		break;
	case MV_PORT_STATUS_SPEED_100:
		link->speed = SWITCH_PORT_SPEED_100;
		break;
	case MV_PORT_STATUS_SPEED_1000:
		link->speed = SWITCH_PORT_SPEED_1000;
		break;
	}

	return 0;
}
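
/*
 * Translate between swconfig port lists and the cached per-VLAN
 * member mask and per-port egress (tagged/untagged) mode.
 */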
static int mvsw61xx_get_vlan_ports(struct switch_dev *dev,
		struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i, j, mode, vno;

	vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	for (i = 0, j = 0; i < dev->ports; i++) {
		if (state->vlans[vno].mask & (1 << i)) {
			val->value.ports[j].id = i;

			mode = (state->vlans[vno].port_mode >> (i * 4)) & 0xf;
			if (mode == MV_VTUCTL_EGRESS_TAGGED)
				val->value.ports[j].flags =
					(1 << SWITCH_PORT_FLAG_TAGGED);
			else
				val->value.ports[j].flags = 0;

			j++;
		}
	}

	val->len = j;

	return 0;
}

static int mvsw61xx_set_vlan_ports(struct switch_dev *dev,
		struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i, mode, pno, vno;

	vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	state->vlans[vno].mask = 0;
	state->vlans[vno].port_mode = 0;
	state->vlans[vno].port_sstate = 0;

	if (state->vlans[vno].vid == 0)
		state->vlans[vno].vid = vno;

	for (i = 0; i < val->len; i++) {
		pno = val->value.ports[i].id;

		state->vlans[vno].mask |= (1 << pno);
		if (val->value.ports[i].flags &
				(1 << SWITCH_PORT_FLAG_TAGGED))
			mode = MV_VTUCTL_EGRESS_TAGGED;
		else
			mode = MV_VTUCTL_EGRESS_UNTAGGED;

		state->vlans[vno].port_mode |= mode << (pno * 4);
		state->vlans[vno].port_sstate |=
			MV_STUCTL_STATE_FORWARDING << (pno * 4 + 2);
	}

	/*
	 * DISCARD is nonzero, so it must be explicitly
	 * set on ports not in the VLAN.
	 */
	for (i = 0; i < dev->ports; i++)
		if (!(state->vlans[vno].mask & (1 << i)))
			state->vlans[vno].port_mode |=
				MV_VTUCTL_DISCARD << (i * 4);

	return 0;
}

static int mvsw61xx_get_vlan_port_based(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	if (state->vlans[vno].port_based)
		val->value.i = 1;
	else
		val->value.i = 0;

	return 0;
}

static int mvsw61xx_set_vlan_port_based(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	if (val->value.i == 1)
		state->vlans[vno].port_based = true;
	else
		state->vlans[vno].port_based = false;

	return 0;
}

static int mvsw61xx_get_vid(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	val->value.i = state->vlans[vno].vid;

	return 0;
}

static int mvsw61xx_set_vid(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	state->vlans[vno].vid = val->value.i;

	return 0;
}

static int mvsw61xx_get_enable_vlan(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->vlan_enabled;

	return 0;
}

static int mvsw61xx_set_enable_vlan(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->vlan_enabled = val->value.i;

	return 0;
}

static int mvsw61xx_get_mirror_rx_enable(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->mirror_rx;

	return 0;
}

static int mvsw61xx_set_mirror_rx_enable(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->mirror_rx = val->value.i;

	return 0;
}

static int mvsw61xx_get_mirror_tx_enable(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->mirror_tx;

	return 0;
}

static int mvsw61xx_set_mirror_tx_enable(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->mirror_tx = val->value.i;

	return 0;
}

static int mvsw61xx_get_mirror_monitor_port(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->monitor_port;

	return 0;
}

static int mvsw61xx_set_mirror_monitor_port(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->monitor_port = val->value.i;

	return 0;
}

static int mvsw61xx_get_mirror_source_port(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->source_port;

	return 0;
}

static int mvsw61xx_set_mirror_source_port(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->source_port = val->value.i;

	return 0;
}
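
/*
 * Flush the VTU, then load an STU entry (per-port spanning tree
 * state) and a VTU entry (per-port egress mode, FID) for every
 * 802.1q VLAN configured in the driver state.
 */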
static int mvsw61xx_vtu_program(struct switch_dev *dev)
{
	struct mvsw61xx_state *state = get_state(dev);
	u16 v1, v2, s1, s2;
	int i;

	/* Flush */
	mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
			MV_VTUOP_INPROGRESS, 0);
	sw16(dev, MV_GLOBALREG(VTU_OP),
			MV_VTUOP_INPROGRESS | MV_VTUOP_PURGE);

	/* Write VLAN table */
	for (i = 1; i < dev->vlans; i++) {
		if (state->vlans[i].mask == 0 ||
				state->vlans[i].vid == 0 ||
				state->vlans[i].port_based == true)
			continue;

		mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS, 0);

		/* Write per-VLAN port state into STU */
		s1 = (u16) (state->vlans[i].port_sstate & 0xffff);
		s2 = (u16) ((state->vlans[i].port_sstate >> 16) & 0xffff);

		sw16(dev, MV_GLOBALREG(VTU_VID), MV_VTU_VID_VALID);
		sw16(dev, MV_GLOBALREG(VTU_SID), i);
		sw16(dev, MV_GLOBALREG(VTU_DATA1), s1);
		sw16(dev, MV_GLOBALREG(VTU_DATA2), s2);
		sw16(dev, MV_GLOBALREG(VTU_DATA3), 0);

		sw16(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS | MV_VTUOP_STULOAD);
		mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS, 0);

		/* Write VLAN information into VTU */
		v1 = (u16) (state->vlans[i].port_mode & 0xffff);
		v2 = (u16) ((state->vlans[i].port_mode >> 16) & 0xffff);

		sw16(dev, MV_GLOBALREG(VTU_VID),
				MV_VTU_VID_VALID | state->vlans[i].vid);
		sw16(dev, MV_GLOBALREG(VTU_SID), i);
		sw16(dev, MV_GLOBALREG(VTU_FID), i);
		sw16(dev, MV_GLOBALREG(VTU_DATA1), v1);
		sw16(dev, MV_GLOBALREG(VTU_DATA2), v2);
		sw16(dev, MV_GLOBALREG(VTU_DATA3), 0);

		sw16(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS | MV_VTUOP_LOAD);
		mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS, 0);
	}

	return 0;
}
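
/*
 * Derive per-port defaults from one VLAN definition: PVID for
 * untagged members, the port-based forwarding mask and FDB number
 * for port-based VLANs, or secure 802.1q mode otherwise.
 */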
static void mvsw61xx_vlan_port_config(struct switch_dev *dev, int vno)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i, mode;

	for (i = 0; i < dev->ports; i++) {
		if (!(state->vlans[vno].mask & (1 << i)))
			continue;

		mode = (state->vlans[vno].port_mode >> (i * 4)) & 0xf;

		if (mode != MV_VTUCTL_EGRESS_TAGGED)
			state->ports[i].pvid = state->vlans[vno].vid;

		if (state->vlans[vno].port_based) {
			state->ports[i].mask |= state->vlans[vno].mask;
			state->ports[i].fdb = vno;
		} else
			state->ports[i].qmode = MV_8021Q_MODE_SECURE;
	}
}
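
/*
 * Push the cached driver state to the hardware: 802.1q-only mode,
 * per-port PVID, FDB number, port-based VLAN map and 802.1q mode,
 * then the VTU and the port mirroring configuration.
 */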
static int mvsw61xx_update_state(struct switch_dev *dev)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i;
	u16 reg;

	if (!state->registered)
		return -EINVAL;

	/*
	 * Set 802.1q-only mode if vlan_enabled is true.
	 *
	 * Without this, even if 802.1q is enabled for
	 * a port/VLAN, it still depends on the port-based
	 * VLAN mask being set.
	 *
	 * With this setting, port-based VLANs are still
	 * functional, provided the VID is not in the VTU.
	 */
	reg = sr16(dev, MV_GLOBAL2REG(SDET_POLARITY));

	if (state->vlan_enabled)
		reg |= MV_8021Q_VLAN_ONLY;
	else
		reg &= ~MV_8021Q_VLAN_ONLY;

	sw16(dev, MV_GLOBAL2REG(SDET_POLARITY), reg);

	/*
	 * Set port-based VLAN masks on each port
	 * based only on VLAN definitions known to
	 * the driver (i.e. in state).
	 *
	 * This means any pre-existing port mapping is
	 * wiped out once our driver is initialized.
	 */
	for (i = 0; i < dev->ports; i++) {
		state->ports[i].mask = 0;
		state->ports[i].qmode = MV_8021Q_MODE_DISABLE;
	}

	for (i = 0; i < dev->vlans; i++)
		mvsw61xx_vlan_port_config(dev, i);

	for (i = 0; i < dev->ports; i++) {
		reg = sr16(dev, MV_PORTREG(VLANID, i)) & ~MV_PVID_MASK;
		reg |= state->ports[i].pvid;
		sw16(dev, MV_PORTREG(VLANID, i), reg);

		state->ports[i].mask &= ~(1 << i);

		/* set default forwarding DB number and port mask */
		reg = sr16(dev, MV_PORTREG(CONTROL1, i)) & ~MV_FDB_HI_MASK;
		reg |= (state->ports[i].fdb >> MV_FDB_HI_SHIFT) &
				MV_FDB_HI_MASK;
		sw16(dev, MV_PORTREG(CONTROL1, i), reg);

		reg = ((state->ports[i].fdb & 0xf) << MV_FDB_LO_SHIFT) |
			state->ports[i].mask;
		sw16(dev, MV_PORTREG(VLANMAP, i), reg);

		reg = sr16(dev, MV_PORTREG(CONTROL2, i)) &
				~MV_8021Q_MODE_MASK;
		reg |= state->ports[i].qmode << MV_8021Q_MODE_SHIFT;
		sw16(dev, MV_PORTREG(CONTROL2, i), reg);
	}

	mvsw61xx_vtu_program(dev);

	/* port mirroring: reset all mirror registers first */
	for (i = 0; i < dev->ports; i++) {
		reg = sr16(dev, MV_PORTREG(CONTROL2, i));
		reg &= ~(MV_MIRROR_RX_SRC_MASK | MV_MIRROR_TX_SRC_MASK);
		sw16(dev, MV_PORTREG(CONTROL2, i), reg);
	}
	reg = sr16(dev, MV_GLOBALREG(MONITOR_CTRL));
	reg |= MV_MIRROR_RX_DEST_MASK | MV_MIRROR_TX_DEST_MASK;
	sw16(dev, MV_GLOBALREG(MONITOR_CTRL), reg);

	/* now enable mirroring if necessary */
	if (state->mirror_rx) {
		/* set ingress monitor source */
		reg = sr16(dev, MV_PORTREG(CONTROL2, state->source_port)) &
				~MV_MIRROR_RX_SRC_MASK;
		reg |= state->mirror_rx << MV_MIRROR_RX_SRC_SHIFT;
		sw16(dev, MV_PORTREG(CONTROL2, state->source_port), reg);

		/* set ingress monitor destination */
		reg = sr16(dev, MV_GLOBALREG(MONITOR_CTRL)) &
				~MV_MIRROR_RX_DEST_MASK;
		reg |= state->monitor_port << MV_MIRROR_RX_DEST_SHIFT;
		sw16(dev, MV_GLOBALREG(MONITOR_CTRL), reg);
	}

	if (state->mirror_tx) {
		/* set egress monitor source */
		reg = sr16(dev, MV_PORTREG(CONTROL2, state->source_port)) &
				~MV_MIRROR_TX_SRC_MASK;
		reg |= state->mirror_tx << MV_MIRROR_TX_SRC_SHIFT;
		sw16(dev, MV_PORTREG(CONTROL2, state->source_port), reg);

		/* set egress monitor destination */
		reg = sr16(dev, MV_GLOBALREG(MONITOR_CTRL)) &
				~MV_MIRROR_TX_DEST_MASK;
		reg |= state->monitor_port << MV_MIRROR_TX_DEST_SHIFT;
		sw16(dev, MV_GLOBALREG(MONITOR_CTRL), reg);
	}

	return 0;
}

static int mvsw61xx_apply(struct switch_dev *dev)
{
	return mvsw61xx_update_state(dev);
}
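
/*
 * Power up the fiber/SerDes interface by clearing BMCR_PDOWN on the
 * SerDes PHY's paged registers, if it was powered down.
 */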
static void mvsw61xx_enable_serdes(struct switch_dev *dev)
{
	int bmcr = mvsw61xx_mdio_page_read(dev, MV_REG_FIBER_SERDES,
			MV_PAGE_FIBER_SERDES, MII_BMCR);
	if (bmcr < 0)
		return;

	if (bmcr & BMCR_PDOWN)
		mvsw61xx_mdio_page_write(dev, MV_REG_FIBER_SERDES,
				MV_PAGE_FIBER_SERDES, MII_BMCR,
				bmcr & ~BMCR_PDOWN);
}
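
/*
 * Reset the switch: disable forwarding on all ports, issue a global
 * reset, clear the cached per-port and per-VLAN state, optionally
 * (full reset) power up the copper PHYs and SerDes, then re-enable
 * forwarding.
 */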
static int _mvsw61xx_reset(struct switch_dev *dev, bool full)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i;
	u16 reg;

	/* Disable all ports before reset */
	for (i = 0; i < dev->ports; i++) {
		reg = sr16(dev, MV_PORTREG(CONTROL, i)) &
			~MV_PORTCTRL_FORWARDING;
		sw16(dev, MV_PORTREG(CONTROL, i), reg);
	}

	reg = sr16(dev, MV_GLOBALREG(CONTROL)) | MV_CONTROL_RESET;

	sw16(dev, MV_GLOBALREG(CONTROL), reg);
	if (mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(CONTROL),
				MV_CONTROL_RESET, 0) < 0)
		return -ETIMEDOUT;

	for (i = 0; i < dev->ports; i++) {
		state->ports[i].fdb = 0;
		state->ports[i].qmode = 0;
		state->ports[i].mask = 0;
		state->ports[i].pvid = 0;

		/* Force flow control off */
		reg = sr16(dev, MV_PORTREG(PHYCTL, i)) & ~MV_PHYCTL_FC_MASK;
		reg |= MV_PHYCTL_FC_DISABLE;
		sw16(dev, MV_PORTREG(PHYCTL, i), reg);

		/* Set port association vector */
		sw16(dev, MV_PORTREG(ASSOC, i), (1 << i));

		/* power up phys */
		if (full && i < 5) {
			mvsw61xx_mdio_write(dev, i, MII_MV_SPEC_CTRL,
					MV_SPEC_MDI_CROSS_AUTO |
					MV_SPEC_ENERGY_DETECT |
					MV_SPEC_DOWNSHIFT_COUNTER);
			mvsw61xx_mdio_write(dev, i, MII_BMCR, BMCR_RESET |
					BMCR_ANENABLE | BMCR_FULLDPLX |
					BMCR_SPEED1000);
		}

		/* enable SerDes if necessary */
		if (full && i >= 5 && state->model == MV_IDENT_VALUE_6176) {
			u16 sts = sr16(dev, MV_PORTREG(STATUS, i));
			u16 mode = sts & MV_PORT_STATUS_CMODE_MASK;

			if (mode == MV_PORT_STATUS_CMODE_100BASE_X ||
			    mode == MV_PORT_STATUS_CMODE_1000BASE_X ||
			    mode == MV_PORT_STATUS_CMODE_SGMII) {
				mvsw61xx_enable_serdes(dev);
			}
		}
	}

	for (i = 0; i < dev->vlans; i++) {
		state->vlans[i].port_based = false;
		state->vlans[i].mask = 0;
		state->vlans[i].vid = 0;
		state->vlans[i].port_mode = 0;
		state->vlans[i].port_sstate = 0;
	}

	state->vlan_enabled = 0;

	state->mirror_rx = false;
	state->mirror_tx = false;
	state->source_port = 0;
	state->monitor_port = 0;

	mvsw61xx_update_state(dev);

	/* Re-enable ports */
	for (i = 0; i < dev->ports; i++) {
		reg = sr16(dev, MV_PORTREG(CONTROL, i)) |
			MV_PORTCTRL_FORWARDING;
		sw16(dev, MV_PORTREG(CONTROL, i), reg);
	}

	return 0;
}

static int mvsw61xx_reset(struct switch_dev *dev)
{
	return _mvsw61xx_reset(dev, false);
}

enum {
	MVSW61XX_VLAN_PORT_BASED,
	MVSW61XX_VLAN_ID,
};

enum {
	MVSW61XX_PORT_MASK,
	MVSW61XX_PORT_QMODE,
};

static const struct switch_attr mvsw61xx_global[] = {
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_vlan",
		.description = "Enable 802.1q VLAN support",
		.get = mvsw61xx_get_enable_vlan,
		.set = mvsw61xx_set_enable_vlan,
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_rx",
		.description = "Enable mirroring of RX packets",
		.set = mvsw61xx_set_mirror_rx_enable,
		.get = mvsw61xx_get_mirror_rx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "enable_mirror_tx",
		.description = "Enable mirroring of TX packets",
		.set = mvsw61xx_set_mirror_tx_enable,
		.get = mvsw61xx_get_mirror_tx_enable,
		.max = 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_monitor_port",
		.description = "Mirror monitor port",
		.set = mvsw61xx_set_mirror_monitor_port,
		.get = mvsw61xx_get_mirror_monitor_port,
		.max = MV_PORTS - 1
	},
	{
		.type = SWITCH_TYPE_INT,
		.name = "mirror_source_port",
		.description = "Mirror source port",
		.set = mvsw61xx_set_mirror_source_port,
		.get = mvsw61xx_get_mirror_source_port,
		.max = MV_PORTS - 1
	},
};

static const struct switch_attr mvsw61xx_vlan[] = {
	[MVSW61XX_VLAN_PORT_BASED] = {
		.id = MVSW61XX_VLAN_PORT_BASED,
		.type = SWITCH_TYPE_INT,
		.name = "port_based",
		.description = "Use port-based (non-802.1q) VLAN only",
		.get = mvsw61xx_get_vlan_port_based,
		.set = mvsw61xx_set_vlan_port_based,
	},
	[MVSW61XX_VLAN_ID] = {
		.id = MVSW61XX_VLAN_ID,
		.type = SWITCH_TYPE_INT,
		.name = "vid",
		.description = "Get/set VLAN ID",
		.get = mvsw61xx_get_vid,
		.set = mvsw61xx_set_vid,
	},
};

static const struct switch_attr mvsw61xx_port[] = {
	[MVSW61XX_PORT_MASK] = {
		.id = MVSW61XX_PORT_MASK,
		.type = SWITCH_TYPE_STRING,
		.description = "Port-based VLAN mask",
		.name = "mask",
		.get = mvsw61xx_get_port_mask,
		.set = NULL,
	},
	[MVSW61XX_PORT_QMODE] = {
		.id = MVSW61XX_PORT_QMODE,
		.type = SWITCH_TYPE_INT,
		.description = "802.1q mode: 0=off/1=fallback/2=check/3=secure",
		.name = "qmode",
		.get = mvsw61xx_get_port_qmode,
		.set = mvsw61xx_set_port_qmode,
	},
};

static const struct switch_dev_ops mvsw61xx_ops = {
	.attr_global = {
		.attr = mvsw61xx_global,
		.n_attr = ARRAY_SIZE(mvsw61xx_global),
	},
	.attr_vlan = {
		.attr = mvsw61xx_vlan,
		.n_attr = ARRAY_SIZE(mvsw61xx_vlan),
	},
	.attr_port = {
		.attr = mvsw61xx_port,
		.n_attr = ARRAY_SIZE(mvsw61xx_port),
	},
	.get_port_link = mvsw61xx_get_port_link,
	.get_port_pvid = mvsw61xx_get_port_pvid,
	.set_port_pvid = mvsw61xx_set_port_pvid,
	.get_vlan_ports = mvsw61xx_get_vlan_ports,
	.set_vlan_ports = mvsw61xx_set_vlan_ports,
	.apply_config = mvsw61xx_apply,
	.reset_switch = mvsw61xx_reset,
};

/* end swconfig stuff */
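
/*
 * Probe: look up the MII bus from the device tree, detect the switch
 * model from its identification register, do a full reset and
 * register the swconfig device.
 */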
static int mvsw61xx_probe(struct platform_device *pdev)
{
	struct mvsw61xx_state *state;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *mdio;
	char *model_str;
	u32 val;
	int err;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	mdio = of_parse_phandle(np, "mii-bus", 0);
	if (!mdio) {
		dev_err(&pdev->dev, "Couldn't get MII bus handle\n");
		err = -ENODEV;
		goto out_err;
	}

	state->bus = of_mdio_find_bus(mdio);
	if (!state->bus) {
		dev_err(&pdev->dev, "Couldn't find MII bus from handle\n");
		err = -ENODEV;
		goto out_err;
	}

	state->is_indirect = of_property_read_bool(np, "is-indirect");

	if (state->is_indirect) {
		if (of_property_read_u32(np, "reg", &val)) {
			dev_err(&pdev->dev, "Switch address not specified\n");
			err = -ENODEV;
			goto out_err;
		}

		state->base_addr = val;
	} else {
		state->base_addr = MV_BASE;
	}

	state->model = r16(state->bus, state->is_indirect, state->base_addr,
			MV_PORTREG(IDENT, 0)) & MV_IDENT_MASK;

	switch (state->model) {
	case MV_IDENT_VALUE_6171:
		model_str = MV_IDENT_STR_6171;
		break;
	case MV_IDENT_VALUE_6172:
		model_str = MV_IDENT_STR_6172;
		break;
	case MV_IDENT_VALUE_6176:
		model_str = MV_IDENT_STR_6176;
		break;
	case MV_IDENT_VALUE_6352:
		model_str = MV_IDENT_STR_6352;
		break;
	default:
		dev_err(&pdev->dev, "No compatible switch found at 0x%02x\n",
				state->base_addr);
		err = -ENODEV;
		goto out_err;
	}

	platform_set_drvdata(pdev, state);
	dev_info(&pdev->dev, "Found %s at %s:%02x\n", model_str,
			state->bus->id, state->base_addr);

	dev_info(&pdev->dev, "Using %sdirect addressing\n",
			(state->is_indirect ? "in" : ""));

	if (of_property_read_u32(np, "cpu-port-0", &val)) {
		dev_err(&pdev->dev, "CPU port not set\n");
		err = -ENODEV;
		goto out_err;
	}

	state->cpu_port0 = val;

	if (!of_property_read_u32(np, "cpu-port-1", &val))
		state->cpu_port1 = val;
	else
		state->cpu_port1 = -1;

	state->dev.vlans = MV_VLANS;
	state->dev.cpu_port = state->cpu_port0;
	state->dev.ports = MV_PORTS;
	state->dev.name = model_str;
	state->dev.ops = &mvsw61xx_ops;
	state->dev.alias = dev_name(&pdev->dev);

	_mvsw61xx_reset(&state->dev, true);

	err = register_switch(&state->dev, NULL);
	if (err < 0)
		goto out_err;

	state->registered = true;

	return 0;

out_err:
	kfree(state);
	return err;
}

static int
mvsw61xx_remove(struct platform_device *pdev)
{
	struct mvsw61xx_state *state = platform_get_drvdata(pdev);

	if (state->registered)
		unregister_switch(&state->dev);

	kfree(state);

	return 0;
}

static const struct of_device_id mvsw61xx_match[] = {
	{ .compatible = "marvell,88e6171" },
	{ .compatible = "marvell,88e6172" },
	{ .compatible = "marvell,88e6176" },
	{ .compatible = "marvell,88e6352" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvsw61xx_match);

static struct platform_driver mvsw61xx_driver = {
	.probe = mvsw61xx_probe,
	.remove = mvsw61xx_remove,
	.driver = {
		.name = "mvsw61xx",
		.of_match_table = of_match_ptr(mvsw61xx_match),
		.owner = THIS_MODULE,
	},
};

static int __init mvsw61xx_module_init(void)
{
	return platform_driver_register(&mvsw61xx_driver);
}
late_initcall(mvsw61xx_module_init);

static void __exit mvsw61xx_module_exit(void)
{
	platform_driver_unregister(&mvsw61xx_driver);
}
module_exit(mvsw61xx_module_exit);