ether1116.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747
  1. /*
  2. * marvell kirkwood gigabit ethernet (88e1116 and 88e1121) driver
  3. * (as found in the sheevaplug, openrd and guruplug).
  4. * the main difference is the flavour of phy kludgery necessary.
  5. *
  6. * from /public/doc/marvell/88f61xx.kirkwood.pdf,
  7. * /public/doc/marvell/88e1116.pdf, and
  8. * /public/doc/marvell/88e1121r.pdf.
  9. */
  10. #include "u.h"
  11. #include "../port/lib.h"
  12. #include "mem.h"
  13. #include "dat.h"
  14. #include "fns.h"
  15. #include "io.h"
  16. #include "../port/error.h"
  17. #include "../port/netif.h"
  18. #include "etherif.h"
  19. #include "ethermii.h"
  20. #include "../ip/ip.h"
#define MIIDBG	if(0)iprint		/* mii debugging; change 0 to 1 to enable */

/* field builders for the cpu-to-gbe address decode windows */
#define WINATTR(v)	(((v) & MASK(8)) << 8)
#define WINSIZE(v)	(((v)/(64*1024) - 1) << 16)
enum {
	Nrx		= 512,		/* receive descriptors in the ring */
	Ntx		= 32,		/* transmit descriptors in the ring */
	Nrxblks		= 1024,		/* receive buffers in the shared pool */
	Rxblklen	= 2+1522,	/* ifc. supplies first 2 bytes as padding */

	Maxrxintrsec	= 20*1000,	/* max. rx intrs. / sec */
	Etherstuck	= 70,	/* must send or receive a packet in this many sec.s */

	Descralign	= 16,		/* hw-required descriptor alignment */
	Bufalign	= 8,		/* hw-required packet buffer alignment */

	Pass		= 1,		/* accept packets */
	Qno		= 0,		/* do everything on queue zero */
};
typedef struct Ctlr Ctlr;
typedef struct Gbereg Gbereg;
typedef struct Mibstats Mibstats;
typedef struct Rx Rx;
typedef struct Tx Tx;

/* pool of free receive Blocks, shared by all controllers; see rxfreeb/rxallocb */
static struct {
	Lock;
	Block	*head;
} freeblocks;
/* hardware receive buffer descriptor; layout fixed by the hardware */
struct Rx {
	ulong	cs;		/* command/status (RCS* bits) */
	ulong	countsize;	/* bytes, buffer size */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Rx */
};
/* hardware transmit buffer descriptor; layout fixed by the hardware */
struct Tx {
	ulong	cs;		/* command/status (TCS* bits) */
	ulong	countchk;	/* bytes, checksum */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Tx */
};
/* fixed by hw; part of Gberegs — field order and sizes must not change */
struct Mibstats {
	union {
		uvlong	rxby;		/* good bytes rcv'd */
		struct {
			ulong	rxbylo;
			ulong	rxbyhi;
		};
	};
	ulong	badrxby;		/* bad bytes rcv'd */
	ulong	mactxerr;		/* tx err pkts */
	ulong	rxpkt;			/* good pkts rcv'd */
	ulong	badrxpkt;		/* bad pkts rcv'd */
	ulong	rxbcastpkt;		/* b'cast pkts rcv'd */
	ulong	rxmcastpkt;		/* m'cast pkts rcv'd */

	ulong	rx64;			/* pkts <= 64 bytes */
	ulong	rx65_127;		/* pkts 65—127 bytes */
	ulong	rx128_255;		/* pkts 128—255 bytes */
	ulong	rx256_511;		/* pkts 256—511 bytes */
	ulong	rx512_1023;		/* pkts 512—1023 bytes */
	ulong	rx1024_max;		/* pkts >= 1024 bytes */

	union {
		uvlong	txby;		/* good bytes sent */
		struct {
			ulong	txbylo;
			ulong	txbyhi;
		};
	};
	ulong	txpkt;			/* good pkts sent */
	/* half-duplex: pkts dropped due to excessive collisions */
	ulong	txcollpktdrop;
	ulong	txmcastpkt;		/* m'cast pkts sent */
	ulong	txbcastpkt;		/* b'cast pkts sent */

	ulong	badmacctlpkts;		/* bad mac ctl pkts */
	ulong	txflctl;		/* flow-control pkts sent */
	ulong	rxflctl;		/* good flow-control pkts rcv'd */
	ulong	badrxflctl;		/* bad flow-control pkts rcv'd */

	ulong	rxundersized;		/* runts */
	ulong	rxfrags;		/* fragments rcv'd */
	ulong	rxtoobig;		/* oversized pkts rcv'd */
	ulong	rxjabber;		/* jabber pkts rcv'd */
	ulong	rxerr;			/* rx error events */
	ulong	crcerr;			/* crc error events */
	ulong	collisions;		/* collision events */
	ulong	latecoll;		/* late collisions */
};
/* per-controller software state */
struct Ctlr {
	Lock;
	Ether	*ether;
	Gbereg	*reg;		/* memory-mapped device registers */

	Lock	initlock;
	int	init;

	Rx	*rx;		/* receive descriptors */
	Block	*rxb[Nrx];	/* blocks belonging to the descriptors */
	int	rxhead;		/* descr ethernet will write to next */
	int	rxtail;		/* next descr that might need a buffer */
	Rendez	rrendez;	/* interrupt wakes up read process */
	int	haveinput;

	Tx	*tx;
	Block	*txb[Ntx];
	int	txhead;		/* next descr we can use for new packet */
	int	txtail;		/* next descr to reclaim on tx complete */

	Mii	*mii;
	int	port;

	/* stats */
	ulong	intrs;
	ulong	newintrs;
	ulong	txunderrun;
	ulong	txringfull;
	ulong	rxdiscard;
	ulong	rxoverrun;
	ulong	nofirstlast;

	Mibstats;		/* accumulated copy of the hw mib counters */
};
#define Rxqon(q)	(1<<(q))
#define Txqon(q)	(1<<(q))

/* device register bit definitions */
enum {
	/* euc bits */
	Portreset	= 1 << 20,

	/* sdma config, sdc bits */
	Burst1		= 0,
	Burst2,
	Burst4,
	Burst8,
	Burst16,
	SDCrifb		= 1<<0,		/* rx intr on pkt boundaries */
#define SDCrxburst(v)	((v)<<1)
	SDCrxnobyteswap	= 1<<4,
	SDCtxnobyteswap	= 1<<5,
	SDCswap64byte	= 1<<6,
#define SDCtxburst(v)	((v)<<22)
	/* rx intr ipg (inter packet gap) */
#define SDCipgintrx(v)	((((v)>>15) & 1)<<25) | (((v) & MASK(15))<<7)

	/* portcfg bits */
	PCFGupromisc	= 1<<0,		/* unicast promiscuous mode */
#define Rxqdefault(q)	((q)<<1)
#define Rxqarp(q)	((q)<<4)
	PCFGbcrejectnoiparp = 1<<7,
	PCFGbcrejectip	= 1<<8,
	PCFGbcrejectarp	= 1<<9,
	PCFGamnotxes	= 1<<12,	/* auto mode, no summary update on tx */
	PCFGtcpq	= 1<<14,	/* capture tcp frames to tcpq */
	PCFGudpq	= 1<<15,	/* capture udp frames to udpq */
#define Rxqtcp(q)	((q)<<16)
#define Rxqudp(q)	((q)<<19)
#define Rxqbpdu(q)	((q)<<22)
	PCFGrxcs	= 1<<25,	/* rx tcp checksum mode with header */

	/* portcfgx bits */
	PCFGXspanq	= 1<<1,
	PCFGXcrcoff	= 1<<2,		/* no ethernet crc */

	/* port serial control0, psc0 bits */
	PSC0porton	= 1<<0,
	PSC0forcelinkup	= 1<<1,
	PSC0an_dplxoff	= 1<<2,		/* an_ = auto. negotiate */
	PSC0an_flctloff	= 1<<3,
	PSC0an_pauseadv	= 1<<4,
	PSC0nofrclinkdown = 1<<10,
	PSC0an_spdoff	= 1<<13,
	PSC0dteadv	= 1<<14,	/* dte advertise */

	/* max. input pkt size */
#define PSC0mru(v)	((v)<<17)
	PSC0mrumask	= PSC0mru(MASK(3)),
	PSC0mru1518	= 0,		/* 1500+2* 6(addrs) +2 + 4(crc) */
	PSC0mru1522,			/* 1518 + 4(vlan tags) */
	PSC0mru1552,			/* `baby giant' */
	PSC0mru9022,			/* `jumbo' */
	PSC0mru9192,			/* bigger jumbo */
	PSC0mru9700,			/* still bigger jumbo */

	PSC0fd_frc	= 1<<21,	/* force full duplex */
	PSC0flctlfrc	= 1<<22,
	PSC0gmiispd_gbfrc = 1<<23,
	PSC0miispdfrc100mbps = 1<<24,

	/* port status 0, ps0 bits */
	PS0linkup	= 1<<1,
	PS0fd		= 1<<2,		/* full duplex */
	PS0flctl	= 1<<3,
	PS0gmii_gb	= 1<<4,
	PS0mii100mbps	= 1<<5,
	PS0txbusy	= 1<<7,
	PS0txfifoempty	= 1<<10,
	PS0rxfifo1empty	= 1<<11,
	PS0rxfifo2empty	= 1<<12,

	/* port serial control 1, psc1 bits */
	PSC1loopback	= 1<<1,
	PSC1mii		= 0<<2,
	PSC1rgmii	= 1<<3,		/* enable RGMII */
	PSC1portreset	= 1<<4,
	PSC1clockbypass	= 1<<5,
	PSC1iban	= 1<<6,
	PSC1iban_bypass	= 1<<7,
	PSC1iban_restart= 1<<8,
	PSC1_gbonly	= 1<<11,
	PSC1encolonbp	= 1<<15, /* "collision during back-pressure mib counting" */
	PSC1coldomlimmask= MASK(6)<<16,
#define PSC1coldomlim(v) (((v) & MASK(6))<<16)
	PSC1miiallowoddpreamble	= 1<<22,

	/* port status 1, ps1 bits */
	PS1rxpause	= 1<<0,
	PS1txpause	= 1<<1,
	PS1pressure	= 1<<2,
	PS1syncfail10ms	= 1<<3,
	PS1an_done	= 1<<4,
	PS1inbandan_bypassed = 1<<5,
	PS1serdesplllocked = 1<<6,
	PS1syncok	= 1<<7,
	PS1nosquelch	= 1<<8,

	/* irq bits */
	/* rx buf returned to cpu ownership, or frame reception finished */
	Irx		= 1<<0,
	Iextend		= 1<<1,		/* IEsum of irqe set */
#define Irxbufferq(q)	(1<<((q)+2))	/* rx buf returned to cpu ownership */
	Irxerr		= 1<<10,	/* input ring full, usually */
#define Irxerrq(q)	(1<<((q)+11))
#define Itxendq(q)	(1<<((q)+19))	/* tx dma stopped for q */
	Isum		= 1<<31,

	/* irq extended, irqe bits */
#define	IEtxbufferq(q)	(1<<((q)+0))	/* tx buf returned to cpu ownership */
#define	IEtxerrq(q)	(1<<((q)+8))
	IEphystschg	= 1<<16,
	IEptp		= 1<<17,
	IErxoverrun	= 1<<18,
	IEtxunderrun	= 1<<19,
	IElinkchg	= 1<<20,
	IEintaddrerr	= 1<<23,
	IEprbserr	= 1<<25,
	IEsum		= 1<<31,

	/* tx fifo urgent threshold (tx interrupt coalescing), pxtfut */
	/* NOTE(review): trailing `;' in this macro is harmless only when it is
	 * the entire right-hand side of an assignment statement — confirm uses */
#define TFUTipginttx(v)	(((v) & MASK(16))<<4);

	/* minimal frame size, mfs */
	MFS40by	= 10<<2,
	MFS44by	= 11<<2,
	MFS48by	= 12<<2,
	MFS52by	= 13<<2,
	MFS56by	= 14<<2,
	MFS60by	= 15<<2,
	MFS64by	= 16<<2,

	/* receive descriptor status */
	RCSmacerr	= 1<<0,
	RCSmacmask	= 3<<1,
	RCSmacce	= 0<<1,
	RCSmacor	= 1<<1,
	RCSmacmf	= 2<<1,
	RCSl4chkshift	= 3,
	RCSl4chkmask	= MASK(16),
	RCSvlan		= 1<<17,
	RCSbpdu		= 1<<18,
	RCSl4mask	= 3<<21,
	RCSl4tcp4	= 0<<21,
	RCSl4udp4	= 1<<21,
	RCSl4other	= 2<<21,
	RCSl4rsvd	= 3<<21,
	RCSl2ev2	= 1<<23,
	RCSl3ip4	= 1<<24,
	RCSip4headok	= 1<<25,
	RCSlast		= 1<<26,
	RCSfirst	= 1<<27,
	RCSunknownaddr	= 1<<28,
	RCSenableintr	= 1<<29,
	RCSl4chkok	= 1<<30,
	RCSdmaown	= 1<<31,

	/* transmit descriptor status */
	TCSmacerr	= 1<<0,
	TCSmacmask	= 3<<1,
	TCSmaclc	= 0<<1,
	TCSmacur	= 1<<1,
	TCSmacrl	= 2<<1,
	TCSllc		= 1<<9,
	TCSl4chkmode	= 1<<10,
	TCSipv4hdlenshift= 11,
	TCSvlan		= 1<<15,
	TCSl4type	= 1<<16,
	TCSgl4chk	= 1<<17,
	TCSgip4chk	= 1<<18,
	TCSpadding	= 1<<19,
	TCSlast		= 1<<20,
	TCSfirst	= 1<<21,
	TCSenableintr	= 1<<23,
	TCSautomode	= 1<<30,
	TCSdmaown	= 1<<31,
};
enum {
	/* SMI regs (phy management via reg->smi) */
	PhysmiTimeout	= 10000,	/* what units? in ms. */
	Physmidataoff	= 0,		/* Data */
	Physmidatamask	= 0xffff<<Physmidataoff,

	Physmiaddroff 	= 16,		/* PHY device addr */
	Physmiaddrmask	= 0x1f << Physmiaddroff,

	Physmiop	= 26,
	Physmiopmask	= 3<<Physmiop,
	PhysmiopWr	= 0<<Physmiop,
	PhysmiopRd	= 1<<Physmiop,

	PhysmiReadok	= 1<<27,
	PhysmiBusy	= 1<<28,

	SmiRegaddroff	= 21,		/* PHY device register addr */
	SmiRegaddrmask	= 0x1f << SmiRegaddroff,
};
/* memory-mapped gbe device registers; offsets fixed by the hardware,
 * maintained here with explicit _pad arrays */
struct Gbereg {
	ulong	phy;			/* PHY address */
	ulong	smi;			/* serial mgmt. interface */
	ulong	euda;			/* ether default address */
	ulong	eudid;			/* ether default id */
	uchar	_pad0[0x80-0x10];

	/* dma stuff */
	ulong	euirq;			/* interrupt cause */
	ulong	euirqmask;		/* interrupt mask */
	uchar	_pad1[0x94-0x88];
	ulong	euea;			/* error address */
	ulong	euiae;			/* internal error address */
	uchar	_pad2[0xb0-0x9c];
	ulong	euc;			/* control */
	uchar	_pad3[0x200-0xb4];
	struct {
		ulong	base;		/* window base */
		ulong	size;		/* window size */
	} base[6];
	uchar	_pad4[0x280-0x230];
	ulong	harr[4];		/* high address remap */
	ulong	bare;			/* base address enable */
	ulong	epap;			/* port access protect */
	uchar	_pad5[0x400-0x298];

	ulong	portcfg;		/* port configuration */
	ulong	portcfgx;		/* port config. extend */
	ulong	mii;			/* mii serial parameters */
	ulong	_pad6;
	ulong	evlane;			/* vlan ether type */
	ulong	macal;			/* mac address low */
	ulong	macah;			/* mac address high */
	ulong	sdc;			/* sdma config. */
	ulong	dscp[7];		/* ip diff. serv. code point -> pri */
	ulong	psc0;			/* port serial control 0 */
	ulong	vpt2p;			/* vlan priority tag -> pri */
	ulong	ps0;			/* ether port status 0 */
	ulong	tqc;			/* transmit queue command */
	ulong	psc1;			/* port serial control 1 */
	ulong	ps1;			/* ether port status 1 */
	ulong	mvhdr;			/* marvell header */
	ulong	_pad8[2];

	/* interrupts */
	ulong	irq;			/* interrupt cause; some rw0c bits */
	ulong	irqe;			/* " " extended; some rw0c bits */
	ulong	irqmask;		/* interrupt mask (actually enable) */
	ulong	irqemask;		/* " " extended */

	ulong	_pad9;
	ulong	pxtfut;			/* port tx fifo urgent threshold */
	ulong	_pad10;
	ulong	pxmfs;			/* port rx minimum frame size */
	ulong	_pad11;

	/*
	 * # of input frames discarded by addr filtering or lack of resources;
	 * zeroed upon read.
	 */
	ulong	pxdfc;			/* port rx discard frame counter */
	ulong	pxofc;			/* port overrun frame counter */
	ulong	_pad12[2];
	ulong	piae;			/* port internal address error */
	uchar	_pad13[0x4bc-0x498];
	ulong	etherprio;		/* ether type priority */
	uchar	_pad14[0x4dc-0x4c0];
	ulong	tqfpc;			/* tx queue fixed priority config. */
	ulong	pttbrc;			/* port tx token-bucket rate config. */
	ulong	tqc1;			/* tx queue command 1 */
	ulong	pmtu;			/* port maximum transmit unit */
	ulong	pmtbs;			/* port maximum token bucket size */
	uchar	_pad15[0x600-0x4f0];

	struct {
		ulong	_pad[3];
		ulong	r;		/* phys. addr.: cur. rx desc. ptrs */
	} crdp[8];
	ulong	rqc;			/* rx queue command */
	ulong	tcsdp;			/* phys. addr.: cur. tx desc. ptr */
	uchar	_pad16[0x6c0-0x688];
	ulong	tcqdp[8];		/* phys. addr.: cur. tx q. desc. ptr */
	uchar	_pad17[0x700-0x6e0];
	struct {
		ulong	tbctr;		/* queue tx token-bucket counter */
		ulong	tbcfg;		/* tx queue token-bucket config. */
		ulong	acfg;		/* tx queue arbiter config. */
		ulong	_pad;
	} tq[8];
	ulong	pttbc;			/* port tx token-bucket counter */
	uchar	_pad18[0x7a8-0x784];
	ulong	ipg2;			/* tx queue ipg */
	ulong	_pad19[3];
	ulong	ipg3;
	ulong	_pad20;
	ulong	htlp;			/* high token in low packet */
	ulong	htap;			/* high token in async packet */
	ulong	ltap;			/* low token in async packet */
	ulong	_pad21;
	ulong	ts;			/* tx speed */
	uchar	_pad22[0x1000-0x7d4];

	/* mac mib counters: statistics */
	Mibstats;
	uchar	_pad23[0x1400-0x1080];

	/* multicast filtering; each byte: Qno<<1 | Pass */
	ulong	dfsmt[64];	/* dest addr filter special m'cast table */
	ulong	dfomt[64];	/* dest addr filter other m'cast table */
	/* unicast filtering */
	ulong	dfut[4];	/* dest addr filter unicast table */
};
static Ctlr *ctlrs[MaxEther];		/* indexed by ctlrno */
static uchar zeroea[Eaddrlen];		/* all-zeroes mac address */

static void getmibstats(Ctlr *);
/*
 * called as b->free when a receive Block is freed: instead of really
 * freeing it, resurrect the Block and push it onto the shared free
 * list so the rx ring can be replenished without allocation.
 */
static void
rxfreeb(Block *b)
{
	/* freeb(b) will have previously decremented b->ref to 0; raise to 1 */
	_xinc(&b->ref);

	/* reset rp/wp to a Bufalign-aligned, Rxblklen-sized region at the end */
	b->wp = b->rp =
		(uchar*)((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
	assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
	b->free = rxfreeb;

	ilock(&freeblocks);
	b->next = freeblocks.head;
	freeblocks.head = b;
	iunlock(&freeblocks);
}
  436. static Block *
  437. rxallocb(void)
  438. {
  439. Block *b;
  440. ilock(&freeblocks);
  441. b = freeblocks.head;
  442. if(b != nil) {
  443. freeblocks.head = b->next;
  444. b->next = nil;
  445. b->free = rxfreeb;
  446. }
  447. iunlock(&freeblocks);
  448. return b;
  449. }
  450. static void
  451. rxkick(Ctlr *ctlr)
  452. {
  453. Gbereg *reg = ctlr->reg;
  454. if (reg->crdp[Qno].r == 0)
  455. reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
  456. if ((reg->rqc & 0xff) == 0) /* all queues are stopped? */
  457. reg->rqc = Rxqon(Qno); /* restart */
  458. coherence();
  459. }
  460. static void
  461. txkick(Ctlr *ctlr)
  462. {
  463. Gbereg *reg = ctlr->reg;
  464. if (reg->tcqdp[Qno] == 0)
  465. reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
  466. if ((reg->tqc & 0xff) == 0) /* all q's stopped? */
  467. reg->tqc = Txqon(Qno); /* restart */
  468. coherence();
  469. }
/*
 * fill empty slots of the rx ring (starting at rxtail) with fresh
 * buffers and hand the corresponding descriptors to the dma engine.
 */
static void
rxreplenish(Ctlr *ctlr)
{
	Rx *r;
	Block *b;

	while(ctlr->rxb[ctlr->rxtail] == nil) {
		b = rxallocb();
		if(b == nil) {
			iprint("#l%d: rxreplenish out of buffers\n",
				ctlr->ether->ctlrno);
			break;
		}
		ctlr->rxb[ctlr->rxtail] = b;

		/* set up uncached receive descriptor */
		r = &ctlr->rx[ctlr->rxtail];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->countsize = ROUNDUP(Rxblklen, 8);
		r->buf = PADDR(b->rp);
		coherence();		/* fields must be visible before ownership flips */

		/* and fire */
		r->cs = RCSdmaown | RCSenableintr;
		coherence();

		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
	}
}
  495. static void
  496. dump(uchar *bp, long max)
  497. {
  498. if (max > 64)
  499. max = 64;
  500. for (; max > 0; max--, bp++)
  501. iprint("%02.2ux ", *bp);
  502. print("...\n");
  503. }
  504. static void
  505. etheractive(Ether *ether)
  506. {
  507. ether->starttime = TK2MS(MACHP(0)->ticks)/1000;
  508. }
  509. static void
  510. ethercheck(Ether *ether)
  511. {
  512. if (ether->starttime != 0 &&
  513. TK2MS(MACHP(0)->ticks)/1000 - ether->starttime > Etherstuck) {
  514. etheractive(ether);
  515. if (ether->ctlrno == 0) /* only complain about main ether */
  516. iprint("#l%d: ethernet stuck\n", ether->ctlrno);
  517. }
  518. }
/*
 * pass received packets to the network stack.  drains up to Nrx-2
 * completed descriptors per call, replenishing and kicking the rx
 * ring halfway through and again at the end.
 */
static void
receive(Ether *ether)
{
	int i;
	ulong n;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Rx *r;

	ethercheck(ether);
	for (i = Nrx-2; i > 0; i--) {
		r = &ctlr->rx[ctlr->rxhead];	/* *r is uncached */
		assert(((uintptr)r & (Descralign - 1)) == 0);
		if(r->cs & RCSdmaown)		/* descriptor busy? */
			break;

		b = ctlr->rxb[ctlr->rxhead];	/* got input buffer? */
		if (b == nil)
			panic("ether1116: nil ctlr->rxb[ctlr->rxhead] "
				"in receive");
		ctlr->rxb[ctlr->rxhead] = nil;
		ctlr->rxhead = NEXT(ctlr->rxhead, Nrx);

		/* whole packet must fit in one descriptor */
		if((r->cs & (RCSfirst|RCSlast)) != (RCSfirst|RCSlast)) {
			ctlr->nofirstlast++;	/* partial packet */
			freeb(b);
			continue;
		}
		if(r->cs & RCSmacerr) {		/* hw flagged a mac error */
			freeb(b);
			continue;
		}

		n = r->countsize >> 16;		/* TODO includes 2 pad bytes? */
		assert(n >= 2 && n < 2048);

		/* clear any cached packet or part thereof */
		l2cacheuinvse(b->rp, n+2);
		cachedinvse(b->rp, n+2);
		b->wp = b->rp + n;

		/*
		 * skip hardware padding intended to align ipv4 address
		 * in memory (mv-s104860-u0 §8.3.4.1)
		 */
		b->rp += 2;
		etheriq(ether, b, 1);
		etheractive(ether);

		/* keep the ring supplied while we drain it */
		if (i % (Nrx / 2) == 0) {
			rxreplenish(ctlr);
			rxkick(ctlr);
		}
	}
	rxreplenish(ctlr);
	rxkick(ctlr);
}
static void
txreplenish(Ether *ether)		/* free transmitted packets */
{
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	/* reclaim, from txtail, descriptors the dma engine has released */
	while(ctlr->txtail != ctlr->txhead) {
		/* ctlr->tx is uncached */
		if(ctlr->tx[ctlr->txtail].cs & TCSdmaown)
			break;		/* still owned by the hw */
		if(ctlr->txb[ctlr->txtail] == nil)
			panic("no block for sent packet?!");
		freeb(ctlr->txb[ctlr->txtail]);
		ctlr->txb[ctlr->txtail] = nil;

		ctlr->txtail = NEXT(ctlr->txtail, Ntx);
		etheractive(ether);
	}
}
/*
 * transmit strategy: fill the output ring as far as possible,
 * perhaps leaving a few spare; kick off the output and take
 * an interrupt only when the transmit queue is empty.
 */
static void
transmit(Ether *ether)
{
	int i, kick, len;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Tx *t;

	ethercheck(ether);
	ilock(ctlr);
	txreplenish(ether);			/* reap old packets */

	/* queue new packets; use at most half the tx descs to avoid livelock */
	kick = 0;
	for (i = Ntx/2 - 2; i > 0; i--) {
		t = &ctlr->tx[ctlr->txhead];	/* *t is uncached */
		assert(((uintptr)t & (Descralign - 1)) == 0);
		if(t->cs & TCSdmaown) {		/* descriptor busy? */
			ctlr->txringfull++;
			break;
		}

		b = qget(ether->oq);		/* outgoing packet? */
		if (b == nil)
			break;
		len = BLEN(b);
		if(len < ether->minmtu || len > ether->maxmtu) {
			freeb(b);		/* discard bogus-sized packet */
			continue;
		}
		ctlr->txb[ctlr->txhead] = b;

		/* make sure the whole packet is in memory */
		cachedwbse(b->rp, len);
		l2cacheuwbse(b->rp, len);

		/* set up the transmit descriptor */
		t->buf = PADDR(b->rp);
		t->countchk = len << 16;
		coherence();	/* fields must be visible before ownership flips */

		/* and fire */
		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
			TCSenableintr;
		coherence();

		kick++;
		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
	}
	if (kick) {
		txkick(ctlr);

		/* re-enable tx interrupts now that work is in flight */
		reg->irqmask  |= Itxendq(Qno);
		reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
	}
	iunlock(ctlr);
}
/* print the state of the whole rx ring, for debugging */
static void
dumprxdescs(Ctlr *ctlr)
{
	int i;
	Gbereg *reg = ctlr->reg;

	iprint("\nrxhead %d rxtail %d; txcdp %#p rxcdp %#p\n",
		ctlr->rxhead, ctlr->rxtail, reg->tcqdp[Qno], reg->crdp[Qno].r);
	for (i = 0; i < Nrx; i++) {
		iprint("rxb %d @ %#p: %#p\n", i, &ctlr->rxb[i], ctlr->rxb[i]);
		delay(50);		/* pace output for the console */
	}
	for (i = 0; i < Nrx; i++) {
		/*
		 * NOTE(review): receive() extracts the byte count with
		 * countsize>>16; the >>3 here looks inconsistent — confirm
		 * intended units before relying on this output.
		 */
		iprint("rx %d @ %#p: cs %#lux countsize %lud buf %#lux next %#lux\n",
			i, &ctlr->rx[i], ctlr->rx[i].cs,
			ctlr->rx[i].countsize >> 3, ctlr->rx[i].buf,
			ctlr->rx[i].next);
		delay(50);
	}
	delay(1000);
}
  661. static int
  662. gotinput(void* ctlr)
  663. {
  664. return ((Ctlr*)ctlr)->haveinput != 0;
  665. }
/*
 * process any packets in the input ring.
 * also sum mib stats frequently to avoid the overflow
 * mentioned in the errata.
 */
static void
rcvproc(void* arg)
{
	Ctlr *ctlr;
	Ether *ether;

	ether = arg;
	ctlr = ether->ctlr;
	for(;;){
		/* wake on input, or at least every 10 s to harvest mib counters */
		tsleep(&ctlr->rrendez, gotinput, ctlr, 10*1000);
		ilock(ctlr);
		getmibstats(ctlr);
		if (ctlr->haveinput) {
			ctlr->haveinput = 0;
			iunlock(ctlr);
			receive(ether);
		} else
			iunlock(ctlr);
	}
}
/*
 * interrupt handler: hand rx work to rcvproc, reap the tx ring when
 * it drains, and record error and link-change events.  causes are
 * acknowledged by writing zero to the cause registers (rw0c bits).
 */
static void
interrupt(Ureg*, void *arg)
{
	ulong irq, irqe, handled;
	Ether *ether = arg;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	handled = 0;
	irq = reg->irq;
	irqe = reg->irqe;
	reg->irqe = 0;				/* extinguish intr causes */
	reg->irq = 0;				/* extinguish intr causes */
	ethercheck(ether);

	if(irq & (Irx | Irxbufferq(Qno))) {
		/*
		 * letting a kproc process the input takes far less real time
		 * than doing it all at interrupt level.
		 */
		ctlr->haveinput = 1;
		wakeup(&ctlr->rrendez);
		irq &= ~(Irx | Irxbufferq(Qno));
		handled++;
	} else
		rxkick(ctlr);

	if(irq & Itxendq(Qno)) {		/* transmit ring empty? */
		reg->irqmask  &= ~Itxendq(Qno);	/* prevent more interrupts */
		reg->irqemask &= ~(IEtxerrq(Qno) | IEtxunderrun);
		transmit(ether);
		irq &= ~Itxendq(Qno);
		handled++;
	}

	if(irqe & IEsum) {
		/*
		 * IElinkchg appears to only be set when unplugging.
		 * autonegotiation is likely not done yet, so linkup not valid,
		 * thus we note the link change here, and check for
		 * that and autonegotiation done below.
		 */
		if(irqe & IEphystschg) {
			ether->link = (reg->ps0 & PS0linkup) != 0;
			ether->linkchg = 1;
		}
		if(irqe & IEtxerrq(Qno))
			ether->oerrs++;
		if(irqe & IErxoverrun)
			ether->overflows++;
		if(irqe & IEtxunderrun)
			ctlr->txunderrun++;
		if(irqe & (IEphystschg | IEtxerrq(Qno) | IErxoverrun |
		    IEtxunderrun))
			handled++;
	}

	if (irq & Isum) {
		if (irq & Irxerr) {  /* nil desc. ptr. or desc. owned by cpu */
			ether->buffs++;		/* approx. error */
			/* if the input ring is full, drain it */
			ctlr->haveinput = 1;
			wakeup(&ctlr->rrendez);
		}
		if(irq & (Irxerr | Irxerrq(Qno)))
			handled++;
		irq &= ~(Irxerr | Irxerrq(Qno));
	}

	/* link changed earlier and autonegotiation has now finished? */
	if(ether->linkchg && (reg->ps1 & PS1an_done)) {
		handled++;
		ether->link = (reg->ps0 & PS0linkup) != 0;
		ether->linkchg = 0;
	}

	ctlr->newintrs++;
	if (!handled) {
		irq  &= ~Isum;
		irqe &= ~IEtxbufferq(Qno);
		if (irq == 0 && irqe == 0) {
			/* seems to be triggered by continuous output */
			// iprint("ether1116: spurious interrupt\n");
		} else
			iprint("ether1116: interrupt cause unknown; "
				"irq %#lux irqe %#lux\n", irq, irqe);
	}
	intrclear(Irqlo, ether->irq);
}
  771. void
  772. promiscuous(void *arg, int on)
  773. {
  774. Ether *ether = arg;
  775. Ctlr *ctlr = ether->ctlr;
  776. Gbereg *reg = ctlr->reg;
  777. ilock(ctlr);
  778. ether->prom = on;
  779. if(on)
  780. reg->portcfg |= PCFGupromisc;
  781. else
  782. reg->portcfg &= ~PCFGupromisc;
  783. iunlock(ctlr);
  784. }
/*
 * enable reception of a multicast address.
 * nothing to do; we always accept multicast.
 */
void
multicast(void *, uchar *, int)
{
}
static void quiesce(Gbereg *reg);

/*
 * shut the port down hard: quiesce dma, reset the dma engine and the
 * port serial logic, then forget all current descriptor pointers.
 * the delays let each reset take effect (register writes are posted).
 */
static void
shutdown(Ether *ether)
{
	int i;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	ilock(ctlr);
	quiesce(reg);
	reg->euc |= Portreset;
	coherence();
	iunlock(ctlr);
	delay(100);

	ilock(ctlr);
	reg->euc &= ~Portreset;
	coherence();
	delay(20);

	reg->psc0 = 0;			/* no PSC0porton */
	reg->psc1 |= PSC1portreset;
	coherence();
	delay(50);
	reg->psc1 &= ~PSC1portreset;
	coherence();

	/* detach all rx & tx rings from the hardware */
	for (i = 0; i < nelem(reg->tcqdp); i++)
		reg->tcqdp[i] = 0;
	for (i = 0; i < nelem(reg->crdp); i++)
		reg->crdp[i].r = 0;
	coherence();
	iunlock(ctlr);
}
/* ctl file command verbs */
enum {
	CMjumbo,
};
static Cmdtab ctlmsg[] = {
	CMjumbo,	"jumbo",	2,	/* "jumbo on|off" */
};
/*
 * handle writes to the ctl file: "jumbo on|off" selects the
 * maximum receive unit.  frees the Cmdbuf on both the normal
 * and the error path; returns n on success.
 */
long
ctl(Ether *e, void *p, long n)
{
	Cmdbuf *cb;
	Cmdtab *ct;
	Ctlr *ctlr = e->ctlr;
	Gbereg *reg = ctlr->reg;

	cb = parsecmd(p, n);
	if(waserror()) {
		free(cb);
		nexterror();
	}
	ct = lookupcmd(cb, ctlmsg, nelem(ctlmsg));
	switch(ct->index) {
	case CMjumbo:
		if(strcmp(cb->f[1], "on") == 0) {
			/* incoming packet queue doesn't expect jumbo frames */
			error("jumbo disabled");
			/* unreachable until jumbo rx works; kept for that day */
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru9022);
			e->maxmtu = 9022;
		} else if(strcmp(cb->f[1], "off") == 0) {
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru1522);
			e->maxmtu = ETHERMAXTU;
		} else
			error(Ebadctl);
		break;
	default:
		error(Ebadctl);
		break;
	}
	free(cb);
	poperror();
	return n;
}
  862. /*
  863. * phy/mii goo
  864. */
  865. static int
  866. smibusywait(Gbereg *reg, ulong waitbit)
  867. {
  868. ulong timeout, smi_reg;
  869. timeout = PhysmiTimeout;
  870. /* wait till the SMI is not busy */
  871. do {
  872. /* read smi register */
  873. smi_reg = reg->smi;
  874. if (timeout-- == 0) {
  875. MIIDBG("SMI busy timeout\n");
  876. return -1;
  877. }
  878. // delay(1);
  879. } while (smi_reg & waitbit);
  880. return 0;
  881. }
/*
 * read phy register ra of phy pa through the SMI interface.
 * returns the 16-bit register value, or -1 on bad arguments
 * or a read-valid timeout.
 */
static int
miird(Mii *mii, int pa, int ra)
{
	ulong smi_reg, timeout;
	Gbereg *reg;

	reg = ((Ctlr*)mii->ctlr)->reg;

	/* check params */
	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
		return -1;

	/* NOTE(review): result ignored — a stuck-busy SMI still falls through to the write below; confirm intent */
	smibusywait(reg, PhysmiBusy);

	/* fill the phy address and register offset and read opcode */
	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;
	coherence();

	/* wait til read value is ready */
	timeout = PhysmiTimeout;
	do {
		smi_reg = reg->smi;
		if (timeout-- == 0) {
			MIIDBG("SMI read-valid timeout\n");
			return -1;
		}
	} while (!(smi_reg & PhysmiReadok));

	/* Wait for the data to update in the SMI register */
	/* NOTE(review): empty delay loop; an optimising compiler could elide it — verify for this toolchain */
	for (timeout = 0; timeout < PhysmiTimeout; timeout++)
		;
	return reg->smi & Physmidatamask;
}
  910. static int
  911. miiwr(Mii *mii, int pa, int ra, int v)
  912. {
  913. Gbereg *reg;
  914. ulong smi_reg;
  915. reg = ((Ctlr*)mii->ctlr)->reg;
  916. /* check params */
  917. if (((pa<<Physmiaddroff) & ~Physmiaddrmask) ||
  918. ((ra<<SmiRegaddroff) & ~SmiRegaddrmask))
  919. return -1;
  920. smibusywait(reg, PhysmiBusy);
  921. /* fill the phy address and register offset and read opcode */
  922. smi_reg = v << Physmidataoff | pa << Physmiaddroff | ra << SmiRegaddroff;
  923. reg->smi = smi_reg & ~PhysmiopRd;
  924. coherence();
  925. return 0;
  926. }
#define MIIMODEL(idr2)	(((idr2) >> 4) & MASK(6))	/* model field of Phyidr2 */

enum {
	/* dual-phy board hacks; see mymii */
	Hacknone,
	Hackdual,

	Ouimarvell	= 0x005043,

	/* idr2 mii/phy model numbers */
	Phy1000		= 0x00,		/* 88E1000 Gb */
	Phy1011		= 0x02,		/* 88E1011 Gb */
	Phy1000_3	= 0x03,		/* 88E1000 Gb */
	Phy1000s	= 0x04,		/* 88E1000S Gb */
	Phy1000_5	= 0x05,		/* 88E1000 Gb */
	Phy1000_6	= 0x06,		/* 88E1000 Gb */
	Phy3082		= 0x08,		/* 88E3082 10/100 */
	Phy1112		= 0x09,		/* 88E1112 Gb */
	Phy1121r	= 0x0b,		/* says the 1121r manual */
	Phy1149		= 0x0b,		/* 88E1149 Gb; same model code as the 1121r */
	Phy1111		= 0x0c,		/* 88E1111 Gb */
	Phy1116		= 0x21,		/* 88E1116 Gb */
	Phy1116r	= 0x24,		/* 88E1116R Gb */
	Phy1118		= 0x22,		/* 88E1118 Gb */
	Phy3016		= 0x26,		/* 88E3016 10/100 */
};
/* which phy-sharing hack is in effect; set during mymii's first pass */
static int hackflavour;
/*
 * on openrd, ether0's phy has address 8, ether1's is ether0's 24.
 * on guruplug, ether0's is phy 0 and ether1's is ether0's phy 1.
 */
/*
 * probe for phys in mask and register the ones found with mii;
 * returns the mask of phys answering this probe.  on ctlr 0 a
 * first pass classifies the phys to detect dual-port boards whose
 * second controller must borrow ctlr 0's phy (Hackdual).
 */
int
mymii(Mii* mii, int mask)
{
	Ctlr *ctlr;
	MiiPhy *miiphy;
	int bit, ctlrno, oui, model, phyno, r, rmask;
	static int dualport, phyidx;	/* results of the first pass, kept for later ctlrs */
	static int phynos[NMiiPhy];

	ctlr = mii->ctlr;
	ctlrno = ctlr->ether->ctlrno;

	/* first pass: figure out what kind of phy(s) we have. */
	dualport = 0;
	if (ctlrno == 0) {
		for(phyno = 0; phyno < NMiiPhy; phyno++){
			bit = 1<<phyno;
			if(!(mask & bit) || mii->mask & bit)
				continue;
			if(mii->mir(mii, phyno, Bmsr) == -1)
				continue;	/* nothing answering at this address */
			r = mii->mir(mii, phyno, Phyidr1);
			oui = (r & 0x3FFF)<<6;
			r = mii->mir(mii, phyno, Phyidr2);
			oui |= r>>10;
			model = MIIMODEL(r);
			if (oui == 0xfffff && model == 0x3f)
				continue;	/* all-ones id: no phy there */
			MIIDBG("ctlrno %d phy %d oui %#ux model %#ux\n",
				ctlrno, phyno, oui, model);
			if (oui == Ouimarvell &&
			   (model == Phy1121r || model == Phy1116r))
				++dualport;
			phynos[phyidx++] = phyno;
		}
		/* two dual-capable marvell phys on ctlr 0 means shared-phy wiring */
		hackflavour = dualport == 2 && phyidx == 2? Hackdual: Hacknone;
		MIIDBG("ether1116: %s-port phy\n",
			hackflavour == Hackdual? "dual": "single");
	}

	/*
	 * Probe through mii for PHYs in mask;
	 * return the mask of those found in the current probe.
	 * If the PHY has not already been probed, update
	 * the Mii information.
	 */
	rmask = 0;
	if (hackflavour == Hackdual && ctlrno < phyidx) {
		/*
		 * openrd, guruplug or the like: use ether0's phys.
		 * this is a nasty hack, but so is the hardware.
		 */
		MIIDBG("ctlrno %d using ctlrno 0's phyno %d\n",
			ctlrno, phynos[ctlrno]);
		ctlr->mii = mii = ctlrs[0]->mii;
		mask = 1 << phynos[ctlrno];
		mii->mask = ~mask;	/* force a (re-)probe of just our phy below */
	}
	for(phyno = 0; phyno < NMiiPhy; phyno++){
		bit = 1<<phyno;
		if(!(mask & bit))
			continue;
		if(mii->mask & bit){
			rmask |= bit;	/* already probed on an earlier call */
			continue;
		}
		if(mii->mir(mii, phyno, Bmsr) == -1)
			continue;
		r = mii->mir(mii, phyno, Phyidr1);
		oui = (r & 0x3FFF)<<6;
		r = mii->mir(mii, phyno, Phyidr2);
		oui |= r>>10;
		if(oui == 0xFFFFF || oui == 0)
			continue;
		if((miiphy = malloc(sizeof(MiiPhy))) == nil)
			continue;
		miiphy->mii = mii;
		miiphy->oui = oui;
		miiphy->phyno = phyno;
		/* ~0: autonegotiation parameters not yet known */
		miiphy->anar = ~0;
		miiphy->fc = ~0;
		miiphy->mscr = ~0;
		mii->phy[phyno] = miiphy;
		if(ctlrno == 0 || hackflavour != Hackdual && mii->curphy == nil)
			mii->curphy = miiphy;
		mii->mask |= bit;
		mii->nphy++;
		rmask |= bit;
	}
	return rmask;
}
/*
 * allocate and initialise ctlr->mii, probe the phys and run
 * autonegotiation on the current one.  sets ether->mbps from the
 * negotiated speed.  returns 0 on success, -1 on failure.
 */
static int
kirkwoodmii(Ether *ether)
{
	int i;
	Ctlr *ctlr;
	MiiPhy *phy;

	MIIDBG("mii\n");
	ctlr = ether->ctlr;
	if((ctlr->mii = malloc(sizeof(Mii))) == nil)
		return -1;
	ctlr->mii->ctlr = ctlr;
	ctlr->mii->mir = miird;
	ctlr->mii->miw = miiwr;

	if(mymii(ctlr->mii, ~0) == 0 || (phy = ctlr->mii->curphy) == nil){
		print("#l%d: ether1116: init mii failure\n", ether->ctlrno);
		free(ctlr->mii);
		ctlr->mii = nil;
		return -1;
	}

	/* oui 005043 is marvell */
	MIIDBG("oui %#X phyno %d\n", phy->oui, phy->phyno);
	// TODO: does this make sense? shouldn't each phy be initialised?
	if((ctlr->ether->ctlrno == 0 || hackflavour != Hackdual) &&
	    miistatus(ctlr->mii) < 0){
		miireset(ctlr->mii);
		MIIDBG("miireset\n");
		if(miiane(ctlr->mii, ~0, 0, ~0) < 0){
			iprint("miiane failed\n");
			/* NOTE(review): ctlr->mii not freed here, unlike the probe-failure path above — confirm */
			return -1;
		}
		MIIDBG("miistatus\n");
		miistatus(ctlr->mii);
		if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrLs){
			/* link up: wait up to ~6s for autonegotiation to complete */
			for(i = 0; ; i++){
				if(i > 600){
					iprint("ether1116: autonegotiation failed\n");
					break;
				}
				if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrAnc)
					break;
				delay(10);
			}
			if(miistatus(ctlr->mii) < 0)
				iprint("miistatus failed\n");
		}else{
			iprint("ether1116: no link\n");
			phy->speed = 10;	/* simple default */
		}
	}

	ether->mbps = phy->speed;
	MIIDBG("#l%d: kirkwoodmii: fd %d speed %d tfc %d rfc %d\n",
		ctlr->port, phy->fd, phy->speed, phy->tfc, phy->rfc);
	MIIDBG("mii done\n");
	return 0;
}
enum {			/* PHY register pages, selected via the Eadr register */
	Pagcopper,
	Pagfiber,
	Pagrgmii,
	Pagled,
	Pagrsvd1,
	Pagvct,
	Pagtest,
	Pagrsvd2,
	Pagfactest,
};
/* select which phy register page appears through the standard registers */
static void
miiregpage(Mii *mii, ulong dev, ulong page)
{
	miiwr(mii, dev, Eadr, page);
}
  1113. static int
  1114. miiphyinit(Mii *mii)
  1115. {
  1116. ulong dev;
  1117. Ctlr *ctlr;
  1118. Gbereg *reg;
  1119. ctlr = (Ctlr*)mii->ctlr;
  1120. reg = ctlr->reg;
  1121. dev = reg->phy;
  1122. MIIDBG("phy dev addr %lux\n", dev);
  1123. /* leds link & activity */
  1124. miiregpage(mii, dev, Pagled);
  1125. /* low 4 bits == 1: on - link, blink - activity, off - no link */
  1126. miiwr(mii, dev, Scr, (miird(mii, dev, Scr) & ~0xf) | 1);
  1127. miiregpage(mii, dev, Pagrgmii);
  1128. miiwr(mii, dev, Scr, miird(mii, dev, Scr) | Rgmiipwrup);
  1129. /* must now do a software reset, says the manual */
  1130. miireset(ctlr->mii);
  1131. /* enable RGMII delay on Tx and Rx for CPU port */
  1132. miiwr(mii, dev, Recr, miird(mii, dev, Recr) | Rxtiming | Rxtiming);
  1133. /* must now do a software reset, says the manual */
  1134. miireset(ctlr->mii);
  1135. miiregpage(mii, dev, Pagcopper);
  1136. miiwr(mii, dev, Scr,
  1137. (miird(mii, dev, Scr) & ~(Pwrdown|Endetect)) | Mdix);
  1138. return 0;
  1139. }
  1140. /*
  1141. * initialisation
  1142. */
  1143. static void
  1144. quiesce(Gbereg *reg)
  1145. {
  1146. ulong v;
  1147. v = reg->tqc;
  1148. if (v & 0xFF)
  1149. reg->tqc = v << 8; /* stop active channels */
  1150. v = reg->rqc;
  1151. if (v & 0xFF)
  1152. reg->rqc = v << 8; /* stop active channels */
  1153. /* wait for all queues to stop */
  1154. while (reg->tqc & 0xFF || reg->rqc & 0xFF)
  1155. ;
  1156. }
  1157. static void
  1158. p16(uchar *p, ulong v) /* convert big-endian short to bytes */
  1159. {
  1160. *p++ = v>>8;
  1161. *p = v;
  1162. }
  1163. static void
  1164. p32(uchar *p, ulong v) /* convert big-endian long to bytes */
  1165. {
  1166. *p++ = v>>24;
  1167. *p++ = v>>16;
  1168. *p++ = v>>8;
  1169. *p = v;
  1170. }
  1171. /*
  1172. * set ether->ea from hw mac address,
  1173. * configure unicast filtering to accept it.
  1174. */
  1175. void
  1176. archetheraddr(Ether *ether, Gbereg *reg, int rxqno)
  1177. {
  1178. uchar *ea;
  1179. ulong nibble, ucreg, tbloff, regoff;
  1180. ea = ether->ea;
  1181. p32(ea, reg->macah);
  1182. p16(ea+4, reg->macal);
  1183. if (memcmp(ea, zeroea, sizeof zeroea) == 0 && ether->ctlrno > 0) {
  1184. /* hack: use ctlr[0]'s + ctlrno */
  1185. memmove(ea, ctlrs[0]->ether->ea, Eaddrlen);
  1186. ea[Eaddrlen-1] += ether->ctlrno;
  1187. reg->macah = ea[0] << 24 | ea[1] << 16 | ea[2] << 8 | ea[3];
  1188. reg->macal = ea[4] << 8 | ea[5];
  1189. coherence();
  1190. }
  1191. /* accept frames on ea */
  1192. nibble = ea[5] & 0xf;
  1193. tbloff = nibble / 4;
  1194. regoff = nibble % 4;
  1195. regoff *= 8;
  1196. ucreg = reg->dfut[tbloff] & (0xff << regoff);
  1197. ucreg |= (rxqno << 1 | Pass) << regoff;
  1198. reg->dfut[tbloff] = ucreg;
  1199. /* accept all multicast too. set up special & other tables. */
  1200. memset(reg->dfsmt, Qno<<1 | Pass, sizeof reg->dfsmt);
  1201. memset(reg->dfomt, Qno<<1 | Pass, sizeof reg->dfomt);
  1202. coherence();
  1203. }
/*
 * open two 256MB address windows onto dram (chip selects 0 and 1)
 * so the controller's dma engines can reach rings and buffers.
 */
static void
cfgdramacc(Gbereg *reg)
{
	memset(reg->harr, 0, sizeof reg->harr);
	memset(reg->base, 0, sizeof reg->base);

	reg->bare = MASK(6) - MASK(2);	/* disable wins 2-5 */
	/* this doesn't make any sense, but it's required */
	reg->epap = 3 << 2 | 3;		/* full access for wins 0 & 1 */
//	reg->epap = 0;	/* no access on access violation for all wins */
	coherence();

	reg->base[0].base = PHYSDRAM | WINATTR(Attrcs0) | Targdram;
	reg->base[0].size = WINSIZE(256*MB);
	reg->base[1].base = (PHYSDRAM + 256*MB) | WINATTR(Attrcs1) | Targdram;
	reg->base[1].size = WINSIZE(256*MB);
	coherence();
}
/*
 * allocate the shared rx buffer pool and the uncached rx & tx
 * descriptor rings; link each ring into a circle through the
 * physical-address `next' pointers the controller follows.
 */
static void
ctlralloc(Ctlr *ctlr)
{
	int i;
	Block *b;
	Rx *r;
	Tx *t;

	ilock(&freeblocks);
	for(i = 0; i < Nrxblks; i++) {
		b = iallocb(Rxblklen+Bufalign-1);
		if(b == nil) {
			iprint("ether1116: no memory for rx buffers\n");
			break;
		}
		assert(b->ref == 1);
		/* align the payload area for the controller's dma */
		b->wp = b->rp = (uchar*)
			((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
		assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
		b->free = rxfreeb;	/* freed blocks come back to this pool */
		b->next = freeblocks.head;
		freeblocks.head = b;
	}
	iunlock(&freeblocks);

	/*
	 * allocate uncached rx ring descriptors because rings are shared
	 * with the ethernet controller and more than one fits in a cache line.
	 */
	ctlr->rx = ucallocalign(Nrx * sizeof(Rx), Descralign, 0);
	if(ctlr->rx == nil)
		panic("ether1116: no memory for rx ring");
	for(i = 0; i < Nrx; i++) {
		r = &ctlr->rx[i];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->cs = 0;	/* owned by software until r->buf is non-nil */
		r->buf = 0;
		r->next = PADDR(&ctlr->rx[NEXT(i, Nrx)]);
		ctlr->rxb[i] = nil;
	}
	ctlr->rxtail = ctlr->rxhead = 0;
	rxreplenish(ctlr);

	/* allocate uncached tx ring descriptors */
	ctlr->tx = ucallocalign(Ntx * sizeof(Tx), Descralign, 0);
	if(ctlr->tx == nil)
		panic("ether1116: no memory for tx ring");
	for(i = 0; i < Ntx; i++) {
		t = &ctlr->tx[i];
		assert(((uintptr)t & (Descralign - 1)) == 0);
		t->cs = 0;
		t->buf = 0;
		t->next = PADDR(&ctlr->tx[NEXT(i, Ntx)]);
		ctlr->txb[i] = nil;
	}
	ctlr->txtail = ctlr->txhead = 0;
}
/*
 * full bring-up: program the dma windows, allocate the rings and
 * point the controller at them, clear the mib counters, set
 * interrupt coalescing and masks, then turn the port on and start
 * the receive queue and its kproc.
 */
static void
ctlrinit(Ether *ether)
{
	int i;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	static char name[KNAMELEN];
	static Ctlr fakectlr;		/* bigger than 4K; keep off the stack */

	for (i = 0; i < nelem(reg->tcqdp); i++)
		reg->tcqdp[i] = 0;
	for (i = 0; i < nelem(reg->crdp); i++)
		reg->crdp[i].r = 0;
	coherence();

	cfgdramacc(reg);
	ctlralloc(ctlr);

	/* hand the ring heads to the controller */
	reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
	reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
	coherence();
//	dumprxdescs(ctlr);

	/* clear stats by reading them into fake ctlr */
	getmibstats(&fakectlr);

	reg->pxmfs = MFS40by;		/* allow runts in */

	/*
	 * ipg's (inter packet gaps) for interrupt coalescing,
	 * values in units of 64 clock cycles. A full-sized
	 * packet (1514 bytes) takes just over 12µs to transmit.
	 */
	if (CLOCKFREQ/(Maxrxintrsec*64) >= (1<<16))
		panic("rx coalescing value %d too big for short",
			CLOCKFREQ/(Maxrxintrsec*64));
	reg->sdc = SDCrifb | SDCrxburst(Burst16) | SDCtxburst(Burst16) |
		SDCrxnobyteswap | SDCtxnobyteswap |
		SDCipgintrx(CLOCKFREQ/(Maxrxintrsec*64));
	reg->pxtfut = 0;	/* TFUTipginttx(CLOCKFREQ/(Maxrxintrsec*64)) */

	/* allow just these interrupts */
	/* guruplug generates Irxerr interrupts continually */
	reg->irqmask = Isum | Irx | Irxbufferq(Qno) | Irxerr | Itxendq(Qno);
	reg->irqemask = IEsum | IEtxerrq(Qno) | IEphystschg | IErxoverrun |
		IEtxunderrun;

	reg->irqe = 0;
	reg->euirqmask = 0;
	coherence();
	reg->irq = 0;
	reg->euirq = 0;

	/* send errors to end of memory */
//	reg->euda = PHYSDRAM + 512*MB - 8*1024;
	reg->euda = 0;
	reg->eudid = Attrcs1 << 4 | Targdram;

//	archetheraddr(ether, ctlr->reg, Qno);	/* 2nd location */

	reg->portcfg = Rxqdefault(Qno) | Rxqarp(Qno);
	reg->portcfgx = 0;
	coherence();

	/*
	 * start the controller running.
	 * turn the port on, kick the receiver.
	 */
	reg->psc1 = PSC1rgmii | PSC1encolonbp | PSC1coldomlim(0x23);
	/* do this only when the controller is quiescent */
	reg->psc0 = PSC0porton | PSC0an_flctloff |
		PSC0an_pauseadv | PSC0nofrclinkdown | PSC0mru(PSC0mru1522);
	coherence();
	for (i = 0; i < 4000; i++)	/* magic delay */
		;

	ether->link = (reg->ps0 & PS0linkup) != 0;

	/* set ethernet MTU for leaky bucket mechanism to 0 (disabled) */
	reg->pmtu = 0;
	etheractive(ether);

	snprint(name, sizeof name, "#l%drproc", ether->ctlrno);
	kproc(name, rcvproc, ether);

	reg->rqc = Rxqon(Qno);
	coherence();
}
  1346. static void
  1347. attach(Ether* ether)
  1348. {
  1349. Ctlr *ctlr = ether->ctlr;
  1350. lock(&ctlr->initlock);
  1351. if(ctlr->init == 0) {
  1352. ctlrinit(ether);
  1353. ctlr->init = 1;
  1354. }
  1355. unlock(&ctlr->initlock);
  1356. }
  1357. /*
  1358. * statistics goo.
  1359. * mib registers clear on read.
  1360. */
  1361. static void
  1362. getmibstats(Ctlr *ctlr)
  1363. {
  1364. Gbereg *reg = ctlr->reg;
  1365. /*
  1366. * Marvell 88f6281 errata FE-ETH-120: high long of rxby and txby
  1367. * can't be read correctly, so read the low long frequently
  1368. * (every 30 seconds or less), thus avoiding overflow into high long.
  1369. */
  1370. ctlr->rxby += reg->rxbylo;
  1371. ctlr->txby += reg->txbylo;
  1372. ctlr->badrxby += reg->badrxby;
  1373. ctlr->mactxerr += reg->mactxerr;
  1374. ctlr->rxpkt += reg->rxpkt;
  1375. ctlr->badrxpkt += reg->badrxpkt;
  1376. ctlr->rxbcastpkt+= reg->rxbcastpkt;
  1377. ctlr->rxmcastpkt+= reg->rxmcastpkt;
  1378. ctlr->rx64 += reg->rx64;
  1379. ctlr->rx65_127 += reg->rx65_127;
  1380. ctlr->rx128_255 += reg->rx128_255;
  1381. ctlr->rx256_511 += reg->rx256_511;
  1382. ctlr->rx512_1023+= reg->rx512_1023;
  1383. ctlr->rx1024_max+= reg->rx1024_max;
  1384. ctlr->txpkt += reg->txpkt;
  1385. ctlr->txcollpktdrop+= reg->txcollpktdrop;
  1386. ctlr->txmcastpkt+= reg->txmcastpkt;
  1387. ctlr->txbcastpkt+= reg->txbcastpkt;
  1388. ctlr->badmacctlpkts+= reg->badmacctlpkts;
  1389. ctlr->txflctl += reg->txflctl;
  1390. ctlr->rxflctl += reg->rxflctl;
  1391. ctlr->badrxflctl+= reg->badrxflctl;
  1392. ctlr->rxundersized+= reg->rxundersized;
  1393. ctlr->rxfrags += reg->rxfrags;
  1394. ctlr->rxtoobig += reg->rxtoobig;
  1395. ctlr->rxjabber += reg->rxjabber;
  1396. ctlr->rxerr += reg->rxerr;
  1397. ctlr->crcerr += reg->crcerr;
  1398. ctlr->collisions+= reg->collisions;
  1399. ctlr->latecoll += reg->latecoll;
  1400. }
  1401. long
  1402. ifstat(Ether *ether, void *a, long n, ulong off)
  1403. {
  1404. Ctlr *ctlr = ether->ctlr;
  1405. Gbereg *reg = ctlr->reg;
  1406. char *buf, *p, *e;
  1407. buf = p = malloc(READSTR);
  1408. e = p + READSTR;
  1409. ilock(ctlr);
  1410. getmibstats(ctlr);
  1411. ctlr->intrs += ctlr->newintrs;
  1412. p = seprint(p, e, "interrupts: %lud\n", ctlr->intrs);
  1413. p = seprint(p, e, "new interrupts: %lud\n", ctlr->newintrs);
  1414. ctlr->newintrs = 0;
  1415. p = seprint(p, e, "tx underrun: %lud\n", ctlr->txunderrun);
  1416. p = seprint(p, e, "tx ring full: %lud\n", ctlr->txringfull);
  1417. ctlr->rxdiscard += reg->pxdfc;
  1418. ctlr->rxoverrun += reg->pxofc;
  1419. p = seprint(p, e, "rx discarded frames: %lud\n", ctlr->rxdiscard);
  1420. p = seprint(p, e, "rx overrun frames: %lud\n", ctlr->rxoverrun);
  1421. p = seprint(p, e, "no first+last flag: %lud\n", ctlr->nofirstlast);
  1422. p = seprint(p, e, "duplex: %s\n", (reg->ps0 & PS0fd)? "full": "half");
  1423. p = seprint(p, e, "flow control: %s\n", (reg->ps0 & PS0flctl)? "on": "off");
  1424. /* p = seprint(p, e, "speed: %d mbps\n", ); */
  1425. p = seprint(p, e, "received bytes: %llud\n", ctlr->rxby);
  1426. p = seprint(p, e, "bad received bytes: %lud\n", ctlr->badrxby);
  1427. p = seprint(p, e, "internal mac transmit errors: %lud\n", ctlr->mactxerr);
  1428. p = seprint(p, e, "total received frames: %lud\n", ctlr->rxpkt);
  1429. p = seprint(p, e, "received broadcast frames: %lud\n", ctlr->rxbcastpkt);
  1430. p = seprint(p, e, "received multicast frames: %lud\n", ctlr->rxmcastpkt);
  1431. p = seprint(p, e, "bad received frames: %lud\n", ctlr->badrxpkt);
  1432. p = seprint(p, e, "received frames 0-64: %lud\n", ctlr->rx64);
  1433. p = seprint(p, e, "received frames 65-127: %lud\n", ctlr->rx65_127);
  1434. p = seprint(p, e, "received frames 128-255: %lud\n", ctlr->rx128_255);
  1435. p = seprint(p, e, "received frames 256-511: %lud\n", ctlr->rx256_511);
  1436. p = seprint(p, e, "received frames 512-1023: %lud\n", ctlr->rx512_1023);
  1437. p = seprint(p, e, "received frames 1024-max: %lud\n", ctlr->rx1024_max);
  1438. p = seprint(p, e, "transmitted bytes: %llud\n", ctlr->txby);
  1439. p = seprint(p, e, "total transmitted frames: %lud\n", ctlr->txpkt);
  1440. p = seprint(p, e, "transmitted broadcast frames: %lud\n", ctlr->txbcastpkt);
  1441. p = seprint(p, e, "transmitted multicast frames: %lud\n", ctlr->txmcastpkt);
  1442. p = seprint(p, e, "transmit frames dropped by collision: %lud\n", ctlr->txcollpktdrop);
  1443. p = seprint(p, e, "misaligned buffers: %lud\n", ether->pktsmisaligned);
  1444. p = seprint(p, e, "bad mac control frames: %lud\n", ctlr->badmacctlpkts);
  1445. p = seprint(p, e, "transmitted flow control messages: %lud\n", ctlr->txflctl);
  1446. p = seprint(p, e, "received flow control messages: %lud\n", ctlr->rxflctl);
  1447. p = seprint(p, e, "bad received flow control messages: %lud\n", ctlr->badrxflctl);
  1448. p = seprint(p, e, "received undersized packets: %lud\n", ctlr->rxundersized);
  1449. p = seprint(p, e, "received fragments: %lud\n", ctlr->rxfrags);
  1450. p = seprint(p, e, "received oversized packets: %lud\n", ctlr->rxtoobig);
  1451. p = seprint(p, e, "received jabber packets: %lud\n", ctlr->rxjabber);
  1452. p = seprint(p, e, "mac receive errors: %lud\n", ctlr->rxerr);
  1453. p = seprint(p, e, "crc errors: %lud\n", ctlr->crcerr);
  1454. p = seprint(p, e, "collisions: %lud\n", ctlr->collisions);
  1455. p = seprint(p, e, "late collisions: %lud\n", ctlr->latecoll);
  1456. USED(p);
  1457. iunlock(ctlr);
  1458. n = readstr(off, a, n, buf);
  1459. free(buf);
  1460. return n;
  1461. }
  1462. static int
  1463. reset(Ether *ether)
  1464. {
  1465. Ctlr *ctlr;
  1466. ether->ctlr = ctlr = malloc(sizeof *ctlr);
  1467. switch(ether->ctlrno) {
  1468. case 0:
  1469. ether->irq = IRQ0gbe0sum;
  1470. break;
  1471. case 1:
  1472. ether->irq = IRQ0gbe1sum;
  1473. break;
  1474. default:
  1475. panic("ether1116: bad ether ctlr #%d", ether->ctlrno);
  1476. }
  1477. ctlr->reg = (Gbereg*)soc.ether[ether->ctlrno];
  1478. /* need this for guruplug, at least */
  1479. *(ulong *)soc.iocfg |= 1 << 7 | 1 << 15; /* io cfg 0: 1.8v gbe */
  1480. coherence();
  1481. ctlr->ether = ether;
  1482. ctlrs[ether->ctlrno] = ctlr;
  1483. shutdown(ether);
  1484. /* ensure that both interfaces are set to RGMII before calling mii */
  1485. ((Gbereg*)soc.ether[0])->psc1 |= PSC1rgmii;
  1486. ((Gbereg*)soc.ether[1])->psc1 |= PSC1rgmii;
  1487. coherence();
  1488. /* Set phy address of the port */
  1489. ctlr->port = ether->ctlrno;
  1490. ctlr->reg->phy = ether->ctlrno;
  1491. coherence();
  1492. ether->port = (uintptr)ctlr->reg;
  1493. if(kirkwoodmii(ether) < 0){
  1494. free(ctlr);
  1495. ether->ctlr = nil;
  1496. return -1;
  1497. }
  1498. miiphyinit(ctlr->mii);
  1499. archetheraddr(ether, ctlr->reg, Qno); /* original location */
  1500. if (memcmp(ether->ea, zeroea, sizeof zeroea) == 0){
  1501. iprint("ether1116: reset: zero ether->ea\n");
  1502. free(ctlr);
  1503. ether->ctlr = nil;
  1504. return -1; /* no rj45 for this ether */
  1505. }
  1506. ether->attach = attach;
  1507. ether->transmit = transmit;
  1508. ether->interrupt = interrupt;
  1509. ether->ifstat = ifstat;
  1510. ether->shutdown = shutdown;
  1511. ether->ctl = ctl;
  1512. ether->arg = ether;
  1513. ether->promiscuous = promiscuous;
  1514. ether->multicast = multicast;
  1515. return 0;
  1516. }
/* link-time registration: make this driver known to devether */
void
ether1116link(void)
{
	addethercard("88e1116", reset);
}