etherkw.c

/*
 * marvell kirkwood ethernet (88e1116) driver
 * (as found in the sheevaplug & openrd).
 * from /public/doc/marvell/88f61xx.kirkwood.pdf
 * and /public/doc/marvell/88e1116.pdf.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "../port/error.h"
#include "../port/netif.h"
#include "etherif.h"
#include "ethermii.h"
#include "../ip/ip.h"

#define MASK(v)	((1UL<<(v)) - 1)

#define MIIDBG	if(0)iprint

enum {
	Gbe0regs	= Regbase + 0x72000,
	Gbe1regs	= Regbase + 0x76000,

	Nrx		= 512,
	Ntx		= 512,
	Nrxblks		= 1024,
	Rxblklen	= 2+1522,	/* ifc. supplies first 2 bytes as padding */

	Maxrxintrsec	= 20*1000,	/* max. rx interrupts per second */
	Etherstuck	= 90,	/* must send or receive a packet in this many seconds */

	Descralign	= 16,
	Bufalign	= 8,

	Pass		= 1,		/* accept packets */

	Qno		= 0,		/* do everything on queue zero */
};

typedef struct Ctlr Ctlr;
typedef struct Gbereg Gbereg;
typedef struct Mibstats Mibstats;
typedef struct Rx Rx;
typedef struct Tx Tx;
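
/*
 * receive buffers are recycled through this private pool: rxfreeb
 * intercepts freeb() and pushes the block back onto freeblocks.head
 * instead of returning it to the kernel allocator, so the Rxblklen
 * sizing and Bufalign alignment survive across reuse.
 */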
static struct {
	Lock;
	Block	*head;
} freeblocks;

/* hardware receive buffer descriptor */
struct Rx {
	ulong	cs;
	ulong	countsize;	/* byte count (high 16 bits), buffer size (<<3) */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Rx */
};

/* hardware transmit buffer descriptor */
struct Tx {
	ulong	cs;
	ulong	countchk;	/* byte count (high 16 bits), checksum seed */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Tx */
};
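
/*
 * the rx and tx descriptors are linked into rings through their
 * physical next pointers (set up once in ctlrinit); the driver walks
 * the rings with the rxhead/rxtail and txhead/txtail indices instead.
 */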

/* fixed by hw; part of Gberegs */
struct Mibstats {
	union {
		uvlong	rxby;		/* good bytes rcv'd */
		struct {
			ulong	rxbylo;
			ulong	rxbyhi;
		};
	};
	ulong	badrxby;	/* bad bytes rcv'd */
	ulong	mactxerr;	/* tx err pkts */
	ulong	rxpkt;		/* good pkts rcv'd */
	ulong	badrxpkt;	/* bad pkts rcv'd */
	ulong	rxbcastpkt;	/* b'cast pkts rcv'd */
	ulong	rxmcastpkt;	/* m'cast pkts rcv'd */
	ulong	rx64;		/* pkts <= 64 bytes */
	ulong	rx65_127;	/* pkts 65-127 bytes */
	ulong	rx128_255;	/* pkts 128-255 bytes */
	ulong	rx256_511;	/* pkts 256-511 bytes */
	ulong	rx512_1023;	/* pkts 512-1023 bytes */
	ulong	rx1024_max;	/* pkts >= 1024 bytes */
	union {
		uvlong	txby;		/* good bytes sent */
		struct {
			ulong	txbylo;
			ulong	txbyhi;
		};
	};
	ulong	txpkt;		/* good pkts sent */
	/* half-duplex: pkts dropped due to excessive collisions */
	ulong	txcollpktdrop;
	ulong	txmcastpkt;	/* m'cast pkts sent */
	ulong	txbcastpkt;	/* b'cast pkts sent */

	ulong	badmacctlpkts;	/* bad mac ctl pkts */
	ulong	txflctl;	/* flow-control pkts sent */
	ulong	rxflctl;	/* good flow-control pkts rcv'd */
	ulong	badrxflctl;	/* bad flow-control pkts rcv'd */

	ulong	rxundersized;	/* runts */
	ulong	rxfrags;	/* fragments rcv'd */
	ulong	rxtoobig;	/* oversized pkts rcv'd */
	ulong	rxjabber;	/* jabber pkts rcv'd */
	ulong	rxerr;		/* rx error events */
	ulong	crcerr;		/* crc error events */
	ulong	collisions;	/* collision events */
	ulong	latecoll;	/* late collisions */
};

struct Ctlr {
	Lock;
	Gbereg	*reg;

	Lock	initlock;
	int	init;

	Rx	*rx;		/* receive descriptors */
	Block	*rxb[Nrx];	/* blocks belonging to the descriptors */
	int	rxhead;		/* descr ethernet will write to next */
	int	rxtail;		/* next descr that might need a buffer */
	Rendez	rrendez;	/* interrupt wakes up read process */
	int	haveinput;

	Tx	*tx;
	Block	*txb[Ntx];
	int	txhead;		/* next descr we can use for new packet */
	int	txtail;		/* next descr to reclaim on tx complete */

	Mii	*mii;
	int	port;

	/* stats */
	ulong	intrs;
	ulong	newintrs;
	ulong	txunderrun;
	ulong	txringfull;
	ulong	rxdiscard;
	ulong	rxoverrun;
	ulong	nofirstlast;

	Mibstats;
};

#define	Rxqon(q)	(1<<(q))
#define	Txqon(q)	(1<<(q))

enum {
	/* sdma config, sdc */
	Burst1		= 0,
	Burst2,
	Burst4,
	Burst8,
	Burst16,
	SDCrifb		= 1<<0,		/* rx intr on pkt boundaries */
#define SDCrxburst(v)	((v)<<1)
	SDCrxnobyteswap	= 1<<4,
	SDCtxnobyteswap	= 1<<5,
	SDCswap64byte	= 1<<6,
#define SDCtxburst(v)	((v)<<22)
	/* rx intr ipg (inter packet gap); the field is split 1+15 bits */
#define SDCipgintrx(v)	(((((v)>>15) & 1)<<25) | (((v) & MASK(15))<<7))

	/* portcfg */
	PCFGupromisc	= 1<<0,		/* unicast promiscuous mode */
#define Rxqdefault(q)	((q)<<1)
#define Rxqarp(q)	((q)<<4)
	PCFGbcrejectnoiparp = 1<<7,
	PCFGbcrejectip	= 1<<8,
	PCFGbcrejectarp	= 1<<9,
	PCFGamnotxes	= 1<<12,	/* auto mode, no summary update on tx */
	PCFGtcpq	= 1<<14,
	PCFGudpq	= 1<<15,
#define	Rxqtcp(q)	((q)<<16)
#define	Rxqudp(q)	((q)<<19)
#define	Rxqbpdu(q)	((q)<<22)
	PCFGrxcs	= 1<<25,	/* rx tcp checksum mode with header */

	/* portcfgx */
	PCFGXspanq	= 1<<1,
	PCFGXcrcoff	= 1<<2,		/* no ethernet crc */

	/* port serial control0, psc0 */
	PSC0porton	= 1<<0,
	PSC0forcelinkup	= 1<<1,
	PSC0an_dplxoff	= 1<<2,		/* an_ = auto. negotiate */
	PSC0an_flctloff	= 1<<3,
	PSC0an_pauseadv	= 1<<4,
	PSC0nofrclinkdown = 1<<10,
	PSC0an_spdoff	= 1<<13,
	PSC0dteadv	= 1<<14,

	/* max. input pkt size */
#define PSC0mru(v)	((v)<<17)
	PSC0mrumask	= PSC0mru(MASK(3)),
	PSC0mru1518	= 0,		/* 1500 + 2*6 (addrs) + 2 + 4 (crc) */
	PSC0mru1522,			/* 1518 + 4 (vlan tag) */
	PSC0mru1552,			/* `baby giant' */
	PSC0mru9022,			/* `jumbo' */
	PSC0mru9192,			/* bigger jumbo */
	PSC0mru9700,			/* still bigger jumbo */

	PSC0fd_frc	= 1<<21,	/* force full duplex */
	PSC0flctlfrc	= 1<<22,
	PSC0gmiispd_gbfrc = 1<<23,
	PSC0miispdfrc100mbps = 1<<24,

	/* port status 0, ps0 */
	PS0linkup	= 1<<1,
	PS0fd		= 1<<2,		/* full duplex */
	PS0flctl	= 1<<3,
	PS0gmii_gb	= 1<<4,
	PS0mii100mbps	= 1<<5,
	PS0txbusy	= 1<<7,
	PS0txfifoempty	= 1<<10,
	PS0rxfifo1empty	= 1<<11,
	PS0rxfifo2empty	= 1<<12,

	/* port serial control 1, psc1 */
	PSC1loopback	= 1<<1,
	PSC1mii		= 0<<2,
	PSC1rgmii	= 1<<3,		/* enable RGMII */
	PSC1portreset	= 1<<4,
	PSC1clockbypass	= 1<<5,
	PSC1iban	= 1<<6,
	PSC1iban_bypass	= 1<<7,
	PSC1iban_restart= 1<<8,
	PSC1_gbonly	= 1<<11,
	PSC1encolonbp	= 1<<15,	/* "collision during back-pressure mib counting" */
	PSC1coldomlimmask= MASK(6)<<16,
#define PSC1coldomlim(v) (((v) & MASK(6))<<16)
	PSC1miiallowoddpreamble	= 1<<22,

	/* port status 1, ps1 */
	PS1rxpause	= 1<<0,
	PS1txpause	= 1<<1,
	PS1pressure	= 1<<2,
	PS1syncfail10ms	= 1<<3,
	PS1an_done	= 1<<4,
	PS1inbandan_bypassed = 1<<5,
	PS1serdesplllocked = 1<<6,
	PS1syncok	= 1<<7,
	PS1nosquelch	= 1<<8,

	/* irq */
	Irx		= 1<<0,
	Iextend		= 1<<1,
#define Irxbufferq(q)	(1<<((q)+2))
	Irxerr		= 1<<10,
#define Irxerrq(q)	(1<<((q)+11))
#define Itxendq(q)	(1<<((q)+19))
	Isum		= 1<<31,

	/* irq extended, irqe */
#define	IEtxbufferq(q)	(1<<((q)+0))
#define	IEtxerrq(q)	(1<<((q)+8))
	IEphystschg	= 1<<16,
	IEptp		= 1<<17,
	IErxoverrun	= 1<<18,
	IEtxunderrun	= 1<<19,
	IElinkchg	= 1<<20,
	IEintaddrerr	= 1<<23,
	IEprbserr	= 1<<25,
	IEsum		= 1<<31,

	/* tx fifo urgent threshold (tx interrupt coalescing), pxtfut */
#define TFUTipginttx(v)	(((v) & MASK(16))<<4)

	/* minimal frame size, mfs */
	MFS40by	= 10<<2,
	MFS44by	= 11<<2,
	MFS48by	= 12<<2,
	MFS52by	= 13<<2,
	MFS56by	= 14<<2,
	MFS60by	= 15<<2,
	MFS64by	= 16<<2,

	/* receive descriptor */
#define	Bufsize(v)	((v)<<3)
	/* receive descriptor status */
	RCSmacerr	= 1<<0,
	RCSmacmask	= 3<<1,
	RCSmacce	= 0<<1,
	RCSmacor	= 1<<1,
	RCSmacmf	= 2<<1,
	RCSl4chkshift	= 3,
	RCSl4chkmask	= MASK(16),
	RCSvlan		= 1<<17,
	RCSbpdu		= 1<<18,
	RCSl4mask	= 3<<21,
	RCSl4tcp4	= 0<<21,
	RCSl4udp4	= 1<<21,
	RCSl4other	= 2<<21,
	RCSl4rsvd	= 3<<21,
	RCSl2ev2	= 1<<23,
	RCSl3ip4	= 1<<24,
	RCSip4headok	= 1<<25,
	RCSlast		= 1<<26,
	RCSfirst	= 1<<27,
	RCSunknownaddr	= 1<<28,
	RCSenableintr	= 1<<29,
	RCSl4chkok	= 1<<30,
	RCSdmaown	= 1<<31,

	/* transmit descriptor status */
	TCSmacerr	= 1<<0,
	TCSmacmask	= 3<<1,
	TCSmaclc	= 0<<1,
	TCSmacur	= 1<<1,
	TCSmacrl	= 2<<1,
	TCSllc		= 1<<9,
	TCSl4chkmode	= 1<<10,
	TCSipv4hdlenshift = 11,
	TCSvlan		= 1<<15,
	TCSl4type	= 1<<16,
	TCSgl4chk	= 1<<17,
	TCSgip4chk	= 1<<18,
	TCSpadding	= 1<<19,
	TCSlast		= 1<<20,
	TCSfirst	= 1<<21,
	TCSenableintr	= 1<<23,
	TCSautomode	= 1<<30,
	TCSdmaown	= 1<<31,
};
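
/*
 * descriptor ownership protocol: the driver fills in a descriptor,
 * flushes it, and only then sets RCSdmaown/TCSdmaown in cs (flushed
 * separately), so the ethernet never sees a half-written descriptor;
 * the ethernet clears the own bit when it is done with one.
 */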

enum {
	/* SMI regs */
	PhysmiTimeout	= 10000,	/* polling-loop iterations, not ms */
	Physmidataoff	= 0,		/* Data */
	Physmidatamask	= 0xffff<<Physmidataoff,

	Physmiaddroff	= 16,		/* PHY device addr */
	Physmiaddrmask	= 0x1f << Physmiaddroff,

	Physmiop	= 26,
	Physmiopmask	= 3<<Physmiop,
	PhysmiopWr	= 0<<Physmiop,
	PhysmiopRd	= 1<<Physmiop,
	PhysmiReadok	= 1<<27,
	PhysmiBusy	= 1<<28,

	SmiRegaddroff	= 21,		/* PHY device register addr */
	SmiRegaddrmask	= 0x1f << SmiRegaddroff,
};
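
/*
 * an SMI transaction is a single write to reg->smi: data in bits
 * 15-0, phy address in 20-16, register address in 25-21 and the
 * opcode in 27-26 (0 = write, 1 = read); PhysmiBusy and PhysmiReadok
 * report its progress.
 */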

struct Gbereg {
	ulong	phy;			/* PHY address */
	ulong	smi;			/* serial mgmt. interface */
	ulong	euda;			/* ether default address */
	ulong	eudid;			/* ether default id */
	uchar	_pad0[0x80-0x10];
	ulong	euirq;			/* interrupt cause */
	ulong	euirqmask;		/* interrupt mask */
	uchar	_pad1[0x94-0x88];
	ulong	euea;			/* error address */
	ulong	euiae;			/* internal error address */
	uchar	_pad2[0xb0-0x9c];
	ulong	euc;			/* control */
	uchar	_pad3[0x200-0xb4];
	struct {
		ulong	base;		/* window base */
		ulong	size;		/* window size */
	} base[6];
	uchar	_pad4[0x280-0x230];
	ulong	harr[4];		/* high address remap */
	ulong	bare;			/* base address enable */
	ulong	epap;			/* port access protect */
	uchar	_pad5[0x400-0x298];

	ulong	portcfg;		/* port configuration */
	ulong	portcfgx;		/* port config. extend */
	ulong	mii;			/* mii serial parameters */
	ulong	_pad6;
	ulong	evlane;			/* vlan ether type */
	ulong	macal;			/* mac address low */
	ulong	macah;			/* mac address high */
	ulong	sdc;			/* sdma config. */
	ulong	dscp[7];		/* ip diff. serv. code point -> pri */
	ulong	psc0;			/* port serial control 0 */
	ulong	vpt2p;			/* vlan priority tag -> pri */
	ulong	ps0;			/* ether port status 0 */
	ulong	tqc;			/* transmit queue command */
	ulong	psc1;			/* port serial control 1 */
	ulong	ps1;			/* ether port status 1 */
	ulong	mvhdr;			/* marvell header */
	ulong	_pad8[2];

	/* interrupts */
	ulong	irq;			/* interrupt cause; some rw0c bits */
	ulong	irqe;			/* " " extended; some rw0c bits */
	ulong	irqmask;		/* interrupt mask (actually enable) */
	ulong	irqemask;		/* " " extended */

	ulong	_pad9;
	ulong	pxtfut;			/* port tx fifo urgent threshold */
	ulong	_pad10;
	ulong	pxmfs;			/* port rx minimum frame size */
	ulong	_pad11;

	/*
	 * # of input frames discarded by addr filtering or lack of resources;
	 * zeroed upon read.
	 */
	ulong	pxdfc;			/* port rx discard frame counter */
	ulong	pxofc;			/* port overrun frame counter */
	ulong	_pad12[2];
	ulong	piae;			/* port internal address error */
	uchar	_pad13[0x4bc-0x498];
	ulong	etherprio;		/* ether type priority */
	uchar	_pad14[0x4dc-0x4c0];
	ulong	tqfpc;			/* tx queue fixed priority config. */
	ulong	pttbrc;			/* port tx token-bucket rate config. */
	ulong	tqc1;			/* tx queue command 1 */
	ulong	pmtu;			/* port maximum transmit unit */
	ulong	pmtbs;			/* port maximum token bucket size */
	uchar	_pad15[0x600-0x4f0];

	struct {
		ulong	_pad[3];
		ulong	r;		/* phys. addr.: cur. rx desc. ptrs */
	} crdp[8];
	ulong	rqc;			/* rx queue command */
	ulong	tcsdp;			/* phys. addr.: cur. tx desc. ptr */
	uchar	_pad16[0x6c0-0x688];
	ulong	tcqdp[8];		/* phys. addr.: cur. tx q. desc. ptr */
	uchar	_pad17[0x700-0x6e0];

	struct {
		ulong	tbctr;		/* queue tx token-bucket counter */
		ulong	tbcfg;		/* tx queue token-bucket config. */
		ulong	acfg;		/* tx queue arbiter config. */
		ulong	_pad;
	} tq[8];
	ulong	pttbc;			/* port tx token-bucket counter */
	uchar	_pad18[0x7a8-0x784];

	ulong	ipg2;			/* tx queue ipg */
	ulong	_pad19[3];
	ulong	ipg3;
	ulong	_pad20;
	ulong	htlp;			/* high token in low packet */
	ulong	htap;			/* high token in async packet */
	ulong	ltap;			/* low token in async packet */
	ulong	_pad21;
	ulong	ts;			/* tx speed */
	uchar	_pad22[0x1000-0x7d4];

	/* mac mib counters: statistics */
	Mibstats;
	uchar	_pad23[0x1400-0x1080];

	/* multicast filtering; each byte: Qno<<1 | Pass */
	ulong	dfsmt[64];		/* dest addr filter special m'cast table */
	ulong	dfomt[64];		/* dest addr filter other m'cast table */
	/* unicast filtering */
	ulong	dfut[4];		/* dest addr filter unicast table */
};

static void getmibstats(Ctlr *);

static void
rxfreeb(Block *b)
{
	/* freeb(b) will have previously decremented b->ref to 0; raise to 1 */
	_xinc(&b->ref);
//	iprint("fr %ld ", b->ref);
	b->wp = b->rp =
		(uchar*)((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
	assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
	b->free = rxfreeb;

	ilock(&freeblocks);
	b->next = freeblocks.head;
	freeblocks.head = b;
	iunlock(&freeblocks);
}

static Block *
rxallocb(void)
{
	Block *b;

	ilock(&freeblocks);
	b = freeblocks.head;
	if(b != nil) {
		freeblocks.head = b->next;
		b->next = nil;
		b->free = rxfreeb;
	}
	iunlock(&freeblocks);
	return b;
}

static void
rxkick(Ctlr *ctlr)
{
	Gbereg *reg = ctlr->reg;

	if (reg->crdp[Qno].r == 0)
		reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
	if ((reg->rqc & 0xff) == 0)		/* all queues are stopped? */
		reg->rqc = Rxqon(Qno);		/* restart */
}

static void
txkick(Ctlr *ctlr)
{
	Gbereg *reg = ctlr->reg;

	if (reg->tcqdp[Qno] == 0)
		reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
	if ((reg->tqc & 0xff) == 0)		/* all q's stopped? */
		reg->tqc = Txqon(Qno);		/* restart */
}

static void
rxreplenish(Ctlr *ctlr)
{
	Rx *r;
	Block *b;

	while(ctlr->rxb[ctlr->rxtail] == nil) {
		b = rxallocb();
		if(b == nil) {
			iprint("etherkw: rxreplenish out of buffers\n");
			break;
		}

		ctlr->rxb[ctlr->rxtail] = b;

		/* set up receive descriptor */
		r = &ctlr->rx[ctlr->rxtail];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->countsize = Bufsize(Rxblklen);
		r->buf = PADDR(b->rp);
		cachedwbse(r, sizeof *r);

		/* and fire */
		r->cs = RCSdmaown | RCSenableintr;
		cachedwbse(&r->cs, BY2SE);

		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
	}
	rxkick(ctlr);
}

static void
dump(uchar *bp, long max)
{
	if (max > 64)
		max = 64;
	for (; max > 0; max--, bp++)
		iprint("%02.2ux ", *bp);
	iprint("...\n");
}

static void
etheractive(Ether *ether)
{
	ether->starttime = TK2MS(MACHP(0)->ticks)/1000;
}

static void
ethercheck(Ether *ether)
{
	if (ether->starttime != 0 &&
	    TK2MS(MACHP(0)->ticks)/1000 - ether->starttime > Etherstuck)
		iprint("ethernet stuck\n");
}

static void
receive(Ether *ether)
{
	int i;
	ulong n;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Rx *r;

	ethercheck(ether);
	for (i = Nrx-2; i > 0; i--) {
		r = &ctlr->rx[ctlr->rxhead];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		cachedinvse(r, sizeof *r);
		if(r->cs & RCSdmaown)	/* descriptor still owned by the mac? */
			break;

		b = ctlr->rxb[ctlr->rxhead];
		if (b == nil)
			panic("etherkw: nil ctlr->rxb[ctlr->rxhead] "
				"in receive");
		ctlr->rxb[ctlr->rxhead] = nil;
		ctlr->rxhead = NEXT(ctlr->rxhead, Nrx);

		if((r->cs & (RCSfirst|RCSlast)) != (RCSfirst|RCSlast)) {
			ctlr->nofirstlast++;	/* partial packet */
			freeb(b);
			continue;
		}
		if(r->cs & RCSmacerr) {
			freeb(b);
			continue;
		}

		n = r->countsize >> 16;		/* byte count */
		assert(n >= 2 && n < 2048);
		cachedinvse(b->rp, n);
		b->wp = b->rp + n;

		/*
		 * skip hardware padding to align ipv4 address in memory
		 * (mv-s104860-u0 §8.3.4.1)
		 */
		b->rp += 2;
		etheriq(ether, b, 1);
		etheractive(ether);
		if (i % (Nrx / 2) == 0)
			rxreplenish(ctlr);
	}
	rxreplenish(ctlr);
}

static void
txreplenish(Ether *ether)		/* free transmitted packets */
{
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	while(ctlr->txtail != ctlr->txhead) {
		cachedinvse(&ctlr->tx[ctlr->txtail].cs, BY2SE);
		if(ctlr->tx[ctlr->txtail].cs & TCSdmaown)
			break;
		if(ctlr->txb[ctlr->txtail] == nil)
			panic("no block for sent packet?!");
		freeb(ctlr->txb[ctlr->txtail]);
		ctlr->txb[ctlr->txtail] = nil;

		ctlr->txtail = NEXT(ctlr->txtail, Ntx);
		etheractive(ether);
	}
}

/*
 * transmit strategy: fill the output ring as far as possible,
 * perhaps leaving a few spare; kick off the output and take
 * an interrupt only when the transmit queue is empty.
 */
static void
transmit(Ether *ether)
{
	int i, kick, len;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Tx *t;

	ethercheck(ether);
	ilock(ctlr);
	txreplenish(ether);			/* reap old packets */

	/* queue new packets; don't use more than half the tx descs. */
	kick = 0;
	for (i = Ntx/2 - 2; i > 0; i--) {
		t = &ctlr->tx[ctlr->txhead];
		assert(((uintptr)t & (Descralign - 1)) == 0);
		cachedinvse(t, sizeof *t);
		if(t->cs & TCSdmaown) {		/* free descriptor? */
			ctlr->txringfull++;
			break;
		}

		b = qget(ether->oq);		/* outgoing packet? */
		if (b == nil)
			break;
		len = BLEN(b);
		if(len < ether->minmtu || len > ether->maxmtu) {
			freeb(b);
			continue;
		}
		ctlr->txb[ctlr->txhead] = b;

		/* set up the transmit descriptor */
		t->buf = PADDR(b->rp);
		t->countchk = len << 16;
		cachedwbse(t, sizeof *t);

		/* and fire */
		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
			TCSenableintr;
		cachedwbse(&t->cs, BY2SE);

		kick++;
		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
	}
	if (kick) {
		txkick(ctlr);
		reg->irqmask  |= Itxendq(Qno);
		reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
	}
	iunlock(ctlr);
}

static void
dumprxdescs(Ctlr *ctlr)
{
	int i;
	Gbereg *reg = ctlr->reg;

	iprint("\nrxhead %d rxtail %d; txcdp %#p rxcdp %#p\n",
		ctlr->rxhead, ctlr->rxtail, reg->tcqdp[Qno], reg->crdp[Qno].r);
	for (i = 0; i < Nrx; i++)
		iprint("rxb %d @ %#p: %#p\n", i, &ctlr->rxb[i], ctlr->rxb[i]);
	for (i = 0; i < Nrx; i++)
		iprint("rx %d @ %#p: cs %#lux countsize %lud buf %#lux next %#lux\n",
			i, &ctlr->rx[i], ctlr->rx[i].cs,
			ctlr->rx[i].countsize >> 3, ctlr->rx[i].buf,
			ctlr->rx[i].next);
	delay(1000);
}

static int
gotinput(void* ctlr)
{
	return ((Ctlr*)ctlr)->haveinput != 0;
}

/*
 * process any packets in the input ring.
 * also sum mib stats frequently, to avoid the overflow
 * mentioned in the errata.
 */
static void
rcvproc(void* arg)
{
	Ctlr *ctlr;
	Ether *ether;

	ether = arg;
	ctlr = ether->ctlr;
	for(;;){
		tsleep(&ctlr->rrendez, gotinput, ctlr, 10*1000);
		ilock(ctlr);
		getmibstats(ctlr);
		if (ctlr->haveinput) {
			ctlr->haveinput = 0;
			iunlock(ctlr);
			receive(ether);
		} else
			iunlock(ctlr);
	}
}

static void
interrupt(Ureg*, void *arg)
{
	ulong irq, irqe, handled;
	Ether *ether = arg;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	handled = 0;
	irq = reg->irq;
	irqe = reg->irqe;
	reg->irq = 0;				/* extinguish intr causes */
	reg->irqe = 0;				/* " " " */
	ethercheck(ether);

	if(irq & Irxbufferq(Qno)) {
		/*
		 * letting a kproc process the input takes far less real time
		 * than doing it all at interrupt level.
		 */
		ctlr->haveinput = 1;
		wakeup(&ctlr->rrendez);
		handled++;
	} else
		rxkick(ctlr);

	if(irq & Itxendq(Qno)) {		/* transmit ring empty? */
		reg->irqmask  &= ~Itxendq(Qno);	/* prevent more interrupts */
		reg->irqemask &= ~(IEtxerrq(Qno) | IEtxunderrun);
		transmit(ether);
		handled++;
	}

	if(irqe & IEsum) {
		/*
		 * IElinkchg appears to only be set when unplugging.
		 * autonegotiation is likely not done yet, so linkup not valid,
		 * thus we note the link change here, and check for
		 * that and autonegotiation done below.
		 */
		if(irqe & IEphystschg) {
			ether->link = (reg->ps0 & PS0linkup) != 0;
			ether->linkchg = 1;
		}
		if(irqe & IEtxerrq(Qno))
			ether->oerrs++;
		if(irqe & IErxoverrun)
			ether->overflows++;
		if(irqe & IEtxunderrun)
			ctlr->txunderrun++;
		if(irqe & (IEphystschg | IEtxerrq(Qno) | IErxoverrun |
		    IEtxunderrun))
			handled++;
	}

	if (irq & Isum) {
		if (irq & Irxerrq(Qno)) {
			ether->buffs++;		/* approx. error */
			/* null descriptor pointer or descriptor owned by cpu */
			panic("etherkw: rx err on queue 0");
		}
		if (irq & Irxerr) {
			ether->buffs++;		/* approx. error */
			/* null descriptor pointer or descriptor owned by cpu */
			panic("etherkw: rx err");
		}
		if(irq & (Irxerr | Irxerrq(Qno)))
			handled++;
	}

	if(ether->linkchg && (reg->ps1 & PS1an_done)) {
		handled++;
		ether->link = (reg->ps0 & PS0linkup) != 0;
		ether->linkchg = 0;
	}

	ctlr->newintrs++;
	if (!handled) {
		irq  &= ~Isum;
		irqe &= ~IEtxbufferq(Qno);
		if (irq == 0 && irqe == 0) {
			/* seems to be triggered by continuous output */
//			iprint("etherkw: spurious interrupt\n");
		} else
			iprint("etherkw: interrupt cause unknown; "
				"irq %#lux irqe %#lux\n", irq, irqe);
	}
	intrclear(Irqlo, ether->irq);
}

void
promiscuous(void *arg, int on)
{
	Ether *ether = arg;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	ilock(ctlr);
	ether->prom = on;
	if(on)
		reg->portcfg |= PCFGupromisc;
	else
		reg->portcfg &= ~PCFGupromisc;
	iunlock(ctlr);
}

void
multicast(void *, uchar *, int)
{
	/* nothing to do; we always accept multicast */
}

static void quiesce(Gbereg *reg);

static void
shutdown(Ether *ether)
{
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	ilock(ctlr);
	quiesce(reg);
	reg->tcqdp[Qno] = 0;
	reg->crdp[Qno].r = 0;
	reg->psc0 = 0;			/* no PSC0porton */
	reg->psc1 |= PSC1portreset;
	iunlock(ctlr);
	coherence();
	delay(100);
}

enum {
	CMjumbo,
};

static Cmdtab ctlmsg[] = {
	CMjumbo,	"jumbo",	2,
};

long
ctl(Ether *e, void *p, long n)
{
	Cmdbuf *cb;
	Cmdtab *ct;
	Ctlr *ctlr = e->ctlr;
	Gbereg *reg = ctlr->reg;

	cb = parsecmd(p, n);
	if(waserror()) {
		free(cb);
		nexterror();
	}
	ct = lookupcmd(cb, ctlmsg, nelem(ctlmsg));
	switch(ct->index) {
	case CMjumbo:
		if(strcmp(cb->f[1], "on") == 0) {
			/* incoming packet queue doesn't expect jumbo frames */
			error("jumbo disabled");
			/* unreachable until jumbo input is sorted out */
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru9022);
			e->maxmtu = 9022;
		} else if(strcmp(cb->f[1], "off") == 0) {
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru1522);
			e->maxmtu = ETHERMAXTU;
		} else
			error(Ebadctl);
		break;
	default:
		error(Ebadctl);
		break;
	}
	free(cb);
	poperror();
	return n;
}

/*
 * phy/mii goo
 */

static int
smibusywait(Gbereg *reg, ulong waitbit)
{
	ulong timeout, smi_reg;

	timeout = PhysmiTimeout;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = reg->smi;
		if (timeout-- == 0) {
			MIIDBG("SMI busy timeout\n");
			return -1;
		}
//		delay(1);
	} while (smi_reg & waitbit);
	return 0;
}

static int
miird(Mii *mii, int pa, int ra)
{
	ulong smi_reg, timeout;
	Ctlr *ctlr;
	Gbereg *reg;

	ctlr = (Ctlr*)mii->ctlr;
	reg = ctlr->reg;

	/* check params */
	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
		return -1;

	smibusywait(reg, PhysmiBusy);

	/* fill the phy address and register offset and read opcode */
	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;
	coherence();

	/* wait til read value is ready */
//	if (smibusywait(reg, PhysmiReadok) < 0)
//		return -1;
	timeout = PhysmiTimeout;
	do {
		smi_reg = reg->smi;
		if (timeout-- == 0) {
			MIIDBG("SMI read-valid timeout\n");
			return -1;
		}
	} while (!(smi_reg & PhysmiReadok));

	/* wait for the data to update in the SMI register */
	for (timeout = 0; timeout < PhysmiTimeout; timeout++)
		;
	return reg->smi & Physmidatamask;
}

static int
miiwr(Mii *mii, int pa, int ra, int v)
{
	Ctlr *ctlr;
	Gbereg *reg;
	ulong smi_reg;

	ctlr = (Ctlr*)mii->ctlr;
	reg = ctlr->reg;

	/* check params */
	if (((pa<<Physmiaddroff) & ~Physmiaddrmask) ||
	    ((ra<<SmiRegaddroff) & ~SmiRegaddrmask))
		return -1;

	smibusywait(reg, PhysmiBusy);

	/* fill the phy address, register offset and data; clear the read opcode */
	smi_reg = v << Physmidataoff | pa << Physmiaddroff | ra << SmiRegaddroff;
	reg->smi = smi_reg & ~PhysmiopRd;
	coherence();
	return 0;
}

static int
kirkwoodmii(Ether *ether)
{
	int i;
	Ctlr *ctlr;
	MiiPhy *phy;

	MIIDBG("mii\n");
	ctlr = ether->ctlr;
	if((ctlr->mii = malloc(sizeof(Mii))) == nil)
		return -1;
	ctlr->mii->ctlr = ctlr;
	ctlr->mii->mir = miird;
	ctlr->mii->miw = miiwr;

	if(mii(ctlr->mii, ~0) == 0 || (phy = ctlr->mii->curphy) == nil){
		print("#l%d: etherkw: init mii failure\n", ether->ctlrno);
		free(ctlr->mii);
		ctlr->mii = nil;
		return -1;
	}

	/* oui 005043 is marvell */
	MIIDBG("oui %#X phyno %d\n", phy->oui, phy->phyno);

	if(miistatus(ctlr->mii) < 0){
		miireset(ctlr->mii);
		MIIDBG("miireset\n");
		if(miiane(ctlr->mii, ~0, 0, ~0) < 0){
			iprint("miiane failed\n");
			return -1;
		}
		MIIDBG("miistatus\n");
		miistatus(ctlr->mii);
		if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrLs){
			/* wait for autonegotiation to complete */
			for(i = 0; ; i++){
				if(i > 600){
					iprint("etherkw: autonegotiation failed\n");
					break;
				}
				if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrAnc)
					break;
				delay(10);
			}
			if(miistatus(ctlr->mii) < 0)
				iprint("miistatus failed\n");
		}else{
			iprint("etherkw: no link\n");
			phy->speed = 10;	/* simple default */
		}
	}

	ether->mbps = phy->speed;
//	iprint("#l%d: kirkwoodmii: fd %d speed %d tfc %d rfc %d\n",
//		ctlr->port, phy->fd, phy->speed, phy->tfc, phy->rfc);
	MIIDBG("mii done\n");
	return 0;
}

enum {					/* PHY register pages */
	Pagcopper,
	Pagfiber,
	Pagrgmii,
	Pagled,
	Pagrsvd1,
	Pagvct,
	Pagtest,
	Pagrsvd2,
	Pagfactest,
};

static void
miiregpage(Mii *mii, ulong dev, ulong page)
{
	miiwr(mii, dev, Eadr, page);
}

static int
miiphyinit(Mii *mii)
{
	ulong dev;
	Ctlr *ctlr;
	Gbereg *reg;

	ctlr = (Ctlr*)mii->ctlr;
	reg = ctlr->reg;
	dev = reg->phy;
	MIIDBG("phy dev addr %lux\n", dev);

	/* leds link & activity */
	miiregpage(mii, dev, Pagled);
	/* low 4 bits == 1: on - link, blink - activity, off - no link */
	miiwr(mii, dev, Scr, (miird(mii, dev, Scr) & ~0xf) | 1);

	miiregpage(mii, dev, Pagrgmii);
	miiwr(mii, dev, Scr, miird(mii, dev, Scr) | Rgmiipwrup);
	/* must now do a software reset, sez the manual */

	/* enable RGMII delay on Tx and Rx for CPU port */
	miiwr(mii, dev, Recr, miird(mii, dev, Recr) | Txtiming | Rxtiming);

	miiregpage(mii, dev, Pagcopper);
	miiwr(mii, dev, Scr,
		(miird(mii, dev, Scr) & ~(Pwrdown|Endetect)) | Mdix);

	return 0;
}

/*
 * initialisation
 */

static void
quiesce(Gbereg *reg)
{
	ulong v;

	v = reg->tqc;
	if (v & 0xFF)
		reg->tqc = v << 8;	/* stop active channels: disable bits are 15-8 */
	v = reg->rqc;
	if (v & 0xFF)
		reg->rqc = v << 8;	/* stop active channels */
	/* wait for all queues to stop */
	while (reg->tqc & 0xFF || reg->rqc & 0xFF)
		;
}

static void
portreset(Gbereg *reg)
{
	ulong i;

	quiesce(reg);
	reg->psc0 &= ~PSC0porton;		/* disable port */
	reg->psc1 &= ~(PSC1rgmii|PSC1portreset); /* set port & MII active */
	coherence();
	for (i = 0; i < 4000; i++)		/* magic delay */
		;
}

/* store a 16- or 32-bit value into a byte array, big-endian */
static void
p16(uchar *p, ulong v)
{
	*p++ = v>>8;
	*p   = v;
}

static void
p32(uchar *p, ulong v)
{
	*p++ = v>>24;
	*p++ = v>>16;
	*p++ = v>>8;
	*p   = v;
}

/*
 * set ether->ea from hw mac address,
 * configure unicast filtering to accept it.
 */
void
archetheraddr(Ether *ether, Gbereg *reg, int rxqno)
{
	ulong nibble, ucreg, tbloff, regoff;

	p32(ether->ea, reg->macah);
	p16(ether->ea+4, reg->macal);

	/* accept frames on ea */
	nibble = ether->ea[5] & 0xf;
	tbloff = nibble / 4;
	regoff = nibble % 4;
	regoff *= 8;
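	/*
	 * e.g. ea[5] = 0x2e: nibble 0xe selects dfut[3], byte 2
	 * (bits 23-16), which below is set to rxqno<<1 | Pass so that
	 * matching unicast frames are passed to queue rxqno.
	 */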
	ucreg = reg->dfut[tbloff];
	ucreg &= ~(0xff << regoff);
	ucreg |= (rxqno << 1 | Pass) << regoff;
	reg->dfut[tbloff] = ucreg;

	/* accept all multicast too.  set up special & other tables. */
	memset(reg->dfsmt, Qno<<1 | Pass, sizeof reg->dfsmt);
	memset(reg->dfomt, Qno<<1 | Pass, sizeof reg->dfomt);
	coherence();
}

static void
ctlrinit(Ether *ether)
{
	int i;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Rx *r;
	Tx *t;
	static char name[KNAMELEN];
	static Ctlr fakectlr;		/* bigger than 4K; keep off the stack */

	ilock(&freeblocks);
	for(i = 0; i < Nrxblks; i++) {
		b = iallocb(Rxblklen+Bufalign-1);
		if(b == nil) {
			iprint("etherkw: no memory for rx buffers\n");
			break;
		}
		assert(b->ref == 1);
		b->wp = b->rp = (uchar*)
			((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
		assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
		b->free = rxfreeb;

		b->next = freeblocks.head;
		freeblocks.head = b;
	}
	iunlock(&freeblocks);

	ctlr->rx = xspanalloc(Nrx * sizeof(Rx), Descralign, 0);
	if(ctlr->rx == nil)
		panic("etherkw: no memory for rx ring");
	for(i = 0; i < Nrx; i++) {
		r = &ctlr->rx[i];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->cs = 0;	/* not owned by hardware until r->buf is set */
		r->buf = 0;
		r->next = PADDR(&ctlr->rx[NEXT(i, Nrx)]);
		ctlr->rxb[i] = nil;
	}
	ctlr->rxtail = ctlr->rxhead = 0;
	cachedwb();
	rxreplenish(ctlr);

	ctlr->tx = xspanalloc(Ntx * sizeof(Tx), Descralign, 0);
	if(ctlr->tx == nil)
		panic("etherkw: no memory for tx ring");
	for(i = 0; i < Ntx; i++) {
		t = &ctlr->tx[i];
		assert(((uintptr)t & (Descralign - 1)) == 0);
		t->cs = 0;
		t->buf = 0;
		t->next = PADDR(&ctlr->tx[NEXT(i, Ntx)]);
		ctlr->txb[i] = nil;
	}
	ctlr->txtail = ctlr->txhead = 0;
	cachedwb();

	/* clear stats by reading them into fake ctlr */
	getmibstats(&fakectlr);

	reg->pxmfs = MFS64by;

	/*
	 * ipg's (inter packet gaps) for interrupt coalescing,
	 * values in units of 64 clock cycles.  a full-sized
	 * packet (1514 bytes) takes just over 12µs to transmit.
	 */
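	/*
	 * for example, assuming a 200MHz CLOCKFREQ (the sheevaplug's
	 * tclk), the coalescing value below is 200e6/(20000*64) = 156
	 * units of 64 clocks, i.e. an rx interrupt at most every 50µs.
	 */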
	if (CLOCKFREQ/(Maxrxintrsec*64) >= (1<<16))
		panic("rx coalescing value %d too big for short",
			CLOCKFREQ/(Maxrxintrsec*64));
	reg->sdc = SDCrifb | SDCrxburst(Burst16) | SDCtxburst(Burst16) |
		SDCrxnobyteswap | SDCtxnobyteswap |
		SDCipgintrx(CLOCKFREQ/(Maxrxintrsec*64));
	reg->pxtfut = 0;	/* TFUTipginttx(CLOCKFREQ/(Maxrxintrsec*64)) */

	/* allow just these interrupts */
	reg->irqmask = Irxbufferq(Qno) | Irxerr | Itxendq(Qno);
	reg->irqemask = IEtxerrq(Qno) | IEphystschg | IErxoverrun | IEtxunderrun;

	reg->irq = 0;
	reg->irqe = 0;
	reg->euirqmask = 0;
	reg->euirq = 0;

//	archetheraddr(ether, ctlr->reg, Qno);	/* 2nd location */

	reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
	for (i = 1; i < nelem(reg->tcqdp); i++)
		reg->tcqdp[i] = 0;
	reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
	for (i = 1; i < nelem(reg->crdp); i++)
		reg->crdp[i].r = 0;

	reg->portcfg = Rxqdefault(Qno) | Rxqarp(Qno);
	reg->portcfgx = 0;

	reg->psc1 = PSC1rgmii | PSC1encolonbp | PSC1coldomlim(0x23);
	reg->psc0 = PSC0porton | PSC0an_flctloff |
		PSC0an_pauseadv | PSC0nofrclinkdown | PSC0mru(PSC0mru1522);
	ether->link = (reg->ps0 & PS0linkup) != 0;

	/* set ethernet MTU for leaky bucket mechanism to 0 (disabled) */
	reg->pmtu = 0;

	reg->rqc = Rxqon(Qno);
	coherence();
	etheractive(ether);

	snprint(name, sizeof name, "#l%drproc", ether->ctlrno);
	kproc(name, rcvproc, ether);
}

static void
attach(Ether* ether)
{
	Ctlr *ctlr = ether->ctlr;

	lock(&ctlr->initlock);
	if(ctlr->init == 0) {
		ctlrinit(ether);
		ctlr->init = 1;
	}
	unlock(&ctlr->initlock);
}

/*
 * statistics goo.
 * mib registers clear on read.
 */
static void
getmibstats(Ctlr *ctlr)
{
	Gbereg *reg = ctlr->reg;

	/*
	 * Marvell 88f6281 errata FE-ETH-120: high long of rxby and txby
	 * can't be read correctly, so read the low long frequently
	 * (every 30 seconds or less), thus avoiding overflow into high long.
	 */
	ctlr->rxby	+= reg->rxbylo;
	ctlr->txby	+= reg->txbylo;

	ctlr->badrxby	+= reg->badrxby;
	ctlr->mactxerr	+= reg->mactxerr;
	ctlr->rxpkt	+= reg->rxpkt;
	ctlr->badrxpkt	+= reg->badrxpkt;
	ctlr->rxbcastpkt += reg->rxbcastpkt;
	ctlr->rxmcastpkt += reg->rxmcastpkt;
	ctlr->rx64	+= reg->rx64;
	ctlr->rx65_127	+= reg->rx65_127;
	ctlr->rx128_255	+= reg->rx128_255;
	ctlr->rx256_511	+= reg->rx256_511;
	ctlr->rx512_1023 += reg->rx512_1023;
	ctlr->rx1024_max += reg->rx1024_max;
	ctlr->txpkt	+= reg->txpkt;
	ctlr->txcollpktdrop += reg->txcollpktdrop;
	ctlr->txmcastpkt += reg->txmcastpkt;
	ctlr->txbcastpkt += reg->txbcastpkt;
	ctlr->badmacctlpkts += reg->badmacctlpkts;
	ctlr->txflctl	+= reg->txflctl;
	ctlr->rxflctl	+= reg->rxflctl;
	ctlr->badrxflctl += reg->badrxflctl;
	ctlr->rxundersized += reg->rxundersized;
	ctlr->rxfrags	+= reg->rxfrags;
	ctlr->rxtoobig	+= reg->rxtoobig;
	ctlr->rxjabber	+= reg->rxjabber;
	ctlr->rxerr	+= reg->rxerr;
	ctlr->crcerr	+= reg->crcerr;
	ctlr->collisions += reg->collisions;
	ctlr->latecoll	+= reg->latecoll;
}

long
ifstat(Ether *ether, void *a, long n, ulong off)
{
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	char *buf, *p, *e;

	buf = p = malloc(READSTR);
	e = p + READSTR;

	ilock(ctlr);
	getmibstats(ctlr);

	ctlr->intrs += ctlr->newintrs;
	p = seprint(p, e, "interrupts: %lud\n", ctlr->intrs);
	p = seprint(p, e, "new interrupts: %lud\n", ctlr->newintrs);
	ctlr->newintrs = 0;
	p = seprint(p, e, "tx underrun: %lud\n", ctlr->txunderrun);
	p = seprint(p, e, "tx ring full: %lud\n", ctlr->txringfull);

	ctlr->rxdiscard += reg->pxdfc;
	ctlr->rxoverrun += reg->pxofc;
	p = seprint(p, e, "rx discarded frames: %lud\n", ctlr->rxdiscard);
	p = seprint(p, e, "rx overrun frames: %lud\n", ctlr->rxoverrun);
	p = seprint(p, e, "no first+last flag: %lud\n", ctlr->nofirstlast);

	p = seprint(p, e, "duplex: %s\n", (reg->ps0 & PS0fd)? "full": "half");
	p = seprint(p, e, "flow control: %s\n", (reg->ps0 & PS0flctl)? "on": "off");
	/* p = seprint(p, e, "speed: %d mbps\n", ); */

	p = seprint(p, e, "received bytes: %llud\n", ctlr->rxby);
	p = seprint(p, e, "bad received bytes: %lud\n", ctlr->badrxby);
	p = seprint(p, e, "internal mac transmit errors: %lud\n", ctlr->mactxerr);
	p = seprint(p, e, "total received frames: %lud\n", ctlr->rxpkt);
	p = seprint(p, e, "received broadcast frames: %lud\n", ctlr->rxbcastpkt);
	p = seprint(p, e, "received multicast frames: %lud\n", ctlr->rxmcastpkt);
	p = seprint(p, e, "bad received frames: %lud\n", ctlr->badrxpkt);
	p = seprint(p, e, "received frames 0-64: %lud\n", ctlr->rx64);
	p = seprint(p, e, "received frames 65-127: %lud\n", ctlr->rx65_127);
	p = seprint(p, e, "received frames 128-255: %lud\n", ctlr->rx128_255);
	p = seprint(p, e, "received frames 256-511: %lud\n", ctlr->rx256_511);
	p = seprint(p, e, "received frames 512-1023: %lud\n", ctlr->rx512_1023);
	p = seprint(p, e, "received frames 1024-max: %lud\n", ctlr->rx1024_max);
	p = seprint(p, e, "transmitted bytes: %llud\n", ctlr->txby);
	p = seprint(p, e, "total transmitted frames: %lud\n", ctlr->txpkt);
	p = seprint(p, e, "transmitted broadcast frames: %lud\n", ctlr->txbcastpkt);
	p = seprint(p, e, "transmitted multicast frames: %lud\n", ctlr->txmcastpkt);
	p = seprint(p, e, "transmit frames dropped by collision: %lud\n",
		ctlr->txcollpktdrop);

	p = seprint(p, e, "misaligned buffers: %lud\n", ether->pktsmisaligned);
	p = seprint(p, e, "bad mac control frames: %lud\n", ctlr->badmacctlpkts);
	p = seprint(p, e, "transmitted flow control messages: %lud\n", ctlr->txflctl);
	p = seprint(p, e, "received flow control messages: %lud\n", ctlr->rxflctl);
	p = seprint(p, e, "bad received flow control messages: %lud\n",
		ctlr->badrxflctl);
	p = seprint(p, e, "received undersized packets: %lud\n", ctlr->rxundersized);
	p = seprint(p, e, "received fragments: %lud\n", ctlr->rxfrags);
	p = seprint(p, e, "received oversized packets: %lud\n", ctlr->rxtoobig);
	p = seprint(p, e, "received jabber packets: %lud\n", ctlr->rxjabber);
	p = seprint(p, e, "mac receive errors: %lud\n", ctlr->rxerr);
	p = seprint(p, e, "crc errors: %lud\n", ctlr->crcerr);
	p = seprint(p, e, "collisions: %lud\n", ctlr->collisions);
	p = seprint(p, e, "late collisions: %lud\n", ctlr->latecoll);
	USED(p);
	iunlock(ctlr);

	n = readstr(off, a, n, buf);
	free(buf);
	return n;
}

static int
reset(Ether *ether)
{
	Ctlr *ctlr;
	static uchar zeroea[Eaddrlen];

	ether->ctlr = ctlr = malloc(sizeof *ctlr);
	switch(ether->ctlrno) {
	case 0:
		ctlr->reg = (Gbereg*)Gbe0regs;
		ether->irq = IRQ0gbe0sum;
		break;
	case 1:
		ctlr->reg = (Gbereg*)Gbe1regs;
		ether->irq = IRQ0gbe1sum;
		break;
	default:
		panic("etherkw: bad ether ctlr #%d", ether->ctlrno);
	}
//	*(ulong *)AddrIocfg0 |= 1 << 7 | 1 << 15;	/* io cfg 0: 1.8v gbe */
//	coherence();
	portreset(ctlr->reg);

	/* ensure that both interfaces are set to RGMII before calling mii */
	((Gbereg*)Gbe0regs)->psc1 |= PSC1rgmii;
	((Gbereg*)Gbe1regs)->psc1 |= PSC1rgmii;

	/* set phy address of the port */
	ctlr->port = ether->ctlrno;
	ctlr->reg->phy = ether->ctlrno;
	coherence();
	ether->port = (uintptr)ctlr->reg;

	if(kirkwoodmii(ether) < 0){
		free(ctlr);
		ether->ctlr = nil;
		return -1;
	}
	miiphyinit(ctlr->mii);

	archetheraddr(ether, ctlr->reg, Qno);	/* original location */
	if (memcmp(ether->ea, zeroea, sizeof zeroea) == 0){
		free(ctlr);
		ether->ctlr = nil;
		return -1;			/* no rj45 for this ether */
	}

	ether->attach = attach;
	ether->transmit = transmit;
	ether->interrupt = interrupt;
	ether->ifstat = ifstat;
	ether->shutdown = shutdown;
	ether->ctl = ctl;

	ether->arg = ether;
	ether->promiscuous = promiscuous;
	ether->multicast = multicast;
	return 0;
}

void
etherkwlink(void)
{
	addethercard("kirkwood", reset);
}