  1. /*
  2. * marvell kirkwood ethernet (88e1116) driver
  3. * (as found in the sheevaplug & openrd).
  4. * from /public/doc/marvell/88f61xx.kirkwood.pdf
  5. * and /public/doc/marvell/88e1116.pdf.
  6. */
  7. #include "u.h"
  8. #include "../port/lib.h"
  9. #include "mem.h"
  10. #include "dat.h"
  11. #include "fns.h"
  12. #include "io.h"
  13. #include "../port/error.h"
  14. #include "../port/netif.h"
  15. #include "etherif.h"
  16. #include "ethermii.h"
  17. #include "../ip/ip.h"
  18. #define MASK(v) ((1UL<<(v)) - 1)
  19. #define MIIDBG if(0)iprint
/* tunable sizes and fixed hardware addresses */
enum {
	Gbe0regs	= Regbase + 0x72000,	/* port 0 register file */
	Gbe1regs	= Regbase + 0x76000,	/* port 1 register file */

	Nrx		= 512,		/* receive descriptors */
	Ntx		= 512,		/* transmit descriptors */
	Nrxblks		= 1024,		/* receive buffers */
	Rxblklen	= 2+1522,	/* ethernet uses first 2 bytes as padding */

	Maxrxintrsec	= 20*1000,	/* max. rx intrs. / sec */
	Etherstuck	= 90,	/* must send or receive a packet in this many sec.s */

	Descralign	= 16,	/* hw requires 16-byte-aligned descriptors */
	Bufalign	= 8,

	Pass		= 1,		/* accept packets */
	Qno		= 0,		/* do everything on queue zero */
};
typedef struct Ctlr Ctlr;
typedef struct Gbereg Gbereg;
typedef struct Mibstats Mibstats;
typedef struct Rx Rx;
typedef struct Tx Tx;

/*
 * pool of free receive Blocks, recycled via rxfreeb/rxallocb
 * and shared by all controllers.
 */
static struct {
	Lock;
	Block	*head;
} freeblocks;
/* hardware receive buffer descriptor; layout fixed by hw */
struct Rx {
	ulong	cs;		/* command/status: RCS* bits; RCSdmaown gives it to hw */
	ulong	countsize;	/* bytes rcv'd (high 16), buffer size (Bufsize()) */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Rx */
};
/* hardware transmit buffer descriptor; layout fixed by hw */
struct Tx {
	ulong	cs;		/* command/status: TCS* bits; TCSdmaown gives it to hw */
	ulong	countchk;	/* bytes (high 16), checksum */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Tx */
};
/*
 * mac mib statistics counters.
 * layout is fixed by hw (part of Gbereg); also embedded in Ctlr
 * to hold a software copy (see getmibstats).
 */
struct Mibstats {
	uvlong	rxby;		/* good bytes rcv'd */
	ulong	badrxby;	/* bad bytes rcv'd */
	ulong	mactxerr;	/* tx err pkts */
	ulong	rxpkt;		/* good pkts rcv'd */
	ulong	badrxpkt;	/* bad pkts rcv'd */
	ulong	rxbcastpkt;	/* b'cast pkts rcv'd */
	ulong	rxmcastpkt;	/* m'cast pkts rcv'd */

	ulong	rx64;		/* pkts <= 64 bytes */
	ulong	rx65_127;	/* pkts 65-127 bytes */
	ulong	rx128_255;	/* pkts 128-255 bytes */
	ulong	rx256_511;	/* pkts 256-511 bytes */
	ulong	rx512_1023;	/* pkts 512-1023 bytes */
	ulong	rx1024_max;	/* pkts >= 1024 bytes */

	uvlong	txby;		/* good bytes sent */
	ulong	txpkt;		/* good pkts sent */
	/* half-duplex: pkts dropped due to excessive collisions */
	ulong	txcollpktdrop;
	ulong	txmcastpkt;	/* m'cast pkts sent */
	ulong	txbcastpkt;	/* b'cast pkts sent */

	ulong	badmacctlpkts;	/* bad mac ctl pkts */
	ulong	txflctl;	/* flow-control pkts sent */
	ulong	rxflctl;	/* good flow-control pkts rcv'd */
	ulong	badrxflctl;	/* bad flow-control pkts rcv'd */

	ulong	rxundersized;	/* runts */
	ulong	rxfrags;	/* fragments rcv'd */
	ulong	rxtoobig;	/* oversized pkts rcv'd */
	ulong	rxjabber;	/* jabber pkts rcv'd */
	ulong	rxerr;		/* rx error events */
	ulong	crcerr;		/* crc error events */
	ulong	collisions;	/* collision events */
	ulong	latecoll;	/* late collisions */
};
/* per-port software state */
struct Ctlr {
	Lock;			/* guards tx ring and port configuration */
	Gbereg	*reg;		/* memory-mapped controller registers */

	Lock	initlock;
	int	init;

	Rx	*rx;		/* receive descriptors */
	Block	*rxb[Nrx];	/* blocks belonging to the descriptors */
	int	rxhead;		/* descr ethernet will write to next */
	int	rxtail;		/* next descr that might need a buffer */
	Rendez	rrendez;	/* interrupt wakes up read process */
	int	haveinput;	/* flag set by interrupt, cleared by rcvproc */

	Tx	*tx;
	Block	*txb[Ntx];	/* blocks awaiting transmit completion */
	int	txhead;		/* next descr we can use for new packet */
	int	txtail;		/* next descr to reclaim on tx complete */

	Mii	*mii;
	int	port;

	/* stats */
	ulong	intrs;
	ulong	newintrs;
	ulong	txunderrun;
	ulong	txringfull;
	ulong	rxdiscard;
	ulong	rxoverrun;
	ulong	nofirstlast;	/* pkts not fitting in one descriptor */

	Mibstats;		/* software copy of hw mib counters */
};
  118. #define Rxqon(q) (1<<(q))
  119. #define Txqon(q) (1<<(q))
  120. enum {
  121. /* sdma config, sdc */
  122. Burst1 = 0,
  123. Burst2,
  124. Burst4,
  125. Burst8,
  126. Burst16,
  127. SDCrifb = 1<<0, /* rx intr on pkt boundaries */
  128. #define SDCrxburst(v) ((v)<<1)
  129. SDCrxnobyteswap = 1<<4,
  130. SDCtxnobyteswap = 1<<5,
  131. SDCswap64byte = 1<<6,
  132. #define SDCtxburst(v) ((v)<<22)
  133. /* rx intr ipg (inter packet gap) */
  134. #define SDCipgintrx(v) ((((v)>>15) & 1)<<25) | (((v) & MASK(15))<<7)
  135. /* portcfg */
  136. PCFGupromisc = 1<<0, /* unicast promiscuous mode */
  137. #define Rxqdefault(q) ((q)<<1)
  138. #define Rxqarp(q) ((q)<<4)
  139. PCFGbcrejectnoiparp = 1<<7,
  140. PCFGbcrejectip = 1<<8,
  141. PCFGbcrejectarp = 1<<9,
  142. PCFGamnotxes = 1<<12, /* auto mode, no summary update on tx */
  143. PCFGtcpq = 1<<14,
  144. PCFGudpq = 1<<15,
  145. #define Rxqtcp(q) ((q)<<16)
  146. #define Rxqudp(q) ((q)<<19)
  147. #define Rxqbpdu(q) ((q)<<22)
  148. PCFGrxcs = 1<<25, /* rx tcp checksum mode with header */
  149. /* portcfgx */
  150. PCFGXspanq = 1<<1,
  151. PCFGXcrcoff = 1<<2, /* no ethernet crc */
  152. /* port serial control0, psc0 */
  153. PSC0porton = 1<<0,
  154. PSC0forcelinkup = 1<<1,
  155. PSC0an_dplxoff = 1<<2, /* an_ = auto. negotiate */
  156. PSC0an_flctloff = 1<<3,
  157. PSC0an_pauseadv = 1<<4,
  158. PSC0nofrclinkdown = 1<<10,
  159. PSC0an_spdoff = 1<<13,
  160. PSC0dteadv = 1<<14,
  161. /* max. input pkt size */
  162. #define PSC0mru(v) ((v)<<17)
  163. PSC0mrumask = PSC0mru(MASK(3)),
  164. PSC0mru1518 = 0,
  165. PSC0mru1522,
  166. PSC0mru1552,
  167. PSC0mru9022,
  168. PSC0mru9192,
  169. PSC0mru9700,
  170. PSC0fd_frc = 1<<21, /* force full duplex */
  171. PSC0flctlfrc = 1<<22,
  172. PSC0gmiispd_gbfrc = 1<<23,
  173. PSC0miispdfrc100mbps = 1<<24,
  174. /* port status 0, ps0 */
  175. PS0linkup = 1<<1,
  176. PS0fd = 1<<2, /* full duplex */
  177. PS0flctl = 1<<3,
  178. PS0gmii_gb = 1<<4,
  179. PS0mii100mbps = 1<<5,
  180. PS0txbusy = 1<<7,
  181. PS0txfifoempty = 1<<10,
  182. PS0rxfifo1empty = 1<<11,
  183. PS0rxfifo2empty = 1<<12,
  184. /* port serial control 1, psc1 */
  185. PSC1loopback = 1<<1,
  186. PSC1mii = 0<<2,
  187. PSC1rgmii = 1<<3, /* enable RGMII */
  188. PSC1portreset = 1<<4,
  189. PSC1clockbypass = 1<<5,
  190. PSC1iban = 1<<6,
  191. PSC1iban_bypass = 1<<7,
  192. PSC1iban_restart= 1<<8,
  193. PSC1_gbonly = 1<<11,
  194. PSC1encolonbp = 1<<15, /* "collision during back-pressure mib counting" */
  195. PSC1coldomlimmask= MASK(6)<<16,
  196. #define PSC1coldomlim(v) (((v) & MASK(6))<<16)
  197. PSC1miiallowoddpreamble = 1<<22,
  198. /* port status 1, ps1 */
  199. PS1rxpause = 1<<0,
  200. PS1txpause = 1<<1,
  201. PS1pressure = 1<<2,
  202. PS1syncfail10ms = 1<<3,
  203. PS1an_done = 1<<4,
  204. PS1inbandan_bypassed = 1<<5,
  205. PS1serdesplllocked = 1<<6,
  206. PS1syncok = 1<<7,
  207. PS1nosquelch = 1<<8,
  208. /* irq */
  209. Irx = 1<<0,
  210. Iextend = 1<<1,
  211. #define Irxbufferq(q) (1<<((q)+2))
  212. Irxerr = 1<<10,
  213. #define Irxerrq(q) (1<<((q)+11))
  214. #define Itxendq(q) (1<<((q)+19))
  215. Isum = 1<<31,
  216. /* irq extended, irqe */
  217. #define IEtxbufferq(q) (1<<((q)+0))
  218. #define IEtxerrq(q) (1<<((q)+8))
  219. IEphystschg = 1<<16,
  220. IEptp = 1<<17,
  221. IErxoverrun = 1<<18,
  222. IEtxunderrun = 1<<19,
  223. IElinkchg = 1<<20,
  224. IEintaddrerr = 1<<23,
  225. IEprbserr = 1<<25,
  226. IEsum = 1<<31,
  227. /* tx fifo urgent threshold (tx interrupt coalescing), pxtfut */
  228. #define TFUTipginttx(v) (((v) & MASK(16))<<4);
  229. /* minimal frame size, mfs */
  230. MFS40by = 10<<2,
  231. MFS44by = 11<<2,
  232. MFS48by = 12<<2,
  233. MFS52by = 13<<2,
  234. MFS56by = 14<<2,
  235. MFS60by = 15<<2,
  236. MFS64by = 16<<2,
  237. /* receive descriptor */
  238. #define Bufsize(v) ((v)<<3)
  239. /* receive descriptor status */
  240. RCSmacerr = 1<<0,
  241. RCSmacmask = 3<<1,
  242. RCSmacce = 0<<1,
  243. RCSmacor = 1<<1,
  244. RCSmacmf = 2<<1,
  245. RCSl4chkshift = 3,
  246. RCSl4chkmask = MASK(16),
  247. RCSvlan = 1<<17,
  248. RCSbpdu = 1<<18,
  249. RCSl4mask = 3<<21,
  250. RCSl4tcp4 = 0<<21,
  251. RCSl4udp4 = 1<<21,
  252. RCSl4other = 2<<21,
  253. RCSl4rsvd = 3<<21,
  254. RCSl2ev2 = 1<<23,
  255. RCSl3ip4 = 1<<24,
  256. RCSip4headok = 1<<25,
  257. RCSlast = 1<<26,
  258. RCSfirst = 1<<27,
  259. RCSunknownaddr = 1<<28,
  260. RCSenableintr = 1<<29,
  261. RCSl4chkok = 1<<30,
  262. RCSdmaown = 1<<31,
  263. /* transmit descriptor status */
  264. TCSmacerr = 1<<0,
  265. TCSmacmask = 3<<1,
  266. TCSmaclc = 0<<1,
  267. TCSmacur = 1<<1,
  268. TCSmacrl = 2<<1,
  269. TCSllc = 1<<9,
  270. TCSl4chkmode = 1<<10,
  271. TCSipv4hdlenshift= 11,
  272. TCSvlan = 1<<15,
  273. TCSl4type = 1<<16,
  274. TCSgl4chk = 1<<17,
  275. TCSgip4chk = 1<<18,
  276. TCSpadding = 1<<19,
  277. TCSlast = 1<<20,
  278. TCSfirst = 1<<21,
  279. TCSenableintr = 1<<23,
  280. TCSautomode = 1<<30,
  281. TCSdmaown = 1<<31,
  282. };
/* serial management interface (SMI) register fields, for PHY access */
enum {
	/* SMI regs */
	PhysmiTimeout	= 10000,	/* what units? in ms. */
	Physmidataoff	= 0,		/* Data */
	Physmidatamask	= 0xffff<<Physmidataoff,
	Physmiaddroff 	= 16,		/* PHY device addr */
	Physmiaddrmask	= 0x1f << Physmiaddroff,

	Physmiop	= 26,		/* opcode field */
	Physmiopmask	= 3<<Physmiop,
	PhysmiopWr	= 0<<Physmiop,
	PhysmiopRd	= 1<<Physmiop,

	PhysmiReadok	= 1<<27,	/* read data valid */
	PhysmiBusy	= 1<<28,

	SmiRegaddroff	= 21,		/* PHY device register addr */
	SmiRegaddrmask	= 0x1f << SmiRegaddroff,
};
/*
 * gbe controller register file, mapped at Gbe0regs/Gbe1regs.
 * layout fixed by hw; PAD() fills the gaps between register groups.
 */
struct Gbereg {
	ulong	phy;		/* PHY address */
	ulong	smi;		/* serial mgmt. interface */
	ulong	euda;		/* ether default address */
	ulong	eudid;		/* ether default id */
	ulong	_pad0[PAD(0x80, 0xc)];
	ulong	euirq;		/* interrupt cause */
	ulong	euirqmask;	/* interrupt mask */
	ulong	_pad1[PAD(0x94, 0x84)];
	ulong	euea;		/* error address */
	ulong	euiae;		/* internal error address */
	ulong	_pad2[PAD(0xb0, 0x98)];
	ulong	euc;		/* control */
	ulong	_pad3[PAD(0x200, 0xb0)];
	struct {
		ulong	base;	/* window base */
		ulong	size;	/* window size */
	} base[6];
	ulong	_pad4[PAD(0x280, 0x22c)];
	ulong	harr[4];	/* high address remap */
	ulong	bare;		/* base address enable */
	ulong	epap;		/* port access protect */
	ulong	_pad5[PAD(0x400, 0x294)];

	ulong	portcfg;	/* port configuration */
	ulong	portcfgx;	/* port config. extend */
	ulong	mii;		/* mii serial parameters */
	ulong	_pad6;
	ulong	evlane;		/* vlan ether type */
	ulong	macal;		/* mac address low */
	ulong	macah;		/* mac address high */
	ulong	sdc;		/* sdma config. */
	ulong	dscp[7];	/* ip diff. serv. code point -> pri */
	ulong	psc0;		/* port serial control 0 */
	ulong	vpt2p;		/* vlan priority tag -> pri */
	ulong	ps0;		/* ether port status 0 */
	ulong	tqc;		/* transmit queue command */
	ulong	psc1;		/* port serial control 1 */
	ulong	ps1;		/* ether port status 1 */
	ulong	mvhdr;		/* marvell header */
	ulong	_pad8[2];

	/* interrupts */
	ulong	irq;		/* interrupt cause; some rw0c bits */
	ulong	irqe;		/* " " extended; some rw0c bits */
	ulong	irqmask;	/* interrupt mask (actually enable) */
	ulong	irqemask;	/* " " extended */

	ulong	_pad9;
	ulong	pxtfut;		/* port tx fifo urgent threshold */
	ulong	_pad10;
	ulong	pxmfs;		/* port rx minimum frame size */
	ulong	_pad11;

	/*
	 * # of input frames discarded by addr filtering or lack of resources;
	 * zeroed upon read.
	 */
	ulong	pxdfc;		/* port rx discard frame counter */
	ulong	pxofc;		/* port overrun frame counter */
	ulong	_pad12[2];
	ulong	piae;		/* port internal address error */
	ulong	_pad13[PAD(0x4bc, 0x494)];
	ulong	etherprio;	/* ether type priority */
	ulong	_pad14[PAD(0x4dc, 0x4bc)];
	ulong	tqfpc;		/* tx queue fixed priority config. */
	ulong	pttbrc;		/* port tx token-bucket rate config. */
	ulong	tqc1;		/* tx queue command 1 */
	ulong	pmtu;		/* port maximum transmit unit */
	ulong	pmtbs;		/* port maximum token bucket size */
	ulong	_pad15[PAD(0x600, 0x4ec)];

	struct {
		ulong	_pad[3];
		ulong	r;	/* phys. addr.: cur. rx desc. ptrs */
	} crdp[8];
	ulong	rqc;		/* rx queue command */
	ulong	tcsdp;		/* phys. addr.: cur. tx desc. ptr */
	ulong	_pad16[PAD(0x6c0, 0x684)];
	ulong	tcqdp[8];	/* phys. addr.: cur. tx q. desc. ptr */
	ulong	_pad17[PAD(0x700, 0x6dc)];
	struct {
		ulong	tbctr;	/* queue tx token-bucket counter */
		ulong	tbcfg;	/* tx queue token-bucket config. */
		ulong	acfg;	/* tx queue arbiter config. */
		ulong	_pad;
	} tq[8];
	ulong	pttbc;		/* port tx token-bucket counter */
	ulong	_pad18[PAD(0x7a8, 0x780)];
	ulong	ipg2;		/* tx queue ipg */
	ulong	_pad19[3];
	ulong	ipg3;
	ulong	_pad20;
	ulong	htlp;		/* high token in low packet */
	ulong	htap;		/* high token in async packet */
	ulong	ltap;		/* low token in async packet */
	ulong	_pad21;
	ulong	ts;		/* tx speed */
	ulong	_pad22[PAD(0x1000, 0x7d0)];

	/* mac mib counters: statistics */
	Mibstats;
	ulong	_pad23[PAD(0x1400, 0x107c)];

	/* multicast filtering; each byte: Qno<<1 | Pass */
	ulong	dfsmt[64];	/* dest addr filter special m'cast table */
	ulong	dfomt[64];	/* dest addr filter other m'cast table */
	/* unicast filtering */
	ulong	dfut[4];	/* dest addr filter unicast table */
};
  402. static void getmibstats(Ctlr *);
/*
 * Block free routine for rx buffers: instead of returning the Block
 * to the general pool, recycle it onto our private freeblocks list.
 * installed as b->free, so it runs whenever freeb() releases an rx Block.
 */
static void
rxfreeb(Block *b)
{
	/* freeb(b) will have previously decremented b->ref to 0; raise to 1 */
	_xinc(&b->ref);
//	iprint("fr %ld ", b->ref);

	/* reset rp/wp to the Block's full, Bufalign-aligned rx extent */
	b->wp = b->rp =
		(uchar*)((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
	assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
	b->free = rxfreeb;		/* stay on the recycling path */

	ilock(&freeblocks);
	b->next = freeblocks.head;
	freeblocks.head = b;
	iunlock(&freeblocks);
}
  418. static Block *
  419. rxallocb(void)
  420. {
  421. Block *b;
  422. ilock(&freeblocks);
  423. b = freeblocks.head;
  424. if(b != nil) {
  425. freeblocks.head = b->next;
  426. b->next = nil;
  427. b->free = rxfreeb;
  428. }
  429. iunlock(&freeblocks);
  430. return b;
  431. }
  432. static void
  433. rxkick(Ctlr *ctlr)
  434. {
  435. Gbereg *reg = ctlr->reg;
  436. if (reg->crdp[Qno].r == 0)
  437. reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
  438. if ((reg->rqc & 0xff) == 0) /* all queues are stopped? */
  439. reg->rqc = Rxqon(Qno); /* restart */
  440. }
  441. static void
  442. txkick(Ctlr *ctlr)
  443. {
  444. Gbereg *reg = ctlr->reg;
  445. if (reg->tcqdp[Qno] == 0)
  446. reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
  447. if ((reg->tqc & 0xff) == 0) /* all q's stopped? */
  448. reg->tqc = Txqon(Qno); /* restart */
  449. }
/*
 * refill empty rx descriptor slots (starting at rxtail) with Blocks
 * from the free list, hand each to the hw, then kick the rx engine.
 */
static void
rxreplenish(Ctlr *ctlr)
{
	Rx *r;
	Block *b;

	while(ctlr->rxb[ctlr->rxtail] == nil) {
		b = rxallocb();
		if(b == nil) {
			iprint("etherkw: rxreplenish out of buffers\n");
			break;
		}
		ctlr->rxb[ctlr->rxtail] = b;

		/* set up receive descriptor */
		r = &ctlr->rx[ctlr->rxtail];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->countsize = Bufsize(Rxblklen);
		r->buf = PADDR(b->rp);
		/* flush descriptor body to memory before granting ownership */
		cachedwbse(r, sizeof *r);

		/* and fire: set dmaown last so hw sees a complete descriptor */
		r->cs = RCSdmaown | RCSenableintr;
		cachedwbse(&r->cs, BY2SE);

		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
	}
	rxkick(ctlr);
}
  475. static void
  476. dump(uchar *bp, long max)
  477. {
  478. if (max > 64)
  479. max = 64;
  480. for (; max > 0; max--, bp++)
  481. iprint("%02.2ux ", *bp);
  482. print("...\n");
  483. }
/* note tx/rx activity: record the current time in seconds */
static void
etheractive(Ether *ether)
{
	ether->starttime = TK2MS(MACHP(0)->ticks)/1000;
}
  489. static void
  490. ethercheck(Ether *ether)
  491. {
  492. if (ether->starttime != 0 &&
  493. TK2MS(MACHP(0)->ticks)/1000 - ether->starttime > Etherstuck)
  494. iprint("ethernet stuck\n");
  495. }
/*
 * drain completed rx descriptors (oldest first, at most Nrx-2 per call)
 * and pass their packets up the stack.  runs in rcvproc, not at
 * interrupt level.
 */
static void
receive(Ether *ether)
{
	int i;
	ulong n;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Rx *r;

	ethercheck(ether);
	for (i = Nrx-2; i > 0; i--) {
		r = &ctlr->rx[ctlr->rxhead];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		/* invalidate cached copy so we see the hw's updates */
		cachedinvse(r, sizeof *r);
		if(r->cs & RCSdmaown)		/* hw still owns it: ring drained */
			break;

		b = ctlr->rxb[ctlr->rxhead];
		if (b == nil)
			panic("etherkw: nil ctlr->rxb[ctlr->rxhead] "
				"in receive");
		ctlr->rxb[ctlr->rxhead] = nil;
		ctlr->rxhead = NEXT(ctlr->rxhead, Nrx);

		/* drop packets spanning multiple descriptors */
		if((r->cs & (RCSfirst|RCSlast)) != (RCSfirst|RCSlast)) {
			ctlr->nofirstlast++;
			freeb(b);
			continue;
		}
		/* drop packets with mac errors */
		if(r->cs & RCSmacerr) {
			freeb(b);
			continue;
		}

		n = r->countsize >> 16;		/* received byte count */
		assert(n >= 2 && n < 2048);
		/* invalidate buffer cache lines so we see the dma'd data */
		cachedinvse(b->rp, n);
		b->wp = b->rp + n;
		/*
		 * skip hardware padding to align ipv4 address in memory
		 * (mv-s104860-u0 §8.3.4.1)
		 */
		b->rp += 2;
		etheriq(ether, b, 1);
		etheractive(ether);
		/* on long runs, replenish descriptors before the ring drains */
		if (i % (Nrx / 2) == 0)
			rxreplenish(ctlr);
	}
	rxreplenish(ctlr);
}
/*
 * reclaim tx descriptors (oldest first) whose packets the hw has
 * finished sending, freeing the associated Blocks.
 */
static void
txreplenish(Ether *ether)			/* free transmitted packets */
{
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	while(ctlr->txtail != ctlr->txhead) {
		/* re-read the status word the hw may have updated */
		cachedinvse(&ctlr->tx[ctlr->txtail].cs, BY2SE);
		if(ctlr->tx[ctlr->txtail].cs & TCSdmaown)	/* still sending? */
			break;
		if(ctlr->txb[ctlr->txtail] == nil)
			panic("no block for sent packet?!");
		freeb(ctlr->txb[ctlr->txtail]);
		ctlr->txb[ctlr->txtail] = nil;

		ctlr->txtail = NEXT(ctlr->txtail, Ntx);
		etheractive(ether);
	}
}
/*
 * transmit strategy: fill the output ring as far as possible,
 * perhaps leaving a few spare; kick off the output and take
 * an interrupt only when the transmit queue is empty.
 */
static void
transmit(Ether *ether)
{
	int i, kick, len;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Tx *t;

	ethercheck(ether);
	ilock(ctlr);
	txreplenish(ether);			/* reap old packets */

	/* queue new packets; don't use more than half the tx descs. */
	kick = 0;
	for (i = Ntx/2 - 2; i > 0; i--) {
		t = &ctlr->tx[ctlr->txhead];
		assert(((uintptr)t & (Descralign - 1)) == 0);
		/* invalidate cached copy so we see the hw's ownership bit */
		cachedinvse(t, sizeof *t);
		if(t->cs & TCSdmaown) {		/* free descriptor? */
			ctlr->txringfull++;
			break;
		}

		b = qget(ether->oq);		/* outgoing packet? */
		if (b == nil)
			break;
		len = BLEN(b);
		if(len < ether->minmtu || len > ether->maxmtu) {
			freeb(b);
			continue;
		}
		ctlr->txb[ctlr->txhead] = b;

		/* set up the transmit descriptor */
		t->buf = PADDR(b->rp);
		t->countchk = len << 16;	/* byte count in high half */
		/*
		 * NOTE(review): the packet payload at b->rp is not explicitly
		 * written back from the cache here, only the descriptor is;
		 * presumably the buffers are flushed elsewhere — verify.
		 */
		cachedwbse(t, sizeof *t);

		/* and fire: set dmaown last so hw sees a complete descriptor */
		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
			TCSenableintr;
		cachedwbse(&t->cs, BY2SE);

		kick++;
		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
	}
	if (kick) {
		txkick(ctlr);
		/* enable tx-done & tx-error interrupts now that work is queued */
		reg->irqmask  |= Itxendq(Qno);
		reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
	}
	iunlock(ctlr);
}
  612. static void
  613. dumprxdescs(Ctlr *ctlr)
  614. {
  615. int i;
  616. Gbereg *reg = ctlr->reg;
  617. iprint("\nrxhead %d rxtail %d; txcdp %#p rxcdp %#p\n",
  618. ctlr->rxhead, ctlr->rxtail, reg->tcqdp[Qno], reg->crdp[Qno].r);
  619. for (i = 0; i < Nrx; i++)
  620. iprint("rxb %d @ %#p: %#p\n", i, &ctlr->rxb[i], ctlr->rxb[i]);
  621. for (i = 0; i < Nrx; i++)
  622. iprint("rx %d @ %#p: cs %#lux countsize %lud buf %#lux next %#lux\n",
  623. i, &ctlr->rx[i], ctlr->rx[i].cs,
  624. ctlr->rx[i].countsize >> 3, ctlr->rx[i].buf,
  625. ctlr->rx[i].next);
  626. delay(1000);
  627. }
  628. static int
  629. gotinput(void* ctlr)
  630. {
  631. return ((Ctlr*)ctlr)->haveinput != 0;
  632. }
/*
 * kernel process: waits for the interrupt handler to flag input,
 * then processes received packets outside interrupt context.
 */
static void
rcvproc(void* arg)
{
	Ctlr *ctlr;
	Ether *ether;

	ether = arg;
	ctlr = ether->ctlr;
	for(;;){
		sleep(&ctlr->rrendez, gotinput, ctlr);
		/* clear before draining; a new interrupt will set it again */
		ctlr->haveinput = 0;
		receive(ether);
	}
}
/*
 * interrupt handler: read and clear both cause registers, then
 * dispatch — rx work is deferred to rcvproc; tx completion refills
 * the ring; error and link-change causes update counters/state.
 */
static void
interrupt(Ureg*, void *arg)
{
	ulong irq, irqe, handled;
	Ether *ether = arg;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	handled = 0;
	irq = reg->irq;
	irqe = reg->irqe;
	reg->irq = 0;				/* extinguish intr causes */
	reg->irqe = 0;				/* " " " */
	ethercheck(ether);

	if(irq & Irxbufferq(Qno)) {
		/*
		 * letting a kproc process the input takes far less real time
		 * than doing it all at interrupt level.
		 */
		ctlr->haveinput = 1;
		wakeup(&ctlr->rrendez);
		handled++;
	} else
		rxkick(ctlr);		/* no rx cause: make sure rx is running */

	if(irq & Itxendq(Qno)) {		/* transmit ring empty? */
		reg->irqmask &= ~Itxendq(Qno);	/* prevent more interrupts */
		reg->irqemask &= ~(IEtxerrq(Qno) | IEtxunderrun);
		transmit(ether);		/* refill the ring */
		handled++;
	}

	if(irqe & IEsum) {
		/*
		 * IElinkchg appears to only be set when unplugging.
		 * autonegotiation is likely not done yet, so linkup not valid,
		 * thus we note the link change here, and check for
		 * that and autonegotiation done below.
		 */
		if(irqe & IEphystschg) {
			ether->link = (reg->ps0 & PS0linkup) != 0;
			ether->linkchg = 1;
		}
		if(irqe & IEtxerrq(Qno))
			ether->oerrs++;
		if(irqe & IErxoverrun)
			ether->overflows++;
		if(irqe & IEtxunderrun)
			ctlr->txunderrun++;
		if(irqe & (IEphystschg | IEtxerrq(Qno) | IErxoverrun |
		    IEtxunderrun))
			handled++;
	}

	if (irq & Isum) {
		if (irq & Irxerrq(Qno)) {
			ether->buffs++;		/* approx. error */
			/* null descriptor pointer or descriptor owned by cpu */
			panic("etherkw: rx err on queue 0");
		}
		if (irq & Irxerr) {
			ether->buffs++;		/* approx. error */
			/* null descriptor pointer or descriptor owned by cpu */
			panic("etherkw: rx err");
		}
		if(irq & (Irxerr | Irxerrq(Qno)))
			handled++;
	}

	/* resolve a pending link change once autonegotiation has finished */
	if(ether->linkchg && (reg->ps1 & PS1an_done)) {
		handled++;
		ether->link = (reg->ps0 & PS0linkup) != 0;
		ether->linkchg = 0;
	}

	ctlr->newintrs++;
	if (!handled) {
		/* strip summary/ignored bits before deciding to complain */
		irq  &= ~Isum;
		irqe &= ~IEtxbufferq(Qno);
		if (irq == 0 && irqe == 0) {
			/* seems to be triggered by continuous output */
//			iprint("etherkw: spurious interrupt\n");
		} else
			iprint("etherkw: interrupt cause unknown; "
				"irq %#lux irqe %#lux\n", irq, irqe);
	}
	intrclear(Irqlo, ether->irq);
}
  728. void
  729. promiscuous(void *arg, int on)
  730. {
  731. Ether *ether = arg;
  732. Ctlr *ctlr = ether->ctlr;
  733. Gbereg *reg = ctlr->reg;
  734. ilock(ctlr);
  735. ether->prom = on;
  736. if(on)
  737. reg->portcfg |= PCFGupromisc;
  738. else
  739. reg->portcfg &= ~PCFGupromisc;
  740. iunlock(ctlr);
  741. }
/* Ether hook for multicast address changes; deliberately a no-op */
void
multicast(void *, uchar *, int)
{
	/* nothing to do; we always accept multicast */
}
  747. static void quiesce(Gbereg *reg);
/* stop the port: quiesce dma, forget the rings, power off and reset */
static void
shutdown(Ether *ether)
{
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	ilock(ctlr);
	quiesce(reg);
	reg->tcqdp[Qno] = 0;		/* no current tx descriptor */
	reg->crdp[Qno].r = 0;		/* no current rx descriptor */
	reg->psc0 = 0;			/* no PSC0porton */
	reg->psc1 |= PSC1portreset;
	iunlock(ctlr);
	coherence();
	delay(100);			/* let the reset take effect */
}
/* ctl-file commands, parsed by ctl() below */
enum {
	CMjumbo,
};

static Cmdtab ctlmsg[] = {
	CMjumbo,	"jumbo",	2,	/* jumbo on|off */
};
/*
 * handle writes to the ctl file.  currently only "jumbo on|off",
 * which adjusts the port's max. receive unit and e->maxmtu.
 * raises Ebadctl on unknown commands; returns n on success.
 */
long
ctl(Ether *e, void *p, long n)
{
	Cmdbuf *cb;
	Cmdtab *ct;
	Ctlr *ctlr = e->ctlr;
	Gbereg *reg = ctlr->reg;

	cb = parsecmd(p, n);
	if(waserror()) {
		free(cb);
		nexterror();
	}
	ct = lookupcmd(cb, ctlmsg, nelem(ctlmsg));
	switch(ct->index) {
	case CMjumbo:
		if(strcmp(cb->f[1], "on") == 0) {
			/* incoming packet queue doesn't expect jumbo frames */
			error("jumbo disabled");
			/* deliberately unreachable while jumbo is disabled */
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru9022);
			e->maxmtu = 9022;
		} else if(strcmp(cb->f[1], "off") == 0) {
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru1522);
			e->maxmtu = ETHERMAXTU;
		} else
			error(Ebadctl);
		break;
	default:
		error(Ebadctl);
		break;
	}
	free(cb);
	poperror();
	return n;
}
  805. /*
  806. * phy/mii goo
  807. */
  808. static int
  809. smibusywait(Gbereg *reg, ulong waitbit)
  810. {
  811. ulong timeout, smi_reg;
  812. timeout = PhysmiTimeout;
  813. /* wait till the SMI is not busy */
  814. do {
  815. /* read smi register */
  816. smi_reg = reg->smi;
  817. if (timeout-- == 0) {
  818. MIIDBG("SMI busy timeout\n");
  819. return -1;
  820. }
  821. // delay(1);
  822. } while (smi_reg & waitbit);
  823. return 0;
  824. }
/*
 * read PHY register ra of PHY pa via the SMI.
 * returns the 16-bit register value, or -1 on bad parameters
 * or SMI timeout.
 */
static int
miird(Mii *mii, int pa, int ra)
{
	ulong smi_reg, timeout;
	Ctlr *ctlr;
	Gbereg *reg;

	ctlr = (Ctlr*)mii->ctlr;
	reg = ctlr->reg;

	/* check params */
	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
		return -1;

	smibusywait(reg, PhysmiBusy);	/* result ignored: proceed regardless */

	/* fill the phy address and register offset and read opcode */
	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;
	coherence();

	/* wait til read value is ready */
//	if (smibusywait(reg, PhysmiReadok) < 0)
//		return -1;
	timeout = PhysmiTimeout;
	do {
		smi_reg = reg->smi;
		if (timeout-- == 0) {
			MIIDBG("SMI read-valid timeout\n");
			return -1;
		}
	} while (!(smi_reg & PhysmiReadok));

	/* Wait for the data to update in the SMI register */
	/* NOTE(review): crude empty delay loop; an optimising compiler may elide it */
	for (timeout = 0; timeout < PhysmiTimeout; timeout++)
		;
	return reg->smi & Physmidatamask;
}
/*
 * write v to PHY register ra of PHY pa via the SMI.
 * returns 0 on success, -1 on bad parameters.  does not wait
 * for the write to complete.
 */
static int
miiwr(Mii *mii, int pa, int ra, int v)
{
	Ctlr *ctlr;
	Gbereg *reg;
	ulong smi_reg;

	ctlr = (Ctlr*)mii->ctlr;
	reg = ctlr->reg;

	/* check params */
	if (((pa<<Physmiaddroff) & ~Physmiaddrmask) ||
	    ((ra<<SmiRegaddroff) & ~SmiRegaddrmask))
		return -1;

	smibusywait(reg, PhysmiBusy);	/* result ignored: proceed regardless */

	/* fill the phy address and register offset and write opcode */
	smi_reg = v << Physmidataoff | pa << Physmiaddroff | ra << SmiRegaddroff;
	reg->smi = smi_reg & ~PhysmiopRd;	/* clear read bit -> write op */
	coherence();
	return 0;
}
  876. static int
  877. kirkwoodmii(Ether *ether)
  878. {
  879. int i;
  880. Ctlr *ctlr;
  881. MiiPhy *phy;
  882. MIIDBG("mii\n");
  883. ctlr = ether->ctlr;
  884. if((ctlr->mii = malloc(sizeof(Mii))) == nil)
  885. return -1;
  886. ctlr->mii->ctlr = ctlr;
  887. ctlr->mii->mir = miird;
  888. ctlr->mii->miw = miiwr;
  889. if(mii(ctlr->mii, ~0) == 0 || (phy = ctlr->mii->curphy) == nil){
  890. print("#l%d: etherkw: init mii failure\n", ether->ctlrno);
  891. free(ctlr->mii);
  892. ctlr->mii = nil;
  893. return -1;
  894. }
  895. /* oui 005043 is marvell */
  896. MIIDBG("oui %#X phyno %d\n", phy->oui, phy->phyno);
  897. if(miistatus(ctlr->mii) < 0){
  898. miireset(ctlr->mii);
  899. MIIDBG("miireset\n");
  900. if(miiane(ctlr->mii, ~0, 0, ~0) < 0){
  901. iprint("miiane failed\n");
  902. return -1;
  903. }
  904. MIIDBG("miistatus\n");
  905. miistatus(ctlr->mii);
  906. if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrLs){
  907. for(i = 0; ; i++){
  908. if(i > 600){
  909. iprint("etherkw: autonegotiation failed\n");
  910. break;
  911. }
  912. if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrAnc)
  913. break;
  914. delay(10);
  915. }
  916. if(miistatus(ctlr->mii) < 0)
  917. iprint("miistatus failed\n");
  918. }else{
  919. iprint("etherkw: no link\n");
  920. phy->speed = 10; /* simple default */
  921. }
  922. }
  923. ether->mbps = phy->speed;
  924. // iprint("#l%d: kirkwoodmii: fd %d speed %d tfc %d rfc %d\n",
  925. // ctlr->port, phy->fd, phy->speed, phy->tfc, phy->rfc);
  926. MIIDBG("mii done\n");
  927. return 0;
  928. }
enum {			/* PHY register pages; selected by writing the page number to Eadr (see miiregpage) */
	Pagcopper,	/* 0: copper registers */
	Pagfiber,	/* 1: fiber registers */
	Pagrgmii,	/* 2: RGMII registers */
	Pagled,		/* 3: LED control */
	Pagrsvd1,	/* 4: reserved */
	Pagvct,		/* 5: virtual cable test */
	Pagtest,	/* 6: test */
	Pagrsvd2,	/* 7: reserved */
	Pagfactest,	/* 8: factory test */
};
/* select a PHY register page by writing it to the page-address (Eadr) register */
static void
miiregpage(Mii *mii, ulong dev, ulong page)
{
	miiwr(mii, dev, Eadr, page);
}
  945. static int
  946. miiphyinit(Mii *mii)
  947. {
  948. ulong dev;
  949. Ctlr *ctlr;
  950. Gbereg *reg;
  951. ctlr = (Ctlr*)mii->ctlr;
  952. reg = ctlr->reg;
  953. dev = reg->phy;
  954. MIIDBG("phy dev addr %lux\n", dev);
  955. /* leds link & activity */
  956. miiregpage(mii, dev, Pagled);
  957. /* low 4 bits == 1: on - link, blink - activity, off - no link */
  958. miiwr(mii, dev, Scr, (miird(mii, dev, Scr) & ~0xf) | 1);
  959. miiregpage(mii, dev, Pagrgmii);
  960. miiwr(mii, dev, Scr, miird(mii, dev, Scr) | Rgmiipwrup);
  961. /* must now do a software reset, sez the manual */
  962. /* enable RGMII delay on Tx and Rx for CPU port */
  963. miiwr(mii, dev, Recr, miird(mii, dev, Recr) | Rxtiming | Rxtiming);
  964. miiregpage(mii, dev, Pagcopper);
  965. miiwr(mii, dev, Scr,
  966. (miird(mii, dev, Scr) & ~(Pwrdown|Endetect)) | Mdix);
  967. return 0;
  968. }
  969. /*
  970. * initialisation
  971. */
  972. static void
  973. quiesce(Gbereg *reg)
  974. {
  975. ulong v;
  976. v = reg->tqc;
  977. if (v & 0xFF)
  978. reg->tqc = v << 8; /* stop active channels */
  979. v = reg->rqc;
  980. if (v & 0xFF)
  981. reg->rqc = v << 8; /* stop active channels */
  982. /* wait for all queues to stop */
  983. while (reg->tqc & 0xFF || reg->rqc & 0xFF)
  984. ;
  985. }
  986. static void
  987. portreset(Gbereg *reg)
  988. {
  989. ulong i;
  990. quiesce(reg);
  991. reg->psc0 &= ~PSC0porton; /* disable port */
  992. reg->psc1 &= ~(PSC1rgmii|PSC1portreset); /* set port & MII active */
  993. coherence();
  994. for (i = 0; i < 4000; i++) /* magic delay */
  995. ;
  996. }
  997. static void
  998. p16(uchar *p, ulong v)
  999. {
  1000. *p++ = v>>8;
  1001. *p = v;
  1002. }
  1003. static void
  1004. p32(uchar *p, ulong v)
  1005. {
  1006. *p++ = v>>24;
  1007. *p++ = v>>16;
  1008. *p++ = v>>8;
  1009. *p = v;
  1010. }
  1011. /*
  1012. * set ether->ea from hw mac address,
  1013. * configure unicast filtering to accept it.
  1014. */
  1015. void
  1016. archetheraddr(Ether *ether, Gbereg *reg, int rxqno)
  1017. {
  1018. ulong nibble, ucreg, tbloff, regoff;
  1019. p32(ether->ea, reg->macah);
  1020. p16(ether->ea+4, reg->macal);
  1021. /* accept frames on ea */
  1022. nibble = ether->ea[5] & 0xf;
  1023. tbloff = nibble / 4;
  1024. regoff = nibble % 4;
  1025. regoff *= 8;
  1026. ucreg = reg->dfut[tbloff];
  1027. ucreg &= 0xff << regoff;
  1028. ucreg |= (rxqno << 1 | Pass) << regoff;
  1029. reg->dfut[tbloff] = ucreg;
  1030. /* accept all multicast too. set up special & other tables. */
  1031. memset(reg->dfsmt, Qno<<1 | Pass, sizeof reg->dfsmt);
  1032. memset(reg->dfomt, Qno<<1 | Pass, sizeof reg->dfomt);
  1033. coherence();
  1034. }
/*
 * One-time hardware bring-up for one GbE port: build the rx buffer
 * free pool, allocate and link the rx/tx descriptor rings, clear the
 * MIB counters, program DMA/coalescing and interrupt masks, enable
 * the port and start the receive kproc.  Called once from attach()
 * under initlock.  The register write order below follows the
 * original driver; do not reorder casually.
 */
static void
ctlrinit(Ether *ether)
{
	int i;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Rx *r;
	Tx *t;
	static char name[KNAMELEN];
	static Ctlr fakectlr;		/* bigger than 4K; keep off the stack */

	/* build the pool of Bufalign-aligned receive buffers */
	ilock(&freeblocks);
	for(i = 0; i < Nrxblks; i++) {
		/* over-allocate so the data pointer can be aligned below */
		b = iallocb(Rxblklen+Bufalign-1);
		if(b == nil) {
			iprint("etherkw: no memory for rx buffers\n");
			break;
		}
		assert(b->ref == 1);
		/* align rp/wp down so Rxblklen bytes fit before b->lim */
		b->wp = b->rp = (uchar*)
			((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
		assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
		b->free = rxfreeb;	/* freeb() recycles into this pool */
		b->next = freeblocks.head;
		freeblocks.head = b;
	}
	iunlock(&freeblocks);

	/* rx descriptor ring, circularly linked by physical address */
	ctlr->rx = xspanalloc(Nrx * sizeof(Rx), Descralign, 0);
	if(ctlr->rx == nil)
		panic("etherkw: no memory for rx ring");
	for(i = 0; i < Nrx; i++) {
		r = &ctlr->rx[i];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->cs = 0;	/* not owned by hardware until r->buf is set */
		r->buf = 0;
		r->next = PADDR(&ctlr->rx[NEXT(i, Nrx)]);
		ctlr->rxb[i] = nil;
	}
	ctlr->rxtail = ctlr->rxhead = 0;
	cachedwb();
	rxreplenish(ctlr);	/* hand buffers from the pool to the ring */

	/* tx descriptor ring, same circular layout */
	ctlr->tx = xspanalloc(Ntx * sizeof(Tx), Descralign, 0);
	if(ctlr->tx == nil)
		panic("etherkw: no memory for tx ring");
	for(i = 0; i < Ntx; i++) {
		t = &ctlr->tx[i];
		assert(((uintptr)t & (Descralign - 1)) == 0);
		t->cs = 0;
		t->buf = 0;
		t->next = PADDR(&ctlr->tx[NEXT(i, Ntx)]);
		ctlr->txb[i] = nil;
	}
	ctlr->txtail = ctlr->txhead = 0;
	cachedwb();

	/* clear stats by reading them into fake ctlr */
	getmibstats(&fakectlr);

	reg->pxmfs = MFS64by;

	/*
	 * ipg's (inter packet gaps) for interrupt coalescing,
	 * values in units of 64 clock cycles.  A full-sized
	 * packet (1514 bytes) takes just over 12µs to transmit.
	 */
	if (CLOCKFREQ/(Maxrxintrsec*64) >= (1<<16))
		panic("rx coalescing value %d too big for short",
			CLOCKFREQ/(Maxrxintrsec*64));
	reg->sdc = SDCrifb | SDCrxburst(Burst16) | SDCtxburst(Burst16) |
		SDCrxnobyteswap | SDCtxnobyteswap |
		SDCipgintrx(CLOCKFREQ/(Maxrxintrsec*64));
	reg->pxtfut = 0;	/* TFUTipginttx(CLOCKFREQ/(Maxrxintrsec*64)) */

	/* allow just these interrupts */
	reg->irqmask = Irxbufferq(Qno) | Irxerr | Itxendq(Qno);
	reg->irqemask = IEtxerrq(Qno) | IEphystschg | IErxoverrun | IEtxunderrun;
	reg->irq = 0;
	reg->irqe = 0;
	reg->euirqmask = 0;
	reg->euirq = 0;

	// archetheraddr(ether, ctlr->reg, Qno);	/* 2nd location */

	/* point queue 0 at the rings; zero the unused queues */
	reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
	for (i = 1; i < nelem(reg->tcqdp); i++)
		reg->tcqdp[i] = 0;
	reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
	for (i = 1; i < nelem(reg->crdp); i++)
		reg->crdp[i].r = 0;

	reg->portcfg = Rxqdefault(Qno) | Rxqarp(Qno);
	reg->portcfgx = 0;

	reg->psc1 = PSC1rgmii | PSC1encolonbp | PSC1coldomlim(0x23);
	/* why 1522?  1518 should be enough */
	reg->psc0 = PSC0porton | PSC0an_flctloff |
		PSC0an_pauseadv | PSC0nofrclinkdown | PSC0mru(PSC0mru1522);
	ether->link = (reg->ps0 & PS0linkup) != 0;

	/* set ethernet MTU for leaky bucket mechanism to 0 (disabled) */
	reg->pmtu = 0;
	reg->rqc = Rxqon(Qno);	/* start receiving on queue 0 */
	coherence();
	etheractive(ether);

	snprint(name, sizeof name, "#l%drproc", ether->ctlrno);
	kproc(name, rcvproc, ether);
}
  1133. static void
  1134. attach(Ether* ether)
  1135. {
  1136. Ctlr *ctlr = ether->ctlr;
  1137. lock(&ctlr->initlock);
  1138. if(ctlr->init == 0) {
  1139. ctlrinit(ether);
  1140. ctlr->init = 1;
  1141. }
  1142. unlock(&ctlr->initlock);
  1143. }
  1144. /*
  1145. * statistics goo
  1146. */
/*
 * Accumulate the port's hardware MIB counters into the soft copies
 * in ctlr.  The registers are clear-on-read, so each read both
 * samples and resets the hardware counter.
 */
static void
getmibstats(Ctlr *ctlr)
{
	Gbereg *reg = ctlr->reg;

	/*
	 * rxbyteslo & txbylo seem to return the same as the *hi-variant.
	 * the docs claim [rt]xby 64 bit.  can we do an atomic 64 bit read?
	 */
	/* mib registers clear on read, store them */
	ctlr->rxby	+= reg->rxby;
	ctlr->badrxby	+= reg->badrxby;
	ctlr->mactxerr	+= reg->mactxerr;
	ctlr->rxpkt	+= reg->rxpkt;
	ctlr->badrxpkt	+= reg->badrxpkt;
	ctlr->rxbcastpkt+= reg->rxbcastpkt;
	ctlr->rxmcastpkt+= reg->rxmcastpkt;
	ctlr->rx64	+= reg->rx64;
	ctlr->rx65_127	+= reg->rx65_127;
	ctlr->rx128_255	+= reg->rx128_255;
	ctlr->rx256_511	+= reg->rx256_511;
	ctlr->rx512_1023+= reg->rx512_1023;
	ctlr->rx1024_max+= reg->rx1024_max;
	ctlr->txby	+= reg->txby;
	ctlr->txpkt	+= reg->txpkt;
	ctlr->txcollpktdrop+= reg->txcollpktdrop;
	ctlr->txmcastpkt+= reg->txmcastpkt;
	ctlr->txbcastpkt+= reg->txbcastpkt;
	ctlr->badmacctlpkts+= reg->badmacctlpkts;
	ctlr->txflctl	+= reg->txflctl;
	ctlr->rxflctl	+= reg->rxflctl;
	ctlr->badrxflctl+= reg->badrxflctl;
	ctlr->rxundersized+= reg->rxundersized;
	ctlr->rxfrags	+= reg->rxfrags;
	ctlr->rxtoobig	+= reg->rxtoobig;
	ctlr->rxjabber	+= reg->rxjabber;
	ctlr->rxerr	+= reg->rxerr;
	ctlr->crcerr	+= reg->crcerr;
	ctlr->collisions+= reg->collisions;
	ctlr->latecoll	+= reg->latecoll;
}
  1187. long
  1188. ifstat(Ether *ether, void *a, long n, ulong off)
  1189. {
  1190. Ctlr *ctlr = ether->ctlr;
  1191. Gbereg *reg = ctlr->reg;
  1192. char *buf, *p, *e;
  1193. buf = p = malloc(READSTR);
  1194. e = p + READSTR;
  1195. ilock(ctlr);
  1196. getmibstats(ctlr);
  1197. ctlr->intrs += ctlr->newintrs;
  1198. p = seprint(p, e, "interrupts: %lud\n", ctlr->intrs);
  1199. p = seprint(p, e, "new interrupts: %lud\n", ctlr->newintrs);
  1200. ctlr->newintrs = 0;
  1201. p = seprint(p, e, "tx underrun: %lud\n", ctlr->txunderrun);
  1202. p = seprint(p, e, "tx ring full: %lud\n", ctlr->txringfull);
  1203. ctlr->rxdiscard += reg->pxdfc;
  1204. ctlr->rxoverrun += reg->pxofc;
  1205. p = seprint(p, e, "rx discarded frames: %lud\n", ctlr->rxdiscard);
  1206. p = seprint(p, e, "rx overrun frames: %lud\n", ctlr->rxoverrun);
  1207. p = seprint(p, e, "no first+last flag: %lud\n", ctlr->nofirstlast);
  1208. p = seprint(p, e, "duplex: %s\n", (reg->ps0 & PS0fd)? "full": "half");
  1209. p = seprint(p, e, "flow control: %s\n", (reg->ps0 & PS0flctl)? "on": "off");
  1210. /* p = seprint(p, e, "speed: %d mbps\n", ); */
  1211. p = seprint(p, e, "received bytes: %llud\n", ctlr->rxby);
  1212. p = seprint(p, e, "bad received bytes: %lud\n", ctlr->badrxby);
  1213. p = seprint(p, e, "internal mac transmit errors: %lud\n", ctlr->mactxerr);
  1214. p = seprint(p, e, "total received frames: %lud\n", ctlr->rxpkt);
  1215. p = seprint(p, e, "received broadcast frames: %lud\n", ctlr->rxbcastpkt);
  1216. p = seprint(p, e, "received multicast frames: %lud\n", ctlr->rxmcastpkt);
  1217. p = seprint(p, e, "bad received frames: %lud\n", ctlr->badrxpkt);
  1218. p = seprint(p, e, "received frames 0-64: %lud\n", ctlr->rx64);
  1219. p = seprint(p, e, "received frames 65-127: %lud\n", ctlr->rx65_127);
  1220. p = seprint(p, e, "received frames 128-255: %lud\n", ctlr->rx128_255);
  1221. p = seprint(p, e, "received frames 256-511: %lud\n", ctlr->rx256_511);
  1222. p = seprint(p, e, "received frames 512-1023: %lud\n", ctlr->rx512_1023);
  1223. p = seprint(p, e, "received frames 1024-max: %lud\n", ctlr->rx1024_max);
  1224. p = seprint(p, e, "transmitted bytes: %llud\n", ctlr->txby);
  1225. p = seprint(p, e, "total transmitted frames: %lud\n", ctlr->txpkt);
  1226. p = seprint(p, e, "transmitted broadcast frames: %lud\n", ctlr->txbcastpkt);
  1227. p = seprint(p, e, "transmitted multicast frames: %lud\n", ctlr->txmcastpkt);
  1228. p = seprint(p, e, "transmit frames dropped by collision: %lud\n", ctlr->txcollpktdrop);
  1229. p = seprint(p, e, "misaligned buffers: %lud\n", ether->pktsmisaligned);
  1230. p = seprint(p, e, "bad mac control frames: %lud\n", ctlr->badmacctlpkts);
  1231. p = seprint(p, e, "transmitted flow control messages: %lud\n", ctlr->txflctl);
  1232. p = seprint(p, e, "received flow control messages: %lud\n", ctlr->rxflctl);
  1233. p = seprint(p, e, "bad received flow control messages: %lud\n", ctlr->badrxflctl);
  1234. p = seprint(p, e, "received undersized packets: %lud\n", ctlr->rxundersized);
  1235. p = seprint(p, e, "received fragments: %lud\n", ctlr->rxfrags);
  1236. p = seprint(p, e, "received oversized packets: %lud\n", ctlr->rxtoobig);
  1237. p = seprint(p, e, "received jabber packets: %lud\n", ctlr->rxjabber);
  1238. p = seprint(p, e, "mac receive errors: %lud\n", ctlr->rxerr);
  1239. p = seprint(p, e, "crc errors: %lud\n", ctlr->crcerr);
  1240. p = seprint(p, e, "collisions: %lud\n", ctlr->collisions);
  1241. p = seprint(p, e, "late collisions: %lud\n", ctlr->latecoll);
  1242. USED(p);
  1243. iunlock(ctlr);
  1244. n = readstr(off, a, n, buf);
  1245. free(buf);
  1246. return n;
  1247. }
  1248. static int
  1249. reset(Ether *ether)
  1250. {
  1251. Ctlr *ctlr;
  1252. static uchar zeroea[Eaddrlen];
  1253. ether->ctlr = ctlr = malloc(sizeof *ctlr);
  1254. switch(ether->ctlrno) {
  1255. case 0:
  1256. ctlr->reg = (Gbereg*)Gbe0regs;
  1257. ether->irq = IRQ0gbe0sum;
  1258. break;
  1259. case 1:
  1260. ctlr->reg = (Gbereg*)Gbe1regs;
  1261. ether->irq = IRQ0gbe1sum;
  1262. break;
  1263. default:
  1264. panic("etherkw: bad ether ctlr #%d", ether->ctlrno);
  1265. }
  1266. /* io cfg 0: 1.8v gbe */
  1267. // *(ulong *)0xf10100e0 |= 1 << 7 | 1 << 15;
  1268. portreset(ctlr->reg);
  1269. /* ensure that both interfaces are set to RGMII before calling mii */
  1270. ((Gbereg*)Gbe0regs)->psc1 |= PSC1rgmii;
  1271. ((Gbereg*)Gbe1regs)->psc1 |= PSC1rgmii;
  1272. /* Set phy address of the port */
  1273. ctlr->port = ether->ctlrno;
  1274. ctlr->reg->phy = ether->ctlrno;
  1275. coherence();
  1276. ether->port = (uintptr)ctlr->reg;
  1277. if(kirkwoodmii(ether) < 0){
  1278. free(ctlr);
  1279. ether->ctlr = nil;
  1280. return -1;
  1281. }
  1282. miiphyinit(ctlr->mii);
  1283. archetheraddr(ether, ctlr->reg, Qno); /* original location */
  1284. if (memcmp(ether->ea, zeroea, sizeof zeroea) == 0){
  1285. free(ctlr);
  1286. ether->ctlr = nil;
  1287. return -1; /* no rj45 for this ether */
  1288. }
  1289. ether->attach = attach;
  1290. ether->transmit = transmit;
  1291. ether->interrupt = interrupt;
  1292. ether->ifstat = ifstat;
  1293. ether->shutdown = shutdown;
  1294. ether->ctl = ctl;
  1295. ether->arg = ether;
  1296. ether->promiscuous = promiscuous;
  1297. ether->multicast = multicast;
  1298. return 0;
  1299. }
/* register this driver with the ethernet framework at link time */
void
etherkwlink(void)
{
	addethercard("kirkwood", reset);
}