/* ether1116.c */
  1. /*
  2. * marvell kirkwood ethernet (88e1116 and 88e1121) driver
  3. * (as found in the sheevaplug, openrd and guruplug).
  4. * the main difference is the flavour of phy kludgery necessary.
  5. *
  6. * from /public/doc/marvell/88f61xx.kirkwood.pdf,
  7. * /public/doc/marvell/88e1116.pdf, and
  8. * /public/doc/marvell/88e1121r.pdf.
  9. */
  10. #include "u.h"
  11. #include "../port/lib.h"
  12. #include "mem.h"
  13. #include "dat.h"
  14. #include "fns.h"
  15. #include "io.h"
  16. #include "../port/error.h"
  17. #include "../port/netif.h"
  18. #include "etherif.h"
  19. #include "ethermii.h"
  20. #include "../ip/ip.h"
  21. #define MIIDBG if(0)iprint
  22. #define WINATTR(v) (((v) & MASK(8)) << 8)
  23. #define WINSIZE(v) (((v)/(64*1024) - 1) << 16)
enum {
	Gbe0regs	= PHYSIO + 0x72000,	/* port 0 register base */
	Gbe1regs	= PHYSIO + 0x76000,	/* port 1 register base */

	Nrx		= 512,			/* receive descriptors in the ring */
	Ntx		= 512,			/* transmit descriptors in the ring */
	Nrxblks		= 1024,			/* spare receive buffer Blocks */
	Rxblklen	= 2+1522,	/* ifc. supplies first 2 bytes as padding */

	Maxrxintrsec	= 20*1000,	/* max. rx intrs. / sec */
	Etherstuck	= 70,	/* must send or receive a packet in this many sec.s */

	Descralign	= 16,		/* descriptor alignment (asserted below) */
	Bufalign	= 8,		/* packet buffer alignment (asserted below) */

	Pass		= 1,		/* accept packets */
	Qno		= 0,		/* do everything on queue zero */
};
  38. typedef struct Ctlr Ctlr;
  39. typedef struct Gbereg Gbereg;
  40. typedef struct Mibstats Mibstats;
  41. typedef struct Rx Rx;
  42. typedef struct Tx Tx;
/*
 * pool of spare receive Blocks: rxfreeb pushes recycled buffers,
 * rxallocb pops them.  guarded by the embedded Lock.
 */
static struct {
	Lock;
	Block	*head;		/* singly-linked free list via b->next */
} freeblocks;
/* hardware receive buffer descriptor; layout fixed by the hardware */
struct Rx {
	ulong	cs;		/* command & status (RCS* bits) */
	ulong	countsize;	/* bytes rcv'd<<16 | buffer size (Bufsize) */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Rx */
};
/* hardware transmit buffer descriptor; layout fixed by the hardware */
struct Tx {
	ulong	cs;		/* command & status (TCS* bits) */
	ulong	countchk;	/* byte count<<16 | checksum */
	ulong	buf;		/* phys. addr. of packet buffer */
	ulong	next;		/* phys. addr. of next Tx */
};
/*
 * mac mib counters.  fixed by hw; part of Gberegs, so field order and
 * sizes must not change.  read periodically by getmibstats (called
 * from rcvproc) into the Ctlr's embedded shadow copy.
 */
struct Mibstats {
	union {
		uvlong	rxby;		/* good bytes rcv'd */
		struct {
			ulong	rxbylo;
			ulong	rxbyhi;
		};
	};
	ulong	badrxby;		/* bad bytes rcv'd */
	ulong	mactxerr;		/* tx err pkts */
	ulong	rxpkt;			/* good pkts rcv'd */
	ulong	badrxpkt;		/* bad pkts rcv'd */
	ulong	rxbcastpkt;		/* b'cast pkts rcv'd */
	ulong	rxmcastpkt;		/* m'cast pkts rcv'd */

	/* rx size histogram */
	ulong	rx64;			/* pkts <= 64 bytes */
	ulong	rx65_127;		/* pkts 65—127 bytes */
	ulong	rx128_255;		/* pkts 128—255 bytes */
	ulong	rx256_511;		/* pkts 256—511 bytes */
	ulong	rx512_1023;		/* pkts 512—1023 bytes */
	ulong	rx1024_max;		/* pkts >= 1024 bytes */

	union {
		uvlong	txby;		/* good bytes sent */
		struct {
			ulong	txbylo;
			ulong	txbyhi;
		};
	};
	ulong	txpkt;			/* good pkts sent */
	/* half-duplex: pkts dropped due to excessive collisions */
	ulong	txcollpktdrop;
	ulong	txmcastpkt;		/* m'cast pkts sent */
	ulong	txbcastpkt;		/* b'cast pkts sent */

	ulong	badmacctlpkts;		/* bad mac ctl pkts */
	ulong	txflctl;		/* flow-control pkts sent */
	ulong	rxflctl;		/* good flow-control pkts rcv'd */
	ulong	badrxflctl;		/* bad flow-control pkts rcv'd */

	ulong	rxundersized;		/* runts */
	ulong	rxfrags;		/* fragments rcv'd */
	ulong	rxtoobig;		/* oversized pkts rcv'd */
	ulong	rxjabber;		/* jabber pkts rcv'd */
	ulong	rxerr;			/* rx error events */
	ulong	crcerr;			/* crc error events */
	ulong	collisions;		/* collision events */
	ulong	latecoll;		/* late collisions */
};
/* software state of one gigabit ethernet controller */
struct Ctlr {
	Lock;
	Ether	*ether;
	Gbereg	*reg;		/* memory-mapped controller registers */

	Lock	initlock;
	int	init;		/* presumably set once at first attach — confirm */

	Rx	*rx;		/* receive descriptors */
	Block	*rxb[Nrx];	/* blocks belonging to the descriptors */
	int	rxhead;		/* descr ethernet will write to next */
	int	rxtail;		/* next descr that might need a buffer */
	Rendez	rrendez;	/* interrupt wakes up read process */
	int	haveinput;	/* set by interrupt(), consumed by rcvproc() */

	Tx	*tx;		/* transmit descriptors */
	Block	*txb[Ntx];	/* blocks under the tx descriptors */
	int	txhead;		/* next descr we can use for new packet */
	int	txtail;		/* next descr to reclaim on tx complete */

	Mii	*mii;
	int	port;

	/* stats */
	ulong	intrs;
	ulong	newintrs;	/* incremented on every interrupt() */
	ulong	txunderrun;
	ulong	txringfull;
	ulong	rxdiscard;
	ulong	rxoverrun;
	ulong	nofirstlast;	/* pkts lacking both RCSfirst and RCSlast */

	Mibstats;		/* shadow of the hw counters */
};
  135. #define Rxqon(q) (1<<(q))
  136. #define Txqon(q) (1<<(q))
  137. enum {
  138. /* euc bits */
  139. Portreset = 1 << 20,
  140. /* sdma config, sdc bits */
  141. Burst1 = 0,
  142. Burst2,
  143. Burst4,
  144. Burst8,
  145. Burst16,
  146. SDCrifb = 1<<0, /* rx intr on pkt boundaries */
  147. #define SDCrxburst(v) ((v)<<1)
  148. SDCrxnobyteswap = 1<<4,
  149. SDCtxnobyteswap = 1<<5,
  150. SDCswap64byte = 1<<6,
  151. #define SDCtxburst(v) ((v)<<22)
  152. /* rx intr ipg (inter packet gap) */
  153. #define SDCipgintrx(v) ((((v)>>15) & 1)<<25) | (((v) & MASK(15))<<7)
  154. /* portcfg bits */
  155. PCFGupromisc = 1<<0, /* unicast promiscuous mode */
  156. #define Rxqdefault(q) ((q)<<1)
  157. #define Rxqarp(q) ((q)<<4)
  158. PCFGbcrejectnoiparp = 1<<7,
  159. PCFGbcrejectip = 1<<8,
  160. PCFGbcrejectarp = 1<<9,
  161. PCFGamnotxes = 1<<12, /* auto mode, no summary update on tx */
  162. PCFGtcpq = 1<<14, /* capture tcp frames to tcpq */
  163. PCFGudpq = 1<<15, /* capture udp frames to udpq */
  164. #define Rxqtcp(q) ((q)<<16)
  165. #define Rxqudp(q) ((q)<<19)
  166. #define Rxqbpdu(q) ((q)<<22)
  167. PCFGrxcs = 1<<25, /* rx tcp checksum mode with header */
  168. /* portcfgx bits */
  169. PCFGXspanq = 1<<1,
  170. PCFGXcrcoff = 1<<2, /* no ethernet crc */
  171. /* port serial control0, psc0 bits */
  172. PSC0porton = 1<<0,
  173. PSC0forcelinkup = 1<<1,
  174. PSC0an_dplxoff = 1<<2, /* an_ = auto. negotiate */
  175. PSC0an_flctloff = 1<<3,
  176. PSC0an_pauseadv = 1<<4,
  177. PSC0nofrclinkdown = 1<<10,
  178. PSC0an_spdoff = 1<<13,
  179. PSC0dteadv = 1<<14, /* dte advertise */
  180. /* max. input pkt size */
  181. #define PSC0mru(v) ((v)<<17)
  182. PSC0mrumask = PSC0mru(MASK(3)),
  183. PSC0mru1518 = 0, /* 1500+2* 6(addrs) +2 + 4(crc) */
  184. PSC0mru1522, /* 1518 + 4(vlan tags) */
  185. PSC0mru1552, /* `baby giant' */
  186. PSC0mru9022, /* `jumbo' */
  187. PSC0mru9192, /* bigger jumbo */
  188. PSC0mru9700, /* still bigger jumbo */
  189. PSC0fd_frc = 1<<21, /* force full duplex */
  190. PSC0flctlfrc = 1<<22,
  191. PSC0gmiispd_gbfrc = 1<<23,
  192. PSC0miispdfrc100mbps = 1<<24,
  193. /* port status 0, ps0 bits */
  194. PS0linkup = 1<<1,
  195. PS0fd = 1<<2, /* full duplex */
  196. PS0flctl = 1<<3,
  197. PS0gmii_gb = 1<<4,
  198. PS0mii100mbps = 1<<5,
  199. PS0txbusy = 1<<7,
  200. PS0txfifoempty = 1<<10,
  201. PS0rxfifo1empty = 1<<11,
  202. PS0rxfifo2empty = 1<<12,
  203. /* port serial control 1, psc1 bits */
  204. PSC1loopback = 1<<1,
  205. PSC1mii = 0<<2,
  206. PSC1rgmii = 1<<3, /* enable RGMII */
  207. PSC1portreset = 1<<4,
  208. PSC1clockbypass = 1<<5,
  209. PSC1iban = 1<<6,
  210. PSC1iban_bypass = 1<<7,
  211. PSC1iban_restart= 1<<8,
  212. PSC1_gbonly = 1<<11,
  213. PSC1encolonbp = 1<<15, /* "collision during back-pressure mib counting" */
  214. PSC1coldomlimmask= MASK(6)<<16,
  215. #define PSC1coldomlim(v) (((v) & MASK(6))<<16)
  216. PSC1miiallowoddpreamble = 1<<22,
  217. /* port status 1, ps1 bits */
  218. PS1rxpause = 1<<0,
  219. PS1txpause = 1<<1,
  220. PS1pressure = 1<<2,
  221. PS1syncfail10ms = 1<<3,
  222. PS1an_done = 1<<4,
  223. PS1inbandan_bypassed = 1<<5,
  224. PS1serdesplllocked = 1<<6,
  225. PS1syncok = 1<<7,
  226. PS1nosquelch = 1<<8,
  227. /* irq bits */
  228. /* rx buf returned to cpu ownership, or frame reception finished */
  229. Irx = 1<<0,
  230. Iextend = 1<<1, /* IEsum of irqe set */
  231. #define Irxbufferq(q) (1<<((q)+2)) /* rx buf returned to cpu ownership */
  232. Irxerr = 1<<10, /* input ring full, usually */
  233. #define Irxerrq(q) (1<<((q)+11))
  234. #define Itxendq(q) (1<<((q)+19)) /* tx dma stopped for q */
  235. Isum = 1<<31,
  236. /* irq extended, irqe bits */
  237. #define IEtxbufferq(q) (1<<((q)+0)) /* tx buf returned to cpu ownership */
  238. #define IEtxerrq(q) (1<<((q)+8))
  239. IEphystschg = 1<<16,
  240. IEptp = 1<<17,
  241. IErxoverrun = 1<<18,
  242. IEtxunderrun = 1<<19,
  243. IElinkchg = 1<<20,
  244. IEintaddrerr = 1<<23,
  245. IEprbserr = 1<<25,
  246. IEsum = 1<<31,
  247. /* tx fifo urgent threshold (tx interrupt coalescing), pxtfut */
  248. #define TFUTipginttx(v) (((v) & MASK(16))<<4);
  249. /* minimal frame size, mfs */
  250. MFS40by = 10<<2,
  251. MFS44by = 11<<2,
  252. MFS48by = 12<<2,
  253. MFS52by = 13<<2,
  254. MFS56by = 14<<2,
  255. MFS60by = 15<<2,
  256. MFS64by = 16<<2,
  257. /* receive descriptor */
  258. #define Bufsize(v) ((v)<<3)
  259. /* receive descriptor status */
  260. RCSmacerr = 1<<0,
  261. RCSmacmask = 3<<1,
  262. RCSmacce = 0<<1,
  263. RCSmacor = 1<<1,
  264. RCSmacmf = 2<<1,
  265. RCSl4chkshift = 3,
  266. RCSl4chkmask = MASK(16),
  267. RCSvlan = 1<<17,
  268. RCSbpdu = 1<<18,
  269. RCSl4mask = 3<<21,
  270. RCSl4tcp4 = 0<<21,
  271. RCSl4udp4 = 1<<21,
  272. RCSl4other = 2<<21,
  273. RCSl4rsvd = 3<<21,
  274. RCSl2ev2 = 1<<23,
  275. RCSl3ip4 = 1<<24,
  276. RCSip4headok = 1<<25,
  277. RCSlast = 1<<26,
  278. RCSfirst = 1<<27,
  279. RCSunknownaddr = 1<<28,
  280. RCSenableintr = 1<<29,
  281. RCSl4chkok = 1<<30,
  282. RCSdmaown = 1<<31,
  283. /* transmit descriptor status */
  284. TCSmacerr = 1<<0,
  285. TCSmacmask = 3<<1,
  286. TCSmaclc = 0<<1,
  287. TCSmacur = 1<<1,
  288. TCSmacrl = 2<<1,
  289. TCSllc = 1<<9,
  290. TCSl4chkmode = 1<<10,
  291. TCSipv4hdlenshift= 11,
  292. TCSvlan = 1<<15,
  293. TCSl4type = 1<<16,
  294. TCSgl4chk = 1<<17,
  295. TCSgip4chk = 1<<18,
  296. TCSpadding = 1<<19,
  297. TCSlast = 1<<20,
  298. TCSfirst = 1<<21,
  299. TCSenableintr = 1<<23,
  300. TCSautomode = 1<<30,
  301. TCSdmaown = 1<<31,
  302. };
enum {
	/* SMI regs: fields of the smi register, for talking to the phy */
	PhysmiTimeout	= 10000,	/* what units? in ms. */
	Physmidataoff	= 0,		/* Data */
	Physmidatamask	= 0xffff<<Physmidataoff,
	Physmiaddroff	= 16,		/* PHY device addr */
	Physmiaddrmask	= 0x1f << Physmiaddroff,
	Physmiop	= 26,		/* operation code field */
	Physmiopmask	= 3<<Physmiop,
	PhysmiopWr	= 0<<Physmiop,
	PhysmiopRd	= 1<<Physmiop,
	PhysmiReadok	= 1<<27,	/* read finished; data presumably valid */
	PhysmiBusy	= 1<<28,	/* transaction in progress */
	SmiRegaddroff	= 21,		/* PHY device register addr */
	SmiRegaddrmask	= 0x1f << SmiRegaddroff,
};
  319. struct Gbereg {
  320. ulong phy; /* PHY address */
  321. ulong smi; /* serial mgmt. interface */
  322. ulong euda; /* ether default address */
  323. ulong eudid; /* ether default id */
  324. uchar _pad0[0x80-0x10];
  325. /* dma stuff */
  326. ulong euirq; /* interrupt cause */
  327. ulong euirqmask; /* interrupt mask */
  328. uchar _pad1[0x94-0x88];
  329. ulong euea; /* error address */
  330. ulong euiae; /* internal error address */
  331. uchar _pad2[0xb0-0x9c];
  332. ulong euc; /* control */
  333. uchar _pad3[0x200-0xb4];
  334. struct {
  335. ulong base; /* window base */
  336. ulong size; /* window size */
  337. } base[6];
  338. uchar _pad4[0x280-0x230];
  339. ulong harr[4]; /* high address remap */
  340. ulong bare; /* base address enable */
  341. ulong epap; /* port access protect */
  342. uchar _pad5[0x400-0x298];
  343. ulong portcfg; /* port configuration */
  344. ulong portcfgx; /* port config. extend */
  345. ulong mii; /* mii serial parameters */
  346. ulong _pad6;
  347. ulong evlane; /* vlan ether type */
  348. ulong macal; /* mac address low */
  349. ulong macah; /* mac address high */
  350. ulong sdc; /* sdma config. */
  351. ulong dscp[7]; /* ip diff. serv. code point -> pri */
  352. ulong psc0; /* port serial control 0 */
  353. ulong vpt2p; /* vlan priority tag -> pri */
  354. ulong ps0; /* ether port status 0 */
  355. ulong tqc; /* transmit queue command */
  356. ulong psc1; /* port serial control 1 */
  357. ulong ps1; /* ether port status 1 */
  358. ulong mvhdr; /* marvell header */
  359. ulong _pad8[2];
  360. /* interrupts */
  361. ulong irq; /* interrupt cause; some rw0c bits */
  362. ulong irqe; /* " " extended; some rw0c bits */
  363. ulong irqmask; /* interrupt mask (actually enable) */
  364. ulong irqemask; /* " " extended */
  365. ulong _pad9;
  366. ulong pxtfut; /* port tx fifo urgent threshold */
  367. ulong _pad10;
  368. ulong pxmfs; /* port rx minimum frame size */
  369. ulong _pad11;
  370. /*
  371. * # of input frames discarded by addr filtering or lack of resources;
  372. * zeroed upon read.
  373. */
  374. ulong pxdfc; /* port rx discard frame counter */
  375. ulong pxofc; /* port overrun frame counter */
  376. ulong _pad12[2];
  377. ulong piae; /* port internal address error */
  378. uchar _pad13[0x4bc-0x498];
  379. ulong etherprio; /* ether type priority */
  380. uchar _pad14[0x4dc-0x4c0];
  381. ulong tqfpc; /* tx queue fixed priority config. */
  382. ulong pttbrc; /* port tx token-bucket rate config. */
  383. ulong tqc1; /* tx queue command 1 */
  384. ulong pmtu; /* port maximum transmit unit */
  385. ulong pmtbs; /* port maximum token bucket size */
  386. uchar _pad15[0x600-0x4f0];
  387. struct {
  388. ulong _pad[3];
  389. ulong r; /* phys. addr.: cur. rx desc. ptrs */
  390. } crdp[8];
  391. ulong rqc; /* rx queue command */
  392. ulong tcsdp; /* phys. addr.: cur. tx desc. ptr */
  393. uchar _pad16[0x6c0-0x688];
  394. ulong tcqdp[8]; /* phys. addr.: cur. tx q. desc. ptr */
  395. uchar _pad17[0x700-0x6e0];
  396. struct {
  397. ulong tbctr; /* queue tx token-bucket counter */
  398. ulong tbcfg; /* tx queue token-bucket config. */
  399. ulong acfg; /* tx queue arbiter config. */
  400. ulong _pad;
  401. } tq[8];
  402. ulong pttbc; /* port tx token-bucket counter */
  403. uchar _pad18[0x7a8-0x784];
  404. ulong ipg2; /* tx queue ipg */
  405. ulong _pad19[3];
  406. ulong ipg3;
  407. ulong _pad20;
  408. ulong htlp; /* high token in low packet */
  409. ulong htap; /* high token in async packet */
  410. ulong ltap; /* low token in async packet */
  411. ulong _pad21;
  412. ulong ts; /* tx speed */
  413. uchar _pad22[0x1000-0x7d4];
  414. /* mac mib counters: statistics */
  415. Mibstats;
  416. uchar _pad23[0x1400-0x1080];
  417. /* multicast filtering; each byte: Qno<<1 | Pass */
  418. ulong dfsmt[64]; /* dest addr filter special m'cast table */
  419. ulong dfomt[64]; /* dest addr filter other m'cast table */
  420. /* unicast filtering */
  421. ulong dfut[4]; /* dest addr filter unicast table */
  422. };
  423. static Ctlr *ctlrs[MaxEther];
  424. static uchar zeroea[Eaddrlen];
  425. static void getmibstats(Ctlr *);
/*
 * Block free routine for receive buffers (installed as b->free).
 * called when the last reference is dropped; instead of releasing the
 * Block, resurrect it, rewind its pointers to the aligned start of the
 * receive area, and push it back onto the freeblocks pool for rxallocb.
 */
static void
rxfreeb(Block *b)
{
	/* freeb(b) will have previously decremented b->ref to 0; raise to 1 */
	_xinc(&b->ref);
//	iprint("fr %ld ", b->ref);

	/* rewind to the Bufalign-aligned point Rxblklen bytes before lim */
	b->wp = b->rp =
		(uchar*)((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
	assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
	b->free = rxfreeb;		/* keep the Block on the recycling path */

	ilock(&freeblocks);
	b->next = freeblocks.head;
	freeblocks.head = b;
	iunlock(&freeblocks);
}
  441. static Block *
  442. rxallocb(void)
  443. {
  444. Block *b;
  445. ilock(&freeblocks);
  446. b = freeblocks.head;
  447. if(b != nil) {
  448. freeblocks.head = b->next;
  449. b->next = nil;
  450. b->free = rxfreeb;
  451. }
  452. iunlock(&freeblocks);
  453. return b;
  454. }
  455. static void
  456. rxkick(Ctlr *ctlr)
  457. {
  458. Gbereg *reg = ctlr->reg;
  459. if (reg->crdp[Qno].r == 0)
  460. reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
  461. if ((reg->rqc & 0xff) == 0) /* all queues are stopped? */
  462. reg->rqc = Rxqon(Qno); /* restart */
  463. coherence();
  464. }
  465. static void
  466. txkick(Ctlr *ctlr)
  467. {
  468. Gbereg *reg = ctlr->reg;
  469. if (reg->tcqdp[Qno] == 0)
  470. reg->tcqdp[Qno] = PADDR(&ctlr->tx[ctlr->txhead]);
  471. if ((reg->tqc & 0xff) == 0) /* all q's stopped? */
  472. reg->tqc = Txqon(Qno); /* restart */
  473. coherence();
  474. }
/*
 * attach a fresh buffer Block to each receive descriptor that lacks
 * one (from rxtail onward) and hand those descriptors to the dma
 * engine.  stops early if the free-block pool runs dry.
 */
static void
rxreplenish(Ctlr *ctlr)
{
	Rx *r;
	Block *b;

	while(ctlr->rxb[ctlr->rxtail] == nil) {
		b = rxallocb();
		if(b == nil) {
			iprint("#l%d: rxreplenish out of buffers\n",
				ctlr->ether->ctlrno);
			break;
		}

		ctlr->rxb[ctlr->rxtail] = b;

		/* set up receive descriptor */
		r = &ctlr->rx[ctlr->rxtail];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->countsize = Bufsize(Rxblklen);
		r->buf = PADDR(b->rp);
		/* flush the descriptor body to memory before publishing it */
		cachedwbse(r, sizeof *r);
		l2cacheuwbse(r, sizeof *r);

		/* and fire: pass ownership to the dma engine last */
		r->cs = RCSdmaown | RCSenableintr;
		cachedwbse(&r->cs, BY2SE);
		l2cacheuwbse(&r->cs, BY2SE);

		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
	}
}
  502. static void
  503. dump(uchar *bp, long max)
  504. {
  505. if (max > 64)
  506. max = 64;
  507. for (; max > 0; max--, bp++)
  508. iprint("%02.2ux ", *bp);
  509. print("...\n");
  510. }
/* record the time (seconds since boot) of the last known-good activity */
static void
etheractive(Ether *ether)
{
	ether->starttime = TK2MS(MACHP(0)->ticks)/1000;
}
  516. static void
  517. ethercheck(Ether *ether)
  518. {
  519. if (ether->starttime != 0 &&
  520. TK2MS(MACHP(0)->ticks)/1000 - ether->starttime > Etherstuck) {
  521. etheractive(ether);
  522. iprint("ethernet stuck\n");
  523. }
  524. }
/*
 * harvest completed packets from the receive ring and hand them to the
 * network stack.  runs in rcvproc's context, not at interrupt level.
 * stops at the first descriptor still owned by the dma engine, and
 * replenishes buffers periodically so the ring never drains.
 */
static void
receive(Ether *ether)
{
	int i;
	ulong n;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Rx *r;

	ethercheck(ether);
	for (i = Nrx-2; i > 0; i--) {		/* bound the work per call */
		r = &ctlr->rx[ctlr->rxhead];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		/* invalidate our cached copy before reading what dma wrote */
		l2cacheuinvse(r, sizeof *r);
		cachedinvse(r, sizeof *r);
		if(r->cs & RCSdmaown)		/* hw not done with it yet */
			break;

		b = ctlr->rxb[ctlr->rxhead];
		if (b == nil)
			panic("ether1116: nil ctlr->rxb[ctlr->rxhead] "
				"in receive");
		ctlr->rxb[ctlr->rxhead] = nil;
		ctlr->rxhead = NEXT(ctlr->rxhead, Nrx);

		/* we only queue whole-packet descriptors; drop fragments */
		if((r->cs & (RCSfirst|RCSlast)) != (RCSfirst|RCSlast)) {
			ctlr->nofirstlast++;
			freeb(b);
			continue;
		}
		if(r->cs & RCSmacerr) {		/* mac-level receive error */
			freeb(b);
			continue;
		}

		n = r->countsize >> 16;		/* TODO includes 2 pad bytes? */
		assert(n >= 2 && n < 2048);
		/* invalidate the packet bytes the dma engine deposited */
		l2cacheuinvse(b->rp, n+2);
		cachedinvse(b->rp, n+2);

		b->wp = b->rp + n;
		/*
		 * skip hardware padding intended to align ipv4 address
		 * in memory (mv-s104860-u0 §8.3.4.1)
		 */
		b->rp += 2;
		etheriq(ether, b, 1);
		etheractive(ether);
		if (i % (Nrx / 2) == 0) {	/* top up the ring midway */
			rxreplenish(ctlr);
			rxkick(ctlr);
		}
	}
	rxreplenish(ctlr);
	rxkick(ctlr);
}
/*
 * reclaim descriptors of completed transmissions, freeing the Blocks
 * that were pinned under them.  stops at the first descriptor still
 * owned by the dma engine.
 */
static void
txreplenish(Ether *ether)			/* free transmitted packets */
{
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	while(ctlr->txtail != ctlr->txhead) {
		/* refetch the status word the dma engine may have updated */
		l2cacheuinvse(&ctlr->tx[ctlr->txtail].cs, BY2SE);
		cachedinvse(&ctlr->tx[ctlr->txtail].cs, BY2SE);
		if(ctlr->tx[ctlr->txtail].cs & TCSdmaown)  /* still sending? */
			break;

		if(ctlr->txb[ctlr->txtail] == nil)
			panic("no block for sent packet?!");
		freeb(ctlr->txb[ctlr->txtail]);
		ctlr->txb[ctlr->txtail] = nil;

		ctlr->txtail = NEXT(ctlr->txtail, Ntx);
		etheractive(ether);
	}
}
/*
 * transmit strategy: fill the output ring as far as possible,
 * perhaps leaving a few spare; kick off the output and take
 * an interrupt only when the transmit queue is empty.
 */
static void
transmit(Ether *ether)
{
	int i, kick, len;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Tx *t;

	ethercheck(ether);
	ilock(ctlr);
	txreplenish(ether);			/* reap old packets */

	/* queue new packets; don't use more than half the tx descs. */
	kick = 0;
	for (i = Ntx/2 - 2; i > 0; i--) {
		t = &ctlr->tx[ctlr->txhead];
		assert(((uintptr)t & (Descralign - 1)) == 0);
		/* refetch the descriptor; the dma engine may still own it */
		l2cacheuinvse(t, sizeof *t);
		cachedinvse(t, sizeof *t);
		if(t->cs & TCSdmaown) {		/* free descriptor? */
			ctlr->txringfull++;
			break;
		}

		b = qget(ether->oq);		/* outgoing packet? */
		if (b == nil)
			break;
		len = BLEN(b);
		if(len < ether->minmtu || len > ether->maxmtu) {
			freeb(b);
			continue;
		}
		ctlr->txb[ctlr->txhead] = b;	/* pin until tx complete */

		/* make sure the whole packet is in memory */
		cachedwbse(b->rp, len);
		l2cacheuwbse(b->rp, len);

		/* set up the transmit descriptor */
		t->buf = PADDR(b->rp);
		t->countchk = len << 16;
		cachedwbse(t, sizeof *t);
		l2cacheuwbse(t, sizeof *t);

		/* and fire: ownership passes to the dma engine last */
		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
			TCSenableintr;
		cachedwbse(&t->cs, BY2SE);
		l2cacheuwbse(&t->cs, BY2SE);

		kick++;
		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
	}
	if (kick) {
		txkick(ctlr);
		/* ask for an interrupt when the queue drains or errs */
		reg->irqmask  |= Itxendq(Qno);
		reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
	}
	iunlock(ctlr);
}
  653. static void
  654. dumprxdescs(Ctlr *ctlr)
  655. {
  656. int i;
  657. Gbereg *reg = ctlr->reg;
  658. iprint("\nrxhead %d rxtail %d; txcdp %#p rxcdp %#p\n",
  659. ctlr->rxhead, ctlr->rxtail, reg->tcqdp[Qno], reg->crdp[Qno].r);
  660. for (i = 0; i < Nrx; i++) {
  661. iprint("rxb %d @ %#p: %#p\n", i, &ctlr->rxb[i], ctlr->rxb[i]);
  662. delay(50);
  663. }
  664. for (i = 0; i < Nrx; i++) {
  665. iprint("rx %d @ %#p: cs %#lux countsize %lud buf %#lux next %#lux\n",
  666. i, &ctlr->rx[i], ctlr->rx[i].cs,
  667. ctlr->rx[i].countsize >> 3, ctlr->rx[i].buf,
  668. ctlr->rx[i].next);
  669. delay(50);
  670. }
  671. delay(1000);
  672. }
  673. static int
  674. gotinput(void* ctlr)
  675. {
  676. return ((Ctlr*)ctlr)->haveinput != 0;
  677. }
/*
 * process any packets in the input ring.
 * also sum mib stats frequently to avoid the overflow
 * mentioned in the errata.
 */
static void
rcvproc(void* arg)
{
	Ctlr *ctlr;
	Ether *ether;

	ether = arg;
	ctlr = ether->ctlr;
	for(;;){
		/* wake on input, or every 10s just to poll the mib counters */
		tsleep(&ctlr->rrendez, gotinput, ctlr, 10*1000);
		ilock(ctlr);
		getmibstats(ctlr);
		if (ctlr->haveinput) {
			ctlr->haveinput = 0;
			iunlock(ctlr);
			receive(ether);		/* drain outside the lock */
		} else
			iunlock(ctlr);
	}
}
/*
 * interrupt service routine: snapshot and acknowledge all causes,
 * wake rcvproc for input, restart output on transmit-queue-empty,
 * and count the various error conditions.
 */
static void
interrupt(Ureg*, void *arg)
{
	ulong irq, irqe, handled;
	Ether *ether = arg;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	handled = 0;
	irq = reg->irq;
	irqe = reg->irqe;
	reg->irqe = 0;			/* extinguish intr causes */
	reg->irq = 0;			/* extinguish intr causes */
	ethercheck(ether);

	if(irq & (Irx | Irxbufferq(Qno))) {
		/*
		 * letting a kproc process the input takes far less real time
		 * than doing it all at interrupt level.
		 */
		ctlr->haveinput = 1;
		wakeup(&ctlr->rrendez);
		irq &= ~(Irx | Irxbufferq(Qno));
		handled++;
	} else
		rxkick(ctlr);

	if(irq & Itxendq(Qno)) {	/* transmit ring empty? */
		reg->irqmask  &= ~Itxendq(Qno);	/* prevent more interrupts */
		reg->irqemask &= ~(IEtxerrq(Qno) | IEtxunderrun);
		transmit(ether);	/* refill from the output queue */
		irq &= ~Itxendq(Qno);
		handled++;
	}

	if(irqe & IEsum) {
		/*
		 * IElinkchg appears to only be set when unplugging.
		 * autonegotiation is likely not done yet, so linkup not valid,
		 * thus we note the link change here, and check for
		 * that and autonegotiation done below.
		 */
		if(irqe & IEphystschg) {
			ether->link = (reg->ps0 & PS0linkup) != 0;
			ether->linkchg = 1;
		}
		if(irqe & IEtxerrq(Qno))
			ether->oerrs++;
		if(irqe & IErxoverrun)
			ether->overflows++;
		if(irqe & IEtxunderrun)
			ctlr->txunderrun++;
		if(irqe & (IEphystschg | IEtxerrq(Qno) | IErxoverrun |
		    IEtxunderrun))
			handled++;
	}
	if (irq & Isum) {
		if (irq & Irxerr) {
			ether->buffs++;		/* approx. error */
			/* null descriptor pointer or descriptor owned by cpu */
//			iprint("#l%d: rx err - input ring full\n", ether->ctlrno);

			/* if the input ring is full, drain it */
			ctlr->haveinput = 1;
			wakeup(&ctlr->rrendez);
		}
		if(irq & (Irxerr | Irxerrq(Qno)))
			handled++;
		irq &= ~(Irxerr | Irxerrq(Qno));
	}

	/* autonegotiation finished after a noted link change: latch state */
	if(ether->linkchg && (reg->ps1 & PS1an_done)) {
		handled++;
		ether->link = (reg->ps0 & PS0linkup) != 0;
		ether->linkchg = 0;
	}

	ctlr->newintrs++;
	if (!handled) {
		irq  &= ~Isum;
		irqe &= ~IEtxbufferq(Qno);
		if (irq == 0 && irqe == 0) {
			/* seems to be triggered by continuous output */
//			iprint("ether1116: spurious interrupt\n");
		} else
			iprint("ether1116: interrupt cause unknown; "
				"irq %#lux irqe %#lux\n", irq, irqe);
	}
	intrclear(Irqlo, ether->irq);
}
  785. void
  786. promiscuous(void *arg, int on)
  787. {
  788. Ether *ether = arg;
  789. Ctlr *ctlr = ether->ctlr;
  790. Gbereg *reg = ctlr->reg;
  791. ilock(ctlr);
  792. ether->prom = on;
  793. if(on)
  794. reg->portcfg |= PCFGupromisc;
  795. else
  796. reg->portcfg &= ~PCFGupromisc;
  797. iunlock(ctlr);
  798. }
/*
 * per-address multicast filter hook required by the Ether interface;
 * a no-op here because this driver always accepts multicast.
 */
void
multicast(void *, uchar *, int)
{
	/* nothing to do; we always accept multicast */
}
static void quiesce(Gbereg *reg);

/*
 * stop and reset the port: quiesce dma, pulse the ethernet-unit and
 * port-serial resets (with settle delays), and clear all descriptor
 * pointers so a subsequent attach starts from a clean slate.
 */
static void
shutdown(Ether *ether)
{
	int i;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	ilock(ctlr);
	quiesce(reg);
	reg->euc |= Portreset;
	coherence();
	iunlock(ctlr);

	delay(100);			/* let the reset take hold */

	ilock(ctlr);
	reg->euc &= ~Portreset;
	coherence();
	delay(20);

	reg->psc0 = 0;			/* no PSC0porton */
	reg->psc1 |= PSC1portreset;
	coherence();
	delay(50);
	reg->psc1 &= ~PSC1portreset;
	coherence();

	/* forget all rx & tx ring positions */
	for (i = 0; i < nelem(reg->tcqdp); i++)
		reg->tcqdp[i] = 0;
	for (i = 0; i < nelem(reg->crdp); i++)
		reg->crdp[i].r = 0;
	coherence();
	iunlock(ctlr);
}
/* ctl verbs understood by ctl() below */
enum {
	CMjumbo,			/* "jumbo on|off" */
};

static Cmdtab ctlmsg[] = {
	CMjumbo,	"jumbo",	2,	/* index, verb, expected field count */
};
/*
 * ether ctl file interface: currently only "jumbo on|off", which
 * would set the port's max receive unit.  errors are raised with
 * error()/nexterror(), which do not return; waserror() ensures the
 * parsed command buffer is freed on any error path.
 */
long
ctl(Ether *e, void *p, long n)
{
	Cmdbuf *cb;
	Cmdtab *ct;
	Ctlr *ctlr = e->ctlr;
	Gbereg *reg = ctlr->reg;

	cb = parsecmd(p, n);
	if(waserror()) {
		free(cb);
		nexterror();
	}
	ct = lookupcmd(cb, ctlmsg, nelem(ctlmsg));
	switch(ct->index) {
	case CMjumbo:
		if(strcmp(cb->f[1], "on") == 0) {
			/* incoming packet queue doesn't expect jumbo frames */
			error("jumbo disabled");
			/* unreachable while the error() above stands */
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru9022);
			e->maxmtu = 9022;
		} else if(strcmp(cb->f[1], "off") == 0) {
			reg->psc0 = (reg->psc0 & ~PSC0mrumask) |
				PSC0mru(PSC0mru1522);
			e->maxmtu = ETHERMAXTU;
		} else
			error(Ebadctl);
		break;
	default:
		error(Ebadctl);
		break;
	}
	free(cb);
	poperror();
	return n;
}
  876. /*
  877. * phy/mii goo
  878. */
  879. static int
  880. smibusywait(Gbereg *reg, ulong waitbit)
  881. {
  882. ulong timeout, smi_reg;
  883. timeout = PhysmiTimeout;
  884. /* wait till the SMI is not busy */
  885. do {
  886. /* read smi register */
  887. smi_reg = reg->smi;
  888. if (timeout-- == 0) {
  889. MIIDBG("SMI busy timeout\n");
  890. return -1;
  891. }
  892. // delay(1);
  893. } while (smi_reg & waitbit);
  894. return 0;
  895. }
  896. static int
  897. miird(Mii *mii, int pa, int ra)
  898. {
  899. ulong smi_reg, timeout;
  900. Gbereg *reg;
  901. reg = ((Ctlr*)mii->ctlr)->reg;
  902. /* check params */
  903. if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
  904. (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
  905. return -1;
  906. smibusywait(reg, PhysmiBusy);
  907. /* fill the phy address and register offset and read opcode */
  908. reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;
  909. coherence();
  910. /* wait til read value is ready */
  911. timeout = PhysmiTimeout;
  912. do {
  913. smi_reg = reg->smi;
  914. if (timeout-- == 0) {
  915. MIIDBG("SMI read-valid timeout\n");
  916. return -1;
  917. }
  918. } while (!(smi_reg & PhysmiReadok));
  919. /* Wait for the data to update in the SMI register */
  920. for (timeout = 0; timeout < PhysmiTimeout; timeout++)
  921. ;
  922. return reg->smi & Physmidatamask;
  923. }
  924. static int
  925. miiwr(Mii *mii, int pa, int ra, int v)
  926. {
  927. Gbereg *reg;
  928. ulong smi_reg;
  929. reg = ((Ctlr*)mii->ctlr)->reg;
  930. /* check params */
  931. if (((pa<<Physmiaddroff) & ~Physmiaddrmask) ||
  932. ((ra<<SmiRegaddroff) & ~SmiRegaddrmask))
  933. return -1;
  934. smibusywait(reg, PhysmiBusy);
  935. /* fill the phy address and register offset and read opcode */
  936. smi_reg = v << Physmidataoff | pa << Physmiaddroff | ra << SmiRegaddroff;
  937. reg->smi = smi_reg & ~PhysmiopRd;
  938. coherence();
  939. return 0;
  940. }
/* extract the 6-bit phy model number from mii register idr2 */
#define MIIMODEL(idr2) (((idr2) >> 4) & MASK(6))

enum {
	Hacknone,			/* ordinary phy wiring */
	Hackdual,			/* dual-port phy shared by both ctlrs */

	Ouimarvell	= 0x005043,

	/* idr2 mii/phy model numbers */
	Phy1000		= 0x00,		/* 88E1000 Gb */
	Phy1011		= 0x02,		/* 88E1011 Gb */
	Phy1000_3	= 0x03,		/* 88E1000 Gb */
	Phy1000s	= 0x04,		/* 88E1000S Gb */
	Phy1000_5	= 0x05,		/* 88E1000 Gb */
	Phy1000_6	= 0x06,		/* 88E1000 Gb */
	Phy3082		= 0x08,		/* 88E3082 10/100 */
	Phy1112		= 0x09,		/* 88E1112 Gb */
	/* 1121r and 1149 deliberately share model code 0x0b */
	Phy1121r	= 0x0b,		/* says the 1121r manual */
	Phy1149		= 0x0b,		/* 88E1149 Gb */
	Phy1111		= 0x0c,		/* 88E1111 Gb */
	Phy1116		= 0x21,		/* 88E1116 Gb */
	Phy1116r	= 0x24,		/* 88E1116R Gb */
	Phy1118		= 0x22,		/* 88E1118 Gb */
	Phy3016		= 0x26,		/* 88E3016 10/100 */
};

static int hackflavour;			/* Hacknone or Hackdual, set by mymii */
  964. /*
  965. * on openrd, ether0's phy has address 8, ether1's is ether0's 24.
  966. * on guruplug, ether0's is phy 0 and ether1's is ether0's phy 1.
  967. */
/*
 * probe the mii bus for PHYs in mask and return the mask of those
 * found; fill in Mii state for PHYs not already probed.
 *
 * the first pass (ctlrno 0 only) classifies the phy arrangement:
 * if two dual-port-capable marvell phys are found, both controllers
 * share ether0's mii bus (Hackdual).  phyno/phyidx state is static
 * because it must persist across the per-controller calls.
 */
int
mymii(Mii* mii, int mask)
{
	Ctlr *ctlr;
	MiiPhy *miiphy;
	int bit, ctlrno, oui, model, phyno, r, rmask;
	static int dualport, phyidx;
	static int phynos[NMiiPhy];

	ctlr = mii->ctlr;
	ctlrno = ctlr->ether->ctlrno;

	/* first pass: figure out what kind of phy(s) we have. */
	dualport = 0;
	if (ctlrno == 0) {
		for(phyno = 0; phyno < NMiiPhy; phyno++){
			bit = 1<<phyno;
			if(!(mask & bit) || mii->mask & bit)
				continue;
			if(mii->mir(mii, phyno, Bmsr) == -1)
				continue;	/* no phy at this address */
			r = mii->mir(mii, phyno, Phyidr1);
			oui = (r & 0x3FFF)<<6;
			r = mii->mir(mii, phyno, Phyidr2);
			oui |= r>>10;
			model = MIIMODEL(r);
			if (oui == 0xfffff && model == 0x3f)
				continue;	/* all-ones: nothing there */
			MIIDBG("ctlrno %d phy %d oui %#ux model %#ux\n",
				ctlrno, phyno, oui, model);
			if (oui == Ouimarvell &&
			   (model == Phy1121r || model == Phy1116r))
				++dualport;
			phynos[phyidx++] = phyno;
		}
		hackflavour = dualport == 2 && phyidx == 2? Hackdual: Hacknone;
		MIIDBG("ether1116: %s-port phy\n",
			hackflavour == Hackdual? "dual": "single");
	}

	/*
	 * Probe through mii for PHYs in mask;
	 * return the mask of those found in the current probe.
	 * If the PHY has not already been probed, update
	 * the Mii information.
	 */
	rmask = 0;
	if (hackflavour == Hackdual && ctlrno < phyidx) {
		/*
		 * openrd, guruplug or the like: use ether0's phys.
		 * this is a nasty hack, but so is the hardware.
		 */
		MIIDBG("ctlrno %d using ctlrno 0's phyno %d\n",
			ctlrno, phynos[ctlrno]);
		ctlr->mii = mii = ctlrs[0]->mii;
		mask = 1 << phynos[ctlrno];
		mii->mask = ~mask;
	}
	for(phyno = 0; phyno < NMiiPhy; phyno++){
		bit = 1<<phyno;
		if(!(mask & bit))
			continue;
		if(mii->mask & bit){
			rmask |= bit;	/* already probed */
			continue;
		}
		if(mii->mir(mii, phyno, Bmsr) == -1)
			continue;
		r = mii->mir(mii, phyno, Phyidr1);
		oui = (r & 0x3FFF)<<6;
		r = mii->mir(mii, phyno, Phyidr2);
		oui |= r>>10;
		if(oui == 0xFFFFF || oui == 0)
			continue;
		if((miiphy = malloc(sizeof(MiiPhy))) == nil)
			continue;
		miiphy->mii = mii;
		miiphy->oui = oui;
		miiphy->phyno = phyno;
		/* ~0 marks "not yet negotiated/configured" */
		miiphy->anar = ~0;
		miiphy->fc = ~0;
		miiphy->mscr = ~0;
		mii->phy[phyno] = miiphy;
		/* NOTE(review): && binds tighter than ||; confirm grouping is intended */
		if(ctlrno == 0 || hackflavour != Hackdual && mii->curphy == nil)
			mii->curphy = miiphy;
		mii->mask |= bit;
		mii->nphy++;
		rmask |= bit;
	}
	return rmask;
}
/*
 * allocate and probe this controller's mii bus, then run
 * autonegotiation on the current phy if it has no status yet.
 * returns 0 on success, -1 on failure.
 *
 * note: under Hackdual, mymii may replace ctlr->mii with ctlrs[0]'s
 * shared Mii, so ctlr->mii is not necessarily the Mii malloced here.
 */
static int
kirkwoodmii(Ether *ether)
{
	int i;
	Ctlr *ctlr;
	MiiPhy *phy;

	MIIDBG("mii\n");
	ctlr = ether->ctlr;
	if((ctlr->mii = malloc(sizeof(Mii))) == nil)
		return -1;
	ctlr->mii->ctlr = ctlr;
	ctlr->mii->mir = miird;
	ctlr->mii->miw = miiwr;

	if(mymii(ctlr->mii, ~0) == 0 || (phy = ctlr->mii->curphy) == nil){
		print("#l%d: ether1116: init mii failure\n", ether->ctlrno);
		free(ctlr->mii);
		ctlr->mii = nil;
		return -1;
	}

	/* oui 005043 is marvell */
	MIIDBG("oui %#X phyno %d\n", phy->oui, phy->phyno);
	// TODO: does this make sense? shouldn't each phy be initialised?
	if((ctlr->ether->ctlrno == 0 || hackflavour != Hackdual) &&
	    miistatus(ctlr->mii) < 0){
		miireset(ctlr->mii);
		MIIDBG("miireset\n");
		if(miiane(ctlr->mii, ~0, 0, ~0) < 0){
			iprint("miiane failed\n");
			return -1;
		}
		MIIDBG("miistatus\n");
		miistatus(ctlr->mii);
		if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrLs){
			/* link is up: wait up to ~6s for autoneg to finish */
			for(i = 0; ; i++){
				if(i > 600){
					iprint("ether1116: autonegotiation failed\n");
					break;
				}
				if(miird(ctlr->mii, phy->phyno, Bmsr) & BmsrAnc)
					break;
				delay(10);
			}
			if(miistatus(ctlr->mii) < 0)
				iprint("miistatus failed\n");
		}else{
			iprint("ether1116: no link\n");
			phy->speed = 10;	/* simple default */
		}
	}

	ether->mbps = phy->speed;
	MIIDBG("#l%d: kirkwoodmii: fd %d speed %d tfc %d rfc %d\n",
		ctlr->port, phy->fd, phy->speed, phy->tfc, phy->rfc);
	MIIDBG("mii done\n");
	return 0;
}
/*
 * PHY register pages, selected by writing the page number to the
 * Eadr register (see miiregpage).  order matches the hardware's
 * page numbering, so no explicit values are needed.
 */
enum {			/* PHY register pages */
	Pagcopper,
	Pagfiber,
	Pagrgmii,
	Pagled,
	Pagrsvd1,
	Pagvct,
	Pagtest,
	Pagrsvd2,
	Pagfactest,
};
  1122. static void
  1123. miiregpage(Mii *mii, ulong dev, ulong page)
  1124. {
  1125. miiwr(mii, dev, Eadr, page);
  1126. }
  1127. static int
  1128. miiphyinit(Mii *mii)
  1129. {
  1130. ulong dev;
  1131. Ctlr *ctlr;
  1132. Gbereg *reg;
  1133. ctlr = (Ctlr*)mii->ctlr;
  1134. reg = ctlr->reg;
  1135. dev = reg->phy;
  1136. MIIDBG("phy dev addr %lux\n", dev);
  1137. /* leds link & activity */
  1138. miiregpage(mii, dev, Pagled);
  1139. /* low 4 bits == 1: on - link, blink - activity, off - no link */
  1140. miiwr(mii, dev, Scr, (miird(mii, dev, Scr) & ~0xf) | 1);
  1141. miiregpage(mii, dev, Pagrgmii);
  1142. miiwr(mii, dev, Scr, miird(mii, dev, Scr) | Rgmiipwrup);
  1143. /* must now do a software reset, says the manual */
  1144. miireset(ctlr->mii);
  1145. /* enable RGMII delay on Tx and Rx for CPU port */
  1146. miiwr(mii, dev, Recr, miird(mii, dev, Recr) | Rxtiming | Rxtiming);
  1147. /* must now do a software reset, says the manual */
  1148. miireset(ctlr->mii);
  1149. miiregpage(mii, dev, Pagcopper);
  1150. miiwr(mii, dev, Scr,
  1151. (miird(mii, dev, Scr) & ~(Pwrdown|Endetect)) | Mdix);
  1152. return 0;
  1153. }
  1154. /*
  1155. * initialisation
  1156. */
  1157. static void
  1158. quiesce(Gbereg *reg)
  1159. {
  1160. ulong v;
  1161. v = reg->tqc;
  1162. if (v & 0xFF)
  1163. reg->tqc = v << 8; /* stop active channels */
  1164. v = reg->rqc;
  1165. if (v & 0xFF)
  1166. reg->rqc = v << 8; /* stop active channels */
  1167. /* wait for all queues to stop */
  1168. while (reg->tqc & 0xFF || reg->rqc & 0xFF)
  1169. ;
  1170. }
  1171. static void
  1172. p16(uchar *p, ulong v) /* convert big-endian short to bytes */
  1173. {
  1174. *p++ = v>>8;
  1175. *p = v;
  1176. }
  1177. static void
  1178. p32(uchar *p, ulong v) /* convert big-endian long to bytes */
  1179. {
  1180. *p++ = v>>24;
  1181. *p++ = v>>16;
  1182. *p++ = v>>8;
  1183. *p = v;
  1184. }
  1185. /*
  1186. * set ether->ea from hw mac address,
  1187. * configure unicast filtering to accept it.
  1188. */
/*
 * set ether->ea from hw mac address,
 * configure unicast filtering to accept it.
 * if the hardware mac is zero on a secondary controller,
 * derive an address from controller 0's.
 */
void
archetheraddr(Ether *ether, Gbereg *reg, int rxqno)
{
	uchar *ea;
	ulong nibble, ucreg, tbloff, regoff;

	ea = ether->ea;
	p32(ea, reg->macah);
	p16(ea+4, reg->macal);
	if (memcmp(ea, zeroea, sizeof zeroea) == 0 && ether->ctlrno > 0) {
		/* hack: use ctlr[0]'s + ctlrno */
		memmove(ea, ctlrs[0]->ether->ea, Eaddrlen);
		ea[Eaddrlen-1] += ether->ctlrno;
		reg->macah = ea[0] << 24 | ea[1] << 16 | ea[2] << 8 | ea[3];
		reg->macal = ea[4] << 8 | ea[5];
		coherence();
	}

	/* accept frames on ea: index the unicast table by ea's low nibble */
	nibble = ea[5] & 0xf;
	tbloff = nibble / 4;		/* 4 one-byte entries per register */
	regoff = nibble % 4;
	regoff *= 8;
	/*
	 * NOTE(review): this mask keeps only the target byte and zeroes the
	 * other three entries in the register; masking with ~(0xff << regoff)
	 * would preserve them — confirm which is intended.
	 */
	ucreg = reg->dfut[tbloff] & (0xff << regoff);
	ucreg |= (rxqno << 1 | Pass) << regoff;
	reg->dfut[tbloff] = ucreg;

	/* accept all multicast too.  set up special & other tables. */
	memset(reg->dfsmt, Qno<<1 | Pass, sizeof reg->dfsmt);
	memset(reg->dfomt, Qno<<1 | Pass, sizeof reg->dfomt);
	coherence();
}
/*
 * configure the controller's DRAM address-decode windows:
 * two 256MB windows covering physical DRAM, the rest disabled.
 */
static void
cfgdramacc(Gbereg *reg)
{
	memset(reg->harr, 0, sizeof reg->harr);
	memset(reg->base, 0, sizeof reg->base);

	reg->bare = MASK(6) - MASK(2);	/* disable wins 2-5 */
	/* this doesn't make any sense, but it's required */
	reg->epap = 3 << 2 | 3;		/* full access for wins 0 & 1 */
//	reg->epap = 0;	/* no access on access violation for all wins */
	coherence();

	reg->base[0].base = PHYSDRAM | WINATTR(Attrcs0) | Targdram;
	reg->base[0].size = WINSIZE(256*MB);
	reg->base[1].base = (PHYSDRAM + 256*MB) | WINATTR(Attrcs1) | Targdram;
	reg->base[1].size = WINSIZE(256*MB);
	coherence();
}
/*
 * allocate the receive-buffer pool and the rx and tx descriptor
 * rings.  descriptors must be Descralign-aligned and buffers
 * Bufalign-aligned for the dma engine; the rings are circular
 * via each descriptor's physical `next' pointer.
 */
static void
ctlralloc(Ctlr *ctlr)
{
	int i;
	Block *b;
	Rx *r;
	Tx *t;

	/* populate the shared free-buffer list */
	ilock(&freeblocks);
	for(i = 0; i < Nrxblks; i++) {
		b = iallocb(Rxblklen+Bufalign-1);
		if(b == nil) {
			iprint("ether1116: no memory for rx buffers\n");
			break;
		}
		assert(b->ref == 1);
		/* align the payload area within the over-sized block */
		b->wp = b->rp = (uchar*)
			((uintptr)(b->lim - Rxblklen) & ~(Bufalign - 1));
		assert(((uintptr)b->rp & (Bufalign - 1)) == 0);
		b->free = rxfreeb;	/* freeing recycles into this pool */
		b->next = freeblocks.head;
		freeblocks.head = b;
	}
	iunlock(&freeblocks);

	/* build the rx descriptor ring */
	ctlr->rx = xspanalloc(Nrx * sizeof(Rx), Descralign, 0);
	if(ctlr->rx == nil)
		panic("ether1116: no memory for rx ring");
	for(i = 0; i < Nrx; i++) {
		r = &ctlr->rx[i];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->cs = 0;	/* not owned by hardware until r->buf is set */
		r->buf = 0;
		r->next = PADDR(&ctlr->rx[NEXT(i, Nrx)]);
		ctlr->rxb[i] = nil;
	}
	ctlr->rxtail = ctlr->rxhead = 0;
	rxreplenish(ctlr);	/* hand buffers (and ownership) to the hw */
	/* flush descriptors to memory before the dma engine reads them */
	cachedwb();
	l2cacheuwb();

	/* build the tx descriptor ring */
	ctlr->tx = xspanalloc(Ntx * sizeof(Tx), Descralign, 0);
	if(ctlr->tx == nil)
		panic("ether1116: no memory for tx ring");
	for(i = 0; i < Ntx; i++) {
		t = &ctlr->tx[i];
		assert(((uintptr)t & (Descralign - 1)) == 0);
		t->cs = 0;
		t->buf = 0;
		t->next = PADDR(&ctlr->tx[NEXT(i, Ntx)]);
		ctlr->txb[i] = nil;
	}
	ctlr->txtail = ctlr->txhead = 0;
	cachedwb();
	l2cacheuwb();
}
/*
 * bring the controller up: allocate rings, point the hardware at
 * them, program interrupt coalescing and masks, then turn the port
 * on and start the receive kproc.  register writes are ordered;
 * do not reorder them casually.
 */
static void
ctlrinit(Ether *ether)
{
	int i;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	static char name[KNAMELEN];
	static Ctlr fakectlr;		/* bigger than 4K; keep off the stack */

	/* clear any stale descriptor pointers */
	for (i = 0; i < nelem(reg->tcqdp); i++)
		reg->tcqdp[i] = 0;
	for (i = 0; i < nelem(reg->crdp); i++)
		reg->crdp[i].r = 0;
	coherence();

	cfgdramacc(reg);
	ctlralloc(ctlr);

	/* point the hw at the freshly built rings */
	reg->tcqdp[Qno]  = PADDR(&ctlr->tx[ctlr->txhead]);
	reg->crdp[Qno].r = PADDR(&ctlr->rx[ctlr->rxhead]);
	coherence();
//	dumprxdescs(ctlr);

	/* clear stats by reading them into fake ctlr */
	getmibstats(&fakectlr);

	reg->pxmfs = MFS40by;			/* allow runts in */

	/*
	 * ipg's (inter packet gaps) for interrupt coalescing,
	 * values in units of 64 clock cycles.  A full-sized
	 * packet (1514 bytes) takes just over 12µs to transmit.
	 */
	if (CLOCKFREQ/(Maxrxintrsec*64) >= (1<<16))
		panic("rx coalescing value %d too big for short",
			CLOCKFREQ/(Maxrxintrsec*64));
	reg->sdc = SDCrifb | SDCrxburst(Burst16) | SDCtxburst(Burst16) |
		SDCrxnobyteswap | SDCtxnobyteswap |
		SDCipgintrx(CLOCKFREQ/(Maxrxintrsec*64));
	reg->pxtfut = 0;	/* TFUTipginttx(CLOCKFREQ/(Maxrxintrsec*64)) */

	/* allow just these interrupts */
	/* guruplug generates Irxerr interrupts continually */
	reg->irqmask = Isum | Irx | Irxbufferq(Qno) | Irxerr | Itxendq(Qno);
	reg->irqemask = IEsum | IEtxerrq(Qno) | IEphystschg | IErxoverrun |
		IEtxunderrun;

	reg->irqe = 0;
	reg->euirqmask = 0;
	coherence();
	reg->irq = 0;
	reg->euirq = 0;
	/* send errors to end of memory */
//	reg->euda = PHYSDRAM + 512*MB - 8*1024;
	reg->euda = 0;
	reg->eudid = Attrcs1 << 4 | Targdram;

//	archetheraddr(ether, ctlr->reg, Qno);	/* 2nd location */

	reg->portcfg = Rxqdefault(Qno) | Rxqarp(Qno);
	reg->portcfgx = 0;
	coherence();

	/*
	 * start the controller running.
	 * turn the port on, kick the receiver.
	 */
	reg->psc1 = PSC1rgmii | PSC1encolonbp | PSC1coldomlim(0x23);
	/* do this only when the controller is quiescent */
	reg->psc0 = PSC0porton | PSC0an_flctloff |
		PSC0an_pauseadv | PSC0nofrclinkdown | PSC0mru(PSC0mru1522);
	coherence();
	for (i = 0; i < 4000; i++)		/* magic delay */
		;
	ether->link = (reg->ps0 & PS0linkup) != 0;

	/* set ethernet MTU for leaky bucket mechanism to 0 (disabled) */
	reg->pmtu = 0;
	etheractive(ether);

	snprint(name, sizeof name, "#l%drproc", ether->ctlrno);
	kproc(name, rcvproc, ether);

	reg->rqc = Rxqon(Qno);			/* kick the receiver */
	coherence();
}
  1359. static void
  1360. attach(Ether* ether)
  1361. {
  1362. Ctlr *ctlr = ether->ctlr;
  1363. lock(&ctlr->initlock);
  1364. if(ctlr->init == 0) {
  1365. ctlrinit(ether);
  1366. ctlr->init = 1;
  1367. }
  1368. unlock(&ctlr->initlock);
  1369. }
  1370. /*
  1371. * statistics goo.
  1372. * mib registers clear on read.
  1373. */
/*
 * accumulate the hardware MIB counters into ctlr.
 * the mib registers clear on read, so each must be read
 * exactly once per call and added to the running totals.
 */
static void
getmibstats(Ctlr *ctlr)
{
	Gbereg *reg = ctlr->reg;

	/*
	 * Marvell 88f6281 errata FE-ETH-120: high long of rxby and txby
	 * can't be read correctly, so read the low long frequently
	 * (every 30 seconds or less), thus avoiding overflow into high long.
	 */
	ctlr->rxby	+= reg->rxbylo;
	ctlr->txby	+= reg->txbylo;

	ctlr->badrxby	+= reg->badrxby;
	ctlr->mactxerr	+= reg->mactxerr;
	ctlr->rxpkt	+= reg->rxpkt;
	ctlr->badrxpkt	+= reg->badrxpkt;
	ctlr->rxbcastpkt+= reg->rxbcastpkt;
	ctlr->rxmcastpkt+= reg->rxmcastpkt;
	ctlr->rx64	+= reg->rx64;
	ctlr->rx65_127	+= reg->rx65_127;
	ctlr->rx128_255	+= reg->rx128_255;
	ctlr->rx256_511	+= reg->rx256_511;
	ctlr->rx512_1023+= reg->rx512_1023;
	ctlr->rx1024_max+= reg->rx1024_max;
	ctlr->txpkt	+= reg->txpkt;
	ctlr->txcollpktdrop+= reg->txcollpktdrop;
	ctlr->txmcastpkt+= reg->txmcastpkt;
	ctlr->txbcastpkt+= reg->txbcastpkt;
	ctlr->badmacctlpkts+= reg->badmacctlpkts;
	ctlr->txflctl	+= reg->txflctl;
	ctlr->rxflctl	+= reg->rxflctl;
	ctlr->badrxflctl+= reg->badrxflctl;
	ctlr->rxundersized+= reg->rxundersized;
	ctlr->rxfrags	+= reg->rxfrags;
	ctlr->rxtoobig	+= reg->rxtoobig;
	ctlr->rxjabber	+= reg->rxjabber;
	ctlr->rxerr	+= reg->rxerr;
	ctlr->crcerr	+= reg->crcerr;
	ctlr->collisions+= reg->collisions;
	ctlr->latecoll	+= reg->latecoll;
}
  1414. long
  1415. ifstat(Ether *ether, void *a, long n, ulong off)
  1416. {
  1417. Ctlr *ctlr = ether->ctlr;
  1418. Gbereg *reg = ctlr->reg;
  1419. char *buf, *p, *e;
  1420. buf = p = malloc(READSTR);
  1421. e = p + READSTR;
  1422. ilock(ctlr);
  1423. getmibstats(ctlr);
  1424. ctlr->intrs += ctlr->newintrs;
  1425. p = seprint(p, e, "interrupts: %lud\n", ctlr->intrs);
  1426. p = seprint(p, e, "new interrupts: %lud\n", ctlr->newintrs);
  1427. ctlr->newintrs = 0;
  1428. p = seprint(p, e, "tx underrun: %lud\n", ctlr->txunderrun);
  1429. p = seprint(p, e, "tx ring full: %lud\n", ctlr->txringfull);
  1430. ctlr->rxdiscard += reg->pxdfc;
  1431. ctlr->rxoverrun += reg->pxofc;
  1432. p = seprint(p, e, "rx discarded frames: %lud\n", ctlr->rxdiscard);
  1433. p = seprint(p, e, "rx overrun frames: %lud\n", ctlr->rxoverrun);
  1434. p = seprint(p, e, "no first+last flag: %lud\n", ctlr->nofirstlast);
  1435. p = seprint(p, e, "duplex: %s\n", (reg->ps0 & PS0fd)? "full": "half");
  1436. p = seprint(p, e, "flow control: %s\n", (reg->ps0 & PS0flctl)? "on": "off");
  1437. /* p = seprint(p, e, "speed: %d mbps\n", ); */
  1438. p = seprint(p, e, "received bytes: %llud\n", ctlr->rxby);
  1439. p = seprint(p, e, "bad received bytes: %lud\n", ctlr->badrxby);
  1440. p = seprint(p, e, "internal mac transmit errors: %lud\n", ctlr->mactxerr);
  1441. p = seprint(p, e, "total received frames: %lud\n", ctlr->rxpkt);
  1442. p = seprint(p, e, "received broadcast frames: %lud\n", ctlr->rxbcastpkt);
  1443. p = seprint(p, e, "received multicast frames: %lud\n", ctlr->rxmcastpkt);
  1444. p = seprint(p, e, "bad received frames: %lud\n", ctlr->badrxpkt);
  1445. p = seprint(p, e, "received frames 0-64: %lud\n", ctlr->rx64);
  1446. p = seprint(p, e, "received frames 65-127: %lud\n", ctlr->rx65_127);
  1447. p = seprint(p, e, "received frames 128-255: %lud\n", ctlr->rx128_255);
  1448. p = seprint(p, e, "received frames 256-511: %lud\n", ctlr->rx256_511);
  1449. p = seprint(p, e, "received frames 512-1023: %lud\n", ctlr->rx512_1023);
  1450. p = seprint(p, e, "received frames 1024-max: %lud\n", ctlr->rx1024_max);
  1451. p = seprint(p, e, "transmitted bytes: %llud\n", ctlr->txby);
  1452. p = seprint(p, e, "total transmitted frames: %lud\n", ctlr->txpkt);
  1453. p = seprint(p, e, "transmitted broadcast frames: %lud\n", ctlr->txbcastpkt);
  1454. p = seprint(p, e, "transmitted multicast frames: %lud\n", ctlr->txmcastpkt);
  1455. p = seprint(p, e, "transmit frames dropped by collision: %lud\n", ctlr->txcollpktdrop);
  1456. p = seprint(p, e, "misaligned buffers: %lud\n", ether->pktsmisaligned);
  1457. p = seprint(p, e, "bad mac control frames: %lud\n", ctlr->badmacctlpkts);
  1458. p = seprint(p, e, "transmitted flow control messages: %lud\n", ctlr->txflctl);
  1459. p = seprint(p, e, "received flow control messages: %lud\n", ctlr->rxflctl);
  1460. p = seprint(p, e, "bad received flow control messages: %lud\n", ctlr->badrxflctl);
  1461. p = seprint(p, e, "received undersized packets: %lud\n", ctlr->rxundersized);
  1462. p = seprint(p, e, "received fragments: %lud\n", ctlr->rxfrags);
  1463. p = seprint(p, e, "received oversized packets: %lud\n", ctlr->rxtoobig);
  1464. p = seprint(p, e, "received jabber packets: %lud\n", ctlr->rxjabber);
  1465. p = seprint(p, e, "mac receive errors: %lud\n", ctlr->rxerr);
  1466. p = seprint(p, e, "crc errors: %lud\n", ctlr->crcerr);
  1467. p = seprint(p, e, "collisions: %lud\n", ctlr->collisions);
  1468. p = seprint(p, e, "late collisions: %lud\n", ctlr->latecoll);
  1469. USED(p);
  1470. iunlock(ctlr);
  1471. n = readstr(off, a, n, buf);
  1472. free(buf);
  1473. return n;
  1474. }
  1475. static int
  1476. reset(Ether *ether)
  1477. {
  1478. Ctlr *ctlr;
  1479. ether->ctlr = ctlr = malloc(sizeof *ctlr);
  1480. switch(ether->ctlrno) {
  1481. case 0:
  1482. ctlr->reg = (Gbereg*)Gbe0regs;
  1483. ether->irq = IRQ0gbe0sum;
  1484. break;
  1485. case 1:
  1486. ctlr->reg = (Gbereg*)Gbe1regs;
  1487. ether->irq = IRQ0gbe1sum;
  1488. break;
  1489. default:
  1490. panic("ether1116: bad ether ctlr #%d", ether->ctlrno);
  1491. }
  1492. /* need this for guruplug, at least */
  1493. *(ulong *)AddrIocfg0 |= 1 << 7 | 1 << 15; /* io cfg 0: 1.8v gbe */
  1494. coherence();
  1495. ctlr->ether = ether;
  1496. ctlrs[ether->ctlrno] = ctlr;
  1497. shutdown(ether);
  1498. /* ensure that both interfaces are set to RGMII before calling mii */
  1499. ((Gbereg*)Gbe0regs)->psc1 |= PSC1rgmii;
  1500. ((Gbereg*)Gbe1regs)->psc1 |= PSC1rgmii;
  1501. coherence();
  1502. /* Set phy address of the port */
  1503. ctlr->port = ether->ctlrno;
  1504. ctlr->reg->phy = ether->ctlrno;
  1505. coherence();
  1506. ether->port = (uintptr)ctlr->reg;
  1507. if(kirkwoodmii(ether) < 0){
  1508. free(ctlr);
  1509. ether->ctlr = nil;
  1510. return -1;
  1511. }
  1512. miiphyinit(ctlr->mii);
  1513. archetheraddr(ether, ctlr->reg, Qno); /* original location */
  1514. if (memcmp(ether->ea, zeroea, sizeof zeroea) == 0){
  1515. iprint("ether1116: reset: zero ether->ea\n");
  1516. free(ctlr);
  1517. ether->ctlr = nil;
  1518. return -1; /* no rj45 for this ether */
  1519. }
  1520. ether->attach = attach;
  1521. ether->transmit = transmit;
  1522. ether->interrupt = interrupt;
  1523. ether->ifstat = ifstat;
  1524. ether->shutdown = shutdown;
  1525. ether->ctl = ctl;
  1526. ether->arg = ether;
  1527. ether->promiscuous = promiscuous;
  1528. ether->multicast = multicast;
  1529. return 0;
  1530. }
/*
 * register this driver with the ethernet configuration machinery;
 * reset() will be called for each configured "88e1116" port.
 */
void
ether1116link(void)
{
	addethercard("88e1116", reset);
}