/*
 * Atheros 71xx ethernets for rb450g.
 *
 * all 5 PHYs are accessible only through the first ether's register space.
 *
 * TODO:
 *	promiscuous mode.
 *	make ether1 work: probably needs mii/phy initialisation,
 *	  maybe needs 8316 switch code too (which requires mdio, phy, etc. glop).
 * to maybe do some day:
 *	dig mac addresses out & config phy/mii via spi or other grot and swill
 *	  (instead of editing the rb config file).
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "../port/error.h"
#include "../port/netif.h"
#include "etherif.h"
#include "ethermii.h"
#include <pool.h>

enum {
	Ntd	= 64,
	Nrd	= 256,
	Nrb	= 1024,

	Bufalign= 4,
	Rbsz	= ETHERMAXTU + 4,	/* 4 for CRC */
};

extern uchar arge0mac[Eaddrlen];	/* see rb config file */
extern uchar arge1mac[Eaddrlen];

typedef struct Arge Arge;
typedef struct Ctlr Ctlr;
typedef struct Desc Desc;
typedef struct Etherif Etherif;

/*
 * device registers
 */
struct Arge {
	ulong	cfg1;
	ulong	cfg2;
	ulong	ifg;
	ulong	hduplex;
	ulong	maxframelen;
	uchar	_pad0[0x20 - 0x14];
	ulong	miicfg;
	ulong	miicmd;
	ulong	miiaddr;
	ulong	miictl;
	ulong	miists;
	ulong	miiindic;
	ulong	ifctl;
	ulong	_pad1;
	ulong	staaddr1;
	ulong	staaddr2;
	ulong	fifocfg[3];
	ulong	fifotxthresh;
	ulong	fiforxfiltmatch;
	ulong	fiforxfiltmask;
	ulong	fiforam[7];
	uchar	_pad2[0x180 - 0x7c];

	/* dma */
	ulong	txctl;
	ulong	txdesc;
	ulong	txsts;
	ulong	rxctl;
	ulong	rxdesc;
	ulong	rxsts;
	ulong	dmaintr;
	ulong	dmaintrsts;
};

enum {
	Cfg1softrst	= 1 << 31,
	Cfg1simulrst	= 1 << 30,
	Cfg1macrxblkrst	= 1 << 19,
	Cfg1mactxblkrst	= 1 << 18,
	Cfg1rxfuncrst	= 1 << 17,
	Cfg1txfuncrst	= 1 << 16,
	Cfg1loopback	= 1 << 8,
	Cfg1rxflowctl	= 1 << 5,
	Cfg1txflowctl	= 1 << 4,
	Cfg1syncrx	= 1 << 3,
	Cfg1rxen	= 1 << 2,
	Cfg1synctx	= 1 << 1,
	Cfg1txen	= 1 << 0,

	Cfg2preamblelenmask = 0xf,
	Cfg2preamblelenshift = 12,
	Cfg2ifmode1000	= 2 << 8,
	Cfg2ifmode10_100 = 1 << 8,
	Cfg2ifmodeshift	= 8,
	Cfg2ifmodemask	= 3,
	Cfg2hugeframe	= 1 << 5,
	Cfg2lenfield	= 1 << 4,
	Cfg2enpadcrc	= 1 << 2,
	Cfg2encrc	= 1 << 1,
	Cfg2fdx		= 1 << 0,

	Miicfgrst	= 1 << 31,
	Miicfgscanautoinc = 1 << 5,
	Miicfgpreamblesup = 1 << 4,
	Miicfgclkselmask = 0x7,
	Miicfgclkdiv4	= 0,
	Miicfgclkdiv6	= 2,
	Miicfgclkdiv8	= 3,
	Miicfgclkdiv10	= 4,
	Miicfgclkdiv14	= 5,
	Miicfgclkdiv20	= 6,
	Miicfgclkdiv28	= 7,

	Miicmdscancycle	= 1 << 1,
	Miicmdread	= 1,
	Miicmdwrite	= 0,

	Miiphyaddrshift	= 8,
	Miiphyaddrmask	= 0xff,
	Miiregmask	= 0x1f,

	Miictlmask	= 0xffff,
	Miistsmask	= 0xffff,

	Miiindicinvalid	= 1 << 2,
	Miiindicscanning = 1 << 1,
	Miiindicbusy	= 1 << 0,

	Ifctlspeed	= 1 << 16,

	Fifocfg0txfabric = 1 << 4,
	Fifocfg0txsys	= 1 << 3,
	Fifocfg0rxfabric = 1 << 2,
	Fifocfg0rxsys	= 1 << 1,
	Fifocfg0watermark = 1 << 0,
	Fifocfg0all	= MASK(5),
	Fifocfg0enshift	= 8,

	/*
	 * these flags apply both to the filter mask and to the filter match.
	 * `Ff' is for `fifo filter'.
	 */
	Ffunicast	= 1 << 17,
	Fftruncframe	= 1 << 16,
	Ffvlantag	= 1 << 15,
	Ffunsupopcode	= 1 << 14,
	Ffpauseframe	= 1 << 13,
	Ffctlframe	= 1 << 12,
	Fflongevent	= 1 << 11,
	Ffdribblenibble	= 1 << 10,
	Ffbcast		= 1 << 9,
	Ffmcast		= 1 << 8,
	Ffok		= 1 << 7,
	Ffoorange	= 1 << 6,
	Fflenmsmtch	= 1 << 5,
	Ffcrcerr	= 1 << 4,
	Ffcodeerr	= 1 << 3,
	Fffalsecarrier	= 1 << 2,
	Ffrxdvevent	= 1 << 1,
	Ffdropevent	= 1 << 0,

	/*
	 * exclude unicast and truncated frames from matching.
	 */
	Ffmatchdflt = Ffvlantag | Ffunsupopcode | Ffpauseframe | Ffctlframe |
		Fflongevent | Ffdribblenibble | Ffbcast | Ffmcast | Ffok |
		Ffoorange | Fflenmsmtch | Ffcrcerr | Ffcodeerr |
		Fffalsecarrier | Ffrxdvevent | Ffdropevent,

	/* `Frm' is for `fifo receive mask'. */
	Frmbytemode	= 1 << 19,
	Frmnoshortframe	= 1 << 18,
	Frmbit17	= 1 << 17,
	Frmbit16	= 1 << 16,
	Frmtruncframe	= 1 << 15,
	Frmlongevent	= 1 << 14,
	Frmvlantag	= 1 << 13,
	Frmunsupopcode	= 1 << 12,
	Frmpauseframe	= 1 << 11,
	Frmctlframe	= 1 << 10,
	Frmdribblenibble = 1 << 9,
	Frmbcast	= 1 << 8,
	Frmmcast	= 1 << 7,
	Frmok		= 1 << 6,
	Frmoorange	= 1 << 5,
	Frmlenmsmtch	= 1 << 4,
	Frmcodeerr	= 1 << 3,
	Frmfalsecarrier	= 1 << 2,
	Frmrxdvevent	= 1 << 1,
	Frmdropevent	= 1 << 0,

	/*
	 * len. mismatch, unsupp. opcode and short frame bits excluded
	 */
	Ffmaskdflt = Frmnoshortframe | Frmbit17 | Frmbit16 | Frmtruncframe |
		Frmlongevent | Frmvlantag | Frmpauseframe | Frmctlframe |
		Frmdribblenibble | Frmbcast | Frmmcast | Frmok | Frmoorange |
		Frmcodeerr | Frmfalsecarrier | Frmrxdvevent | Frmdropevent,

	Dmatxctlen	= 1 << 0,

	/* dma tx status */
	Txpcountmask	= 0xff,
	Txpcountshift	= 16,
	Txbuserr	= 1 << 3,
	Txunderrun	= 1 << 1,
	Txpktsent	= 1 << 0,

	Dmarxctlen	= 1 << 0,

	/* dma rx status */
	Rxpcountmask	= 0xff,
	Rxpcountshift	= 16,
	Rxbuserr	= 1 << 3,
	Rxovflo		= 1 << 2,
	Rxpktrcvd	= 1 << 0,

	/* dmaintr & dmaintrsts bits */
	Dmarxbuserr	= 1 << 7,
	Dmarxovflo	= 1 << 6,
	Dmarxpktrcvd	= 1 << 4,
	Dmatxbuserr	= 1 << 3,
	Dmatxunderrun	= 1 << 1,
	Dmatxpktsent	= 1 << 0,
	/* we don't really need most tx interrupts */
	Dmaall		= Dmarxbuserr | Dmarxovflo | Dmarxpktrcvd | Dmatxbuserr,

	Spictlremapdisable = 1 << 6,
	Spictlclkdividermask = MASK(6),

	Spiioctlcs2	= 1 << 18,
	Spiioctlcs1	= 1 << 17,
	Spiioctlcs0	= 1 << 16,
	Spiioctlcsmask	= 7 << 16,
	Spiioctlclk	= 1 << 8,
	Spiioctldo	= 1,
};

struct Spi {			/* at 0x1f000000 */
	ulong	fs;
	ulong	ctl;
	ulong	ioctl;
	ulong	rds;
};

/*
 * hw descriptors of buffer rings (rx and tx); they need to be uncached
 * (ringinit maps them via KSEG1ADDR).
 */
struct Desc {
	ulong	addr;		/* of packet buffer */
	ulong	ctl;
	Desc	*next;
	ulong	_pad;
};

enum {
	Descempty	= 1 << 31,
	Descmore	= 1 << 24,
	Descszmask	= MASK(12),
};

#define DMASIZE(len)	((len) & Descszmask)

struct Ctlr {
	Arge	*regs;
	Ether*	edev;			/* backward pointer */
	Lock;				/* attach */
	int	init;
	int	attached;

	Mii*	mii;
	Rendez	lrendez;
	int	lim;
	int	link;
	int	phymask;

	/* receiver */
	Rendez	rrendez;
	uint	rintr;			/* count */
	int	pktstoread;		/* flag */
	int	discard;
	/* rx descriptors */
	Desc*	rdba;			/* base address */
	Block**	rd;
	uint	rdh;			/* head */
	uint	rdt;			/* tail */
	uint	nrdfree;		/* rd's awaiting pkts (sort of) */

	/* transmitter */
	Rendez	trendez;
	uint	tintr;			/* count */
	int	pktstosend;		/* flag */
	int	ntq;
	/* tx descriptors */
	Desc*	tdba;			/* base address */
	Block**	td;
	uint	tdh;			/* head */
	uint	tdt;			/* tail */
};

struct Etherif {
	uintptr	regs;
	int	irq;
	uchar	*mac;
	int	phymask;
};

static Etherif etherifs[] = {
	{ 0x1a000000, ILenet0, arge0mac, 1<<4 },
	{ 0x19000000, ILenet1, arge1mac, MASK(4) },
};

static Ether *etherxx[MaxEther];
static Lock athrblock;		/* free receive Blocks */
static Block* athrbpool;	/* receive Blocks for all ath controllers */

static void athrbfree(Block* bp);

/*
 * ar8316 ether switch
 */
enum {
	Swrgmii	= 0,
	Swgmii	= 1,
	Swphy4cpu = 0,	/* flag: port 4 connected to CPU (not internal switch) */
};

typedef struct Switch Switch;
struct Switch {
	int	page;
	int	scdev;
};

enum {
	/* atheros-specific mii registers */
	Miiathdbgaddr	= 0x1d,
	Miiathdbgdata	= 0x1e,

	Swregmask	= 0,
	Swmaskrevmask	= 0x00ff,
	Swmaskvermask	= 0xff00,
	Swmaskvershift	= 8,
	Swmasksoftreset	= 1 << 31,

	Swregmode	= 8,
	Swdir615uboot	= 0x8d1003e0,
	/* from ubiquiti rspro */
	Swrgmiiport4iso	= 0x81461bea,
	Swrgmiiport4sw	= 0x01261be2,
	/* avm fritz!box 7390 */
	Swgmiiavm	= 0x010e5b71,
	Swmac0gmiien	= 1 << 0,
	Swmac0rgmiien	= 1 << 1,
	Swphy4gmiien	= 1 << 2,
	Swphy4rgmiien	= 1 << 3,
	Swmac0macmode	= 1 << 4,
	Swrgmiirxclkdelayen= 1 << 6,
	Swrgmiitxclkdelayen= 1 << 7,
	Swmac5macmode	= 1 << 14,
	Swmac5phymode	= 1 << 15,
	Swtxdelays0	= 1 << 21,
	Swtxdelays1	= 1 << 22,
	Swrxdelays0	= 1 << 23,
	Swledopenen	= 1 << 24,
	Swspien		= 1 << 25,
	Swrxdelays1	= 1 << 26,
	Swpoweronsel	= 1 << 31,

	Swregfloodmask	= 0x2c,
	Swfloodmaskbcast2cpu= 1 << 26,
	Swregglobal	= 0x30,
	Swglobalmtumask	= 0x7fff,
};

#ifdef NOTYET
void *
devicegetparent(int)
{
	static int glop;

	return &glop;
}

static void
arswsplitsetpage(int dev, ulong addr, ushort *phy, ushort *reg)
{
	static Switch ar8316;
	Switch *sc = &ar8316;
	ushort page;

	page = ((addr) >> 9) & 0xffff;
	*phy = (((addr) >> 6) & 0x7) | 0x10;
	*reg = ((addr) >> 1) & 0x1f;
	MDIOWRREG(devicegetparent(dev), 0x18, 0, page);
	sc->page = page;
}

/*
 * Read half a register.  Some of the registers define control bits, and
 * the sequence of half-word accesses matters.  The register addresses
 * are word-even (mod 4).
 */
static int
arswrdreg16(int dev, int addr)
{
	ushort phy, reg;

	arswsplitsetpage(dev, addr, &phy, &reg);
	return MDIORDREG(devicegetparent(dev), phy, reg);
}

void
arswwritedbg(int dev, int phy, ushort dbgaddr, ushort dbgdata)
{
	MDIOWRREG(devicegetparent(dev), phy, Miiathdbgaddr, dbgaddr);
	MDIOWRREG(devicegetparent(dev), phy, Miiathdbgdata, dbgdata);
}

/*
 * Write half a register.
 */
static inline int
arswwrreg16(int dev, int addr, int data)
{
	ushort phy, reg;

	arswsplitsetpage(dev, addr, &phy, &reg);
	return MDIOWRREG(devicegetparent(dev), phy, reg, data);
}

/* arsw??reglsb routines operate on lower 16 bits; *msb on upper ones */
int
arswrdreg(int dev, int addr)
{
	return arswrdreglsb(dev, addr) | arswrdregmsb(dev, addr);
}

int
arswwrreg(int dev, int addr, int value)
{
	arswwrreglsb(dev, addr, value);		/* XXX check this write too? */
	return arswwrregmsb(dev, addr, value);
}

int
arswmodifyreg(int dev, int addr, int mask, int set)
{
	return arswwrreg(dev, addr, (arswrdreg(dev, addr) & ~mask) | set);
}

/*
 * initialise the switch
 */
static int
ar8316init(Switch *sc)
{
	if (Swrgmii && Swphy4cpu) {
		arswwrreg(sc->scdev, Swregmode, Swrgmiiport4iso);
		iprint("ar8316: MAC port == RGMII, port 4 = dedicated PHY\n");
	} else if (Swrgmii) {
		arswwrreg(sc->scdev, Swregmode, Swrgmiiport4sw);
		iprint("ar8316: MAC port == RGMII, port 4 = switch port\n");
	} else if (Swgmii) {
		arswwrreg(sc->scdev, Swregmode, Swgmiiavm);
		iprint("ar8316: MAC port == GMII\n");
	} else {
		iprint("ar8316: unknown switch PHY config\n");
		return -1;
	}

	delay(1);			/* wait for things to settle */

	if (Swrgmii && Swphy4cpu) {
		iprint("ar8316: port 4 RGMII hack\n");

		/* work around for phy4 rgmii mode */
		arswwritedbg(sc->scdev, 4, 0x12, 0x480c);
		arswwritedbg(sc->scdev, 4, 0x0, 0x824e);	/* rx delay */
		arswwritedbg(sc->scdev, 4, 0x5, 0x3d47);	/* tx delay */
		delay(1);		/* again to let things settle */
	}
	arswwrreg(sc->scdev, 0x38, 0xc000050e);		/* mystery */

	/*
	 * Flood address table misses to all ports, and enable forwarding of
	 * broadcasts to the cpu port.
	 */
	arswwrreg(sc->scdev, Swregfloodmask, Swfloodmaskbcast2cpu | 0x003f003f);
	arswmodifyreg(sc->scdev, Swregglobal, Swglobalmtumask, ETHERMAXTU+8+2);
	return 0;
}
#endif				/* NOTYET */

static long
ifstat(Ether* edev, void* a, long n, ulong offset)
{
	int l, i, r;
	char *p;
	Ctlr *ctlr;

	ctlr = edev->ctlr;
	p = malloc(READSTR);
	if(p == nil)
		error(Enomem);
	l = 0;
	l += snprint(p+l, READSTR-l, "tintr: %ud\n", ctlr->tintr);
	l += snprint(p+l, READSTR-l, "rintr: %ud\n", ctlr->rintr);
	l += snprint(p+l, READSTR-l, "discarded: %ud\n", ctlr->discard);
	if(ctlr->mii != nil && ctlr->mii->curphy != nil){
		l += snprint(p+l, READSTR-l, "phy:   ");
		for(i = 0; i < NMiiPhyr; i++){
			if(i && ((i & 0x07) == 0))
				l += snprint(p+l, READSTR-l, "\n       ");
			r = miimir(ctlr->mii, i);
			l += snprint(p+l, READSTR-l, " %4.4uX", r);
		}
		snprint(p+l, READSTR-l, "\n");
	}
	n = readstr(offset, a, n, p);
	free(p);
	return n;
}

static void
etherrtrace(Netfile* f, Etherpkt* pkt, int len)
{
	int i, n;
	Block *bp;

	if(qwindow(f->in) <= 0)
		return;
	if(len > 58)
		n = 58;
	else
		n = len;
	bp = iallocb(64);
	if(bp == nil)
		return;
	memmove(bp->wp, pkt->d, n);
	i = TK2MS(MACHP(0)->ticks);
	bp->wp[58] = len>>8;
	bp->wp[59] = len;
	bp->wp[60] = i>>24;
	bp->wp[61] = i>>16;
	bp->wp[62] = i>>8;
	bp->wp[63] = i;
	bp->wp += 64;
	qpass(f->in, bp);
}

Block*
etheriq(Ether* ether, Block* bp, int fromwire)
{
	Etherpkt *pkt;
	ushort type;
	int len, multi, tome, fromme;
	Netfile **ep, *f, **fp, *fx;
	Block *xbp;
	Ctlr *ctlr;

	ether->inpackets++;
	ctlr = ether->ctlr;

	pkt = (Etherpkt*)bp->rp;
	len = BLEN(bp);
	type = (pkt->type[0]<<8)|pkt->type[1];
	fx = 0;
	ep = &ether->f[Ntypes];

	multi = pkt->d[0] & 1;
	/* check for valid multicast addresses */
	if(multi && memcmp(pkt->d, ether->bcast, sizeof(pkt->d)) != 0 &&
	    ether->prom == 0)
		if(!activemulti(ether, pkt->d, sizeof(pkt->d))){
			if(fromwire){
				ctlr->discard++;
				freeb(bp);
				bp = 0;
			}
			return bp;
		}

	/* is it for me? */
	tome = memcmp(pkt->d, ether->ea, sizeof(pkt->d)) == 0;
	fromme = memcmp(pkt->s, ether->ea, sizeof(pkt->s)) == 0;

	/*
	 * Multiplex the packet to all the connections which want it.
	 * If the packet is not to be used subsequently (fromwire != 0),
	 * attempt to simply pass it into one of the connections, thereby
	 * saving a copy of the data (usual case hopefully).
	 */
	for(fp = ether->f; fp < ep; fp++)
		if((f = *fp) != nil && (f->type == type || f->type < 0))
			if(tome || multi || f->prom)
				/* Don't want to hear bridged packets */
				if(f->bridge && !fromwire && !fromme)
					continue;
				else if(f->headersonly)
					etherrtrace(f, pkt, len);
				else if(fromwire && fx == 0)
					fx = f;
				else if(xbp = iallocb(len)){
					memmove(xbp->wp, pkt, len);
					xbp->wp += len;
					if(qpass(f->in, xbp) < 0){
						iprint("soverflow for f->in\n");
						ether->soverflows++;
					}
				}else{
					iprint("soverflow iallocb\n");
					ether->soverflows++;
				}

	if(fx){
		if(qpass(fx->in, bp) < 0){
			iprint("soverflow for fx->in\n");
			ether->soverflows++;
		}
		return 0;
	}
	if(fromwire){
		ctlr->discard++;
		freeb(bp);
		return 0;
	}
	return bp;
}

static void
athhwreset(Ether *ether)
{
	Ctlr *ctlr;
	Arge *arge;

	ctlr = ether->ctlr;
	if (ctlr == nil)
		return;
	arge = ctlr->regs;
	if (arge == nil)
		return;

	arge->dmaintr = 0;
	arge->rxctl = 0;
	arge->txctl = 0;
	coherence();

	/*
	 * give tx & rx time to stop, otherwise clearing desc registers
	 * too early will cause random memory corruption.
	 */
	delay(1);
	arge->rxdesc = 0;
	arge->txdesc = 0;
	coherence();

	/* clear all interrupts */
	while (arge->rxsts & Rxpktrcvd)
		arge->rxsts = Rxpktrcvd;
	while (arge->txsts & Txpktsent)
		arge->txsts = Txpktsent;

	/* and errors */
	arge->rxsts = Rxbuserr | Rxovflo;
	arge->txsts = Txbuserr | Txunderrun;
}

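/*
 * Retire transmitted Blocks: walk the tx ring from the head, freeing
 * Blocks whose descriptors the chip has marked empty again.
 */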
static void
txreclaim(Ctlr *ctlr)
{
	uint tdh;
	Arge *arge;
	Block *bp;

	arge = ctlr->regs;
	tdh = ctlr->tdh;
	while (tdh != ctlr->tdt && ctlr->tdba[tdh].ctl & Descempty){
		arge->txsts = Txpktsent;

		bp = ctlr->td[tdh];
		ctlr->td[tdh] = nil;
		if (bp)
			freeb(bp);

		ctlr->tdba[tdh].addr = 0;
		ctlr->ntq--;
		tdh = NEXT(tdh, Ntd);
	}
	ctlr->tdh = tdh;
}

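/*
 * Get a receive Block from the shared pool; its free routine,
 * athrbfree, will return it to the pool when the rest of the
 * kernel frees it.
 */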
static Block*
athrballoc(void)
{
	Block *bp;

	ilock(&athrblock);
	if((bp = athrbpool) != nil){
		athrbpool = bp->next;
		bp->next = nil;
		_xinc(&bp->ref);	/* prevent bp from being freed */
	}
	iunlock(&athrblock);
	return bp;
}

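/*
 * Block free routine: reset the Block and put it back on the pool
 * instead of freeing it.
 */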
static void
athrbfree(Block* bp)
{
	bp->wp = bp->rp = bp->lim - ROUND(Rbsz, BLOCKALIGN);
	bp->flag &= ~(Bipck | Budpck | Btcpck | Bpktck);

	ilock(&athrblock);
	bp->next = athrbpool;
	athrbpool = bp;
	iunlock(&athrblock);
}

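/*
 * Attach a fresh receive buffer to rx ring slot i and mark the
 * descriptor empty, i.e. owned by the chip.
 */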
static void
rxnewbuf(Ctlr *ctlr, int i)
{
	Block *bp;
	Desc *rd;

	if (ctlr->rd[i] != nil)
		return;
	ctlr->rd[i] = bp = athrballoc();
	if(bp == nil)
		panic("#l%d: can't allocate receive buffer",
			ctlr->edev->ctlrno);
	dcflush(bp->rp, Rbsz);		/* writeback & invalidate */
	rd = &ctlr->rdba[i];
	rd->addr = PADDR(bp->rp);
	rd->ctl = Descempty | DMASIZE(Rbsz);
	ctlr->nrdfree++;
}

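/* re-arm drained rx descriptors, from tail up to (but not including) head */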
static void
rxreclaim(Ctlr *ctlr)
{
	uint rdt;

	rdt = ctlr->rdt;
	while (rdt != ctlr->rdh && !(ctlr->rdba[rdt].ctl & Descempty)){
		rxnewbuf(ctlr, rdt);
		rdt = NEXT(rdt, Nrd);
	}
	ctlr->rdt = rdt;
}

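/*
 * The interrupt handler doesn't touch the rings itself: it masks the
 * interrupting source, notes the pending work and wakes the relevant
 * kproc (rproc or tproc), which re-enables the source when done.
 */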
static void
etherintr(void *arg)
{
	int sts;
	Arge *arge;
	Ctlr *ctlr;
	Ether *ether;

	ether = arg;
	ctlr = ether->ctlr;
	arge = ctlr->regs;
	ilock(ctlr);
	sts = arge->dmaintrsts;
	if (sts & Dmarxpktrcvd) {
		arge->dmaintr &= ~Dmarxpktrcvd;
		ctlr->pktstoread = 1;
		wakeup(&ctlr->rrendez);

		ctlr->rintr++;
		sts &= ~Dmarxpktrcvd;
	}
	if (sts & (Dmatxpktsent | Dmatxunderrun)) {
		arge->dmaintr &= ~(Dmatxpktsent | Dmatxunderrun);
		ctlr->pktstosend = 1;
		wakeup(&ctlr->trendez);

		ctlr->tintr++;
		sts &= ~(Dmatxpktsent | Dmatxunderrun);
	}
	iunlock(ctlr);

	if (sts)
		iprint("#l%d: sts %#ux\n", ether->ctlrno, sts);
}

static int
pktstoread(void* v)
{
	Ctlr *ctlr = v;

	return ctlr->pktstoread || !(ctlr->rdba[ctlr->rdh].ctl & Descempty);
}

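/*
 * Receiver kproc: sleep until the interrupt handler reports received
 * packets, then walk the rx ring from the head, passing full Blocks
 * upstream via etheriq and re-arming descriptors as the ring drains.
 */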
static void
rproc(void* arg)
{
	uint rdh, sz;
	Arge *arge;
	Block *bp;
	Ctlr *ctlr;
	Desc *rd;
	Ether *edev;

	edev = arg;
	ctlr = edev->ctlr;
	arge = ctlr->regs;
	for(;;){
		/* wait for next interrupt */
		ilock(ctlr);
		arge->dmaintr |= Dmarxpktrcvd;
		iunlock(ctlr);

		sleep(&ctlr->rrendez, pktstoread, ctlr);
		ctlr->pktstoread = 0;

		rxreclaim(ctlr);
		rdh = ctlr->rdh;
		for (rd = &ctlr->rdba[rdh]; !(rd->ctl & Descempty);
		    rd = &ctlr->rdba[rdh]){
			bp = ctlr->rd[rdh];
			assert(bp != nil);
			ctlr->rd[rdh] = nil;

			/* omit final 4 bytes (crc), pass pkt upstream */
			sz = DMASIZE(rd->ctl) - 4;
			assert(sz > 0 && sz <= Rbsz);
			bp->wp = bp->rp + sz;
			bp = etheriq(edev, bp, 1);
			assert(bp == nil);	/* Block was consumed */
			arge->rxsts = Rxpktrcvd;

			ctlr->nrdfree--;
			rdh = NEXT(rdh, Nrd);
			if(ctlr->nrdfree < Nrd/2) {
				/* rxreclaim reads ctlr->rdh */
				ctlr->rdh = rdh;
				rxreclaim(edev->ctlr);
			}
		}
		ctlr->rdh = rdh;
	}
}

static int
pktstosend(void* v)
{
	Ether *edev = v;
	Ctlr *ctlr = edev->ctlr;

	return ctlr->pktstosend || ctlr->ntq > 0 || qlen(edev->oq) > 0;
}

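/*
 * Transmitter kproc: reclaim sent buffers, then move as many Blocks as
 * will fit from the output queue into the tx ring and kick the
 * transmitter.  Tx interrupts are enabled only while the ring is
 * non-empty; we don't otherwise care when packets go out.
 */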
static void
tproc(void* arg)
{
	uint tdt, added;
	Arge *arge;
	Block *bp;
	Ctlr *ctlr;
	Desc *td;
	Ether *edev;

	edev = arg;
	ctlr = edev->ctlr;
	arge = ctlr->regs;
	for(;;){
		/* wait for next free buffer and output queue block */
		sleep(&ctlr->trendez, pktstosend, edev);
		ctlr->pktstosend = 0;

		txreclaim(ctlr);

		/* copy as much of my output q as possible into output ring */
		added = 0;
		tdt = ctlr->tdt;
		while(ctlr->ntq < Ntd - 1){
			td = &ctlr->tdba[tdt];
			if (!(td->ctl & Descempty))
				break;
			bp = qget(edev->oq);
			if(bp == nil)
				break;

			/* make sure the whole packet is in ram */
			dcflush(bp->rp, BLEN(bp));

			/*
			 * Give ownership of the descriptor to the chip,
			 * increment the software ring descriptor pointer.
			 */
			ctlr->td[tdt] = bp;
			td->addr = PADDR(bp->rp);
			td->ctl = DMASIZE(BLEN(bp));
			coherence();

			added++;
			ctlr->ntq++;
			tdt = NEXT(tdt, Ntd);
		}
		ctlr->tdt = tdt;

		/*
		 * Underrun turns off TX.  Clear underrun indication.
		 * If there's anything left in the ring, reactivate the tx.
		 */
		if (arge->dmaintrsts & Dmatxunderrun)
			arge->txsts = Txunderrun;
		if(1 || added)
			arge->txctl = Dmatxctlen;	/* kick xmiter */

		ilock(ctlr);
		if(ctlr->ntq >= Ntd/2)		/* tx ring half-full? */
			arge->dmaintr |= Dmatxpktsent;
		else if (ctlr->ntq > 0)
			arge->dmaintr |= Dmatxunderrun;
		iunlock(ctlr);
		txreclaim(ctlr);
	}
}

/*
 * turn promiscuous mode on/off (not yet implemented; see TODO above)
 */
static void
promiscuous(void *ve, int on)
{
	USED(ve, on);
}

static void
multicast(void *ve, uchar*, int on)
{
	USED(ve, on);
}

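/*
 * Chain the descriptors into a ring.  The chip follows physical
 * addresses, hence the PADDRs.
 */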
static void
linkdescs(Desc *base, int ndesc)
{
	int i;

	for(i = 0; i < ndesc - 1; i++)
		base[i].next = (Desc *)PADDR(&base[i+1]);
	base[ndesc - 1].next = (Desc *)PADDR(&base[0]);
}

/*
 * Initialise the receive and transmit buffer rings.
 *
 * This routine is protected by ctlr->init.
 */
static void
ringinit(Ctlr* ctlr)
{
	int i;
	void *v;

	if(ctlr->rdba == 0){
		v = xspanalloc(Nrd * sizeof(Desc), CACHELINESZ, 0);
		assert(v);
		ctlr->rdba = (Desc *)KSEG1ADDR(v);
		ctlr->rd = xspanalloc(Nrd * sizeof(Block *), 0, 0);
		assert(ctlr->rd != nil);
		linkdescs(ctlr->rdba, Nrd);
		for(i = 0; i < Nrd; i++)
			rxnewbuf(ctlr, i);
	}
	ctlr->rdt = ctlr->rdh = 0;

	if(ctlr->tdba == 0) {
		v = xspanalloc(Ntd * sizeof(Desc), CACHELINESZ, 0);
		assert(v);
		ctlr->tdba = (Desc *)KSEG1ADDR(v);
		ctlr->td = xspanalloc(Ntd * sizeof(Block *), 0, 0);
		assert(ctlr->td != nil);
	}
	memset(ctlr->td, 0, Ntd * sizeof(Block *));
	linkdescs(ctlr->tdba, Ntd);
	for(i = 0; i < Ntd; i++)
		ctlr->tdba[i].ctl = Descempty;
	ctlr->tdh = ctlr->tdt = 0;
}

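/*
 * Force gigabit, full duplex.  Copying the PLL registers from arge0
 * for the second ether is still unwritten (see TODO above).
 */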
static void
cfgmediaduplex(Ether *ether)
{
	Arge *arge, *arge0;
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	arge = ctlr->regs;
	arge->cfg2 = (arge->cfg2 & ~Cfg2ifmode10_100) | Cfg2ifmode1000 | Cfg2fdx;
	arge->ifctl &= ~Ifctlspeed;
	arge->fiforxfiltmask |= Frmbytemode;
	arge->fifotxthresh = 0x008001ff;	/* undocumented magic */

	if (ether->ctlrno > 0) {
		/* set PLL registers: copy from arge0 */
		arge0 = (Arge *)(KSEG1 | etherifs[0].regs);
		USED(arge0);
	}
}

static void
athmii(Ether *ether, int phymask)
{
	USED(ether, phymask);		/* stub: mii/phy initialisation is TODO */
}

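/*
 * One-time MAC configuration: take the second MAC out of reset if need
 * be, enable rx and tx, set the station address, fifo thresholds and
 * receive filters.
 */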
static void
athcfg(Ether *ether, int phymask)
{
	uchar *eaddr;
	Arge *arge;
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	arge = ctlr->regs;
	if(ether->ctlrno > 0){
		if(0){
			/* doing this seems to disable both ethers */
			arge->cfg1 |= Cfg1softrst;	/* stop */
			delay(20);
			*Reset |= Rstge1mac;
			delay(100);
		}
		*Reset &= ~Rstge1mac;
		delay(200);
	}

	/* configure */
	arge->cfg1 = Cfg1syncrx | Cfg1rxen | Cfg1synctx | Cfg1txen;
	arge->cfg2 |= Cfg2enpadcrc | Cfg2lenfield | Cfg2encrc;
	arge->maxframelen = Rbsz;

	if(ether->ctlrno > 0){
		arge->miicfg = Miicfgrst;
		delay(100);
		arge->miicfg = Miicfgclkdiv28;
		delay(100);
	}

	/* set the station address registers from ether->ea */
	eaddr = ether->ea;
	arge->staaddr1 = eaddr[2]<<24 | eaddr[3]<<16 | eaddr[4]<<8 | eaddr[5];
	arge->staaddr2 = eaddr[0]<< 8 | eaddr[1];

	arge->fifocfg[0] = Fifocfg0all << Fifocfg0enshift; /* undocumented magic */
	arge->fifocfg[1] = 0x0fff0000;		/* undocumented magic */
	arge->fifocfg[2] = 0x00001fff;		/* undocumented magic */
	arge->fiforxfiltmatch = Ffmatchdflt;
	arge->fiforxfiltmask  = Ffmaskdflt;

	/* phy goo */
	athmii(ether, phymask);
	if (ether->ctlrno > 0)
		cfgmediaduplex(ether);
}

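/*
 * Attach: populate the receive Block pool, initialise the rings,
 * configure the MAC, point the chip at the rings, start the receive
 * and transmit kprocs and finally enable interrupts.
 */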
static int
athattach(Ether *ether)
{
	int i;
	char name[32];
	Arge *arge;
	Block *bp;
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	if (ctlr->attached)
		return -1;

	ilock(ctlr);
	ctlr->init = 1;
	for(i = 0; i < Nrb; i++){
		if((bp = allocb(Rbsz + Bufalign)) == nil)
			error(Enomem);
		bp->free = athrbfree;
		freeb(bp);
	}
	ringinit(ctlr);
	ctlr->init = 0;
	iunlock(ctlr);

	athcfg(ether, ctlr->phymask);

	/* start */
	arge = ctlr->regs;
	arge->txdesc = PADDR(ctlr->tdba);
	arge->rxdesc = PADDR(ctlr->rdba);
	coherence();
	arge->rxctl = Dmarxctlen;

	snprint(name, KNAMELEN, "#l%drproc", ether->ctlrno);
	kproc(name, rproc, ether);
	snprint(name, KNAMELEN, "#l%dtproc", ether->ctlrno);
	kproc(name, tproc, ether);

	ilock(ctlr);
	arge->dmaintr |= Dmaall;
	iunlock(ctlr);

	ctlr->attached = 1;
	return 0;
}

/*
 * strategy: RouterBOOT has initialised arge0; try to leave it alone.
 * copy arge0 registers to arge1, with a few exceptions.
 */
static int
athreset(Ether *ether)
{
	Arge *arge;
	Ctlr *ctlr;
	Etherif *ep;

	if (ether->ctlrno < 0 || ether->ctlrno >= MaxEther ||
	    ether->ctlrno >= nelem(etherifs))	/* don't run off etherifs[] */
		return -1;
	if (ether->ctlr == nil) {
		/*
		 * Allocate a controller structure and start to initialise it.
		 */
		ether->ctlr = ctlr = malloc(sizeof(Ctlr));
		if (ctlr == nil)
			return -1;
		ctlr->edev = ether;

		ep = etherifs + ether->ctlrno;
		ctlr->regs = arge = (Arge *)(KSEG1 | ep->regs);
		ctlr->phymask = ep->phymask;

		ether->port = (uint)arge;
		ether->irq = ep->irq;
		memmove(ether->ea, ep->mac, Eaddrlen);
		ether->ifstat = ifstat;
		ether->promiscuous = promiscuous;
		ether->multicast = multicast;
		ether->arg = ether;
	}
	athhwreset(ether);
	return 0;
}

static Ether*
etherprobe(int ctlrno)
{
	int i, lg;
	ulong mb, bsz;
	Ether *ether;
	char buf[128], name[32];

	ether = malloc(sizeof(Ether));
	if(ether == nil)
		error(Enomem);
	memset(ether, 0, sizeof(Ether));
	ether->ctlrno = ctlrno;
	ether->tbdf = BUSUNKNOWN;
	ether->mbps = 1000;
	ether->minmtu = ETHERMINTU;
	ether->maxmtu = ETHERMAXTU;
	ether->mtu = ETHERMAXTU;
	if(ctlrno >= MaxEther || athreset(ether) < 0){
		free(ether);
		return nil;
	}

	snprint(name, sizeof(name), "ether%d", ctlrno);

	/*
	 * if ether->irq is <0, it is a hack to indicate no interrupt
	 * is used (by ethersink).  enabling the interrupt apparently
	 * has to be done here and cannot be deferred until attach.
	 */
	if(ether->irq >= 0)
		intrenable(ether->irq, etherintr, ether);

	i = sprint(buf, "#l%d: atheros71xx: ", ctlrno);
	if(ether->mbps >= 1000)
		i += sprint(buf+i, "%dGbps", ether->mbps/1000);
	else
		i += sprint(buf+i, "%dMbps", ether->mbps);
	i += sprint(buf+i, " port %#luX irq %d", PADDR(ether->port), ether->irq);
	i += sprint(buf+i, ": %2.2ux%2.2ux%2.2ux%2.2ux%2.2ux%2.2ux",
		ether->ea[0], ether->ea[1], ether->ea[2],
		ether->ea[3], ether->ea[4], ether->ea[5]);
	sprint(buf+i, "\n");
	print(buf);

	/*
	 * input queues are allocated by ../port/netif.c:/^openfile.
	 * the size will be the last argument to netifinit() below.
	 *
	 * output queues should be small, to minimise `bufferbloat',
	 * which confuses tcp's feedback loop.  at 1Gb/s, it only takes
	 * ~15µs to transmit a full-sized non-jumbo packet.
	 */

	/* compute log10(ether->mbps) into lg */
	for(lg = 0, mb = ether->mbps; mb >= 10; lg++)
		mb /= 10;
	if (lg > 13)			/* sanity cap; 2**(13+16) = 2²⁹ */
		lg = 13;

	/* allocate larger input queues for higher-speed interfaces */
	bsz = 1UL << (lg + 16);		/* 2¹⁶ = 64K; bsz = 2^lg × 64K */
	while (bsz > mainmem->maxsize / 8 && bsz > 128*1024)	/* sanity */
		bsz /= 2;
	netifinit(ether, name, Ntypes, bsz);

	if(ether->oq == nil)
		ether->oq = qopen(1 << (lg + 13), Qmsg, 0, 0);
	if(ether->oq == nil)
		panic("etherreset %s: can't allocate output queue", name);

	ether->alen = Eaddrlen;
	memmove(ether->addr, ether->ea, Eaddrlen);
	memset(ether->bcast, 0xFF, Eaddrlen);

	return ether;
}

static void
etherreset(void)
{
	int ctlrno;

	for(ctlrno = 0; ctlrno < MaxEther; ctlrno++)
		etherxx[ctlrno] = etherprobe(ctlrno);
}

static void
ethershutdown(void)
{
	Ether *ether;
	int i;

	for(i = 0; i < MaxEther; i++){
		ether = etherxx[i];
		if(ether)
			athhwreset(ether);
	}
}

static Chan *
etherattach(char* spec)
{
	ulong ctlrno;
	char *p;
	Chan *chan;

	ctlrno = 0;
	if(spec && *spec){
		ctlrno = strtoul(spec, &p, 0);
		if((ctlrno == 0 && p == spec) || *p || (ctlrno >= MaxEther))
			error(Ebadarg);
	}
	if(etherxx[ctlrno] == 0)
		error(Enodev);

	chan = devattach('l', spec);
	if(waserror()){
		chanfree(chan);
		nexterror();
	}
	chan->dev = ctlrno;
	athattach(etherxx[ctlrno]);
	poperror();
	return chan;
}

static Walkqid*
etherwalk(Chan *c, Chan *nc, char **name, int nname)
{
	return netifwalk(etherxx[c->dev], c, nc, name, nname);
}

static Chan*
etheropen(Chan *c, int omode)
{
	return netifopen(etherxx[c->dev], c, omode);
}

static void
ethercreate(Chan*, char*, int, ulong)
{
}

static void
etherclose(Chan *c)
{
	netifclose(etherxx[c->dev], c);
}

static long
etherread(Chan *chan, void *buf, long n, vlong off)
{
	Ether *ether;
	ulong offset = off;

	ether = etherxx[chan->dev];
	if((chan->qid.type & QTDIR) == 0 && ether->ifstat){
		/*
		 * With some controllers it is necessary to reach
		 * into the chip to extract statistics.
		 */
		if(NETTYPE(chan->qid.path) == Nifstatqid)
			return ether->ifstat(ether, buf, n, offset);
		else if(NETTYPE(chan->qid.path) == Nstatqid)
			ether->ifstat(ether, buf, 0, offset);
	}
	return netifread(ether, chan, buf, n, offset);
}

static Block*
etherbread(Chan *c, long n, ulong offset)
{
	return netifbread(etherxx[c->dev], c, n, offset);
}

/* kick the transmitter to drain the output ring */
static void
athtransmit(Ether* ether)
{
	Ctlr *ctlr;

	ctlr = ether->ctlr;
	ilock(ctlr);
	ctlr->pktstosend = 1;
	wakeup(&ctlr->trendez);
	iunlock(ctlr);
}

static long (*athctl)(Ether *, char *, int) = nil;

static int
etheroq(Ether* ether, Block* bp)
{
	int len, loopback, s;
	Etherpkt *pkt;

	ether->outpackets++;

	/*
	 * Check if the packet has to be placed back onto the input queue,
	 * i.e. if it's a loopback or broadcast packet or the interface is
	 * in promiscuous mode.
	 * If it's a loopback packet indicate to etheriq that the data isn't
	 * needed and return; etheriq will pass on or free the block.
	 * To enable bridging to work, only packets that were originated
	 * by this interface are fed back.
	 */
	pkt = (Etherpkt*)bp->rp;
	len = BLEN(bp);
	loopback = memcmp(pkt->d, ether->ea, sizeof(pkt->d)) == 0;
	if(loopback || memcmp(pkt->d, ether->bcast, sizeof(pkt->d)) == 0 ||
	    ether->prom){
		s = splhi();
		etheriq(ether, bp, 0);
		splx(s);
	}

	if(!loopback){
		if(qfull(ether->oq))
			print("etheroq: WARNING: ether->oq full!\n");
		qbwrite(ether->oq, bp);
		athtransmit(ether);
	} else
		freeb(bp);

	return len;
}

static long
etherwrite(Chan* chan, void* buf, long n, vlong)
{
	Ether *ether;
	Block *bp;
	int nn, onoff;
	Cmdbuf *cb;

	ether = etherxx[chan->dev];
	if(NETTYPE(chan->qid.path) != Ndataqid) {
		nn = netifwrite(ether, chan, buf, n);
		if(nn >= 0)
			return nn;
		cb = parsecmd(buf, n);
		if(cb->f[0] && strcmp(cb->f[0], "nonblocking") == 0){
			if(cb->nf <= 1)
				onoff = 1;
			else
				onoff = atoi(cb->f[1]);
			qnoblock(ether->oq, onoff);
			free(cb);
			return n;
		}
		free(cb);
		if(athctl != nil)
			return athctl(ether, buf, n);
		error(Ebadctl);
	}

	assert(ether->ctlr != nil);
	if(n > ether->mtu)
		error(Etoobig);
	if(n < ether->minmtu)
		error(Etoosmall);

	bp = allocb(n);
	if(waserror()){
		freeb(bp);
		nexterror();
	}
	memmove(bp->rp, buf, n);
	memmove(bp->rp+Eaddrlen, ether->ea, Eaddrlen);
	poperror();
	bp->wp += n;

	return etheroq(ether, bp);
}

static long
etherbwrite(Chan *c, Block *bp, ulong offset)
{
	return devbwrite(c, bp, offset);
}

static int
etherstat(Chan *c, uchar *dp, int n)
{
	return netifstat(etherxx[c->dev], c, dp, n);
}

static int
etherwstat(Chan *c, uchar *dp, int n)
{
	return netifwstat(etherxx[c->dev], c, dp, n);
}

Dev etherdevtab = {
	'l',
	"ether",

	etherreset,
	devinit,
	ethershutdown,
	etherattach,
	etherwalk,
	etherstat,
	etheropen,
	ethercreate,
	etherclose,
	etherread,
	etherbread,
	etherwrite,
	etherbwrite,
	devremove,
	etherwstat,
	devpower,
	devconfig,
};