0501-net-next-mediatek-add-the-drivers-core-files.patch

From 2abe91b53ca4d2528ef1fc9c44c6e69f8c805776 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Wed, 18 Nov 2015 03:12:19 +0100
Subject: [PATCH 501/513] net-next: mediatek: add the drivers core files

This patch adds the main chunk of the driver. The ethernet core is used in all
of the Mediatek/Ralink Wireless SoCs. Over the years we have seen various
changes to
* the register layout
* the type of ports (single/dual gbit, internal FE/Gbit switch)
* the DMA engine
and new offloading features were added, such as
* checksum
* VLAN tx/rx
* GSO
* LRO
However the core functionality has remained the same, allowing us to use the
same core for all SoCs.
The abstraction for the various SoCs uses the typical ops struct pattern, which
allows us to extend or override the core's functionality depending on which SoC
we are on. The code to bring up the switches and external ports has also been
split into separate files.
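
As an editor's illustration of this ops-struct pattern (not part of the patch
itself), here is a minimal, standalone C sketch. All names in it (demo_priv,
demo_soc_data, mt7620_set_mac, ...) are hypothetical; it only mirrors the way
fe_set_mac_address() below calls priv->soc->set_mac when the SoC provides an
override and falls back to the common fe_hw_set_macaddr() otherwise:

#include <stdio.h>

struct demo_priv { int id; };

/* per-SoC data: optional callbacks override the core behaviour */
struct demo_soc_data {
	const char *name;
	void (*set_mac)(struct demo_priv *priv, const unsigned char *mac);
};

/* hypothetical SoC-specific override */
static void mt7620_set_mac(struct demo_priv *priv, const unsigned char *mac)
{
	printf("mt7620: program MAC through the SoC-specific path\n");
}

/* common core implementation, used when no override is set */
static void demo_hw_set_macaddr(struct demo_priv *priv, const unsigned char *mac)
{
	printf("core: program MAC through the common registers\n");
}

static void demo_set_mac_address(struct demo_priv *priv,
				 const struct demo_soc_data *soc,
				 const unsigned char *mac)
{
	if (soc->set_mac)		/* SoC override present */
		soc->set_mac(priv, mac);
	else				/* common core fallback */
		demo_hw_set_macaddr(priv, mac);
}

int main(void)
{
	struct demo_priv priv = { 0 };
	const unsigned char mac[6] = { 0 };
	const struct demo_soc_data generic = { .name = "generic" };
	const struct demo_soc_data mt7620 = { .name = "mt7620",
					      .set_mac = mt7620_set_mac };

	demo_set_mac_address(&priv, &generic, mac);	/* core path */
	demo_set_mac_address(&priv, &mt7620, mac);	/* override path */
	return 0;
}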
Signed-off-by: John Crispin <blogic@openwrt.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Michael Lee <igvtee@gmail.com>
---
drivers/net/ethernet/mediatek/ethtool.c | 235 ++++
drivers/net/ethernet/mediatek/ethtool.h | 22 +
drivers/net/ethernet/mediatek/mdio.c | 258 +++++
drivers/net/ethernet/mediatek/mdio.h | 27 +
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1607 +++++++++++++++++++++++++++
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 522 +++++++++
6 files changed, 2671 insertions(+)
create mode 100644 drivers/net/ethernet/mediatek/ethtool.c
create mode 100644 drivers/net/ethernet/mediatek/ethtool.h
create mode 100644 drivers/net/ethernet/mediatek/mdio.c
create mode 100644 drivers/net/ethernet/mediatek/mdio.h
create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/ethtool.c
@@ -0,0 +1,235 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
+ */
+
+#include "mtk_eth_soc.h"
+
+static const char fe_gdma_str[][ETH_GSTRING_LEN] = {
+#define _FE(x...) # x,
+FE_STAT_REG_DECLARE
+#undef _FE
+};
+
+static int fe_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->phy_dev)
+ goto out_gset;
+
+ if (priv->phy_flags == FE_PHY_FLAG_ATTACH) {
+ err = phy_read_status(priv->phy_dev);
+ if (err)
+ goto out_gset;
+ }
+
+ return phy_ethtool_gset(priv->phy_dev, cmd);
+
+out_gset:
+ return -ENODEV;
+}
+
+static int fe_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
+ if (!priv->phy_dev)
+ goto out_sset;
+
+ if (cmd->phy_address != priv->phy_dev->addr) {
+ if (priv->phy->phy_node[cmd->phy_address]) {
+ priv->phy_dev = priv->phy->phy[cmd->phy_address];
+ priv->phy_flags = FE_PHY_FLAG_PORT;
+ } else if (priv->mii_bus &&
+ priv->mii_bus->phy_map[cmd->phy_address]) {
+ priv->phy_dev =
+ priv->mii_bus->phy_map[cmd->phy_address];
+ priv->phy_flags = FE_PHY_FLAG_ATTACH;
+ } else {
+ goto out_sset;
+ }
+ }
+
+ return phy_ethtool_sset(priv->phy_dev, cmd);
+
+out_sset:
+ return -ENODEV;
+}
+
+static void fe_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+ struct fe_soc_data *soc = priv->soc;
+
+ strlcpy(info->driver, priv->device->driver->name, sizeof(info->driver));
+ strlcpy(info->version, MTK_FE_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
+
+ if (soc->reg_table[FE_REG_FE_COUNTER_BASE])
+ info->n_stats = ARRAY_SIZE(fe_gdma_str);
+}
+
+static u32 fe_get_msglevel(struct net_device *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void fe_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = value;
+}
+
+static int fe_nway_reset(struct net_device *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
+ if (!priv->phy_dev)
+ goto out_nway_reset;
+
+ return genphy_restart_aneg(priv->phy_dev);
+
+out_nway_reset:
+ return -EOPNOTSUPP;
+}
+
+static u32 fe_get_link(struct net_device *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->phy_dev)
+ goto out_get_link;
+
+ if (priv->phy_flags == FE_PHY_FLAG_ATTACH) {
+ err = genphy_update_link(priv->phy_dev);
+ if (err)
+ goto out_get_link;
+ }
+
+ return priv->phy_dev->link;
+
+out_get_link:
+ return ethtool_op_get_link(dev);
+}
+
+static int fe_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
+ if ((ring->tx_pending < 2) ||
+ (ring->rx_pending < 2) ||
+ (ring->rx_pending > MAX_DMA_DESC) ||
+ (ring->tx_pending > MAX_DMA_DESC))
+ return -EINVAL;
+
+ dev->netdev_ops->ndo_stop(dev);
+
+ priv->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
+ priv->rx_ring.rx_ring_size = BIT(fls(ring->rx_pending) - 1);
+
+ dev->netdev_ops->ndo_open(dev);
+
+ return 0;
+}
+
+static void fe_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = MAX_DMA_DESC;
+ ring->tx_max_pending = MAX_DMA_DESC;
+ ring->rx_pending = priv->rx_ring.rx_ring_size;
+ ring->tx_pending = priv->tx_ring.tx_ring_size;
+}
+
+static void fe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *fe_gdma_str, sizeof(fe_gdma_str));
+ break;
+ }
+}
+
+static int fe_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(fe_gdma_str);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void fe_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+ struct fe_hw_stats *hwstats = priv->hw_stats;
+ u64 *data_src, *data_dst;
+ unsigned int start;
+ int i;
+
+ if (netif_running(dev) && netif_device_present(dev)) {
+ if (spin_trylock(&hwstats->stats_lock)) {
+ fe_stats_update(priv);
+ spin_unlock(&hwstats->stats_lock);
+ }
+ }
+
+ do {
+ data_src = &hwstats->tx_bytes;
+ data_dst = data;
+ start = u64_stats_fetch_begin_irq(&hwstats->syncp);
+
+ for (i = 0; i < ARRAY_SIZE(fe_gdma_str); i++)
+ *data_dst++ = *data_src++;
+
+ } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+}
+
+static struct ethtool_ops fe_ethtool_ops = {
+ .get_settings = fe_get_settings,
+ .set_settings = fe_set_settings,
+ .get_drvinfo = fe_get_drvinfo,
+ .get_msglevel = fe_get_msglevel,
+ .set_msglevel = fe_set_msglevel,
+ .nway_reset = fe_nway_reset,
+ .get_link = fe_get_link,
+ .set_ringparam = fe_set_ringparam,
+ .get_ringparam = fe_get_ringparam,
+};
+
+void fe_set_ethtool_ops(struct net_device *netdev)
+{
+ struct fe_priv *priv = netdev_priv(netdev);
+ struct fe_soc_data *soc = priv->soc;
+
+ if (soc->reg_table[FE_REG_FE_COUNTER_BASE]) {
+ fe_ethtool_ops.get_strings = fe_get_strings;
+ fe_ethtool_ops.get_sset_count = fe_get_sset_count;
+ fe_ethtool_ops.get_ethtool_stats = fe_get_ethtool_stats;
+ }
+
+ netdev->ethtool_ops = &fe_ethtool_ops;
+}
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/ethtool.h
@@ -0,0 +1,22 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef FE_ETHTOOL_H
+#define FE_ETHTOOL_H
+
+#include <linux/ethtool.h>
+
+void fe_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* FE_ETHTOOL_H */
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mdio.c
@@ -0,0 +1,258 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+#include "mtk_eth_soc.h"
+#include "mdio.h"
+
+static int fe_mdio_reset(struct mii_bus *bus)
+{
+ /* TODO */
+ return 0;
+}
+
+static void fe_phy_link_adjust(struct net_device *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->phy->lock, flags);
+ for (i = 0; i < 8; i++) {
+ if (priv->phy->phy_node[i]) {
+ struct phy_device *phydev = priv->phy->phy[i];
+ int status_change = 0;
+
+ if (phydev->link)
+ if (priv->phy->duplex[i] != phydev->duplex ||
+ priv->phy->speed[i] != phydev->speed)
+ status_change = 1;
+
+ if (phydev->link != priv->link[i])
+ status_change = 1;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ case SPEED_100:
+ case SPEED_10:
+ priv->link[i] = phydev->link;
+ priv->phy->duplex[i] = phydev->duplex;
+ priv->phy->speed[i] = phydev->speed;
+
+ if (status_change &&
+ priv->soc->mdio_adjust_link)
+ priv->soc->mdio_adjust_link(priv, i);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&priv->phy->lock, flags);
+}
+
+int fe_connect_phy_node(struct fe_priv *priv, struct device_node *phy_node)
+{
+ const __be32 *_port = NULL;
+ struct phy_device *phydev;
+ int phy_mode, port;
+
+ _port = of_get_property(phy_node, "reg", NULL);
+
+ if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
+ pr_err("%s: invalid port id\n", phy_node->name);
+ return -EINVAL;
+ }
+ port = be32_to_cpu(*_port);
+ phy_mode = of_get_phy_mode(phy_node);
+ if (phy_mode < 0) {
+ dev_err(priv->device, "incorrect phy-mode %d\n", phy_mode);
+ priv->phy->phy_node[port] = NULL;
+ return -EINVAL;
+ }
+
+ phydev = of_phy_connect(priv->netdev, phy_node, fe_phy_link_adjust,
+ 0, phy_mode);
+ if (IS_ERR(phydev)) {
+ dev_err(priv->device, "could not connect to PHY\n");
+ priv->phy->phy_node[port] = NULL;
+ return PTR_ERR(phydev);
+ }
+
+ phydev->supported &= PHY_GBIT_FEATURES;
+ phydev->advertising = phydev->supported;
+
+ dev_info(priv->device,
+ "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
+ port, dev_name(&phydev->dev), phydev->phy_id,
+ phydev->drv->name);
+
+ priv->phy->phy[port] = phydev;
+ priv->link[port] = 0;
+
+ return 0;
+}
+
+static void phy_init(struct fe_priv *priv, struct phy_device *phy)
+{
+ phy_attach(priv->netdev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
+
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->supported &= PHY_BASIC_FEATURES;
+ phy->advertising = phy->supported | ADVERTISED_Autoneg;
+
+ phy_start_aneg(phy);
+}
+
+static int fe_phy_connect(struct fe_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (priv->phy->phy_node[i]) {
+ if (!priv->phy_dev) {
+ priv->phy_dev = priv->phy->phy[i];
+ priv->phy_flags = FE_PHY_FLAG_PORT;
+ }
+ } else if (priv->mii_bus && priv->mii_bus->phy_map[i]) {
+ phy_init(priv, priv->mii_bus->phy_map[i]);
+ if (!priv->phy_dev) {
+ priv->phy_dev = priv->mii_bus->phy_map[i];
+ priv->phy_flags = FE_PHY_FLAG_ATTACH;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void fe_phy_disconnect(struct fe_priv *priv)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (priv->phy->phy_fixed[i]) {
+ spin_lock_irqsave(&priv->phy->lock, flags);
+ priv->link[i] = 0;
+ if (priv->soc->mdio_adjust_link)
+ priv->soc->mdio_adjust_link(priv, i);
+ spin_unlock_irqrestore(&priv->phy->lock, flags);
+ } else if (priv->phy->phy[i]) {
+ phy_disconnect(priv->phy->phy[i]);
+ } else if (priv->mii_bus && priv->mii_bus->phy_map[i]) {
+ phy_detach(priv->mii_bus->phy_map[i]);
+ }
+}
+
+static void fe_phy_start(struct fe_priv *priv)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (priv->phy->phy_fixed[i]) {
+ spin_lock_irqsave(&priv->phy->lock, flags);
+ priv->link[i] = 1;
+ if (priv->soc->mdio_adjust_link)
+ priv->soc->mdio_adjust_link(priv, i);
+ spin_unlock_irqrestore(&priv->phy->lock, flags);
+ } else if (priv->phy->phy[i]) {
+ phy_start(priv->phy->phy[i]);
+ }
+ }
+}
+
+static void fe_phy_stop(struct fe_priv *priv)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (priv->phy->phy_fixed[i]) {
+ spin_lock_irqsave(&priv->phy->lock, flags);
+ priv->link[i] = 0;
+ if (priv->soc->mdio_adjust_link)
+ priv->soc->mdio_adjust_link(priv, i);
+ spin_unlock_irqrestore(&priv->phy->lock, flags);
+ } else if (priv->phy->phy[i]) {
+ phy_stop(priv->phy->phy[i]);
+ }
+}
+
+static struct fe_phy phy_ralink = {
+ .connect = fe_phy_connect,
+ .disconnect = fe_phy_disconnect,
+ .start = fe_phy_start,
+ .stop = fe_phy_stop,
+};
+
+int fe_mdio_init(struct fe_priv *priv)
+{
+ struct device_node *mii_np;
+ int err;
+
+ if (!priv->soc->mdio_read || !priv->soc->mdio_write)
+ return 0;
+
+ spin_lock_init(&phy_ralink.lock);
+ priv->phy = &phy_ralink;
+
+ mii_np = of_get_child_by_name(priv->device->of_node, "mdio-bus");
+ if (!mii_np) {
+ dev_err(priv->device, "no %s child node found", "mdio-bus");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(mii_np)) {
+ err = 0;
+ goto err_put_node;
+ }
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ err = -ENOMEM;
+ goto err_put_node;
+ }
+
+ priv->mii_bus->name = "mdio";
+ priv->mii_bus->read = priv->soc->mdio_read;
+ priv->mii_bus->write = priv->soc->mdio_write;
+ priv->mii_bus->reset = fe_mdio_reset;
+ priv->mii_bus->priv = priv;
+ priv->mii_bus->parent = priv->device;
+
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
+ err = of_mdiobus_register(priv->mii_bus, mii_np);
+ if (err)
+ goto err_free_bus;
+
+ return 0;
+
+err_free_bus:
+ kfree(priv->mii_bus);
+err_put_node:
+ of_node_put(mii_np);
+ priv->mii_bus = NULL;
+ return err;
+}
+
+void fe_mdio_cleanup(struct fe_priv *priv)
+{
+ if (!priv->mii_bus)
+ return;
+
+ mdiobus_unregister(priv->mii_bus);
+ of_node_put(priv->mii_bus->dev.of_node);
+ kfree(priv->mii_bus);
+}
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mdio.h
@@ -0,0 +1,27 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef _RALINK_MDIO_H__
+#define _RALINK_MDIO_H__
+
+#ifdef CONFIG_NET_MEDIATEK_MDIO
+int fe_mdio_init(struct fe_priv *priv);
+void fe_mdio_cleanup(struct fe_priv *priv);
+int fe_connect_phy_node(struct fe_priv *priv,
+ struct device_node *phy_node);
+#else
+static inline int fe_mdio_init(struct fe_priv *priv) { return 0; }
+static inline void fe_mdio_cleanup(struct fe_priv *priv) {}
+#endif
+#endif
  593. --- /dev/null
  594. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  595. @@ -0,0 +1,1587 @@
  596. +/* This program is free software; you can redistribute it and/or modify
  597. + * it under the terms of the GNU General Public License as published by
  598. + * the Free Software Foundation; version 2 of the License
  599. + *
  600. + * This program is distributed in the hope that it will be useful,
  601. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  602. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  603. + * GNU General Public License for more details.
  604. + *
  605. + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
  606. + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
  607. + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
  608. + */
  609. +
  610. +#include <linux/module.h>
  611. +#include <linux/kernel.h>
  612. +#include <linux/types.h>
  613. +#include <linux/dma-mapping.h>
  614. +#include <linux/init.h>
  615. +#include <linux/skbuff.h>
  616. +#include <linux/etherdevice.h>
  617. +#include <linux/ethtool.h>
  618. +#include <linux/platform_device.h>
  619. +#include <linux/of_device.h>
  620. +#include <linux/clk.h>
  621. +#include <linux/of_net.h>
  622. +#include <linux/of_mdio.h>
  623. +#include <linux/if_vlan.h>
  624. +#include <linux/reset.h>
  625. +#include <linux/tcp.h>
  626. +#include <linux/io.h>
  627. +#include <linux/bug.h>
  628. +
  629. +#include <asm/mach-ralink/ralink_regs.h>
  630. +
  631. +#include "mtk_eth_soc.h"
  632. +#include "mdio.h"
  633. +#include "ethtool.h"
  634. +
  635. +#define MAX_RX_LENGTH 1536
  636. +#define FE_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
  637. +#define FE_RX_HLEN (NET_SKB_PAD + FE_RX_ETH_HLEN + NET_IP_ALIGN)
  638. +#define DMA_DUMMY_DESC 0xffffffff
  639. +#define FE_DEFAULT_MSG_ENABLE \
  640. + (NETIF_MSG_DRV | \
  641. + NETIF_MSG_PROBE | \
  642. + NETIF_MSG_LINK | \
  643. + NETIF_MSG_TIMER | \
  644. + NETIF_MSG_IFDOWN | \
  645. + NETIF_MSG_IFUP | \
  646. + NETIF_MSG_RX_ERR | \
  647. + NETIF_MSG_TX_ERR)
  648. +
  649. +#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
  650. +#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
  651. +#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
  652. +#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
  653. +
  654. +#define SYSC_REG_RSTCTRL 0x34
  655. +
  656. +static int fe_msg_level = -1;
  657. +module_param_named(msg_level, fe_msg_level, int, 0);
  658. +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  659. +
  660. +static const u16 fe_reg_table_default[FE_REG_COUNT] = {
  661. + [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
  662. + [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG,
  663. + [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG,
  664. + [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0,
  665. + [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0,
  666. + [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0,
  667. + [FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0,
  668. + [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0,
  669. + [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0,
  670. + [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
  671. + [FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0,
  672. + [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
  673. + [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
  674. + [FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
  675. + [FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
  676. + [FE_REG_FE_RST_GL] = FE_FE_RST_GL,
  677. +};
  678. +
  679. +static const u16 *fe_reg_table = fe_reg_table_default;
  680. +
  681. +struct fe_work_t {
  682. + int bitnr;
  683. + void (*action)(struct fe_priv *);
  684. +};
  685. +
  686. +static void __iomem *fe_base;
  687. +
  688. +void fe_w32(u32 val, unsigned reg)
  689. +{
  690. + __raw_writel(val, fe_base + reg);
  691. +}
  692. +
  693. +u32 fe_r32(unsigned reg)
  694. +{
  695. + return __raw_readl(fe_base + reg);
  696. +}
  697. +
  698. +void fe_reg_w32(u32 val, enum fe_reg reg)
  699. +{
  700. + fe_w32(val, fe_reg_table[reg]);
  701. +}
  702. +
  703. +u32 fe_reg_r32(enum fe_reg reg)
  704. +{
  705. + return fe_r32(fe_reg_table[reg]);
  706. +}
  707. +
  708. +void fe_reset(u32 reset_bits)
  709. +{
  710. + u32 t;
  711. +
  712. + t = rt_sysc_r32(SYSC_REG_RSTCTRL);
  713. + t |= reset_bits;
  714. + rt_sysc_w32(t, SYSC_REG_RSTCTRL);
  715. + usleep_range(10, 20);
  716. +
  717. + t &= ~reset_bits;
  718. + rt_sysc_w32(t, SYSC_REG_RSTCTRL);
  719. + usleep_range(10, 20);
  720. +}
  721. +
  722. +static inline void fe_int_disable(u32 mask)
  723. +{
  724. + fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
  725. + FE_REG_FE_INT_ENABLE);
  726. + /* flush write */
  727. + fe_reg_r32(FE_REG_FE_INT_ENABLE);
  728. +}
  729. +
  730. +static inline void fe_int_enable(u32 mask)
  731. +{
  732. + fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask,
  733. + FE_REG_FE_INT_ENABLE);
  734. + /* flush write */
  735. + fe_reg_r32(FE_REG_FE_INT_ENABLE);
  736. +}
  737. +
  738. +static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac)
  739. +{
  740. + unsigned long flags;
  741. +
  742. + spin_lock_irqsave(&priv->page_lock, flags);
  743. + fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH);
  744. + fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
  745. + FE_GDMA1_MAC_ADRL);
  746. + spin_unlock_irqrestore(&priv->page_lock, flags);
  747. +}
  748. +
  749. +static int fe_set_mac_address(struct net_device *dev, void *p)
  750. +{
  751. + int ret = eth_mac_addr(dev, p);
  752. +
  753. + if (!ret) {
  754. + struct fe_priv *priv = netdev_priv(dev);
  755. +
  756. + if (priv->soc->set_mac)
  757. + priv->soc->set_mac(priv, dev->dev_addr);
  758. + else
  759. + fe_hw_set_macaddr(priv, p);
  760. + }
  761. +
  762. + return ret;
  763. +}
  764. +
  765. +static inline int fe_max_frag_size(int mtu)
  766. +{
  767. + /* make sure buf_size will be at least MAX_RX_LENGTH */
  768. + if (mtu + FE_RX_ETH_HLEN < MAX_RX_LENGTH)
  769. + mtu = MAX_RX_LENGTH - FE_RX_ETH_HLEN;
  770. +
  771. + return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
  772. + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  773. +}
  774. +
  775. +static inline int fe_max_buf_size(int frag_size)
  776. +{
  777. + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
  778. + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  779. +
  780. + BUG_ON(buf_size < MAX_RX_LENGTH);
  781. + return buf_size;
  782. +}
  783. +
  784. +static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd)
  785. +{
  786. + rxd->rxd1 = dma_rxd->rxd1;
  787. + rxd->rxd2 = dma_rxd->rxd2;
  788. + rxd->rxd3 = dma_rxd->rxd3;
  789. + rxd->rxd4 = dma_rxd->rxd4;
  790. +}
  791. +
  792. +static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd)
  793. +{
  794. + dma_txd->txd1 = txd->txd1;
  795. + dma_txd->txd3 = txd->txd3;
  796. + dma_txd->txd4 = txd->txd4;
  797. + /* clean dma done flag last */
  798. + dma_txd->txd2 = txd->txd2;
  799. +}
  800. +
  801. +static void fe_clean_rx(struct fe_priv *priv)
  802. +{
  803. + int i;
  804. + struct fe_rx_ring *ring = &priv->rx_ring;
  805. +
  806. + if (ring->rx_data) {
  807. + for (i = 0; i < ring->rx_ring_size; i++)
  808. + if (ring->rx_data[i]) {
  809. + if (ring->rx_dma && ring->rx_dma[i].rxd1)
  810. + dma_unmap_single(&priv->netdev->dev,
  811. + ring->rx_dma[i].rxd1,
  812. + ring->rx_buf_size,
  813. + DMA_FROM_DEVICE);
  814. + put_page(virt_to_head_page(ring->rx_data[i]));
  815. + }
  816. +
  817. + kfree(ring->rx_data);
  818. + ring->rx_data = NULL;
  819. + }
  820. +
  821. + if (ring->rx_dma) {
  822. + dma_free_coherent(&priv->netdev->dev,
  823. + ring->rx_ring_size * sizeof(*ring->rx_dma),
  824. + ring->rx_dma,
  825. + ring->rx_phys);
  826. + ring->rx_dma = NULL;
  827. + }
  828. +}
  829. +
  830. +static int fe_alloc_rx(struct fe_priv *priv)
  831. +{
  832. + struct net_device *netdev = priv->netdev;
  833. + struct fe_rx_ring *ring = &priv->rx_ring;
  834. + int i, pad;
  835. +
  836. + ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
  837. + GFP_KERNEL);
  838. + if (!ring->rx_data)
  839. + goto no_rx_mem;
  840. +
  841. + for (i = 0; i < ring->rx_ring_size; i++) {
  842. + ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
  843. + if (!ring->rx_data[i])
  844. + goto no_rx_mem;
  845. + }
  846. +
  847. + ring->rx_dma = dma_alloc_coherent(&netdev->dev,
  848. + ring->rx_ring_size * sizeof(*ring->rx_dma),
  849. + &ring->rx_phys,
  850. + GFP_ATOMIC | __GFP_ZERO);
  851. + if (!ring->rx_dma)
  852. + goto no_rx_mem;
  853. +
  854. + if (priv->flags & FE_FLAG_RX_2B_OFFSET)
  855. + pad = 0;
  856. + else
  857. + pad = NET_IP_ALIGN;
  858. + for (i = 0; i < ring->rx_ring_size; i++) {
  859. + dma_addr_t dma_addr = dma_map_single(&netdev->dev,
  860. + ring->rx_data[i] + NET_SKB_PAD + pad,
  861. + ring->rx_buf_size,
  862. + DMA_FROM_DEVICE);
  863. + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
  864. + goto no_rx_mem;
  865. + ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
  866. +
  867. + if (priv->flags & FE_FLAG_RX_SG_DMA)
  868. + ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
  869. + else
  870. + ring->rx_dma[i].rxd2 = RX_DMA_LSO;
  871. + }
  872. + ring->rx_calc_idx = ring->rx_ring_size - 1;
  873. + /* make sure that all changes to the dma ring are flushed before we
  874. + * continue
  875. + */
  876. + wmb();
  877. +
  878. + fe_reg_w32(ring->rx_phys, FE_REG_RX_BASE_PTR0);
  879. + fe_reg_w32(ring->rx_ring_size, FE_REG_RX_MAX_CNT0);
  880. + fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
  881. + fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);
  882. +
  883. + return 0;
  884. +
  885. +no_rx_mem:
  886. + return -ENOMEM;
  887. +}
  888. +
  889. +static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf)
  890. +{
  891. + if (tx_buf->flags & FE_TX_FLAGS_SINGLE0) {
  892. + dma_unmap_single(dev,
  893. + dma_unmap_addr(tx_buf, dma_addr0),
  894. + dma_unmap_len(tx_buf, dma_len0),
  895. + DMA_TO_DEVICE);
  896. + } else if (tx_buf->flags & FE_TX_FLAGS_PAGE0) {
  897. + dma_unmap_page(dev,
  898. + dma_unmap_addr(tx_buf, dma_addr0),
  899. + dma_unmap_len(tx_buf, dma_len0),
  900. + DMA_TO_DEVICE);
  901. + }
  902. + if (tx_buf->flags & FE_TX_FLAGS_PAGE1)
  903. + dma_unmap_page(dev,
  904. + dma_unmap_addr(tx_buf, dma_addr1),
  905. + dma_unmap_len(tx_buf, dma_len1),
  906. + DMA_TO_DEVICE);
  907. +
  908. + tx_buf->flags = 0;
  909. + if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
  910. + dev_kfree_skb_any(tx_buf->skb);
  911. + tx_buf->skb = NULL;
  912. +}
  913. +
  914. +static void fe_clean_tx(struct fe_priv *priv)
  915. +{
  916. + int i;
  917. + struct device *dev = &priv->netdev->dev;
  918. + struct fe_tx_ring *ring = &priv->tx_ring;
  919. +
  920. + if (ring->tx_buf) {
  921. + for (i = 0; i < ring->tx_ring_size; i++)
  922. + fe_txd_unmap(dev, &ring->tx_buf[i]);
  923. + kfree(ring->tx_buf);
  924. + ring->tx_buf = NULL;
  925. + }
  926. +
  927. + if (ring->tx_dma) {
  928. + dma_free_coherent(dev,
  929. + ring->tx_ring_size * sizeof(*ring->tx_dma),
  930. + ring->tx_dma,
  931. + ring->tx_phys);
  932. + ring->tx_dma = NULL;
  933. + }
  934. +
  935. + netdev_reset_queue(priv->netdev);
  936. +}
  937. +
  938. +static int fe_alloc_tx(struct fe_priv *priv)
  939. +{
  940. + int i;
  941. + struct fe_tx_ring *ring = &priv->tx_ring;
  942. +
  943. + ring->tx_free_idx = 0;
  944. + ring->tx_next_idx = 0;
  945. + ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
  946. + MAX_SKB_FRAGS);
  947. +
  948. + ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
  949. + GFP_KERNEL);
  950. + if (!ring->tx_buf)
  951. + goto no_tx_mem;
  952. +
  953. + ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
  954. + ring->tx_ring_size * sizeof(*ring->tx_dma),
  955. + &ring->tx_phys,
  956. + GFP_ATOMIC | __GFP_ZERO);
  957. + if (!ring->tx_dma)
  958. + goto no_tx_mem;
  959. +
  960. + for (i = 0; i < ring->tx_ring_size; i++) {
  961. + if (priv->soc->tx_dma)
  962. + priv->soc->tx_dma(&ring->tx_dma[i]);
  963. + ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
  964. + }
  965. + /* make sure that all changes to the dma ring are flushed before we
  966. + * continue
  967. + */
  968. + wmb();
  969. +
  970. + fe_reg_w32(ring->tx_phys, FE_REG_TX_BASE_PTR0);
  971. + fe_reg_w32(ring->tx_ring_size, FE_REG_TX_MAX_CNT0);
  972. + fe_reg_w32(0, FE_REG_TX_CTX_IDX0);
  973. + fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);
  974. +
  975. + return 0;
  976. +
  977. +no_tx_mem:
  978. + return -ENOMEM;
  979. +}
  980. +
  981. +static int fe_init_dma(struct fe_priv *priv)
  982. +{
  983. + int err;
  984. +
  985. + err = fe_alloc_tx(priv);
  986. + if (err)
  987. + return err;
  988. +
  989. + err = fe_alloc_rx(priv);
  990. + if (err)
  991. + return err;
  992. +
  993. + return 0;
  994. +}
  995. +
  996. +static void fe_free_dma(struct fe_priv *priv)
  997. +{
  998. + fe_clean_tx(priv);
  999. + fe_clean_rx(priv);
  1000. +}
  1001. +
  1002. +void fe_stats_update(struct fe_priv *priv)
  1003. +{
  1004. + struct fe_hw_stats *hwstats = priv->hw_stats;
  1005. + unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
  1006. + u64 stats;
  1007. +
  1008. + u64_stats_update_begin(&hwstats->syncp);
  1009. +
  1010. + if (IS_ENABLED(CONFIG_SOC_MT7621)) {
  1011. + hwstats->rx_bytes += fe_r32(base);
  1012. + stats = fe_r32(base + 0x04);
  1013. + if (stats)
  1014. + hwstats->rx_bytes += (stats << 32);
  1015. + hwstats->rx_packets += fe_r32(base + 0x08);
  1016. + hwstats->rx_overflow += fe_r32(base + 0x10);
  1017. + hwstats->rx_fcs_errors += fe_r32(base + 0x14);
  1018. + hwstats->rx_short_errors += fe_r32(base + 0x18);
  1019. + hwstats->rx_long_errors += fe_r32(base + 0x1c);
  1020. + hwstats->rx_checksum_errors += fe_r32(base + 0x20);
  1021. + hwstats->rx_flow_control_packets += fe_r32(base + 0x24);
  1022. + hwstats->tx_skip += fe_r32(base + 0x28);
  1023. + hwstats->tx_collisions += fe_r32(base + 0x2c);
  1024. + hwstats->tx_bytes += fe_r32(base + 0x30);
  1025. + stats = fe_r32(base + 0x34);
  1026. + if (stats)
  1027. + hwstats->tx_bytes += (stats << 32);
  1028. + hwstats->tx_packets += fe_r32(base + 0x38);
  1029. + } else {
  1030. + hwstats->tx_bytes += fe_r32(base);
  1031. + hwstats->tx_packets += fe_r32(base + 0x04);
  1032. + hwstats->tx_skip += fe_r32(base + 0x08);
  1033. + hwstats->tx_collisions += fe_r32(base + 0x0c);
  1034. + hwstats->rx_bytes += fe_r32(base + 0x20);
  1035. + hwstats->rx_packets += fe_r32(base + 0x24);
  1036. + hwstats->rx_overflow += fe_r32(base + 0x28);
  1037. + hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
  1038. + hwstats->rx_short_errors += fe_r32(base + 0x30);
  1039. + hwstats->rx_long_errors += fe_r32(base + 0x34);
  1040. + hwstats->rx_checksum_errors += fe_r32(base + 0x38);
  1041. + hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);
  1042. + }
  1043. +
  1044. + u64_stats_update_end(&hwstats->syncp);
  1045. +}
  1046. +
  1047. +static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev,
  1048. + struct rtnl_link_stats64 *storage)
  1049. +{
  1050. + struct fe_priv *priv = netdev_priv(dev);
  1051. + struct fe_hw_stats *hwstats = priv->hw_stats;
  1052. + unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
  1053. + unsigned int start;
  1054. +
  1055. + if (!base) {
  1056. + netdev_stats_to_stats64(storage, &dev->stats);
  1057. + return storage;
  1058. + }
  1059. +
  1060. + if (netif_running(dev) && netif_device_present(dev)) {
  1061. + if (spin_trylock(&hwstats->stats_lock)) {
  1062. + fe_stats_update(priv);
  1063. + spin_unlock(&hwstats->stats_lock);
  1064. + }
  1065. + }
  1066. +
  1067. + do {
  1068. + start = u64_stats_fetch_begin_irq(&hwstats->syncp);
  1069. + storage->rx_packets = hwstats->rx_packets;
  1070. + storage->tx_packets = hwstats->tx_packets;
  1071. + storage->rx_bytes = hwstats->rx_bytes;
  1072. + storage->tx_bytes = hwstats->tx_bytes;
  1073. + storage->collisions = hwstats->tx_collisions;
  1074. + storage->rx_length_errors = hwstats->rx_short_errors +
  1075. + hwstats->rx_long_errors;
  1076. + storage->rx_over_errors = hwstats->rx_overflow;
  1077. + storage->rx_crc_errors = hwstats->rx_fcs_errors;
  1078. + storage->rx_errors = hwstats->rx_checksum_errors;
  1079. + storage->tx_aborted_errors = hwstats->tx_skip;
  1080. + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
  1081. +
  1082. + storage->tx_errors = priv->netdev->stats.tx_errors;
  1083. + storage->rx_dropped = priv->netdev->stats.rx_dropped;
  1084. + storage->tx_dropped = priv->netdev->stats.tx_dropped;
  1085. +
  1086. + return storage;
  1087. +}
  1088. +
  1089. +static int fe_vlan_rx_add_vid(struct net_device *dev,
  1090. + __be16 proto, u16 vid)
  1091. +{
  1092. + struct fe_priv *priv = netdev_priv(dev);
  1093. + u32 idx = (vid & 0xf);
  1094. + u32 vlan_cfg;
  1095. +
  1096. + if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
  1097. + (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
  1098. + return 0;
  1099. +
  1100. + if (test_bit(idx, &priv->vlan_map)) {
  1101. + netdev_warn(dev, "disable tx vlan offload\n");
  1102. + dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
  1103. + netdev_update_features(dev);
  1104. + } else {
  1105. + vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
  1106. + ((idx >> 1) << 2));
  1107. + if (idx & 0x1) {
  1108. + vlan_cfg &= 0xffff;
  1109. + vlan_cfg |= (vid << 16);
  1110. + } else {
  1111. + vlan_cfg &= 0xffff0000;
  1112. + vlan_cfg |= vid;
  1113. + }
  1114. + fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
  1115. + ((idx >> 1) << 2));
  1116. + set_bit(idx, &priv->vlan_map);
  1117. + }
  1118. +
  1119. + return 0;
  1120. +}
  1121. +
  1122. +static int fe_vlan_rx_kill_vid(struct net_device *dev,
  1123. + __be16 proto, u16 vid)
  1124. +{
  1125. + struct fe_priv *priv = netdev_priv(dev);
  1126. + u32 idx = (vid & 0xf);
  1127. +
  1128. + if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) &&
  1129. + (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
  1130. + return 0;
  1131. +
  1132. + clear_bit(idx, &priv->vlan_map);
  1133. +
  1134. + return 0;
  1135. +}
  1136. +
  1137. +static inline u32 fe_empty_txd(struct fe_tx_ring *ring)
  1138. +{
  1139. + barrier();
  1140. + return (u32)(ring->tx_ring_size -
  1141. + ((ring->tx_next_idx - ring->tx_free_idx) &
  1142. + (ring->tx_ring_size - 1)));
  1143. +}
  1144. +
  1145. +static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
  1146. + int tx_num, struct fe_tx_ring *ring)
  1147. +{
  1148. + struct fe_priv *priv = netdev_priv(dev);
  1149. + struct skb_frag_struct *frag;
  1150. + struct fe_tx_dma txd, *ptxd;
  1151. + struct fe_tx_buf *tx_buf;
  1152. + dma_addr_t mapped_addr;
  1153. + unsigned int nr_frags;
  1154. + u32 def_txd4;
  1155. + int i, j, k, frag_size, frag_map_size, offset;
  1156. +
  1157. + tx_buf = &ring->tx_buf[ring->tx_next_idx];
  1158. + memset(tx_buf, 0, sizeof(*tx_buf));
  1159. + memset(&txd, 0, sizeof(txd));
  1160. + nr_frags = skb_shinfo(skb)->nr_frags;
  1161. +
  1162. + /* init tx descriptor */
  1163. + if (priv->soc->tx_dma)
  1164. + priv->soc->tx_dma(&txd);
  1165. + else
  1166. + txd.txd4 = TX_DMA_DESP4_DEF;
  1167. + def_txd4 = txd.txd4;
  1168. +
  1169. + /* TX Checksum offload */
  1170. + if (skb->ip_summed == CHECKSUM_PARTIAL)
  1171. + txd.txd4 |= TX_DMA_CHKSUM;
  1172. +
  1173. + /* VLAN header offload */
  1174. + if (skb_vlan_tag_present(skb)) {
  1175. + u16 tag = skb_vlan_tag_get(skb);
  1176. +
  1177. + if (IS_ENABLED(CONFIG_SOC_MT7621))
  1178. + txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | tag;
  1179. + else
  1180. + txd.txd4 |= TX_DMA_INS_VLAN |
  1181. + ((tag >> VLAN_PRIO_SHIFT) << 4) |
  1182. + (tag & 0xF);
  1183. + }
  1184. +
  1185. + /* TSO: fill MSS info in tcp checksum field */
  1186. + if (skb_is_gso(skb)) {
  1187. + if (skb_cow_head(skb, 0)) {
  1188. + netif_warn(priv, tx_err, dev,
  1189. + "GSO expand head fail.\n");
  1190. + goto err_out;
  1191. + }
  1192. + if (skb_shinfo(skb)->gso_type &
  1193. + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
  1194. + txd.txd4 |= TX_DMA_TSO;
  1195. + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
  1196. + }
  1197. + }
  1198. +
  1199. + mapped_addr = dma_map_single(&dev->dev, skb->data,
  1200. + skb_headlen(skb), DMA_TO_DEVICE);
  1201. + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
  1202. + goto err_out;
  1203. + txd.txd1 = mapped_addr;
  1204. + txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
  1205. +
  1206. + tx_buf->flags |= FE_TX_FLAGS_SINGLE0;
  1207. + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  1208. + dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
  1209. +
  1210. + /* TX SG offload */
  1211. + j = ring->tx_next_idx;
  1212. + k = 0;
  1213. + for (i = 0; i < nr_frags; i++) {
  1214. + offset = 0;
  1215. + frag = &skb_shinfo(skb)->frags[i];
  1216. + frag_size = skb_frag_size(frag);
  1217. +
  1218. + while (frag_size > 0) {
  1219. + frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
  1220. + mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
  1221. + frag_map_size,
  1222. + DMA_TO_DEVICE);
  1223. + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
  1224. + goto err_dma;
  1225. +
  1226. + if (k & 0x1) {
  1227. + j = NEXT_TX_DESP_IDX(j);
  1228. + txd.txd1 = mapped_addr;
  1229. + txd.txd2 = TX_DMA_PLEN0(frag_map_size);
  1230. + txd.txd4 = def_txd4;
  1231. +
  1232. + tx_buf = &ring->tx_buf[j];
  1233. + memset(tx_buf, 0, sizeof(*tx_buf));
  1234. +
  1235. + tx_buf->flags |= FE_TX_FLAGS_PAGE0;
  1236. + dma_unmap_addr_set(tx_buf, dma_addr0,
  1237. + mapped_addr);
  1238. + dma_unmap_len_set(tx_buf, dma_len0,
  1239. + frag_map_size);
  1240. + } else {
  1241. + txd.txd3 = mapped_addr;
  1242. + txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
  1243. +
  1244. + tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
  1245. + tx_buf->flags |= FE_TX_FLAGS_PAGE1;
  1246. + dma_unmap_addr_set(tx_buf, dma_addr1,
  1247. + mapped_addr);
  1248. + dma_unmap_len_set(tx_buf, dma_len1,
  1249. + frag_map_size);
  1250. +
  1251. + if (!((i == (nr_frags - 1)) &&
  1252. + (frag_map_size == frag_size))) {
  1253. + fe_set_txd(&txd, &ring->tx_dma[j]);
  1254. + memset(&txd, 0, sizeof(txd));
  1255. + }
  1256. + }
  1257. + frag_size -= frag_map_size;
  1258. + offset += frag_map_size;
  1259. + k++;
  1260. + }
  1261. + }
  1262. +
  1263. + /* set last segment */
  1264. + if (k & 0x1)
  1265. + txd.txd2 |= TX_DMA_LS1;
  1266. + else
  1267. + txd.txd2 |= TX_DMA_LS0;
  1268. + fe_set_txd(&txd, &ring->tx_dma[j]);
  1269. +
  1270. + /* store skb to cleanup */
  1271. + tx_buf->skb = skb;
  1272. +
  1273. + netdev_sent_queue(dev, skb->len);
  1274. + skb_tx_timestamp(skb);
  1275. +
  1276. + ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
  1277. + /* make sure that all changes to the dma ring are flushed before we
  1278. + * continue
  1279. + */
  1280. + wmb();
  1281. + if (unlikely(fe_empty_txd(ring) <= ring->tx_thresh)) {
  1282. + netif_stop_queue(dev);
  1283. + smp_mb();
  1284. + if (unlikely(fe_empty_txd(ring) > ring->tx_thresh))
  1285. + netif_wake_queue(dev);
  1286. + }
  1287. +
  1288. + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
  1289. + fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0);
  1290. +
  1291. + return 0;
  1292. +
  1293. +err_dma:
  1294. + j = ring->tx_next_idx;
  1295. + for (i = 0; i < tx_num; i++) {
  1296. + ptxd = &ring->tx_dma[j];
  1297. + tx_buf = &ring->tx_buf[j];
  1298. +
  1299. + /* unmap dma */
  1300. + fe_txd_unmap(&dev->dev, tx_buf);
  1301. +
  1302. + ptxd->txd2 = TX_DMA_DESP2_DEF;
  1303. + j = NEXT_TX_DESP_IDX(j);
  1304. + }
  1305. + /* make sure that all changes to the dma ring are flushed before we
  1306. + * continue
  1307. + */
  1308. + wmb();
  1309. +
  1310. +err_out:
  1311. + return -1;
  1312. +}
  1313. +
  1314. +static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv)
  1315. +{
  1316. + unsigned int len;
  1317. + int ret;
  1318. +
  1319. + ret = 0;
  1320. + if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
  1321. + if ((priv->flags & FE_FLAG_PADDING_64B) &&
  1322. + !(priv->flags & FE_FLAG_PADDING_BUG))
  1323. + return ret;
  1324. +
  1325. + if (skb_vlan_tag_present(skb))
  1326. + len = ETH_ZLEN;
  1327. + else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
  1328. + len = VLAN_ETH_ZLEN;
  1329. + else if (!(priv->flags & FE_FLAG_PADDING_64B))
  1330. + len = ETH_ZLEN;
  1331. + else
  1332. + return ret;
  1333. +
  1334. + if (skb->len < len) {
  1335. + ret = skb_pad(skb, len - skb->len);
  1336. + if (ret < 0)
  1337. + return ret;
  1338. + skb->len = len;
  1339. + skb_set_tail_pointer(skb, len);
  1340. + }
  1341. + }
  1342. +
  1343. + return ret;
  1344. +}
  1345. +
  1346. +static inline int fe_cal_txd_req(struct sk_buff *skb)
  1347. +{
  1348. + int i, nfrags;
  1349. + struct skb_frag_struct *frag;
  1350. +
  1351. + nfrags = 1;
  1352. + if (skb_is_gso(skb)) {
  1353. + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1354. + frag = &skb_shinfo(skb)->frags[i];
  1355. + nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
  1356. + }
  1357. + } else {
  1358. + nfrags += skb_shinfo(skb)->nr_frags;
  1359. + }
  1360. +
  1361. + return DIV_ROUND_UP(nfrags, 2);
  1362. +}
  1363. +
  1364. +static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1365. +{
  1366. + struct fe_priv *priv = netdev_priv(dev);
  1367. + struct fe_tx_ring *ring = &priv->tx_ring;
  1368. + struct net_device_stats *stats = &dev->stats;
  1369. + int tx_num;
  1370. + int len = skb->len;
  1371. +
  1372. + if (fe_skb_padto(skb, priv)) {
  1373. + netif_warn(priv, tx_err, dev, "tx padding failed!\n");
  1374. + return NETDEV_TX_OK;
  1375. + }
  1376. +
  1377. + tx_num = fe_cal_txd_req(skb);
  1378. + if (unlikely(fe_empty_txd(ring) <= tx_num)) {
  1379. + netif_stop_queue(dev);
  1380. + netif_err(priv, tx_queued, dev,
  1381. + "Tx Ring full when queue awake!\n");
  1382. + return NETDEV_TX_BUSY;
  1383. + }
  1384. +
  1385. + if (fe_tx_map_dma(skb, dev, tx_num, ring) < 0) {
  1386. + stats->tx_dropped++;
  1387. + } else {
  1388. + stats->tx_packets++;
  1389. + stats->tx_bytes += len;
  1390. + }
  1391. +
  1392. + return NETDEV_TX_OK;
  1393. +}
  1394. +
  1395. +static int fe_poll_rx(struct napi_struct *napi, int budget,
  1396. + struct fe_priv *priv, u32 rx_intr)
  1397. +{
  1398. + struct net_device *netdev = priv->netdev;
  1399. + struct net_device_stats *stats = &netdev->stats;
  1400. + struct fe_soc_data *soc = priv->soc;
  1401. + struct fe_rx_ring *ring = &priv->rx_ring;
  1402. + int idx = ring->rx_calc_idx;
  1403. + u32 checksum_bit;
  1404. + struct sk_buff *skb;
  1405. + u8 *data, *new_data;
  1406. + struct fe_rx_dma *rxd, trxd;
  1407. + int done = 0, pad;
  1408. +
  1409. + if (netdev->features & NETIF_F_RXCSUM)
  1410. + checksum_bit = soc->checksum_bit;
  1411. + else
  1412. + checksum_bit = 0;
  1413. +
  1414. + if (priv->flags & FE_FLAG_RX_2B_OFFSET)
  1415. + pad = 0;
  1416. + else
  1417. + pad = NET_IP_ALIGN;
  1418. +
  1419. + while (done < budget) {
  1420. + unsigned int pktlen;
  1421. + dma_addr_t dma_addr;
  1422. +
  1423. + idx = NEXT_RX_DESP_IDX(idx);
  1424. + rxd = &ring->rx_dma[idx];
  1425. + data = ring->rx_data[idx];
  1426. +
  1427. + fe_get_rxd(&trxd, rxd);
  1428. + if (!(trxd.rxd2 & RX_DMA_DONE))
  1429. + break;
  1430. +
  1431. + /* alloc new buffer */
  1432. + new_data = netdev_alloc_frag(ring->frag_size);
  1433. + if (unlikely(!new_data)) {
  1434. + stats->rx_dropped++;
  1435. + goto release_desc;
  1436. + }
  1437. + dma_addr = dma_map_single(&netdev->dev,
  1438. + new_data + NET_SKB_PAD + pad,
  1439. + ring->rx_buf_size,
  1440. + DMA_FROM_DEVICE);
  1441. + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
  1442. + put_page(virt_to_head_page(new_data));
  1443. + goto release_desc;
  1444. + }
  1445. +
  1446. + /* receive data */
  1447. + skb = build_skb(data, ring->frag_size);
  1448. + if (unlikely(!skb)) {
  1449. + put_page(virt_to_head_page(new_data));
  1450. + goto release_desc;
  1451. + }
  1452. + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  1453. +
  1454. + dma_unmap_single(&netdev->dev, trxd.rxd1,
  1455. + ring->rx_buf_size, DMA_FROM_DEVICE);
  1456. + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  1457. + skb->dev = netdev;
  1458. + skb_put(skb, pktlen);
  1459. + if (trxd.rxd4 & checksum_bit)
  1460. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  1461. + else
  1462. + skb_checksum_none_assert(skb);
  1463. + skb->protocol = eth_type_trans(skb, netdev);
  1464. +
  1465. + stats->rx_packets++;
  1466. + stats->rx_bytes += pktlen;
  1467. +
  1468. + napi_gro_receive(napi, skb);
  1469. +
  1470. + ring->rx_data[idx] = new_data;
  1471. + rxd->rxd1 = (unsigned int)dma_addr;
  1472. +
  1473. +release_desc:
  1474. + if (priv->flags & FE_FLAG_RX_SG_DMA)
  1475. + rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
  1476. + else
  1477. + rxd->rxd2 = RX_DMA_LSO;
  1478. +
  1479. + ring->rx_calc_idx = idx;
  1480. + /* make sure that all changes to the dma ring are flushed before
  1481. + * we continue
  1482. + */
  1483. + wmb();
  1484. + fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0);
  1485. + done++;
  1486. + }
  1487. +
  1488. + if (done < budget)
  1489. + fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS);
  1490. +
  1491. + return done;
  1492. +}
  1493. +
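+/* TX reclaim: walk from tx_free_idx up to the hardware DTX index,
+ * unmapping buffers and accounting completed skbs. The hardware index
+ * is read a second time before acking the interrupt to close the race
+ * with frames that complete while the ring is being cleaned.
+ */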
  1494. +static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr,
  1495. + int *tx_again)
  1496. +{
  1497. + struct net_device *netdev = priv->netdev;
  1498. + struct device *dev = &netdev->dev;
  1499. + unsigned int bytes_compl = 0;
  1500. + struct sk_buff *skb;
  1501. + struct fe_tx_buf *tx_buf;
  1502. + int done = 0;
  1503. + u32 idx, hwidx;
  1504. + struct fe_tx_ring *ring = &priv->tx_ring;
  1505. +
  1506. + idx = ring->tx_free_idx;
  1507. + hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
  1508. +
  1509. + while ((idx != hwidx) && budget) {
  1510. + tx_buf = &ring->tx_buf[idx];
  1511. + skb = tx_buf->skb;
  1512. +
  1513. + if (!skb)
  1514. + break;
  1515. +
  1516. + if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
  1517. + bytes_compl += skb->len;
  1518. + done++;
  1519. + budget--;
  1520. + }
  1521. + fe_txd_unmap(dev, tx_buf);
  1522. + idx = NEXT_TX_DESP_IDX(idx);
  1523. + }
  1524. + ring->tx_free_idx = idx;
  1525. +
  1526. + if (idx == hwidx) {
   1527. + /* re-read the hw index to make sure no tx packet completed meanwhile */
  1528. + hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0);
  1529. + if (idx == hwidx)
  1530. + fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS);
  1531. + else
  1532. + *tx_again = 1;
  1533. + } else {
  1534. + *tx_again = 1;
  1535. + }
  1536. +
  1537. + if (done) {
  1538. + netdev_completed_queue(netdev, done, bytes_compl);
  1539. + smp_mb();
  1540. + if (unlikely(netif_queue_stopped(netdev) &&
  1541. + (fe_empty_txd(ring) > ring->tx_thresh)))
  1542. + netif_wake_queue(netdev);
  1543. + }
  1544. +
  1545. + return done;
  1546. +}
  1547. +
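+/* A single NAPI context services TX reclaim, RX and the counter
+ * overflow interrupt. Before completing, the interrupt status is
+ * re-read; if new work arrived in the meantime the full budget is
+ * returned so NAPI polls again instead of re-enabling interrupts.
+ */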
  1548. +static int fe_poll(struct napi_struct *napi, int budget)
  1549. +{
  1550. + struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
  1551. + struct fe_hw_stats *hwstat = priv->hw_stats;
  1552. + int tx_done, rx_done, tx_again;
  1553. + u32 status, fe_status, status_reg, mask;
  1554. + u32 tx_intr, rx_intr, status_intr;
  1555. +
  1556. + status = fe_reg_r32(FE_REG_FE_INT_STATUS);
  1557. + fe_status = status;
  1558. + tx_intr = priv->soc->tx_int;
  1559. + rx_intr = priv->soc->rx_int;
  1560. + status_intr = priv->soc->status_int;
  1561. + tx_done = 0;
  1562. + rx_done = 0;
  1563. + tx_again = 0;
  1564. +
  1565. + if (fe_reg_table[FE_REG_FE_INT_STATUS2]) {
  1566. + fe_status = fe_reg_r32(FE_REG_FE_INT_STATUS2);
  1567. + status_reg = FE_REG_FE_INT_STATUS2;
  1568. + } else {
  1569. + status_reg = FE_REG_FE_INT_STATUS;
  1570. + }
  1571. +
  1572. + if (status & tx_intr)
  1573. + tx_done = fe_poll_tx(priv, budget, tx_intr, &tx_again);
  1574. +
  1575. + if (status & rx_intr)
  1576. + rx_done = fe_poll_rx(napi, budget, priv, rx_intr);
  1577. +
  1578. + if (unlikely(fe_status & status_intr)) {
  1579. + if (hwstat && spin_trylock(&hwstat->stats_lock)) {
  1580. + fe_stats_update(priv);
  1581. + spin_unlock(&hwstat->stats_lock);
  1582. + }
  1583. + fe_reg_w32(status_intr, status_reg);
  1584. + }
  1585. +
  1586. + if (unlikely(netif_msg_intr(priv))) {
  1587. + mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
  1588. + netdev_info(priv->netdev,
  1589. + "done tx %d, rx %d, intr 0x%08x/0x%x\n",
  1590. + tx_done, rx_done, status, mask);
  1591. + }
  1592. +
  1593. + if (!tx_again && (rx_done < budget)) {
  1594. + status = fe_reg_r32(FE_REG_FE_INT_STATUS);
  1595. + if (status & (tx_intr | rx_intr)) {
  1596. + /* let napi poll again */
  1597. + rx_done = budget;
  1598. + goto poll_again;
  1599. + }
  1600. +
  1601. + napi_complete(napi);
  1602. + fe_int_enable(tx_intr | rx_intr);
  1603. + } else {
  1604. + rx_done = budget;
  1605. + }
  1606. +
  1607. +poll_again:
  1608. + return rx_done;
  1609. +}
  1610. +
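+/* Dump the PDMA ring state for debugging and schedule a full
+ * stop/open cycle from process context (see fe_pending_work).
+ */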
  1611. +static void fe_tx_timeout(struct net_device *dev)
  1612. +{
  1613. + struct fe_priv *priv = netdev_priv(dev);
  1614. + struct fe_tx_ring *ring = &priv->tx_ring;
  1615. +
  1616. + priv->netdev->stats.tx_errors++;
  1617. + netif_err(priv, tx_err, dev,
  1618. + "transmit timed out\n");
  1619. + netif_info(priv, drv, dev, "dma_cfg:%08x\n",
  1620. + fe_reg_r32(FE_REG_PDMA_GLO_CFG));
   1621. + netif_info(priv, drv, dev,
   1622. + "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
  1623. + 0, fe_reg_r32(FE_REG_TX_BASE_PTR0),
  1624. + fe_reg_r32(FE_REG_TX_MAX_CNT0),
  1625. + fe_reg_r32(FE_REG_TX_CTX_IDX0),
  1626. + fe_reg_r32(FE_REG_TX_DTX_IDX0),
  1627. + ring->tx_free_idx,
  1628. + ring->tx_next_idx);
  1629. + netif_info(priv, drv, dev,
  1630. + "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
  1631. + 0, fe_reg_r32(FE_REG_RX_BASE_PTR0),
  1632. + fe_reg_r32(FE_REG_RX_MAX_CNT0),
  1633. + fe_reg_r32(FE_REG_RX_CALC_IDX0),
  1634. + fe_reg_r32(FE_REG_RX_DRX_IDX0));
  1635. +
  1636. + if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags))
  1637. + schedule_work(&priv->pending_work);
  1638. +}
  1639. +
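+/* TX/RX interrupts are masked and deferred to NAPI; any other status
+ * bit is acked right away.
+ */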
  1640. +static irqreturn_t fe_handle_irq(int irq, void *dev)
  1641. +{
  1642. + struct fe_priv *priv = netdev_priv(dev);
  1643. + u32 status, int_mask;
  1644. +
  1645. + status = fe_reg_r32(FE_REG_FE_INT_STATUS);
  1646. +
  1647. + if (unlikely(!status))
  1648. + return IRQ_NONE;
  1649. +
  1650. + int_mask = (priv->soc->rx_int | priv->soc->tx_int);
  1651. + if (likely(status & int_mask)) {
  1652. + if (likely(napi_schedule_prep(&priv->rx_napi))) {
  1653. + fe_int_disable(int_mask);
  1654. + __napi_schedule(&priv->rx_napi);
  1655. + }
  1656. + } else {
  1657. + fe_reg_w32(status, FE_REG_FE_INT_STATUS);
  1658. + }
  1659. +
  1660. + return IRQ_HANDLED;
  1661. +}
  1662. +
  1663. +#ifdef CONFIG_NET_POLL_CONTROLLER
  1664. +static void fe_poll_controller(struct net_device *dev)
  1665. +{
  1666. + struct fe_priv *priv = netdev_priv(dev);
  1667. + u32 int_mask = priv->soc->tx_int | priv->soc->rx_int;
  1668. +
  1669. + fe_int_disable(int_mask);
  1670. + fe_handle_irq(dev->irq, dev);
  1671. + fe_int_enable(int_mask);
  1672. +}
  1673. +#endif
  1674. +
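+/* Program the number of system clock cycles per microsecond
+ * (sysclk / FE_US_CYC_CNT_DIVISOR) into the US_CYC_CNT field of
+ * FE_FE_GLO_CFG.
+ */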
  1675. +int fe_set_clock_cycle(struct fe_priv *priv)
  1676. +{
  1677. + unsigned long sysclk = priv->sysclk;
  1678. +
  1679. + sysclk /= FE_US_CYC_CNT_DIVISOR;
  1680. + sysclk <<= FE_US_CYC_CNT_SHIFT;
  1681. +
  1682. + fe_w32((fe_r32(FE_FE_GLO_CFG) &
  1683. + ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
  1684. + sysclk,
  1685. + FE_FE_GLO_CFG);
  1686. + return 0;
  1687. +}
  1688. +
  1689. +void fe_fwd_config(struct fe_priv *priv)
  1690. +{
  1691. + u32 fwd_cfg;
  1692. +
  1693. + fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
  1694. +
  1695. + /* disable jumbo frame */
  1696. + if (priv->flags & FE_FLAG_JUMBO_FRAME)
  1697. + fwd_cfg &= ~FE_GDM1_JMB_EN;
  1698. +
   1699. + /* forward unicast/multicast/broadcast frames to the cpu */
  1700. + fwd_cfg &= ~0xffff;
  1701. +
  1702. + fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
  1703. +}
  1704. +
  1705. +static void fe_rxcsum_config(bool enable)
  1706. +{
  1707. + if (enable)
  1708. + fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
  1709. + FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
  1710. + FE_GDMA1_FWD_CFG);
  1711. + else
  1712. + fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
  1713. + FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
  1714. + FE_GDMA1_FWD_CFG);
  1715. +}
  1716. +
  1717. +static void fe_txcsum_config(bool enable)
  1718. +{
  1719. + if (enable)
  1720. + fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
  1721. + FE_TCS_GEN_EN | FE_UCS_GEN_EN),
  1722. + FE_CDMA_CSG_CFG);
  1723. + else
  1724. + fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
  1725. + FE_TCS_GEN_EN | FE_UCS_GEN_EN),
  1726. + FE_CDMA_CSG_CFG);
  1727. +}
  1728. +
  1729. +void fe_csum_config(struct fe_priv *priv)
  1730. +{
  1731. + struct net_device *dev = priv_netdev(priv);
  1732. +
  1733. + fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
  1734. + fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));
  1735. +}
  1736. +
  1737. +static int fe_hw_init(struct net_device *dev)
  1738. +{
  1739. + struct fe_priv *priv = netdev_priv(dev);
  1740. + int i, err;
  1741. +
  1742. + err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0,
  1743. + dev_name(priv->device), dev);
  1744. + if (err)
  1745. + return err;
  1746. +
  1747. + if (priv->soc->set_mac)
  1748. + priv->soc->set_mac(priv, dev->dev_addr);
  1749. + else
  1750. + fe_hw_set_macaddr(priv, dev->dev_addr);
  1751. +
  1752. + /* disable delay interrupt */
  1753. + fe_reg_w32(0, FE_REG_DLY_INT_CFG);
  1754. +
  1755. + fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);
  1756. +
   1757. + /* frame engine will push VLAN tag according to VIDX field in Tx desc */
  1758. + if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
  1759. + for (i = 0; i < 16; i += 2)
  1760. + fe_w32(((i + 1) << 16) + i,
  1761. + fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
  1762. + (i * 2));
  1763. +
  1764. + if (priv->soc->fwd_config(priv))
  1765. + netdev_err(dev, "unable to get clock\n");
  1766. +
  1767. + if (fe_reg_table[FE_REG_FE_RST_GL]) {
  1768. + fe_reg_w32(1, FE_REG_FE_RST_GL);
  1769. + fe_reg_w32(0, FE_REG_FE_RST_GL);
  1770. + }
  1771. +
  1772. + return 0;
  1773. +}
  1774. +
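+/* Bring-up order: set up the DMA rings, enable the PDMA engines under
+ * page_lock (FE_REG_PDMA_GLO_CFG is shared), start the PHY, then
+ * enable NAPI and interrupts before waking the TX queue. fe_stop()
+ * reverses this and polls the DMA busy bits until the engines idle.
+ */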
  1775. +static int fe_open(struct net_device *dev)
  1776. +{
  1777. + struct fe_priv *priv = netdev_priv(dev);
  1778. + unsigned long flags;
  1779. + u32 val;
  1780. + int err;
  1781. +
  1782. + err = fe_init_dma(priv);
  1783. + if (err) {
  1784. + fe_free_dma(priv);
  1785. + return err;
  1786. + }
  1787. +
  1788. + spin_lock_irqsave(&priv->page_lock, flags);
  1789. +
  1790. + val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
  1791. + if (priv->flags & FE_FLAG_RX_2B_OFFSET)
  1792. + val |= FE_RX_2B_OFFSET;
  1793. + val |= priv->soc->pdma_glo_cfg;
  1794. + fe_reg_w32(val, FE_REG_PDMA_GLO_CFG);
  1795. +
  1796. + spin_unlock_irqrestore(&priv->page_lock, flags);
  1797. +
  1798. + if (priv->phy)
  1799. + priv->phy->start(priv);
  1800. +
  1801. + if (priv->soc->has_carrier && priv->soc->has_carrier(priv))
  1802. + netif_carrier_on(dev);
  1803. +
  1804. + napi_enable(&priv->rx_napi);
  1805. + fe_int_enable(priv->soc->tx_int | priv->soc->rx_int);
  1806. + netif_start_queue(dev);
  1807. +
  1808. + return 0;
  1809. +}
  1810. +
  1811. +static int fe_stop(struct net_device *dev)
  1812. +{
  1813. + struct fe_priv *priv = netdev_priv(dev);
  1814. + unsigned long flags;
  1815. + int i;
  1816. +
  1817. + netif_tx_disable(dev);
  1818. + fe_int_disable(priv->soc->tx_int | priv->soc->rx_int);
  1819. + napi_disable(&priv->rx_napi);
  1820. +
  1821. + if (priv->phy)
  1822. + priv->phy->stop(priv);
  1823. +
  1824. + spin_lock_irqsave(&priv->page_lock, flags);
  1825. +
  1826. + fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
  1827. + ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN),
  1828. + FE_REG_PDMA_GLO_CFG);
  1829. + spin_unlock_irqrestore(&priv->page_lock, flags);
  1830. +
   1831. + /* wait up to 200ms for the dma engines to stop */
  1832. + for (i = 0; i < 10; i++) {
  1833. + if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
  1834. + (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {
  1835. + msleep(20);
  1836. + continue;
  1837. + }
  1838. + break;
  1839. + }
  1840. +
  1841. + fe_free_dma(priv);
  1842. +
  1843. + return 0;
  1844. +}
  1845. +
   1846. +static int fe_init(struct net_device *dev)
  1847. +{
  1848. + struct fe_priv *priv = netdev_priv(dev);
  1849. + struct device_node *port;
  1850. + const char *mac_addr;
  1851. + int err;
  1852. +
  1853. + priv->soc->reset_fe();
  1854. +
  1855. + if (priv->soc->switch_init)
  1856. + if (priv->soc->switch_init(priv)) {
  1857. + netdev_err(dev, "failed to initialize switch core\n");
  1858. + return -ENODEV;
  1859. + }
  1860. +
  1861. + mac_addr = of_get_mac_address(priv->device->of_node);
  1862. + if (mac_addr)
  1863. + ether_addr_copy(dev->dev_addr, mac_addr);
  1864. +
  1865. + /* If the mac address is invalid, use random mac address */
  1866. + if (!is_valid_ether_addr(dev->dev_addr)) {
  1867. + random_ether_addr(dev->dev_addr);
  1868. + dev_err(priv->device, "generated random MAC address %pM\n",
  1869. + dev->dev_addr);
  1870. + }
  1871. +
  1872. + err = fe_mdio_init(priv);
  1873. + if (err)
  1874. + return err;
  1875. +
  1876. + if (priv->soc->port_init)
  1877. + for_each_child_of_node(priv->device->of_node, port)
  1878. + if (of_device_is_compatible(port, "mediatek,eth-port") &&
  1879. + of_device_is_available(port))
  1880. + priv->soc->port_init(priv, port);
  1881. +
  1882. + if (priv->phy) {
  1883. + err = priv->phy->connect(priv);
  1884. + if (err)
  1885. + goto err_phy_disconnect;
  1886. + }
  1887. +
  1888. + err = fe_hw_init(dev);
  1889. + if (!err)
  1890. + return 0;
  1891. +
  1892. +err_phy_disconnect:
  1893. + if (priv->phy)
  1894. + priv->phy->disconnect(priv);
  1895. + fe_mdio_cleanup(priv);
  1896. +
  1897. + return err;
  1898. +}
  1899. +
  1900. +static void fe_uninit(struct net_device *dev)
  1901. +{
  1902. + struct fe_priv *priv = netdev_priv(dev);
  1903. +
  1904. + if (priv->phy)
  1905. + priv->phy->disconnect(priv);
  1906. + fe_mdio_cleanup(priv);
  1907. +
  1908. + fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
   1909. + devm_free_irq(priv->device, dev->irq, dev);
  1910. +}
  1911. +
  1912. +static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  1913. +{
  1914. + struct fe_priv *priv = netdev_priv(dev);
  1915. +
  1916. + if (!priv->phy_dev)
  1917. + return -ENODEV;
  1918. +
  1919. + switch (cmd) {
  1920. + case SIOCGMIIPHY:
  1921. + case SIOCGMIIREG:
  1922. + case SIOCSMIIREG:
  1923. + return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
  1924. + default:
  1925. + break;
  1926. + }
  1927. +
  1928. + return -EOPNOTSUPP;
  1929. +}
  1930. +
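+/* On hardware without jumbo support the generic helper does the job.
+ * Otherwise an MTU change only matters when it crosses the
+ * ETH_DATA_LEN boundary: the RX fragment size switches between the
+ * default and PAGE_SIZE and, on a running device, the frame engine is
+ * restarted with the jumbo length programmed in 1024-byte units.
+ */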
  1931. +static int fe_change_mtu(struct net_device *dev, int new_mtu)
  1932. +{
  1933. + struct fe_priv *priv = netdev_priv(dev);
  1934. + int frag_size, old_mtu;
  1935. + u32 fwd_cfg;
  1936. +
  1937. + if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
  1938. + return eth_change_mtu(dev, new_mtu);
  1939. +
  1940. + frag_size = fe_max_frag_size(new_mtu);
  1941. + if (new_mtu < 68 || frag_size > PAGE_SIZE)
  1942. + return -EINVAL;
  1943. +
  1944. + old_mtu = dev->mtu;
  1945. + dev->mtu = new_mtu;
  1946. +
  1947. + /* return early if the buffer sizes will not change */
  1948. + if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
  1949. + return 0;
  1950. + if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
  1951. + return 0;
  1952. +
  1953. + if (new_mtu <= ETH_DATA_LEN)
  1954. + priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
  1955. + else
  1956. + priv->rx_ring.frag_size = PAGE_SIZE;
  1957. + priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);
  1958. +
  1959. + if (!netif_running(dev))
  1960. + return 0;
  1961. +
  1962. + fe_stop(dev);
  1963. + fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
  1964. + if (new_mtu <= ETH_DATA_LEN) {
  1965. + fwd_cfg &= ~FE_GDM1_JMB_EN;
  1966. + } else {
  1967. + fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
  1968. + fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
  1969. + FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;
  1970. + }
  1971. + fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
  1972. +
  1973. + return fe_open(dev);
  1974. +}
  1975. +
  1976. +static const struct net_device_ops fe_netdev_ops = {
  1977. + .ndo_init = fe_init,
  1978. + .ndo_uninit = fe_uninit,
  1979. + .ndo_open = fe_open,
  1980. + .ndo_stop = fe_stop,
  1981. + .ndo_start_xmit = fe_start_xmit,
  1982. + .ndo_set_mac_address = fe_set_mac_address,
  1983. + .ndo_validate_addr = eth_validate_addr,
  1984. + .ndo_do_ioctl = fe_do_ioctl,
  1985. + .ndo_change_mtu = fe_change_mtu,
  1986. + .ndo_tx_timeout = fe_tx_timeout,
  1987. + .ndo_get_stats64 = fe_get_stats64,
  1988. + .ndo_vlan_rx_add_vid = fe_vlan_rx_add_vid,
  1989. + .ndo_vlan_rx_kill_vid = fe_vlan_rx_kill_vid,
  1990. +#ifdef CONFIG_NET_POLL_CONTROLLER
  1991. + .ndo_poll_controller = fe_poll_controller,
  1992. +#endif
  1993. +};
  1994. +
  1995. +static void fe_reset_pending(struct fe_priv *priv)
  1996. +{
  1997. + struct net_device *dev = priv->netdev;
  1998. + int err;
  1999. +
  2000. + rtnl_lock();
  2001. + fe_stop(dev);
  2002. +
  2003. + err = fe_open(dev);
  2004. + if (err) {
  2005. + netif_alert(priv, ifup, dev,
  2006. + "Driver up/down cycle failed, closing device.\n");
  2007. + dev_close(dev);
  2008. + }
  2009. + rtnl_unlock();
  2010. +}
  2011. +
  2012. +static const struct fe_work_t fe_work[] = {
  2013. + {FE_FLAG_RESET_PENDING, fe_reset_pending},
  2014. +};
  2015. +
  2016. +static void fe_pending_work(struct work_struct *work)
  2017. +{
  2018. + struct fe_priv *priv = container_of(work, struct fe_priv, pending_work);
  2019. + int i;
  2020. + bool pending;
  2021. +
  2022. + for (i = 0; i < ARRAY_SIZE(fe_work); i++) {
  2023. + pending = test_and_clear_bit(fe_work[i].bitnr,
  2024. + priv->pending_flags);
  2025. + if (pending)
  2026. + fe_work[i].action(priv);
  2027. + }
  2028. +}
  2029. +
  2030. +static int fe_probe(struct platform_device *pdev)
  2031. +{
  2032. + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2033. + const struct of_device_id *match;
  2034. + struct fe_soc_data *soc;
  2035. + struct net_device *netdev;
  2036. + struct fe_priv *priv;
  2037. + struct clk *sysclk;
  2038. + int err, napi_weight;
  2039. +
  2040. + device_reset(&pdev->dev);
  2041. +
  2042. + match = of_match_device(of_fe_match, &pdev->dev);
  2043. + soc = (struct fe_soc_data *)match->data;
  2044. +
  2045. + if (soc->reg_table)
  2046. + fe_reg_table = soc->reg_table;
  2047. + else
  2048. + soc->reg_table = fe_reg_table;
  2049. +
  2050. + fe_base = devm_ioremap_resource(&pdev->dev, res);
   2051. + if (IS_ERR(fe_base)) {
   2052. + err = PTR_ERR(fe_base);
  2053. + goto err_out;
  2054. + }
  2055. +
  2056. + netdev = alloc_etherdev(sizeof(*priv));
  2057. + if (!netdev) {
  2058. + dev_err(&pdev->dev, "alloc_etherdev failed\n");
  2059. + err = -ENOMEM;
  2060. + goto err_iounmap;
  2061. + }
  2062. +
  2063. + SET_NETDEV_DEV(netdev, &pdev->dev);
  2064. + netdev->netdev_ops = &fe_netdev_ops;
  2065. + netdev->base_addr = (unsigned long)fe_base;
  2066. +
  2067. + netdev->irq = platform_get_irq(pdev, 0);
  2068. + if (netdev->irq < 0) {
  2069. + dev_err(&pdev->dev, "no IRQ resource found\n");
  2070. + err = -ENXIO;
  2071. + goto err_free_dev;
  2072. + }
  2073. +
  2074. + if (soc->init_data)
  2075. + soc->init_data(soc, netdev);
  2076. + netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_CTAG_TX;
  2077. + netdev->features |= netdev->hw_features;
  2078. +
   2079. + /* fake rx vlan filtering to support the tx vlan offload function */
  2080. + if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
  2081. + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  2082. +
  2083. + priv = netdev_priv(netdev);
  2084. + spin_lock_init(&priv->page_lock);
  2085. + if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
  2086. + priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
  2087. + if (!priv->hw_stats) {
  2088. + err = -ENOMEM;
  2089. + goto err_free_dev;
  2090. + }
  2091. + spin_lock_init(&priv->hw_stats->stats_lock);
  2092. + }
  2093. +
  2094. + sysclk = devm_clk_get(&pdev->dev, NULL);
  2095. + if (!IS_ERR(sysclk)) {
  2096. + priv->sysclk = clk_get_rate(sysclk);
  2097. + } else if ((priv->flags & FE_FLAG_CALIBRATE_CLK)) {
  2098. + dev_err(&pdev->dev, "this soc needs a clk for calibration\n");
  2099. + err = -ENXIO;
  2100. + goto err_free_dev;
  2101. + }
  2102. +
  2103. + priv->switch_np = of_parse_phandle(pdev->dev.of_node, "mediatek,switch", 0);
  2104. + if ((priv->flags & FE_FLAG_HAS_SWITCH) && !priv->switch_np) {
  2105. + dev_err(&pdev->dev, "failed to read switch phandle\n");
  2106. + err = -ENODEV;
  2107. + goto err_free_dev;
  2108. + }
  2109. +
  2110. + priv->netdev = netdev;
  2111. + priv->device = &pdev->dev;
  2112. + priv->soc = soc;
  2113. + priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
  2114. + priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN);
  2115. + priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size);
  2116. + priv->tx_ring.tx_ring_size = NUM_DMA_DESC;
  2117. + priv->rx_ring.rx_ring_size = NUM_DMA_DESC;
  2118. + INIT_WORK(&priv->pending_work, fe_pending_work);
  2119. +
  2120. + napi_weight = 32;
  2121. + if (priv->flags & FE_FLAG_NAPI_WEIGHT) {
  2122. + napi_weight *= 4;
  2123. + priv->tx_ring.tx_ring_size *= 4;
  2124. + priv->rx_ring.rx_ring_size *= 4;
  2125. + }
  2126. + netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight);
  2127. + fe_set_ethtool_ops(netdev);
  2128. +
  2129. + err = register_netdev(netdev);
  2130. + if (err) {
  2131. + dev_err(&pdev->dev, "error bringing up device\n");
  2132. + goto err_free_dev;
  2133. + }
  2134. +
  2135. + platform_set_drvdata(pdev, netdev);
  2136. +
  2137. + netif_info(priv, probe, netdev, "mediatek frame engine at 0x%08lx, irq %d\n",
  2138. + netdev->base_addr, netdev->irq);
  2139. +
  2140. + return 0;
  2141. +
  2142. +err_free_dev:
  2143. + free_netdev(netdev);
  2144. +err_iounmap:
  2145. + devm_iounmap(&pdev->dev, fe_base);
  2146. +err_out:
  2147. + return err;
  2148. +}
  2149. +
  2150. +static int fe_remove(struct platform_device *pdev)
  2151. +{
  2152. + struct net_device *dev = platform_get_drvdata(pdev);
  2153. + struct fe_priv *priv = netdev_priv(dev);
  2154. +
   2155. + cancel_work_sync(&priv->pending_work);
   2156. +
   2157. + unregister_netdev(dev);
   2158. + netif_napi_del(&priv->rx_napi);
   2159. +
   2160. + kfree(priv->hw_stats);
   2161. + free_netdev(dev);
   2162. + platform_set_drvdata(pdev, NULL);
  2163. +
  2164. + return 0;
  2165. +}
  2166. +
  2167. +static struct platform_driver fe_driver = {
  2168. + .probe = fe_probe,
  2169. + .remove = fe_remove,
  2170. + .driver = {
  2171. + .name = "mtk_soc_eth",
  2172. + .owner = THIS_MODULE,
  2173. + .of_match_table = of_fe_match,
  2174. + },
  2175. +};
  2176. +
  2177. +module_platform_driver(fe_driver);
  2178. +
  2179. +MODULE_LICENSE("GPL");
  2180. +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
  2181. +MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
  2182. +MODULE_VERSION(MTK_FE_DRV_VERSION);
  2183. --- /dev/null
  2184. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  2185. @@ -0,0 +1,522 @@
  2186. +/* This program is free software; you can redistribute it and/or modify
  2187. + * it under the terms of the GNU General Public License as published by
  2188. + * the Free Software Foundation; version 2 of the License
  2189. + *
  2190. + * This program is distributed in the hope that it will be useful,
  2191. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  2192. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  2193. + * GNU General Public License for more details.
  2194. + *
  2195. + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org>
  2196. + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name>
  2197. + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com>
  2198. + */
  2199. +
  2200. +#ifndef FE_ETH_H
  2201. +#define FE_ETH_H
  2202. +
  2203. +#include <linux/mii.h>
  2204. +#include <linux/interrupt.h>
  2205. +#include <linux/netdevice.h>
  2206. +#include <linux/dma-mapping.h>
  2207. +#include <linux/phy.h>
  2208. +#include <linux/ethtool.h>
  2209. +#include <linux/version.h>
  2210. +
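+/* Generic register indices. fe_reg_r32()/fe_reg_w32() resolve them
+ * through the per-SoC reg_table (struct fe_soc_data) so the same core
+ * code can drive both the classic PDMA layout and the RT5350-style
+ * layout defined below.
+ */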
  2211. +enum fe_reg {
  2212. + FE_REG_PDMA_GLO_CFG = 0,
  2213. + FE_REG_PDMA_RST_CFG,
  2214. + FE_REG_DLY_INT_CFG,
  2215. + FE_REG_TX_BASE_PTR0,
  2216. + FE_REG_TX_MAX_CNT0,
  2217. + FE_REG_TX_CTX_IDX0,
  2218. + FE_REG_TX_DTX_IDX0,
  2219. + FE_REG_RX_BASE_PTR0,
  2220. + FE_REG_RX_MAX_CNT0,
  2221. + FE_REG_RX_CALC_IDX0,
  2222. + FE_REG_RX_DRX_IDX0,
  2223. + FE_REG_FE_INT_ENABLE,
  2224. + FE_REG_FE_INT_STATUS,
  2225. + FE_REG_FE_DMA_VID_BASE,
  2226. + FE_REG_FE_COUNTER_BASE,
  2227. + FE_REG_FE_RST_GL,
  2228. + FE_REG_FE_INT_STATUS2,
  2229. + FE_REG_COUNT
  2230. +};
  2231. +
  2232. +enum fe_work_flag {
  2233. + FE_FLAG_RESET_PENDING,
  2234. + FE_FLAG_MAX
  2235. +};
  2236. +
  2237. +#define MTK_FE_DRV_VERSION "0.1.2"
  2238. +
  2239. +/* power of 2 to let NEXT_TX_DESP_IDX work */
  2240. +#define NUM_DMA_DESC BIT(7)
  2241. +#define MAX_DMA_DESC 0xfff
  2242. +
  2243. +#define FE_DELAY_EN_INT 0x80
  2244. +#define FE_DELAY_MAX_INT 0x04
  2245. +#define FE_DELAY_MAX_TOUT 0x04
  2246. +#define FE_DELAY_TIME 20
  2247. +#define FE_DELAY_CHAN (((FE_DELAY_EN_INT | FE_DELAY_MAX_INT) << 8) | \
  2248. + FE_DELAY_MAX_TOUT)
  2249. +#define FE_DELAY_INIT ((FE_DELAY_CHAN << 16) | FE_DELAY_CHAN)
  2250. +#define FE_PSE_FQFC_CFG_INIT 0x80504000
  2251. +#define FE_PSE_FQFC_CFG_256Q 0xff908000
  2252. +
  2253. +/* interrupt bits */
  2254. +#define FE_CNT_PPE_AF BIT(31)
  2255. +#define FE_CNT_GDM_AF BIT(29)
  2256. +#define FE_PSE_P2_FC BIT(26)
  2257. +#define FE_PSE_BUF_DROP BIT(24)
  2258. +#define FE_GDM_OTHER_DROP BIT(23)
  2259. +#define FE_PSE_P1_FC BIT(22)
  2260. +#define FE_PSE_P0_FC BIT(21)
  2261. +#define FE_PSE_FQ_EMPTY BIT(20)
  2262. +#define FE_GE1_STA_CHG BIT(18)
  2263. +#define FE_TX_COHERENT BIT(17)
  2264. +#define FE_RX_COHERENT BIT(16)
  2265. +#define FE_TX_DONE_INT3 BIT(11)
  2266. +#define FE_TX_DONE_INT2 BIT(10)
  2267. +#define FE_TX_DONE_INT1 BIT(9)
  2268. +#define FE_TX_DONE_INT0 BIT(8)
  2269. +#define FE_RX_DONE_INT0 BIT(2)
  2270. +#define FE_TX_DLY_INT BIT(1)
  2271. +#define FE_RX_DLY_INT BIT(0)
  2272. +
  2273. +#define FE_RX_DONE_INT FE_RX_DONE_INT0
  2274. +#define FE_TX_DONE_INT (FE_TX_DONE_INT0 | FE_TX_DONE_INT1 | \
  2275. + FE_TX_DONE_INT2 | FE_TX_DONE_INT3)
  2276. +
  2277. +#define RT5350_RX_DLY_INT BIT(30)
  2278. +#define RT5350_TX_DLY_INT BIT(28)
  2279. +#define RT5350_RX_DONE_INT1 BIT(17)
  2280. +#define RT5350_RX_DONE_INT0 BIT(16)
  2281. +#define RT5350_TX_DONE_INT3 BIT(3)
  2282. +#define RT5350_TX_DONE_INT2 BIT(2)
  2283. +#define RT5350_TX_DONE_INT1 BIT(1)
  2284. +#define RT5350_TX_DONE_INT0 BIT(0)
  2285. +
  2286. +#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
  2287. +#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
  2288. + RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
  2289. +
  2290. +/* registers */
  2291. +#define FE_FE_OFFSET 0x0000
  2292. +#define FE_GDMA_OFFSET 0x0020
  2293. +#define FE_PSE_OFFSET 0x0040
  2294. +#define FE_GDMA2_OFFSET 0x0060
  2295. +#define FE_CDMA_OFFSET 0x0080
  2296. +#define FE_DMA_VID0 0x00a8
  2297. +#define FE_PDMA_OFFSET 0x0100
  2298. +#define FE_PPE_OFFSET 0x0200
  2299. +#define FE_CMTABLE_OFFSET 0x0400
  2300. +#define FE_POLICYTABLE_OFFSET 0x1000
  2301. +
  2302. +#define RT5350_PDMA_OFFSET 0x0800
  2303. +#define RT5350_SDM_OFFSET 0x0c00
  2304. +
  2305. +#define FE_MDIO_ACCESS (FE_FE_OFFSET + 0x00)
  2306. +#define FE_MDIO_CFG (FE_FE_OFFSET + 0x04)
  2307. +#define FE_FE_GLO_CFG (FE_FE_OFFSET + 0x08)
  2308. +#define FE_FE_RST_GL (FE_FE_OFFSET + 0x0C)
  2309. +#define FE_FE_INT_STATUS (FE_FE_OFFSET + 0x10)
  2310. +#define FE_FE_INT_ENABLE (FE_FE_OFFSET + 0x14)
  2311. +#define FE_MDIO_CFG2 (FE_FE_OFFSET + 0x18)
  2312. +#define FE_FOC_TS_T (FE_FE_OFFSET + 0x1C)
  2313. +
  2314. +#define FE_GDMA1_FWD_CFG (FE_GDMA_OFFSET + 0x00)
  2315. +#define FE_GDMA1_SCH_CFG (FE_GDMA_OFFSET + 0x04)
  2316. +#define FE_GDMA1_SHPR_CFG (FE_GDMA_OFFSET + 0x08)
  2317. +#define FE_GDMA1_MAC_ADRL (FE_GDMA_OFFSET + 0x0C)
  2318. +#define FE_GDMA1_MAC_ADRH (FE_GDMA_OFFSET + 0x10)
  2319. +
  2320. +#define FE_GDMA2_FWD_CFG (FE_GDMA2_OFFSET + 0x00)
  2321. +#define FE_GDMA2_SCH_CFG (FE_GDMA2_OFFSET + 0x04)
  2322. +#define FE_GDMA2_SHPR_CFG (FE_GDMA2_OFFSET + 0x08)
  2323. +#define FE_GDMA2_MAC_ADRL (FE_GDMA2_OFFSET + 0x0C)
  2324. +#define FE_GDMA2_MAC_ADRH (FE_GDMA2_OFFSET + 0x10)
  2325. +
  2326. +#define FE_PSE_FQ_CFG (FE_PSE_OFFSET + 0x00)
  2327. +#define FE_CDMA_FC_CFG (FE_PSE_OFFSET + 0x04)
  2328. +#define FE_GDMA1_FC_CFG (FE_PSE_OFFSET + 0x08)
  2329. +#define FE_GDMA2_FC_CFG (FE_PSE_OFFSET + 0x0C)
  2330. +
  2331. +#define FE_CDMA_CSG_CFG (FE_CDMA_OFFSET + 0x00)
  2332. +#define FE_CDMA_SCH_CFG (FE_CDMA_OFFSET + 0x04)
  2333. +
  2334. +#ifdef CONFIG_SOC_MT7621
  2335. +#define MT7620A_GDMA_OFFSET 0x0500
  2336. +#else
  2337. +#define MT7620A_GDMA_OFFSET 0x0600
  2338. +#endif
  2339. +#define MT7620A_GDMA1_FWD_CFG (MT7620A_GDMA_OFFSET + 0x00)
  2340. +#define MT7620A_FE_GDMA1_SCH_CFG (MT7620A_GDMA_OFFSET + 0x04)
  2341. +#define MT7620A_FE_GDMA1_SHPR_CFG (MT7620A_GDMA_OFFSET + 0x08)
  2342. +#define MT7620A_FE_GDMA1_MAC_ADRL (MT7620A_GDMA_OFFSET + 0x0C)
  2343. +#define MT7620A_FE_GDMA1_MAC_ADRH (MT7620A_GDMA_OFFSET + 0x10)
  2344. +
  2345. +#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00)
  2346. +#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04)
  2347. +#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08)
  2348. +#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C)
  2349. +#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10)
  2350. +#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14)
  2351. +#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18)
  2352. +#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C)
  2353. +#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20)
  2354. +#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24)
  2355. +#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28)
  2356. +#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C)
  2357. +#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30)
  2358. +#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34)
  2359. +#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38)
  2360. +#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C)
  2361. +#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100)
  2362. +#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104)
  2363. +#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108)
  2364. +#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C)
  2365. +#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110)
  2366. +#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114)
  2367. +#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118)
  2368. +#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C)
  2369. +#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204)
  2370. +#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208)
  2371. +#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c)
  2372. +#define RT5350_FE_INT_STATUS (RT5350_PDMA_OFFSET + 0x220)
  2373. +#define RT5350_FE_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228)
  2374. +#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280)
  2375. +
  2376. +#define FE_PDMA_GLO_CFG (FE_PDMA_OFFSET + 0x00)
  2377. +#define FE_PDMA_RST_CFG (FE_PDMA_OFFSET + 0x04)
  2378. +#define FE_PDMA_SCH_CFG (FE_PDMA_OFFSET + 0x08)
  2379. +#define FE_DLY_INT_CFG (FE_PDMA_OFFSET + 0x0C)
  2380. +#define FE_TX_BASE_PTR0 (FE_PDMA_OFFSET + 0x10)
  2381. +#define FE_TX_MAX_CNT0 (FE_PDMA_OFFSET + 0x14)
  2382. +#define FE_TX_CTX_IDX0 (FE_PDMA_OFFSET + 0x18)
  2383. +#define FE_TX_DTX_IDX0 (FE_PDMA_OFFSET + 0x1C)
  2384. +#define FE_TX_BASE_PTR1 (FE_PDMA_OFFSET + 0x20)
  2385. +#define FE_TX_MAX_CNT1 (FE_PDMA_OFFSET + 0x24)
  2386. +#define FE_TX_CTX_IDX1 (FE_PDMA_OFFSET + 0x28)
  2387. +#define FE_TX_DTX_IDX1 (FE_PDMA_OFFSET + 0x2C)
  2388. +#define FE_RX_BASE_PTR0 (FE_PDMA_OFFSET + 0x30)
  2389. +#define FE_RX_MAX_CNT0 (FE_PDMA_OFFSET + 0x34)
  2390. +#define FE_RX_CALC_IDX0 (FE_PDMA_OFFSET + 0x38)
  2391. +#define FE_RX_DRX_IDX0 (FE_PDMA_OFFSET + 0x3C)
  2392. +#define FE_TX_BASE_PTR2 (FE_PDMA_OFFSET + 0x40)
  2393. +#define FE_TX_MAX_CNT2 (FE_PDMA_OFFSET + 0x44)
  2394. +#define FE_TX_CTX_IDX2 (FE_PDMA_OFFSET + 0x48)
  2395. +#define FE_TX_DTX_IDX2 (FE_PDMA_OFFSET + 0x4C)
  2396. +#define FE_TX_BASE_PTR3 (FE_PDMA_OFFSET + 0x50)
  2397. +#define FE_TX_MAX_CNT3 (FE_PDMA_OFFSET + 0x54)
  2398. +#define FE_TX_CTX_IDX3 (FE_PDMA_OFFSET + 0x58)
  2399. +#define FE_TX_DTX_IDX3 (FE_PDMA_OFFSET + 0x5C)
  2400. +#define FE_RX_BASE_PTR1 (FE_PDMA_OFFSET + 0x60)
  2401. +#define FE_RX_MAX_CNT1 (FE_PDMA_OFFSET + 0x64)
  2402. +#define FE_RX_CALC_IDX1 (FE_PDMA_OFFSET + 0x68)
  2403. +#define FE_RX_DRX_IDX1 (FE_PDMA_OFFSET + 0x6C)
  2404. +
  2405. +/* Switch DMA configuration */
  2406. +#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00)
  2407. +#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04)
  2408. +#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08)
  2409. +#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C)
  2410. +#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10)
  2411. +#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100)
  2412. +#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104)
  2413. +#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108)
  2414. +#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C)
  2415. +#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110)
  2416. +
  2417. +#define RT5350_SDM_ICS_EN BIT(16)
  2418. +#define RT5350_SDM_TCS_EN BIT(17)
  2419. +#define RT5350_SDM_UCS_EN BIT(18)
  2420. +
  2421. +/* MDIO_CFG register bits */
  2422. +#define FE_MDIO_CFG_AUTO_POLL_EN BIT(29)
  2423. +#define FE_MDIO_CFG_GP1_BP_EN BIT(16)
  2424. +#define FE_MDIO_CFG_GP1_FRC_EN BIT(15)
  2425. +#define FE_MDIO_CFG_GP1_SPEED_10 (0 << 13)
  2426. +#define FE_MDIO_CFG_GP1_SPEED_100 (1 << 13)
  2427. +#define FE_MDIO_CFG_GP1_SPEED_1000 (2 << 13)
  2428. +#define FE_MDIO_CFG_GP1_DUPLEX BIT(12)
  2429. +#define FE_MDIO_CFG_GP1_FC_TX BIT(11)
  2430. +#define FE_MDIO_CFG_GP1_FC_RX BIT(10)
  2431. +#define FE_MDIO_CFG_GP1_LNK_DWN BIT(9)
  2432. +#define FE_MDIO_CFG_GP1_AN_FAIL BIT(8)
  2433. +#define FE_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6)
  2434. +#define FE_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6)
  2435. +#define FE_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6)
  2436. +#define FE_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6)
  2437. +#define FE_MDIO_CFG_TURBO_MII_FREQ BIT(5)
  2438. +#define FE_MDIO_CFG_TURBO_MII_MODE BIT(4)
  2439. +#define FE_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2)
  2440. +#define FE_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2)
  2441. +#define FE_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2)
  2442. +#define FE_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2)
  2443. +#define FE_MDIO_CFG_TX_CLK_SKEW_0 0
  2444. +#define FE_MDIO_CFG_TX_CLK_SKEW_200 1
  2445. +#define FE_MDIO_CFG_TX_CLK_SKEW_400 2
  2446. +#define FE_MDIO_CFG_TX_CLK_SKEW_INV 3
  2447. +
  2448. +/* uni-cast port */
  2449. +#define FE_GDM1_JMB_LEN_MASK 0xf
  2450. +#define FE_GDM1_JMB_LEN_SHIFT 28
  2451. +#define FE_GDM1_ICS_EN BIT(22)
  2452. +#define FE_GDM1_TCS_EN BIT(21)
  2453. +#define FE_GDM1_UCS_EN BIT(20)
  2454. +#define FE_GDM1_JMB_EN BIT(19)
  2455. +#define FE_GDM1_STRPCRC BIT(16)
  2456. +#define FE_GDM1_UFRC_P_CPU (0 << 12)
  2457. +#define FE_GDM1_UFRC_P_GDMA1 (1 << 12)
  2458. +#define FE_GDM1_UFRC_P_PPE (6 << 12)
  2459. +
  2460. +/* checksums */
  2461. +#define FE_ICS_GEN_EN BIT(2)
  2462. +#define FE_UCS_GEN_EN BIT(1)
  2463. +#define FE_TCS_GEN_EN BIT(0)
  2464. +
  2465. +/* dma ring */
  2466. +#define FE_PST_DRX_IDX0 BIT(16)
  2467. +#define FE_PST_DTX_IDX3 BIT(3)
  2468. +#define FE_PST_DTX_IDX2 BIT(2)
  2469. +#define FE_PST_DTX_IDX1 BIT(1)
  2470. +#define FE_PST_DTX_IDX0 BIT(0)
  2471. +
  2472. +#define FE_RX_2B_OFFSET BIT(31)
  2473. +#define FE_TX_WB_DDONE BIT(6)
  2474. +#define FE_RX_DMA_BUSY BIT(3)
  2475. +#define FE_TX_DMA_BUSY BIT(1)
  2476. +#define FE_RX_DMA_EN BIT(2)
  2477. +#define FE_TX_DMA_EN BIT(0)
  2478. +
  2479. +#define FE_PDMA_SIZE_4DWORDS (0 << 4)
  2480. +#define FE_PDMA_SIZE_8DWORDS (1 << 4)
  2481. +#define FE_PDMA_SIZE_16DWORDS (2 << 4)
  2482. +
  2483. +#define FE_US_CYC_CNT_MASK 0xff
  2484. +#define FE_US_CYC_CNT_SHIFT 0x8
  2485. +#define FE_US_CYC_CNT_DIVISOR 1000000
  2486. +
  2487. +/* rxd2 */
  2488. +#define RX_DMA_DONE BIT(31)
  2489. +#define RX_DMA_LSO BIT(30)
  2490. +#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
  2491. +#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
  2492. +#define RX_DMA_TAG BIT(15)
  2493. +/* rxd3 */
  2494. +#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff)
  2495. +#define RX_DMA_VID(_x) ((_x) & 0xffff)
  2496. +/* rxd4 */
  2497. +#define RX_DMA_L4VALID BIT(30)
  2498. +
  2499. +struct fe_rx_dma {
  2500. + unsigned int rxd1;
  2501. + unsigned int rxd2;
  2502. + unsigned int rxd3;
  2503. + unsigned int rxd4;
  2504. +} __packed __aligned(4);
  2505. +
  2506. +#define TX_DMA_BUF_LEN 0x3fff
  2507. +#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16)
  2508. +#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16)
  2509. +#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
  2510. +#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN)
  2511. +#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
  2512. +#define TX_DMA_LS1 BIT(14)
  2513. +#define TX_DMA_LS0 BIT(30)
  2514. +#define TX_DMA_DONE BIT(31)
  2515. +
  2516. +#define TX_DMA_INS_VLAN_MT7621 BIT(16)
  2517. +#define TX_DMA_INS_VLAN BIT(7)
  2518. +#define TX_DMA_INS_PPPOE BIT(12)
  2519. +#define TX_DMA_QN(_x) ((_x) << 16)
  2520. +#define TX_DMA_PN(_x) ((_x) << 24)
  2521. +#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
  2522. +#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
  2523. +#define TX_DMA_UDF BIT(20)
  2524. +#define TX_DMA_CHKSUM (0x7 << 29)
  2525. +#define TX_DMA_TSO BIT(28)
  2526. +
  2527. +/* frame engine counters */
  2528. +#define FE_PPE_AC_BCNT0 (FE_CMTABLE_OFFSET + 0x00)
  2529. +#define FE_GDMA1_TX_GBCNT (FE_CMTABLE_OFFSET + 0x300)
  2530. +#define FE_GDMA2_TX_GBCNT (FE_GDMA1_TX_GBCNT + 0x40)
  2531. +
  2532. +/* phy device flags */
  2533. +#define FE_PHY_FLAG_PORT BIT(0)
  2534. +#define FE_PHY_FLAG_ATTACH BIT(1)
  2535. +
  2536. +struct fe_tx_dma {
  2537. + unsigned int txd1;
  2538. + unsigned int txd2;
  2539. + unsigned int txd3;
  2540. + unsigned int txd4;
  2541. +} __packed __aligned(4);
  2542. +
  2543. +struct fe_priv;
  2544. +
  2545. +struct fe_phy {
  2546. + /* make sure that phy operations are atomic */
  2547. + spinlock_t lock;
  2548. +
  2549. + struct phy_device *phy[8];
  2550. + struct device_node *phy_node[8];
  2551. + const __be32 *phy_fixed[8];
  2552. + int duplex[8];
  2553. + int speed[8];
  2554. + int tx_fc[8];
  2555. + int rx_fc[8];
  2556. + int (*connect)(struct fe_priv *priv);
  2557. + void (*disconnect)(struct fe_priv *priv);
  2558. + void (*start)(struct fe_priv *priv);
  2559. + void (*stop)(struct fe_priv *priv);
  2560. +};
  2561. +
  2562. +struct fe_soc_data {
  2563. + const u16 *reg_table;
  2564. +
  2565. + void (*init_data)(struct fe_soc_data *data, struct net_device *netdev);
  2566. + void (*reset_fe)(void);
  2567. + void (*set_mac)(struct fe_priv *priv, unsigned char *mac);
  2568. + int (*fwd_config)(struct fe_priv *priv);
  2569. + void (*tx_dma)(struct fe_tx_dma *txd);
  2570. + int (*switch_init)(struct fe_priv *priv);
  2571. + void (*port_init)(struct fe_priv *priv, struct device_node *port);
  2572. + int (*has_carrier)(struct fe_priv *priv);
  2573. + int (*mdio_init)(struct fe_priv *priv);
  2574. + void (*mdio_cleanup)(struct fe_priv *priv);
  2575. + int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
  2576. + u16 val);
  2577. + int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
  2578. + void (*mdio_adjust_link)(struct fe_priv *priv, int port);
  2579. +
  2580. + void *swpriv;
  2581. + u32 pdma_glo_cfg;
  2582. + u32 rx_int;
  2583. + u32 tx_int;
  2584. + u32 status_int;
  2585. + u32 checksum_bit;
  2586. +};
  2587. +
  2588. +#define FE_FLAG_PADDING_64B BIT(0)
  2589. +#define FE_FLAG_PADDING_BUG BIT(1)
  2590. +#define FE_FLAG_JUMBO_FRAME BIT(2)
  2591. +#define FE_FLAG_RX_2B_OFFSET BIT(3)
  2592. +#define FE_FLAG_RX_SG_DMA BIT(4)
  2593. +#define FE_FLAG_RX_VLAN_CTAG BIT(5)
  2594. +#define FE_FLAG_NAPI_WEIGHT BIT(6)
  2595. +#define FE_FLAG_CALIBRATE_CLK BIT(7)
  2596. +#define FE_FLAG_HAS_SWITCH BIT(8)
  2597. +
  2598. +#define FE_STAT_REG_DECLARE \
  2599. + _FE(tx_bytes) \
  2600. + _FE(tx_packets) \
  2601. + _FE(tx_skip) \
  2602. + _FE(tx_collisions) \
  2603. + _FE(rx_bytes) \
  2604. + _FE(rx_packets) \
  2605. + _FE(rx_overflow) \
  2606. + _FE(rx_fcs_errors) \
  2607. + _FE(rx_short_errors) \
  2608. + _FE(rx_long_errors) \
  2609. + _FE(rx_checksum_errors) \
  2610. + _FE(rx_flow_control_packets)
  2611. +
  2612. +struct fe_hw_stats {
  2613. + /* make sure that stats operations are atomic */
  2614. + spinlock_t stats_lock;
  2615. +
  2616. + struct u64_stats_sync syncp;
  2617. +#define _FE(x) u64 x;
  2618. + FE_STAT_REG_DECLARE
  2619. +#undef _FE
  2620. +};
  2621. +
  2622. +enum fe_tx_flags {
  2623. + FE_TX_FLAGS_SINGLE0 = 0x01,
  2624. + FE_TX_FLAGS_PAGE0 = 0x02,
  2625. + FE_TX_FLAGS_PAGE1 = 0x04,
  2626. +};
  2627. +
  2628. +struct fe_tx_buf {
  2629. + struct sk_buff *skb;
  2630. + u32 flags;
  2631. + DEFINE_DMA_UNMAP_ADDR(dma_addr0);
  2632. + DEFINE_DMA_UNMAP_LEN(dma_len0);
  2633. + DEFINE_DMA_UNMAP_ADDR(dma_addr1);
  2634. + DEFINE_DMA_UNMAP_LEN(dma_len1);
  2635. +};
  2636. +
  2637. +struct fe_tx_ring {
  2638. + struct fe_tx_dma *tx_dma;
  2639. + struct fe_tx_buf *tx_buf;
  2640. + dma_addr_t tx_phys;
  2641. + u16 tx_ring_size;
  2642. + u16 tx_free_idx;
  2643. + u16 tx_next_idx;
  2644. + u16 tx_thresh;
  2645. +};
  2646. +
  2647. +struct fe_rx_ring {
  2648. + struct fe_rx_dma *rx_dma;
  2649. + u8 **rx_data;
  2650. + dma_addr_t rx_phys;
  2651. + u16 rx_ring_size;
  2652. + u16 frag_size;
  2653. + u16 rx_buf_size;
  2654. + u16 rx_calc_idx;
  2655. +};
  2656. +
  2657. +struct fe_priv {
  2658. + /* make sure that register operations are atomic */
  2659. + spinlock_t page_lock;
  2660. +
  2661. + struct fe_soc_data *soc;
  2662. + struct net_device *netdev;
  2663. + struct device_node *switch_np;
  2664. + u32 msg_enable;
  2665. + u32 flags;
  2666. +
  2667. + struct device *device;
  2668. + unsigned long sysclk;
  2669. +
  2670. + struct fe_rx_ring rx_ring;
  2671. + struct napi_struct rx_napi;
  2672. +
  2673. + struct fe_tx_ring tx_ring;
  2674. +
  2675. + struct fe_phy *phy;
  2676. + struct mii_bus *mii_bus;
  2677. + struct phy_device *phy_dev;
  2678. + u32 phy_flags;
  2679. +
  2680. + int link[8];
  2681. +
  2682. + struct fe_hw_stats *hw_stats;
  2683. + unsigned long vlan_map;
  2684. + struct work_struct pending_work;
  2685. + DECLARE_BITMAP(pending_flags, FE_FLAG_MAX);
  2686. +};
  2687. +
  2688. +extern const struct of_device_id of_fe_match[];
  2689. +
  2690. +void fe_w32(u32 val, unsigned reg);
  2691. +u32 fe_r32(unsigned reg);
  2692. +
  2693. +int fe_set_clock_cycle(struct fe_priv *priv);
  2694. +void fe_csum_config(struct fe_priv *priv);
  2695. +void fe_stats_update(struct fe_priv *priv);
  2696. +void fe_fwd_config(struct fe_priv *priv);
  2697. +void fe_reg_w32(u32 val, enum fe_reg reg);
  2698. +u32 fe_reg_r32(enum fe_reg reg);
  2699. +
  2700. +void fe_reset(u32 reset_bits);
  2701. +
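+/* Inverse of netdev_priv(): recover the net_device from the private
+ * area that alloc_etherdev() placed directly behind it.
+ */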
  2702. +static inline void *priv_netdev(struct fe_priv *priv)
  2703. +{
  2704. + return (char *)priv - ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
  2705. +}
  2706. +
  2707. +#endif /* FE_ETH_H */