760-8139cp-fixes-from-4.3.patch

commit 41b976414c88016e2c9d9b2f6667ee67a998d388
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:31 2015 +0100

    8139cp: Dump contents of descriptor ring on TX timeout

    We are seeing unexplained TX timeouts under heavy load. Let's try to get
    a better idea of what's going on.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7f4c685633e2df9ba10d49a31dda13715745db37
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:16 2015 +0100

    8139cp: Fix DMA unmapping of transmitted buffers

    The low 16 bits of the 'opts1' field in the TX descriptor are supposed
    to still contain the buffer length when the descriptor is handed back to
    us. In practice, at least on my hardware, they don't. So stash the
    original value of the opts1 field and get the length to unmap from
    there.

    There are other ways we could have worked out the length, but I actually
    want a stash of the opts1 field anyway so that I can dump it alongside
    the contents of the descriptor ring when we suffer a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
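Illustrative sketch (not part of the patch; simplified stand-in types rather than the driver's real cp_private/cp_desc): the bookkeeping pattern the fix relies on is to record what was written into opts1 at submit time and take the DMA-unmap length from that record at completion time, instead of trusting the bits the hardware hands back.

/* Sketch only: hypothetical types and names, not the 8139cp driver's API. */
#include <stdint.h>

#define TX_RING_SIZE 64

struct tx_desc {
    uint32_t opts1;     /* hardware may clobber the low 16 length bits */
    uint64_t addr;
};

struct tx_ring {
    struct tx_desc desc[TX_RING_SIZE];
    uint32_t opts_stash[TX_RING_SIZE];  /* software copy of opts1, one per slot */
};

/* Submit: write the descriptor and remember exactly what went into opts1. */
static void tx_submit(struct tx_ring *r, unsigned slot, uint64_t dma_addr,
                      uint32_t opts1)
{
    r->desc[slot].addr = dma_addr;
    r->desc[slot].opts1 = opts1;
    r->opts_stash[slot] = opts1;
}

/* Complete: take the unmap length from the stash, because the device is not
 * guaranteed to return the buffer length in the descriptor. */
static uint32_t tx_unmap_len(const struct tx_ring *r, unsigned slot)
{
    return r->opts_stash[slot] & 0xffff;
}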
commit 0a5aeee0b79fa99d8e04c98dd4e87d4f52aa497b
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:57 2015 +0100

    8139cp: Reduce duplicate csum/tso code in cp_start_xmit()

    We calculate the value of the opts1 descriptor field in three different
    places. With two different behaviours when given an invalid packet to
    be checksummed — none of them correct. Sort that out.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
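Illustrative sketch (not part of the patch; the flag values and names below are hypothetical, not the chip's real bit layout): the consolidation amounts to computing the shared per-packet opts1 bits exactly once, and rejecting a checksum request we cannot honour instead of handling it three different ways.

/* Sketch only: hypothetical flag values and helper name. */
#include <stdbool.h>
#include <stdint.h>

enum {
    DESC_OWN   = 1u << 31,
    LARGE_SEND = 1u << 27,
    IP_CS      = 1u << 18,
    TCP_CS     = 1u << 17,
    UDP_CS     = 1u << 16,
    MSS_SHIFT  = 16,
    MSS_MASK   = 0x7ff,
};

/* Build the per-packet part of opts1 once; every descriptor of the packet
 * then ORs in its own length/fragment bits. Returns false for a checksum
 * request on an unsupported protocol, so the caller can drop the packet
 * rather than send it with a bogus checksum. */
static bool build_opts1(uint32_t mss, bool csum_partial, int l4_proto,
                        uint32_t *opts1)
{
    uint32_t v = DESC_OWN;

    if (mss)
        v |= LARGE_SEND | ((mss & MSS_MASK) << MSS_SHIFT);
    else if (csum_partial) {
        if (l4_proto == 6)        /* TCP */
            v |= IP_CS | TCP_CS;
        else if (l4_proto == 17)  /* UDP */
            v |= IP_CS | UDP_CS;
        else
            return false;         /* cannot offload this checksum */
    }
    *opts1 = v;
    return true;
}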
commit a3b804043f490aeec57d8ca5baccdd35e6250857
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:38 2015 +0100

    8139cp: Fix TSO/scatter-gather descriptor setup

    When sending a TSO frame in multiple buffers, we were neglecting to set
    the first descriptor up in TSO mode.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
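Illustrative sketch (not part of the patch; hypothetical names and flag values): in the multi-buffer path the head descriptor is written last, and it must carry the same per-packet opts1 bits (including the TSO bits) as the fragment descriptors, not just a bare length and FirstFrag.

/* Sketch only: simplified ring submit order for a multi-buffer packet. */
#include <stdint.h>

#define RING_SIZE 64
#define NEXT(x)   (((x) + 1) & (RING_SIZE - 1))

enum { F_FIRST = 1u << 29, F_LAST = 1u << 28 };

struct tx_desc { uint32_t opts1; };

/* opts1 already holds the ownership bit plus the per-packet TSO/checksum
 * flags. Fragment descriptors are filled in first; the head descriptor is
 * filled in last so the device never sees a half-built chain, and it gets
 * the same opts1 bits (the detail the original code dropped for TSO). */
static unsigned tx_submit_frags(struct tx_desc *ring, unsigned head,
                                uint32_t opts1, uint32_t head_len,
                                const uint32_t *frag_len, unsigned nfrags)
{
    unsigned entry = head;

    for (unsigned i = 0; i < nfrags; i++) {
        entry = NEXT(entry);
        uint32_t ctrl = opts1 | frag_len[i];
        if (i == nfrags - 1)
            ctrl |= F_LAST;
        ring[entry].opts1 = ctrl;
    }

    ring[head].opts1 = opts1 | head_len | F_FIRST;

    return NEXT(entry);  /* new ring head */
}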
commit 26b0bad6ac3a0167792dc4ffb276c29bc597d239
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:06 2015 +0100

    8139cp: Fix tx_queued debug message to print correct slot numbers

    After a certain amount of staring at the debug output of this driver, I
    realised it was lying to me.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
commit aaa0062ecf4877a26dea66bee1039c6eaf906c94
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:43:41 2015 +0100

    8139cp: Do not re-enable RX interrupts in cp_tx_timeout()

    If an RX interrupt was already received but NAPI has not yet run when
    the TX timeout happens, we end up in cp_tx_timeout() with RX interrupts
    already disabled. Blindly re-enabling them will cause an IRQ storm.

    (This is made particularly horrid by the fact that cp_interrupt() always
    returns that it's handled the interrupt, even when it hasn't actually
    done anything. If it didn't do that, the core IRQ code would have
    detected the storm and handled it, I'd have had a clear smoking gun
    backtrace instead of just a spontaneously resetting router, and I'd have
    at *least* two days of my life back. Changing the return value of
    cp_interrupt() will be argued about under separate cover.)

    Unconditionally leave RX interrupts disabled after the reset, and
    schedule NAPI to check the receive ring and re-enable them.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
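Illustrative sketch (not part of the patch; stand-in booleans in place of the real interrupt-mask register writes and napi_schedule()): the safe pattern is to leave RX interrupts masked after the reset and let the NAPI poll routine re-enable them once the receive ring has been drained.

/* Sketch only: models the mask-and-poll handshake, not the kernel NAPI API. */
#include <stdbool.h>

struct nic_state {
    bool rx_irq_enabled;
    bool napi_scheduled;
};

/* Timeout handler: after resetting the hardware, do NOT blindly unmask RX
 * interrupts (the interrupt handler may already have masked them with a NAPI
 * poll still pending); leave them masked and kick the poller instead. */
static void tx_timeout_handler(struct nic_state *nic)
{
    /* ... stop hardware, clean and re-init rings, restart hardware ... */
    nic->rx_irq_enabled = false;   /* stays masked */
    nic->napi_scheduled = true;    /* stands in for napi_schedule() */
}

/* NAPI poll: drain the RX ring, and only then unmask RX interrupts. */
static void napi_poll(struct nic_state *nic)
{
    /* ... process received packets ... */
    nic->napi_scheduled = false;
    nic->rx_irq_enabled = true;    /* safe: ring is empty again */
}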
commit 7a8a8e75d505147358b225173e890ada43a267e2
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:21:54 2015 +0100

    8139cp: Call __cp_set_rx_mode() from cp_tx_timeout()

    Unless we reset the RX config, on real hardware I don't seem to receive
    any packets after a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
commit fc27bd115b334e3ebdc682a42a47c3aea2566dcc
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:19:08 2015 +0100

    8139cp: Use dev_kfree_skb_any() instead of dev_kfree_skb() in cp_clean_rings()

    This can be called from cp_tx_timeout() with interrupts disabled.

    Spotted by Francois Romieu <romieu@fr.zoreil.com>

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
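Illustrative sketch (not part of the patch; stand-in helpers, not the kernel's implementation): dev_kfree_skb() is only safe in process/softirq context with interrupts enabled, while dev_kfree_skb_any() checks the calling context and defers the free when necessary, which is why it is the right call on this path.

/* Sketch only: the "free now or defer" decision, with hypothetical names. */
#include <stdbool.h>
#include <stddef.h>

struct skb_sketch { struct skb_sketch *next; };

static bool atomic_context;                     /* stands in for irqs_disabled()/in_irq() */
static struct skb_sketch *deferred_free_list;   /* stands in for the per-CPU completion queue */

static void free_immediately(struct skb_sketch *skb)
{
    (void)skb;  /* the path that is only legal with interrupts enabled */
}

static void kfree_skb_any_sketch(struct skb_sketch *skb)
{
    if (!skb)
        return;
    if (atomic_context) {
        /* Defer: queue the skb and let a later softirq free it. */
        skb->next = deferred_free_list;
        deferred_free_list = skb;
    } else {
        free_immediately(skb);
    }
}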
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
         NWayAdvert      = 0x66, /* MII ADVERTISE */
         NWayLPAR        = 0x68, /* MII LPA */
         NWayExpansion   = 0x6A, /* MII Expansion */
+        TxDmaOkLowDesc  = 0x82, /* Low 16 bit address of a Tx descriptor. */
         Config5         = 0xD8, /* Config5 */
         TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
         RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
         unsigned                tx_tail;
         struct cp_desc          *tx_ring;
         struct sk_buff          *tx_skb[CP_TX_RING_SIZE];
+        u32                     tx_opts[CP_TX_RING_SIZE];

         unsigned                rx_buf_sz;
         unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp
                 BUG_ON(!skb);

                 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
-                                 le32_to_cpu(txd->opts1) & 0xffff,
+                                 cp->tx_opts[tx_tail] & 0xffff,
                                  PCI_DMA_TODEVICE);

                 if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct
 {
         struct cp_private *cp = netdev_priv(dev);
         unsigned entry;
-        u32 eor, flags;
+        u32 eor, opts1;
         unsigned long intr_flags;
         __le32 opts2;
         int mss = 0;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct
         mss = skb_shinfo(skb)->gso_size;

         opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+        opts1 = DescOwn;
+        if (mss)
+                opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                const struct iphdr *ip = ip_hdr(skb);
+                if (ip->protocol == IPPROTO_TCP)
+                        opts1 |= IPCS | TCPCS;
+                else if (ip->protocol == IPPROTO_UDP)
+                        opts1 |= IPCS | UDPCS;
+                else {
+                        WARN_ONCE(1,
+                                  "Net bug: asked to checksum invalid Legacy IP packet\n");
+                        goto out_dma_error;
+                }
+        }

         if (skb_shinfo(skb)->nr_frags == 0) {
                 struct cp_desc *txd = &cp->tx_ring[entry];
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct
                 txd->addr = cpu_to_le64(mapping);
                 wmb();

-                flags = eor | len | DescOwn | FirstFrag | LastFrag;
-
-                if (mss)
-                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
-                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        const struct iphdr *ip = ip_hdr(skb);
-                        if (ip->protocol == IPPROTO_TCP)
-                                flags |= IPCS | TCPCS;
-                        else if (ip->protocol == IPPROTO_UDP)
-                                flags |= IPCS | UDPCS;
-                        else
-                                WARN_ON(1);     /* we need a WARN() */
-                }
+                opts1 |= eor | len | FirstFrag | LastFrag;

-                txd->opts1 = cpu_to_le32(flags);
+                txd->opts1 = cpu_to_le32(opts1);
                 wmb();

                 cp->tx_skb[entry] = skb;
-                entry = NEXT_TX(entry);
+                cp->tx_opts[entry] = opts1;
+                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+                          entry, skb->len);
         } else {
                 struct cp_desc *txd;
-                u32 first_len, first_eor;
+                u32 first_len, first_eor, ctrl;
                 dma_addr_t first_mapping;
                 int frag, first_entry = entry;
-                const struct iphdr *ip = ip_hdr(skb);

                 /* We must give this initial chunk to the device last.
                  * Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct
                         goto out_dma_error;

                 cp->tx_skb[entry] = skb;
-                entry = NEXT_TX(entry);

                 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                         const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                         u32 len;
-                        u32 ctrl;
                         dma_addr_t mapping;

+                        entry = NEXT_TX(entry);
+
                         len = skb_frag_size(this_frag);
                         mapping = dma_map_single(&cp->pdev->dev,
                                                  skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct

                         eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

-                        ctrl = eor | len | DescOwn;
-
-                        if (mss)
-                                ctrl |= LargeSend |
-                                        ((mss & MSSMask) << MSSShift);
-                        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                                if (ip->protocol == IPPROTO_TCP)
-                                        ctrl |= IPCS | TCPCS;
-                                else if (ip->protocol == IPPROTO_UDP)
-                                        ctrl |= IPCS | UDPCS;
-                                else
-                                        BUG();
-                        }
+                        ctrl = opts1 | eor | len;

                         if (frag == skb_shinfo(skb)->nr_frags - 1)
                                 ctrl |= LastFrag;
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct
                         txd->opts1 = cpu_to_le32(ctrl);
                         wmb();

+                        cp->tx_opts[entry] = ctrl;
                         cp->tx_skb[entry] = skb;
-                        entry = NEXT_TX(entry);
                 }

                 txd = &cp->tx_ring[first_entry];
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct
                 txd->addr = cpu_to_le64(first_mapping);
                 wmb();

-                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        if (ip->protocol == IPPROTO_TCP)
-                                txd->opts1 = cpu_to_le32(first_eor | first_len |
-                                                         FirstFrag | DescOwn |
-                                                         IPCS | TCPCS);
-                        else if (ip->protocol == IPPROTO_UDP)
-                                txd->opts1 = cpu_to_le32(first_eor | first_len |
-                                                         FirstFrag | DescOwn |
-                                                         IPCS | UDPCS);
-                        else
-                                BUG();
-                } else
-                        txd->opts1 = cpu_to_le32(first_eor | first_len |
-                                                 FirstFrag | DescOwn);
+                ctrl = opts1 | first_eor | first_len | FirstFrag;
+                txd->opts1 = cpu_to_le32(ctrl);
                 wmb();
+
+                cp->tx_opts[first_entry] = ctrl;
+                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+                          first_entry, entry, skb->len);
         }
-        cp->tx_head = entry;
+        cp->tx_head = NEXT_TX(entry);

         netdev_sent_queue(dev, skb->len);
-        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
-                  entry, skb->len);
         if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                 netif_stop_queue(dev);

@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_priv
 {
         memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
         cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));

         cp_init_rings_index(cp);

@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_pr
                         desc = cp->rx_ring + i;
                         dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                          cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-                        dev_kfree_skb(cp->rx_skb[i]);
+                        dev_kfree_skb_any(cp->rx_skb[i]);
                 }
         }

@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_pr
                                          le32_to_cpu(desc->opts1) & 0xffff,
                                          PCI_DMA_TODEVICE);
                         if (le32_to_cpu(desc->opts1) & LastFrag)
-                                dev_kfree_skb(skb);
+                                dev_kfree_skb_any(skb);
                         cp->dev->stats.tx_dropped++;
                 }
         }
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_pr

         memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
         memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));

         memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
         memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_dev
 {
         struct cp_private *cp = netdev_priv(dev);
         unsigned long flags;
-        int rc;
+        int rc, i;

         netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
                     cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_dev

         spin_lock_irqsave(&cp->lock, flags);

+        netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+                  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+        for (i = 0; i < CP_TX_RING_SIZE; i++) {
+                netif_dbg(cp, tx_err, cp->dev,
+                          "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+                          i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+                          cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+                          le64_to_cpu(cp->tx_ring[i].addr),
+                          cp->tx_skb[i]);
+        }
+
         cp_stop_hw(cp);
         cp_clean_rings(cp);
         rc = cp_init_rings(cp);
         cp_start_hw(cp);
-        cp_enable_irq(cp);
+        __cp_set_rx_mode(dev);
+        cpw16_f(IntrMask, cp_norx_intr_mask);

         netif_wake_queue(dev);
+        napi_schedule(&cp->napi);

         spin_unlock_irqrestore(&cp->lock, flags);
 }