1
0

035-fq_codel-fix-NET_XMIT_CN-behavior.patch 2.6 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970
  1. From: Eric Dumazet <edumazet@google.com>
  2. Date: Sat, 4 Jun 2016 12:55:13 -0700
  3. Subject: [PATCH] fq_codel: fix NET_XMIT_CN behavior
  4. My prior attempt to fix the backlogs of parents failed.
  5. If we return NET_XMIT_CN, our parents won't increase their backlog,
  6. so our qdisc_tree_reduce_backlog() should take this into account.
  7. v2: Florian Westphal pointed out that we could drop the packet,
  8. so we need to save qdisc_pkt_len(skb) in a temp variable before
  9. calling fq_codel_drop()
  10. Fixes: 9d18562a2278 ("fq_codel: add batch ability to fq_codel_drop()")
  11. Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
  12. Reported-by: Stas Nichiporovich <stasn77@gmail.com>
  13. Signed-off-by: Eric Dumazet <edumazet@google.com>
  14. Cc: WANG Cong <xiyou.wangcong@gmail.com>
  15. Cc: Jamal Hadi Salim <jhs@mojatatu.com>
  16. ---
  17. --- a/net/sched/sch_fq_codel.c
  18. +++ b/net/sched/sch_fq_codel.c
  19. @@ -197,6 +197,7 @@ static int fq_codel_enqueue(struct sk_bu
  20. unsigned int idx, prev_backlog, prev_qlen;
  21. struct fq_codel_flow *flow;
  22. int uninitialized_var(ret);
  23. + unsigned int pkt_len;
  24. bool memory_limited;
  25. idx = fq_codel_classify(skb, sch, &ret);
  26. @@ -228,6 +229,8 @@ static int fq_codel_enqueue(struct sk_bu
  27. prev_backlog = sch->qstats.backlog;
  28. prev_qlen = sch->q.qlen;
  29. + /* save this packet length as it might be dropped by fq_codel_drop() */
  30. + pkt_len = qdisc_pkt_len(skb);
  31. /* fq_codel_drop() is quite expensive, as it performs a linear search
  32. * in q->backlogs[] to find a fat flow.
  33. * So instead of dropping a single packet, drop half of its backlog
  34. @@ -235,14 +238,23 @@ static int fq_codel_enqueue(struct sk_bu
  35. */
  36. ret = fq_codel_drop(sch, q->drop_batch_size);
  37. - q->drop_overlimit += prev_qlen - sch->q.qlen;
  38. + prev_qlen -= sch->q.qlen;
  39. + prev_backlog -= sch->qstats.backlog;
  40. + q->drop_overlimit += prev_qlen;
  41. if (memory_limited)
  42. - q->drop_overmemory += prev_qlen - sch->q.qlen;
  43. - /* As we dropped packet(s), better let upper stack know this */
  44. - qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
  45. - prev_backlog - sch->qstats.backlog);
  46. + q->drop_overmemory += prev_qlen;
  47. - return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
  48. + /* As we dropped packet(s), better let upper stack know this.
  49. + * If we dropped a packet for this flow, return NET_XMIT_CN,
  50. + * but in this case, our parents wont increase their backlogs.
  51. + */
  52. + if (ret == idx) {
  53. + qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
  54. + prev_backlog - pkt_len);
  55. + return NET_XMIT_CN;
  56. + }
  57. + qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
  58. + return NET_XMIT_SUCCESS;
  59. }
  60. /* This is the specific function called from codel_dequeue()