1
0

620-net_sched-codel-do-not-defer-queue-length-update.patch 2.9 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586
  1. From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
  2. Date: Mon, 21 Aug 2017 11:14:14 +0300
  3. Subject: [PATCH] net_sched/codel: do not defer queue length update
  4. When codel wants to drop the last packet in ->dequeue() it cannot call
  5. qdisc_tree_reduce_backlog() right away - it would notify the parent qdisc
  6. about a zero qlen and HTB/HFSC would deactivate the class. The same class
  7. would then be deactivated a second time by the caller of ->dequeue().
  8. Currently codel and fq_codel defer the update. This triggers a warning in
  9. HFSC when its qlen != 0 but there are no active classes.
  10. This patch updates the parent queue length immediately: it just temporarily
  11. increases qlen around qdisc_tree_reduce_backlog() to prevent the first class
  12. deactivation if we have an skb to return.
  13. This might open another problem in HFSC - now the peek operation could fail
  14. and deactivate the parent class.
  15. Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
  16. Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581
  17. ---
  18. --- a/net/sched/sch_codel.c
  19. +++ b/net/sched/sch_codel.c
  20. @@ -79,11 +79,17 @@ static struct sk_buff *codel_qdisc_deque
  21. skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
  22. - /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
  23. - * or HTB crashes. Defer it for next round.
  24. + /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
  25. + * parent class, dequeue in parent qdisc will do the same if we
  26. + * return skb. Temporary increment qlen if we have skb.
  27. */
  28. - if (q->stats.drop_count && sch->q.qlen) {
  29. - qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
  30. + if (q->stats.drop_count) {
  31. + if (skb)
  32. + sch->q.qlen++;
  33. + qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
  34. + q->stats.drop_len);
  35. + if (skb)
  36. + sch->q.qlen--;
  37. q->stats.drop_count = 0;
  38. q->stats.drop_len = 0;
  39. }
  40. --- a/net/sched/sch_fq_codel.c
  41. +++ b/net/sched/sch_fq_codel.c
  42. @@ -311,6 +311,21 @@ begin:
  43. flow->dropped += q->cstats.drop_count - prev_drop_count;
  44. flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
  45. + /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
  46. + * parent class, dequeue in parent qdisc will do the same if we
  47. + * return skb. Temporary increment qlen if we have skb.
  48. + */
  49. + if (q->cstats.drop_count) {
  50. + if (skb)
  51. + sch->q.qlen++;
  52. + qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
  53. + q->cstats.drop_len);
  54. + if (skb)
  55. + sch->q.qlen--;
  56. + q->cstats.drop_count = 0;
  57. + q->cstats.drop_len = 0;
  58. + }
  59. +
  60. if (!skb) {
  61. /* force a pass through old_flows to prevent starvation */
  62. if ((head == &q->new_flows) && !list_empty(&q->old_flows))
  63. @@ -321,15 +336,6 @@ begin:
  64. }
  65. qdisc_bstats_update(sch, skb);
  66. flow->deficit -= qdisc_pkt_len(skb);
  67. - /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
  68. - * or HTB crashes. Defer it for next round.
  69. - */
  70. - if (q->cstats.drop_count && sch->q.qlen) {
  71. - qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
  72. - q->cstats.drop_len);
  73. - q->cstats.drop_count = 0;
  74. - q->cstats.drop_len = 0;
  75. - }
  76. return skb;
  77. }