320-v4.16-netfilter-nf_conntrack-add-IPS_OFFLOAD-status-bit.patch 5.4 KB

From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Sun, 7 Jan 2018 01:03:56 +0100
Subject: [PATCH] netfilter: nf_conntrack: add IPS_OFFLOAD status bit

This new bit tells us that the conntrack entry is owned by the flow
table offload infrastructure.

  # cat /proc/net/nf_conntrack
  ipv4     2 tcp      6 src=10.141.10.2 dst=147.75.205.195 sport=36392 dport=443 src=147.75.205.195 dst=192.168.2.195 sport=443 dport=36392 [OFFLOAD] mark=0 zone=0 use=2

Note the [OFFLOAD] tag in the listing.

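For illustration only (not part of this patch): a consumer such as the
flow table code would claim an entry roughly as sketched below. The
helper name flow_offload_take_ct() is made up for this example; only
the set_bit() on IPS_OFFLOAD_BIT reflects what this patch introduces.

  /* Hypothetical sketch of how an offload consumer takes ownership. */
  static void flow_offload_take_ct(struct nf_conn *ct)
  {
  	nf_conntrack_get(&ct->ct_general);	/* keep the entry pinned */
  	set_bit(IPS_OFFLOAD_BIT, &ct->status);	/* now listed as [OFFLOAD] */
  }
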
The timer of such conntrack entries looks stopped from userspace. In
practice, to make sure the conntrack entry does not go away, the
conntrack timer is periodically set to an arbitrarily large value that
gets refreshed on every iteration of the garbage collector, so it never
expires, and these entries display no internal state in the case of TCP
flows. This allows us to save a bit check in the packet path, since
nf_ct_is_expired() keeps working unmodified.

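For reference, the packet-path check this keeps cheap is the plain
timeout comparison in include/net/netfilter/nf_conntrack.h, which in
the kernels this backport targets is essentially:

  static inline bool nf_ct_is_expired(const struct nf_conn *ct)
  {
  	return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
  }

Because the garbage collector keeps ct->timeout of offloaded entries
far in the future, this comparison never triggers and no per-packet
IPS_OFFLOAD test is needed.
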
Conntrack entries that have been offloaded to the flow table
infrastructure cannot be deleted/flushed via ctnetlink. The flow table
infrastructure is also responsible for releasing this conntrack entry.

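Purely as an illustration (the real logic lives in the flow table
patches that follow in this series): when the owner hands an entry back
to classic conntrack, it has to clear the bit and re-arm a finite
timeout, along the lines of the hypothetical helper below. The function
name and the 30 second timeout are arbitrary example values.

  static void flow_offload_release_ct(struct nf_conn *ct)
  {
  	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
  	ct->timeout = nfct_time_stamp + 30 * HZ;	/* expire normally again */
  	nf_ct_put(ct);					/* drop the offload reference */
  }
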
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -101,12 +101,16 @@ enum ip_conntrack_status {
 	IPS_HELPER_BIT = 13,
 	IPS_HELPER = (1 << IPS_HELPER_BIT),
 
+	/* Conntrack has been offloaded to flow table. */
+	IPS_OFFLOAD_BIT = 14,
+	IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT),
+
 	/* Be careful here, modifying these bits can make things messy,
 	 * so don't let users modify them directly.
 	 */
 	IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
 				 IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
-				 IPS_SEQ_ADJUST | IPS_TEMPLATE),
+				 IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
 
 	__IPS_MAX_BIT = 14,
 };
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -975,6 +975,9 @@ static unsigned int early_drop_list(stru
 	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
 		tmp = nf_ct_tuplehash_to_ctrack(h);
 
+		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
+			continue;
+
 		if (nf_ct_is_expired(tmp)) {
 			nf_ct_gc_expired(tmp);
 			continue;
@@ -1052,6 +1055,18 @@ static bool gc_worker_can_early_drop(con
 	return false;
 }
 
+#define	DAY	(86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire, this save
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+	if (nf_ct_expires(ct) < DAY / 2)
+		ct->timeout = nfct_time_stamp + DAY;
+}
+
 static void gc_worker(struct work_struct *work)
 {
 	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
@@ -1088,6 +1103,11 @@ static void gc_worker(struct work_struct
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 
 			scanned++;
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+				nf_ct_offload_timeout(tmp);
+				continue;
+			}
+
 			if (nf_ct_is_expired(tmp)) {
 				nf_ct_gc_expired(tmp);
 				expired_count++;
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1125,6 +1125,14 @@ static const struct nla_policy ct_nla_po
 				    .len = NF_CT_LABELS_MAX_SIZE },
 };
 
+static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
+{
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return 0;
+
+	return ctnetlink_filter_match(ct, data);
+}
+
 static int ctnetlink_flush_conntrack(struct net *net,
 				     const struct nlattr * const cda[],
 				     u32 portid, int report)
@@ -1137,7 +1145,7 @@ static int ctnetlink_flush_conntrack(str
 		return PTR_ERR(filter);
 	}
 
-	nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter,
+	nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
 				  portid, report);
 	kfree(filter);
 
@@ -1183,6 +1191,11 @@ static int ctnetlink_del_conntrack(struc
 
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
+		nf_ct_put(ct);
+		return -EBUSY;
+	}
+
 	if (cda[CTA_ID]) {
 		__be32 id = nla_get_be32(cda[CTA_ID]);
 
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -305,6 +305,9 @@ static bool tcp_invert_tuple(struct nf_c
 /* Print out the private part of the conntrack. */
 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 {
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return;
+
 	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
 }
 #endif
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -310,10 +310,12 @@ static int ct_seq_show(struct seq_file *
 	WARN_ON(!l4proto);
 
 	ret = -ENOSPC;
-	seq_printf(s, "%-8s %u %-8s %u %ld ",
+	seq_printf(s, "%-8s %u %-8s %u ",
 		   l3proto_name(l3proto->l3proto), nf_ct_l3num(ct),
-		   l4proto_name(l4proto->l4proto), nf_ct_protonum(ct),
-		   nf_ct_expires(ct) / HZ);
+		   l4proto_name(l4proto->l4proto), nf_ct_protonum(ct));
+
+	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		seq_printf(s, "%ld ", nf_ct_expires(ct) / HZ);
 
 	if (l4proto->print_conntrack)
 		l4proto->print_conntrack(s, ct);
@@ -340,7 +342,9 @@ static int ct_seq_show(struct seq_file *
 	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
 		goto release;
 
-	if (test_bit(IPS_ASSURED_BIT, &ct->status))
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		seq_puts(s, "[OFFLOAD] ");
+	else if (test_bit(IPS_ASSURED_BIT, &ct->status))
 		seq_puts(s, "[ASSURED] ");
 
 	if (seq_has_overflowed(s))