641-netfilter-nf_flow_table-support-hw-offload-through-v.patch 8.7 KB

From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 15 Mar 2018 20:46:31 +0100
Subject: [PATCH] netfilter: nf_flow_table: support hw offload through
 virtual interfaces

There are hardware offload devices that support offloading through VLAN
and PPPoE devices. Additionally, it is useful to be able to offload
packets routed through bridge interfaces as well.

Add support for finding the path to the offload device through these
virtual interfaces, while collecting useful parameters for the offload
device, like VLAN ID/protocol, PPPoE session ID and Ethernet MAC address.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
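[Editor's note, not part of the patch: the ndo_flow_offload_check hook added
below lets a virtual device describe itself in the flow_offload_hw_path and
hand the path off to its lower device. As a rough, hypothetical sketch (the
function name and vlan_dev_priv field accesses are assumptions for
illustration, not taken from this patch), an 802.1Q VLAN driver could
implement the hook roughly like this:

	static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path)
	{
		struct vlan_dev_priv *vlan = vlan_dev_priv(path->dev);

		/* only a single VLAN tag can be described in the path */
		if (path->flags & FLOW_OFFLOAD_PATH_VLAN)
			return -EEXIST;

		path->flags |= FLOW_OFFLOAD_PATH_VLAN;
		path->vlan_proto = vlan->vlan_proto;
		path->vlan_id = vlan->vlan_id;
		path->dev = vlan->real_dev;

		/* let the lower device (e.g. PPPoE or bridge) refine the path */
		if (vlan->real_dev->netdev_ops->ndo_flow_offload_check)
			return vlan->real_dev->netdev_ops->ndo_flow_offload_check(path);

		return 0;
	}

The core below only calls the hook once, on the input device of each
direction; walking down to the real device is the driver's responsibility.]
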
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -828,6 +828,7 @@ struct xfrmdev_ops {
 #endif
 
 struct flow_offload;
+struct flow_offload_hw_path;
 
 enum flow_offload_type {
 	FLOW_OFFLOAD_ADD	= 0,
@@ -1065,8 +1066,15 @@ enum flow_offload_type {
  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
  *			     u16 flags);
  *
+ * int (*ndo_flow_offload_check)(struct flow_offload_hw_path *path);
+ *	For virtual devices like bridges, vlan, and pppoe, fill in the
+ *	underlying network device that can be used for offloading connections.
+ *	Return an error if offloading is not supported.
+ *
  * int (*ndo_flow_offload)(enum flow_offload_type type,
- *			   struct flow_offload *flow);
+ *			   struct flow_offload *flow,
+ *			   struct flow_offload_hw_path *src,
+ *			   struct flow_offload_hw_path *dest);
  *	Adds/deletes flow entry to/from net device flowtable.
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
@@ -1293,8 +1301,11 @@ struct net_device_ops {
 	int			(*ndo_bridge_dellink)(struct net_device *dev,
 						      struct nlmsghdr *nlh,
 						      u16 flags);
+	int			(*ndo_flow_offload_check)(struct flow_offload_hw_path *path);
 	int			(*ndo_flow_offload)(enum flow_offload_type type,
-						    struct flow_offload *flow);
+						    struct flow_offload *flow,
+						    struct flow_offload_hw_path *src,
+						    struct flow_offload_hw_path *dest);
 	int			(*ndo_change_carrier)(struct net_device *dev,
 						      bool new_carrier);
 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -86,6 +86,21 @@ struct flow_offload {
 	};
 };
 
+#define FLOW_OFFLOAD_PATH_ETHERNET	BIT(0)
+#define FLOW_OFFLOAD_PATH_VLAN		BIT(1)
+#define FLOW_OFFLOAD_PATH_PPPOE		BIT(2)
+
+struct flow_offload_hw_path {
+	struct net_device *dev;
+	u32 flags;
+
+	u8 eth_src[ETH_ALEN];
+	u8 eth_dest[ETH_ALEN];
+	u16 vlan_proto;
+	u16 vlan_id;
+	u16 pppoe_sid;
+};
+
 #define NF_FLOW_TIMEOUT (30 * HZ)
 
 struct nf_flow_route {
--- a/net/netfilter/nf_flow_table_hw.c
+++ b/net/netfilter/nf_flow_table_hw.c
@@ -19,48 +19,77 @@ struct flow_offload_hw {
 	enum flow_offload_type	type;
 	struct flow_offload	*flow;
 	struct nf_conn		*ct;
-	possible_net_t		flow_hw_net;
+
+	struct flow_offload_hw_path src;
+	struct flow_offload_hw_path dest;
 };
 
-static int do_flow_offload_hw(struct net *net, struct flow_offload *flow,
-			      int type)
+static void flow_offload_check_ethernet(struct flow_offload_tuple *tuple,
+					struct dst_entry *dst,
+					struct flow_offload_hw_path *path)
 {
-	struct net_device *indev;
-	int ret, ifindex;
+	struct net_device *dev = path->dev;
+	struct neighbour *n;
 
-	ifindex = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx;
-	indev = dev_get_by_index(net, ifindex);
-	if (WARN_ON(!indev))
-		return 0;
-
-	mutex_lock(&nf_flow_offload_hw_mutex);
-	ret = indev->netdev_ops->ndo_flow_offload(type, flow);
-	mutex_unlock(&nf_flow_offload_hw_mutex);
+	if (dev->type != ARPHRD_ETHER)
+		return;
 
-	dev_put(indev);
+	memcpy(path->eth_src, path->dev->dev_addr, ETH_ALEN);
+	n = dst_neigh_lookup(dst, &tuple->src_v4);
+	if (!n)
+		return;
 
-	return ret;
+	memcpy(path->eth_dest, n->ha, ETH_ALEN);
+	path->flags |= FLOW_OFFLOAD_PATH_ETHERNET;
+	neigh_release(n);
 }
 
-static void flow_offload_hw_work_add(struct flow_offload_hw *offload)
+static int flow_offload_check_path(struct net *net,
+				   struct flow_offload_tuple *tuple,
+				   struct dst_entry *dst,
+				   struct flow_offload_hw_path *path)
 {
-	struct net *net;
-	int ret;
+	struct net_device *dev;
 
-	if (nf_ct_is_dying(offload->ct))
-		return;
+	dev = dev_get_by_index_rcu(net, tuple->iifidx);
+	if (!dev)
+		return -ENOENT;
+
+	path->dev = dev;
+	flow_offload_check_ethernet(tuple, dst, path);
 
-	net = read_pnet(&offload->flow_hw_net);
-	ret = do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_ADD);
-	if (ret >= 0)
-		offload->flow->flags |= FLOW_OFFLOAD_HW;
+	if (dev->netdev_ops->ndo_flow_offload_check)
+		return dev->netdev_ops->ndo_flow_offload_check(path);
+
+	return 0;
 }
 
-static void flow_offload_hw_work_del(struct flow_offload_hw *offload)
+static int do_flow_offload_hw(struct flow_offload_hw *offload)
 {
-	struct net *net = read_pnet(&offload->flow_hw_net);
+	struct net_device *src_dev = offload->src.dev;
+	struct net_device *dest_dev = offload->dest.dev;
+	int ret;
+
+	ret = src_dev->netdev_ops->ndo_flow_offload(offload->type,
+						    offload->flow,
+						    &offload->src,
+						    &offload->dest);
+
+	/* restore devices in case the driver mangled them */
+	offload->src.dev = src_dev;
+	offload->dest.dev = dest_dev;
 
-	do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_DEL);
+	return ret;
+}
+
+static void flow_offload_hw_free(struct flow_offload_hw *offload)
+{
+	dev_put(offload->src.dev);
+	dev_put(offload->dest.dev);
+	if (offload->ct)
+		nf_conntrack_put(&offload->ct->ct_general);
+	list_del(&offload->list);
+	kfree(offload);
 }
 
 static void flow_offload_hw_work(struct work_struct *work)
@@ -73,18 +102,22 @@ static void flow_offload_hw_work(struct
 	spin_unlock_bh(&flow_offload_hw_pending_list_lock);
 
 	list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
+		mutex_lock(&nf_flow_offload_hw_mutex);
 		switch (offload->type) {
 		case FLOW_OFFLOAD_ADD:
-			flow_offload_hw_work_add(offload);
+			if (nf_ct_is_dying(offload->ct))
+				break;
+
+			if (do_flow_offload_hw(offload) >= 0)
+				offload->flow->flags |= FLOW_OFFLOAD_HW;
 			break;
 		case FLOW_OFFLOAD_DEL:
-			flow_offload_hw_work_del(offload);
+			do_flow_offload_hw(offload);
 			break;
 		}
 
-		if (offload->ct)
-			nf_conntrack_put(&offload->ct->ct_general);
-		list_del(&offload->list);
-		kfree(offload);
+		mutex_unlock(&nf_flow_offload_hw_mutex);
+
+		flow_offload_hw_free(offload);
 	}
 }
@@ -97,20 +130,56 @@ static void flow_offload_queue_work(stru
 	schedule_work(&nf_flow_offload_hw_work);
 }
 
+static struct flow_offload_hw *
+flow_offload_hw_prepare(struct net *net, struct flow_offload *flow)
+{
+	struct flow_offload_hw_path src = {};
+	struct flow_offload_hw_path dest = {};
+	struct flow_offload_tuple *tuple_s, *tuple_d;
+	struct flow_offload_hw *offload = NULL;
+
+	rcu_read_lock_bh();
+
+	tuple_s = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
+	tuple_d = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
+
+	if (flow_offload_check_path(net, tuple_s, tuple_d->dst_cache, &src))
+		goto out;
+
+	if (flow_offload_check_path(net, tuple_d, tuple_s->dst_cache, &dest))
+		goto out;
+
+	if (!src.dev->netdev_ops->ndo_flow_offload)
+		goto out;
+
+	offload = kzalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
+	if (!offload)
+		goto out;
+
+	dev_hold(src.dev);
+	dev_hold(dest.dev);
+	offload->src = src;
+	offload->dest = dest;
+	offload->flow = flow;
+
+out:
+	rcu_read_unlock_bh();
+
+	return offload;
+}
+
 static void flow_offload_hw_add(struct net *net, struct flow_offload *flow,
 				struct nf_conn *ct)
 {
 	struct flow_offload_hw *offload;
 
-	offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
+	offload = flow_offload_hw_prepare(net, flow);
 	if (!offload)
 		return;
 
 	nf_conntrack_get(&ct->ct_general);
 	offload->type = FLOW_OFFLOAD_ADD;
 	offload->ct = ct;
-	offload->flow = flow;
-	write_pnet(&offload->flow_hw_net, net);
 
 	flow_offload_queue_work(offload);
 }
@@ -119,14 +188,11 @@ static void flow_offload_hw_del(struct n
 {
 	struct flow_offload_hw *offload;
 
-	offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
+	offload = flow_offload_hw_prepare(net, flow);
 	if (!offload)
 		return;
 
 	offload->type = FLOW_OFFLOAD_DEL;
-	offload->ct = NULL;
-	offload->flow = flow;
-	write_pnet(&offload->flow_hw_net, net);
 
 	flow_offload_queue_work(offload);
 }
@@ -153,12 +219,8 @@ static void __exit nf_flow_table_hw_modu
 	nf_flow_table_hw_unregister(&flow_offload_hw);
 	cancel_work_sync(&nf_flow_offload_hw_work);
 
-	list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
-		if (offload->ct)
-			nf_conntrack_put(&offload->ct->ct_general);
-		list_del(&offload->list);
-		kfree(offload);
-	}
+	list_for_each_entry_safe(offload, next, &hw_offload_pending, list)
+		flow_offload_hw_free(offload);
 }
 
 module_init(nf_flow_table_hw_module_init);
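
[Editor's note, not part of the patch: on the driver side, ndo_flow_offload
now receives the two path descriptions built above. A minimal, hypothetical
sketch of how a hardware driver might consume them (all example_* names are
invented for illustration, not defined anywhere in this patch):

	static int example_ndo_flow_offload(enum flow_offload_type type,
					    struct flow_offload *flow,
					    struct flow_offload_hw_path *src,
					    struct flow_offload_hw_path *dest)
	{
		if (type == FLOW_OFFLOAD_DEL)
			return example_hw_flow_del(flow);

		/* Ethernet addresses are only filled in when this flag is set */
		if (!(src->flags & FLOW_OFFLOAD_PATH_ETHERNET) ||
		    !(dest->flags & FLOW_OFFLOAD_PATH_ETHERNET))
			return -EOPNOTSUPP;

		/* program encapsulation on the egress path when requested */
		if (dest->flags & FLOW_OFFLOAD_PATH_VLAN)
			example_hw_set_vlan(dest->vlan_proto, dest->vlan_id);
		if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE)
			example_hw_set_pppoe(dest->pppoe_sid);

		return example_hw_flow_add(flow, src, dest);
	}

Note that do_flow_offload_hw() above treats any return value >= 0 from the
ADD call as success and sets FLOW_OFFLOAD_HW on the flow.]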