Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

net: core: Split out code to run generic XDP prog

This helper can later be utilized in code that runs cpumap and devmap
programs in generic redirect mode and adjusts the skb based on changes
made to the xdp_buff.

When returning XDP_REDIRECT/XDP_TX, it invokes __skb_push, so whenever a
generic redirect path invokes a devmap/cpumap prog (if set), it must
__skb_pull again, as we expect the mac header to be pulled.

It also drops the skb_reset_mac_len call after do_xdp_generic, as the
mac_header and network_header are advanced by the same offset, so the
difference (mac_len) remains constant.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20210702111825.491065-2-memxor@gmail.com

authored by

Kumar Kartikeya Dwivedi and committed by
Alexei Starovoitov
fe21cb91 a080cdcc

+55 -31
+2
include/linux/netdevice.h
··· 3984 3984 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); 3985 3985 } 3986 3986 3987 + u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 3988 + struct bpf_prog *xdp_prog); 3987 3989 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); 3988 3990 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); 3989 3991 int netif_rx(struct sk_buff *skb);
+53 -31
net/core/dev.c
··· 4744 4744 return rxqueue; 4745 4745 } 4746 4746 4747 - static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4748 - struct xdp_buff *xdp, 4749 - struct bpf_prog *xdp_prog) 4747 + u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4748 + struct bpf_prog *xdp_prog) 4750 4749 { 4751 4750 void *orig_data, *orig_data_end, *hard_start; 4752 4751 struct netdev_rx_queue *rxqueue; 4753 - u32 metalen, act = XDP_DROP; 4754 4752 bool orig_bcast, orig_host; 4755 4753 u32 mac_len, frame_sz; 4756 4754 __be16 orig_eth_type; 4757 4755 struct ethhdr *eth; 4756 + u32 metalen, act; 4758 4757 int off; 4759 - 4760 - /* Reinjected packets coming from act_mirred or similar should 4761 - * not get XDP generic processing. 4762 - */ 4763 - if (skb_is_redirected(skb)) 4764 - return XDP_PASS; 4765 - 4766 - /* XDP packets must be linear and must have sufficient headroom 4767 - * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4768 - * native XDP provides, thus we need to do it here as well. 4769 - */ 4770 - if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4771 - skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4772 - int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4773 - int troom = skb->tail + skb->data_len - skb->end; 4774 - 4775 - /* In case we have to go down the path and also linearize, 4776 - * then lets do the pskb_expand_head() work just once here. 4777 - */ 4778 - if (pskb_expand_head(skb, 4779 - hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4780 - troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 4781 - goto do_drop; 4782 - if (skb_linearize(skb)) 4783 - goto do_drop; 4784 - } 4785 4758 4786 4759 /* The XDP program wants to see the packet starting at the MAC 4787 4760 * header. ··· 4810 4837 skb->protocol = eth_type_trans(skb, skb->dev); 4811 4838 } 4812 4839 4840 + /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 4841 + * before calling us again on redirect path. 
We do not call do_redirect 4842 + * as we leave that up to the caller. 4843 + * 4844 + * Caller is responsible for managing lifetime of skb (i.e. calling 4845 + * kfree_skb in response to actions it cannot handle/XDP_DROP). 4846 + */ 4813 4847 switch (act) { 4814 4848 case XDP_REDIRECT: 4815 4849 case XDP_TX: ··· 4826 4846 metalen = xdp->data - xdp->data_meta; 4827 4847 if (metalen) 4828 4848 skb_metadata_set(skb, metalen); 4849 + break; 4850 + } 4851 + 4852 + return act; 4853 + } 4854 + 4855 + static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4856 + struct xdp_buff *xdp, 4857 + struct bpf_prog *xdp_prog) 4858 + { 4859 + u32 act = XDP_DROP; 4860 + 4861 + /* Reinjected packets coming from act_mirred or similar should 4862 + * not get XDP generic processing. 4863 + */ 4864 + if (skb_is_redirected(skb)) 4865 + return XDP_PASS; 4866 + 4867 + /* XDP packets must be linear and must have sufficient headroom 4868 + * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4869 + * native XDP provides, thus we need to do it here as well. 4870 + */ 4871 + if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4872 + skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4873 + int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4874 + int troom = skb->tail + skb->data_len - skb->end; 4875 + 4876 + /* In case we have to go down the path and also linearize, 4877 + * then lets do the pskb_expand_head() work just once here. 4878 + */ 4879 + if (pskb_expand_head(skb, 4880 + hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4881 + troom > 0 ? 
troom + 128 : 0, GFP_ATOMIC)) 4882 + goto do_drop; 4883 + if (skb_linearize(skb)) 4884 + goto do_drop; 4885 + } 4886 + 4887 + act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog); 4888 + switch (act) { 4889 + case XDP_REDIRECT: 4890 + case XDP_TX: 4891 + case XDP_PASS: 4829 4892 break; 4830 4893 default: 4831 4894 bpf_warn_invalid_xdp_action(act); ··· 5335 5312 ret = NET_RX_DROP; 5336 5313 goto out; 5337 5314 } 5338 - skb_reset_mac_len(skb); 5339 5315 } 5340 5316 5341 5317 if (eth_type_vlan(skb->protocol)) {