Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'nf-next-26-01-29' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Florian Westphal says:

====================
netfilter: updates for net-next

The following patchset contains Netfilter updates for *net-next*:

Patches 1 to 4 add IP6IP6 tunneling acceleration to the flowtable
infrastructure. Patch 5 extends test coverage for this.
From Lorenzo Bianconi.

Patch 6 removes a duplicated helper from xt_time extension, we can
use an existing helper for this, from Jinjie Ruan.

Patch 7 adds an rhashtable to nfnetlink_queue to speed up out-of-order
verdict processing. Before this, a list walk was required due to the
in-order design assumption.

netfilter pull request nf-next-26-01-29

* tag 'nf-next-26-01-29' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next:
netfilter: nfnetlink_queue: optimize verdict lookup with hash table
netfilter: xt_time: use is_leap_year() helper
selftests: netfilter: nft_flowtable.sh: Add IP6IP6 flowtable selftest
netfilter: flowtable: Add IP6IP6 tx sw acceleration
netfilter: flowtable: Add IP6IP6 rx sw acceleration
netfilter: Introduce tunnel metadata info in nf_flowtable_ctx struct
netfilter: Add ctx pointer in nf_flow_skb_encap_protocol/nf_flow_ip4_tunnel_proto signature
====================

Link: https://patch.msgid.link/20260129105427.12494-1-fw@strlen.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+408 -81
+3
include/net/netfilter/nf_queue.h
··· 6 6 #include <linux/ipv6.h> 7 7 #include <linux/jhash.h> 8 8 #include <linux/netfilter.h> 9 + #include <linux/rhashtable-types.h> 9 10 #include <linux/skbuff.h> 10 11 11 12 /* Each queued (to userspace) skbuff has one of these. */ 12 13 struct nf_queue_entry { 13 14 struct list_head list; 15 + struct rhash_head hash_node; 14 16 struct sk_buff *skb; 15 17 unsigned int id; 16 18 unsigned int hook_index; /* index in hook_entries->hook[] */ ··· 22 20 #endif 23 21 struct nf_hook_state state; 24 22 u16 size; /* sizeof(entry) + saved route keys */ 23 + u16 queue_num; 25 24 26 25 /* extra space to store route keys */ 27 26 };
+27
net/ipv6/ip6_tunnel.c
··· 1828 1828 } 1829 1829 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup); 1830 1830 1831 + static int ip6_tnl_fill_forward_path(struct net_device_path_ctx *ctx, 1832 + struct net_device_path *path) 1833 + { 1834 + struct ip6_tnl *t = netdev_priv(ctx->dev); 1835 + struct flowi6 fl6 = { 1836 + .daddr = t->parms.raddr, 1837 + }; 1838 + struct dst_entry *dst; 1839 + int err; 1840 + 1841 + dst = ip6_route_output(dev_net(ctx->dev), NULL, &fl6); 1842 + if (!dst->error) { 1843 + path->type = DEV_PATH_TUN; 1844 + path->tun.src_v6 = t->parms.laddr; 1845 + path->tun.dst_v6 = t->parms.raddr; 1846 + path->tun.l3_proto = IPPROTO_IPV6; 1847 + path->dev = ctx->dev; 1848 + ctx->dev = dst->dev; 1849 + } 1850 + 1851 + err = dst->error; 1852 + dst_release(dst); 1853 + 1854 + return err; 1855 + } 1856 + 1831 1857 static const struct net_device_ops ip6_tnl_netdev_ops = { 1832 1858 .ndo_init = ip6_tnl_dev_init, 1833 1859 .ndo_uninit = ip6_tnl_dev_uninit, ··· 1862 1836 .ndo_change_mtu = ip6_tnl_change_mtu, 1863 1837 .ndo_get_stats64 = dev_get_tstats64, 1864 1838 .ndo_get_iflink = ip6_tnl_get_iflink, 1839 + .ndo_fill_forward_path = ip6_tnl_fill_forward_path, 1865 1840 }; 1866 1841 1867 1842 #define IPXIPX_FEATURES (NETIF_F_SG | \
+207 -36
net/netfilter/nf_flow_table_ip.c
··· 14 14 #include <net/ip.h> 15 15 #include <net/ipv6.h> 16 16 #include <net/ip6_route.h> 17 + #include <net/ip6_tunnel.h> 17 18 #include <net/neighbour.h> 18 19 #include <net/netfilter/nf_flow_table.h> 19 20 #include <net/netfilter/nf_conntrack_acct.h> ··· 145 144 return thoff != sizeof(struct iphdr); 146 145 } 147 146 148 - static void nf_flow_tuple_encap(struct sk_buff *skb, 147 + struct nf_flowtable_ctx { 148 + const struct net_device *in; 149 + u32 offset; 150 + u32 hdrsize; 151 + struct { 152 + /* Tunnel IP header size */ 153 + u32 hdr_size; 154 + /* IP tunnel protocol */ 155 + u8 proto; 156 + } tun; 157 + }; 158 + 159 + static void nf_flow_tuple_encap(struct nf_flowtable_ctx *ctx, 160 + struct sk_buff *skb, 149 161 struct flow_offload_tuple *tuple) 150 162 { 151 163 __be16 inner_proto = skb->protocol; 152 164 struct vlan_ethhdr *veth; 153 165 struct pppoe_hdr *phdr; 166 + struct ipv6hdr *ip6h; 154 167 struct iphdr *iph; 155 168 u16 offset = 0; 156 169 int i = 0; ··· 191 176 break; 192 177 } 193 178 194 - if (inner_proto == htons(ETH_P_IP)) { 179 + switch (inner_proto) { 180 + case htons(ETH_P_IP): 195 181 iph = (struct iphdr *)(skb_network_header(skb) + offset); 196 - if (iph->protocol == IPPROTO_IPIP) { 182 + if (ctx->tun.proto == IPPROTO_IPIP) { 197 183 tuple->tun.dst_v4.s_addr = iph->daddr; 198 184 tuple->tun.src_v4.s_addr = iph->saddr; 199 185 tuple->tun.l3_proto = IPPROTO_IPIP; 200 186 } 187 + break; 188 + case htons(ETH_P_IPV6): 189 + ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset); 190 + if (ctx->tun.proto == IPPROTO_IPV6) { 191 + tuple->tun.dst_v6 = ip6h->daddr; 192 + tuple->tun.src_v6 = ip6h->saddr; 193 + tuple->tun.l3_proto = IPPROTO_IPV6; 194 + } 195 + break; 196 + default: 197 + break; 201 198 } 202 199 } 203 - 204 - struct nf_flowtable_ctx { 205 - const struct net_device *in; 206 - u32 offset; 207 - u32 hdrsize; 208 - }; 209 200 210 201 static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb, 211 202 struct 
flow_offload_tuple *tuple) ··· 280 259 tuple->l3proto = AF_INET; 281 260 tuple->l4proto = ipproto; 282 261 tuple->iifidx = ctx->in->ifindex; 283 - nf_flow_tuple_encap(skb, tuple); 262 + nf_flow_tuple_encap(ctx, skb, tuple); 284 263 285 264 return 0; 286 265 } ··· 316 295 return NF_STOLEN; 317 296 } 318 297 319 - static bool nf_flow_ip4_tunnel_proto(struct sk_buff *skb, u32 *psize) 298 + static bool nf_flow_ip4_tunnel_proto(struct nf_flowtable_ctx *ctx, 299 + struct sk_buff *skb) 320 300 { 321 301 struct iphdr *iph; 322 302 u16 size; 323 303 324 - if (!pskb_may_pull(skb, sizeof(*iph) + *psize)) 304 + if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset)) 325 305 return false; 326 306 327 - iph = (struct iphdr *)(skb_network_header(skb) + *psize); 307 + iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset); 328 308 size = iph->ihl << 2; 329 309 330 310 if (ip_is_fragment(iph) || unlikely(ip_has_options(size))) ··· 334 312 if (iph->ttl <= 1) 335 313 return false; 336 314 337 - if (iph->protocol == IPPROTO_IPIP) 338 - *psize += size; 315 + if (iph->protocol == IPPROTO_IPIP) { 316 + ctx->tun.proto = IPPROTO_IPIP; 317 + ctx->tun.hdr_size = size; 318 + ctx->offset += size; 319 + } 339 320 340 321 return true; 341 322 } 342 323 343 - static void nf_flow_ip4_tunnel_pop(struct sk_buff *skb) 324 + static bool nf_flow_ip6_tunnel_proto(struct nf_flowtable_ctx *ctx, 325 + struct sk_buff *skb) 344 326 { 345 - struct iphdr *iph = (struct iphdr *)skb_network_header(skb); 327 + #if IS_ENABLED(CONFIG_IPV6) 328 + struct ipv6hdr *ip6h, _ip6h; 329 + __be16 frag_off; 330 + u8 nexthdr; 331 + int hdrlen; 346 332 347 - if (iph->protocol != IPPROTO_IPIP) 333 + ip6h = skb_header_pointer(skb, ctx->offset, sizeof(*ip6h), &_ip6h); 334 + if (!ip6h) 335 + return false; 336 + 337 + if (ip6h->hop_limit <= 1) 338 + return false; 339 + 340 + nexthdr = ip6h->nexthdr; 341 + hdrlen = ipv6_skip_exthdr(skb, sizeof(*ip6h) + ctx->offset, &nexthdr, 342 + &frag_off); 343 + if (hdrlen < 0) 344 + return 
false; 345 + 346 + if (nexthdr == IPPROTO_IPV6) { 347 + ctx->tun.hdr_size = hdrlen; 348 + ctx->tun.proto = IPPROTO_IPV6; 349 + } 350 + ctx->offset += ctx->tun.hdr_size; 351 + 352 + return true; 353 + #else 354 + return false; 355 + #endif /* IS_ENABLED(CONFIG_IPV6) */ 356 + } 357 + 358 + static void nf_flow_ip_tunnel_pop(struct nf_flowtable_ctx *ctx, 359 + struct sk_buff *skb) 360 + { 361 + if (ctx->tun.proto != IPPROTO_IPIP && 362 + ctx->tun.proto != IPPROTO_IPV6) 348 363 return; 349 364 350 - skb_pull(skb, iph->ihl << 2); 365 + skb_pull(skb, ctx->tun.hdr_size); 351 366 skb_reset_network_header(skb); 352 367 } 353 368 354 - static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto, 355 - u32 *offset) 369 + static bool nf_flow_skb_encap_protocol(struct nf_flowtable_ctx *ctx, 370 + struct sk_buff *skb, __be16 proto) 356 371 { 357 372 __be16 inner_proto = skb->protocol; 358 373 struct vlan_ethhdr *veth; ··· 402 343 403 344 veth = (struct vlan_ethhdr *)skb_mac_header(skb); 404 345 if (veth->h_vlan_encapsulated_proto == proto) { 405 - *offset += VLAN_HLEN; 346 + ctx->offset += VLAN_HLEN; 406 347 inner_proto = proto; 407 348 ret = true; 408 349 } ··· 410 351 case htons(ETH_P_PPP_SES): 411 352 if (nf_flow_pppoe_proto(skb, &inner_proto) && 412 353 inner_proto == proto) { 413 - *offset += PPPOE_SES_HLEN; 354 + ctx->offset += PPPOE_SES_HLEN; 414 355 ret = true; 415 356 } 416 357 break; 417 358 } 418 359 419 - if (inner_proto == htons(ETH_P_IP)) 420 - ret = nf_flow_ip4_tunnel_proto(skb, offset); 360 + switch (inner_proto) { 361 + case htons(ETH_P_IP): 362 + ret = nf_flow_ip4_tunnel_proto(ctx, skb); 363 + break; 364 + case htons(ETH_P_IPV6): 365 + ret = nf_flow_ip6_tunnel_proto(ctx, skb); 366 + break; 367 + default: 368 + break; 369 + } 421 370 422 371 return ret; 423 372 } 424 373 425 - static void nf_flow_encap_pop(struct sk_buff *skb, 374 + static void nf_flow_encap_pop(struct nf_flowtable_ctx *ctx, 375 + struct sk_buff *skb, 426 376 struct 
flow_offload_tuple_rhash *tuplehash) 427 377 { 428 378 struct vlan_hdr *vlan_hdr; ··· 457 389 } 458 390 } 459 391 460 - if (skb->protocol == htons(ETH_P_IP)) 461 - nf_flow_ip4_tunnel_pop(skb); 392 + if (skb->protocol == htons(ETH_P_IP) || 393 + skb->protocol == htons(ETH_P_IPV6)) 394 + nf_flow_ip_tunnel_pop(ctx, skb); 462 395 } 463 396 464 397 struct nf_flow_xmit { ··· 485 416 { 486 417 struct flow_offload_tuple tuple = {}; 487 418 488 - if (!nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset)) 419 + if (!nf_flow_skb_encap_protocol(ctx, skb, htons(ETH_P_IP))) 489 420 return NULL; 490 421 491 422 if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0) ··· 529 460 530 461 flow_offload_refresh(flow_table, flow, false); 531 462 532 - nf_flow_encap_pop(skb, tuplehash); 463 + nf_flow_encap_pop(ctx, skb, tuplehash); 533 464 thoff -= ctx->offset; 534 465 535 466 iph = ip_hdr(skb); ··· 634 565 { 635 566 if (tuple->tun_num) 636 567 return nf_flow_tunnel_ipip_push(net, skb, tuple, ip_daddr); 568 + 569 + return 0; 570 + } 571 + 572 + struct ipv6_tel_txoption { 573 + struct ipv6_txoptions ops; 574 + __u8 dst_opt[8]; 575 + }; 576 + 577 + static int nf_flow_tunnel_ip6ip6_push(struct net *net, struct sk_buff *skb, 578 + struct flow_offload_tuple *tuple, 579 + struct in6_addr **ip6_daddr, 580 + int encap_limit) 581 + { 582 + struct ipv6hdr *ip6h = (struct ipv6hdr *)skb_network_header(skb); 583 + u8 hop_limit = ip6h->hop_limit, proto = IPPROTO_IPV6; 584 + struct rtable *rt = dst_rtable(tuple->dst_cache); 585 + __u8 dsfield = ipv6_get_dsfield(ip6h); 586 + struct flowi6 fl6 = { 587 + .daddr = tuple->tun.src_v6, 588 + .saddr = tuple->tun.dst_v6, 589 + .flowi6_proto = proto, 590 + }; 591 + int err, mtu; 592 + u32 headroom; 593 + 594 + err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6); 595 + if (err) 596 + return err; 597 + 598 + skb_set_inner_ipproto(skb, proto); 599 + headroom = sizeof(*ip6h) + LL_RESERVED_SPACE(rt->dst.dev) + 600 + rt->dst.header_len; 601 + if (encap_limit) 602 + 
headroom += 8; 603 + err = skb_cow_head(skb, headroom); 604 + if (err) 605 + return err; 606 + 607 + skb_scrub_packet(skb, true); 608 + mtu = dst_mtu(&rt->dst) - sizeof(*ip6h); 609 + if (encap_limit) 610 + mtu -= 8; 611 + mtu = max(mtu, IPV6_MIN_MTU); 612 + skb_dst_update_pmtu_no_confirm(skb, mtu); 613 + 614 + if (encap_limit > 0) { 615 + struct ipv6_tel_txoption opt = { 616 + .dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT, 617 + .dst_opt[3] = 1, 618 + .dst_opt[4] = encap_limit, 619 + .dst_opt[5] = IPV6_TLV_PADN, 620 + .dst_opt[6] = 1, 621 + }; 622 + struct ipv6_opt_hdr *hopt; 623 + 624 + opt.ops.dst1opt = (struct ipv6_opt_hdr *)opt.dst_opt; 625 + opt.ops.opt_nflen = 8; 626 + 627 + hopt = skb_push(skb, ipv6_optlen(opt.ops.dst1opt)); 628 + memcpy(hopt, opt.ops.dst1opt, ipv6_optlen(opt.ops.dst1opt)); 629 + hopt->nexthdr = IPPROTO_IPV6; 630 + proto = NEXTHDR_DEST; 631 + } 632 + 633 + skb_push(skb, sizeof(*ip6h)); 634 + skb_reset_network_header(skb); 635 + 636 + ip6h = ipv6_hdr(skb); 637 + ip6_flow_hdr(ip6h, dsfield, 638 + ip6_make_flowlabel(net, skb, fl6.flowlabel, true, &fl6)); 639 + ip6h->hop_limit = hop_limit; 640 + ip6h->nexthdr = proto; 641 + ip6h->daddr = tuple->tun.src_v6; 642 + ip6h->saddr = tuple->tun.dst_v6; 643 + ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(*ip6h)); 644 + IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 645 + 646 + *ip6_daddr = &tuple->tun.src_v6; 647 + 648 + return 0; 649 + } 650 + 651 + static int nf_flow_tunnel_v6_push(struct net *net, struct sk_buff *skb, 652 + struct flow_offload_tuple *tuple, 653 + struct in6_addr **ip6_daddr, 654 + int encap_limit) 655 + { 656 + if (tuple->tun_num) 657 + return nf_flow_tunnel_ip6ip6_push(net, skb, tuple, ip6_daddr, 658 + encap_limit); 637 659 638 660 return 0; 639 661 } ··· 998 838 tuple->l3proto = AF_INET6; 999 839 tuple->l4proto = nexthdr; 1000 840 tuple->iifidx = ctx->in->ifindex; 1001 - nf_flow_tuple_encap(skb, tuple); 841 + nf_flow_tuple_encap(ctx, skb, tuple); 1002 842 1003 843 return 0; 
1004 844 } ··· 1006 846 static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx, 1007 847 struct nf_flowtable *flow_table, 1008 848 struct flow_offload_tuple_rhash *tuplehash, 1009 - struct sk_buff *skb) 849 + struct sk_buff *skb, int encap_limit) 1010 850 { 1011 851 enum flow_offload_tuple_dir dir; 1012 852 struct flow_offload *flow; ··· 1017 857 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); 1018 858 1019 859 mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset; 860 + if (flow->tuplehash[!dir].tuple.tun_num) { 861 + mtu -= sizeof(*ip6h); 862 + if (encap_limit > 0) 863 + mtu -= 8; /* encap limit option */ 864 + } 865 + 1020 866 if (unlikely(nf_flow_exceeds_mtu(skb, mtu))) 1021 867 return 0; 1022 868 ··· 1041 875 1042 876 flow_offload_refresh(flow_table, flow, false); 1043 877 1044 - nf_flow_encap_pop(skb, tuplehash); 878 + nf_flow_encap_pop(ctx, skb, tuplehash); 1045 879 1046 880 ip6h = ipv6_hdr(skb); 1047 881 nf_flow_nat_ipv6(flow, skb, dir, ip6h); ··· 1062 896 { 1063 897 struct flow_offload_tuple tuple = {}; 1064 898 1065 - if (skb->protocol != htons(ETH_P_IPV6) && 1066 - !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset)) 899 + if (!nf_flow_skb_encap_protocol(ctx, skb, htons(ETH_P_IPV6))) 1067 900 return NULL; 1068 901 1069 902 if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0) ··· 1075 910 nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, 1076 911 const struct nf_hook_state *state) 1077 912 { 913 + int encap_limit = IPV6_DEFAULT_TNL_ENCAP_LIMIT; 1078 914 struct flow_offload_tuple_rhash *tuplehash; 1079 915 struct nf_flowtable *flow_table = priv; 1080 916 struct flow_offload_tuple *other_tuple; ··· 1094 928 if (tuplehash == NULL) 1095 929 return NF_ACCEPT; 1096 930 1097 - ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb); 931 + ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb, 932 + encap_limit); 1098 933 if (ret < 0) 1099 934 return NF_DROP; 1100 935 else if (ret == 0) 
··· 1113 946 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); 1114 947 other_tuple = &flow->tuplehash[!dir].tuple; 1115 948 ip6_daddr = &other_tuple->src_v6; 949 + 950 + if (nf_flow_tunnel_v6_push(state->net, skb, other_tuple, 951 + &ip6_daddr, encap_limit) < 0) 952 + return NF_DROP; 1116 953 1117 954 if (nf_flow_encap_push(skb, other_tuple) < 0) 1118 955 return NF_DROP;
+2 -6
net/netfilter/xt_time.c
··· 14 14 15 15 #include <linux/ktime.h> 16 16 #include <linux/module.h> 17 + #include <linux/rtc.h> 17 18 #include <linux/skbuff.h> 18 19 #include <linux/types.h> 19 20 #include <linux/netfilter/x_tables.h> ··· 64 63 /* 1979 - 1970 */ 65 64 3287, 2922, 2557, 2191, 1826, 1461, 1096, 730, 365, 0, 66 65 }; 67 - 68 - static inline bool is_leap(unsigned int y) 69 - { 70 - return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0); 71 - } 72 66 73 67 /* 74 68 * Each network packet has a (nano)seconds-since-the-epoch (SSTE) timestamp. ··· 134 138 * (A different approach to use would be to subtract a monthlength 135 139 * from w repeatedly while counting.) 136 140 */ 137 - if (is_leap(year)) { 141 + if (is_leap_year(year)) { 138 142 /* use days_since_leapyear[] in a leap year */ 139 143 for (i = ARRAY_SIZE(days_since_leapyear) - 1; 140 144 i > 0 && days_since_leapyear[i] > w; --i)
+53 -9
tools/testing/selftests/net/netfilter/nft_flowtable.sh
··· 592 592 ip -net "$nsr1" addr add 192.168.100.1/24 dev tun0 593 593 ip netns exec "$nsr1" sysctl net.ipv4.conf.tun0.forwarding=1 > /dev/null 594 594 595 + ip -net "$nsr1" link add name tun6 type ip6tnl local fee1:2::1 remote fee1:2::2 596 + ip -net "$nsr1" link set tun6 up 597 + ip -net "$nsr1" addr add fee1:3::1/64 dev tun6 nodad 598 + 595 599 ip -net "$nsr2" link add name tun0 type ipip local 192.168.10.2 remote 192.168.10.1 596 600 ip -net "$nsr2" link set tun0 up 597 601 ip -net "$nsr2" addr add 192.168.100.2/24 dev tun0 598 602 ip netns exec "$nsr2" sysctl net.ipv4.conf.tun0.forwarding=1 > /dev/null 599 603 604 + ip -net "$nsr2" link add name tun6 type ip6tnl local fee1:2::2 remote fee1:2::1 605 + ip -net "$nsr2" link set tun6 up 606 + ip -net "$nsr2" addr add fee1:3::2/64 dev tun6 nodad 607 + 600 608 ip -net "$nsr1" route change default via 192.168.100.2 601 609 ip -net "$nsr2" route change default via 192.168.100.1 610 + ip -6 -net "$nsr1" route change default via fee1:3::2 611 + ip -6 -net "$nsr2" route change default via fee1:3::1 602 612 ip -net "$ns2" route add default via 10.0.2.1 613 + ip -6 -net "$ns2" route add default via dead:2::1 603 614 604 615 ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif tun0 accept' 616 + ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif tun6 accept' 605 617 ip netns exec "$nsr1" nft -a insert rule inet filter forward \ 606 618 'meta oif "veth0" tcp sport 12345 ct mark set 1 flow add @f1 counter name routed_repl accept' 607 619 ··· 623 611 ret=1 624 612 fi 625 613 614 + if test_tcp_forwarding "$ns1" "$ns2" 1 6 "[dead:2::99]" 12345; then 615 + echo "PASS: flow offload for ns1/ns2 IP6IP6 tunnel" 616 + else 617 + echo "FAIL: flow offload for ns1/ns2 with IP6IP6 tunnel" 1>&2 618 + ip netns exec "$nsr1" nft list ruleset 619 + ret=1 620 + fi 621 + 626 622 # Create vlan tagged devices for IPIP traffic. 
627 623 ip -net "$nsr1" link add link veth1 name veth1.10 type vlan id 10 628 624 ip -net "$nsr1" link set veth1.10 up 629 625 ip -net "$nsr1" addr add 192.168.20.1/24 dev veth1.10 626 + ip -net "$nsr1" addr add fee1:4::1/64 dev veth1.10 nodad 630 627 ip netns exec "$nsr1" sysctl net.ipv4.conf.veth1/10.forwarding=1 > /dev/null 631 628 ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif veth1.10 accept' 632 - ip -net "$nsr1" link add name tun1 type ipip local 192.168.20.1 remote 192.168.20.2 633 - ip -net "$nsr1" link set tun1 up 634 - ip -net "$nsr1" addr add 192.168.200.1/24 dev tun1 629 + 630 + ip -net "$nsr1" link add name tun0.10 type ipip local 192.168.20.1 remote 192.168.20.2 631 + ip -net "$nsr1" link set tun0.10 up 632 + ip -net "$nsr1" addr add 192.168.200.1/24 dev tun0.10 635 633 ip -net "$nsr1" route change default via 192.168.200.2 636 - ip netns exec "$nsr1" sysctl net.ipv4.conf.tun1.forwarding=1 > /dev/null 637 - ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif tun1 accept' 634 + ip netns exec "$nsr1" sysctl net.ipv4.conf.tun0/10.forwarding=1 > /dev/null 635 + ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif tun0.10 accept' 636 + 637 + ip -net "$nsr1" link add name tun6.10 type ip6tnl local fee1:4::1 remote fee1:4::2 638 + ip -net "$nsr1" link set tun6.10 up 639 + ip -net "$nsr1" addr add fee1:5::1/64 dev tun6.10 nodad 640 + ip -6 -net "$nsr1" route change default via fee1:5::2 641 + ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif tun6.10 accept' 638 642 639 643 ip -net "$nsr2" link add link veth0 name veth0.10 type vlan id 10 640 644 ip -net "$nsr2" link set veth0.10 up 641 645 ip -net "$nsr2" addr add 192.168.20.2/24 dev veth0.10 646 + ip -net "$nsr2" addr add fee1:4::2/64 dev veth0.10 nodad 642 647 ip netns exec "$nsr2" sysctl net.ipv4.conf.veth0/10.forwarding=1 > /dev/null 643 - ip -net "$nsr2" link add name tun1 type ipip local 192.168.20.2 remote 192.168.20.1 
644 - ip -net "$nsr2" link set tun1 up 645 - ip -net "$nsr2" addr add 192.168.200.2/24 dev tun1 648 + 649 + ip -net "$nsr2" link add name tun0.10 type ipip local 192.168.20.2 remote 192.168.20.1 650 + ip -net "$nsr2" link set tun0.10 up 651 + ip -net "$nsr2" addr add 192.168.200.2/24 dev tun0.10 646 652 ip -net "$nsr2" route change default via 192.168.200.1 647 - ip netns exec "$nsr2" sysctl net.ipv4.conf.tun1.forwarding=1 > /dev/null 653 + ip netns exec "$nsr2" sysctl net.ipv4.conf.tun0/10.forwarding=1 > /dev/null 654 + 655 + ip -net "$nsr2" link add name tun6.10 type ip6tnl local fee1:4::2 remote fee1:4::1 656 + ip -net "$nsr2" link set tun6.10 up 657 + ip -net "$nsr2" addr add fee1:5::2/64 dev tun6.10 nodad 658 + ip -6 -net "$nsr2" route change default via fee1:5::1 648 659 649 660 if ! test_tcp_forwarding_nat "$ns1" "$ns2" 1 "IPIP tunnel over vlan"; then 650 661 echo "FAIL: flow offload for ns1/ns2 with IPIP tunnel over vlan" 1>&2 662 + ip netns exec "$nsr1" nft list ruleset 663 + ret=1 664 + fi 665 + 666 + if test_tcp_forwarding "$ns1" "$ns2" 1 6 "[dead:2::99]" 12345; then 667 + echo "PASS: flow offload for ns1/ns2 IP6IP6 tunnel over vlan" 668 + else 669 + echo "FAIL: flow offload for ns1/ns2 with IP6IP6 tunnel over vlan" 1>&2 651 670 ip netns exec "$nsr1" nft list ruleset 652 671 ret=1 653 672 fi ··· 687 644 ip -net "$nsr1" route change default via 192.168.10.2 688 645 ip -net "$nsr2" route change default via 192.168.10.1 689 646 ip -net "$ns2" route del default via 10.0.2.1 647 + ip -6 -net "$ns2" route del default via dead:2::1 690 648 } 691 649 692 650 # Another test: