Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

net/tcp: Wire up l3index to TCP-AO

Similarly to how TCP_MD5SIG_FLAG_IFINDEX works for TCP-MD5,
TCP_AO_KEYF_IFINDEX is an AO-key flag that binds that MKT to a specified
L3 ifindex. Without this flag the key will work in
the default VRF l3index = 0 for connections.
To prevent AO-keys from overlapping, it's restricted to add key B for a
socket that has key A, where both keys have the same sndid/rcvid and one of
the following is true:
- !(A.keyflags & TCP_AO_KEYF_IFINDEX) or !(B.keyflags & TCP_AO_KEYF_IFINDEX)
so that any key is non-bound to a VRF
- A.l3index == B.l3index
both want to work for the same VRF

Additionally, it's restricted to match TCP-MD5 keys for the same peer
the following way:
|--------------|--------------------|----------------|---------------|
| | MD5 key without | MD5 key | MD5 key |
| | l3index | l3index=0 | l3index=N |
|--------------|--------------------|----------------|---------------|
| TCP-AO key | | | |
| without | reject | reject | reject |
| l3index | | | |
|--------------|--------------------|----------------|---------------|
| TCP-AO key | | | |
| l3index=0 | reject | reject | allow |
|--------------|--------------------|----------------|---------------|
| TCP-AO key | | | |
| l3index=N | reject | allow | reject |
|--------------|--------------------|----------------|---------------|

This is done with the help of tcp_md5_do_lookup_any_l3index() to reject
adding AO key without TCP_AO_KEYF_IFINDEX if there's TCP-MD5 in any VRF.
This is important for the case where sysctl_tcp_l3mdev_accept = 1.
Similarly, for TCP-AO lookups tcp_ao_do_lookup() may be used with
l3index < 0, so that __tcp_ao_key_cmp() will match TCP-AO key in any VRF.

Signed-off-by: Dmitry Safonov <dima@arista.com>
Acked-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Dmitry Safonov and committed by
David S. Miller
248411b8 67fa83f7

+177 -79
+6 -5
include/net/tcp.h
··· 2717 2717 } 2718 2718 2719 2719 static inline bool tcp_ao_required(struct sock *sk, const void *saddr, 2720 - int family, bool stat_inc) 2720 + int family, int l3index, bool stat_inc) 2721 2721 { 2722 2722 #ifdef CONFIG_TCP_AO 2723 2723 struct tcp_ao_info *ao_info; ··· 2731 2731 if (!ao_info) 2732 2732 return false; 2733 2733 2734 - ao_key = tcp_ao_do_lookup(sk, saddr, family, -1, -1); 2734 + ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1); 2735 2735 if (ao_info->ao_required || ao_key) { 2736 2736 if (stat_inc) { 2737 2737 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED); ··· 2784 2784 * the last key is impossible to remove, so there's 2785 2785 * always at least one current_key. 2786 2786 */ 2787 - if (tcp_ao_required(sk, saddr, family, true)) { 2787 + if (tcp_ao_required(sk, saddr, family, l3index, true)) { 2788 2788 tcp_hash_fail("AO hash is required, but not found", 2789 2789 family, skb, "L3 index %d", l3index); 2790 2790 return SKB_DROP_REASON_TCP_AONOTFOUND; 2791 2791 } 2792 2792 if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) { 2793 2793 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 2794 - tcp_hash_fail("MD5 Hash not found", family, skb, ""); 2794 + tcp_hash_fail("MD5 Hash not found", 2795 + family, skb, "L3 index %d", l3index); 2795 2796 return SKB_DROP_REASON_TCP_MD5NOTFOUND; 2796 2797 } 2797 2798 return SKB_NOT_DROPPED_YET; 2798 2799 } 2799 2800 2800 2801 if (aoh) 2801 - return tcp_inbound_ao_hash(sk, skb, family, req, aoh); 2802 + return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh); 2802 2803 2803 2804 return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, 2804 2805 l3index, md5_location);
+9 -9
include/net/tcp_ao.h
··· 33 33 u8 key[TCP_AO_MAXKEYLEN] __tcp_ao_key_align; 34 34 unsigned int tcp_sigpool_id; 35 35 unsigned int digest_size; 36 + int l3index; 36 37 u8 prefixlen; 37 38 u8 family; 38 39 u8 keylen; ··· 201 200 int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen); 202 201 enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk, 203 202 const struct sk_buff *skb, unsigned short int family, 204 - const struct request_sock *req, 203 + const struct request_sock *req, int l3index, 205 204 const struct tcp_ao_hdr *aoh); 206 205 u32 tcp_ao_compute_sne(u32 next_sne, u32 next_seq, u32 seq); 207 - struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, 206 + struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index, 208 207 const union tcp_ao_addr *addr, 209 208 int family, int sndid, int rcvid); 210 209 int tcp_ao_hash_hdr(unsigned short family, char *ao_hash, ··· 246 245 __be32 disn, bool send); 247 246 int tcp_v6_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key, 248 247 struct request_sock *req); 249 - struct tcp_ao_key *tcp_v6_ao_do_lookup(const struct sock *sk, 250 - const struct in6_addr *addr, 251 - int sndid, int rcvid); 252 248 struct tcp_ao_key *tcp_v6_ao_lookup(const struct sock *sk, 253 249 struct sock *addr_sk, int sndid, int rcvid); 254 250 struct tcp_ao_key *tcp_v6_ao_lookup_rsk(const struct sock *sk, ··· 263 265 void tcp_ao_connect_init(struct sock *sk); 264 266 void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb, 265 267 struct tcp_request_sock *treq, 266 - unsigned short int family); 268 + unsigned short int family, int l3index); 267 269 #else /* CONFIG_TCP_AO */ 268 270 269 271 static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb, ··· 275 277 276 278 static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb, 277 279 struct tcp_request_sock *treq, 278 - unsigned short int family) 280 + unsigned short int family, int l3index) 279 281 { 280 282 } 281 283 ··· 287 289 
288 290 static inline enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk, 289 291 const struct sk_buff *skb, unsigned short int family, 290 - const struct request_sock *req, const struct tcp_ao_hdr *aoh) 292 + const struct request_sock *req, int l3index, 293 + const struct tcp_ao_hdr *aoh) 291 294 { 292 295 return SKB_NOT_DROPPED_YET; 293 296 } 294 297 295 298 static inline struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, 296 - const union tcp_ao_addr *addr, int family, int sndid, int rcvid) 299 + int l3index, const union tcp_ao_addr *addr, 300 + int family, int sndid, int rcvid) 297 301 { 298 302 return NULL; 299 303 }
+4 -2
net/ipv4/syncookies.c
··· 344 344 __u8 rcv_wscale; 345 345 struct flowi4 fl4; 346 346 u32 tsoff = 0; 347 + int l3index; 347 348 348 349 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) || 349 350 !th->ack || th->rst) ··· 401 400 treq->snt_synack = 0; 402 401 treq->tfo_listener = false; 403 402 404 - tcp_ao_syncookie(sk, skb, treq, AF_INET); 405 - 406 403 if (IS_ENABLED(CONFIG_SMC)) 407 404 ireq->smc_ok = 0; 408 405 409 406 ireq->ir_iif = inet_request_bound_dev_if(sk, skb); 407 + 408 + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); 409 + tcp_ao_syncookie(sk, skb, treq, AF_INET, l3index); 410 410 411 411 /* We throwed the options of the initial SYN away, so we hope 412 412 * the ACK carries the same options again (see RFC1122 4.2.3.8)
+126 -44
net/ipv4/tcp_ao.c
··· 136 136 return memcmp(&a1, &a2, sizeof(a1)); 137 137 } 138 138 139 - static int __tcp_ao_key_cmp(const struct tcp_ao_key *key, 139 + static int __tcp_ao_key_cmp(const struct tcp_ao_key *key, int l3index, 140 140 const union tcp_ao_addr *addr, u8 prefixlen, 141 141 int family, int sndid, int rcvid) 142 142 { ··· 144 144 return (key->sndid > sndid) ? 1 : -1; 145 145 if (rcvid >= 0 && key->rcvid != rcvid) 146 146 return (key->rcvid > rcvid) ? 1 : -1; 147 + if (l3index >= 0 && (key->keyflags & TCP_AO_KEYF_IFINDEX)) { 148 + if (key->l3index != l3index) 149 + return (key->l3index > l3index) ? 1 : -1; 150 + } 147 151 148 152 if (family == AF_UNSPEC) 149 153 return 0; ··· 172 168 return -1; 173 169 } 174 170 175 - static int tcp_ao_key_cmp(const struct tcp_ao_key *key, 171 + static int tcp_ao_key_cmp(const struct tcp_ao_key *key, int l3index, 176 172 const union tcp_ao_addr *addr, u8 prefixlen, 177 173 int family, int sndid, int rcvid) 178 174 { ··· 180 176 if (family == AF_INET6 && ipv6_addr_v4mapped(&addr->a6)) { 181 177 __be32 addr4 = addr->a6.s6_addr32[3]; 182 178 183 - return __tcp_ao_key_cmp(key, (union tcp_ao_addr *)&addr4, 179 + return __tcp_ao_key_cmp(key, l3index, 180 + (union tcp_ao_addr *)&addr4, 184 181 prefixlen, AF_INET, sndid, rcvid); 185 182 } 186 183 #endif 187 - return __tcp_ao_key_cmp(key, addr, prefixlen, family, sndid, rcvid); 184 + return __tcp_ao_key_cmp(key, l3index, addr, 185 + prefixlen, family, sndid, rcvid); 188 186 } 189 187 190 - static struct tcp_ao_key *__tcp_ao_do_lookup(const struct sock *sk, 188 + static struct tcp_ao_key *__tcp_ao_do_lookup(const struct sock *sk, int l3index, 191 189 const union tcp_ao_addr *addr, int family, u8 prefix, 192 190 int sndid, int rcvid) 193 191 { ··· 207 201 hlist_for_each_entry_rcu(key, &ao->head, node) { 208 202 u8 prefixlen = min(prefix, key->prefixlen); 209 203 210 - if (!tcp_ao_key_cmp(key, addr, prefixlen, family, sndid, rcvid)) 204 + if (!tcp_ao_key_cmp(key, l3index, addr, prefixlen, 205 + 
family, sndid, rcvid)) 211 206 return key; 212 207 } 213 208 return NULL; 214 209 } 215 210 216 - struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, 211 + struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index, 217 212 const union tcp_ao_addr *addr, 218 213 int family, int sndid, int rcvid) 219 214 { 220 - return __tcp_ao_do_lookup(sk, addr, family, U8_MAX, sndid, rcvid); 215 + return __tcp_ao_do_lookup(sk, l3index, addr, family, U8_MAX, sndid, rcvid); 221 216 } 222 217 223 218 static struct tcp_ao_info *tcp_ao_alloc_info(gfp_t flags) ··· 684 677 struct request_sock *req, 685 678 int sndid, int rcvid) 686 679 { 687 - union tcp_ao_addr *addr = 688 - (union tcp_ao_addr *)&inet_rsk(req)->ir_rmt_addr; 680 + struct inet_request_sock *ireq = inet_rsk(req); 681 + union tcp_ao_addr *addr = (union tcp_ao_addr *)&ireq->ir_rmt_addr; 682 + int l3index; 689 683 690 - return tcp_ao_do_lookup(sk, addr, AF_INET, sndid, rcvid); 684 + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); 685 + return tcp_ao_do_lookup(sk, l3index, addr, AF_INET, sndid, rcvid); 691 686 } 692 687 693 688 struct tcp_ao_key *tcp_v4_ao_lookup(const struct sock *sk, struct sock *addr_sk, 694 689 int sndid, int rcvid) 695 690 { 691 + int l3index = l3mdev_master_ifindex_by_index(sock_net(sk), 692 + addr_sk->sk_bound_dev_if); 696 693 union tcp_ao_addr *addr = (union tcp_ao_addr *)&addr_sk->sk_daddr; 697 694 698 - return tcp_ao_do_lookup(sk, addr, AF_INET, sndid, rcvid); 695 + return tcp_ao_do_lookup(sk, l3index, addr, AF_INET, sndid, rcvid); 699 696 } 700 697 701 698 int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb, ··· 749 738 ao_info = rcu_dereference(tcp_sk(sk)->ao_info); 750 739 if (!ao_info) 751 740 return -ENOENT; 752 - *key = tcp_ao_do_lookup(sk, addr, family, -1, aoh->rnext_keyid); 741 + *key = tcp_ao_do_lookup(sk, l3index, addr, family, 742 + -1, aoh->rnext_keyid); 753 743 if (!*key) 754 744 return -ENOENT; 755 745 *traffic_key = 
kmalloc(tcp_ao_digest_size(*key), GFP_ATOMIC); ··· 826 814 827 815 static struct tcp_ao_key *tcp_ao_inbound_lookup(unsigned short int family, 828 816 const struct sock *sk, const struct sk_buff *skb, 829 - int sndid, int rcvid) 817 + int sndid, int rcvid, int l3index) 830 818 { 831 819 if (family == AF_INET) { 832 820 const struct iphdr *iph = ip_hdr(skb); 833 821 834 - return tcp_ao_do_lookup(sk, (union tcp_ao_addr *)&iph->saddr, 835 - AF_INET, sndid, rcvid); 822 + return tcp_ao_do_lookup(sk, l3index, 823 + (union tcp_ao_addr *)&iph->saddr, 824 + AF_INET, sndid, rcvid); 836 825 } else { 837 826 const struct ipv6hdr *iph = ipv6_hdr(skb); 838 827 839 - return tcp_ao_do_lookup(sk, (union tcp_ao_addr *)&iph->saddr, 840 - AF_INET6, sndid, rcvid); 828 + return tcp_ao_do_lookup(sk, l3index, 829 + (union tcp_ao_addr *)&iph->saddr, 830 + AF_INET6, sndid, rcvid); 841 831 } 842 832 } 843 833 844 834 void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb, 845 835 struct tcp_request_sock *treq, 846 - unsigned short int family) 836 + unsigned short int family, int l3index) 847 837 { 848 838 const struct tcphdr *th = tcp_hdr(skb); 849 839 const struct tcp_ao_hdr *aoh; ··· 856 842 if (tcp_parse_auth_options(th, NULL, &aoh) || !aoh) 857 843 return; 858 844 859 - key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid); 845 + key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid, l3index); 860 846 if (!key) 861 847 /* Key not found, continue without TCP-AO */ 862 848 return; ··· 870 856 tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb, 871 857 unsigned short int family, struct tcp_ao_info *info, 872 858 const struct tcp_ao_hdr *aoh, struct tcp_ao_key *key, 873 - u8 *traffic_key, u8 *phash, u32 sne) 859 + u8 *traffic_key, u8 *phash, u32 sne, int l3index) 874 860 { 875 861 u8 maclen = aoh->length - sizeof(struct tcp_ao_hdr); 876 862 const struct tcphdr *th = tcp_hdr(skb); ··· 881 867 atomic64_inc(&info->counters.pkt_bad); 882 868 
atomic64_inc(&key->pkt_bad); 883 869 tcp_hash_fail("AO hash wrong length", family, skb, 884 - "%u != %d", maclen, tcp_ao_maclen(key)); 870 + "%u != %d L3index: %d", maclen, 871 + tcp_ao_maclen(key), l3index); 885 872 return SKB_DROP_REASON_TCP_AOFAILURE; 886 873 } 887 874 ··· 897 882 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD); 898 883 atomic64_inc(&info->counters.pkt_bad); 899 884 atomic64_inc(&key->pkt_bad); 900 - tcp_hash_fail("AO hash mismatch", family, skb, ""); 885 + tcp_hash_fail("AO hash mismatch", family, skb, 886 + "L3index: %d", l3index); 901 887 kfree(hash_buf); 902 888 return SKB_DROP_REASON_TCP_AOFAILURE; 903 889 } ··· 912 896 enum skb_drop_reason 913 897 tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, 914 898 unsigned short int family, const struct request_sock *req, 915 - const struct tcp_ao_hdr *aoh) 899 + int l3index, const struct tcp_ao_hdr *aoh) 916 900 { 917 901 const struct tcphdr *th = tcp_hdr(skb); 918 902 u8 *phash = (u8 *)(aoh + 1); /* hash goes just after the header */ ··· 927 911 if (!info) { 928 912 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND); 929 913 tcp_hash_fail("AO key not found", family, skb, 930 - "keyid: %u", aoh->keyid); 914 + "keyid: %u L3index: %d", aoh->keyid, l3index); 931 915 return SKB_DROP_REASON_TCP_AOUNEXPECTED; 932 916 } 933 917 ··· 961 945 /* Established socket, traffic key are cached */ 962 946 traffic_key = rcv_other_key(key); 963 947 err = tcp_ao_verify_hash(sk, skb, family, info, aoh, key, 964 - traffic_key, phash, sne); 948 + traffic_key, phash, sne, l3index); 965 949 if (err) 966 950 return err; 967 951 current_key = READ_ONCE(info->current_key); ··· 982 966 * - request sockets would race on those key pointers 983 967 * - tcp_ao_del_cmd() allows async key removal 984 968 */ 985 - key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid); 969 + key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid, l3index); 986 970 if (!key) 987 971 goto key_not_found; 988 972 ··· 1022 
1006 return SKB_DROP_REASON_NOT_SPECIFIED; 1023 1007 tcp_ao_calc_key_skb(key, traffic_key, skb, sisn, disn, family); 1024 1008 ret = tcp_ao_verify_hash(sk, skb, family, info, aoh, key, 1025 - traffic_key, phash, sne); 1009 + traffic_key, phash, sne, l3index); 1026 1010 kfree(traffic_key); 1027 1011 return ret; 1028 1012 ··· 1030 1014 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND); 1031 1015 atomic64_inc(&info->counters.key_not_found); 1032 1016 tcp_hash_fail("Requested by the peer AO key id not found", 1033 - family, skb, ""); 1017 + family, skb, "L3index: %d", l3index); 1034 1018 return SKB_DROP_REASON_TCP_AOKEYNOTFOUND; 1035 1019 } 1036 1020 ··· 1058 1042 struct tcp_ao_info *ao_info; 1059 1043 union tcp_ao_addr *addr; 1060 1044 struct tcp_ao_key *key; 1061 - int family; 1045 + int family, l3index; 1062 1046 1063 1047 ao_info = rcu_dereference_protected(tp->ao_info, 1064 1048 lockdep_sock_is_held(sk)); ··· 1075 1059 #endif 1076 1060 else 1077 1061 return; 1062 + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), 1063 + sk->sk_bound_dev_if); 1078 1064 1079 1065 hlist_for_each_entry_rcu(key, &ao_info->head, node) { 1080 - if (!tcp_ao_key_cmp(key, addr, key->prefixlen, family, -1, -1)) 1066 + if (!tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1)) 1081 1067 continue; 1082 1068 1083 1069 if (key == ao_info->current_key) ··· 1152 1134 struct tcp_ao_key *key, *new_key, *first_key; 1153 1135 struct tcp_ao_info *new_ao, *ao; 1154 1136 struct hlist_node *key_head; 1137 + int l3index, ret = -ENOMEM; 1155 1138 union tcp_ao_addr *addr; 1156 1139 bool match = false; 1157 - int ret = -ENOMEM; 1158 1140 1159 1141 ao = rcu_dereference(tcp_sk(sk)->ao_info); 1160 1142 if (!ao) ··· 1182 1164 ret = -EAFNOSUPPORT; 1183 1165 goto free_ao; 1184 1166 } 1167 + l3index = l3mdev_master_ifindex_by_index(sock_net(newsk), 1168 + newsk->sk_bound_dev_if); 1185 1169 1186 1170 hlist_for_each_entry_rcu(key, &ao->head, node) { 1187 - if (tcp_ao_key_cmp(key, addr, 
key->prefixlen, family, -1, -1)) 1171 + if (tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1)) 1188 1172 continue; 1189 1173 1190 1174 new_key = tcp_ao_copy_key(newsk, key); ··· 1490 1470 return ERR_PTR(-ESOCKTNOSUPPORT); 1491 1471 } 1492 1472 1493 - #define TCP_AO_KEYF_ALL (TCP_AO_KEYF_EXCLUDE_OPT) 1473 + #define TCP_AO_KEYF_ALL (TCP_AO_KEYF_IFINDEX | TCP_AO_KEYF_EXCLUDE_OPT) 1474 + #define TCP_AO_GET_KEYF_VALID (TCP_AO_KEYF_IFINDEX) 1494 1475 1495 1476 static struct tcp_ao_key *tcp_ao_key_alloc(struct sock *sk, 1496 1477 struct tcp_ao_add *cmd) ··· 1555 1534 union tcp_ao_addr *addr; 1556 1535 struct tcp_ao_key *key; 1557 1536 struct tcp_ao_add cmd; 1537 + int ret, l3index = 0; 1558 1538 bool first = false; 1559 - int ret; 1560 1539 1561 1540 if (optlen < sizeof(cmd)) 1562 1541 return -EINVAL; ··· 1586 1565 return -EINVAL; 1587 1566 } 1588 1567 1568 + if (cmd.ifindex && !(cmd.keyflags & TCP_AO_KEYF_IFINDEX)) 1569 + return -EINVAL; 1570 + 1571 + /* For cmd.tcp_ifindex = 0 the key will apply to the default VRF */ 1572 + if (cmd.keyflags & TCP_AO_KEYF_IFINDEX && cmd.ifindex) { 1573 + int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); 1574 + struct net_device *dev; 1575 + 1576 + rcu_read_lock(); 1577 + dev = dev_get_by_index_rcu(sock_net(sk), cmd.ifindex); 1578 + if (dev && netif_is_l3_master(dev)) 1579 + l3index = dev->ifindex; 1580 + rcu_read_unlock(); 1581 + 1582 + if (!dev || !l3index) 1583 + return -EINVAL; 1584 + 1585 + /* It's still possible to bind after adding keys or even 1586 + * re-bind to a different dev (with CAP_NET_RAW). 1587 + * So, no reason to return error here, rather try to be 1588 + * nice and warn the user. 
1589 + */ 1590 + if (bound_dev_if && bound_dev_if != cmd.ifindex) 1591 + net_warn_ratelimited("AO key ifindex %d != sk bound ifindex %d\n", 1592 + cmd.ifindex, bound_dev_if); 1593 + } 1594 + 1589 1595 /* Don't allow keys for peers that have a matching TCP-MD5 key */ 1590 - if (tcp_md5_do_lookup_any_l3index(sk, addr, family)) 1591 - return -EKEYREJECTED; 1596 + if (cmd.keyflags & TCP_AO_KEYF_IFINDEX) { 1597 + /* Non-_exact version of tcp_md5_do_lookup() will 1598 + * as well match keys that aren't bound to a specific VRF 1599 + * (that will make them match AO key with 1600 + * sysctl_tcp_l3dev_accept = 1 1601 + */ 1602 + if (tcp_md5_do_lookup(sk, l3index, addr, family)) 1603 + return -EKEYREJECTED; 1604 + } else { 1605 + if (tcp_md5_do_lookup_any_l3index(sk, addr, family)) 1606 + return -EKEYREJECTED; 1607 + } 1592 1608 1593 1609 ao_info = setsockopt_ao_info(sk); 1594 1610 if (IS_ERR(ao_info)) ··· 1642 1584 * > The IDs of MKTs MUST NOT overlap where their 1643 1585 * > TCP connection identifiers overlap. 
1644 1586 */ 1645 - if (__tcp_ao_do_lookup(sk, addr, family, 1646 - cmd.prefix, -1, cmd.rcvid)) 1587 + if (__tcp_ao_do_lookup(sk, l3index, addr, family, cmd.prefix, -1, cmd.rcvid)) 1647 1588 return -EEXIST; 1648 - if (__tcp_ao_do_lookup(sk, addr, family, 1589 + if (__tcp_ao_do_lookup(sk, l3index, addr, family, 1649 1590 cmd.prefix, cmd.sndid, -1)) 1650 1591 return -EEXIST; 1651 1592 } ··· 1663 1606 key->keyflags = cmd.keyflags; 1664 1607 key->sndid = cmd.sndid; 1665 1608 key->rcvid = cmd.rcvid; 1609 + key->l3index = l3index; 1666 1610 atomic64_set(&key->pkt_good, 0); 1667 1611 atomic64_set(&key->pkt_bad, 0); 1668 1612 ··· 1752 1694 return err; 1753 1695 } 1754 1696 1697 + #define TCP_AO_DEL_KEYF_ALL (TCP_AO_KEYF_IFINDEX) 1755 1698 static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family, 1756 1699 sockptr_t optval, int optlen) 1757 1700 { 1758 1701 struct tcp_ao_key *key, *new_current = NULL, *new_rnext = NULL; 1702 + int err, addr_len, l3index = 0; 1759 1703 struct tcp_ao_info *ao_info; 1760 1704 union tcp_ao_addr *addr; 1761 1705 struct tcp_ao_del cmd; 1762 - int addr_len; 1763 1706 __u8 prefix; 1764 1707 u16 port; 1765 - int err; 1766 1708 1767 1709 if (optlen < sizeof(cmd)) 1768 1710 return -EINVAL; ··· 1778 1720 if (!tcp_ao_can_set_current_rnext(sk)) 1779 1721 return -EINVAL; 1780 1722 } 1723 + 1724 + if (cmd.keyflags & ~TCP_AO_DEL_KEYF_ALL) 1725 + return -EINVAL; 1726 + 1727 + /* No sanity check for TCP_AO_KEYF_IFINDEX as if a VRF 1728 + * was destroyed, there still should be a way to delete keys, 1729 + * that were bound to that l3intf. So, fail late at lookup stage 1730 + * if there is no key for that ifindex. 
1731 + */ 1732 + if (cmd.ifindex && !(cmd.keyflags & TCP_AO_KEYF_IFINDEX)) 1733 + return -EINVAL; 1781 1734 1782 1735 ao_info = setsockopt_ao_info(sk); 1783 1736 if (IS_ERR(ao_info)) ··· 1855 1786 if (family != key->family || 1856 1787 prefix != key->prefixlen || 1857 1788 memcmp(addr, &key->addr, addr_len)) 1789 + continue; 1790 + 1791 + if ((cmd.keyflags & TCP_AO_KEYF_IFINDEX) != 1792 + (key->keyflags & TCP_AO_KEYF_IFINDEX)) 1793 + continue; 1794 + 1795 + if (key->l3index != l3index) 1858 1796 continue; 1859 1797 1860 1798 if (key == new_current || key == new_rnext) ··· 2049 1973 struct tcp_ao_key *key, *current_key; 2050 1974 bool do_address_matching = true; 2051 1975 union tcp_ao_addr *addr = NULL; 1976 + int err, l3index, user_len; 2052 1977 unsigned int max_keys; /* maximum number of keys to copy to user */ 2053 1978 size_t out_offset = 0; 2054 1979 size_t bytes_to_write; /* number of bytes to write to user level */ 2055 - int err, user_len; 2056 1980 u32 matched_keys; /* keys from ao_info matched so far */ 2057 1981 int optlen_out; 2058 1982 __be16 port = 0; ··· 2071 1995 2072 1996 if (opt_in.pkt_good || opt_in.pkt_bad) 2073 1997 return -EINVAL; 1998 + if (opt_in.keyflags & ~TCP_AO_GET_KEYF_VALID) 1999 + return -EINVAL; 2000 + if (opt_in.ifindex && !(opt_in.keyflags & TCP_AO_KEYF_IFINDEX)) 2001 + return -EINVAL; 2074 2002 2075 2003 if (opt_in.reserved != 0) 2076 2004 return -EINVAL; 2077 2005 2078 2006 max_keys = opt_in.nkeys; 2007 + l3index = (opt_in.keyflags & TCP_AO_KEYF_IFINDEX) ? 
opt_in.ifindex : -1; 2079 2008 2080 2009 if (opt_in.get_all || opt_in.is_current || opt_in.is_rnext) { 2081 2010 if (opt_in.get_all && (opt_in.is_current || opt_in.is_rnext)) ··· 2182 2101 continue; 2183 2102 } 2184 2103 2185 - if (tcp_ao_key_cmp(key, addr, opt_in.prefix, 2104 + if (tcp_ao_key_cmp(key, l3index, addr, opt_in.prefix, 2186 2105 opt_in.addr.ss_family, 2187 2106 opt_in.sndid, opt_in.rcvid) != 0) 2188 2107 continue; ··· 2215 2134 opt_out.nkeys = 0; 2216 2135 opt_out.maclen = key->maclen; 2217 2136 opt_out.keylen = key->keylen; 2137 + opt_out.ifindex = key->l3index; 2218 2138 opt_out.pkt_good = atomic64_read(&key->pkt_good); 2219 2139 opt_out.pkt_bad = atomic64_read(&key->pkt_bad); 2220 2140 memcpy(&opt_out.key, key->key, key->keylen);
+7 -3
net/ipv4/tcp_ipv4.c
··· 1087 1087 tcp_rsk_used_ao(req)) { 1088 1088 const union tcp_md5_addr *addr; 1089 1089 const struct tcp_ao_hdr *aoh; 1090 + int l3index; 1090 1091 1091 1092 /* Invalid TCP option size or twice included auth */ 1092 1093 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) ··· 1096 1095 return; 1097 1096 1098 1097 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr; 1099 - key.ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, 1098 + l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0; 1099 + key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, 1100 1100 aoh->rnext_keyid, -1); 1101 1101 if (unlikely(!key.ao_key)) { 1102 1102 /* Send ACK with any matching MKT for the peer */ 1103 - key.ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, -1, -1); 1103 + key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, -1, -1); 1104 1104 /* Matching key disappeared (user removed the key?) 1105 1105 * let the handshake timeout. 1106 1106 */ ··· 1495 1493 const union tcp_md5_addr *addr; 1496 1494 u8 prefixlen = 32; 1497 1495 int l3index = 0; 1496 + bool l3flag; 1498 1497 u8 flags; 1499 1498 1500 1499 if (optlen < sizeof(cmd)) ··· 1508 1505 return -EINVAL; 1509 1506 1510 1507 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; 1508 + l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; 1511 1509 1512 1510 if (optname == TCP_MD5SIG_EXT && 1513 1511 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) { ··· 1546 1542 /* Don't allow keys for peers that have a matching TCP-AO key. 1547 1543 * See the comment in tcp_ao_add_cmd() 1548 1544 */ 1549 - if (tcp_ao_required(sk, addr, AF_INET, false)) 1545 + if (tcp_ao_required(sk, addr, AF_INET, l3flag ? l3index : -1, false)) 1550 1546 return -EKEYREJECTED; 1551 1547 1552 1548 return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
+4 -1
net/ipv6/syncookies.c
··· 140 140 struct dst_entry *dst; 141 141 __u8 rcv_wscale; 142 142 u32 tsoff = 0; 143 + int l3index; 143 144 144 145 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) || 145 146 !th->ack || th->rst) ··· 215 214 treq->snt_isn = cookie; 216 215 treq->ts_off = 0; 217 216 treq->txhash = net_tx_rndhash(); 218 - tcp_ao_syncookie(sk, skb, treq, AF_INET6); 217 + 218 + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); 219 + tcp_ao_syncookie(sk, skb, treq, AF_INET6, l3index); 219 220 220 221 if (IS_ENABLED(CONFIG_SMC)) 221 222 ireq->smc_ok = 0;
+10 -11
net/ipv6/tcp_ao.c
··· 87 87 htonl(tcp_rsk(req)->rcv_isn)); 88 88 } 89 89 90 - struct tcp_ao_key *tcp_v6_ao_do_lookup(const struct sock *sk, 91 - const struct in6_addr *addr, 92 - int sndid, int rcvid) 93 - { 94 - return tcp_ao_do_lookup(sk, (union tcp_ao_addr *)addr, AF_INET6, 95 - sndid, rcvid); 96 - } 97 - 98 90 struct tcp_ao_key *tcp_v6_ao_lookup(const struct sock *sk, 99 91 struct sock *addr_sk, 100 92 int sndid, int rcvid) 101 93 { 94 + int l3index = l3mdev_master_ifindex_by_index(sock_net(sk), 95 + addr_sk->sk_bound_dev_if); 102 96 struct in6_addr *addr = &addr_sk->sk_v6_daddr; 103 97 104 - return tcp_v6_ao_do_lookup(sk, addr, sndid, rcvid); 98 + return tcp_ao_do_lookup(sk, l3index, (union tcp_ao_addr *)addr, 99 + AF_INET6, sndid, rcvid); 105 100 } 106 101 107 102 struct tcp_ao_key *tcp_v6_ao_lookup_rsk(const struct sock *sk, 108 103 struct request_sock *req, 109 104 int sndid, int rcvid) 110 105 { 111 - struct in6_addr *addr = &inet_rsk(req)->ir_v6_rmt_addr; 106 + struct inet_request_sock *ireq = inet_rsk(req); 107 + struct in6_addr *addr = &ireq->ir_v6_rmt_addr; 108 + int l3index; 112 109 113 - return tcp_v6_ao_do_lookup(sk, addr, sndid, rcvid); 110 + l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); 111 + return tcp_ao_do_lookup(sk, l3index, (union tcp_ao_addr *)addr, 112 + AF_INET6, sndid, rcvid); 114 113 } 115 114 116 115 int tcp_v6_ao_hash_pseudoheader(struct tcp_sigpool *hp,
+11 -4
net/ipv6/tcp_ipv6.c
··· 610 610 union tcp_ao_addr *addr; 611 611 int l3index = 0; 612 612 u8 prefixlen; 613 + bool l3flag; 613 614 u8 flags; 614 615 615 616 if (optlen < sizeof(cmd)) ··· 623 622 return -EINVAL; 624 623 625 624 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; 625 + l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; 626 626 627 627 if (optname == TCP_MD5SIG_EXT && 628 628 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) { ··· 670 668 /* Don't allow keys for peers that have a matching TCP-AO key. 671 669 * See the comment in tcp_ao_add_cmd() 672 670 */ 673 - if (tcp_ao_required(sk, addr, AF_INET, false)) 671 + if (tcp_ao_required(sk, addr, AF_INET, 672 + l3flag ? l3index : -1, false)) 674 673 return -EKEYREJECTED; 675 674 return tcp_md5_do_add(sk, addr, 676 675 AF_INET, prefixlen, l3index, flags, ··· 683 680 /* Don't allow keys for peers that have a matching TCP-AO key. 684 681 * See the comment in tcp_ao_add_cmd() 685 682 */ 686 - if (tcp_ao_required(sk, addr, AF_INET6, false)) 683 + if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false)) 687 684 return -EKEYREJECTED; 688 685 689 686 return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags, ··· 1223 1220 return; 1224 1221 if (!aoh) 1225 1222 return; 1226 - key.ao_key = tcp_v6_ao_do_lookup(sk, addr, aoh->rnext_keyid, -1); 1223 + key.ao_key = tcp_ao_do_lookup(sk, l3index, 1224 + (union tcp_ao_addr *)addr, 1225 + AF_INET6, aoh->rnext_keyid, -1); 1227 1226 if (unlikely(!key.ao_key)) { 1228 1227 /* Send ACK with any matching MKT for the peer */ 1229 - key.ao_key = tcp_v6_ao_do_lookup(sk, addr, -1, -1); 1228 + key.ao_key = tcp_ao_do_lookup(sk, l3index, 1229 + (union tcp_ao_addr *)addr, 1230 + AF_INET6, -1, -1); 1230 1231 /* Matching key disappeared (user removed the key?) 1231 1232 * let the handshake timeout. 1232 1233 */