Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

tcp: secure_seq: add back ports to TS offset

This reverts commit 28ee1b746f49 ("secure_seq: downgrade to per-host timestamp offsets")

tcp_tw_recycle went away in 2017.

Zhouyan Deng reported an off-path TCP source port leak via a
SYN cookie side channel, which can be fixed in multiple ways.

One of them is to bring back TCP ports in TS offset randomization.

As a bonus, we perform a single siphash() computation
to provide both an ISN and a TS offset.

Fixes: 28ee1b746f49 ("secure_seq: downgrade to per-host timestamp offsets")
Reported-by: Zhouyan Deng <dengzhouyan_nwpu@163.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Acked-by: Florian Westphal <fw@strlen.de>
Link: https://patch.msgid.link/20260302205527.1982836-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Eric Dumazet and committed by
Jakub Kicinski
165573e4 7f083faf

+127 -108
+38 -7
include/net/secure_seq.h
··· 5 5 #include <linux/types.h> 6 6 7 7 struct net; 8 + extern struct net init_net; 9 + 10 + union tcp_seq_and_ts_off { 11 + struct { 12 + u32 seq; 13 + u32 ts_off; 14 + }; 15 + u64 hash64; 16 + }; 8 17 9 18 u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 10 19 u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 11 20 __be16 dport); 12 - u32 secure_tcp_seq(__be32 saddr, __be32 daddr, 13 - __be16 sport, __be16 dport); 14 - u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr); 15 - u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, 16 - __be16 sport, __be16 dport); 17 - u32 secure_tcpv6_ts_off(const struct net *net, 18 - const __be32 *saddr, const __be32 *daddr); 21 + union tcp_seq_and_ts_off 22 + secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr, 23 + __be16 sport, __be16 dport); 19 24 25 + static inline u32 secure_tcp_seq(__be32 saddr, __be32 daddr, 26 + __be16 sport, __be16 dport) 27 + { 28 + union tcp_seq_and_ts_off ts; 29 + 30 + ts = secure_tcp_seq_and_ts_off(&init_net, saddr, daddr, 31 + sport, dport); 32 + 33 + return ts.seq; 34 + } 35 + 36 + union tcp_seq_and_ts_off 37 + secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr, 38 + const __be32 *daddr, 39 + __be16 sport, __be16 dport); 40 + 41 + static inline u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, 42 + __be16 sport, __be16 dport) 43 + { 44 + union tcp_seq_and_ts_off ts; 45 + 46 + ts = secure_tcpv6_seq_and_ts_off(&init_net, saddr, daddr, 47 + sport, dport); 48 + 49 + return ts.seq; 50 + } 20 51 #endif /* _NET_SECURE_SEQ */
+4 -2
include/net/tcp.h
··· 43 43 #include <net/dst.h> 44 44 #include <net/mptcp.h> 45 45 #include <net/xfrm.h> 46 + #include <net/secure_seq.h> 46 47 47 48 #include <linux/seq_file.h> 48 49 #include <linux/memcontrol.h> ··· 2465 2464 struct flowi *fl, 2466 2465 struct request_sock *req, 2467 2466 u32 tw_isn); 2468 - u32 (*init_seq)(const struct sk_buff *skb); 2469 - u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb); 2467 + union tcp_seq_and_ts_off (*init_seq_and_ts_off)( 2468 + const struct net *net, 2469 + const struct sk_buff *skb); 2470 2470 int (*send_synack)(const struct sock *sk, struct dst_entry *dst, 2471 2471 struct flowi *fl, struct request_sock *req, 2472 2472 struct tcp_fastopen_cookie *foc,
+29 -51
net/core/secure_seq.c
··· 20 20 #include <net/tcp.h> 21 21 22 22 static siphash_aligned_key_t net_secret; 23 - static siphash_aligned_key_t ts_secret; 24 23 25 24 #define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ) 26 25 27 26 static __always_inline void net_secret_init(void) 28 27 { 29 28 net_get_random_once(&net_secret, sizeof(net_secret)); 30 - } 31 - 32 - static __always_inline void ts_secret_init(void) 33 - { 34 - net_get_random_once(&ts_secret, sizeof(ts_secret)); 35 29 } 36 30 #endif 37 31 ··· 47 53 #endif 48 54 49 55 #if IS_ENABLED(CONFIG_IPV6) 50 - u32 secure_tcpv6_ts_off(const struct net *net, 51 - const __be32 *saddr, const __be32 *daddr) 52 - { 53 - const struct { 54 - struct in6_addr saddr; 55 - struct in6_addr daddr; 56 - } __aligned(SIPHASH_ALIGNMENT) combined = { 57 - .saddr = *(struct in6_addr *)saddr, 58 - .daddr = *(struct in6_addr *)daddr, 59 - }; 60 - 61 - if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) 62 - return 0; 63 - 64 - ts_secret_init(); 65 - return siphash(&combined, offsetofend(typeof(combined), daddr), 66 - &ts_secret); 67 - } 68 - EXPORT_IPV6_MOD(secure_tcpv6_ts_off); 69 - 70 - u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, 71 - __be16 sport, __be16 dport) 56 + union tcp_seq_and_ts_off 57 + secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr, 58 + const __be32 *daddr, __be16 sport, __be16 dport) 72 59 { 73 60 const struct { 74 61 struct in6_addr saddr; ··· 62 87 .sport = sport, 63 88 .dport = dport 64 89 }; 65 - u32 hash; 90 + union tcp_seq_and_ts_off st; 66 91 67 92 net_secret_init(); 68 - hash = siphash(&combined, offsetofend(typeof(combined), dport), 69 - &net_secret); 70 - return seq_scale(hash); 93 + 94 + st.hash64 = siphash(&combined, offsetofend(typeof(combined), dport), 95 + &net_secret); 96 + 97 + if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) 98 + st.ts_off = 0; 99 + 100 + st.seq = seq_scale(st.seq); 101 + return st; 71 102 } 72 - EXPORT_SYMBOL(secure_tcpv6_seq); 103 + 
EXPORT_SYMBOL(secure_tcpv6_seq_and_ts_off); 73 104 74 105 u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 75 106 __be16 dport) ··· 99 118 #endif 100 119 101 120 #ifdef CONFIG_INET 102 - u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr) 103 - { 104 - if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) 105 - return 0; 106 - 107 - ts_secret_init(); 108 - return siphash_2u32((__force u32)saddr, (__force u32)daddr, 109 - &ts_secret); 110 - } 111 - 112 121 /* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), 113 122 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, 114 123 * it would be easy enough to have the former function use siphash_4u32, passing 115 124 * the arguments as separate u32. 116 125 */ 117 - u32 secure_tcp_seq(__be32 saddr, __be32 daddr, 118 - __be16 sport, __be16 dport) 126 + union tcp_seq_and_ts_off 127 + secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr, 128 + __be16 sport, __be16 dport) 119 129 { 120 - u32 hash; 130 + u32 ports = (__force u32)sport << 16 | (__force u32)dport; 131 + union tcp_seq_and_ts_off st; 121 132 122 133 net_secret_init(); 123 - hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, 124 - (__force u32)sport << 16 | (__force u32)dport, 125 - &net_secret); 126 - return seq_scale(hash); 134 + 135 + st.hash64 = siphash_3u32((__force u32)saddr, (__force u32)daddr, 136 + ports, &net_secret); 137 + 138 + if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) 139 + st.ts_off = 0; 140 + 141 + st.seq = seq_scale(st.seq); 142 + return st; 127 143 } 128 - EXPORT_SYMBOL_GPL(secure_tcp_seq); 144 + EXPORT_SYMBOL_GPL(secure_tcp_seq_and_ts_off); 129 145 130 146 u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) 131 147 {
+8 -3
net/ipv4/syncookies.c
··· 378 378 tcp_parse_options(net, skb, &tcp_opt, 0, NULL); 379 379 380 380 if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { 381 - tsoff = secure_tcp_ts_off(net, 382 - ip_hdr(skb)->daddr, 383 - ip_hdr(skb)->saddr); 381 + union tcp_seq_and_ts_off st; 382 + 383 + st = secure_tcp_seq_and_ts_off(net, 384 + ip_hdr(skb)->daddr, 385 + ip_hdr(skb)->saddr, 386 + tcp_hdr(skb)->dest, 387 + tcp_hdr(skb)->source); 388 + tsoff = st.ts_off; 384 389 tcp_opt.rcv_tsecr -= tsoff; 385 390 } 386 391
+6 -2
net/ipv4/tcp_input.c
··· 7646 7646 const struct tcp_sock *tp = tcp_sk(sk); 7647 7647 struct net *net = sock_net(sk); 7648 7648 struct sock *fastopen_sk = NULL; 7649 + union tcp_seq_and_ts_off st; 7649 7650 struct request_sock *req; 7650 7651 bool want_cookie = false; 7651 7652 struct dst_entry *dst; ··· 7716 7715 if (!dst) 7717 7716 goto drop_and_free; 7718 7717 7718 + if (tmp_opt.tstamp_ok || (!want_cookie && !isn)) 7719 + st = af_ops->init_seq_and_ts_off(net, skb); 7720 + 7719 7721 if (tmp_opt.tstamp_ok) { 7720 7722 tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst); 7721 - tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb); 7723 + tcp_rsk(req)->ts_off = st.ts_off; 7722 7724 } 7723 7725 if (!want_cookie && !isn) { 7724 7726 int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog); ··· 7743 7739 goto drop_and_release; 7744 7740 } 7745 7741 7746 - isn = af_ops->init_seq(skb); 7742 + isn = st.seq; 7747 7743 } 7748 7744 7749 7745 tcp_ecn_create_request(req, skb, sk, dst);
+17 -20
net/ipv4/tcp_ipv4.c
··· 105 105 106 106 static DEFINE_MUTEX(tcp_exit_batch_mutex); 107 107 108 - static u32 tcp_v4_init_seq(const struct sk_buff *skb) 108 + static union tcp_seq_and_ts_off 109 + tcp_v4_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb) 109 110 { 110 - return secure_tcp_seq(ip_hdr(skb)->daddr, 111 - ip_hdr(skb)->saddr, 112 - tcp_hdr(skb)->dest, 113 - tcp_hdr(skb)->source); 114 - } 115 - 116 - static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb) 117 - { 118 - return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr); 111 + return secure_tcp_seq_and_ts_off(net, 112 + ip_hdr(skb)->daddr, 113 + ip_hdr(skb)->saddr, 114 + tcp_hdr(skb)->dest, 115 + tcp_hdr(skb)->source); 119 116 } 120 117 121 118 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) ··· 324 327 rt = NULL; 325 328 326 329 if (likely(!tp->repair)) { 330 + union tcp_seq_and_ts_off st; 331 + 332 + st = secure_tcp_seq_and_ts_off(net, 333 + inet->inet_saddr, 334 + inet->inet_daddr, 335 + inet->inet_sport, 336 + usin->sin_port); 327 337 if (!tp->write_seq) 328 - WRITE_ONCE(tp->write_seq, 329 - secure_tcp_seq(inet->inet_saddr, 330 - inet->inet_daddr, 331 - inet->inet_sport, 332 - usin->sin_port)); 333 - WRITE_ONCE(tp->tsoffset, 334 - secure_tcp_ts_off(net, inet->inet_saddr, 335 - inet->inet_daddr)); 338 + WRITE_ONCE(tp->write_seq, st.seq); 339 + WRITE_ONCE(tp->tsoffset, st.ts_off); 336 340 } 337 341 338 342 atomic_set(&inet->inet_id, get_random_u16()); ··· 1675 1677 .cookie_init_seq = cookie_v4_init_sequence, 1676 1678 #endif 1677 1679 .route_req = tcp_v4_route_req, 1678 - .init_seq = tcp_v4_init_seq, 1679 - .init_ts_off = tcp_v4_init_ts_off, 1680 + .init_seq_and_ts_off = tcp_v4_init_seq_and_ts_off, 1680 1681 .send_synack = tcp_v4_send_synack, 1681 1682 }; 1682 1683
+8 -3
net/ipv6/syncookies.c
··· 151 151 tcp_parse_options(net, skb, &tcp_opt, 0, NULL); 152 152 153 153 if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { 154 - tsoff = secure_tcpv6_ts_off(net, 155 - ipv6_hdr(skb)->daddr.s6_addr32, 156 - ipv6_hdr(skb)->saddr.s6_addr32); 154 + union tcp_seq_and_ts_off st; 155 + 156 + st = secure_tcpv6_seq_and_ts_off(net, 157 + ipv6_hdr(skb)->daddr.s6_addr32, 158 + ipv6_hdr(skb)->saddr.s6_addr32, 159 + tcp_hdr(skb)->dest, 160 + tcp_hdr(skb)->source); 161 + tsoff = st.ts_off; 157 162 tcp_opt.rcv_tsecr -= tsoff; 158 163 } 159 164
+17 -20
net/ipv6/tcp_ipv6.c
··· 105 105 } 106 106 } 107 107 108 - static u32 tcp_v6_init_seq(const struct sk_buff *skb) 108 + static union tcp_seq_and_ts_off 109 + tcp_v6_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb) 109 110 { 110 - return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32, 111 - ipv6_hdr(skb)->saddr.s6_addr32, 112 - tcp_hdr(skb)->dest, 113 - tcp_hdr(skb)->source); 114 - } 115 - 116 - static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb) 117 - { 118 - return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32, 119 - ipv6_hdr(skb)->saddr.s6_addr32); 111 + return secure_tcpv6_seq_and_ts_off(net, 112 + ipv6_hdr(skb)->daddr.s6_addr32, 113 + ipv6_hdr(skb)->saddr.s6_addr32, 114 + tcp_hdr(skb)->dest, 115 + tcp_hdr(skb)->source); 120 116 } 121 117 122 118 static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, ··· 316 320 sk_set_txhash(sk); 317 321 318 322 if (likely(!tp->repair)) { 323 + union tcp_seq_and_ts_off st; 324 + 325 + st = secure_tcpv6_seq_and_ts_off(net, 326 + np->saddr.s6_addr32, 327 + sk->sk_v6_daddr.s6_addr32, 328 + inet->inet_sport, 329 + inet->inet_dport); 319 330 if (!tp->write_seq) 320 - WRITE_ONCE(tp->write_seq, 321 - secure_tcpv6_seq(np->saddr.s6_addr32, 322 - sk->sk_v6_daddr.s6_addr32, 323 - inet->inet_sport, 324 - inet->inet_dport)); 325 - tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32, 326 - sk->sk_v6_daddr.s6_addr32); 331 + WRITE_ONCE(tp->write_seq, st.seq); 332 + tp->tsoffset = st.ts_off; 327 333 } 328 334 329 335 if (tcp_fastopen_defer_connect(sk, &err)) ··· 815 817 .cookie_init_seq = cookie_v6_init_sequence, 816 818 #endif 817 819 .route_req = tcp_v6_route_req, 818 - .init_seq = tcp_v6_init_seq, 819 - .init_ts_off = tcp_v6_init_ts_off, 820 + .init_seq_and_ts_off = tcp_v6_init_seq_and_ts_off, 820 821 .send_synack = tcp_v6_send_synack, 821 822 }; 822 823