Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4 */
5
6#include <linux/kernel.h>
7#include <linux/init.h>
8#include <linux/module.h>
9#include <linux/cache.h>
10#include <linux/random.h>
11#include <linux/hrtimer.h>
12#include <linux/ktime.h>
13#include <linux/string.h>
14#include <linux/net.h>
15#include <linux/siphash.h>
16#include <net/secure_seq.h>
17
18#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
19#include <linux/in6.h>
20#include <net/tcp.h>
21
22static siphash_aligned_key_t net_secret;
23
24#define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ)
25
/* Lazily seed net_secret from the RNG; runs the actual fill exactly once. */
static __always_inline void net_secret_init(void)
{
	net_get_random_once(&net_secret, sizeof(net_secret));
}
30#endif
31
32#ifdef CONFIG_INET
33static u32 seq_scale(u32 seq)
34{
35 /*
36 * As close as possible to RFC 793, which
37 * suggests using a 250 kHz clock.
38 * Further reading shows this assumes 2 Mb/s networks.
39 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
40 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
41 * we also need to limit the resolution so that the u32 seq
42 * overlaps less than one time per MSL (2 minutes).
43 * Choosing a clock of 64 ns period is OK. (period of 274 s)
44 */
45 return seq + (ktime_get_real_ns() >> 6);
46}
47#endif
48
49#if IS_ENABLED(CONFIG_IPV6)
/*
 * Derive the initial TCP sequence number and timestamp offset for an
 * IPv6 connection from a single keyed hash of the 4-tuple.
 *
 * st.hash64 overlays st.seq and st.ts_off (union declared in
 * <net/secure_seq.h>), so one siphash() invocation feeds both fields.
 * NOTE(review): which half of hash64 maps to seq vs ts_off is defined
 * by that header -- confirm there, not here.
 */
union tcp_seq_and_ts_off
secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr,
			    const __be32 *daddr, __be16 sport, __be16 dport)
{
	/* Hash input packed into one struct so a single siphash() call
	 * covers it; SIPHASH_ALIGNMENT enables the aligned fast path.
	 */
	const struct {
		struct in6_addr saddr;
		struct in6_addr daddr;
		__be16 sport;
		__be16 dport;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.saddr = *(struct in6_addr *)saddr,
		.daddr = *(struct in6_addr *)daddr,
		.sport = sport,
		.dport = dport
	};
	union tcp_seq_and_ts_off st;

	net_secret_init();	/* make sure the siphash key is seeded */

	/* offsetofend() keeps trailing struct padding out of the hash. */
	st.hash64 = siphash(&combined, offsetofend(typeof(combined), dport),
			    &net_secret);

	/* Only keep a randomized timestamp offset when timestamps are
	 * unconditionally enabled (sysctl value 1); otherwise zero it.
	 */
	if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
		st.ts_off = 0;

	/* Add the 64 ns-granularity clock component to the ISN half. */
	st.seq = seq_scale(st.seq);
	return st;
}
EXPORT_SYMBOL(secure_tcpv6_seq_and_ts_off);
79
/*
 * Keyed hash used to choose the ephemeral port search offset for an
 * IPv6 connect().  The jiffies-derived timeseed keeps the result stable
 * within one EPHEMERAL_PORT_SHUFFLE_PERIOD and reshuffles it afterwards.
 */
u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
			       __be16 dport)
{
	/* Contiguous, SIPHASH-aligned hash input; see the tcpv6 variant. */
	const struct {
		struct in6_addr saddr;
		struct in6_addr daddr;
		unsigned int timeseed;
		__be16 dport;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.saddr = *(struct in6_addr *)saddr,
		.daddr = *(struct in6_addr *)daddr,
		.timeseed = jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD,
		.dport = dport,
	};
	net_secret_init();
	/* offsetofend() excludes trailing padding from the hashed bytes. */
	return siphash(&combined, offsetofend(typeof(combined), dport),
		       &net_secret);
}
EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
99#endif
100
101#ifdef CONFIG_INET
/* Historically, secure_tcp_seq_and_ts_off(a, b, 0, d) would hash the same
 * input as secure_ipv4_port_ephemeral(a, b, d).  The latter now also mixes in
 * a jiffies-based time seed via siphash_4u32(), so the inputs differ; and in
 * any case `sport' cannot be 0 in any circumstances, so the overlap cannot
 * arise in practice.
 */
107union tcp_seq_and_ts_off
108secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr,
109 __be16 sport, __be16 dport)
110{
111 u32 ports = (__force u32)sport << 16 | (__force u32)dport;
112 union tcp_seq_and_ts_off st;
113
114 net_secret_init();
115
116 st.hash64 = siphash_3u32((__force u32)saddr, (__force u32)daddr,
117 ports, &net_secret);
118
119 if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
120 st.ts_off = 0;
121
122 st.seq = seq_scale(st.seq);
123 return st;
124}
125EXPORT_SYMBOL_GPL(secure_tcp_seq_and_ts_off);
126
127u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
128{
129 net_secret_init();
130 return siphash_4u32((__force u32)saddr, (__force u32)daddr,
131 (__force u16)dport,
132 jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD,
133 &net_secret);
134}
135EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
136#endif