/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>
#include <net/rstreason.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb,
				      enum sk_rst_reason reason);
	void		(*destructor)(struct request_sock *req);
};
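
/*
 * Illustrative sketch, not part of the original header: a protocol
 * wires these callbacks up through a static ops table, as TCP does
 * with tcp_request_sock_ops in net/ipv4/tcp_ipv4.c. The my_proto_*
 * names below are hypothetical placeholders:
 *
 *	static struct request_sock_ops my_proto_request_sock_ops = {
 *		.family		= PF_INET,
 *		.obj_size	= sizeof(struct my_proto_request_sock),
 *		.slab_name	= "my_proto_request_sock",
 *		.send_ack	= my_proto_reqsk_send_ack,
 *		.send_reset	= my_proto_send_reset,
 *		.destructor	= my_proto_reqsk_destructor,
 *	};
 */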

struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];
};
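
/*
 * The saved headers live back to back in data[], so the three lengths
 * above double as offsets. A minimal consumer sketch (illustrative;
 * "syn" is a hypothetical pointer to a populated saved_syn):
 *
 *	const u8 *mac = syn->data;
 *	const u8 *net = syn->data + syn->mac_hdrlen;
 *	const u8 *tcp = syn->data + syn->mac_hdrlen + syn->network_hdrlen;
 */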

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* True if
						      * 1) tcpopts needs to be encoded in
						      *    TS of SYN+ACK
						      * 2) ACK is validated by BPF kfunc.
						      */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
	u32				timeout;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}
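
/*
 * Both casts above rely on struct request_sock and struct sock sharing
 * struct sock_common as their first member, so the same object can be
 * viewed either way, e.g. (illustrative):
 *
 *	struct sock *sk = req_to_sk(req);
 *	struct request_sock *same = inet_reqsk(sk);	// same == req
 */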

/**
 * skb_steal_sock - steal a socket from an sk_buff
 * @skb: sk_buff to steal the socket from
 * @refcounted: is set to true if the socket is reference-counted
 * @prefetched: is set to true if the socket was assigned from bpf
 */
static inline struct sock *skb_steal_sock(struct sk_buff *skb,
					  bool *refcounted, bool *prefetched)
{
	struct sock *sk = skb->sk;

	if (!sk) {
		*prefetched = false;
		*refcounted = false;
		return NULL;
	}

	*prefetched = skb_sk_is_prefetched(skb);
	if (*prefetched) {
#if IS_ENABLED(CONFIG_SYN_COOKIES)
		if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
			struct request_sock *req = inet_reqsk(sk);

			*refcounted = false;
			sk = req->rsk_listener;
			req->rsk_listener = NULL;
			return sk;
		}
#endif
		*refcounted = sk_is_refcounted(sk);
	} else {
		*refcounted = true;
	}

	skb->destructor = NULL;
	skb->sk = NULL;
	return sk;
}
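
/*
 * Caller-side sketch, not from this file: the receive path uses this to
 * detach an early-demuxed or BPF-prefetched socket before the normal
 * lookup, and must drop the reference it inherited when *refcounted
 * comes back true:
 *
 *	bool refcounted, prefetched;
 *	struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);
 *
 *	if (sk) {
 *		... deliver skb to sk ...
 *		if (refcounted)
 *			sock_put(sk);
 *	}
 */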

void __reqsk_free(struct request_sock *req);

static inline void reqsk_free(struct request_sock *req)
{
	DEBUG_NET_WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		__reqsk_free(req);
}
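
/*
 * Typical refcounting pattern (illustrative, not from this header):
 * lookups take a reference only if the request is still live, then
 * release it with reqsk_put(), which frees the request on the last put:
 *
 *	if (refcount_inc_not_zero(&req->rsk_refcnt)) {
 *		... use req ...
 *		reqsk_put(req);
 *	}
 */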

/*
 * For a TCP Fast Open listener -
 *	lock - protects access to the reqsks, which are co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 * XXX (TFO) - ideally these fields would be part of the "listen_sock"
 * structure, but there is an implementation difficulty: listen_sock is
 * part of request_sock_queue and hence is freed when a listener is
 * stopped, while TFO-related fields may continue to be accessed even
 * after a listener is closed, until its sk_refcnt drops to 0, implying
 * no more outstanding TFO reqs. One solution is to keep listen_opt
 * around until sk_refcnt drops to 0, but there is some other complexity
 * that needs to be resolved, e.g. a listener can be disabled temporarily
 * through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;
	u8			synflood_warned;

	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
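
/*
 * Accept-path sketch (illustrative, not from this file): to hand a new
 * connection to userspace, inet_csk_accept() roughly pops the oldest
 * established child off the FIFO and then drops the queue's reference
 * on the request:
 *
 *	struct request_sock *req = reqsk_queue_remove(queue, sk);
 *	struct sock *newsk = req->sk;
 *	...
 *	reqsk_put(req);
 */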

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

/* RFC 7323 2.3 Using the Window Scale Option
 *  The window field (SEG.WND) of every outgoing segment, with the
 *  exception of <SYN> segments, MUST be right-shifted by
 *  Rcv.Wind.Shift bits.
 *
 * This means the SEG.WND carried in SYNACK can not exceed 65535.
 * We use this property to harden TCP stack while in NEW_SYN_RECV state.
 */
static inline u32 tcp_synack_window(const struct request_sock *req)
{
	return min(req->rsk_rcv_wnd, 65535U);
}
#endif /* _REQUEST_SOCK_H */