Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the UDP protocol.
8 *
9 * Version: @(#)udp.h 1.0.2 04/28/93
10 *
11 * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 */
13#ifndef _LINUX_UDP_H
14#define _LINUX_UDP_H
15
16#include <net/inet_sock.h>
17#include <linux/skbuff.h>
18#include <net/netns/hash.h>
19#include <uapi/linux/udp.h>
20
/* Return the UDP header of @skb, located at its transport header offset. */
static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
	unsigned char *th = skb_transport_header(skb);

	return (struct udphdr *)th;
}
25
/* Lower bound for the per-netns UDP hash table size. */
#define UDP_HTABLE_SIZE_MIN_PERNET 128
/* Global bounds for the UDP hash table size; shrunk under CONFIG_BASE_SMALL. */
#define UDP_HTABLE_SIZE_MIN (IS_ENABLED(CONFIG_BASE_SMALL) ? 128 : 256)
#define UDP_HTABLE_SIZE_MAX 65536
29
30static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
31{
32 return (num + net_hash_mix(net)) & mask;
33}
34
/* Bit numbers for udp_sock::udp_flags. */
enum {
	UDP_FLAGS_CORK,			/* Cork is required */
	UDP_FLAGS_NO_CHECK6_TX,		/* Send zero UDP6 checksums on TX? */
	UDP_FLAGS_NO_CHECK6_RX,		/* Allow zero UDP6 checksums on RX? */
	UDP_FLAGS_GRO_ENABLED,		/* Request GRO aggregation */
	UDP_FLAGS_ACCEPT_FRAGLIST,	/* Accept SKB_GSO_FRAGLIST skbs (see udp_unexpected_gso) */
	UDP_FLAGS_ACCEPT_L4,		/* Accept SKB_GSO_UDP_L4 skbs (see udp_unexpected_gso) */
	UDP_FLAGS_ENCAP_ENABLED,	/* This socket enabled encap */
};
44
/* per NUMA structure for lockless producer usage. */
struct udp_prod_queue {
	/* Lock-free list head producers push onto; own cacheline on SMP. */
	struct llist_head ll_root ____cacheline_aligned_in_smp;
	/* NOTE(review): presumably receive-memory accounted to this queue's
	 * entries (name-derived) — confirm against the producer/consumer code.
	 */
	atomic_t rmem_alloc;
};
50
struct udp_sock {
	/* inet_sock has to be the first member */
	struct inet_sock inet;
	/* Aliases into hash fields embedded in the common socket header. */
#define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node

	/* Bitmap of UDP_FLAGS_* bits, manipulated atomically. */
	unsigned long udp_flags;

	int pending;	/* Any pending frames ? */
	__u8 encap_type;	/* Is this an Encapsulation socket? */

#if !IS_ENABLED(CONFIG_BASE_SMALL)
	/* For UDP 4-tuple hash */
	__u16 udp_lrpa_hash;
	struct hlist_nulls_node udp_lrpa_node;
#endif

	/*
	 * Following member retains the information to create a UDP header
	 * when the socket is uncorked.
	 */
	__u16 len;	/* total length of pending frames */
	__u16 gso_size;	/* NOTE(review): presumably the UDP GSO segment size — confirm */

	/*
	 * For encapsulation sockets.
	 */
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
	void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, int err,
			      __be16 port, u32 info, u8 *payload);
	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
	void (*encap_destroy)(struct sock *sk);

	/* GRO functions for UDP socket */
	struct sk_buff * (*gro_receive)(struct sock *sk,
					struct list_head *head,
					struct sk_buff *skb);
	int (*gro_complete)(struct sock *sk,
			    struct sk_buff *skb,
			    int nhoff);

	/* Per-NUMA producer queues (see struct udp_prod_queue above). */
	struct udp_prod_queue *udp_prod_queue;

	/* udp_recvmsg try to use this before splicing sk_receive_queue */
	struct sk_buff_head reader_queue ____cacheline_aligned_in_smp;

	/* This field is dirtied by udp_recvmsg() */
	int forward_deficit;

	/* This field follows rcvbuf value, and is touched by udp_recvmsg */
	int forward_threshold;

	/* Cache friendly copy of sk->sk_peek_off >= 0 */
	bool peeking_with_offset;

	/*
	 * Accounting for the tunnel GRO fastpath.
	 * Unprotected by compilers guard, as it uses space available in
	 * the last UDP socket cacheline.
	 */
	struct hlist_node tunnel_list;
	struct numa_drop_counters drop_counters;
};
115
/*
 * Atomic accessors for the UDP_FLAGS_* bits in udp_sock::udp_flags.
 * @nr is the flag name without its UDP_FLAGS_ prefix, e.g.
 * udp_test_bit(CORK, sk).
 */
#define udp_test_bit(nr, sk)			\
	test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_set_bit(nr, sk)			\
	set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_test_and_set_bit(nr, sk)		\
	test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_clear_bit(nr, sk)			\
	clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_assign_bit(nr, sk, val)		\
	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)

/* 128; NOTE(review): presumably the UDP GSO/GRO segment-count cap — confirm */
#define UDP_MAX_SEGMENTS	(1 << 7UL)

/* Convert a struct sock pointer into its containing udp_sock. */
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
130
131static inline int udp_set_peek_off(struct sock *sk, int val)
132{
133 sk_set_peek_off(sk, val);
134 WRITE_ONCE(udp_sk(sk)->peeking_with_offset, val >= 0);
135 return 0;
136}
137
/* Enable/disable sending zero UDPv6 checksums on TX for this socket. */
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_TX, sk, val);
}
142
/* Enable/disable accepting zero UDPv6 checksums on RX for this socket. */
static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_RX, sk, val);
}
147
148static inline bool udp_get_no_check6_tx(const struct sock *sk)
149{
150 return udp_test_bit(NO_CHECK6_TX, sk);
151}
152
153static inline bool udp_get_no_check6_rx(const struct sock *sk)
154{
155 return udp_test_bit(NO_CHECK6_RX, sk);
156}
157
158static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
159 struct sk_buff *skb)
160{
161 int gso_size;
162
163 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
164 gso_size = skb_shinfo(skb)->gso_size;
165 put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
166 }
167}
168
/*
 * Static keys consulted by udp_encap_needed() below; NOTE(review):
 * presumably enabled once some socket turns on UDP encapsulation — the
 * enabling sites are not in this file, confirm in udp.c/udpv6 code.
 */
DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
#endif
173
174static inline bool udp_encap_needed(void)
175{
176 if (static_branch_unlikely(&udp_encap_needed_key))
177 return true;
178
179#if IS_ENABLED(CONFIG_IPV6)
180 if (static_branch_unlikely(&udpv6_encap_needed_key))
181 return true;
182#endif
183
184 return false;
185}
186
187static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
188{
189 if (!skb_is_gso(skb))
190 return false;
191
192 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
193 !udp_test_bit(ACCEPT_L4, sk))
194 return true;
195
196 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
197 !udp_test_bit(ACCEPT_FRAGLIST, sk))
198 return true;
199
200 /* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
201 * land in a tunnel as the socket check in udp_gro_receive cannot be
202 * foolproof.
203 */
204 if (udp_encap_needed() &&
205 READ_ONCE(udp_sk(sk)->encap_rcv) &&
206 !(skb_shinfo(skb)->gso_type &
207 (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
208 return true;
209
210 return false;
211}
212
/* Opt @sk in to both L4 and fraglist GSO delivery (see udp_unexpected_gso). */
static inline void udp_allow_gso(struct sock *sk)
{
	udp_set_bit(ACCEPT_L4, sk);
	udp_set_bit(ACCEPT_FRAGLIST, sk);
}
218
/* Walk a port+address hash chain (non-RCU variant). */
#define udp_portaddr_for_each_entry(__sk, list) \
	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)

/* Continue a port+address chain walk from the current position of @__sk. */
#define udp_portaddr_for_each_entry_from(__sk) \
	hlist_for_each_entry_from(__sk, __sk_common.skc_portaddr_node)

/* Walk a port+address hash chain under RCU protection. */
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)

#if !IS_ENABLED(CONFIG_BASE_SMALL)
/* Walk a 4-tuple (lrpa) nulls hash chain under RCU protection. */
#define udp_lrpa_for_each_entry_rcu(__up, node, list) \
	hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
#endif
232
233static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
234{
235#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
236 return rcu_dereference(net->ipv4.udp_tunnel_gro[is_ipv6].sk);
237#else
238 return NULL;
239#endif
240}
241
242#endif /* _LINUX_UDP_H */