/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/gso.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/math.h>

/**
 * struct udp_skb_cb - UDP private variables
 *
 * @header: private variables used by IPv4/IPv6
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
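
/* Editor's usage sketch (not part of the original header, names hedged):
 * UDP keeps its per-packet state in the generic skb control block, so the
 * cast above is only valid while the skb is owned by the UDP layer.  A
 * hypothetical reader of the IPv4 options parsed earlier by the IP layer:
 *
 *	struct udp_skb_cb *ucb = UDP_SKB_CB(skb);
 *
 *	if (ucb->header.h4.opt.optlen)
 *		...inspect the cached IPv4 options...
 */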

/**
 * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
 *
 * @head:	head of list of sockets
 * @nulls_head:	head of list of sockets, only used by hash4
 * @count:	number of sockets in 'head' list
 * @lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	union {
		struct hlist_head	head;
		/* hash4 uses hlist_nulls to avoid moving wrongly onto another
		 * hlist, because rehash() can run concurrently with lookup().
		 */
		struct hlist_nulls_head	nulls_head;
	};
	int			count;
	spinlock_t		lock;
} __aligned(2 * sizeof(long));

/**
 * struct udp_hslot_main - UDP hash slot used by udp_table.hash2
 *
 * @hslot:	basic hash slot
 * @hash4_cnt:	number of sockets in hslot4 of the same
 *		(local port, local address)
 */
struct udp_hslot_main {
	struct udp_hslot	hslot; /* must be the first member */
#if !IS_ENABLED(CONFIG_BASE_SMALL)
	u32			hash4_cnt;
#endif
} __aligned(2 * sizeof(long));
#define UDP_HSLOT_MAIN(__hslot)	((struct udp_hslot_main *)(__hslot))
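
/* Editor's note: the UDP_HSLOT_MAIN() cast is safe only because 'hslot' is
 * the first member of struct udp_hslot_main, so a pointer to the slot and a
 * pointer to its container are numerically identical.  A minimal sketch of
 * the same idiom written with container_of(), which makes that assumption
 * explicit instead of relying on member order:
 *
 *	static inline struct udp_hslot_main *hslot2_main(struct udp_hslot *h)
 *	{
 *		return container_of(h, struct udp_hslot_main, hslot);
 *	}
 */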

/**
 * struct udp_table - UDP table
 *
 * @hash:	hash table, sockets are hashed on (local port)
 * @hash2:	hash table, sockets are hashed on (local port, local address)
 * @hash4:	hash table, connected sockets are hashed on
 *		(local port, local address, remote port, remote address)
 * @mask:	number of slots in hash tables, minus 1
 * @log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot_main	*hash2;
#if !IS_ENABLED(CONFIG_BASE_SMALL)
	struct udp_hslot	*hash4;
#endif
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;

static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     const struct net *net,
					     unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}

/*
 * For the secondary hash, net_hash_mix() is already folded in before
 * udp_hashslot2() is called; this is why it differs from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask].hslot;
}
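
/* Editor's illustration (an assumption about typical callers, not code from
 * this file): the secondary hash is usually derived with the portaddr
 * helpers from <net/ip.h>, which already mix in net_hash_mix(), e.g.:
 *
 *	unsigned int h2 = ipv4_portaddr_hash(net, inet_sk(sk)->inet_rcv_saddr,
 *					     inet_sk(sk)->inet_num);
 *	struct udp_hslot *hslot2 = udp_hashslot2(&udp_table, h2);
 */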

#if IS_ENABLED(CONFIG_BASE_SMALL)
static inline void udp_table_hash4_init(struct udp_table *table)
{
}

static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
					      unsigned int hash)
{
	BUILD_BUG();
	return NULL;
}

static inline bool udp_hashed4(const struct sock *sk)
{
	return false;
}

static inline unsigned int udp_hash4_slot_size(void)
{
	return 0;
}

static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
{
	return false;
}

static inline void udp_hash4_inc(struct udp_hslot *hslot2)
{
}

static inline void udp_hash4_dec(struct udp_hslot *hslot2)
{
}
#else /* !CONFIG_BASE_SMALL */

/* Must be called with table->hash2 initialized */
static inline void udp_table_hash4_init(struct udp_table *table)
{
	table->hash4 = (void *)(table->hash2 + (table->mask + 1));
	for (int i = 0; i <= table->mask; i++) {
		table->hash2[i].hash4_cnt = 0;

		INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
		table->hash4[i].count = 0;
		spin_lock_init(&table->hash4[i].lock);
	}
}
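
/* Editor's note on the layout assumed above: hash2 and hash4 are expected to
 * live in one contiguous allocation, with the (mask + 1) hash4 slots placed
 * directly after the (mask + 1) udp_hslot_main entries; the pointer
 * arithmetic on table->hash2 relies on that.  Each nulls list is seeded with
 * its slot index 'i' as the nulls value, so a lookup that drifts onto a
 * foreign list after a concurrent rehash sees a mismatched nulls marker and
 * can restart.
 */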

static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash4[hash & table->mask];
}

static inline bool udp_hashed4(const struct sock *sk)
{
	return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node);
}

static inline unsigned int udp_hash4_slot_size(void)
{
	return sizeof(struct udp_hslot);
}

static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
{
	return UDP_HSLOT_MAIN(hslot2)->hash4_cnt;
}

static inline void udp_hash4_inc(struct udp_hslot *hslot2)
{
	UDP_HSLOT_MAIN(hslot2)->hash4_cnt++;
}

static inline void udp_hash4_dec(struct udp_hslot *hslot2)
{
	UDP_HSLOT_MAIN(hslot2)->hash4_cnt--;
}
#endif /* CONFIG_BASE_SMALL */

extern struct proto udp_prot;

DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 * Generic checksumming routines for UDP v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}

/**
 * udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
 * @sk:	socket we are writing to
 * @skb: sk_buff containing the filled-in UDP header
 *	 (checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
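
/* Editor's sketch of the typical transmit-side pattern (based on how
 * udp_send_skb() in net/ipv4/udp.c combines these helpers; approximate,
 * not verbatim):
 *
 *	__wsum csum = udp_csum(skb);
 *
 *	uh->check = udp_v4_check(len, fl4->saddr, fl4->daddr, csum);
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 *
 * A computed checksum of zero must be transmitted as all-ones
 * (CSUM_MANGLED_0), because 0 in the UDP header means "no checksum".
 */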

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
}

typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
				     __be16 dport);

void udp_v6_early_demux(struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
INDIRECT_CALLABLE_DECLARE(int udpv6_recvmsg(struct sock *sk, struct msghdr *msg,
					    size_t len, int flags));

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6);

static inline int udp_lib_init_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	sk->sk_drop_counters = &up->drop_counters;
	skb_queue_head_init(&up->reader_queue);
	INIT_HLIST_NODE(&up->tunnel_list);
	up->forward_threshold = sk->sk_rcvbuf >> 2;
	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);

	up->udp_prod_queue = kzalloc_objs(*up->udp_prod_queue, nr_node_ids);
	if (!up->udp_prod_queue)
		return -ENOMEM;
	for (int i = 0; i < nr_node_ids; i++)
		init_llist_head(&up->udp_prod_queue[i].ll_root);
	return 0;
}
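
/* Editor's note (hedged reading of the code above): one llist-based
 * producer queue is allocated per NUMA node (nr_node_ids entries), so
 * softirq producers on different nodes need not contend on a single queue
 * head.  forward_threshold caps how much freed receive memory may stay
 * accounted before being released back, at a quarter of the receive
 * buffer; e.g. with a default sk_rcvbuf of 212992 bytes the threshold is
 * 212992 >> 2 = 53248 bytes.
 */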

static inline void udp_drops_inc(struct sock *sk)
{
	numa_drop_add(&udp_sk(sk)->drop_counters, 1);
}

/* hash routines shared between UDPv4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4);
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
		const __be32 faddr, const __be16 fport);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

/* hash4 routines shared between UDPv4/6 */
#if IS_ENABLED(CONFIG_BASE_SMALL)
static inline void udp_lib_hash4(struct sock *sk, u16 hash)
{
}

static inline void udp4_hash4(struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
void udp_lib_hash4(struct sock *sk, u16 hash);
void udp4_hash4(struct sock *sk);
#endif /* CONFIG_BASE_SMALL */

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, but the caller has
			 * indicated an Ethernet packet, so hash its MAC
			 * addresses instead.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet;
			 * fall back to a consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the possibility that any useful information is leaked to
	 * an attacker.  Only the upper 16 bits are relevant when computing a
	 * 16-bit port value.
	 */
	hash ^= hash << 16;

	return htons(reciprocal_scale(hash, max - min + 1) + min);
}
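
/* Worked example (editor's illustration): reciprocal_scale(h, n) computes
 * ((u64)h * n) >> 32, mapping a 32-bit hash uniformly onto [0, n).  With
 * the common ephemeral range min = 32768, max = 60999, the span is
 * n = 28232 ports, so h = 0x80000000 lands on 32768 + 14116 = 46884.
 * The 'hash ^= hash << 16' step folds the low 16 bits into the high ones
 * first, since only the upper bits survive the multiply-shift.
 */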

static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(const struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
			       int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, &off, err);
}

enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *sk, struct msghdr *msg,
					  size_t len, int flags));
void udp_splice_eof(struct socket *sock);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, int *karg);
int udp_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
			       __be16 sport, __be32 daddr, __be16 dport,
			       int dif, int sdif, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(const struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(const struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}
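
/* Editor's note: skb->dev_scratch is an unsigned long in struct sk_buff
 * that the driver layer is done with by the time UDP owns the skb, so UDP
 * reuses it as scratch space.  On 64-bit kernels the whole struct above
 * (u32 + u16 + two one-byte bools = 8 bytes) aliases that single word.
 * The u16 'len' suffices because the UDP length field is itself 16 bits
 * and the 8-byte header has already been pulled when the value is cached.
 */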

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	return copy_to_iter_full(skb->data + off, len, to) ? 0 : -EFAULT;
}

/*
 * SNMP statistics for UDP
 */
#define __UDP_INC_STATS(net, field)	\
	__SNMP_INC_STATS((net)->mib.udp_statistics, field)
#define UDP_INC_STATS(net, field)	\
	SNMP_INC_STATS((net)->mib.udp_statistics, field)
#define __UDP6_INC_STATS(net, field)	\
	__SNMP_INC_STATS((net)->mib.udp_stats_in6, field)
#define UDP6_INC_STATS(net, field)	\
	SNMP_INC_STATS((net)->mib.udp_stats_in6, field)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? sock_net(sk)->mib.udp_statistics :			\
		sock_net(sk)->mib.udp_stats_in6;			\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	sock_net(sk)->mib.udp_statistics;				\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
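
/* Editor's usage sketch: the double-underscore variants assume softirq/BH
 * context and use the cheaper non-atomic per-cpu update, while the plain
 * ones are safe from process context.  E.g., on a receive error one would
 * bump the family-appropriate MIB with:
 *
 *	__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
 */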

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t			family;
};

struct udp_iter_state {
	struct seq_net_private	p;
	int			bucket;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;
	int drop_count;

	/*
	 * Segmentation in the UDP receive path is only for UDP GRO; drop
	 * UDP fragmentation offload (UFO) packets.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
		drop_count = 1;
		goto drop;
	}

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
	 * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
	 * Reset in this specific case, where PARTIAL is both correct and
	 * required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, so there is no need to save and
	 * restore any CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		drop_count = skb_shinfo(skb)->gso_segs;
		goto drop;
	}

	consume_skb(skb);
	return segs;

drop:
	sk_drops_add(sk, drop_count);
	SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
	kfree_skb(skb);
	return NULL;
}
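
/* Editor's caller sketch (an approximation of udp_queue_rcv_skb() in
 * net/ipv4/udp.c, not verbatim): the returned list is walked segment by
 * segment, and the original skb must not be touched again because
 * udp_rcv_segment() has either consumed or freed it:
 *
 *	struct sk_buff *next, *segs;
 *
 *	segs = udp_rcv_segment(sk, skb, true);
 *	skb_list_walk_safe(segs, skb, next) {
 *		__skb_pull(skb, skb_transport_offset(skb));
 *		udp_post_segment_fix_csum(skb);
 *		...queue one segment to the socket...
 *	}
 */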

static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
	/* UDP packets generated with UDP_SEGMENT and traversing:
	 *
	 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
	 *
	 * can reach a UDP socket with CHECKSUM_NONE, because
	 * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
	 * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
	 * have a valid checksum, as the GRO engine validates the UDP csum
	 * before the aggregation and nobody strips such info in between.
	 * Instead of adding another check in the tunnel fastpath, we can force
	 * a valid csum after the segmentation.
	 */
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
}

#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
#endif

#endif /* _UDP_H */