Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

udp: udplite is unlikely

Add some unlikely() annotations to speed up the fast path,
at least with clang compiler.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260105101719.2378881-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Eric Dumazet and committed by Jakub Kicinski
e9cd04b2 e4bc5dd5

+11 -9
+1 -1
include/linux/udp.h
··· 236  236      hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
    237  237  #endif
    238  238
    239       - #define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
         239  + #define IS_UDPLITE(__sk) (unlikely(__sk->sk_protocol == IPPROTO_UDPLITE))
    240  240
    241  241  static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
    242  242  {
+4 -4
include/net/udp.h
··· 527  527   * SNMP statistics for UDP and UDP-Lite
    528  528   */
    529  529  #define UDP_INC_STATS(net, field, is_udplite) do { \
    530       -     if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
         530  +     if (unlikely(is_udplite)) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
    531  531      else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
    532  532  #define __UDP_INC_STATS(net, field, is_udplite) do { \
    533       -     if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
         533  +     if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
    534  534      else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
    535  535
    536  536  #define __UDP6_INC_STATS(net, field, is_udplite) do { \
    537       -     if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
         537  +     if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
    538  538      else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
    539  539  } while(0)
    540  540  #define UDP6_INC_STATS(net, field, __lite) do { \
    541       -     if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
         541  +     if (unlikely(__lite)) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
    542  542      else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
    543  543  } while(0)
    544  544
+3 -2
net/ipv4/udp.c
··· 1193 1193
    1194 1194  send:
    1195 1195      err = ip_send_skb(sock_net(sk), skb);
    1196      -     if (err) {
         1196 +     if (unlikely(err)) {
    1197 1197          if (err == -ENOBUFS &&
    1198 1198              !inet_test_bit(RECVERR, sk)) {
    1199 1199              UDP_INC_STATS(sock_net(sk),
··· 2429 2429      /*
    2430 2430       * UDP-Lite specific tests, ignored on UDP sockets
    2431 2431       */
    2432      -     if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
         2432 +     if (unlikely(udp_test_bit(UDPLITE_RECV_CC, sk) &&
         2433 +                  UDP_SKB_CB(skb)->partial_cov)) {
    2433 2434          u16 pcrlen = READ_ONCE(up->pcrlen);
    2434 2435
    2435 2436          /*
+3 -2
net/ipv6/udp.c
··· 875  875       /*
    876  876        * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
    877  877        */
    878       -     if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
         878  +     if (unlikely(udp_test_bit(UDPLITE_RECV_CC, sk) &&
         879  +                  UDP_SKB_CB(skb)->partial_cov)) {
    879  880           u16 pcrlen = READ_ONCE(up->pcrlen);
    880  881
    881  882           if (pcrlen == 0) { /* full coverage was set */
··· 1440 1439
    1441 1440  send:
    1442 1441      err = ip6_send_skb(skb);
    1443      -     if (err) {
         1442 +     if (unlikely(err)) {
    1444 1443      if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
    1445 1444          UDP6_INC_STATS(sock_net(sk),
    1446 1445              UDP_MIB_SNDBUFERRORS, is_udplite);