Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'net-better-drop-accounting'

Eric Dumazet says:

====================
net: better drop accounting

Incrementing sk->sk_drops for every dropped packet can
cause serious cache line contention under DOS.

Add optional sk->sk_drop_counters pointer so that
protocols can opt-in to use two dedicated cache lines
to hold drop counters.

Convert UDP and RAW to use this infrastructure.

Tested on UDP (see patch 4/5 for details)

Before:

nstat -n ; sleep 1 ; nstat | grep Udp
Udp6InDatagrams 615091 0.0
Udp6InErrors 3904277 0.0
Udp6RcvbufErrors 3904277 0.0

After:

nstat -n ; sleep 1 ; nstat | grep Udp
Udp6InDatagrams 816281 0.0
Udp6InErrors 7497093 0.0
Udp6RcvbufErrors 7497093 0.0
====================

Link: https://patch.msgid.link/20250826125031.1578842-1-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
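
For readers skimming the diff below, the opt-in pattern is small: a protocol embeds a struct socket_drop_counters in its own socket structure and points sk->sk_drop_counters at it at socket init time, after which every drop goes through the new sk_drops_* helpers instead of touching sk->sk_drops directly. A condensed sketch of the mechanism, paraphrased from the include/net/sock.h hunk below (abbreviated, not a verbatim copy of the kernel code):

struct socket_drop_counters {
    atomic_t drops0 ____cacheline_aligned_in_smp;  /* updated from even NUMA nodes */
    atomic_t drops1 ____cacheline_aligned_in_smp;  /* updated from odd NUMA nodes */
};

static inline void sk_drops_add(struct sock *sk, int segs)
{
    struct socket_drop_counters *sdc = sk->sk_drop_counters;

    if (sdc)    /* opted-in socket: spread updates across two dedicated cache lines */
        atomic_add(segs, numa_node_id() % 2 ? &sdc->drops1 : &sdc->drops0);
    else        /* default: the legacy single sk->sk_drops counter */
        atomic_add(segs, &sk->sk_drops);
}

A protocol opts in with a single assignment at init time (sk->sk_drop_counters = &up->drop_counters; for UDP in the include/net/udp.h hunk), and readers such as procfs, sock_diag and the BPF iterators go through sk_drops_read(), which sums both halves.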

28 files changed, +114 -56

include/linux/ipv6.h  (+1 -1)
···
     __u32 offset;   /* checksum offset */
     struct icmp6_filter filter;
     __u32 ip6mr_table;
-
+    struct socket_drop_counters drop_counters;
     struct ipv6_pinfo inet6;
 };

include/linux/skmsg.h  (+1 -1)
···
 static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
 {
-    sk_drops_add(sk, skb);
+    sk_drops_skbadd(sk, skb);
     kfree_skb(skb);
 }

include/linux/udp.h  (+1 -0)
···
      * the last UDP socket cacheline.
      */
     struct hlist_node tunnel_list;
+    struct socket_drop_counters drop_counters;
 };
 
 #define udp_test_bit(nr, sk) \

include/net/raw.h  (+1 -0)
···
     struct inet_sock inet;
     struct icmp_filter filter;
     u32 ipmr_table;
+    struct socket_drop_counters drop_counters;
 };
 
 #define raw_sk(ptr) container_of_const(ptr, struct raw_sock, inet.sk)

include/net/sock.h  (+53 -3)
···
 typedef __u32 __bitwise __portpair;
 typedef __u64 __bitwise __addrpair;
 
+struct socket_drop_counters {
+    atomic_t drops0 ____cacheline_aligned_in_smp;
+    atomic_t drops1 ____cacheline_aligned_in_smp;
+};
+
 /**
  * struct sock_common - minimal network layer representation of sockets
  * @skc_daddr: Foreign IPv4 addr
···
  * @sk_err_soft: errors that don't cause failure but are the cause of a
  *               persistent failure not just 'timed out'
  * @sk_drops: raw/udp drops counter
+ * @sk_drop_counters: optional pointer to socket_drop_counters
  * @sk_ack_backlog: current listen backlog
  * @sk_max_ack_backlog: listen backlog set in listen()
  * @sk_uid: user id of owner
···
 #ifdef CONFIG_XFRM
     struct xfrm_policy __rcu *sk_policy[2];
 #endif
+    struct socket_drop_counters *sk_drop_counters;
     __cacheline_group_end(sock_read_rxtx);
 
     __cacheline_group_begin(sock_write_rxtx);
···
 #define sock_skb_cb_check_size(size) \
     BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
 
+static inline void sk_drops_add(struct sock *sk, int segs)
+{
+    struct socket_drop_counters *sdc = sk->sk_drop_counters;
+
+    if (sdc) {
+        int n = numa_node_id() % 2;
+
+        if (n)
+            atomic_add(segs, &sdc->drops1);
+        else
+            atomic_add(segs, &sdc->drops0);
+    } else {
+        atomic_add(segs, &sk->sk_drops);
+    }
+}
+
+static inline void sk_drops_inc(struct sock *sk)
+{
+    sk_drops_add(sk, 1);
+}
+
+static inline int sk_drops_read(const struct sock *sk)
+{
+    const struct socket_drop_counters *sdc = sk->sk_drop_counters;
+
+    if (sdc) {
+        DEBUG_NET_WARN_ON_ONCE(atomic_read(&sk->sk_drops));
+        return atomic_read(&sdc->drops0) + atomic_read(&sdc->drops1);
+    }
+    return atomic_read(&sk->sk_drops);
+}
+
+static inline void sk_drops_reset(struct sock *sk)
+{
+    struct socket_drop_counters *sdc = sk->sk_drop_counters;
+
+    if (sdc) {
+        atomic_set(&sdc->drops0, 0);
+        atomic_set(&sdc->drops1, 0);
+    }
+    atomic_set(&sk->sk_drops, 0);
+}
+
 static inline void
 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
 {
     SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
-                      atomic_read(&sk->sk_drops) : 0;
+                      sk_drops_read(sk) : 0;
 }
 
-static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+static inline void sk_drops_skbadd(struct sock *sk, const struct sk_buff *skb)
 {
     int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 
-    atomic_add(segs, &sk->sk_drops);
+    sk_drops_add(sk, segs);
 }
 
 static inline ktime_t sock_read_timestamp(struct sock *sk)
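
The helpers above carry the design: sk_drops_add() picks one of the two ____cacheline_aligned_in_smp counters based on numa_node_id() % 2, so CPUs on odd and even NUMA nodes dirty different cache lines instead of all contending on sk->sk_drops; sk_drops_read() sums both halves (and warns if an opted-in socket still has a non-zero sk_drops), while sk_drops_reset() clears whichever counters are in use. Sockets that never set sk_drop_counters keep the old single-counter behaviour.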

include/net/tcp.h  (+1 -1)
···
  */
 static inline void tcp_listendrop(const struct sock *sk)
 {
-    atomic_inc(&((struct sock *)sk)->sk_drops);
+    sk_drops_inc((struct sock *)sk);
     __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }

include/net/udp.h  (+2 -1)
···
 {
     struct udp_sock *up = udp_sk(sk);
 
+    sk->sk_drop_counters = &up->drop_counters;
     skb_queue_head_init(&up->reader_queue);
     INIT_HLIST_NODE(&up->tunnel_list);
     up->forward_threshold = sk->sk_rcvbuf >> 2;
···
     return segs;
 
 drop:
-    atomic_add(drop_count, &sk->sk_drops);
+    sk_drops_add(sk, drop_count);
     SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
     kfree_skb(skb);
     return NULL;

net/core/datagram.c  (+1 -1)
···
         spin_unlock_bh(&sk_queue->lock);
     }
 
-    atomic_inc(&sk->sk_drops);
+    sk_drops_inc(sk);
     return err;
 }
 EXPORT_SYMBOL(__sk_queue_drop_skb);

net/core/sock.c  (+9 -7)
···
     struct sk_buff_head *list = &sk->sk_receive_queue;
 
     if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         trace_sock_rcvqueue_full(sk, skb);
         return -ENOMEM;
     }
 
     if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         return -ENOBUFS;
     }
···
     skb->dev = NULL;
 
     if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
         goto discard_and_relse;
     }
···
         reason = SKB_DROP_REASON_PFMEMALLOC;
         if (err == -ENOBUFS)
             reason = SKB_DROP_REASON_SOCKET_BACKLOG;
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         goto discard_and_relse;
     }
···
     newsk->sk_wmem_queued = 0;
     newsk->sk_forward_alloc = 0;
     newsk->sk_reserved_mem = 0;
-    atomic_set(&newsk->sk_drops, 0);
+    DEBUG_NET_WARN_ON_ONCE(newsk->sk_drop_counters);
+    sk_drops_reset(newsk);
     newsk->sk_send_head = NULL;
     newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
     atomic_set(&newsk->sk_zckey, 0);
···
      */
     smp_wmb();
     refcount_set(&sk->sk_refcnt, 1);
-    atomic_set(&sk->sk_drops, 0);
+    sk_drops_reset(sk);
 }
 EXPORT_SYMBOL(sock_init_data_uid);
···
     mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
     mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
     mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
-    mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+    mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
···
 #ifdef CONFIG_MEMCG
     CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
 #endif
+    CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_drop_counters);
 
     CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
     CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);

net/ipv4/ping.c  (+1 -1)
···
         from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
         0, sock_i_ino(sp),
         refcount_read(&sp->sk_refcnt), sp,
-        atomic_read(&sp->sk_drops));
+        sk_drops_read(sp));
 }
 
 static int ping_v4_seq_show(struct seq_file *seq, void *v)

net/ipv4/raw.c  (+4 -3)
···
     if (atomic_read(&sk->sk_rmem_alloc) >=
         READ_ONCE(sk->sk_rcvbuf)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         continue;
     }
···
 int raw_rcv(struct sock *sk, struct sk_buff *skb)
 {
     if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
         return NET_RX_DROP;
     }
···
 {
     struct raw_sock *rp = raw_sk(sk);
 
+    sk->sk_drop_counters = &rp->drop_counters;
     if (inet_sk(sk)->inet_num == IPPROTO_ICMP)
         memset(&rp->filter, 0, sizeof(rp->filter));
     return 0;
···
         0, 0L, 0,
         from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
         0, sock_i_ino(sp),
-        refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+        refcount_read(&sp->sk_refcnt), sp, sk_drops_read(sp));
 }
 
 static int raw_seq_show(struct seq_file *seq, void *v)

net/ipv4/tcp_input.c  (+1 -1)
···
 noinline_for_tracing static void
 tcp_drop_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
 {
-    sk_drops_add(sk, skb);
+    sk_drops_skbadd(sk, skb);
     sk_skb_reason_drop(sk, skb, reason);
 }

net/ipv4/tcp_ipv4.c  (+2 -2)
···
                     &iph->saddr, &iph->daddr,
                     AF_INET, dif, sdif);
         if (unlikely(drop_reason)) {
-            sk_drops_add(sk, skb);
+            sk_drops_skbadd(sk, skb);
             reqsk_put(req);
             goto discard_it;
         }
···
     return 0;
 
 discard_and_relse:
-    sk_drops_add(sk, skb);
+    sk_drops_skbadd(sk, skb);
     if (refcounted)
         sock_put(sk);
     goto discard_it;

net/ipv4/udp.c  (+7 -7)
···
     atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 
 drop:
-    atomic_inc(&sk->sk_drops);
+    sk_drops_inc(sk);
     busylock_release(busy);
     return err;
 }
···
                  IS_UDPLITE(sk));
         __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
                  IS_UDPLITE(sk));
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         __skb_unlink(skb, rcvq);
         *total += skb->truesize;
         kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
···
     __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
     __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
-    atomic_inc(&sk->sk_drops);
+    sk_drops_inc(sk);
     kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
     goto try_again;
 }
···
     if (unlikely(err)) {
         if (!peeking) {
-            atomic_inc(&sk->sk_drops);
+            sk_drops_inc(sk);
             UDP_INC_STATS(sock_net(sk),
                       UDP_MIB_INERRORS, is_udplite);
         }
···
     __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
     __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-    atomic_inc(&sk->sk_drops);
+    sk_drops_inc(sk);
     sk_skb_reason_drop(sk, skb, drop_reason);
     return -1;
 }
···
     nskb = skb_clone(skb, GFP_ATOMIC);
 
     if (unlikely(!nskb)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                  IS_UDPLITE(sk));
         __UDP_INC_STATS(net, UDP_MIB_INERRORS,
···
            from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
            0, sock_i_ino(sp),
            refcount_read(&sp->sk_refcnt), sp,
-           atomic_read(&sp->sk_drops));
+           sk_drops_read(sp));
 }
 
 int udp4_seq_show(struct seq_file *seq, void *v)

net/ipv6/datagram.c  (+1 -1)
···
            0,
            sock_i_ino(sp),
            refcount_read(&sp->sk_refcnt), sp,
-           atomic_read(&sp->sk_drops));
+           sk_drops_read(sp));
 }

net/ipv6/raw.c  (+5 -4)
···
     if (atomic_read(&sk->sk_rmem_alloc) >=
         READ_ONCE(sk->sk_rcvbuf)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         continue;
     }
···
     if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
         skb_checksum_complete(skb)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
         return NET_RX_DROP;
     }
···
     struct raw6_sock *rp = raw6_sk(sk);
 
     if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
         return NET_RX_DROP;
     }
···
     if (inet_test_bit(HDRINCL, sk)) {
         if (skb_checksum_complete(skb)) {
-            atomic_inc(&sk->sk_drops);
+            sk_drops_inc(sk);
             sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
             return NET_RX_DROP;
         }
···
 {
     struct raw6_sock *rp = raw6_sk(sk);
 
+    sk->sk_drop_counters = &rp->drop_counters;
     switch (inet_sk(sk)->inet_num) {
     case IPPROTO_ICMPV6:
         rp->checksum = 1;

net/ipv6/tcp_ipv6.c  (+2 -2)
···
                     &hdr->saddr, &hdr->daddr,
                     AF_INET6, dif, sdif);
         if (drop_reason) {
-            sk_drops_add(sk, skb);
+            sk_drops_skbadd(sk, skb);
             reqsk_put(req);
             goto discard_it;
         }
···
     return 0;
 
 discard_and_relse:
-    sk_drops_add(sk, skb);
+    sk_drops_skbadd(sk, skb);
     if (refcounted)
         sock_put(sk);
     goto discard_it;

net/ipv6/udp.c  (+3 -3)
···
     }
     if (unlikely(err)) {
         if (!peeking) {
-            atomic_inc(&sk->sk_drops);
+            sk_drops_inc(sk);
             SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
         }
         kfree_skb(skb);
···
     __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
     __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-    atomic_inc(&sk->sk_drops);
+    sk_drops_inc(sk);
     sk_skb_reason_drop(sk, skb, drop_reason);
     return -1;
 }
···
     }
     nskb = skb_clone(skb, GFP_ATOMIC);
     if (unlikely(!nskb)) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                  IS_UDPLITE(sk));
         __UDP6_INC_STATS(net, UDP_MIB_INERRORS,

net/iucv/af_iucv.c  (+2 -2)
···
     IUCV_SKB_CB(skb)->offset = 0;
     if (sk_filter(sk, skb)) {
-        atomic_inc(&sk->sk_drops);    /* skb rejected by filter */
+        sk_drops_inc(sk);    /* skb rejected by filter */
         kfree_skb(skb);
         return;
     }
···
     skb_reset_network_header(skb);
     IUCV_SKB_CB(skb)->offset = 0;
     if (sk_filter(sk, skb)) {
-        atomic_inc(&sk->sk_drops);    /* skb rejected by filter */
+        sk_drops_inc(sk);    /* skb rejected by filter */
         kfree_skb(skb);
         return NET_RX_SUCCESS;
     }

net/mptcp/protocol.c  (+1 -1)
···
 static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
 {
-    sk_drops_add(sk, skb);
+    sk_drops_skbadd(sk, skb);
     __kfree_skb(skb);
 }

net/netlink/af_netlink.c  (+2 -2)
···
             sk_error_report(sk);
         }
     }
-    atomic_inc(&sk->sk_drops);
+    sk_drops_inc(sk);
 }
 
 static void netlink_rcv_wake(struct sock *sk)
···
            sk_wmem_alloc_get(s),
            READ_ONCE(nlk->cb_running),
            refcount_read(&s->sk_refcnt),
-           atomic_read(&s->sk_drops),
+           sk_drops_read(s),
            sock_i_ino(s)
         );

net/packet/af_packet.c  (+1 -1)
···
 drop_n_acct:
     atomic_inc(&po->tp_drops);
-    atomic_inc(&sk->sk_drops);
+    sk_drops_inc(sk);
     drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
 
 drop_n_restore:

net/phonet/pep.c  (+3 -3)
···
     case PNS_PEP_CTRL_REQ:
         if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
-            atomic_inc(&sk->sk_drops);
+            sk_drops_inc(sk);
             break;
         }
         __skb_pull(skb, 4);
···
     }
 
     if (pn->rx_credits == 0) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         err = -ENOBUFS;
         break;
     }
···
     }
 
     if (pn->rx_credits == 0) {
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         err = NET_RX_DROP;
         break;
     }

net/phonet/socket.c  (+1 -1)
···
            from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
            sock_i_ino(sk),
            refcount_read(&sk->sk_refcnt), sk,
-           atomic_read(&sk->sk_drops));
+           sk_drops_read(sk));
     }
     seq_pad(seq, '\n');
     return 0;

net/sctp/diag.c  (+1 -1)
···
     mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
     mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
     mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
-    mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+    mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
 
     if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
         goto errout;

net/tipc/socket.c  (+3 -3)
···
     else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
         trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
                    "err_overload2!");
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         err = TIPC_ERR_OVERLOAD;
     }
···
         trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
         /* Overload => reject message back to sender */
         onode = tipc_own_addr(sock_net(sk));
-        atomic_inc(&sk->sk_drops);
+        sk_drops_inc(sk);
         if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
             trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
                           "@sk_enqueue!");
···
         nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
                 skb_queue_len(&sk->sk_write_queue)) ||
         nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
-                atomic_read(&sk->sk_drops)))
+                sk_drops_read(sk)))
         goto stat_msg_cancel;
 
     if (tsk->cong_link_cnt &&

tools/testing/selftests/bpf/progs/bpf_iter_udp4.c  (+2 -1)
···
            0, 0L, 0, ctx->uid, 0,
            sock_i_ino(&inet->sk),
            inet->sk.sk_refcnt.refs.counter, udp_sk,
-           inet->sk.sk_drops.counter);
+           udp_sk->drop_counters.drops0.counter +
+           udp_sk->drop_counters.drops1.counter);
 
     return 0;
 }

tools/testing/selftests/bpf/progs/bpf_iter_udp6.c  (+2 -2)
···
            0, 0L, 0, ctx->uid, 0,
            sock_i_ino(&inet->sk),
            inet->sk.sk_refcnt.refs.counter, udp_sk,
-           inet->sk.sk_drops.counter);
-
+           udp_sk->drop_counters.drops0.counter +
+           udp_sk->drop_counters.drops1.counter);
     return 0;
 }
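
Because opted-in UDP sockets no longer update sk->sk_drops, the two bpf_iter selftest programs above now report the sum of drops0 and drops1 read directly from the drop_counters embedded in udp_sock.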