Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

udp: Remove UDP-Lite SNMP stats.

Since UDP and UDP-Lite share most of the code, we have had
to check the protocol every time we increment SNMP stats.

Now that the UDP-Lite paths are dead, let's remove UDP-Lite
SNMP stats.

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20260311052020.1213705-6-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Kuniyuki Iwashima and committed by Jakub Kicinski
7accba6f 56520b39

+71 -145
-5
include/net/netns/mib.h
··· 28 28 DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics); 29 29 #endif 30 30 31 - DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); 32 - #if IS_ENABLED(CONFIG_IPV6) 33 - DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6); 34 - #endif 35 - 36 31 DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); 37 32 DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics); 38 33 #if IS_ENABLED(CONFIG_IPV6)
+18 -28
include/net/udp.h
··· 529 529 } 530 530 531 531 /* 532 - * SNMP statistics for UDP and UDP-Lite 532 + * SNMP statistics for UDP 533 533 */ 534 - #define UDP_INC_STATS(net, field, is_udplite) do { \ 535 - if (unlikely(is_udplite)) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \ 536 - else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0) 537 - #define __UDP_INC_STATS(net, field, is_udplite) do { \ 538 - if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \ 539 - else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0) 540 - 541 - #define __UDP6_INC_STATS(net, field, is_udplite) do { \ 542 - if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \ 543 - else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \ 544 - } while(0) 545 - #define UDP6_INC_STATS(net, field, __lite) do { \ 546 - if (unlikely(__lite)) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \ 547 - else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \ 548 - } while(0) 534 + #define __UDP_INC_STATS(net, field) \ 535 + __SNMP_INC_STATS((net)->mib.udp_statistics, field) 536 + #define UDP_INC_STATS(net, field) \ 537 + SNMP_INC_STATS((net)->mib.udp_statistics, field) 538 + #define __UDP6_INC_STATS(net, field) \ 539 + __SNMP_INC_STATS((net)->mib.udp_stats_in6, field) 540 + #define UDP6_INC_STATS(net, field) \ 541 + SNMP_INC_STATS((net)->mib.udp_stats_in6, field) 549 542 550 543 #if IS_ENABLED(CONFIG_IPV6) 551 - #define __UDPX_MIB(sk, ipv4) \ 552 - ({ \ 553 - ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \ 554 - sock_net(sk)->mib.udp_statistics) : \ 555 - (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \ 556 - sock_net(sk)->mib.udp_stats_in6); \ 557 - }) 544 + #define __UDPX_MIB(sk, ipv4) \ 545 + ({ \ 546 + ipv4 ? sock_net(sk)->mib.udp_statistics : \ 547 + sock_net(sk)->mib.udp_stats_in6; \ 548 + }) 558 549 #else 559 - #define __UDPX_MIB(sk, ipv4) \ 560 - ({ \ 561 - IS_UDPLITE(sk) ? 
sock_net(sk)->mib.udplite_statistics : \ 562 - sock_net(sk)->mib.udp_statistics; \ 563 - }) 550 + #define __UDPX_MIB(sk, ipv4) \ 551 + ({ \ 552 + sock_net(sk)->mib.udp_statistics; \ 553 + }) 564 554 #endif 565 555 566 556 #define __UDPX_INC_STATS(sk, field) \
-6
net/ipv4/af_inet.c
··· 1733 1733 net->mib.udp_statistics = alloc_percpu(struct udp_mib); 1734 1734 if (!net->mib.udp_statistics) 1735 1735 goto err_udp_mib; 1736 - net->mib.udplite_statistics = alloc_percpu(struct udp_mib); 1737 - if (!net->mib.udplite_statistics) 1738 - goto err_udplite_mib; 1739 1736 net->mib.icmp_statistics = alloc_percpu(struct icmp_mib); 1740 1737 if (!net->mib.icmp_statistics) 1741 1738 goto err_icmp_mib; ··· 1746 1749 err_icmpmsg_mib: 1747 1750 free_percpu(net->mib.icmp_statistics); 1748 1751 err_icmp_mib: 1749 - free_percpu(net->mib.udplite_statistics); 1750 - err_udplite_mib: 1751 1752 free_percpu(net->mib.udp_statistics); 1752 1753 err_udp_mib: 1753 1754 free_percpu(net->mib.net_statistics); ··· 1761 1766 { 1762 1767 kfree(net->mib.icmpmsg_statistics); 1763 1768 free_percpu(net->mib.icmp_statistics); 1764 - free_percpu(net->mib.udplite_statistics); 1765 1769 free_percpu(net->mib.udp_statistics); 1766 1770 free_percpu(net->mib.net_statistics); 1767 1771 free_percpu(net->mib.ip_statistics);
-13
net/ipv4/proc.c
··· 444 444 for (i = 0; i < udp_cnt; i++) 445 445 seq_printf(seq, " %lu", buff[i]); 446 446 447 - memset(buff, 0, udp_cnt * sizeof(unsigned long)); 448 - 449 - /* the UDP and UDP-Lite MIBs are the same */ 450 - seq_puts(seq, "\nUdpLite:"); 451 - snmp_get_cpu_field_batch_cnt(buff, snmp4_udp_list, 452 - udp_cnt, 453 - net->mib.udplite_statistics); 454 - for (i = 0; i < udp_cnt; i++) 455 - seq_printf(seq, " %s", snmp4_udp_list[i].name); 456 - seq_puts(seq, "\nUdpLite:"); 457 - for (i = 0; i < udp_cnt; i++) 458 - seq_printf(seq, " %lu", buff[i]); 459 - 460 447 seq_putc(seq, '\n'); 461 448 return 0; 462 449 }
+32 -43
net/ipv4/udp.c
··· 1198 1198 if (unlikely(err)) { 1199 1199 if (err == -ENOBUFS && 1200 1200 !inet_test_bit(RECVERR, sk)) { 1201 - UDP_INC_STATS(sock_net(sk), 1202 - UDP_MIB_SNDBUFERRORS, is_udplite); 1201 + UDP_INC_STATS(sock_net(sk), UDP_MIB_SNDBUFERRORS); 1203 1202 err = 0; 1204 1203 } 1205 - } else 1206 - UDP_INC_STATS(sock_net(sk), 1207 - UDP_MIB_OUTDATAGRAMS, is_udplite); 1204 + } else { 1205 + UDP_INC_STATS(sock_net(sk), UDP_MIB_OUTDATAGRAMS); 1206 + } 1208 1207 return err; 1209 1208 } 1210 1209 ··· 1534 1535 * things). We could add another new stat but at least for now that 1535 1536 * seems like overkill. 1536 1537 */ 1537 - if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 1538 - UDP_INC_STATS(sock_net(sk), 1539 - UDP_MIB_SNDBUFERRORS, is_udplite); 1540 - } 1538 + if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 1539 + UDP_INC_STATS(sock_net(sk), UDP_MIB_SNDBUFERRORS); 1540 + 1541 1541 return err; 1542 1542 1543 1543 do_confirm: ··· 1895 1897 1896 1898 while ((skb = skb_peek(rcvq)) != NULL) { 1897 1899 if (udp_lib_checksum_complete(skb)) { 1898 - __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, 1899 - IS_UDPLITE(sk)); 1900 - __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, 1901 - IS_UDPLITE(sk)); 1900 + struct net *net = sock_net(sk); 1901 + 1902 + __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS); 1903 + __UDP_INC_STATS(net, UDP_MIB_INERRORS); 1902 1904 udp_drops_inc(sk); 1903 1905 __skb_unlink(skb, rcvq); 1904 1906 *total += skb->truesize; ··· 2050 2052 return err; 2051 2053 2052 2054 if (udp_lib_checksum_complete(skb)) { 2053 - int is_udplite = IS_UDPLITE(sk); 2054 2055 struct net *net = sock_net(sk); 2055 2056 2056 - __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite); 2057 - __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite); 2057 + __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS); 2058 + __UDP_INC_STATS(net, UDP_MIB_INERRORS); 2058 2059 udp_drops_inc(sk); 2059 2060 kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM); 2060 2061 goto 
try_again; ··· 2078 2081 unsigned int ulen, copied; 2079 2082 int off, err, peeking = flags & MSG_PEEK; 2080 2083 int is_udplite = IS_UDPLITE(sk); 2084 + struct net *net = sock_net(sk); 2081 2085 bool checksum_valid = false; 2082 2086 2083 2087 if (flags & MSG_ERRQUEUE) ··· 2126 2128 if (unlikely(err)) { 2127 2129 if (!peeking) { 2128 2130 udp_drops_inc(sk); 2129 - UDP_INC_STATS(sock_net(sk), 2130 - UDP_MIB_INERRORS, is_udplite); 2131 + UDP_INC_STATS(net, UDP_MIB_INERRORS); 2131 2132 } 2132 2133 kfree_skb(skb); 2133 2134 return err; 2134 2135 } 2135 2136 2136 2137 if (!peeking) 2137 - UDP_INC_STATS(sock_net(sk), 2138 - UDP_MIB_INDATAGRAMS, is_udplite); 2138 + UDP_INC_STATS(net, UDP_MIB_INDATAGRAMS); 2139 2139 2140 2140 sock_recv_cmsgs(msg, sk, skb); 2141 2141 ··· 2166 2170 csum_copy_err: 2167 2171 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, 2168 2172 udp_skb_destructor)) { 2169 - UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 2170 - UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 2173 + UDP_INC_STATS(net, UDP_MIB_CSUMERRORS); 2174 + UDP_INC_STATS(net, UDP_MIB_INERRORS); 2171 2175 } 2172 2176 kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM); 2173 2177 ··· 2367 2371 2368 2372 rc = __udp_enqueue_schedule_skb(sk, skb); 2369 2373 if (rc < 0) { 2370 - int is_udplite = IS_UDPLITE(sk); 2374 + struct net *net = sock_net(sk); 2371 2375 int drop_reason; 2372 2376 2373 2377 /* Note that an ENOMEM error is charged twice */ 2374 2378 if (rc == -ENOMEM) { 2375 - UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, 2376 - is_udplite); 2379 + UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS); 2377 2380 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 2378 2381 } else { 2379 - UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS, 2380 - is_udplite); 2382 + UDP_INC_STATS(net, UDP_MIB_MEMERRORS); 2381 2383 drop_reason = SKB_DROP_REASON_PROTO_MEM; 2382 2384 } 2383 - UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 2385 + UDP_INC_STATS(net, 
UDP_MIB_INERRORS); 2384 2386 trace_udp_fail_queue_rcv_skb(rc, sk, skb); 2385 2387 sk_skb_reason_drop(sk, skb, drop_reason); 2386 2388 return -1; ··· 2399 2405 { 2400 2406 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 2401 2407 struct udp_sock *up = udp_sk(sk); 2402 - int is_udplite = IS_UDPLITE(sk); 2408 + struct net *net = sock_net(sk); 2403 2409 2404 2410 /* 2405 2411 * Charge it to the socket, dropping if the queue is full. ··· 2436 2442 2437 2443 ret = encap_rcv(sk, skb); 2438 2444 if (ret <= 0) { 2439 - __UDP_INC_STATS(sock_net(sk), 2440 - UDP_MIB_INDATAGRAMS, 2441 - is_udplite); 2445 + __UDP_INC_STATS(net, UDP_MIB_INDATAGRAMS); 2442 2446 return -ret; 2443 2447 } 2444 2448 } ··· 2495 2503 2496 2504 csum_error: 2497 2505 drop_reason = SKB_DROP_REASON_UDP_CSUM; 2498 - __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 2506 + __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS); 2499 2507 drop: 2500 - __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 2508 + __UDP_INC_STATS(net, UDP_MIB_INERRORS); 2501 2509 udp_drops_inc(sk); 2502 2510 sk_skb_reason_drop(sk, skb, drop_reason); 2503 2511 return -1; ··· 2584 2592 2585 2593 if (unlikely(!nskb)) { 2586 2594 udp_drops_inc(sk); 2587 - __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 2588 - IS_UDPLITE(sk)); 2589 - __UDP_INC_STATS(net, UDP_MIB_INERRORS, 2590 - IS_UDPLITE(sk)); 2595 + __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS); 2596 + __UDP_INC_STATS(net, UDP_MIB_INERRORS); 2591 2597 continue; 2592 2598 } 2593 2599 if (udp_queue_rcv_skb(sk, nskb) > 0) ··· 2603 2613 consume_skb(skb); 2604 2614 } else { 2605 2615 kfree_skb(skb); 2606 - __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 2607 - proto == IPPROTO_UDPLITE); 2616 + __UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI); 2608 2617 } 2609 2618 return 0; 2610 2619 } ··· 2753 2764 goto csum_error; 2754 2765 2755 2766 drop_reason = SKB_DROP_REASON_NO_SOCKET; 2756 - __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 2767 + __UDP_INC_STATS(net, 
UDP_MIB_NOPORTS); 2757 2768 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 2758 2769 2759 2770 /* ··· 2782 2793 proto == IPPROTO_UDPLITE ? "Lite" : "", 2783 2794 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), 2784 2795 ulen); 2785 - __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 2796 + __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS); 2786 2797 drop: 2787 - __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 2798 + __UDP_INC_STATS(net, UDP_MIB_INERRORS); 2788 2799 sk_skb_reason_drop(sk, skb, drop_reason); 2789 2800 return 0; 2790 2801 }
+2 -7
net/ipv6/af_inet6.c
··· 886 886 net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib); 887 887 if (!net->mib.udp_stats_in6) 888 888 return -ENOMEM; 889 - net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib); 890 - if (!net->mib.udplite_stats_in6) 891 - goto err_udplite_mib; 889 + 892 890 net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib); 893 891 if (!net->mib.ipv6_statistics) 894 892 goto err_ip_mib; ··· 897 899 u64_stats_init(&af_inet6_stats->syncp); 898 900 } 899 901 900 - 901 902 net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib); 902 903 if (!net->mib.icmpv6_statistics) 903 904 goto err_icmp_mib; 905 + 904 906 net->mib.icmpv6msg_statistics = kzalloc_obj(struct icmpv6msg_mib); 905 907 if (!net->mib.icmpv6msg_statistics) 906 908 goto err_icmpmsg_mib; ··· 911 913 err_icmp_mib: 912 914 free_percpu(net->mib.ipv6_statistics); 913 915 err_ip_mib: 914 - free_percpu(net->mib.udplite_stats_in6); 915 - err_udplite_mib: 916 916 free_percpu(net->mib.udp_stats_in6); 917 917 return -ENOMEM; 918 918 } ··· 918 922 static void ipv6_cleanup_mibs(struct net *net) 919 923 { 920 924 free_percpu(net->mib.udp_stats_in6); 921 - free_percpu(net->mib.udplite_stats_in6); 922 925 free_percpu(net->mib.ipv6_statistics); 923 926 free_percpu(net->mib.icmpv6_statistics); 924 927 kfree(net->mib.icmpv6msg_statistics);
-14
net/ipv6/proc.c
··· 108 108 SNMP_MIB_ITEM("Udp6MemErrors", UDP_MIB_MEMERRORS), 109 109 }; 110 110 111 - static const struct snmp_mib snmp6_udplite6_list[] = { 112 - SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), 113 - SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), 114 - SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), 115 - SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), 116 - SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS), 117 - SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS), 118 - SNMP_MIB_ITEM("UdpLite6InCsumErrors", UDP_MIB_CSUMERRORS), 119 - SNMP_MIB_ITEM("UdpLite6MemErrors", UDP_MIB_MEMERRORS), 120 - }; 121 - 122 111 static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib) 123 112 { 124 113 char name[32]; ··· 215 226 snmp6_seq_show_item(seq, net->mib.udp_stats_in6, 216 227 NULL, snmp6_udp6_list, 217 228 ARRAY_SIZE(snmp6_udp6_list)); 218 - snmp6_seq_show_item(seq, net->mib.udplite_stats_in6, 219 - NULL, snmp6_udplite6_list, 220 - ARRAY_SIZE(snmp6_udplite6_list)); 221 229 return 0; 222 230 } 223 231
+19 -29
net/ipv6/udp.c
··· 796 796 797 797 rc = __udp_enqueue_schedule_skb(sk, skb); 798 798 if (rc < 0) { 799 - int is_udplite = IS_UDPLITE(sk); 800 799 enum skb_drop_reason drop_reason; 800 + struct net *net = sock_net(sk); 801 801 802 802 /* Note that an ENOMEM error is charged twice */ 803 803 if (rc == -ENOMEM) { 804 - UDP6_INC_STATS(sock_net(sk), 805 - UDP_MIB_RCVBUFERRORS, is_udplite); 804 + UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS); 806 805 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 807 806 } else { 808 - UDP6_INC_STATS(sock_net(sk), 809 - UDP_MIB_MEMERRORS, is_udplite); 807 + UDP6_INC_STATS(net, UDP_MIB_MEMERRORS); 810 808 drop_reason = SKB_DROP_REASON_PROTO_MEM; 811 809 } 812 - UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 810 + UDP6_INC_STATS(net, UDP_MIB_INERRORS); 813 811 trace_udp_fail_queue_rcv_skb(rc, sk, skb); 814 812 sk_skb_reason_drop(sk, skb, drop_reason); 815 813 return -1; ··· 828 830 { 829 831 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 830 832 struct udp_sock *up = udp_sk(sk); 831 - int is_udplite = IS_UDPLITE(sk); 833 + struct net *net = sock_net(sk); 832 834 833 835 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { 834 836 drop_reason = SKB_DROP_REASON_XFRM_POLICY; ··· 862 864 863 865 ret = encap_rcv(sk, skb); 864 866 if (ret <= 0) { 865 - __UDP6_INC_STATS(sock_net(sk), 866 - UDP_MIB_INDATAGRAMS, 867 - is_udplite); 867 + __UDP6_INC_STATS(net, UDP_MIB_INDATAGRAMS); 868 868 return -ret; 869 869 } 870 870 } ··· 905 909 906 910 csum_error: 907 911 drop_reason = SKB_DROP_REASON_UDP_CSUM; 908 - __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 912 + __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS); 909 913 drop: 910 - __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 914 + __UDP6_INC_STATS(net, UDP_MIB_INERRORS); 911 915 udp_drops_inc(sk); 912 916 sk_skb_reason_drop(sk, skb, drop_reason); 913 917 return -1; ··· 1014 1018 nskb = skb_clone(skb, GFP_ATOMIC); 1015 1019 if (unlikely(!nskb)) { 1016 1020 
udp_drops_inc(sk); 1017 - __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 1018 - IS_UDPLITE(sk)); 1019 - __UDP6_INC_STATS(net, UDP_MIB_INERRORS, 1020 - IS_UDPLITE(sk)); 1021 + __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS); 1022 + __UDP6_INC_STATS(net, UDP_MIB_INERRORS); 1021 1023 continue; 1022 1024 } 1023 1025 ··· 1034 1040 consume_skb(skb); 1035 1041 } else { 1036 1042 kfree_skb(skb); 1037 - __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 1038 - proto == IPPROTO_UDPLITE); 1043 + __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI); 1039 1044 } 1040 1045 return 0; 1041 1046 } ··· 1206 1213 if (udp_lib_checksum_complete(skb)) 1207 1214 goto csum_error; 1208 1215 1209 - __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 1216 + __UDP6_INC_STATS(net, UDP_MIB_NOPORTS); 1210 1217 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 1211 1218 1212 1219 sk_skb_reason_drop(sk, skb, reason); ··· 1227 1234 csum_error: 1228 1235 if (reason == SKB_DROP_REASON_NOT_SPECIFIED) 1229 1236 reason = SKB_DROP_REASON_UDP_CSUM; 1230 - __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 1237 + __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS); 1231 1238 discard: 1232 - __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1239 + __UDP6_INC_STATS(net, UDP_MIB_INERRORS); 1233 1240 sk_skb_reason_drop(sk, skb, reason); 1234 1241 return 0; 1235 1242 } ··· 1483 1490 err = ip6_send_skb(skb); 1484 1491 if (unlikely(err)) { 1485 1492 if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) { 1486 - UDP6_INC_STATS(sock_net(sk), 1487 - UDP_MIB_SNDBUFERRORS, is_udplite); 1493 + UDP6_INC_STATS(sock_net(sk), UDP_MIB_SNDBUFERRORS); 1488 1494 err = 0; 1489 1495 } 1490 1496 } else { 1491 - UDP6_INC_STATS(sock_net(sk), 1492 - UDP_MIB_OUTDATAGRAMS, is_udplite); 1497 + UDP6_INC_STATS(sock_net(sk), UDP_MIB_OUTDATAGRAMS); 1493 1498 } 1494 1499 return err; 1495 1500 } ··· 1817 1826 * things). We could add another new stat but at least for now that 1818 1827 * seems like overkill. 
1819 1828 */ 1820 - if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 1821 - UDP6_INC_STATS(sock_net(sk), 1822 - UDP_MIB_SNDBUFERRORS, is_udplite); 1823 - } 1829 + if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 1830 + UDP6_INC_STATS(sock_net(sk), UDP_MIB_SNDBUFERRORS); 1831 + 1824 1832 return err; 1825 1833 1826 1834 do_confirm: