Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

net: remove EXPORT_IPV6_MOD() and EXPORT_IPV6_MOD_GPL() macros

As IPv6 is built-in only, the macros always evaluate to empty
definitions. Remove them completely from the code.

Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
Link: https://patch.msgid.link/20260325120928.15848-3-fmancera@suse.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Fernando Fernandez Mancera and committed by
Jakub Kicinski
0557a344 309b905d

-138
-8
include/net/ip.h
··· 692 692 693 693 #endif 694 694 695 - #if IS_MODULE(CONFIG_IPV6) 696 - #define EXPORT_IPV6_MOD(X) EXPORT_SYMBOL(X) 697 - #define EXPORT_IPV6_MOD_GPL(X) EXPORT_SYMBOL_GPL(X) 698 - #else 699 - #define EXPORT_IPV6_MOD(X) 700 - #define EXPORT_IPV6_MOD_GPL(X) 701 - #endif 702 - 703 695 static inline unsigned int ipv4_addr_hash(__be32 ip) 704 696 { 705 697 return (__force unsigned int) ip;
-3
net/core/dev.c
··· 1182 1182 strscpy(name, dev->name, IFNAMSIZ); 1183 1183 } while (read_seqretry(&netdev_rename_lock, seq)); 1184 1184 } 1185 - EXPORT_IPV6_MOD_GPL(netdev_copy_name); 1186 1185 1187 1186 /** 1188 1187 * netdev_get_name - get a netdevice name, knowing its ifindex. ··· 1311 1312 1312 1313 return NULL; 1313 1314 } 1314 - EXPORT_IPV6_MOD(netdev_get_by_flags_rcu); 1315 1315 1316 1316 /** 1317 1317 * dev_valid_name - check if name is okay for network device ··· 1836 1838 netdev_unlock_ops(lower_dev); 1837 1839 } 1838 1840 } 1839 - EXPORT_IPV6_MOD(netif_disable_lro); 1840 1841 1841 1842 /** 1842 1843 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
-1
net/core/hotdata.c
··· 27 27 EXPORT_SYMBOL(net_hotdata); 28 28 29 29 struct net_aligned_data net_aligned_data; 30 - EXPORT_IPV6_MOD(net_aligned_data);
-1
net/core/neighbour.c
··· 778 778 779 779 return NULL; 780 780 } 781 - EXPORT_IPV6_MOD(pneigh_lookup); 782 781 783 782 int pneigh_create(struct neigh_table *tbl, struct net *net, 784 783 const void *pkey, struct net_device *dev,
-2
net/ipv4/inet_connection_sock.c
··· 709 709 arg->err = error; 710 710 return NULL; 711 711 } 712 - EXPORT_IPV6_MOD(inet_csk_accept); 713 712 714 713 /* 715 714 * Using different timers for retransmit, delayed acks and probes ··· 1020 1021 inet_csk_reqsk_queue_drop(sk, req); 1021 1022 reqsk_put(req); 1022 1023 } 1023 - EXPORT_IPV6_MOD(inet_csk_reqsk_queue_drop_and_put); 1024 1024 1025 1025 static void reqsk_timer_handler(struct timer_list *t) 1026 1026 {
-5
net/ipv4/inet_hashtables.c
··· 758 758 } 759 759 return ok; 760 760 } 761 - EXPORT_IPV6_MOD(inet_ehash_nolisten); 762 761 763 762 static int inet_reuseport_add_sock(struct sock *sk, 764 763 struct inet_listen_hashbucket *ilb) ··· 825 826 826 827 return err; 827 828 } 828 - EXPORT_IPV6_MOD(inet_hash); 829 829 830 830 void inet_unhash(struct sock *sk) 831 831 { ··· 857 859 spin_unlock_bh(lock); 858 860 } 859 861 } 860 - EXPORT_IPV6_MOD(inet_unhash); 861 862 862 863 static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb, 863 864 const struct net *net, unsigned short port, ··· 1019 1022 { 1020 1023 return __inet_bhash2_update_saddr(sk, saddr, family, false); 1021 1024 } 1022 - EXPORT_IPV6_MOD(inet_bhash2_update_saddr); 1023 1025 1024 1026 void inet_bhash2_reset_saddr(struct sock *sk) 1025 1027 { 1026 1028 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1027 1029 __inet_bhash2_update_saddr(sk, NULL, 0, true); 1028 1030 } 1029 - EXPORT_IPV6_MOD(inet_bhash2_reset_saddr); 1030 1031 1031 1032 /* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm 1032 1033 * Note that we use 32bit integers (vs RFC 'short integers')
-4
net/ipv4/inetpeer.c
··· 59 59 seqlock_init(&bp->lock); 60 60 bp->total = 0; 61 61 } 62 - EXPORT_IPV6_MOD_GPL(inet_peer_base_init); 63 62 64 63 #define PEER_MAX_GC 32 65 64 ··· 216 217 217 218 return p; 218 219 } 219 - EXPORT_IPV6_MOD_GPL(inet_getpeer); 220 220 221 221 void inet_putpeer(struct inet_peer *p) 222 222 { ··· 266 268 WRITE_ONCE(peer->rate_tokens, token); 267 269 return rc; 268 270 } 269 - EXPORT_IPV6_MOD(inet_peer_xrlim_allow); 270 271 271 272 void inetpeer_invalidate_tree(struct inet_peer_base *base) 272 273 { ··· 282 285 283 286 base->total = 0; 284 287 } 285 - EXPORT_IPV6_MOD(inetpeer_invalidate_tree);
-1
net/ipv4/metrics.c
··· 88 88 89 89 return fib_metrics; 90 90 } 91 - EXPORT_IPV6_MOD_GPL(ip_fib_metrics_init);
-16
net/ipv4/ping.c
··· 56 56 57 57 static struct ping_table ping_table; 58 58 struct pingv6_ops pingv6_ops; 59 - EXPORT_IPV6_MOD_GPL(pingv6_ops); 60 59 61 60 static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask) 62 61 { ··· 138 139 spin_unlock(&ping_table.lock); 139 140 return -EADDRINUSE; 140 141 } 141 - EXPORT_IPV6_MOD_GPL(ping_get_port); 142 142 143 143 void ping_unhash(struct sock *sk) 144 144 { ··· 152 154 } 153 155 spin_unlock(&ping_table.lock); 154 156 } 155 - EXPORT_IPV6_MOD_GPL(ping_unhash); 156 157 157 158 /* Called under rcu_read_lock() */ 158 159 static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) ··· 275 278 put_group_info(group_info); 276 279 return ret; 277 280 } 278 - EXPORT_IPV6_MOD_GPL(ping_init_sock); 279 281 280 282 void ping_close(struct sock *sk, long timeout) 281 283 { ··· 284 288 285 289 sk_common_release(sk); 286 290 } 287 - EXPORT_IPV6_MOD_GPL(ping_close); 288 291 289 292 static int ping_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, 290 293 int addr_len) ··· 463 468 pr_debug("ping_v4_bind -> %d\n", err); 464 469 return err; 465 470 } 466 - EXPORT_IPV6_MOD_GPL(ping_bind); 467 471 468 472 /* 469 473 * Is this a supported type of ICMP message? 
··· 595 601 out: 596 602 return; 597 603 } 598 - EXPORT_IPV6_MOD_GPL(ping_err); 599 604 600 605 /* 601 606 * Copy and checksum an ICMP Echo packet from user space into a buffer ··· 624 631 625 632 return 0; 626 633 } 627 - EXPORT_IPV6_MOD_GPL(ping_getfrag); 628 634 629 635 static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, 630 636 struct flowi4 *fl4) ··· 684 692 685 693 return 0; 686 694 } 687 - EXPORT_IPV6_MOD_GPL(ping_common_sendmsg); 688 695 689 696 static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 690 697 { ··· 927 936 pr_debug("ping_recvmsg -> %d\n", err); 928 937 return err; 929 938 } 930 - EXPORT_IPV6_MOD_GPL(ping_recvmsg); 931 939 932 940 static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk, 933 941 struct sk_buff *skb) ··· 947 957 { 948 958 return __ping_queue_rcv_skb(sk, skb) ? -1 : 0; 949 959 } 950 - EXPORT_IPV6_MOD_GPL(ping_queue_rcv_skb); 951 960 952 961 953 962 /* ··· 974 985 kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET); 975 986 return SKB_DROP_REASON_NO_SOCKET; 976 987 } 977 - EXPORT_IPV6_MOD_GPL(ping_rcv); 978 988 979 989 struct proto ping_prot = { 980 990 .name = "PING", ··· 995 1007 .put_port = ping_unhash, 996 1008 .obj_size = sizeof(struct inet_sock), 997 1009 }; 998 - EXPORT_IPV6_MOD(ping_prot); 999 1010 1000 1011 #ifdef CONFIG_PROC_FS 1001 1012 ··· 1059 1072 1060 1073 return *pos ? 
ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN; 1061 1074 } 1062 - EXPORT_IPV6_MOD_GPL(ping_seq_start); 1063 1075 1064 1076 static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos) 1065 1077 { ··· 1077 1091 ++*pos; 1078 1092 return sk; 1079 1093 } 1080 - EXPORT_IPV6_MOD_GPL(ping_seq_next); 1081 1094 1082 1095 void ping_seq_stop(struct seq_file *seq, void *v) 1083 1096 __releases(ping_table.lock) 1084 1097 { 1085 1098 spin_unlock(&ping_table.lock); 1086 1099 } 1087 - EXPORT_IPV6_MOD_GPL(ping_seq_stop); 1088 1100 1089 1101 static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, 1090 1102 int bucket)
-4
net/ipv4/syncookies.c
··· 223 223 224 224 return NULL; 225 225 } 226 - EXPORT_IPV6_MOD(tcp_get_cookie_sock); 227 226 228 227 /* 229 228 * when syncookies are in effect and tcp timestamps are enabled we stored ··· 259 260 260 261 return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0; 261 262 } 262 - EXPORT_IPV6_MOD(cookie_timestamp_decode); 263 263 264 264 static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb, 265 265 struct request_sock *req) ··· 310 312 311 313 return req; 312 314 } 313 - EXPORT_IPV6_MOD_GPL(cookie_bpf_check); 314 315 #endif 315 316 316 317 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, ··· 350 353 351 354 return req; 352 355 } 353 - EXPORT_IPV6_MOD_GPL(cookie_tcp_reqsk_alloc); 354 356 355 357 static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk, 356 358 struct sk_buff *skb)
-23
net/ipv4/tcp.c
··· 303 303 EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn); 304 304 305 305 long sysctl_tcp_mem[3] __read_mostly; 306 - EXPORT_IPV6_MOD(sysctl_tcp_mem); 307 306 308 307 DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc); 309 308 EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc); ··· 316 317 * Current number of TCP sockets. 317 318 */ 318 319 struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp; 319 - EXPORT_IPV6_MOD(tcp_sockets_allocated); 320 320 321 321 /* 322 322 * Pressure flag: try to collapse. ··· 339 341 if (!cmpxchg(&tcp_memory_pressure, 0, val)) 340 342 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); 341 343 } 342 - EXPORT_IPV6_MOD_GPL(tcp_enter_memory_pressure); 343 344 344 345 void tcp_leave_memory_pressure(struct sock *sk) 345 346 { ··· 351 354 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO, 352 355 jiffies_to_msecs(jiffies - val)); 353 356 } 354 - EXPORT_IPV6_MOD_GPL(tcp_leave_memory_pressure); 355 357 356 358 /* Convert seconds to retransmits based on initial and max timeout */ 357 359 static u8 secs_to_retrans(int seconds, int timeout, int rto_max) ··· 414 418 static_branch_slow_dec_deferred(&tcp_md5_needed); 415 419 } 416 420 } 417 - EXPORT_IPV6_MOD_GPL(tcp_md5_destruct_sock); 418 421 #endif 419 422 420 423 /* Address-family independent initialization for a tcp_sock. ··· 481 486 sk_sockets_allocated_inc(sk); 482 487 xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1); 483 488 } 484 - EXPORT_IPV6_MOD(tcp_init_sock); 485 489 486 490 static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) 487 491 { ··· 685 691 *karg = answ; 686 692 return 0; 687 693 } 688 - EXPORT_IPV6_MOD(tcp_ioctl); 689 694 690 695 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 691 696 { ··· 901 908 902 909 return ret; 903 910 } 904 - EXPORT_IPV6_MOD(tcp_splice_read); 905 911 906 912 /* We allow to exceed memory limits for FIN packets to expedite 907 913 * connection tear down and (memory) recovery. 
··· 1475 1483 tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); 1476 1484 release_sock(sk); 1477 1485 } 1478 - EXPORT_IPV6_MOD_GPL(tcp_splice_eof); 1479 1486 1480 1487 /* 1481 1488 * Handle reading urgent data. BSD has very simple semantics for ··· 1786 1795 } 1787 1796 return copied; 1788 1797 } 1789 - EXPORT_IPV6_MOD(tcp_read_skb); 1790 1798 1791 1799 void tcp_read_done(struct sock *sk, size_t len) 1792 1800 { ··· 1830 1840 { 1831 1841 return tcp_inq(sock->sk); 1832 1842 } 1833 - EXPORT_IPV6_MOD(tcp_peek_len); 1834 1843 1835 1844 /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1836 1845 int tcp_set_rcvlowat(struct sock *sk, int val) ··· 1859 1870 } 1860 1871 return 0; 1861 1872 } 1862 - EXPORT_IPV6_MOD(tcp_set_rcvlowat); 1863 1873 1864 1874 #ifdef CONFIG_MMU 1865 1875 static const struct vm_operations_struct tcp_vm_ops = { ··· 1877 1889 vma->vm_ops = &tcp_vm_ops; 1878 1890 return 0; 1879 1891 } 1880 - EXPORT_IPV6_MOD(tcp_mmap); 1881 1892 1882 1893 static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 1883 1894 u32 *offset_frag) ··· 2961 2974 } 2962 2975 return ret; 2963 2976 } 2964 - EXPORT_IPV6_MOD(tcp_recvmsg); 2965 2977 2966 2978 void tcp_set_state(struct sock *sk, int state) 2967 2979 { ··· 3090 3104 tcp_send_fin(sk); 3091 3105 } 3092 3106 } 3093 - EXPORT_IPV6_MOD(tcp_shutdown); 3094 3107 3095 3108 int tcp_orphan_count_sum(void) 3096 3109 { ··· 3602 3617 } 3603 3618 3604 3619 DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3605 - EXPORT_IPV6_MOD(tcp_tx_delay_enabled); 3606 3620 3607 3621 static void tcp_enable_tx_delay(struct sock *sk, int val) 3608 3622 { ··· 4186 4202 optval, optlen); 4187 4203 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 4188 4204 } 4189 - EXPORT_IPV6_MOD(tcp_setsockopt); 4190 4205 4191 4206 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 4192 4207 struct tcp_info *info) ··· 4848 4865 4849 4866 return false; 4850 4867 } 4851 - 
EXPORT_IPV6_MOD(tcp_bpf_bypass_getsockopt); 4852 4868 4853 4869 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 4854 4870 int __user *optlen) ··· 4861 4879 return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 4862 4880 USER_SOCKPTR(optlen)); 4863 4881 } 4864 - EXPORT_IPV6_MOD(tcp_getsockopt); 4865 4882 4866 4883 #ifdef CONFIG_TCP_MD5SIG 4867 4884 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb, ··· 4891 4910 skb_walk_frags(skb, frag_iter) 4892 4911 tcp_md5_hash_skb_data(ctx, frag_iter, 0); 4893 4912 } 4894 - EXPORT_IPV6_MOD(tcp_md5_hash_skb_data); 4895 4913 4896 4914 void tcp_md5_hash_key(struct md5_ctx *ctx, 4897 4915 const struct tcp_md5sig_key *key) ··· 4902 4922 */ 4903 4923 data_race(({ md5_update(ctx, key->key, keylen), 0; })); 4904 4924 } 4905 - EXPORT_IPV6_MOD(tcp_md5_hash_key); 4906 4925 4907 4926 /* Called with rcu_read_lock() */ 4908 4927 static enum skb_drop_reason ··· 5002 5023 } 5003 5024 return 0; 5004 5025 } 5005 - EXPORT_IPV6_MOD(tcp_do_parse_auth_options); 5006 5026 #endif 5007 5027 5008 5028 /* Called with rcu_read_lock() */ ··· 5070 5092 return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, 5071 5093 l3index, md5_location); 5072 5094 } 5073 - EXPORT_IPV6_MOD_GPL(tcp_inbound_hash); 5074 5095 5075 5096 void tcp_done(struct sock *sk) 5076 5097 {
-1
net/ipv4/tcp_fastopen.c
··· 559 559 } 560 560 return false; 561 561 } 562 - EXPORT_IPV6_MOD(tcp_fastopen_defer_connect); 563 562 564 563 /* 565 564 * The following code block is to deal with middle box issues with TFO:
-7
net/ipv4/tcp_input.c
··· 814 814 815 815 inet_csk(sk)->icsk_ack.rcv_mss = hint; 816 816 } 817 - EXPORT_IPV6_MOD(tcp_initialize_rcv_mss); 818 817 819 818 /* Receiver "autotuning" code. 820 819 * ··· 3171 3172 */ 3172 3173 tcp_non_congestion_loss_retransmit(sk); 3173 3174 } 3174 - EXPORT_IPV6_MOD(tcp_simple_retransmit); 3175 3175 3176 3176 void tcp_enter_recovery(struct sock *sk, bool ece_ack) 3177 3177 { ··· 4841 4843 if (!sock_flag(sk, SOCK_DEAD)) 4842 4844 sk_error_report(sk); 4843 4845 } 4844 - EXPORT_IPV6_MOD(tcp_done_with_error); 4845 4846 4846 4847 /* When we get a reset we do this. */ 4847 4848 void tcp_reset(struct sock *sk, struct sk_buff *skb) ··· 6662 6665 discard: 6663 6666 tcp_drop_reason(sk, skb, reason); 6664 6667 } 6665 - EXPORT_IPV6_MOD(tcp_rcv_established); 6666 6668 6667 6669 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb) 6668 6670 { ··· 7380 7384 __kfree_skb(skb); 7381 7385 return 0; 7382 7386 } 7383 - EXPORT_IPV6_MOD(tcp_rcv_state_process); 7384 7387 7385 7388 static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) 7386 7389 { ··· 7577 7582 7578 7583 return mss; 7579 7584 } 7580 - EXPORT_IPV6_MOD_GPL(tcp_get_syncookie_mss); 7581 7585 7582 7586 int tcp_conn_request(struct request_sock_ops *rsk_ops, 7583 7587 const struct tcp_request_sock_ops *af_ops, ··· 7756 7762 tcp_listendrop(sk); 7757 7763 return 0; 7758 7764 } 7759 - EXPORT_IPV6_MOD(tcp_conn_request);
-21
net/ipv4/tcp_ipv4.c
··· 201 201 202 202 return 0; 203 203 } 204 - EXPORT_IPV6_MOD_GPL(tcp_twsk_unique); 205 204 206 205 static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, 207 206 int addr_len) ··· 361 362 inet->inet_dport = 0; 362 363 return err; 363 364 } 364 - EXPORT_IPV6_MOD(tcp_v4_connect); 365 365 366 366 /* 367 367 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191. ··· 400 402 tcp_simple_retransmit(sk); 401 403 } /* else let the usual retransmit timer handle it */ 402 404 } 403 - EXPORT_IPV6_MOD(tcp_v4_mtu_reduced); 404 405 405 406 static void do_redirect(struct sk_buff *skb, struct sock *sk) 406 407 { ··· 433 436 } 434 437 reqsk_put(req); 435 438 } 436 - EXPORT_IPV6_MOD(tcp_req_err); 437 439 438 440 /* TCP-LD (RFC 6069) logic */ 439 441 void tcp_ld_RTO_revert(struct sock *sk, u32 seq) ··· 471 475 tcp_retransmit_timer(sk); 472 476 } 473 477 } 474 - EXPORT_IPV6_MOD(tcp_ld_RTO_revert); 475 478 476 479 /* 477 480 * This routine is called by the ICMP module when it gets some ··· 1217 1222 */ 1218 1223 1219 1224 DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ); 1220 - EXPORT_IPV6_MOD(tcp_md5_needed); 1221 1225 1222 1226 static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new) 1223 1227 { ··· 1275 1281 } 1276 1282 return best_match; 1277 1283 } 1278 - EXPORT_IPV6_MOD(__tcp_md5_do_lookup); 1279 1284 1280 1285 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk, 1281 1286 const union tcp_md5_addr *addr, ··· 1321 1328 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr; 1322 1329 return tcp_md5_do_lookup(sk, l3index, addr, AF_INET); 1323 1330 } 1324 - EXPORT_IPV6_MOD(tcp_v4_md5_lookup); 1325 1331 1326 1332 static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp) 1327 1333 { ··· 1415 1423 return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags, 1416 1424 newkey, newkeylen, GFP_KERNEL); 1417 1425 } 1418 - EXPORT_IPV6_MOD(tcp_md5_do_add); 1419 1426 1420 1427 int 
tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, 1421 1428 int family, u8 prefixlen, int l3index, ··· 1442 1451 key->flags, key->key, key->keylen, 1443 1452 sk_gfp_mask(sk, GFP_ATOMIC)); 1444 1453 } 1445 - EXPORT_IPV6_MOD(tcp_md5_key_copy); 1446 1454 1447 1455 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family, 1448 1456 u8 prefixlen, int l3index, u8 flags) ··· 1456 1466 kfree_rcu(key, rcu); 1457 1467 return 0; 1458 1468 } 1459 - EXPORT_IPV6_MOD(tcp_md5_do_del); 1460 1469 1461 1470 void tcp_clear_md5_list(struct sock *sk) 1462 1471 { ··· 1593 1604 tcp_md5_hash_key(&ctx, key); 1594 1605 md5_final(&ctx, md5_hash); 1595 1606 } 1596 - EXPORT_IPV6_MOD(tcp_v4_md5_hash_skb); 1597 1607 1598 1608 #endif 1599 1609 ··· 1662 1674 tcp_listendrop(sk); 1663 1675 return 0; 1664 1676 } 1665 - EXPORT_IPV6_MOD(tcp_v4_conn_request); 1666 1677 1667 1678 1668 1679 /* ··· 1787 1800 tcp_done(newsk); 1788 1801 goto exit; 1789 1802 } 1790 - EXPORT_IPV6_MOD(tcp_v4_syn_recv_sock); 1791 1803 1792 1804 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb) 1793 1805 { ··· 2041 2055 } 2042 2056 return false; 2043 2057 } 2044 - EXPORT_IPV6_MOD(tcp_add_backlog); 2045 2058 2046 2059 static void tcp_v4_restore_cb(struct sk_buff *skb) 2047 2060 { ··· 2352 2367 sk->sk_rx_dst_ifindex = skb->skb_iif; 2353 2368 } 2354 2369 } 2355 - EXPORT_IPV6_MOD(inet_sk_rx_dst_set); 2356 2370 2357 2371 const struct inet_connection_sock_af_ops ipv4_specific = { 2358 2372 .queue_xmit = ip_queue_xmit, ··· 2364 2380 .getsockopt = ip_getsockopt, 2365 2381 .mtu_reduced = tcp_v4_mtu_reduced, 2366 2382 }; 2367 - EXPORT_IPV6_MOD(ipv4_specific); 2368 2383 2369 2384 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) 2370 2385 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { ··· 2456 2473 2457 2474 sk_sockets_allocated_dec(sk); 2458 2475 } 2459 - EXPORT_IPV6_MOD(tcp_v4_destroy_sock); 2460 2476 2461 2477 #ifdef CONFIG_PROC_FS 2462 2478 /* Proc 
filesystem TCP sock list dumping. */ ··· 2691 2709 st->last_pos = *pos; 2692 2710 return rc; 2693 2711 } 2694 - EXPORT_IPV6_MOD(tcp_seq_start); 2695 2712 2696 2713 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2697 2714 { ··· 2721 2740 st->last_pos = *pos; 2722 2741 return rc; 2723 2742 } 2724 - EXPORT_IPV6_MOD(tcp_seq_next); 2725 2743 2726 2744 void tcp_seq_stop(struct seq_file *seq, void *v) 2727 2745 { ··· 2738 2758 break; 2739 2759 } 2740 2760 } 2741 - EXPORT_IPV6_MOD(tcp_seq_stop); 2742 2761 2743 2762 static void get_openreq4(const struct request_sock *req, 2744 2763 struct seq_file *f, int i)
-4
net/ipv4/tcp_minisocks.c
··· 287 287 inet_twsk_put(tw); 288 288 return TCP_TW_SUCCESS; 289 289 } 290 - EXPORT_IPV6_MOD(tcp_timewait_state_process); 291 290 292 291 static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw) 293 292 { ··· 522 523 523 524 tcp_set_ca_state(sk, TCP_CA_Open); 524 525 } 525 - EXPORT_IPV6_MOD_GPL(tcp_ca_openreq_child); 526 526 527 527 static void smc_check_reset_syn_req(const struct tcp_sock *oldtp, 528 528 struct request_sock *req, ··· 974 976 } 975 977 return NULL; 976 978 } 977 - EXPORT_IPV6_MOD(tcp_check_req); 978 979 979 980 /* 980 981 * Queue segment on the new socket if the new socket is active, ··· 1015 1018 sock_put(child); 1016 1019 return reason; 1017 1020 } 1018 - EXPORT_IPV6_MOD(tcp_child_process);
-6
net/ipv4/tcp_output.c
··· 272 272 WRITE_ONCE(*__window_clamp, 273 273 min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp)); 274 274 } 275 - EXPORT_IPV6_MOD(tcp_select_initial_window); 276 275 277 276 /* Chose a new window to advertise, update state in tcp_sock for the 278 277 * socket, and return result with RFC1323 scaling applied. The return ··· 1360 1361 if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk)) 1361 1362 tcp_send_ack(sk); 1362 1363 } 1363 - EXPORT_IPV6_MOD(tcp_release_cb); 1364 1364 1365 1365 void __init tcp_tsq_work_init(void) 1366 1366 { ··· 2021 2023 return __tcp_mtu_to_mss(sk, pmtu) - 2022 2024 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); 2023 2025 } 2024 - EXPORT_IPV6_MOD(tcp_mtu_to_mss); 2025 2026 2026 2027 /* Inverse of above */ 2027 2028 int tcp_mss_to_mtu(struct sock *sk, int mss) ··· 2093 2096 2094 2097 return mss_now; 2095 2098 } 2096 - EXPORT_IPV6_MOD(tcp_sync_mss); 2097 2099 2098 2100 /* Compute the current effective MSS, taking SACKs and IP options, 2099 2101 * and even PMTU discovery events into account. ··· 4073 4077 4074 4078 return skb; 4075 4079 } 4076 - EXPORT_IPV6_MOD(tcp_make_synack); 4077 4080 4078 4081 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) 4079 4082 { ··· 4653 4658 } 4654 4659 return res; 4655 4660 } 4656 - EXPORT_IPV6_MOD(tcp_rtx_synack);
-1
net/ipv4/tcp_timer.c
··· 774 774 else if (!val) 775 775 tcp_delete_keepalive_timer(sk); 776 776 } 777 - EXPORT_IPV6_MOD_GPL(tcp_set_keepalive); 778 777 779 778 static void tcp_keepalive_timer(struct timer_list *t) 780 779 {
-29
net/ipv4/udp.c
··· 126 126 struct udp_table udp_table __read_mostly; 127 127 128 128 long sysctl_udp_mem[3] __read_mostly; 129 - EXPORT_IPV6_MOD(sysctl_udp_mem); 130 129 131 130 DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc); 132 131 EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc); ··· 348 349 fail: 349 350 return error; 350 351 } 351 - EXPORT_IPV6_MOD(udp_lib_get_port); 352 352 353 353 static int udp_v4_get_port(struct sock *sk, unsigned short snum) 354 354 { ··· 413 415 return __inet_ehashfn(laddr, lport, faddr, fport, 414 416 udp_ehash_secret + net_hash_mix(net)); 415 417 } 416 - EXPORT_IPV6_MOD(udp_ehashfn); 417 418 418 419 /** 419 420 * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port) ··· 647 650 648 651 spin_unlock_bh(&hslot->lock); 649 652 } 650 - EXPORT_IPV6_MOD(udp_lib_hash4); 651 653 652 654 /* call with sock lock */ 653 655 void udp4_hash4(struct sock *sk) ··· 662 666 663 667 udp_lib_hash4(sk, hash); 664 668 } 665 - EXPORT_IPV6_MOD(udp4_hash4); 666 669 #endif /* CONFIG_BASE_SMALL */ 667 670 668 671 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try ··· 797 802 } 798 803 799 804 DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key); 800 - EXPORT_IPV6_MOD(udp_encap_needed_key); 801 805 802 806 #if IS_ENABLED(CONFIG_IPV6) 803 807 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key); 804 - EXPORT_IPV6_MOD(udpv6_encap_needed_key); 805 808 #endif 806 809 807 810 void udp_encap_enable(void) ··· 1019 1026 ip_flush_pending_frames(sk); 1020 1027 } 1021 1028 } 1022 - EXPORT_IPV6_MOD(udp_flush_pending_frames); 1023 1029 1024 1030 /** 1025 1031 * udp4_hwcsum - handle outgoing HW checksumming ··· 1196 1204 WRITE_ONCE(up->pending, 0); 1197 1205 return err; 1198 1206 } 1199 - EXPORT_IPV6_MOD(udp_push_pending_frames); 1200 1207 1201 1208 static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size) 1202 1209 { ··· 1232 1241 1233 1242 return need_ip; 1234 1243 } 1235 - EXPORT_IPV6_MOD_GPL(udp_cmsg_send); 1236 1244 1237 1245 int 
udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1238 1246 { ··· 1522 1532 udp_push_pending_frames(sk); 1523 1533 release_sock(sk); 1524 1534 } 1525 - EXPORT_IPV6_MOD_GPL(udp_splice_eof); 1526 1535 1527 1536 #define UDP_SKB_IS_STATELESS 0x80000000 1528 1537 ··· 1636 1647 prefetch(&skb->data); 1637 1648 udp_rmem_release(sk, udp_skb_truesize(skb), 1, false); 1638 1649 } 1639 - EXPORT_IPV6_MOD(udp_skb_destructor); 1640 1650 1641 1651 /* as above, but the caller held the rx queue lock, too */ 1642 1652 static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb) ··· 1788 1800 udp_drops_inc(sk); 1789 1801 return err; 1790 1802 } 1791 - EXPORT_IPV6_MOD_GPL(__udp_enqueue_schedule_skb); 1792 1803 1793 1804 void udp_destruct_common(struct sock *sk) 1794 1805 { ··· 1804 1817 udp_rmem_release(sk, total, 0, true); 1805 1818 kfree(up->udp_prod_queue); 1806 1819 } 1807 - EXPORT_IPV6_MOD_GPL(udp_destruct_common); 1808 1820 1809 1821 static void udp_destruct_sock(struct sock *sk) 1810 1822 { ··· 1841 1855 skb_release_head_state(skb); 1842 1856 __consume_stateless_skb(skb); 1843 1857 } 1844 - EXPORT_IPV6_MOD_GPL(skb_consume_udp); 1845 1858 1846 1859 static struct sk_buff *__first_packet_length(struct sock *sk, 1847 1860 struct sk_buff_head *rcvq, ··· 1922 1937 1923 1938 return 0; 1924 1939 } 1925 - EXPORT_IPV6_MOD(udp_ioctl); 1926 1940 1927 1941 struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, 1928 1942 int *off, int *err) ··· 2016 2032 WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 2017 2033 return recv_actor(sk, skb); 2018 2034 } 2019 - EXPORT_IPV6_MOD(udp_read_skb); 2020 2035 2021 2036 /* 2022 2037 * This should be easy, if there is something there we ··· 2137 2154 2138 2155 return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len); 2139 2156 } 2140 - EXPORT_IPV6_MOD(udp_pre_connect); 2141 2157 2142 2158 static int udp_connect(struct sock *sk, struct sockaddr_unsized *uaddr, 2143 2159 int addr_len) ··· 2186 2204 release_sock(sk); 
2187 2205 return 0; 2188 2206 } 2189 - EXPORT_IPV6_MOD(udp_disconnect); 2190 2207 2191 2208 void udp_lib_unhash(struct sock *sk) 2192 2209 { ··· 2217 2236 spin_unlock_bh(&hslot->lock); 2218 2237 } 2219 2238 } 2220 - EXPORT_IPV6_MOD(udp_lib_unhash); 2221 2239 2222 2240 /* 2223 2241 * inet_rcv_saddr was changed, we must rehash secondary hash ··· 2286 2306 udp_sk(sk)->udp_portaddr_hash = newhash; 2287 2307 } 2288 2308 } 2289 - EXPORT_IPV6_MOD(udp_lib_rehash); 2290 2309 2291 2310 static void udp_v4_rehash(struct sock *sk) 2292 2311 { ··· 2449 2470 } 2450 2471 return false; 2451 2472 } 2452 - EXPORT_IPV6_MOD(udp_sk_rx_dst_set); 2453 2473 2454 2474 /* 2455 2475 * Multicasts and broadcasts go to each listener. ··· 2977 2999 2978 3000 return err; 2979 3001 } 2980 - EXPORT_IPV6_MOD(udp_lib_setsockopt); 2981 3002 2982 3003 static int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 2983 3004 unsigned int optlen) ··· 3037 3060 return -EFAULT; 3038 3061 return 0; 3039 3062 } 3040 - EXPORT_IPV6_MOD(udp_lib_getsockopt); 3041 3063 3042 3064 static int udp_getsockopt(struct sock *sk, int level, int optname, 3043 3065 char __user *optval, int __user *optlen) ··· 3078 3102 return mask; 3079 3103 3080 3104 } 3081 - EXPORT_IPV6_MOD(udp_poll); 3082 3105 3083 3106 int udp_abort(struct sock *sk, int err) 3084 3107 { ··· 3100 3125 3101 3126 return 0; 3102 3127 } 3103 - EXPORT_IPV6_MOD_GPL(udp_abort); 3104 3128 3105 3129 struct proto udp_prot = { 3106 3130 .name = "UDP", ··· 3219 3245 3220 3246 return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; 3221 3247 } 3222 - EXPORT_IPV6_MOD(udp_seq_start); 3223 3248 3224 3249 void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3225 3250 { ··· 3232 3259 ++*pos; 3233 3260 return sk; 3234 3261 } 3235 - EXPORT_IPV6_MOD(udp_seq_next); 3236 3262 3237 3263 void udp_seq_stop(struct seq_file *seq, void *v) 3238 3264 { ··· 3243 3271 if (state->bucket <= udptable->mask) 3244 3272 spin_unlock_bh(&udptable->hash[state->bucket].lock); 3245 3273 } 3246 - EXPORT_IPV6_MOD(udp_seq_stop); 3247 3274 3248 3275 /* ------------------------------------------------------------------------ */ 3249 3276 static void udp4_format_sock(struct sock *sp, struct seq_file *f,
-1
net/psp/psp_sock.c
··· 291 291 skb->decrypted = 1; 292 292 rcu_read_unlock(); 293 293 } 294 - EXPORT_IPV6_MOD_GPL(psp_reply_set_decrypted);