Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at b935117fe6d1af576e39b1f18c9e875f44bd146f 251 lines 5.7 kB view raw
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/* The single in-kernel queueing backend, published and read under RCU. */
static const struct nf_queue_handler __rcu *nf_queue_handler;

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(nf_queue_handler));
	rcu_assign_pointer(nf_queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(nf_queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

/*
 * Drop the socket reference taken by nf_queue_entry_get_refs().
 * NOTE(review): with CONFIG_INET, sock_gen_put() is presumably used
 * because the socket may be a timewait/request sock that plain
 * sock_put() cannot handle -- confirm against sock_gen_put() docs.
 */
static void nf_queue_sock_put(struct sock *sk)
{
#ifdef CONFIG_INET
	sock_gen_put(sk);
#else
	sock_put(sk);
#endif
}

/*
 * Undo nf_queue_entry_get_refs(): drop the device and socket
 * references pinned while the packet was queued to userspace.
 */
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	dev_put(state->in);
	dev_put(state->out);
	if (state->sk)
		nf_queue_sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_put(entry->physin);
	dev_put(entry->physout);
#endif
}

/* Release all references held by @entry, then free it. */
void nf_queue_entry_free(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);

/*
 * Cache the bridge physical in/out devices in the entry (NULL when the
 * skb carries no bridge netfilter info).  No-op unless bridge netfilter
 * is enabled.
 */
static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	const struct sk_buff *skb = entry->skb;

	if (nf_bridge_info_exists(skb)) {
		entry->physin = nf_bridge_get_physindev(skb, entry->state.net);
		entry->physout = nf_bridge_get_physoutdev(skb);
	} else {
		entry->physin = NULL;
		entry->physout = NULL;
	}
#endif
}

/*
 * Bump dev refs so they don't vanish while packet is out.
 * Returns false (and takes no references) if the socket's refcount
 * already dropped to zero, i.e. the socket is going away.
 */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
		return false;

	dev_hold(state->in);
	dev_hold(state->out);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_hold(entry->physin);
	dev_hold(entry->physout);
#endif
	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

/*
 * Ask the registered handler to drop all entries queued for @net,
 * e.g. when the namespace or a hook is being torn down.
 */
void nf_queue_nf_hook_drop(struct net *net)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(nf_queue_handler);
	if (qh)
		qh->nf_hook_drop(net);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

/*
 * For locally generated IPv4 packets, stash the routing key (tos,
 * addresses, mark) in the entry's trailing reroute area so the route
 * can be re-validated when the packet comes back from userspace.
 */
static void nf_ip_saveroute(const struct sk_buff *skb,
			    struct nf_queue_entry *entry)
{
	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		rt_info->tos = iph->tos;
		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

/* IPv6 counterpart of nf_ip_saveroute(). */
static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

/*
 * Hand @skb to the registered queue handler on queue @queuenum.
 *
 * Returns 0 on success (the entry now owns the skb), or a negative
 * errno: -ESRCH if no handler is registered, -ENOMEM on allocation
 * failure, -ENETDOWN if the dst is already dead, -ENOTCONN if the
 * associated socket could not be pinned, or the handler's own error.
 */
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      unsigned int index, unsigned int queuenum)
{
	struct nf_queue_entry *entry = NULL;
	const struct nf_queue_handler *qh;
	unsigned int route_key_size;
	int status;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(nf_queue_handler);
	if (!qh)
		return -ESRCH;

	/* Extra room after the entry for the per-family reroute key. */
	switch (state->pf) {
	case AF_INET:
		route_key_size = sizeof(struct ip_rt_info);
		break;
	case AF_INET6:
		route_key_size = sizeof(struct ip6_rt_info);
		break;
	default:
		route_key_size = 0;
		break;
	}

	if (skb_sk_is_prefetched(skb)) {
		struct sock *sk = skb->sk;

		if (!sk_is_refcounted(sk)) {
			if (!refcount_inc_not_zero(&sk->sk_refcnt))
				return -ENOTCONN;

			/* drop refcount on skb_orphan */
			skb->destructor = sock_edemux;
		}
	}

	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	/* Pin the route so it survives while the packet sits in the queue. */
	if (skb_dst(skb) && !skb_dst_force(skb)) {
		kfree(entry);
		return -ENETDOWN;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook_index = index,
		.size	= sizeof(*entry) + route_key_size,
	};

	__nf_queue_entry_init_physdevs(entry);

	if (!nf_queue_entry_get_refs(entry)) {
		kfree(entry);
		return -ENOTCONN;
	}

	switch (entry->state.pf) {
	case AF_INET:
		nf_ip_saveroute(skb, entry);
		break;
	case AF_INET6:
		nf_ip6_saveroute(skb, entry);
		break;
	}

	status = qh->outfn(entry, queuenum);
	if (status < 0) {
		nf_queue_entry_free(entry);
		return status;
	}

	return 0;
}

/*
 * Packets leaving via this function must come back through nf_reinject().
 *
 * The queue number lives in the high bits of @verdict.  Returns 1 when
 * no handler exists but NF_VERDICT_FLAG_QUEUE_BYPASS is set (caller
 * should continue hook traversal); otherwise returns 0, with the skb
 * either queued or freed here.
 */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict)
{
	int ret;

	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
			return 1;
		kfree_skb(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);