// SPDX-License-Identifier: GPL-2.0-only
/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define XT_PCPU_BLOCK_SIZE	4096
#define XT_MAX_TABLE_SIZE	(512 * 1024 * 1024)

struct xt_template {
	struct list_head list;

	/* called when table is needed in the given netns */
	int (*table_init)(struct net *net);

	struct module *me;

	/* A unique name... */
	char name[XT_TABLE_MAXNAMELEN];
};

static struct list_head xt_templates[NFPROTO_NUMPROTO];

struct xt_pernet {
	struct list_head tables[NFPROTO_NUMPROTO];
};

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static unsigned int xt_pernet_id __read_mostly;
static struct xt_af *xt __read_mostly;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

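/*
 * Usage sketch (illustrative, not part of this file; names are
 * hypothetical): an extension module typically registers an array of
 * revisions in one call and relies on the automatic rollback above on
 * partial failure.
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{ .name = "foo", .revision = 0, .family = NFPROTO_IPV4,   ... },
 *		{ .name = "foo", .revision = 1, .family = NFPROTO_UNSPEC, ... },
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */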

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
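
/*
 * Note on autoloading: request_module("%st_%s", ...) above derives the
 * module name from xt_prefix[], so e.g. a "tcp" match requested for
 * NFPROTO_IPV4 tries to load "ipt_tcp", while NFPROTO_UNSPEC requests
 * map to "xt_<name>" module aliases.
 */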

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);


static int xt_obj_to_user(u16 __user *psize, u16 size,
			  void __user *pname, const char *name,
			  u8 __user *prev, u8 rev)
{
	if (put_user(size, psize))
		return -EFAULT;
	if (copy_to_user(pname, name, strlen(name) + 1))
		return -EFAULT;
	if (put_user(rev, prev))
		return -EFAULT;

	return 0;
}

#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)

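/*
 * xt_data_to_user() copies at most ->usersize bytes of a match/target
 * payload back to userspace and zero-fills the tail up to the aligned
 * kernel size, so kernel-private state kept in the data blob (e.g.
 * resolved pointers) is not leaked when rules are read back.
 */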
int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size)
{
	usersize = usersize ? : size;
	if (copy_to_user(dst, src, usersize))
		return -EFAULT;
	if (usersize != aligned_size &&
	    clear_user(dst + usersize, aligned_size - usersize))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(xt_data_to_user);

#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))

int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);

int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
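
/*
 * Illustrative call pattern (hypothetical extension "foo" with
 * revisions 1-3 registered): userspace probes supported revisions via
 * the *_SO_GET_REVISION_{MATCH,TARGET} getsockopts, which end up here.
 * A query for revision 2 sets *err to 3 (the best available) and
 * returns 1; a query for an unknown extension returns 0 so the caller
 * may try request_module() before giving up.
 */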

static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
					   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * Some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose: it checks that name is NUL terminated and isn't a 'special'
 * name, like "..".
 *
 * Returns a negative number on error or 0 if name is usable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->match->name,
				   par->match->revision,
				   XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->table, par->table);
		return -EINVAL;
	}

	/* NFPROTO_UNSPEC implies NF_INET_* hooks which do not overlap with
	 * NF_ARP_IN,OUT,FORWARD, allow explicit extensions with NFPROTO_ARP
	 * support.
	 */
	if (par->family == NFPROTO_ARP &&
	    par->match->family != NFPROTO_ARP) {
		pr_info_ratelimited("%s_tables: %s match: not valid for this family\n",
				    xt_prefix[par->family], par->match->name);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
				    xt_prefix[par->family], par->match->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->match->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}

/** xt_check_table_hooks - check hook entry points are sane
 *
 * @info: xt_table_info to check
 * @valid_hooks: hook entry points that we can enter from
 *
 * Validates that the hook entry and underflow points are set up.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
{
	const char *err = "unsorted underflow";
	unsigned int i, max_uflow, max_entry;
	bool check_hooks = false;

	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));

	max_entry = 0;
	max_uflow = 0;

	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
		if (!(valid_hooks & (1 << i)))
			continue;

		if (info->hook_entry[i] == 0xFFFFFFFF)
			return -EINVAL;
		if (info->underflow[i] == 0xFFFFFFFF)
			return -EINVAL;

		if (check_hooks) {
			if (max_uflow > info->underflow[i])
				goto error;

			if (max_uflow == info->underflow[i]) {
				err = "duplicate underflow";
				goto error;
			}
			if (max_entry > info->hook_entry[i]) {
				err = "unsorted entry";
				goto error;
			}
			if (max_entry == info->hook_entry[i]) {
				err = "duplicate entry";
				goto error;
			}
		}
		max_entry = info->hook_entry[i];
		max_uflow = info->underflow[i];
		check_hooks = true;
	}

	return 0;
error:
	pr_err_ratelimited("%s at hook %d\n", err, i);
	return -EINVAL;
}
EXPORT_SYMBOL(xt_check_table_hooks);

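/*
 * Standard-target verdicts are encoded as -verdict - 1: NF_ACCEPT (0)
 * is stored as -1, NF_DROP (1) as -2 and NF_QUEUE (3) as -4, while
 * XT_RETURN has its own encoding (-NF_REPEAT - 1). Positive values are
 * jump offsets into the rule blob.
 */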
static bool verdict_ok(int verdict)
{
	if (verdict > 0)
		return true;

	if (verdict < 0) {
		int v = -verdict - 1;

		if (verdict == XT_RETURN)
			return true;

		switch (v) {
		case NF_ACCEPT: return true;
		case NF_DROP: return true;
		case NF_QUEUE: return true;
		default:
			break;
		}

		return false;
	}

	return false;
}

static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
			const char *msg, unsigned int msglen)
{
	return usersize == kernsize && strnlen(msg, msglen) < msglen;
}

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (WARN_ON(!xp->compat_tab))
		return -ENOMEM;

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

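/*
 * Worked example (hypothetical numbers): with 32-bit entries at compat
 * offsets 0, 100 and 180 whose 64-bit representations are 8, 16 and 24
 * bytes larger, compat_tab holds {0,8}, {100,24}, {180,48} (deltas
 * accumulate in xt_compat_add_offset() above). A compat jump target of
 * 180 must then be shifted by the accumulated delta of all *preceding*
 * entries, i.e. 24, which is what the binary search below returns.
 */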
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

int xt_compat_init_offsets(u8 af, unsigned int number)
{
	size_t mem;

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
		return -EINVAL;

	if (WARN_ON(xt[af].compat_tab))
		return -EINVAL;

	mem = sizeof(struct compat_delta) * number;
	if (mem > XT_MAX_TABLE_SIZE)
		return -ENOMEM;

	xt[af].compat_tab = vmalloc(mem);
	if (!xt[af].compat_tab)
		return -ENOMEM;

	xt[af].number = number;
	xt[af].cur = 0;

	return 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));

	msize += off;
	m->u.user.match_size = msize;
	strscpy(name, match->name, sizeof(name));
	module_put(match->me);
	strscpy_pad(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	/* Must be last as it ends in a flexible-array member. */
	TRAILING_OVERLAP(struct compat_xt_entry_target, t, data,
		compat_uint_t verdict;
	);
};

struct compat_xt_error_target {
	/* Must be last as it ends in a flexible-array member. */
	TRAILING_OVERLAP(struct compat_xt_entry_target, t, data,
		char errorname[XT_FUNCTION_MAXNAMELEN];
	);
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct compat_xt_standard_target *st = (const void *)t;

		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct compat_xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * Validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for the
 * CONFIG_NETFILTER_XTABLES_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]     target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size; the sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct xt_standard_target *st = (const void *)t;

		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);

/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or zeroed kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
		return NULL;

	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->target->name,
				   par->target->revision,
				   XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->table, par->table);
		return -EINVAL;
	}

	/* NFPROTO_UNSPEC implies NF_INET_* hooks which do not overlap with
	 * NF_ARP_IN,OUT,FORWARD, allow explicit extensions with NFPROTO_ARP
	 * support.
	 */
	if (par->family == NFPROTO_ARP &&
	    par->target->family != NFPROTO_ARP) {
		pr_info_ratelimited("%s_tables: %s target: not valid for this family\n",
				    xt_prefix[par->family], par->target->name);
		return -EINVAL;
	}

	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
				    xt_prefix[par->family], par->target->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->target->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters - copy counters and metadata from a sockptr_t
 *
 * @arg: src sockptr
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 *
 * Copies the counter metadata from @arg and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @arg to the new memory and returns a pointer to it.
 *
 * If called from a compat syscall, @info gets converted automatically to the
 * 64bit representation.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters(sockptr_t arg, unsigned int len,
		       struct xt_counters_info *info)
{
	size_t offset;
	void *mem;
	u64 size;

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	if (in_compat_syscall()) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		offset = sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		offset = sizeof(*info);
	}
	info->name[sizeof(info->name) - 1] = '\0';

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters);
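
/*
 * Caller sketch (illustrative, modelled on the do_add_counters() paths
 * in the per-family table code):
 *
 *	struct xt_counters_info tmp;
 *	struct xt_counters *paddc;
 *
 *	paddc = xt_copy_counters(arg, len, &tmp);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *	// ... add tmp.num_counters entries to the table ...
 *	vfree(paddc);
 */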

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
			      /* UAPI 0-sized destination */);

	tsize += off;
	t->u.user.target_size = tsize;
	strscpy(name, target->name, sizeof(name));
	module_put(target->me);
	strscpy_pad(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
		return NULL;

	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
	if (!info)
		return NULL;

	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	struct xt_table *t;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt_net->tables[af], list) {
		if (strcmp(t->name, name) == 0) {
			mutex_unlock(&xt[af].mutex);
			return t;
		}
	}
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL(xt_find_table);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	struct module *owner = NULL;
	struct xt_template *tmpl;
	struct xt_table *t;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt_net->tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	/* Table doesn't exist in this netns, check larval list */
	list_for_each_entry(tmpl, &xt_templates[af], list) {
		int err;

		if (strcmp(tmpl->name, name))
			continue;
		if (!try_module_get(tmpl->me))
			goto out;

		owner = tmpl->me;

		mutex_unlock(&xt[af].mutex);
		err = tmpl->table_init(net);
		if (err < 0) {
			module_put(owner);
			return ERR_PTR(err);
		}

		mutex_lock(&xt[af].mutex);
		break;
	}

	/* and once again: */
	list_for_each_entry(t, &xt_net->tables[af], list)
		if (strcmp(t->name, name) == 0 && owner == t->me)
			return t;

	module_put(owner);
 out:
	mutex_unlock(&xt[af].mutex);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
					    const char *name)
{
	struct xt_table *t = xt_find_table_lock(net, af, name);

#ifdef CONFIG_MODULES
	if (IS_ERR(t)) {
		int err = request_module("%stable_%s", xt_prefix[af], name);
		if (err < 0)
			return ERR_PTR(err);
		t = xt_find_table_lock(net, af, name);
	}
#endif

	return t;
}
EXPORT_SYMBOL_GPL(xt_request_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

#ifdef CONFIG_NETFILTER_XTABLES_LEGACY
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
						  cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

struct xt_counters *xt_counters_alloc(unsigned int counters)
{
	struct xt_counters *mem;

	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
		return NULL;

	counters *= sizeof(*mem);
	if (counters > XT_MAX_TABLE_SIZE)
		return NULL;

	return vzalloc(counters);
}
EXPORT_SYMBOL(xt_counters_alloc);

struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	unsigned int cpu;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/* make sure all cpus see new ->private value */
	smp_mb();

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries...
	 */
	local_bh_enable();

	/* ... so wait for even xt_recseq on all cpus */
	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);
		u32 seq = raw_read_seqcount(s);

		if (seq & 1) {
			do {
				cond_resched();
				cpu_relax();
			} while (seq == raw_read_seqcount(s));
		}
	}

	audit_log_nfcfg(table->name, table->af, private->number,
			!private->number ? AUDIT_XT_OP_REGISTER :
					   AUDIT_XT_OP_REPLACE,
			GFP_KERNEL);
	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	struct xt_table_info *private;
	struct xt_table *t, *table;
	int ret;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &xt_net->tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &xt_net->tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	audit_log_nfcfg(table->name, table->af, private->number,
			AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
	kfree(table->ops);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#endif

#ifdef CONFIG_PROC_FS
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	u8 af = (unsigned long)pde_data(file_inode(seq->file));
	struct net *net = seq_file_net(seq);
	struct xt_pernet *xt_net;

	xt_net = net_generic(net, xt_pernet_id);

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&xt_net->tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	u8 af = (unsigned long)pde_data(file_inode(seq->file));
	struct net *net = seq_file_net(seq);
	struct xt_pernet *xt_net;

	xt_net = net_generic(net, xt_pernet_id);

	return seq_list_next(v, &xt_net->tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	u_int8_t af = (unsigned long)pde_data(file_inode(seq->file));

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

/*
 * Traversal state for ip{,6}_{tables,matches}, used to help cross
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	if (ppos != NULL)
		++(*ppos);

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[nfproto].target : &xt[nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		fallthrough;
	default:
		return NULL;
	}
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table: table with metadata needed to set up hooks
 * @fn: Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kzalloc_objs(*ops, num_hooks);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);

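/*
 * Usage sketch (illustrative, mirroring the per-family table modules;
 * names are hypothetical and error handling is abbreviated): a table
 * allocates its hook ops once and registers them per netns.
 *
 *	static struct nf_hook_ops *foo_ops __read_mostly;
 *
 *	foo_ops = xt_hook_ops_alloc(&foo_table, foo_hook_fn);
 *	if (IS_ERR(foo_ops))
 *		return PTR_ERR(foo_ops);
 *	// per netns, e.g. from the ->table_init() callback:
 *	err = nf_register_net_hooks(net, foo_ops,
 *				    hweight32(foo_table.valid_hooks));
 */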
int xt_register_template(const struct xt_table *table,
			 int (*table_init)(struct net *net))
{
	int ret = -EBUSY, af = table->af;
	struct xt_template *t;

	mutex_lock(&xt[af].mutex);

	list_for_each_entry(t, &xt_templates[af], list) {
		if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0))
			goto out_unlock;
	}

	ret = -ENOMEM;
	t = kzalloc_obj(*t);
	if (!t)
		goto out_unlock;

	BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name));

	strscpy(t->name, table->name, sizeof(t->name));
	t->table_init = table_init;
	t->me = table->me;
	list_add(&t->list, &xt_templates[af]);
	ret = 0;
out_unlock:
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xt_register_template);
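
/*
 * Lifecycle sketch (illustrative, hypothetical names): a table module
 * registers a template at module init; the table itself is only
 * instantiated in a netns on first use, via the ->table_init()
 * callback stored above (see the larval-list walk in
 * xt_find_table_lock()).
 *
 *	static int __init foo_tables_init(void)
 *	{
 *		return xt_register_template(&foo_table, foo_table_init);
 *	}
 *
 *	static void __exit foo_tables_exit(void)
 *	{
 *		xt_unregister_template(&foo_table);
 *	}
 */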

void xt_unregister_template(const struct xt_table *table)
{
	struct xt_template *t;
	int af = table->af;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt_templates[af], list) {
		if (strcmp(table->name, t->name))
			continue;

		list_del(&t->list);
		mutex_unlock(&xt[af].mutex);
		kfree(t);
		return;
	}

	mutex_unlock(&xt[af].mutex);
	WARN_ON_ONCE(1);
}
EXPORT_SYMBOL_GPL(xt_unregister_template);

int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
				    sizeof(struct seq_net_private),
				    (void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
				       &xt_match_seq_ops,
				       sizeof(struct nf_mttg_trav),
				       (void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
				       &xt_target_seq_ops,
				       sizeof(struct nf_mttg_trav),
				       (void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strscpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

#ifdef CONFIG_NETFILTER_XTABLES_LEGACY
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.  Freeing any counter may free an entire block, so all
 * counters allocated using the same state must be freed at the same
 * time.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);

void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
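
/*
 * Block semantics in brief: xt_percpu_counter_alloc() hands out
 * sizeof(struct xt_counters) slices of a 4 KiB percpu block that is
 * itself aligned to XT_PCPU_BLOCK_SIZE. Because of that alignment,
 * xt_percpu_counter_free() can detect the first counter of a block
 * (offset 0 within the block) and free the whole block exactly once;
 * freeing any other counter in the same block is a no-op.
 */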
#endif

static int __net_init xt_net_init(struct net *net)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&xt_net->tables[i]);
	return 0;
}

static void __net_exit xt_net_exit(struct net *net)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
	.exit = xt_net_exit,
	.id   = &xt_pernet_id,
	.size = sizeof(struct xt_pernet),
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	if (IS_ENABLED(CONFIG_NETFILTER_XTABLES_LEGACY)) {
		for_each_possible_cpu(i) {
			seqcount_init(&per_cpu(xt_recseq, i));
		}
	}

	xt = kzalloc_objs(struct xt_af, NFPROTO_NUMPROTO);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
		INIT_LIST_HEAD(&xt_templates[i]);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);