// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

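/* Private data for the generic cmp expression: the constant to compare
 * against, the source register, the length of the comparison in bytes
 * and the comparison operator.
 */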
struct nft_cmp_expr {
	struct nft_data		data;
	u8			sreg;
	u8			len;
	enum nft_cmp_ops	op:8;
};

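/* Compare the source register against the constant. On mismatch the
 * verdict is set to NFT_BREAK so rule evaluation stops. LT and GT are
 * built on top of LTE/GTE: equality is rejected first, then execution
 * falls through to the non-strict comparison.
 */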
void nft_cmp_eval(const struct nft_expr *expr,
		  struct nft_regs *regs,
		  const struct nft_pktinfo *pkt)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
	int d;

	d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
	switch (priv->op) {
	case NFT_CMP_EQ:
		if (d != 0)
			goto mismatch;
		break;
	case NFT_CMP_NEQ:
		if (d == 0)
			goto mismatch;
		break;
	case NFT_CMP_LT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_LTE:
		if (d > 0)
			goto mismatch;
		break;
	case NFT_CMP_GT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_GTE:
		if (d < 0)
			goto mismatch;
		break;
	}
	return;

mismatch:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
	[NFTA_CMP_SREG]		= NLA_POLICY_MAX(NLA_BE32, NFT_REG32_MAX),
	[NFTA_CMP_OP]		= { .type = NLA_U32 },
	[NFTA_CMP_DATA]		= { .type = NLA_NESTED },
};

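/* Parse the netlink attributes into the private data: the constant is
 * unpacked first so that its length can be used to validate the source
 * register load.
 */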
static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_cmp_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(priv->data),
	};
	int err;

	err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	priv->len = desc.len;
	return 0;
}

static int nft_cmp_dump(struct sk_buff *skb,
			const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

union nft_cmp_offload_data {
	u16	val16;
	u32	val32;
	u64	val64;
};

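/* Flow offload matches on host byte order values for some keys: convert
 * the big endian constant to host order for the supported widths.
 */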
static void nft_payload_n2h(union nft_cmp_offload_data *data,
			    const u8 *val, u32 len)
{
	switch (len) {
	case 2:
		data->val16 = ntohs(*((__be16 *)val));
		break;
	case 4:
		data->val32 = ntohl(*((__be32 *)val));
		break;
	case 8:
		data->val64 = be64_to_cpu(*((__be64 *)val));
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

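/* Only equality comparisons that fit into the offload register can be
 * translated to a flow dissector match: the constant becomes the key and
 * the register mask becomes the mask, both written at the register's
 * offset into the flow match.
 */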
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
			     struct nft_flow_rule *flow,
			     const struct nft_cmp_expr *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
	union nft_cmp_offload_data _data, _datamask;
	u8 *mask = (u8 *)&flow->match.mask;
	u8 *key = (u8 *)&flow->match.key;
	u8 *data, *datamask;

	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
		return -EOPNOTSUPP;

	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
		data = (u8 *)&_data;
		datamask = (u8 *)&_datamask;
	} else {
		data = (u8 *)&priv->data;
		datamask = (u8 *)&reg->mask;
	}

	memcpy(key + reg->offset, data, reg->len);
	memcpy(mask + reg->offset, datamask, reg->len);

	flow->match.dissector.used_keys |= BIT_ULL(reg->key);
	flow->match.dissector.offset[reg->key] = reg->base_offset;

	if (reg->key == FLOW_DISSECTOR_KEY_META &&
	    reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
	    nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	nft_offload_update_dependency(ctx, &priv->data, reg->len);

	return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
	.eval		= nft_cmp_eval,
	.init		= nft_cmp_init,
	.dump		= nft_cmp_dump,
	.offload	= nft_cmp_offload,
};

/* Calculate the mask for the nft_cmp_fast expression. On big endian the
 * mask needs to include the *upper* bytes when interpreting that data as
 * something smaller than the full u32, therefore a cpu_to_le32 is done.
 */
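/* For example, a two byte comparison yields ~0U >> 16 = 0x0000ffff; on
 * big endian cpu_to_le32() turns this into 0xffff0000, which masks the
 * two bytes that come first in register memory.
 */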
static u32 nft_cmp_fast_mask(unsigned int len)
{
	__le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
						       data) * BITS_PER_BYTE - len));

	return (__force u32)mask;
}

static int nft_cmp_fast_init(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nlattr * const tb[])
{
	struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data data;
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(data),
	};
	int err;

	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	desc.len *= BITS_PER_BYTE;

	priv->mask = nft_cmp_fast_mask(desc.len);
	priv->data = data.data[0] & priv->mask;
	priv->len  = desc.len;
	priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	return 0;
}

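/* Reuse the generic offload path by rebuilding a full cmp expression
 * from the fast variant's private data.
 */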
static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
				struct nft_flow_rule *flow,
				const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= {
			.data	= {
				[0] = priv->data,
			},
		},
		.sreg	= priv->sreg,
		.len	= priv->len / BITS_PER_BYTE,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

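/* The fast variant only stores an inversion flag, so the operator is
 * reconstructed as EQ or NEQ for the dump.
 */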
static int nft_cmp_fast_dump(struct sk_buff *skb,
			     const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
	struct nft_data data;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	data.data[0] = priv->data;
	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
			  NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp_fast_init,
	.dump		= nft_cmp_fast_dump,
	.offload	= nft_cmp_fast_offload,
};

static u32 nft_cmp_mask(u32 bitlen)
{
	return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
}

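/* Build the mask for comparisons of up to 16 bytes: whole 32 bit words
 * are set to all ones, a trailing partial word gets a partial mask and
 * the remaining words are cleared.
 */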
static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
{
	int len = bitlen / BITS_PER_BYTE;
	int i, words = len / sizeof(u32);

	for (i = 0; i < words; i++) {
		data->data[i] = 0xffffffff;
		bitlen -= sizeof(u32) * BITS_PER_BYTE;
	}

	if (len % sizeof(u32))
		data->data[i++] = nft_cmp_mask(bitlen);

	for (; i < 4; i++)
		data->data[i] = 0;
}

static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(priv->data),
	};
	int err;

	err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
	priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	priv->len = desc.len;

	return 0;
}

static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_expr *expr)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= priv->data,
		.sreg	= priv->sreg,
		.len	= priv->len,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp16_fast_dump(struct sk_buff *skb,
			       const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp16_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp16_fast_init,
	.dump		= nft_cmp16_fast_dump,
	.offload	= nft_cmp16_fast_offload,
};

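/* Pick the cheapest implementation: EQ/NEQ on up to four bytes uses the
 * inlined fast variant, EQ/NEQ on up to 16 bytes in a suitably aligned
 * register uses the 16 byte fast variant, everything else falls back to
 * the generic expression.
 */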
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_data data;
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(data),
	};
	enum nft_cmp_ops op;
	u8 sreg;
	int err;

	if (tb[NFTA_CMP_SREG] == NULL ||
	    tb[NFTA_CMP_OP] == NULL ||
	    tb[NFTA_CMP_DATA] == NULL)
		return ERR_PTR(-EINVAL);

	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	switch (op) {
	case NFT_CMP_EQ:
	case NFT_CMP_NEQ:
	case NFT_CMP_LT:
	case NFT_CMP_LTE:
	case NFT_CMP_GT:
	case NFT_CMP_GTE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return ERR_PTR(err);

	sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));

	if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
		if (desc.len <= sizeof(u32))
			return &nft_cmp_fast_ops;
		else if (desc.len <= sizeof(data) &&
			 ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
			  (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
			return &nft_cmp16_fast_ops;
	}
	return &nft_cmp_ops;
}

struct nft_expr_type nft_cmp_type __read_mostly = {
	.name		= "cmp",
	.select_ops	= nft_cmp_select_ops,
	.policy		= nft_cmp_policy,
	.maxattr	= NFTA_CMP_MAX,
	.owner		= THIS_MODULE,
};