include/linux/skmsg.h, from the Linux kernel mirror (for testing) at git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit d986ba0329dcca102e227995371135c9bbcefb6b (640 lines, 16 kB)

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS	MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS	(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32 start;
	u32 curr;
	u32 end;
	u32 size;
	u32 copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and sections when the list becomes
	 *    partitioned (e.g. end < start). The crypto APIs require the
	 *    chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg sg;
	void *data;
	void *data_end;
	u32 apply_bytes;
	u32 cork_bytes;
	u32 flags;
	struct sk_buff *skb;
	struct sock *sk_redir;
	struct sock *sk;
	struct list_head list;
};

struct sk_psock_progs {
	struct bpf_prog *msg_parser;
	struct bpf_prog *stream_parser;
	struct bpf_prog *stream_verdict;
	struct bpf_prog *skb_verdict;
	struct bpf_link *msg_parser_link;
	struct bpf_link *stream_parser_link;
	struct bpf_link *stream_verdict_link;
	struct bpf_link *skb_verdict_link;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head list;
	struct bpf_map *map;
	void *link_raw;
};

struct sk_psock_work_state {
	u32 len;
	u32 off;
};

struct sk_psock {
	struct sock *sk;
	struct sock *sk_redir;
	u32 apply_bytes;
	u32 cork_bytes;
	u32 eval;
	bool redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg *cork;
	struct sk_psock_progs progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser strp;
	u32 copied_seq;
	u32 ingress_bytes;
#endif
	struct sk_buff_head ingress_skb;
	struct list_head ingress_msg;
	spinlock_t ingress_lock;
	/** @msg_tot_len: Total bytes queued in ingress_msg list. */
	u32 msg_tot_len;
	unsigned long state;
	struct list_head link;
	spinlock_t link_lock;
	refcount_t refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	/* psock_update_sk_prot may be called with restore=false many times
	 * so the handler must be safe for this case. It will be called
	 * exactly once with restore=true when the psock is being destroyed
	 * and psock refcnt is zero, but before an RCU grace period.
	 */
	int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				    bool restore);
	struct proto *sk_proto;
	struct mutex work_mutex;
	struct sk_psock_work_state work_state;
	struct delayed_work work;
	struct sock *sk_pair;
	struct rcu_work rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		     int len, int flags, int *copied_from_self);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)
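
/* Editor's illustration, not part of the upstream header: the sg ring
 * indices live in [0, NR_MSG_FRAG_IDS) and wrap, which is why
 * sk_msg_iter_dist() has two cases. With the common MAX_SKB_FRAGS == 17,
 * NR_MSG_FRAG_IDS == 18, so start == 16 and end == 2 give
 * sk_msg_iter_dist(16, 2) == 2 + (18 - 16) == 4 occupied slots. A
 * minimal sketch of walking the ring with the helpers above:
 */
static inline u32 sk_msg_example_bytes(struct sk_msg *msg)
{
	u32 i = msg->sg.start, bytes = 0;

	/* Visit every populated scatterlist element in ring order;
	 * the sum should match msg->sg.size.
	 */
	while (i != msg->sg.end) {
		bytes += msg->sg.data[i].length;
		sk_msg_iter_var_next(i);
	}
	return bytes;
}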
static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.size -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}
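
/* Editor's sketch, not upstream code: a typical append path. It assumes
 * the caller has already charged the socket for @len bytes;
 * sk_msg_page_add() takes its own page reference via get_page(), and
 * sk_msg_compute_data_pointers() then refreshes the data/data_end
 * window that sk_msg BPF programs see.
 */
static inline bool sk_msg_example_append(struct sk_msg *msg,
					 struct page *page, u32 len)
{
	if (sk_msg_full(msg))
		return false;
	sk_msg_page_add(msg, page, len, 0);
	sk_msg_compute_data_pointers(msg);
	return true;
}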
static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_skbadd(sk, skb);
	kfree_skb(skb);
}

static inline u32 sk_psock_get_msg_len_nolock(struct sk_psock *psock)
{
	/* Used by ioctl to read msg_tot_len only; lock-free for performance */
	return READ_ONCE(psock->msg_tot_len);
}

static inline void sk_psock_msg_len_add_locked(struct sk_psock *psock, int diff)
{
	/* Use WRITE_ONCE to ensure correct read in sk_psock_get_msg_len_nolock().
	 * ingress_lock should be held to prevent concurrent updates to msg_tot_len.
	 */
	WRITE_ONCE(psock->msg_tot_len, psock->msg_tot_len + diff);
}

static inline void sk_psock_msg_len_add(struct sk_psock *psock, int diff)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_msg_len_add_locked(psock, diff);
	spin_unlock_bh(&psock->ingress_lock);
}

static inline bool sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	bool ret;

	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		list_add_tail(&msg->list, &psock->ingress_msg);
		sk_psock_msg_len_add_locked(psock, msg->sg.size);
		ret = true;
	} else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
		ret = false;
	}
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg) {
		list_del(&msg->list);
		sk_psock_msg_len_add_locked(psock, -msg->sg.size);
	}
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}
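
/* Editor's note, not upstream code: sk_psock_queue_msg() and
 * sk_psock_dequeue_msg() form the producer/consumer interface for the
 * ingress_msg list. Enqueue only succeeds while SK_PSOCK_TX_ENABLED is
 * set, and both sides adjust msg_tot_len under ingress_lock, which is
 * what allows sk_psock_get_msg_len_nolock() to service SIOCINQ-style
 * queries with a plain READ_ONCE(). A minimal teardown-style sketch:
 */
static inline void sk_psock_example_drain(struct sk_psock *psock)
{
	struct sk_msg *msg;

	/* Pop and free every queued ingress message. */
	while ((msg = sk_psock_dequeue_msg(psock)) != NULL) {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}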
static inline struct sk_msg *sk_psock_peek_msg_locked(struct sk_psock *psock)
{
	return list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = sk_psock_peek_msg_locked(psock);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

/*
 * This specialized allocator has to be a macro for its allocations to be
 * accounted separately (to have a separate alloc_tag). The typecast is
 * intentional to enforce typesafety.
 */
#define sk_psock_init_link()	\
		kzalloc_obj(struct sk_psock_link, GFP_ATOMIC | __GFP_NOWARN)

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}
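
/* Editor's sketch, not upstream code: the canonical lookup pattern.
 * sk_psock_get() combines an RCU dereference of sk_user_data with
 * refcount_inc_not_zero(), so NULL covers both "no psock attached" and
 * "psock already draining"; every successful get must be paired with a
 * sk_psock_put().
 */
static inline bool sk_psock_example_tx_enabled(struct sock *sk)
{
	struct sk_psock *psock = sk_psock_get(sk);
	bool enabled = false;

	if (psock) {
		enabled = sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED);
		sk_psock_put(sk, psock);
	}
	return enabled;
}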
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	read_lock_bh(&sk->sk_callback_lock);
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}
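
/* Editor's note, not upstream code: psock_set_prog() is an unconditional
 * xchg() that releases whatever program was installed, while
 * psock_replace_prog() is a compare-and-swap that returns -ENOENT when
 * another updater won the race. A hypothetical detach helper built on it:
 */
static inline int psock_example_detach(struct sk_psock_progs *progs,
				       struct bpf_prog *expected)
{
	/* Clears the slot only if it still holds @expected. */
	return psock_replace_prog(&progs->stream_verdict, NULL, expected);
}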
static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

/* for tcp only, sk is locked */
static inline ssize_t sk_psock_msg_inq(struct sock *sk)
{
	struct sk_psock *psock;
	ssize_t inq = 0;

	psock = sk_psock_get(sk);
	if (likely(psock)) {
		inq = sk_psock_get_msg_len_nolock(psock);
		sk_psock_put(sk, psock);
	}
	return inq;
}

/* for udp only, sk is not locked */
static inline ssize_t sk_msg_first_len(struct sock *sk)
{
	struct sk_psock *psock;
	struct sk_msg *msg;
	ssize_t inq = 0;

	psock = sk_psock_get(sk);
	if (likely(psock)) {
		spin_lock_bh(&psock->ingress_lock);
		msg = sk_psock_peek_msg_locked(psock);
		if (msg)
			inq = msg->sg.size;
		spin_unlock_bh(&psock->ingress_lock);
		sk_psock_put(sk, psock);
	}
	return inq;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK	~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */
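
/* Editor's note, not part of the upstream header: skb->_sk_redir packs a
 * struct sock pointer together with two flag bits. This works because
 * struct sock pointers are at least 4-byte aligned, leaving bits 0-1
 * free: BPF_F_INGRESS is bit 0 (1ULL << 0 in the BPF UAPI) and
 * BPF_F_STRPARSER is bit 1 (defined above), so BPF_F_PTR_MASK == ~3UL.
 * For example (hypothetical address), after
 * skb_bpf_set_redir(skb, (struct sock *)0xffff888012345640, true),
 * _sk_redir holds 0xffff888012345641, skb_bpf_ingress() is true, and
 * skb_bpf_redirect_fetch() masks the low bits to recover the pointer.
 */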