Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

- Fix alignment of arm64 JIT buffer to prevent atomic tearing (Fuad
Tabba)

- Fix invariant violation for single value tnums in the verifier
(Harishankar Vishwanathan, Paul Chaignon)

- Fix a bunch of issues found by ASAN in selftests/bpf (Ihor Solodrai)

- Fix race in devmap and cpumap on PREEMPT_RT (Jiayuan Chen)

- Fix show_fdinfo of kprobe_multi when cookies are not present (Jiri
Olsa)

- Fix race in freeing special fields in BPF maps to prevent memory
leaks (Kumar Kartikeya Dwivedi)

- Fix OOB read in dmabuf_collector (T.J. Mercier)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (36 commits)
selftests/bpf: Avoid simplification of crafted bounds test
selftests/bpf: Test refinement of single-value tnum
bpf: Improve bounds when tnum has a single possible value
bpf: Introduce tnum_step to step through tnum's members
bpf: Fix race in devmap on PREEMPT_RT
bpf: Fix race in cpumap on PREEMPT_RT
selftests/bpf: Add tests for special fields races
bpf: Retire rcu_trace_implies_rcu_gp() from local storage
bpf: Delay freeing fields in local storage
bpf: Lose const-ness of map in map_check_btf()
bpf: Register dtor for freeing special fields
selftests/bpf: Fix OOB read in dmabuf_collector
selftests/bpf: Fix a memory leak in xdp_flowtable test
bpf: Fix stack-out-of-bounds write in devmap
bpf: Fix kprobe_multi cookies access in show_fdinfo callback
bpf, arm64: Force 8-byte alignment for JIT buffer to prevent atomic tearing
selftests/bpf: Don't override SIGSEGV handler with ASAN
selftests/bpf: Check BPFTOOL env var in detect_bpftool_path()
selftests/bpf: Fix out-of-bounds array access bugs reported by ASAN
selftests/bpf: Fix array bounds warning in jit_disasm_helpers
...

+1181 -237
+1 -1
arch/arm64/net/bpf_jit_comp.c
···
     extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
     image_size = extable_offset + extable_size;
     ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
-                                          sizeof(u32), &header, &image_ptr,
+                                          sizeof(u64), &header, &image_ptr,
                                           jit_fill_hole);
     if (!ro_header) {
         prog = orig_prog;
+2 -2
include/linux/bpf.h
···
     u32 (*map_fd_sys_lookup_elem)(void *ptr);
     void (*map_seq_show_elem)(struct bpf_map *map, void *key,
                               struct seq_file *m);
-    int (*map_check_btf)(const struct bpf_map *map,
+    int (*map_check_btf)(struct bpf_map *map,
                          const struct btf *btf,
                          const struct btf_type *key_type,
                          const struct btf_type *value_type);
···
            map->ops->map_seq_show_elem;
 }

-int map_check_no_btf(const struct bpf_map *map,
+int map_check_no_btf(struct bpf_map *map,
                      const struct btf *btf,
                      const struct btf_type *key_type,
                      const struct btf_type *value_type);
+1 -1
include/linux/bpf_local_storage.h
···
 void bpf_local_storage_map_free(struct bpf_map *map,
                                 struct bpf_local_storage_cache *cache);

-int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+int bpf_local_storage_map_check_btf(struct bpf_map *map,
                                     const struct btf *btf,
                                     const struct btf_type *key_type,
                                     const struct btf_type *value_type);
+6
include/linux/bpf_mem_alloc.h
···
     struct obj_cgroup *objcg;
     bool percpu;
     struct work_struct work;
+    void (*dtor_ctx_free)(void *ctx);
+    void *dtor_ctx;
 };

 /* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
···
 /* The percpu allocation with a specific unit size. */
 int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+void bpf_mem_alloc_set_dtor(struct bpf_mem_alloc *ma,
+                            void (*dtor)(void *obj, void *ctx),
+                            void (*dtor_ctx_free)(void *ctx),
+                            void *ctx);

 /* Check the allocation size for kmalloc equivalent allocator */
 int bpf_mem_alloc_check_size(bool percpu, size_t size);
+3
include/linux/tnum.h
···
     return !(tnum_subreg(a)).mask;
 }

+/* Returns the smallest member of t larger than z */
+u64 tnum_step(struct tnum t, u64 z);
+
 #endif /* _LINUX_TNUM_H */
+1 -1
kernel/bpf/arena.c
···
     return -EOPNOTSUPP;
 }

-static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf,
+static int arena_map_check_btf(struct bpf_map *map, const struct btf *btf,
                                const struct btf_type *key_type, const struct btf_type *value_type)
 {
     return 0;
+1 -1
kernel/bpf/arraymap.c
···
     rcu_read_unlock();
 }

-static int array_map_check_btf(const struct bpf_map *map,
+static int array_map_check_btf(struct bpf_map *map,
                                const struct btf *btf,
                                const struct btf_type *key_type,
                                const struct btf_type *value_type)
+1 -1
kernel/bpf/bloom_filter.c
···
     return -EINVAL;
 }

-static int bloom_map_check_btf(const struct bpf_map *map,
+static int bloom_map_check_btf(struct bpf_map *map,
                                const struct btf *btf,
                                const struct btf_type *key_type,
                                const struct btf_type *value_type)
+1 -1
kernel/bpf/bpf_insn_array.c
···
     return -EINVAL;
 }

-static int insn_array_check_btf(const struct bpf_map *map,
+static int insn_array_check_btf(struct bpf_map *map,
                                 const struct btf *btf,
                                 const struct btf_type *key_type,
                                 const struct btf_type *value_type)
+40 -37
kernel/bpf/bpf_local_storage.c
··· 107 107 { 108 108 struct bpf_local_storage *local_storage; 109 109 110 - /* If RCU Tasks Trace grace period implies RCU grace period, do 111 - * kfree(), else do kfree_rcu(). 110 + /* 111 + * RCU Tasks Trace grace period implies RCU grace period, do 112 + * kfree() directly. 112 113 */ 113 114 local_storage = container_of(rcu, struct bpf_local_storage, rcu); 114 - if (rcu_trace_implies_rcu_gp()) 115 - kfree(local_storage); 116 - else 117 - kfree_rcu(local_storage, rcu); 115 + kfree(local_storage); 118 116 } 119 117 120 118 /* Handle use_kmalloc_nolock == false */ ··· 136 138 137 139 static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu) 138 140 { 139 - if (rcu_trace_implies_rcu_gp()) 140 - bpf_local_storage_free_rcu(rcu); 141 - else 142 - call_rcu(rcu, bpf_local_storage_free_rcu); 141 + /* 142 + * RCU Tasks Trace grace period implies RCU grace period, do 143 + * kfree() directly. 144 + */ 145 + bpf_local_storage_free_rcu(rcu); 143 146 } 144 147 145 148 static void bpf_local_storage_free(struct bpf_local_storage *local_storage, ··· 163 164 bpf_local_storage_free_trace_rcu); 164 165 } 165 166 167 + /* rcu callback for use_kmalloc_nolock == false */ 168 + static void __bpf_selem_free_rcu(struct rcu_head *rcu) 169 + { 170 + struct bpf_local_storage_elem *selem; 171 + struct bpf_local_storage_map *smap; 172 + 173 + selem = container_of(rcu, struct bpf_local_storage_elem, rcu); 174 + /* bpf_selem_unlink_nofail may have already cleared smap and freed fields. */ 175 + smap = rcu_dereference_check(SDATA(selem)->smap, 1); 176 + 177 + if (smap) 178 + bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); 179 + kfree(selem); 180 + } 181 + 166 182 /* rcu tasks trace callback for use_kmalloc_nolock == false */ 167 183 static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu) 168 184 { 169 - struct bpf_local_storage_elem *selem; 170 - 171 - selem = container_of(rcu, struct bpf_local_storage_elem, rcu); 172 - if (rcu_trace_implies_rcu_gp()) 173 - kfree(selem); 174 - else 175 - kfree_rcu(selem, rcu); 185 + /* 186 + * RCU Tasks Trace grace period implies RCU grace period, do 187 + * kfree() directly. 188 + */ 189 + __bpf_selem_free_rcu(rcu); 176 190 } 177 191 178 192 /* Handle use_kmalloc_nolock == false */ ··· 193 181 bool vanilla_rcu) 194 182 { 195 183 if (vanilla_rcu) 196 - kfree_rcu(selem, rcu); 184 + call_rcu(&selem->rcu, __bpf_selem_free_rcu); 197 185 else 198 186 call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu); 199 187 } ··· 207 195 /* The bpf_local_storage_map_free will wait for rcu_barrier */ 208 196 smap = rcu_dereference_check(SDATA(selem)->smap, 1); 209 197 210 - if (smap) { 211 - migrate_disable(); 198 + if (smap) 212 199 bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); 213 - migrate_enable(); 214 - } 215 200 kfree_nolock(selem); 216 201 } 217 202 218 203 static void bpf_selem_free_trace_rcu(struct rcu_head *rcu) 219 204 { 220 - if (rcu_trace_implies_rcu_gp()) 221 - bpf_selem_free_rcu(rcu); 222 - else 223 - call_rcu(rcu, bpf_selem_free_rcu); 205 + /* 206 + * RCU Tasks Trace grace period implies RCU grace period, do 207 + * kfree() directly. 
208 + */ 209 + bpf_selem_free_rcu(rcu); 224 210 } 225 211 226 212 void bpf_selem_free(struct bpf_local_storage_elem *selem, 227 213 bool reuse_now) 228 214 { 229 - struct bpf_local_storage_map *smap; 230 - 231 - smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held()); 232 - 233 215 if (!selem->use_kmalloc_nolock) { 234 216 /* 235 217 * No uptr will be unpin even when reuse_now == false since uptr 236 218 * is only supported in task local storage, where 237 219 * smap->use_kmalloc_nolock == true. 238 220 */ 239 - if (smap) 240 - bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); 241 221 __bpf_selem_free(selem, reuse_now); 242 222 return; 243 223 } ··· 801 797 return 0; 802 798 } 803 799 804 - int bpf_local_storage_map_check_btf(const struct bpf_map *map, 800 + int bpf_local_storage_map_check_btf(struct bpf_map *map, 805 801 const struct btf *btf, 806 802 const struct btf_type *key_type, 807 803 const struct btf_type *value_type) ··· 962 958 */ 963 959 synchronize_rcu(); 964 960 965 - if (smap->use_kmalloc_nolock) { 966 - rcu_barrier_tasks_trace(); 967 - rcu_barrier(); 968 - } 961 + /* smap remains in use regardless of kmalloc_nolock, so wait unconditionally. */ 962 + rcu_barrier_tasks_trace(); 963 + rcu_barrier(); 969 964 kvfree(smap->buckets); 970 965 bpf_map_area_free(smap); 971 966 }
+15 -2
kernel/bpf/cpumap.c
··· 29 29 #include <linux/sched.h> 30 30 #include <linux/workqueue.h> 31 31 #include <linux/kthread.h> 32 + #include <linux/local_lock.h> 32 33 #include <linux/completion.h> 33 34 #include <trace/events/xdp.h> 34 35 #include <linux/btf_ids.h> ··· 53 52 struct list_head flush_node; 54 53 struct bpf_cpu_map_entry *obj; 55 54 unsigned int count; 55 + local_lock_t bq_lock; 56 56 }; 57 57 58 58 /* Struct for every remote "destination" CPU in map */ ··· 453 451 for_each_possible_cpu(i) { 454 452 bq = per_cpu_ptr(rcpu->bulkq, i); 455 453 bq->obj = rcpu; 454 + local_lock_init(&bq->bq_lock); 456 455 } 457 456 458 457 /* Alloc queue */ ··· 725 722 struct ptr_ring *q; 726 723 int i; 727 724 725 + lockdep_assert_held(&bq->bq_lock); 726 + 728 727 if (unlikely(!bq->count)) 729 728 return; 730 729 ··· 754 749 } 755 750 756 751 /* Runs under RCU-read-side, plus in softirq under NAPI protection. 757 - * Thus, safe percpu variable access. 752 + * Thus, safe percpu variable access. PREEMPT_RT relies on 753 + * local_lock_nested_bh() to serialise access to the per-CPU bq. 758 754 */ 759 755 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) 760 756 { 761 - struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); 757 + struct xdp_bulk_queue *bq; 758 + 759 + local_lock_nested_bh(&rcpu->bulkq->bq_lock); 760 + bq = this_cpu_ptr(rcpu->bulkq); 762 761 763 762 if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) 764 763 bq_flush_to_queue(bq); ··· 783 774 784 775 list_add(&bq->flush_node, flush_list); 785 776 } 777 + 778 + local_unlock_nested_bh(&rcpu->bulkq->bq_lock); 786 779 } 787 780 788 781 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, ··· 821 810 struct xdp_bulk_queue *bq, *tmp; 822 811 823 812 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { 813 + local_lock_nested_bh(&bq->obj->bulkq->bq_lock); 824 814 bq_flush_to_queue(bq); 815 + local_unlock_nested_bh(&bq->obj->bulkq->bq_lock); 825 816 826 817 /* If already running, costs spin_lock_irqsave + smb_mb */ 827 818 wake_up_process(bq->obj->kthread);
+38 -9
kernel/bpf/devmap.c
··· 45 45 * types of devmap; only the lookup and insertion is different. 46 46 */ 47 47 #include <linux/bpf.h> 48 + #include <linux/local_lock.h> 48 49 #include <net/xdp.h> 49 50 #include <linux/filter.h> 50 51 #include <trace/events/xdp.h> ··· 61 60 struct net_device *dev_rx; 62 61 struct bpf_prog *xdp_prog; 63 62 unsigned int count; 63 + local_lock_t bq_lock; 64 64 }; 65 65 66 66 struct bpf_dtab_netdev { ··· 383 381 int to_send = cnt; 384 382 int i; 385 383 384 + lockdep_assert_held(&bq->bq_lock); 385 + 386 386 if (unlikely(!cnt)) 387 387 return; 388 388 ··· 429 425 struct xdp_dev_bulk_queue *bq, *tmp; 430 426 431 427 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { 428 + local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock); 432 429 bq_xmit_all(bq, XDP_XMIT_FLUSH); 433 430 bq->dev_rx = NULL; 434 431 bq->xdp_prog = NULL; 435 432 __list_del_clearprev(&bq->flush_node); 433 + local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock); 436 434 } 437 435 } 438 436 ··· 457 451 458 452 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu 459 453 * variable access, and map elements stick around. See comment above 460 - * xdp_do_flush() in filter.c. 454 + * xdp_do_flush() in filter.c. PREEMPT_RT relies on local_lock_nested_bh() 455 + * to serialise access to the per-CPU bq. 461 456 */ 462 457 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, 463 458 struct net_device *dev_rx, struct bpf_prog *xdp_prog) 464 459 { 465 - struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); 460 + struct xdp_dev_bulk_queue *bq; 461 + 462 + local_lock_nested_bh(&dev->xdp_bulkq->bq_lock); 463 + bq = this_cpu_ptr(dev->xdp_bulkq); 466 464 467 465 if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) 468 466 bq_xmit_all(bq, 0); ··· 487 477 } 488 478 489 479 bq->q[bq->count++] = xdpf; 480 + 481 + local_unlock_nested_bh(&dev->xdp_bulkq->bq_lock); 490 482 } 491 483 492 484 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, ··· 600 588 } 601 589 602 590 /* Get ifindex of each upper device. 'indexes' must be able to hold at 603 - * least MAX_NEST_DEV elements. 604 - * Returns the number of ifindexes added. 591 + * least 'max' elements. 592 + * Returns the number of ifindexes added, or -EOVERFLOW if there are too 593 + * many upper devices. 
605 594 */ 606 - static int get_upper_ifindexes(struct net_device *dev, int *indexes) 595 + static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max) 607 596 { 608 597 struct net_device *upper; 609 598 struct list_head *iter; 610 599 int n = 0; 611 600 612 601 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 602 + if (n >= max) 603 + return -EOVERFLOW; 613 604 indexes[n++] = upper->ifindex; 614 605 } 606 + 615 607 return n; 616 608 } 617 609 ··· 631 615 int err; 632 616 633 617 if (exclude_ingress) { 634 - num_excluded = get_upper_ifindexes(dev_rx, excluded_devices); 618 + num_excluded = get_upper_ifindexes(dev_rx, excluded_devices, 619 + ARRAY_SIZE(excluded_devices) - 1); 620 + if (num_excluded < 0) 621 + return num_excluded; 622 + 635 623 excluded_devices[num_excluded++] = dev_rx->ifindex; 636 624 } 637 625 ··· 753 733 int err; 754 734 755 735 if (exclude_ingress) { 756 - num_excluded = get_upper_ifindexes(dev, excluded_devices); 736 + num_excluded = get_upper_ifindexes(dev, excluded_devices, 737 + ARRAY_SIZE(excluded_devices) - 1); 738 + if (num_excluded < 0) 739 + return num_excluded; 740 + 757 741 excluded_devices[num_excluded++] = dev->ifindex; 758 742 } 759 743 ··· 1139 1115 if (!netdev->xdp_bulkq) 1140 1116 return NOTIFY_BAD; 1141 1117 1142 - for_each_possible_cpu(cpu) 1143 - per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev; 1118 + for_each_possible_cpu(cpu) { 1119 + struct xdp_dev_bulk_queue *bq; 1120 + 1121 + bq = per_cpu_ptr(netdev->xdp_bulkq, cpu); 1122 + bq->dev = netdev; 1123 + local_lock_init(&bq->bq_lock); 1124 + } 1144 1125 break; 1145 1126 case NETDEV_UNREGISTER: 1146 1127 /* This rcu_read_lock/unlock pair is needed because
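
The devmap and cpumap hunks above apply the same pattern: the per-CPU bulk queue gains a local_lock_t that is initialised once per CPU and taken around every enqueue and flush. The sketch below shows that pattern in isolation; demo_bulkq and demo_enqueue are made-up names for illustration, not structures from the patch, and the queue body is simplified.

/* Illustrative sketch only: demo_bulkq/demo_enqueue are invented names,
 * not part of the devmap/cpumap changes above.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/kernel.h>

struct demo_bulkq {
        void *q[16];
        unsigned int count;
        local_lock_t lock;      /* serialises per-CPU access on PREEMPT_RT */
};

static DEFINE_PER_CPU(struct demo_bulkq, demo_bulkq) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/* Called from NAPI/softirq context, like bq_enqueue() in the patch. */
static void demo_enqueue(void *item)
{
        struct demo_bulkq *bq;

        /* On !PREEMPT_RT this is essentially a lockdep annotation, since BH
         * is already disabled here; on PREEMPT_RT, where softirqs can be
         * preempted, it takes a real per-CPU lock so another task on the
         * same CPU cannot race with a half-updated queue.
         */
        local_lock_nested_bh(&demo_bulkq.lock);
        bq = this_cpu_ptr(&demo_bulkq);
        if (bq->count < ARRAY_SIZE(bq->q))
                bq->q[bq->count++] = item;
        local_unlock_nested_bh(&demo_bulkq.lock);
}

The flush side follows the same shape: take the lock, drain the queue, release it, which is exactly what the bq_flush_to_queue()/bq_xmit_all() call sites above now do.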
+86
kernel/bpf/hashtab.c
··· 125 125 char key[] __aligned(8); 126 126 }; 127 127 128 + struct htab_btf_record { 129 + struct btf_record *record; 130 + u32 key_size; 131 + }; 132 + 128 133 static inline bool htab_is_prealloc(const struct bpf_htab *htab) 129 134 { 130 135 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); ··· 460 455 return -E2BIG; 461 456 462 457 return 0; 458 + } 459 + 460 + static void htab_mem_dtor(void *obj, void *ctx) 461 + { 462 + struct htab_btf_record *hrec = ctx; 463 + struct htab_elem *elem = obj; 464 + void *map_value; 465 + 466 + if (IS_ERR_OR_NULL(hrec->record)) 467 + return; 468 + 469 + map_value = htab_elem_value(elem, hrec->key_size); 470 + bpf_obj_free_fields(hrec->record, map_value); 471 + } 472 + 473 + static void htab_pcpu_mem_dtor(void *obj, void *ctx) 474 + { 475 + void __percpu *pptr = *(void __percpu **)obj; 476 + struct htab_btf_record *hrec = ctx; 477 + int cpu; 478 + 479 + if (IS_ERR_OR_NULL(hrec->record)) 480 + return; 481 + 482 + for_each_possible_cpu(cpu) 483 + bpf_obj_free_fields(hrec->record, per_cpu_ptr(pptr, cpu)); 484 + } 485 + 486 + static void htab_dtor_ctx_free(void *ctx) 487 + { 488 + struct htab_btf_record *hrec = ctx; 489 + 490 + btf_record_free(hrec->record); 491 + kfree(ctx); 492 + } 493 + 494 + static int htab_set_dtor(struct bpf_htab *htab, void (*dtor)(void *, void *)) 495 + { 496 + u32 key_size = htab->map.key_size; 497 + struct bpf_mem_alloc *ma; 498 + struct htab_btf_record *hrec; 499 + int err; 500 + 501 + /* No need for dtors. */ 502 + if (IS_ERR_OR_NULL(htab->map.record)) 503 + return 0; 504 + 505 + hrec = kzalloc(sizeof(*hrec), GFP_KERNEL); 506 + if (!hrec) 507 + return -ENOMEM; 508 + hrec->key_size = key_size; 509 + hrec->record = btf_record_dup(htab->map.record); 510 + if (IS_ERR(hrec->record)) { 511 + err = PTR_ERR(hrec->record); 512 + kfree(hrec); 513 + return err; 514 + } 515 + ma = htab_is_percpu(htab) ? &htab->pcpu_ma : &htab->ma; 516 + bpf_mem_alloc_set_dtor(ma, dtor, htab_dtor_ctx_free, hrec); 517 + return 0; 518 + } 519 + 520 + static int htab_map_check_btf(struct bpf_map *map, const struct btf *btf, 521 + const struct btf_type *key_type, const struct btf_type *value_type) 522 + { 523 + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 524 + 525 + if (htab_is_prealloc(htab)) 526 + return 0; 527 + /* 528 + * We must set the dtor using this callback, as map's BTF record is not 529 + * populated in htab_map_alloc(), so it will always appear as NULL. 
530 + */ 531 + if (htab_is_percpu(htab)) 532 + return htab_set_dtor(htab, htab_pcpu_mem_dtor); 533 + else 534 + return htab_set_dtor(htab, htab_mem_dtor); 463 535 } 464 536 465 537 static struct bpf_map *htab_map_alloc(union bpf_attr *attr) ··· 2363 2281 .map_seq_show_elem = htab_map_seq_show_elem, 2364 2282 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2365 2283 .map_for_each_callback = bpf_for_each_hash_elem, 2284 + .map_check_btf = htab_map_check_btf, 2366 2285 .map_mem_usage = htab_map_mem_usage, 2367 2286 BATCH_OPS(htab), 2368 2287 .map_btf_id = &htab_map_btf_ids[0], ··· 2386 2303 .map_seq_show_elem = htab_map_seq_show_elem, 2387 2304 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2388 2305 .map_for_each_callback = bpf_for_each_hash_elem, 2306 + .map_check_btf = htab_map_check_btf, 2389 2307 .map_mem_usage = htab_map_mem_usage, 2390 2308 BATCH_OPS(htab_lru), 2391 2309 .map_btf_id = &htab_map_btf_ids[0], ··· 2566 2482 .map_seq_show_elem = htab_percpu_map_seq_show_elem, 2567 2483 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2568 2484 .map_for_each_callback = bpf_for_each_hash_elem, 2485 + .map_check_btf = htab_map_check_btf, 2569 2486 .map_mem_usage = htab_map_mem_usage, 2570 2487 BATCH_OPS(htab_percpu), 2571 2488 .map_btf_id = &htab_map_btf_ids[0], ··· 2587 2502 .map_seq_show_elem = htab_percpu_map_seq_show_elem, 2588 2503 .map_set_for_each_callback_args = map_set_for_each_callback_args, 2589 2504 .map_for_each_callback = bpf_for_each_hash_elem, 2505 + .map_check_btf = htab_map_check_btf, 2590 2506 .map_mem_usage = htab_map_mem_usage, 2591 2507 BATCH_OPS(htab_lru_percpu), 2592 2508 .map_btf_id = &htab_map_btf_ids[0],
+1 -1
kernel/bpf/local_storage.c
···
     return -EINVAL;
 }

-static int cgroup_storage_check_btf(const struct bpf_map *map,
+static int cgroup_storage_check_btf(struct bpf_map *map,
                                     const struct btf *btf,
                                     const struct btf_type *key_type,
                                     const struct btf_type *value_type)
+1 -1
kernel/bpf/lpm_trie.c
···
     return err;
 }

-static int trie_check_btf(const struct bpf_map *map,
+static int trie_check_btf(struct bpf_map *map,
                           const struct btf *btf,
                           const struct btf_type *key_type,
                           const struct btf_type *value_type)
+47 -11
kernel/bpf/memalloc.c
··· 102 102 int percpu_size; 103 103 bool draining; 104 104 struct bpf_mem_cache *tgt; 105 + void (*dtor)(void *obj, void *ctx); 106 + void *dtor_ctx; 105 107 106 108 /* list of objects to be freed after RCU GP */ 107 109 struct llist_head free_by_rcu; ··· 262 260 kfree(obj); 263 261 } 264 262 265 - static int free_all(struct llist_node *llnode, bool percpu) 263 + static int free_all(struct bpf_mem_cache *c, struct llist_node *llnode, bool percpu) 266 264 { 267 265 struct llist_node *pos, *t; 268 266 int cnt = 0; 269 267 270 268 llist_for_each_safe(pos, t, llnode) { 269 + if (c->dtor) 270 + c->dtor((void *)pos + LLIST_NODE_SZ, c->dtor_ctx); 271 271 free_one(pos, percpu); 272 272 cnt++; 273 273 } ··· 280 276 { 281 277 struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace); 282 278 283 - free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size); 279 + free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size); 284 280 atomic_set(&c->call_rcu_ttrace_in_progress, 0); 285 281 } 286 282 ··· 312 308 if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) { 313 309 if (unlikely(READ_ONCE(c->draining))) { 314 310 llnode = llist_del_all(&c->free_by_rcu_ttrace); 315 - free_all(llnode, !!c->percpu_size); 311 + free_all(c, llnode, !!c->percpu_size); 316 312 } 317 313 return; 318 314 } ··· 421 417 dec_active(c, &flags); 422 418 423 419 if (unlikely(READ_ONCE(c->draining))) { 424 - free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size); 420 + free_all(c, llist_del_all(&c->waiting_for_gp), !!c->percpu_size); 425 421 atomic_set(&c->call_rcu_in_progress, 0); 426 422 } else { 427 423 call_rcu_hurry(&c->rcu, __free_by_rcu); ··· 639 635 * Except for waiting_for_gp_ttrace list, there are no concurrent operations 640 636 * on these lists, so it is safe to use __llist_del_all(). 641 637 */ 642 - free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu); 643 - free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu); 644 - free_all(__llist_del_all(&c->free_llist), percpu); 645 - free_all(__llist_del_all(&c->free_llist_extra), percpu); 646 - free_all(__llist_del_all(&c->free_by_rcu), percpu); 647 - free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu); 648 - free_all(llist_del_all(&c->waiting_for_gp), percpu); 638 + free_all(c, llist_del_all(&c->free_by_rcu_ttrace), percpu); 639 + free_all(c, llist_del_all(&c->waiting_for_gp_ttrace), percpu); 640 + free_all(c, __llist_del_all(&c->free_llist), percpu); 641 + free_all(c, __llist_del_all(&c->free_llist_extra), percpu); 642 + free_all(c, __llist_del_all(&c->free_by_rcu), percpu); 643 + free_all(c, __llist_del_all(&c->free_llist_extra_rcu), percpu); 644 + free_all(c, llist_del_all(&c->waiting_for_gp), percpu); 649 645 } 650 646 651 647 static void check_mem_cache(struct bpf_mem_cache *c) ··· 684 680 685 681 static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma) 686 682 { 683 + /* We can free dtor ctx only once all callbacks are done using it. 
*/ 684 + if (ma->dtor_ctx_free) 685 + ma->dtor_ctx_free(ma->dtor_ctx); 687 686 check_leaked_objs(ma); 688 687 free_percpu(ma->cache); 689 688 free_percpu(ma->caches); ··· 1020 1013 return -E2BIG; 1021 1014 1022 1015 return 0; 1016 + } 1017 + 1018 + void bpf_mem_alloc_set_dtor(struct bpf_mem_alloc *ma, void (*dtor)(void *obj, void *ctx), 1019 + void (*dtor_ctx_free)(void *ctx), void *ctx) 1020 + { 1021 + struct bpf_mem_caches *cc; 1022 + struct bpf_mem_cache *c; 1023 + int cpu, i; 1024 + 1025 + ma->dtor_ctx_free = dtor_ctx_free; 1026 + ma->dtor_ctx = ctx; 1027 + 1028 + if (ma->cache) { 1029 + for_each_possible_cpu(cpu) { 1030 + c = per_cpu_ptr(ma->cache, cpu); 1031 + c->dtor = dtor; 1032 + c->dtor_ctx = ctx; 1033 + } 1034 + } 1035 + if (ma->caches) { 1036 + for_each_possible_cpu(cpu) { 1037 + cc = per_cpu_ptr(ma->caches, cpu); 1038 + for (i = 0; i < NUM_CACHES; i++) { 1039 + c = &cc->cache[i]; 1040 + c->dtor = dtor; 1041 + c->dtor_ctx = ctx; 1042 + } 1043 + } 1044 + } 1023 1045 }
+1 -1
kernel/bpf/syscall.c
···
 }
 EXPORT_SYMBOL_GPL(bpf_obj_name_cpy);

-int map_check_no_btf(const struct bpf_map *map,
+int map_check_no_btf(struct bpf_map *map,
                      const struct btf *btf,
                      const struct btf_type *key_type,
                      const struct btf_type *value_type)
+56
kernel/bpf/tnum.c
··· 269 269 { 270 270 return TNUM(swab64(a.value), swab64(a.mask)); 271 271 } 272 + 273 + /* Given tnum t, and a number z such that tmin <= z < tmax, where tmin 274 + * is the smallest member of the t (= t.value) and tmax is the largest 275 + * member of t (= t.value | t.mask), returns the smallest member of t 276 + * larger than z. 277 + * 278 + * For example, 279 + * t = x11100x0 280 + * z = 11110001 (241) 281 + * result = 11110010 (242) 282 + * 283 + * Note: if this function is called with z >= tmax, it just returns 284 + * early with tmax; if this function is called with z < tmin, the 285 + * algorithm already returns tmin. 286 + */ 287 + u64 tnum_step(struct tnum t, u64 z) 288 + { 289 + u64 tmax, j, p, q, r, s, v, u, w, res; 290 + u8 k; 291 + 292 + tmax = t.value | t.mask; 293 + 294 + /* if z >= largest member of t, return largest member of t */ 295 + if (z >= tmax) 296 + return tmax; 297 + 298 + /* if z < smallest member of t, return smallest member of t */ 299 + if (z < t.value) 300 + return t.value; 301 + 302 + /* keep t's known bits, and match all unknown bits to z */ 303 + j = t.value | (z & t.mask); 304 + 305 + if (j > z) { 306 + p = ~z & t.value & ~t.mask; 307 + k = fls64(p); /* k is the most-significant 0-to-1 flip */ 308 + q = U64_MAX << k; 309 + r = q & z; /* positions > k matched to z */ 310 + s = ~q & t.value; /* positions <= k matched to t.value */ 311 + v = r | s; 312 + res = v; 313 + } else { 314 + p = z & ~t.value & ~t.mask; 315 + k = fls64(p); /* k is the most-significant 1-to-0 flip */ 316 + q = U64_MAX << k; 317 + r = q & t.mask & z; /* unknown positions > k, matched to z */ 318 + s = q & ~t.mask; /* known positions > k, set to 1 */ 319 + v = r | s; 320 + /* add 1 to unknown positions > k to make value greater than z */ 321 + u = v + (1ULL << k); 322 + /* extract bits in unknown positions > k from u, rest from t.value */ 323 + w = (u & t.mask) | t.value; 324 + res = w; 325 + } 326 + return res; 327 + }
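
The stepping logic above can be exercised from user space as a sanity check. The sketch below mirrors tnum_step() with a plain-C stand-in for fls64() and replays the example from the comment, t = x11100x0 and z = 241, which steps to 242; the struct tnum definition and the test harness are stand-ins for illustration, not kernel code.

/* User-space sketch: struct tnum, TNUM and fls64 are stand-ins for the
 * kernel definitions; the stepping logic mirrors tnum_step() above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint8_t u8;
#define U64_MAX UINT64_MAX

struct tnum { u64 value; u64 mask; };
#define TNUM(v, m) ((struct tnum){ .value = (v), .mask = (m) })

static u8 fls64(u64 x)          /* 1-based index of the highest set bit */
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

static u64 tnum_step(struct tnum t, u64 z)
{
        u64 tmax, j, p, q, r, s, v, u, w, res;
        u8 k;

        tmax = t.value | t.mask;
        if (z >= tmax)
                return tmax;
        if (z < t.value)
                return t.value;

        /* keep t's known bits, and match all unknown bits to z */
        j = t.value | (z & t.mask);

        if (j > z) {
                p = ~z & t.value & ~t.mask;
                k = fls64(p);           /* most-significant 0-to-1 flip */
                q = U64_MAX << k;
                r = q & z;              /* positions > k matched to z */
                s = ~q & t.value;       /* positions <= k matched to t.value */
                v = r | s;
                res = v;
        } else {
                p = z & ~t.value & ~t.mask;
                k = fls64(p);           /* most-significant 1-to-0 flip */
                q = U64_MAX << k;
                r = q & t.mask & z;     /* unknown positions > k, matched to z */
                s = q & ~t.mask;        /* known positions > k, set to 1 */
                v = r | s;
                u = v + (1ULL << k);    /* carry into unknown positions > k */
                w = (u & t.mask) | t.value;
                res = w;
        }
        return res;
}

int main(void)
{
        /* t = x11100x0: value 0b01110000, mask 0b10000010; 240 and 242 are
         * members of t, 241 is not.
         */
        struct tnum t = TNUM(0x70, 0x82);

        assert(tnum_step(t, 241) == 242);   /* example from the comment above */
        assert(tnum_step(t, 0) == 0x70);    /* below tmin -> tmin */
        assert(tnum_step(t, 0xff) == 0xf2); /* at or above tmax -> tmax */
        printf("tnum_step checks passed\n");
        return 0;
}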
+30
kernel/bpf/verifier.c
··· 2379 2379 2380 2380 static void __update_reg64_bounds(struct bpf_reg_state *reg) 2381 2381 { 2382 + u64 tnum_next, tmax; 2383 + bool umin_in_tnum; 2384 + 2382 2385 /* min signed is max(sign bit) | min(other bits) */ 2383 2386 reg->smin_value = max_t(s64, reg->smin_value, 2384 2387 reg->var_off.value | (reg->var_off.mask & S64_MIN)); ··· 2391 2388 reg->umin_value = max(reg->umin_value, reg->var_off.value); 2392 2389 reg->umax_value = min(reg->umax_value, 2393 2390 reg->var_off.value | reg->var_off.mask); 2391 + 2392 + /* Check if u64 and tnum overlap in a single value */ 2393 + tnum_next = tnum_step(reg->var_off, reg->umin_value); 2394 + umin_in_tnum = (reg->umin_value & ~reg->var_off.mask) == reg->var_off.value; 2395 + tmax = reg->var_off.value | reg->var_off.mask; 2396 + if (umin_in_tnum && tnum_next > reg->umax_value) { 2397 + /* The u64 range and the tnum only overlap in umin. 2398 + * u64: ---[xxxxxx]----- 2399 + * tnum: --xx----------x- 2400 + */ 2401 + ___mark_reg_known(reg, reg->umin_value); 2402 + } else if (!umin_in_tnum && tnum_next == tmax) { 2403 + /* The u64 range and the tnum only overlap in the maximum value 2404 + * represented by the tnum, called tmax. 2405 + * u64: ---[xxxxxx]----- 2406 + * tnum: xx-----x-------- 2407 + */ 2408 + ___mark_reg_known(reg, tmax); 2409 + } else if (!umin_in_tnum && tnum_next <= reg->umax_value && 2410 + tnum_step(reg->var_off, tnum_next) > reg->umax_value) { 2411 + /* The u64 range and the tnum only overlap in between umin 2412 + * (excluded) and umax. 2413 + * u64: ---[xxxxxx]----- 2414 + * tnum: xx----x-------x- 2415 + */ 2416 + ___mark_reg_known(reg, tnum_next); 2417 + } 2394 2418 } 2395 2419 2396 2420 static void __update_reg_bounds(struct bpf_reg_state *reg)
+3 -1
kernel/trace/bpf_trace.c
···
                               struct seq_file *seq)
 {
     struct bpf_kprobe_multi_link *kmulti_link;
+    bool has_cookies;

     kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
+    has_cookies = !!kmulti_link->cookies;

     seq_printf(seq,
                "kprobe_cnt:\t%u\n"
···
     for (int i = 0; i < kmulti_link->cnt; i++) {
         seq_printf(seq,
                    "%llu\t %pS\n",
-                   kmulti_link->cookies[i],
+                   has_cookies ? kmulti_link->cookies[i] : 0,
                    (void *)kmulti_link->addrs[i]);
     }
 }
+5 -2
tools/bpf/resolve_btfids/Makefile
··· 65 65 LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null) 66 66 LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf) 67 67 68 + ZLIB_LIBS := $(shell $(HOSTPKG_CONFIG) zlib --libs 2>/dev/null || echo -lz) 69 + ZSTD_LIBS := $(shell $(HOSTPKG_CONFIG) libzstd --libs 2>/dev/null || echo -lzstd) 70 + 68 71 HOSTCFLAGS_resolve_btfids += -g \ 69 72 -I$(srctree)/tools/include \ 70 73 -I$(srctree)/tools/include/uapi \ ··· 76 73 $(LIBELF_FLAGS) \ 77 74 -Wall -Werror 78 75 79 - LIBS = $(LIBELF_LIBS) -lz 76 + LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS) $(ZSTD_LIBS) 80 77 81 78 export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR 82 79 include $(srctree)/tools/build/Makefile.include ··· 86 83 87 84 $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) 88 85 $(call msg,LINK,$@) 89 - $(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) 86 + $(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) $(EXTRA_LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) 90 87 91 88 clean_objects := $(wildcard $(OUTPUT)/*.o \ 92 89 $(OUTPUT)/.*.o.cmd \
+54 -27
tools/bpf/resolve_btfids/main.c
··· 226 226 } 227 227 228 228 static struct btf_id *__btf_id__add(struct rb_root *root, 229 - char *name, 229 + const char *name, 230 230 enum btf_id_kind kind, 231 231 bool unique) 232 232 { ··· 250 250 id = zalloc(sizeof(*id)); 251 251 if (id) { 252 252 pr_debug("adding symbol %s\n", name); 253 - id->name = name; 253 + id->name = strdup(name); 254 + if (!id->name) { 255 + free(id); 256 + return NULL; 257 + } 254 258 id->kind = kind; 255 259 rb_link_node(&id->rb_node, parent, p); 256 260 rb_insert_color(&id->rb_node, root); ··· 262 258 return id; 263 259 } 264 260 265 - static inline struct btf_id *btf_id__add(struct rb_root *root, char *name, enum btf_id_kind kind) 261 + static inline struct btf_id *btf_id__add(struct rb_root *root, 262 + const char *name, 263 + enum btf_id_kind kind) 266 264 { 267 265 return __btf_id__add(root, name, kind, false); 268 266 } 269 267 270 - static inline struct btf_id *btf_id__add_unique(struct rb_root *root, char *name, enum btf_id_kind kind) 268 + static inline struct btf_id *btf_id__add_unique(struct rb_root *root, 269 + const char *name, 270 + enum btf_id_kind kind) 271 271 { 272 272 return __btf_id__add(root, name, kind, true); 273 273 } 274 274 275 - static char *get_id(const char *prefix_end) 275 + static int get_id(const char *prefix_end, char *buf, size_t buf_sz) 276 276 { 277 277 /* 278 278 * __BTF_ID__func__vfs_truncate__0 ··· 285 277 */ 286 278 int len = strlen(prefix_end); 287 279 int pos = sizeof("__") - 1; 288 - char *p, *id; 280 + char *p; 289 281 290 282 if (pos >= len) 291 - return NULL; 283 + return -1; 292 284 293 - id = strdup(prefix_end + pos); 294 - if (id) { 295 - /* 296 - * __BTF_ID__func__vfs_truncate__0 297 - * id = ^ 298 - * 299 - * cut the unique id part 300 - */ 301 - p = strrchr(id, '_'); 302 - p--; 303 - if (*p != '_') { 304 - free(id); 305 - return NULL; 306 - } 307 - *p = '\0'; 308 - } 309 - return id; 285 + if (len - pos >= buf_sz) 286 + return -1; 287 + 288 + strcpy(buf, prefix_end + pos); 289 + /* 290 + * __BTF_ID__func__vfs_truncate__0 291 + * buf = ^ 292 + * 293 + * cut the unique id part 294 + */ 295 + p = strrchr(buf, '_'); 296 + p--; 297 + if (*p != '_') 298 + return -1; 299 + *p = '\0'; 300 + 301 + return 0; 310 302 } 311 303 312 304 static struct btf_id *add_set(struct object *obj, char *name, enum btf_id_kind kind) ··· 343 335 344 336 static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size) 345 337 { 346 - char *id; 338 + char id[KSYM_NAME_LEN]; 347 339 348 - id = get_id(name + size); 349 - if (!id) { 340 + if (get_id(name + size, id, sizeof(id))) { 350 341 pr_err("FAILED to parse symbol name: %s\n", name); 351 342 return NULL; 352 343 } 353 344 354 345 return btf_id__add(root, id, BTF_ID_KIND_SYM); 346 + } 347 + 348 + static void btf_id__free_all(struct rb_root *root) 349 + { 350 + struct rb_node *next; 351 + struct btf_id *id; 352 + 353 + next = rb_first(root); 354 + while (next) { 355 + id = rb_entry(next, struct btf_id, rb_node); 356 + next = rb_next(&id->rb_node); 357 + rb_erase(&id->rb_node, root); 358 + free(id->name); 359 + free(id); 360 + } 355 361 } 356 362 357 363 static void bswap_32_data(void *data, u32 nr_bytes) ··· 1569 1547 out: 1570 1548 btf__free(obj.base_btf); 1571 1549 btf__free(obj.btf); 1550 + btf_id__free_all(&obj.structs); 1551 + btf_id__free_all(&obj.unions); 1552 + btf_id__free_all(&obj.typedefs); 1553 + btf_id__free_all(&obj.funcs); 1554 + btf_id__free_all(&obj.sets); 1572 1555 if (obj.efile.elf) { 1573 1556 elf_end(obj.efile.elf); 1574 1557 close(obj.efile.fd);
+4
tools/include/linux/args.h
···
 #define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

 /* Concatenate two parameters, but allow them to be expanded beforehand. */
+#ifndef __CONCAT
 #define __CONCAT(a, b) a ## b
+#endif
+#ifndef CONCATENATE
 #define CONCATENATE(a, b) __CONCAT(a, b)
+#endif

 #endif /* _LINUX_ARGS_H */
+3
tools/testing/selftests/bpf/DENYLIST.asan
···
+*arena*
+task_local_data
+uprobe_multi_test
+9 -4
tools/testing/selftests/bpf/Makefile
··· 27 27 endif 28 28 29 29 BPF_GCC ?= $(shell command -v bpf-gcc;) 30 + ifdef ASAN 31 + SAN_CFLAGS ?= -fsanitize=address -fno-omit-frame-pointer 32 + else 30 33 SAN_CFLAGS ?= 34 + endif 31 35 SAN_LDFLAGS ?= $(SAN_CFLAGS) 32 36 RELEASE ?= 33 37 OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0) ··· 330 326 $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool 331 327 $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ 332 328 ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \ 333 - EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \ 334 - EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \ 329 + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ 330 + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 335 331 OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ 336 332 LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ 337 333 LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \ ··· 342 338 $(BPFOBJ) | $(BUILD_DIR)/bpftool 343 339 $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ 344 340 ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \ 345 - EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \ 346 - EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \ 341 + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ 342 + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 347 343 OUTPUT=$(BUILD_DIR)/bpftool/ \ 348 344 LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \ 349 345 LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \ ··· 408 404 $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ 409 405 CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \ 410 406 LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \ 407 + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 411 408 OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ) 412 409 413 410 # Get Clang's default includes on this system, as opposed to those seen by
+8 -6
tools/testing/selftests/bpf/benchs/bench_trigger.c
···
 static void attach_ksyms_all(struct bpf_program *empty, bool kretprobe)
 {
     LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
-    char **syms = NULL;
-    size_t cnt = 0;
+    struct bpf_link *link = NULL;
+    struct ksyms *ksyms = NULL;

     /* Some recursive functions will be skipped in
      * bpf_get_ksyms -> skip_entry, as they can introduce sufficient
···
      * So, don't run the kprobe-multi-all and kretprobe-multi-all on
      * a debug kernel.
      */
-    if (bpf_get_ksyms(&syms, &cnt, true)) {
+    if (bpf_get_ksyms(&ksyms, true)) {
         fprintf(stderr, "failed to get ksyms\n");
         exit(1);
     }

-    opts.syms = (const char **) syms;
-    opts.cnt = cnt;
+    opts.syms = (const char **)ksyms->filtered_syms;
+    opts.cnt = ksyms->filtered_cnt;
     opts.retprobe = kretprobe;
     /* attach empty to all the kernel functions except bpf_get_numa_node_id. */
-    if (!bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts)) {
+    link = bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts);
+    free_kallsyms_local(ksyms);
+    if (!link) {
         fprintf(stderr, "failed to attach bpf_program__attach_kprobe_multi_opts to all\n");
         exit(1);
     }
+32 -13
tools/testing/selftests/bpf/bpf_util.h
··· 8 8 #include <errno.h> 9 9 #include <syscall.h> 10 10 #include <bpf/libbpf.h> /* libbpf_num_possible_cpus */ 11 + #include <linux/args.h> 11 12 12 13 static inline unsigned int bpf_num_possible_cpus(void) 13 14 { ··· 22 21 return possible_cpus; 23 22 } 24 23 25 - /* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst 26 - * is zero-terminated string no matter what (unless sz == 0, in which case 27 - * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs 28 - * in what is returned. Given this is internal helper, it's trivial to extend 29 - * this, when necessary. Use this instead of strncpy inside libbpf source code. 24 + /* 25 + * Simplified strscpy() implementation. The kernel one is in lib/string.c 30 26 */ 31 - static inline void bpf_strlcpy(char *dst, const char *src, size_t sz) 27 + static inline ssize_t sized_strscpy(char *dest, const char *src, size_t count) 32 28 { 33 - size_t i; 29 + long res = 0; 34 30 35 - if (sz == 0) 36 - return; 31 + if (count == 0) 32 + return -E2BIG; 37 33 38 - sz--; 39 - for (i = 0; i < sz && src[i]; i++) 40 - dst[i] = src[i]; 41 - dst[i] = '\0'; 34 + while (count > 1) { 35 + char c; 36 + 37 + c = src[res]; 38 + dest[res] = c; 39 + if (!c) 40 + return res; 41 + res++; 42 + count--; 43 + } 44 + 45 + /* Force NUL-termination. */ 46 + dest[res] = '\0'; 47 + 48 + /* Return E2BIG if the source didn't stop */ 49 + return src[res] ? -E2BIG : res; 42 50 } 51 + 52 + #define __strscpy0(dst, src, ...) \ 53 + sized_strscpy(dst, src, sizeof(dst)) 54 + #define __strscpy1(dst, src, size) \ 55 + sized_strscpy(dst, src, size) 56 + 57 + #undef strscpy /* Redefine the placeholder from tools/include/linux/string.h */ 58 + #define strscpy(dst, src, ...) \ 59 + CONCATENATE(__strscpy, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__) 43 60 44 61 #define __bpf_percpu_val_align __attribute__((__aligned__(8))) 45 62
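
With the COUNT_ARGS()/CONCATENATE() dispatch above, the selftests can call strscpy() either with an explicit size or without one, in which case the size is taken from sizeof(dst) and the destination therefore has to be a real array. A small usage sketch follows, assuming it is compiled inside the selftests tree so that bpf_util.h and its includes resolve; the buffer names are illustrative, not taken from the selftests.

/* Usage sketch for the strscpy() wrapper defined above; ifname and path
 * are illustrative buffers, not from the selftests.
 */
#include <stdio.h>
#include "bpf_util.h"

int main(void)
{
        char ifname[8];
        char path[64];
        ssize_t n;

        /* Two-argument form: size taken from sizeof(ifname). */
        n = strscpy(ifname, "veth0");
        printf("copied %zd bytes: %s\n", n, ifname);

        /* Truncation is reported as -E2BIG, but the result stays NUL-terminated. */
        n = strscpy(ifname, "much-too-long-name");
        printf("ret %zd, ifname \"%s\"\n", n, ifname);

        /* Three-argument form: explicit size, usable for partial buffers. */
        n = strscpy(path, "/sys/fs/bpf/test", sizeof(path));
        printf("copied %zd bytes: %s\n", n, path);
        return 0;
}

The two-argument form is what the cgroup_helpers.c and network_helpers.c hunks further down rely on; the three-argument form keeps working for pointer destinations or offsets such as sun_path + 1.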
+19 -6
tools/testing/selftests/bpf/bpftool_helpers.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #include "bpftool_helpers.h" 3 2 #include <unistd.h> 4 3 #include <string.h> 5 4 #include <stdbool.h> 5 + 6 + #include "bpf_util.h" 7 + #include "bpftool_helpers.h" 6 8 7 9 #define BPFTOOL_PATH_MAX_LEN 64 8 10 #define BPFTOOL_FULL_CMD_MAX_LEN 512 9 11 10 12 #define BPFTOOL_DEFAULT_PATH "tools/sbin/bpftool" 11 13 12 - static int detect_bpftool_path(char *buffer) 14 + static int detect_bpftool_path(char *buffer, size_t size) 13 15 { 14 16 char tmp[BPFTOOL_PATH_MAX_LEN]; 17 + const char *env_path; 18 + 19 + /* First, check if BPFTOOL environment variable is set */ 20 + env_path = getenv("BPFTOOL"); 21 + if (env_path && access(env_path, X_OK) == 0) { 22 + strscpy(buffer, env_path, size); 23 + return 0; 24 + } else if (env_path) { 25 + fprintf(stderr, "bpftool '%s' doesn't exist or is not executable\n", env_path); 26 + return 1; 27 + } 15 28 16 29 /* Check default bpftool location (will work if we are running the 17 30 * default flavor of test_progs) 18 31 */ 19 32 snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "./%s", BPFTOOL_DEFAULT_PATH); 20 33 if (access(tmp, X_OK) == 0) { 21 - strncpy(buffer, tmp, BPFTOOL_PATH_MAX_LEN); 34 + strscpy(buffer, tmp, size); 22 35 return 0; 23 36 } 24 37 ··· 40 27 */ 41 28 snprintf(tmp, BPFTOOL_PATH_MAX_LEN, "../%s", BPFTOOL_DEFAULT_PATH); 42 29 if (access(tmp, X_OK) == 0) { 43 - strncpy(buffer, tmp, BPFTOOL_PATH_MAX_LEN); 30 + strscpy(buffer, tmp, size); 44 31 return 0; 45 32 } 46 33 47 - /* Failed to find bpftool binary */ 34 + fprintf(stderr, "Failed to detect bpftool path, use BPFTOOL env var to override\n"); 48 35 return 1; 49 36 } 50 37 ··· 57 44 int ret; 58 45 59 46 /* Detect and cache bpftool binary location */ 60 - if (bpftool_path[0] == 0 && detect_bpftool_path(bpftool_path)) 47 + if (bpftool_path[0] == 0 && detect_bpftool_path(bpftool_path, sizeof(bpftool_path))) 61 48 return 1; 62 49 63 50 ret = snprintf(command, BPFTOOL_FULL_CMD_MAX_LEN, "%s %s%s",
+1 -1
tools/testing/selftests/bpf/cgroup_helpers.c
···
         enable[len] = 0;
         close(fd);
     } else {
-        bpf_strlcpy(enable, controllers, sizeof(enable));
+        strscpy(enable, controllers);
     }

     snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path);
+9 -9
tools/testing/selftests/bpf/jit_disasm_helpers.c
···
         pc += cnt;
     }
     qsort(labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
-    for (i = 0; i < labels.cnt; ++i)
-        /* gcc is unable to infer upper bound for labels.cnt and assumes
-         * it to be U32_MAX. U32_MAX takes 10 decimal digits.
-         * snprintf below prints into labels.names[*],
-         * which has space only for two digits and a letter.
-         * To avoid truncation warning use (i % MAX_LOCAL_LABELS),
-         * which informs gcc about printed value upper bound.
-         */
-        snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i % MAX_LOCAL_LABELS);
+    /* gcc is unable to infer upper bound for labels.cnt and
+     * assumes it to be U32_MAX. U32_MAX takes 10 decimal digits.
+     * snprintf below prints into labels.names[*], which has space
+     * only for two digits and a letter. To avoid truncation
+     * warning use (i < MAX_LOCAL_LABELS), which informs gcc about
+     * printed value upper bound.
+     */
+    for (i = 0; i < labels.cnt && i < MAX_LOCAL_LABELS; ++i)
+        snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i);

     /* now print with labels */
     labels.print_phase = true;
+2 -3
tools/testing/selftests/bpf/network_helpers.c
···
     memset(addr, 0, sizeof(*sun));
     sun->sun_family = family;
     sun->sun_path[0] = 0;
-    strcpy(sun->sun_path + 1, addr_str);
+    strscpy(sun->sun_path + 1, addr_str, sizeof(sun->sun_path) - 1);
     if (len)
         *len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(addr_str);
     return 0;
···
         return -1;

     ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN);
-    strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1);
-    ifr.ifr_name[IFNAMSIZ - 1] = '\0';
+    strscpy(ifr.ifr_name, dev_name);

     err = ioctl(fd, TUNSETIFF, &ifr);
     if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) {
+1 -2
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
···
     close(finish_pipe[1]);

     test_data = malloc(sizeof(char) * 10);
-    strncpy(test_data, "test_data", 10);
-    test_data[9] = '\0';
+    strscpy(test_data, "test_data", 10);

     test_data_long = malloc(sizeof(char) * 5000);
     for (int i = 0; i < 5000; ++i) {
+1 -1
tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
···
     dctcp_skel = bpf_dctcp__open();
     if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
         return;
-    strcpy(dctcp_skel->rodata->fallback_cc, "cubic");
+    strscpy(dctcp_skel->rodata->fallback_cc, "cubic");
     if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load"))
         goto done;

+3 -1
tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
···

     iter_fd = bpf_iter_create(bpf_link__fd(link));
     if (!ASSERT_GE(iter_fd, 0, "iter_create"))
-        goto out;
+        goto out_link;

     /* trigger the program run */
     (void)read(iter_fd, buf, sizeof(buf));
···
     ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");

     close(iter_fd);
+out_link:
+    bpf_link__destroy(link);
 out:
     cgrp_ls_sleepable__destroy(skel);
 }
+4 -2
tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
···
         return -1;
     }

-    strncpy(type_str, type, type_sz);
-    strncpy(field_str, field, field_sz);
+    memcpy(type_str, type, type_sz);
+    type_str[type_sz] = '\0';
+    memcpy(field_str, field, field_sz);
+    field_str[field_sz] = '\0';
     btf_id = btf__find_by_name(btf, type_str);
     if (btf_id < 0) {
         PRINT_FAIL("No BTF info for type %s\n", type_str);
+4 -1
tools/testing/selftests/bpf/prog_tests/dynptr.c
···
     );

     link = bpf_program__attach(prog);
-    if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+    if (!ASSERT_OK_PTR(link, "bpf_program__attach")) {
+        bpf_object__close(obj);
         goto cleanup;
+    }

     err = bpf_prog_test_run_opts(aux_prog_fd, &topts);
     bpf_link__destroy(link);
+    bpf_object__close(obj);

     if (!ASSERT_OK(err, "test_run"))
         goto cleanup;
+2 -2
tools/testing/selftests/bpf/prog_tests/fd_array.c
···
     ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG");

 cleanup_fds:
-    while (i > 0)
-        Close(extra_fds[--i]);
+    while (i-- > 0)
+        Close(extra_fds[i]);
 }

 void test_fd_array_cnt(void)
+2 -2
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
···
     };
     int fd, ret;

-    strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+    strscpy(ifr.ifr_name, ifname);

     fd = open("/dev/net/tun", O_RDWR);
     if (fd < 0)
···
     struct ifreq ifr = {};
     int sk, ret;

-    strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+    strscpy(ifr.ifr_name, ifname);

     sk = socket(PF_INET, SOCK_DGRAM, 0);
     if (sk < 0)
+1
tools/testing/selftests/bpf/prog_tests/htab_update.c
···

     ASSERT_EQ(skel->bss->update_err, -EDEADLK, "no reentrancy");
 out:
+    free(value);
     htab_update__destroy(skel);
 }

+2 -5
tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
···
     if (!ASSERT_GE(iter_fd, 0, "iter_create"))
         goto destroy;

-    memset(buf, 0, sizeof(buf));
-    while (read(iter_fd, buf, sizeof(buf)) > 0) {
-        /* Read out all contents */
-        printf("%s", buf);
-    }
+    while (read(iter_fd, buf, sizeof(buf)) > 0)
+        ; /* Read out all contents */

     /* Next reads should return 0 */
     ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+5 -7
tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
···
 {
     LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
     struct kprobe_multi_empty *skel = NULL;
-    char **syms = NULL;
-    size_t cnt = 0;
+    struct ksyms *ksyms = NULL;

-    if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms"))
+    if (!ASSERT_OK(bpf_get_ksyms(&ksyms, kernel), "bpf_get_ksyms"))
         return;

     skel = kprobe_multi_empty__open_and_load();
     if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
         goto cleanup;

-    opts.syms = (const char **) syms;
-    opts.cnt = cnt;
+    opts.syms = (const char **)ksyms->filtered_syms;
+    opts.cnt = ksyms->filtered_cnt;

     do_bench_test(skel, &opts);

 cleanup:
     kprobe_multi_empty__destroy(skel);
-    if (syms)
-        free(syms);
+    free_kallsyms_local(ksyms);
 }

 static void test_kprobe_multi_bench_attach_addr(bool kernel)
+1 -1
tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c
···
     const char *ns1 = NETNS_BASE "1";
     const char *ns6 = NETNS_BASE "6";
     struct nstoken *nstoken = NULL;
-    const char *foobar = "foobar";
+    const char foobar[] = "foobar";
     ssize_t bytes;
     int sfd, cfd;
     char buf[7];
+218
tools/testing/selftests/bpf/prog_tests/map_kptr_race.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */ 3 + #include <test_progs.h> 4 + #include <network_helpers.h> 5 + 6 + #include "map_kptr_race.skel.h" 7 + 8 + static int get_map_id(int map_fd) 9 + { 10 + struct bpf_map_info info = {}; 11 + __u32 len = sizeof(info); 12 + 13 + if (!ASSERT_OK(bpf_map_get_info_by_fd(map_fd, &info, &len), "get_map_info")) 14 + return -1; 15 + return info.id; 16 + } 17 + 18 + static int read_refs(struct map_kptr_race *skel) 19 + { 20 + LIBBPF_OPTS(bpf_test_run_opts, opts); 21 + int ret; 22 + 23 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.count_ref), &opts); 24 + if (!ASSERT_OK(ret, "count_ref run")) 25 + return -1; 26 + if (!ASSERT_OK(opts.retval, "count_ref retval")) 27 + return -1; 28 + return skel->bss->num_of_refs; 29 + } 30 + 31 + static void test_htab_leak(void) 32 + { 33 + LIBBPF_OPTS(bpf_test_run_opts, opts, 34 + .data_in = &pkt_v4, 35 + .data_size_in = sizeof(pkt_v4), 36 + .repeat = 1, 37 + ); 38 + struct map_kptr_race *skel, *watcher; 39 + int ret, map_id; 40 + 41 + skel = map_kptr_race__open_and_load(); 42 + if (!ASSERT_OK_PTR(skel, "open_and_load")) 43 + return; 44 + 45 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_htab_leak), &opts); 46 + if (!ASSERT_OK(ret, "test_htab_leak run")) 47 + goto out_skel; 48 + if (!ASSERT_OK(opts.retval, "test_htab_leak retval")) 49 + goto out_skel; 50 + 51 + map_id = get_map_id(bpf_map__fd(skel->maps.race_hash_map)); 52 + if (!ASSERT_GE(map_id, 0, "map_id")) 53 + goto out_skel; 54 + 55 + watcher = map_kptr_race__open_and_load(); 56 + if (!ASSERT_OK_PTR(watcher, "watcher open_and_load")) 57 + goto out_skel; 58 + 59 + watcher->bss->target_map_id = map_id; 60 + watcher->links.map_put = bpf_program__attach(watcher->progs.map_put); 61 + if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry")) 62 + goto out_watcher; 63 + watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free); 64 + if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit")) 65 + goto out_watcher; 66 + 67 + map_kptr_race__destroy(skel); 68 + skel = NULL; 69 + 70 + kern_sync_rcu(); 71 + 72 + while (!READ_ONCE(watcher->bss->map_freed)) 73 + sched_yield(); 74 + 75 + ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed"); 76 + ASSERT_EQ(read_refs(watcher), 2, "htab refcount"); 77 + 78 + out_watcher: 79 + map_kptr_race__destroy(watcher); 80 + out_skel: 81 + map_kptr_race__destroy(skel); 82 + } 83 + 84 + static void test_percpu_htab_leak(void) 85 + { 86 + LIBBPF_OPTS(bpf_test_run_opts, opts, 87 + .data_in = &pkt_v4, 88 + .data_size_in = sizeof(pkt_v4), 89 + .repeat = 1, 90 + ); 91 + struct map_kptr_race *skel, *watcher; 92 + int ret, map_id; 93 + 94 + skel = map_kptr_race__open(); 95 + if (!ASSERT_OK_PTR(skel, "open")) 96 + return; 97 + 98 + skel->rodata->nr_cpus = libbpf_num_possible_cpus(); 99 + if (skel->rodata->nr_cpus > 16) 100 + skel->rodata->nr_cpus = 16; 101 + 102 + ret = map_kptr_race__load(skel); 103 + if (!ASSERT_OK(ret, "load")) 104 + goto out_skel; 105 + 106 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_percpu_htab_leak), &opts); 107 + if (!ASSERT_OK(ret, "test_percpu_htab_leak run")) 108 + goto out_skel; 109 + if (!ASSERT_OK(opts.retval, "test_percpu_htab_leak retval")) 110 + goto out_skel; 111 + 112 + map_id = get_map_id(bpf_map__fd(skel->maps.race_percpu_hash_map)); 113 + if (!ASSERT_GE(map_id, 0, "map_id")) 114 + goto out_skel; 115 + 116 + watcher = map_kptr_race__open_and_load(); 117 + if 
(!ASSERT_OK_PTR(watcher, "watcher open_and_load")) 118 + goto out_skel; 119 + 120 + watcher->bss->target_map_id = map_id; 121 + watcher->links.map_put = bpf_program__attach(watcher->progs.map_put); 122 + if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry")) 123 + goto out_watcher; 124 + watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free); 125 + if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit")) 126 + goto out_watcher; 127 + 128 + map_kptr_race__destroy(skel); 129 + skel = NULL; 130 + 131 + kern_sync_rcu(); 132 + 133 + while (!READ_ONCE(watcher->bss->map_freed)) 134 + sched_yield(); 135 + 136 + ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed"); 137 + ASSERT_EQ(read_refs(watcher), 2, "percpu_htab refcount"); 138 + 139 + out_watcher: 140 + map_kptr_race__destroy(watcher); 141 + out_skel: 142 + map_kptr_race__destroy(skel); 143 + } 144 + 145 + static void test_sk_ls_leak(void) 146 + { 147 + struct map_kptr_race *skel, *watcher; 148 + int listen_fd = -1, client_fd = -1, map_id; 149 + 150 + skel = map_kptr_race__open_and_load(); 151 + if (!ASSERT_OK_PTR(skel, "open_and_load")) 152 + return; 153 + 154 + if (!ASSERT_OK(map_kptr_race__attach(skel), "attach")) 155 + goto out_skel; 156 + 157 + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); 158 + if (!ASSERT_GE(listen_fd, 0, "start_server")) 159 + goto out_skel; 160 + 161 + client_fd = connect_to_fd(listen_fd, 0); 162 + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) 163 + goto out_skel; 164 + 165 + if (!ASSERT_EQ(skel->bss->sk_ls_leak_done, 1, "sk_ls_leak_done")) 166 + goto out_skel; 167 + 168 + close(client_fd); 169 + client_fd = -1; 170 + close(listen_fd); 171 + listen_fd = -1; 172 + 173 + map_id = get_map_id(bpf_map__fd(skel->maps.race_sk_ls_map)); 174 + if (!ASSERT_GE(map_id, 0, "map_id")) 175 + goto out_skel; 176 + 177 + watcher = map_kptr_race__open_and_load(); 178 + if (!ASSERT_OK_PTR(watcher, "watcher open_and_load")) 179 + goto out_skel; 180 + 181 + watcher->bss->target_map_id = map_id; 182 + watcher->links.map_put = bpf_program__attach(watcher->progs.map_put); 183 + if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry")) 184 + goto out_watcher; 185 + watcher->links.sk_map_free = bpf_program__attach(watcher->progs.sk_map_free); 186 + if (!ASSERT_OK_PTR(watcher->links.sk_map_free, "attach fexit")) 187 + goto out_watcher; 188 + 189 + map_kptr_race__destroy(skel); 190 + skel = NULL; 191 + 192 + kern_sync_rcu(); 193 + 194 + while (!READ_ONCE(watcher->bss->map_freed)) 195 + sched_yield(); 196 + 197 + ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed"); 198 + ASSERT_EQ(read_refs(watcher), 2, "sk_ls refcount"); 199 + 200 + out_watcher: 201 + map_kptr_race__destroy(watcher); 202 + out_skel: 203 + if (client_fd >= 0) 204 + close(client_fd); 205 + if (listen_fd >= 0) 206 + close(listen_fd); 207 + map_kptr_race__destroy(skel); 208 + } 209 + 210 + void serial_test_map_kptr_race(void) 211 + { 212 + if (test__start_subtest("htab_leak")) 213 + test_htab_leak(); 214 + if (test__start_subtest("percpu_htab_leak")) 215 + test_percpu_htab_leak(); 216 + if (test__start_subtest("sk_ls_leak")) 217 + test_sk_ls_leak(); 218 + }
+2 -2
tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
···
         vals[i] = rand();

     if (type == QUEUE)
-        strncpy(file, "./test_queue_map.bpf.o", sizeof(file));
+        strscpy(file, "./test_queue_map.bpf.o");
     else if (type == STACK)
-        strncpy(file, "./test_stack_map.bpf.o", sizeof(file));
+        strscpy(file, "./test_stack_map.bpf.o");
     else
         return;

+1 -1
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
··· 2091 2091 {U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}}, 2092 2092 2093 2093 {U64, U32, {0, 0x100000000}, {0, 0}}, 2094 - {U64, U32, {0xfffffffe, 0x100000000}, {0x80000000, 0x80000000}}, 2094 + {U64, U32, {0xfffffffe, 0x300000000}, {0x80000000, 0x80000000}}, 2095 2095 2096 2096 {U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}}, 2097 2097 /* these are tricky cases where lower 32 bits allow to tighten 64
+1 -1
tools/testing/selftests/bpf/prog_tests/setget_sockopt.c
··· 212 212 if (!ASSERT_OK_PTR(skel, "open skel")) 213 213 goto done; 214 214 215 - strcpy(skel->rodata->veth, "binddevtest1"); 215 + strscpy(skel->rodata->veth, "binddevtest1"); 216 216 skel->rodata->veth_ifindex = if_nametoindex("binddevtest1"); 217 217 if (!ASSERT_GT(skel->rodata->veth_ifindex, 0, "if_nametoindex")) 218 218 goto done;
+1 -1
tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c
··· 34 34 35 35 memset(&sockaddr, 0, sizeof(sockaddr)); 36 36 sockaddr.sun_family = AF_UNIX; 37 - strncpy(sockaddr.sun_path, sock_path, strlen(sock_path)); 37 + strscpy(sockaddr.sun_path, sock_path); 38 38 sockaddr.sun_path[0] = '\0'; 39 39 40 40 err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
+14 -14
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
··· 204 204 /* Fail since bpf_link for the same prog type has been created. */ 205 205 link2 = bpf_program__attach_sockmap(prog_clone, map); 206 206 if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) { 207 - bpf_link__detach(link2); 207 + bpf_link__destroy(link2); 208 208 goto out; 209 209 } 210 210 ··· 230 230 if (!ASSERT_OK(err, "bpf_link_update")) 231 231 goto out; 232 232 out: 233 - bpf_link__detach(link); 233 + bpf_link__destroy(link); 234 234 test_skmsg_load_helpers__destroy(skel); 235 235 } 236 236 ··· 417 417 if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap")) 418 418 goto out; 419 419 420 - bpf_link__detach(link); 420 + bpf_link__destroy(link); 421 421 422 422 err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0); 423 423 if (!ASSERT_OK(err, "bpf_prog_attach")) ··· 426 426 /* Fail since attaching with the same prog/map has been done. */ 427 427 link = bpf_program__attach_sockmap(prog, map); 428 428 if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap")) 429 - bpf_link__detach(link); 429 + bpf_link__destroy(link); 430 430 431 431 err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT); 432 432 if (!ASSERT_OK(err, "bpf_prog_detach2")) ··· 747 747 test_sockmap_skb_verdict_peek_helper(map); 748 748 ASSERT_EQ(pass->bss->clone_called, 1, "clone_called"); 749 749 out: 750 - bpf_link__detach(link); 750 + bpf_link__destroy(link); 751 751 test_sockmap_pass_prog__destroy(pass); 752 752 } 753 753 754 754 static void test_sockmap_unconnected_unix(void) 755 755 { 756 - int err, map, stream = 0, dgram = 0, zero = 0; 756 + int err, map, stream = -1, dgram = -1, zero = 0; 757 757 struct test_sockmap_pass_prog *skel; 758 758 759 759 skel = test_sockmap_pass_prog__open_and_load(); ··· 764 764 765 765 stream = xsocket(AF_UNIX, SOCK_STREAM, 0); 766 766 if (stream < 0) 767 - return; 767 + goto out; 768 768 769 769 dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0); 770 - if (dgram < 0) { 771 - close(stream); 772 - return; 773 - } 770 + if (dgram < 0) 771 + goto out; 774 772 775 773 err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY); 776 - ASSERT_ERR(err, "bpf_map_update_elem(stream)"); 774 + if (!ASSERT_ERR(err, "bpf_map_update_elem(stream)")) 775 + goto out; 777 776 778 777 err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY); 779 778 ASSERT_OK(err, "bpf_map_update_elem(dgram)"); 780 - 779 + out: 781 780 close(stream); 782 781 close(dgram); 782 + test_sockmap_pass_prog__destroy(skel); 783 783 } 784 784 785 785 static void test_sockmap_many_socket(void) ··· 1027 1027 if (xrecv_nonblock(conn, &buf, 1, 0) != 1) 1028 1028 FAIL("xrecv_nonblock"); 1029 1029 detach: 1030 - bpf_link__detach(link); 1030 + bpf_link__destroy(link); 1031 1031 close: 1032 1032 xclose(conn); 1033 1033 xclose(peer);
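The sockmap fixes above change teardown from bpf_link__detach() to bpf_link__destroy(). In libbpf, bpf_link__detach() only forces the link to detach from its hook; bpf_link__destroy() closes the link FD and frees the struct bpf_link, so using detach alone in cleanup paths leaks both the FD and the allocation. A minimal sketch of the intended pattern, assuming libbpf >= 1.0 (attach helpers return NULL on error); the prog and map_fd arguments are placeholders from an already-loaded skeleton.

#include <bpf/libbpf.h>

/* Attach a verdict program to a sockmap, use it, then tear it down. */
static void attach_and_cleanup(struct bpf_program *prog, int map_fd)
{
	struct bpf_link *link;

	link = bpf_program__attach_sockmap(prog, map_fd);
	if (!link)
		return;

	/* ... exercise the attached program ... */

	/* Closes the link FD and frees the object; bpf_link__detach()
	 * alone would leave both behind. */
	bpf_link__destroy(link);
}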
+1 -1
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
··· 899 899 900 900 redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS); 901 901 902 - bpf_link__detach(link); 902 + bpf_link__destroy(link); 903 903 } 904 904 905 905 static void redir_partial(int family, int sotype, int sock_map, int parser_map)
+1 -1
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
··· 142 142 143 143 /* TCP_CONGESTION can extend the string */ 144 144 145 - strcpy(buf.cc, "nv"); 145 + strscpy(buf.cc, "nv"); 146 146 err = setsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, strlen("nv")); 147 147 if (err) { 148 148 log_err("Failed to call setsockopt(TCP_CONGESTION)");
+1 -3
tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c
··· 54 54 } 55 55 56 56 err = struct_ops_private_stack_fail__load(skel); 57 - if (!ASSERT_ERR(err, "struct_ops_private_stack_fail__load")) 58 - goto cleanup; 59 - return; 57 + ASSERT_ERR(err, "struct_ops_private_stack_fail__load"); 60 58 61 59 cleanup: 62 60 struct_ops_private_stack_fail__destroy(skel);
+1 -1
tools/testing/selftests/bpf/prog_tests/task_local_data.h
··· 262 262 if (!atomic_compare_exchange_strong(&tld_meta_p->cnt, &cnt, cnt + 1)) 263 263 goto retry; 264 264 265 - strncpy(tld_meta_p->metadata[i].name, name, TLD_NAME_LEN); 265 + strscpy(tld_meta_p->metadata[i].name, name); 266 266 atomic_store(&tld_meta_p->metadata[i].size, size); 267 267 return (tld_key_t){(__s16)off}; 268 268 }
+2 -4
tools/testing/selftests/bpf/prog_tests/tc_opts.c
··· 1360 1360 1361 1361 assert_mprog_count_ifindex(ifindex, target, 4); 1362 1362 1363 - ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1364 - ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1365 - ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1366 - return; 1363 + goto cleanup; 1364 + 1367 1365 cleanup3: 1368 1366 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 1369 1367 ASSERT_OK(err, "prog_detach");
+1 -1
tools/testing/selftests/bpf/prog_tests/tc_redirect.c
··· 1095 1095 1096 1096 ifr.ifr_flags = IFF_TUN | IFF_NO_PI; 1097 1097 if (*name) 1098 - strncpy(ifr.ifr_name, name, IFNAMSIZ); 1098 + strscpy(ifr.ifr_name, name); 1099 1099 1100 1100 err = ioctl(fd, TUNSETIFF, &ifr); 1101 1101 if (!ASSERT_OK(err, "ioctl TUNSETIFF"))
+3
tools/testing/selftests/bpf/prog_tests/test_sysctl.c
··· 27 27 OP_EPERM, 28 28 SUCCESS, 29 29 } result; 30 + struct bpf_object *obj; 30 31 }; 31 32 32 33 static struct sysctl_test tests[] = { ··· 1472 1471 return -1; 1473 1472 } 1474 1473 1474 + test->obj = obj; 1475 1475 return prog_fd; 1476 1476 } 1477 1477 ··· 1575 1573 /* Detaching w/o checking return code: best effort attempt. */ 1576 1574 if (progfd != -1) 1577 1575 bpf_prog_detach(cgfd, atype); 1576 + bpf_object__close(test->obj); 1578 1577 close(progfd); 1579 1578 printf("[%s]\n", err ? "FAIL" : "PASS"); 1580 1579 return err;
+4 -1
tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
··· 699 699 return; 700 700 701 701 if (!ASSERT_OK(setup(), "global setup")) 702 - return; 702 + goto out; 703 703 704 704 for (i = 0; i < ARRAY_SIZE(subtests_cfg); i++) { 705 705 cfg = &subtests_cfg[i]; ··· 711 711 subtest_cleanup(cfg); 712 712 } 713 713 cleanup(); 714 + 715 + out: 716 + test_tc_tunnel__destroy(skel); 714 717 }
+2 -2
tools/testing/selftests/bpf/prog_tests/test_veristat.c
··· 24 24 25 25 /* for no_alu32 and cpuv4 veristat is in parent folder */ 26 26 if (access("./veristat", F_OK) == 0) 27 - strcpy(fix->veristat, "./veristat"); 27 + strscpy(fix->veristat, "./veristat"); 28 28 else if (access("../veristat", F_OK) == 0) 29 - strcpy(fix->veristat, "../veristat"); 29 + strscpy(fix->veristat, "../veristat"); 30 30 else 31 31 PRINT_FAIL("Can't find veristat binary"); 32 32
+20 -4
tools/testing/selftests/bpf/prog_tests/test_xsk.c
··· 2003 2003 2004 2004 int testapp_stats_rx_full(struct test_spec *test) 2005 2005 { 2006 - if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE)) 2006 + struct pkt_stream *tmp; 2007 + 2008 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 2009 + if (!tmp) 2007 2010 return TEST_FAILURE; 2008 - test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2011 + test->ifobj_tx->xsk->pkt_stream = tmp; 2012 + 2013 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2014 + if (!tmp) 2015 + return TEST_FAILURE; 2016 + test->ifobj_rx->xsk->pkt_stream = tmp; 2009 2017 2010 2018 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; 2011 2019 test->ifobj_rx->release_rx = false; ··· 2023 2015 2024 2016 int testapp_stats_fill_empty(struct test_spec *test) 2025 2017 { 2026 - if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE)) 2018 + struct pkt_stream *tmp; 2019 + 2020 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 2021 + if (!tmp) 2027 2022 return TEST_FAILURE; 2028 - test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2023 + test->ifobj_tx->xsk->pkt_stream = tmp; 2024 + 2025 + tmp = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 2026 + if (!tmp) 2027 + return TEST_FAILURE; 2028 + test->ifobj_rx->xsk->pkt_stream = tmp; 2029 2029 2030 2030 test->ifobj_rx->use_fill_ring = false; 2031 2031 test->ifobj_rx->validation_func = validate_fill_empty;
+5 -1
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
··· 62 62 return; 63 63 close(child->go[1]); 64 64 close(child->go[0]); 65 - if (child->thread) 65 + if (child->thread) { 66 66 pthread_join(child->thread, NULL); 67 + child->thread = 0; 68 + } 67 69 close(child->c2p[0]); 68 70 close(child->c2p[1]); 69 71 if (child->pid > 0) ··· 332 330 test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) 333 331 { 334 332 static struct child child; 333 + 334 + memset(&child, 0, sizeof(child)); 335 335 336 336 /* no pid filter */ 337 337 __test_attach_api(binary, pattern, opts, NULL);
+1 -1
tools/testing/selftests/bpf/prog_tests/verifier_log.c
··· 47 47 static void verif_log_subtest(const char *name, bool expect_load_error, int log_level) 48 48 { 49 49 LIBBPF_OPTS(bpf_prog_load_opts, opts); 50 - char *exp_log, prog_name[16], op_name[32]; 50 + char *exp_log, prog_name[24], op_name[32]; 51 51 struct test_log_buf *skel; 52 52 struct bpf_program *prog; 53 53 size_t fixed_log_sz;
+2 -1
tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
··· 67 67 struct nstoken *tok = NULL; 68 68 int iifindex, stats_fd; 69 69 __u32 value, key = 0; 70 - struct bpf_link *link; 70 + struct bpf_link *link = NULL; 71 71 72 72 if (SYS_NOFAIL("nft -v")) { 73 73 fprintf(stdout, "Missing required nft tool\n"); ··· 160 160 161 161 ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed"); 162 162 out: 163 + bpf_link__destroy(link); 163 164 xdp_flowtable__destroy(skel); 164 165 if (tok) 165 166 close_netns(tok);
+2 -2
tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
··· 126 126 127 127 static void close_xsk(struct xsk *xsk) 128 128 { 129 - if (xsk->umem) 130 - xsk_umem__delete(xsk->umem); 131 129 if (xsk->socket) 132 130 xsk_socket__delete(xsk->socket); 131 + if (xsk->umem) 132 + xsk_umem__delete(xsk->umem); 133 133 munmap(xsk->umem_area, UMEM_SIZE); 134 134 } 135 135
+1 -1
tools/testing/selftests/bpf/progs/dmabuf_iter.c
··· 48 48 49 49 /* Buffers are not required to be named */ 50 50 if (pname) { 51 - if (bpf_probe_read_kernel(name, sizeof(name), pname)) 51 + if (bpf_probe_read_kernel_str(name, sizeof(name), pname) < 0) 52 52 return 1; 53 53 54 54 /* Name strings can be provided by userspace */
+197
tools/testing/selftests/bpf/progs/map_kptr_race.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */ 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + #include "../test_kmods/bpf_testmod_kfunc.h" 7 + 8 + struct map_value { 9 + struct prog_test_ref_kfunc __kptr *ref_ptr; 10 + }; 11 + 12 + struct { 13 + __uint(type, BPF_MAP_TYPE_HASH); 14 + __uint(map_flags, BPF_F_NO_PREALLOC); 15 + __type(key, int); 16 + __type(value, struct map_value); 17 + __uint(max_entries, 1); 18 + } race_hash_map SEC(".maps"); 19 + 20 + struct { 21 + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 22 + __uint(map_flags, BPF_F_NO_PREALLOC); 23 + __type(key, int); 24 + __type(value, struct map_value); 25 + __uint(max_entries, 1); 26 + } race_percpu_hash_map SEC(".maps"); 27 + 28 + struct { 29 + __uint(type, BPF_MAP_TYPE_SK_STORAGE); 30 + __uint(map_flags, BPF_F_NO_PREALLOC); 31 + __type(key, int); 32 + __type(value, struct map_value); 33 + } race_sk_ls_map SEC(".maps"); 34 + 35 + int num_of_refs; 36 + int sk_ls_leak_done; 37 + int target_map_id; 38 + int map_freed; 39 + const volatile int nr_cpus; 40 + 41 + SEC("tc") 42 + int test_htab_leak(struct __sk_buff *skb) 43 + { 44 + struct prog_test_ref_kfunc *p, *old; 45 + struct map_value val = {}; 46 + struct map_value *v; 47 + int key = 0; 48 + 49 + if (bpf_map_update_elem(&race_hash_map, &key, &val, BPF_ANY)) 50 + return 1; 51 + 52 + v = bpf_map_lookup_elem(&race_hash_map, &key); 53 + if (!v) 54 + return 2; 55 + 56 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 57 + if (!p) 58 + return 3; 59 + old = bpf_kptr_xchg(&v->ref_ptr, p); 60 + if (old) 61 + bpf_kfunc_call_test_release(old); 62 + 63 + bpf_map_delete_elem(&race_hash_map, &key); 64 + 65 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 66 + if (!p) 67 + return 4; 68 + old = bpf_kptr_xchg(&v->ref_ptr, p); 69 + if (old) 70 + bpf_kfunc_call_test_release(old); 71 + 72 + return 0; 73 + } 74 + 75 + static int fill_percpu_kptr(struct map_value *v) 76 + { 77 + struct prog_test_ref_kfunc *p, *old; 78 + 79 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 80 + if (!p) 81 + return 1; 82 + old = bpf_kptr_xchg(&v->ref_ptr, p); 83 + if (old) 84 + bpf_kfunc_call_test_release(old); 85 + return 0; 86 + } 87 + 88 + SEC("tc") 89 + int test_percpu_htab_leak(struct __sk_buff *skb) 90 + { 91 + struct map_value *v, *arr[16] = {}; 92 + struct map_value val = {}; 93 + int key = 0; 94 + int err = 0; 95 + 96 + if (bpf_map_update_elem(&race_percpu_hash_map, &key, &val, BPF_ANY)) 97 + return 1; 98 + 99 + for (int i = 0; i < nr_cpus; i++) { 100 + v = bpf_map_lookup_percpu_elem(&race_percpu_hash_map, &key, i); 101 + if (!v) 102 + return 2; 103 + arr[i] = v; 104 + } 105 + 106 + bpf_map_delete_elem(&race_percpu_hash_map, &key); 107 + 108 + for (int i = 0; i < nr_cpus; i++) { 109 + v = arr[i]; 110 + err = fill_percpu_kptr(v); 111 + if (err) 112 + return 3; 113 + } 114 + 115 + return 0; 116 + } 117 + 118 + SEC("tp_btf/inet_sock_set_state") 119 + int BPF_PROG(test_sk_ls_leak, struct sock *sk, int oldstate, int newstate) 120 + { 121 + struct prog_test_ref_kfunc *p, *old; 122 + struct map_value *v; 123 + 124 + if (newstate != BPF_TCP_SYN_SENT) 125 + return 0; 126 + 127 + if (sk_ls_leak_done) 128 + return 0; 129 + 130 + v = bpf_sk_storage_get(&race_sk_ls_map, sk, NULL, 131 + BPF_SK_STORAGE_GET_F_CREATE); 132 + if (!v) 133 + return 0; 134 + 135 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 136 + if (!p) 137 + return 0; 138 + old = bpf_kptr_xchg(&v->ref_ptr, p); 139 + if (old) 140 + 
bpf_kfunc_call_test_release(old); 141 + 142 + bpf_sk_storage_delete(&race_sk_ls_map, sk); 143 + 144 + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 145 + if (!p) 146 + return 0; 147 + old = bpf_kptr_xchg(&v->ref_ptr, p); 148 + if (old) 149 + bpf_kfunc_call_test_release(old); 150 + 151 + sk_ls_leak_done = 1; 152 + return 0; 153 + } 154 + 155 + long target_map_ptr; 156 + 157 + SEC("fentry/bpf_map_put") 158 + int BPF_PROG(map_put, struct bpf_map *map) 159 + { 160 + if (target_map_id && map->id == (u32)target_map_id) 161 + target_map_ptr = (long)map; 162 + return 0; 163 + } 164 + 165 + SEC("fexit/htab_map_free") 166 + int BPF_PROG(htab_map_free, struct bpf_map *map) 167 + { 168 + if (target_map_ptr && (long)map == target_map_ptr) 169 + map_freed = 1; 170 + return 0; 171 + } 172 + 173 + SEC("fexit/bpf_sk_storage_map_free") 174 + int BPF_PROG(sk_map_free, struct bpf_map *map) 175 + { 176 + if (target_map_ptr && (long)map == target_map_ptr) 177 + map_freed = 1; 178 + return 0; 179 + } 180 + 181 + SEC("syscall") 182 + int count_ref(void *ctx) 183 + { 184 + struct prog_test_ref_kfunc *p; 185 + unsigned long arg = 0; 186 + 187 + p = bpf_kfunc_call_test_acquire(&arg); 188 + if (!p) 189 + return 1; 190 + 191 + num_of_refs = p->cnt.refs.counter; 192 + 193 + bpf_kfunc_call_test_release(p); 194 + return 0; 195 + } 196 + 197 + char _license[] SEC("license") = "GPL";
+137
tools/testing/selftests/bpf/progs/verifier_bounds.c
··· 1863 1863 : __clobber_all); 1864 1864 } 1865 1865 1866 + /* This test covers the bounds deduction when the u64 range and the tnum 1867 + * overlap only at umax. After instruction 3, the ranges look as follows: 1868 + * 1869 + * 0 umin=0xe01 umax=0xf00 U64_MAX 1870 + * | [xxxxxxxxxxxxxx] | 1871 + * |----------------------------|------------------------------| 1872 + * | x x | tnum values 1873 + * 1874 + * The verifier can therefore deduce that R0=0xf0=240. 1875 + */ 1876 + SEC("socket") 1877 + __description("bounds refinement with single-value tnum on umax") 1878 + __msg("3: (15) if r0 == 0xe0 {{.*}} R0=240") 1879 + __success __log_level(2) 1880 + __flag(BPF_F_TEST_REG_INVARIANTS) 1881 + __naked void bounds_refinement_tnum_umax(void *ctx) 1882 + { 1883 + asm volatile(" \ 1884 + call %[bpf_get_prandom_u32]; \ 1885 + r0 |= 0xe0; \ 1886 + r0 &= 0xf0; \ 1887 + if r0 == 0xe0 goto +2; \ 1888 + if r0 == 0xf0 goto +1; \ 1889 + r10 = 0; \ 1890 + exit; \ 1891 + " : 1892 + : __imm(bpf_get_prandom_u32) 1893 + : __clobber_all); 1894 + } 1895 + 1896 + /* This test covers the bounds deduction when the u64 range and the tnum 1897 + * overlap only at umin. After instruction 3, the ranges look as follows: 1898 + * 1899 + * 0 umin=0xe00 umax=0xeff U64_MAX 1900 + * | [xxxxxxxxxxxxxx] | 1901 + * |----------------------------|------------------------------| 1902 + * | x x | tnum values 1903 + * 1904 + * The verifier can therefore deduce that R0=0xe0=224. 1905 + */ 1906 + SEC("socket") 1907 + __description("bounds refinement with single-value tnum on umin") 1908 + __msg("3: (15) if r0 == 0xf0 {{.*}} R0=224") 1909 + __success __log_level(2) 1910 + __flag(BPF_F_TEST_REG_INVARIANTS) 1911 + __naked void bounds_refinement_tnum_umin(void *ctx) 1912 + { 1913 + asm volatile(" \ 1914 + call %[bpf_get_prandom_u32]; \ 1915 + r0 |= 0xe0; \ 1916 + r0 &= 0xf0; \ 1917 + if r0 == 0xf0 goto +2; \ 1918 + if r0 == 0xe0 goto +1; \ 1919 + r10 = 0; \ 1920 + exit; \ 1921 + " : 1922 + : __imm(bpf_get_prandom_u32) 1923 + : __clobber_all); 1924 + } 1925 + 1926 + /* This test covers the bounds deduction when the only possible tnum value is 1927 + * in the middle of the u64 range. After instruction 3, the ranges look as 1928 + * follows: 1929 + * 1930 + * 0 umin=0x7cf umax=0x7df U64_MAX 1931 + * | [xxxxxxxxxxxx] | 1932 + * |----------------------------|------------------------------| 1933 + * | x x x x x | tnum values 1934 + * | +--- 0x7e0 1935 + * +--- 0x7d0 1936 + * 1937 + * Since the lower four bits are zero, the tnum and the u64 range only overlap 1938 + * in R0=0x7d0=2000. Instruction 5 is therefore dead code. 1939 + */ 1940 + SEC("socket") 1941 + __description("bounds refinement with single-value tnum in middle of range") 1942 + __msg("3: (a5) if r0 < 0x7cf {{.*}} R0=2000") 1943 + __success __log_level(2) 1944 + __naked void bounds_refinement_tnum_middle(void *ctx) 1945 + { 1946 + asm volatile(" \ 1947 + call %[bpf_get_prandom_u32]; \ 1948 + if r0 & 0x0f goto +4; \ 1949 + if r0 > 0x7df goto +3; \ 1950 + if r0 < 0x7cf goto +2; \ 1951 + if r0 == 0x7d0 goto +1; \ 1952 + r10 = 0; \ 1953 + exit; \ 1954 + " : 1955 + : __imm(bpf_get_prandom_u32) 1956 + : __clobber_all); 1957 + } 1958 + 1959 + /* This test covers the negative case for the tnum/u64 overlap. Since 1960 + * they contain the same two values (i.e., {0, 1}), we can't deduce 1961 + * anything more.
1962 + */ 1963 + SEC("socket") 1964 + __description("bounds refinement: several overlaps between tnum and u64") 1965 + __msg("2: (25) if r0 > 0x1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=1,var_off=(0x0; 0x1))") 1966 + __failure __log_level(2) 1967 + __naked void bounds_refinement_several_overlaps(void *ctx) 1968 + { 1969 + asm volatile(" \ 1970 + call %[bpf_get_prandom_u32]; \ 1971 + if r0 < 0 goto +3; \ 1972 + if r0 > 1 goto +2; \ 1973 + if r0 == 1 goto +1; \ 1974 + r10 = 0; \ 1975 + exit; \ 1976 + " : 1977 + : __imm(bpf_get_prandom_u32) 1978 + : __clobber_all); 1979 + } 1980 + 1981 + /* This test covers the negative case for the tnum/u64 overlap. Since 1982 + * they overlap in the two values contained by the u64 range (i.e., 1983 + * {0xf, 0x10}), we can't deduce anything more. 1984 + */ 1985 + SEC("socket") 1986 + __description("bounds refinement: multiple overlaps between tnum and u64") 1987 + __msg("2: (25) if r0 > 0x10 {{.*}} R0=scalar(smin=umin=smin32=umin32=15,smax=umax=smax32=umax32=16,var_off=(0x0; 0x1f))") 1988 + __failure __log_level(2) 1989 + __naked void bounds_refinement_multiple_overlaps(void *ctx) 1990 + { 1991 + asm volatile(" \ 1992 + call %[bpf_get_prandom_u32]; \ 1993 + if r0 < 0xf goto +3; \ 1994 + if r0 > 0x10 goto +2; \ 1995 + if r0 == 0x10 goto +1; \ 1996 + r10 = 0; \ 1997 + exit; \ 1998 + " : 1999 + : __imm(bpf_get_prandom_u32) 2000 + : __clobber_all); 2001 + } 2002 + 1866 2003 char _license[] SEC("license") = "GPL";
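These tests lean on the verifier's tnum ("tracked number") representation of partially known scalars: a (value, mask) pair in which bits set in mask are unknown and every other bit equals the corresponding bit of value. After r0 |= 0xe0; r0 &= 0xf0; the tnum is (value=0xe0, mask=0x10), so its only members are 0xe0 and 0xf0, which is what lets the range checks above pin R0 to a single constant. A small standalone sketch of enumerating a tnum's members follows; it only illustrates the idea and is not the kernel's new tnum_step() implementation.

#include <stdint.h>
#include <stdio.h>

/* Same layout as include/linux/tnum.h: mask bits are unknown. */
struct tnum { uint64_t value; uint64_t mask; };

/* Print every concrete value the tnum can represent by walking all
 * assignments of its unknown bits (the classic subset-iteration trick).
 * Only practical when few mask bits are set, which is exactly the
 * single-value-overlap situation the tests above exercise. */
static void tnum_members(struct tnum t)
{
	uint64_t sub = 0;

	do {
		printf("0x%llx\n", (unsigned long long)(t.value | sub));
		sub = (sub - t.mask) & t.mask;	/* next subset of mask */
	} while (sub);
}

int main(void)
{
	/* r0 |= 0xe0; r0 &= 0xf0;  ->  value = 0xe0, mask = 0x10 */
	tnum_members((struct tnum){ .value = 0xe0, .mask = 0x10 });
	return 0;
}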
+27 -11
tools/testing/selftests/bpf/test_progs.c
··· 1261 1261 return ret; 1262 1262 } 1263 1263 1264 - #define MAX_BACKTRACE_SZ 128 1265 - void crash_handler(int signum) 1264 + static void dump_crash_log(void) 1266 1265 { 1267 - void *bt[MAX_BACKTRACE_SZ]; 1268 - size_t sz; 1269 - 1270 - sz = backtrace(bt, ARRAY_SIZE(bt)); 1271 - 1272 1266 fflush(stdout); 1273 1267 stdout = env.stdout_saved; 1274 1268 stderr = env.stderr_saved; ··· 1271 1277 env.test_state->error_cnt++; 1272 1278 dump_test_log(env.test, env.test_state, true, false, NULL); 1273 1279 } 1280 + } 1281 + 1282 + #define MAX_BACKTRACE_SZ 128 1283 + 1284 + void crash_handler(int signum) 1285 + { 1286 + void *bt[MAX_BACKTRACE_SZ]; 1287 + size_t sz; 1288 + 1289 + sz = backtrace(bt, ARRAY_SIZE(bt)); 1290 + 1291 + dump_crash_log(); 1292 + 1274 1293 if (env.worker_id != -1) 1275 1294 fprintf(stderr, "[%d]: ", env.worker_id); 1276 1295 fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum); 1277 1296 backtrace_symbols_fd(bt, sz, STDERR_FILENO); 1278 1297 } 1298 + 1299 + #ifdef __SANITIZE_ADDRESS__ 1300 + void __asan_on_error(void) 1301 + { 1302 + dump_crash_log(); 1303 + } 1304 + #endif 1279 1305 1280 1306 void hexdump(const char *prefix, const void *buf, size_t len) 1281 1307 { ··· 1813 1799 1814 1800 msg.subtest_done.num = i; 1815 1801 1816 - strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME); 1802 + strscpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME); 1817 1803 1818 1804 msg.subtest_done.error_cnt = subtest_state->error_cnt; 1819 1805 msg.subtest_done.skipped = subtest_state->skipped; ··· 1958 1944 .parser = parse_arg, 1959 1945 .doc = argp_program_doc, 1960 1946 }; 1947 + int err, i; 1948 + 1949 + #ifndef __SANITIZE_ADDRESS__ 1961 1950 struct sigaction sigact = { 1962 1951 .sa_handler = crash_handler, 1963 1952 .sa_flags = SA_RESETHAND, 1964 - }; 1965 - int err, i; 1966 - 1953 + }; 1967 1954 sigaction(SIGSEGV, &sigact, NULL); 1955 + #endif 1968 1956 1969 1957 env.stdout_saved = stdout; 1970 1958 env.stderr_saved = stderr;
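Under ASAN, test_progs no longer installs its SIGSEGV handler (so the sanitizer's own fault reporting is preserved) and instead flushes the in-flight test log from __asan_on_error(), a user-definable hook that AddressSanitizer calls when it is about to report an error. A standalone illustration of the hook; the __SANITIZE_ADDRESS__ guard mirrors the one used above, and the program assumes a build with -fsanitize=address.

/* Build with: cc -g -fsanitize=address asan_hook_demo.c */
#include <stdio.h>
#include <stdlib.h>

#ifdef __SANITIZE_ADDRESS__
/* Invoked by ASAN just before it prints an error report; a good place
 * to flush buffered output that would otherwise be lost. */
void __asan_on_error(void)
{
	fprintf(stderr, "flushing logs before ASAN report\n");
	fflush(NULL);
}
#endif

int main(void)
{
	char *p = malloc(4);

	free(p);
	return p[0];	/* use-after-free: triggers the hook under ASAN */
}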
+1 -1
tools/testing/selftests/bpf/test_verifier.c
··· 1320 1320 printf("FAIL\nTestcase bug\n"); 1321 1321 return false; 1322 1322 } 1323 - strncpy(needle, exp, len); 1323 + memcpy(needle, exp, len); 1324 1324 needle[len] = 0; 1325 1325 q = strstr(log, needle); 1326 1326 if (!q) {
+1
tools/testing/selftests/bpf/testing_helpers.c
··· 212 212 break; 213 213 } 214 214 215 + free(buf); 215 216 fclose(f); 216 217 return err; 217 218 }
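The testing_helpers.c change frees the buffer that getline() allocated: getline() allocates or grows the caller-provided buffer and reuses it across calls, so it must be freed exactly once after the last call, including on the EOF or error path. A minimal sketch of the pattern; count_lines() is an illustrative helper written for this note, not part of the selftests.

#include <stdio.h>
#include <stdlib.h>

/* Count the lines in a file. getline() manages the buffer; the caller
 * frees it once after the loop, whatever caused the loop to stop. */
static int count_lines(const char *path)
{
	char *buf = NULL;
	size_t buf_sz = 0;
	int cnt = 0;
	FILE *f;

	f = fopen(path, "r");
	if (!f)
		return -1;

	while (getline(&buf, &buf_sz, f) >= 0)
		cnt++;

	free(buf);	/* buffer persists across getline() calls */
	fclose(f);
	return cnt;
}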
+12 -11
tools/testing/selftests/bpf/trace_helpers.c
··· 24 24 #define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe" 25 25 #define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe" 26 26 27 - struct ksyms { 28 - struct ksym *syms; 29 - size_t sym_cap; 30 - size_t sym_cnt; 31 - }; 32 - 33 27 static struct ksyms *ksyms; 34 28 static pthread_mutex_t ksyms_mutex = PTHREAD_MUTEX_INITIALIZER; 35 29 ··· 47 53 48 54 if (!ksyms) 49 55 return; 56 + 57 + free(ksyms->filtered_syms); 50 58 51 59 if (!ksyms->syms) { 52 60 free(ksyms); ··· 606 610 return compare_name(p1, p2->name); 607 611 } 608 612 609 - int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel) 613 + int bpf_get_ksyms(struct ksyms **ksymsp, bool kernel) 610 614 { 611 615 size_t cap = 0, cnt = 0; 612 616 char *name = NULL, *ksym_name, **syms = NULL; ··· 633 637 else 634 638 f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r"); 635 639 636 - if (!f) 640 + if (!f) { 641 + free_kallsyms_local(ksyms); 637 642 return -EINVAL; 643 + } 638 644 639 645 map = hashmap__new(symbol_hash, symbol_equal, NULL); 640 646 if (IS_ERR(map)) { ··· 677 679 syms[cnt++] = ksym_name; 678 680 } 679 681 680 - *symsp = syms; 681 - *cntp = cnt; 682 + ksyms->filtered_syms = syms; 683 + ksyms->filtered_cnt = cnt; 684 + *ksymsp = ksyms; 682 685 683 686 error: 684 687 free(name); 685 688 fclose(f); 686 689 hashmap__free(map); 687 - if (err) 690 + if (err) { 688 691 free(syms); 692 + free_kallsyms_local(ksyms); 693 + } 689 694 return err; 690 695 } 691 696
+9 -2
tools/testing/selftests/bpf/trace_helpers.h
··· 23 23 long addr; 24 24 char *name; 25 25 }; 26 - struct ksyms; 26 + 27 + struct ksyms { 28 + struct ksym *syms; 29 + size_t sym_cap; 30 + size_t sym_cnt; 31 + char **filtered_syms; 32 + size_t filtered_cnt; 33 + }; 27 34 28 35 typedef int (*ksym_cmp_t)(const void *p1, const void *p2); 29 36 typedef int (*ksym_search_cmp_t)(const void *p1, const struct ksym *p2); ··· 60 53 61 54 int read_build_id(const char *path, char *build_id, size_t size); 62 55 63 - int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel); 56 + int bpf_get_ksyms(struct ksyms **ksymsp, bool kernel); 64 57 int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel); 65 58 66 59 #endif
+2
tools/testing/selftests/bpf/veristat.c
··· 3378 3378 } 3379 3379 } 3380 3380 free(env.presets[i].atoms); 3381 + if (env.presets[i].value.type == ENUMERATOR) 3382 + free(env.presets[i].value.svalue); 3381 3383 } 3382 3384 free(env.presets); 3383 3385 return -err;
+2 -1
tools/testing/selftests/bpf/xdp_features.c
··· 16 16 17 17 #include <network_helpers.h> 18 18 19 + #include "bpf_util.h" 19 20 #include "xdp_features.skel.h" 20 21 #include "xdp_features.h" 21 22 ··· 213 212 env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT; 214 213 env.feature.action = -EINVAL; 215 214 env.ifindex = -ENODEV; 216 - strcpy(env.ifname, "unknown"); 215 + strscpy(env.ifname, "unknown"); 217 216 make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_CTRL_PORT, 218 217 &env.dut_ctrl_addr, NULL); 219 218 make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_ECHO_PORT,
+2 -2
tools/testing/selftests/bpf/xdp_hw_metadata.c
··· 550 550 struct ifreq ifr = { 551 551 .ifr_data = (void *)&ch, 552 552 }; 553 - strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1); 553 + strscpy(ifr.ifr_name, ifname); 554 554 int fd, ret; 555 555 556 556 fd = socket(AF_UNIX, SOCK_DGRAM, 0); ··· 571 571 struct ifreq ifr = { 572 572 .ifr_data = (void *)cfg, 573 573 }; 574 - strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1); 574 + strscpy(ifr.ifr_name, ifname); 575 575 int fd, ret; 576 576 577 577 fd = socket(AF_UNIX, SOCK_DGRAM, 0);