Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

kcov: Enable context analysis

Enable context analysis for the KCOV subsystem.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-30-elver@google.com

Authored by Marco Elver and committed by Peter Zijlstra.
6556fde2 0f5d7648

+27 -11
+2
kernel/Makefile
@@ -43,6 +43,8 @@
 KCSAN_SANITIZE_kcov.o := n
 UBSAN_SANITIZE_kcov.o := n
 KMSAN_SANITIZE_kcov.o := n
+
+CONTEXT_ANALYSIS_kcov.o := y
 CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
 
 obj-y += sched/
+25 -11
kernel/kcov.c
@@ -55,13 +55,13 @@
 	refcount_t refcount;
 	/* The lock protects mode, size, area and t. */
 	spinlock_t lock;
-	enum kcov_mode mode;
+	enum kcov_mode mode __guarded_by(&lock);
 	/* Size of arena (in long's). */
-	unsigned int size;
+	unsigned int size __guarded_by(&lock);
 	/* Coverage buffer shared with user space. */
-	void *area;
+	void *area __guarded_by(&lock);
 	/* Task for which we collect coverage, or NULL. */
-	struct task_struct *t;
+	struct task_struct *t __guarded_by(&lock);
 	/* Collecting coverage from remote (background) threads. */
 	bool remote;
 	/* Size of remote area (in long's). */
@@ -391,6 +391,7 @@
 }
 
 static void kcov_reset(struct kcov *kcov)
+	__must_hold(&kcov->lock)
 {
 	kcov->t = NULL;
 	kcov->mode = KCOV_MODE_INIT;
@@ -401,6 +402,7 @@
 }
 
 static void kcov_remote_reset(struct kcov *kcov)
+	__must_hold(&kcov->lock)
 {
 	int bkt;
 	struct kcov_remote *remote;
@@ -421,6 +423,7 @@
 }
 
 static void kcov_disable(struct task_struct *t, struct kcov *kcov)
+	__must_hold(&kcov->lock)
 {
 	kcov_task_reset(t);
 	if (kcov->remote)
@@ -438,8 +441,11 @@
 static void kcov_put(struct kcov *kcov)
 {
 	if (refcount_dec_and_test(&kcov->refcount)) {
-		kcov_remote_reset(kcov);
-		vfree(kcov->area);
+		/* Context-safety: no references left, object being destroyed. */
+		context_unsafe(
+			kcov_remote_reset(kcov);
+			vfree(kcov->area);
+		);
 		kfree(kcov);
 	}
 }
@@ -497,6 +503,7 @@
 	unsigned long size, off;
 	struct page *page;
 	unsigned long flags;
+	void *area;
 
 	spin_lock_irqsave(&kcov->lock, flags);
 	size = kcov->size * sizeof(unsigned long);
@@ -506,10 +513,11 @@
 		res = -EINVAL;
 		goto exit;
 	}
+	area = kcov->area;
 	spin_unlock_irqrestore(&kcov->lock, flags);
 	vm_flags_set(vma, VM_DONTEXPAND);
 	for (off = 0; off < size; off += PAGE_SIZE) {
-		page = vmalloc_to_page(kcov->area + off);
+		page = vmalloc_to_page(area + off);
 		res = vm_insert_page(vma, vma->vm_start + off, page);
 		if (res) {
 			pr_warn_once("kcov: vm_insert_page() failed\n");
@@ -530,10 +538,10 @@
 	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
 	if (!kcov)
 		return -ENOMEM;
+	spin_lock_init(&kcov->lock);
 	kcov->mode = KCOV_MODE_DISABLED;
 	kcov->sequence = 1;
 	refcount_set(&kcov->refcount, 1);
-	spin_lock_init(&kcov->lock);
 	filep->private_data = kcov;
 	return nonseekable_open(inode, filep);
 }
@@ -564,6 +572,7 @@
  * vmalloc fault handling path is instrumented.
  */
 static void kcov_fault_in_area(struct kcov *kcov)
+	__must_hold(&kcov->lock)
 {
 	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
 	unsigned long *area = kcov->area;
@@ -593,6 +602,7 @@
 
 static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 			     unsigned long arg)
+	__must_hold(&kcov->lock)
 {
 	struct task_struct *t;
 	unsigned long flags, unused;
@@ -824,6 +834,7 @@
 }
 
 static void kcov_remote_softirq_start(struct task_struct *t)
+	__must_hold(&kcov_percpu_data.lock)
 {
 	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
 	unsigned int mode;
@@ -842,6 +853,7 @@
 }
 
 static void kcov_remote_softirq_stop(struct task_struct *t)
+	__must_hold(&kcov_percpu_data.lock)
 {
 	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
 
@@ -908,10 +920,12 @@
 	/* Put in kcov_remote_stop(). */
 	kcov_get(kcov);
 	/*
-	 * Read kcov fields before unlock to prevent races with
-	 * KCOV_DISABLE / kcov_remote_reset().
+	 * Read kcov fields before unlocking kcov_remote_lock to prevent races
+	 * with KCOV_DISABLE and kcov_remote_reset(); cannot acquire kcov->lock
+	 * here, because it might lead to deadlock given kcov_remote_lock is
+	 * acquired _after_ kcov->lock elsewhere.
 	 */
-	mode = kcov->mode;
+	mode = context_unsafe(kcov->mode);
 	sequence = kcov->sequence;
 	if (in_task()) {
 		size = kcov->remote_size;