Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fix from Ingo Molnar:
"A module unload lockdep race fix"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep: Fix the module unload key range freeing logic
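
In brief, the race this fixes: lockdep used to walk its lock-class hash lists "lockfree" on the assumption that classes are only ever added. Module unload breaks that assumption: lockdep_free_key_range() zaps the classes whose keys and names live in module memory, and that memory is then freed and may be reused, so a concurrent walker could chase pointers into freed memory. The patch below turns every such walk into an RCU list iteration whose read-side critical section is an IRQs-disabled region, and makes lockdep_free_key_range() wait those readers out with synchronize_sched() before the memory can be reused. Here is a minimal, self-contained sketch of that pattern, not the lockdep code itself; the names (my_entry, my_table, my_lock, my_lookup, my_remove) are illustrative, and unlike lockdep, which deliberately leaks its static entries, the sketch frees the entry once the grace period has elapsed.

#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define MY_TABLE_SIZE 64

struct my_entry {
        unsigned long key;
        struct list_head hash_entry;
};

static struct list_head my_table[MY_TABLE_SIZE];
static DEFINE_SPINLOCK(my_lock);

/*
 * Read side: running with IRQs disabled is what makes this an RCU-sched
 * read-side critical section, mirroring the DEBUG_LOCKS_WARN_ON(!irqs_disabled())
 * checks the patch adds. The returned pointer is only safe to use while
 * the caller keeps IRQs disabled.
 */
static struct my_entry *my_lookup(unsigned long key)
{
        struct my_entry *e;

        WARN_ON_ONCE(!irqs_disabled());

        list_for_each_entry_rcu(e, &my_table[key % MY_TABLE_SIZE], hash_entry) {
                if (e->key == key)
                        return e;
        }
        return NULL;
}

/*
 * Update side: unlink under the lock, then wait for every IRQs-off
 * section already in flight before the entry's memory may be reused.
 */
static void my_remove(struct my_entry *e)
{
        spin_lock(&my_lock);
        list_del_rcu(&e->hash_entry);
        spin_unlock(&my_lock);

        synchronize_sched();    /* no IRQs-off reader can still see e */
        kfree(e);               /* lockdep leaks instead; see the XXX below */
}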

2 files changed: +59 -30

kernel/locking/lockdep.c (+55 -26)
@@ -633,7 +633,7 @@
         if (!new_class->name)
                 return 0;
 
-        list_for_each_entry(class, &all_lock_classes, lock_entry) {
+        list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
                 if (new_class->key - new_class->subclass == class->key)
                         return class->name_version;
                 if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@
         hash_head = classhashentry(key);
 
         /*
-         * We can walk the hash lockfree, because the hash only
-         * grows, and we are careful when adding entries to the end:
+         * We do an RCU walk of the hash, see lockdep_free_key_range().
         */
-        list_for_each_entry(class, hash_head, hash_entry) {
+        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+                return NULL;
+
+        list_for_each_entry_rcu(class, hash_head, hash_entry) {
                 if (class->key == key) {
                         /*
                          * Huh! same key, different name? Did someone trample
@@ -730,7 +728,8 @@
         struct lockdep_subclass_key *key;
         struct list_head *hash_head;
         struct lock_class *class;
-        unsigned long flags;
+
+        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
         class = look_up_lock_class(lock, subclass);
         if (likely(class))
@@ -753,28 +750,26 @@
         key = lock->key->subkeys + subclass;
         hash_head = classhashentry(key);
 
-        raw_local_irq_save(flags);
         if (!graph_lock()) {
-                raw_local_irq_restore(flags);
                 return NULL;
         }
         /*
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
-        list_for_each_entry(class, hash_head, hash_entry)
+        list_for_each_entry_rcu(class, hash_head, hash_entry) {
                 if (class->key == key)
                         goto out_unlock_set;
+        }
+
         /*
         * Allocate a new key from the static array, and add it to
         * the hash:
         */
         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                 if (!debug_locks_off_graph_unlock()) {
-                        raw_local_irq_restore(flags);
                         return NULL;
                 }
-                raw_local_irq_restore(flags);
 
                 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
                 dump_stack();
@@ -799,7 +798,6 @@
 
         if (verbose(class)) {
                 graph_unlock();
-                raw_local_irq_restore(flags);
 
                 printk("\nnew class %p: %s", class->key, class->name);
                 if (class->name_version > 1)
@@ -806,15 +806,12 @@
                 printk("\n");
                 dump_stack();
 
-                raw_local_irq_save(flags);
                 if (!graph_lock()) {
-                        raw_local_irq_restore(flags);
                         return NULL;
                 }
         }
 out_unlock_set:
         graph_unlock();
-        raw_local_irq_restore(flags);
 
 out_set_class_cache:
         if (!subclass || force)
@@ -867,11 +870,9 @@
         entry->distance = distance;
         entry->trace = *trace;
         /*
-         * Since we never remove from the dependency list, the list can
-         * be walked lockless by other CPUs, it's only allocation
-         * that must be protected by the spinlock. But this also means
-         * we must make new entries visible only once writes to the
-         * entry become visible - hence the RCU op:
+         * Both allocation and removal are done under the graph lock; but
+         * iteration is under RCU-sched; see look_up_lock_class() and
+         * lockdep_free_key_range().
         */
         list_add_tail_rcu(&entry->entry, head);
 
@@ -1020,7 +1025,9 @@
                 else
                         head = &lock->class->locks_before;
 
-                list_for_each_entry(entry, head, entry) {
+                DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+                list_for_each_entry_rcu(entry, head, entry) {
                         if (!lock_accessed(entry)) {
                                 unsigned int cq_depth;
                                 mark_lock_accessed(entry, lock);
@@ -2019,7 +2022,7 @@
         * We can walk it lock-free, because entries only get added
         * to the hash:
         */
-        list_for_each_entry(chain, hash_head, entry) {
+        list_for_each_entry_rcu(chain, hash_head, entry) {
                 if (chain->chain_key == chain_key) {
 cache_hit:
                         debug_atomic_inc(chain_lookup_hits);
@@ -2993,8 +2996,18 @@
         if (unlikely(!debug_locks))
                 return;
 
-        if (subclass)
+        if (subclass) {
+                unsigned long flags;
+
+                if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+                        return;
+
+                raw_local_irq_save(flags);
+                current->lockdep_recursion = 1;
                 register_lock_class(lock, subclass, 1);
+                current->lockdep_recursion = 0;
+                raw_local_irq_restore(flags);
+        }
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
@@ -3894,9 +3887,17 @@
         return addr >= start && addr < start + size;
 }
 
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-        struct lock_class *class, *next;
+        struct lock_class *class;
         struct list_head *head;
         unsigned long flags;
         int i;
@@ -3920,7 +3905,7 @@
                 head = classhash_table + i;
                 if (list_empty(head))
                         continue;
-                list_for_each_entry_safe(class, next, head, hash_entry) {
+                list_for_each_entry_rcu(class, head, hash_entry) {
                         if (within(class->key, start, size))
                                 zap_class(class);
                         else if (within(class->name, start, size))
@@ -3931,11 +3916,25 @@
         if (locked)
                 graph_unlock();
         raw_local_irq_restore(flags);
+
+        /*
+         * Wait for any possible iterators from look_up_lock_class() to pass
+         * before continuing to free the memory they refer to.
+         *
+         * sync_sched() is sufficient because the read-side is IRQ disable.
+         */
+        synchronize_sched();
+
+        /*
+         * XXX at this point we could return the resources to the pool;
+         * instead we leak them. We would need to change to bitmap allocators
+         * instead of the linear allocators we have now.
+         */
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-        struct lock_class *class, *next;
+        struct lock_class *class;
         struct list_head *head;
         unsigned long flags;
         int i, j;
@@ -3977,7 +3948,7 @@
                 head = classhash_table + i;
                 if (list_empty(head))
                         continue;
-                list_for_each_entry_safe(class, next, head, hash_entry) {
+                list_for_each_entry_rcu(class, head, hash_entry) {
                         int match = 0;
 
                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
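
A note on the grace-period primitive the patch pairs with these walks: the readers never call rcu_read_lock(). Their critical sections are delimited by disabling interrupts, hoisted out of register_lock_class() and up into lockdep_init_map(), with current->lockdep_recursion guarding against lockdep re-entering itself inside that region. synchronize_sched() does not return until every CPU has left any region it was executing with preemption (and therefore IRQs) disabled, which is necessarily after any IRQs-off walk that was in flight; that is what the added comment means by "sync_sched() is sufficient because the read-side is IRQ disable". The two helpers below are hypothetical illustrations, not lockdep code, showing the two equivalent ways to delimit a read-side section that synchronize_sched() is guaranteed to wait for.

#include <linux/irqflags.h>
#include <linux/rcupdate.h>

static void reader_irqs_off(void)       /* the style this patch uses */
{
        unsigned long flags;

        local_irq_save(flags);
        /* ... walk RCU-protected lists; no grace period can end here ... */
        local_irq_restore(flags);
}

static void reader_preempt_off(void)    /* the generic RCU-sched style */
{
        rcu_read_lock_sched();
        /* ... same walk; disables preemption rather than IRQs ... */
        rcu_read_unlock_sched();
}
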
kernel/module.c (+4 -4)
@@ -1865,7 +1865,7 @@
         kfree(mod->args);
         percpu_modfree(mod);
 
-        /* Free lock-classes: */
+        /* Free lock-classes; relies on the preceding sync_rcu(). */
         lockdep_free_key_range(mod->module_core, mod->core_size);
 
         /* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@
         module_bug_cleanup(mod);
         mutex_unlock(&module_mutex);
 
-        /* Free lock-classes: */
-        lockdep_free_key_range(mod->module_core, mod->core_size);
-
         /* we can't deallocate the module until we clear memory protection */
         unset_module_init_ro_nx(mod);
         unset_module_core_ro_nx(mod);
@@ -3372,6 +3375,9 @@
         synchronize_rcu();
         mutex_unlock(&module_mutex);
  free_module:
+        /* Free lock-classes; relies on the preceding sync_rcu() */
+        lockdep_free_key_range(mod->module_core, mod->core_size);
+
         module_deallocate(mod, info);
  free_copy:
         free_copy(info);
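
The module.c side of the fix is purely about ordering. In free_module() the call site only needs its comment updated, since per that comment it already relies on a synchronize_rcu() earlier in the unload path. In the load_module() failure path, however, lockdep_free_key_range() used to run before the half-initialised module was unpublished and before the synchronize_rcu() above the free_module: label, which is too early under the new scheme; the patch moves it below that grace period so the "we will have had one sync_sched() before getting here" precondition documented on lockdep_free_key_range() holds on every path. A condensed view of the required sequence follows; the wrapper and the module_memfree() placement are illustrative only, while the two other calls and their ordering are the patch's.

#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/rcupdate.h>

static void example_teardown(struct module *mod)
{
        /*
         * 1) The module has already been unlinked from the module lists;
         *    wait out any walker that might still see it.
         */
        synchronize_rcu();

        /* 2) Zap its classes; ends with its own synchronize_sched(). */
        lockdep_free_key_range(mod->module_core, mod->core_size);

        /* 3) Only now may the core memory be freed and reused. */
        module_memfree(mod->module_core);
}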