Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs deadlock fix from Chris Mason:
"This has a fix for a long-standing deadlock that we've been trying to
nail down for a while. It ended up being a bad interaction with the
fair reader/writer locks and the order btrfs reacquires locks in the
btree"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
btrfs: fix lockups from btrfs_clear_path_blocking

+25 -15
+2 -12
fs/btrfs/ctree.c
··· 80 80 { 81 81 int i; 82 82 83 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 84 - /* lockdep really cares that we take all of these spinlocks 85 - * in the right order. If any of the locks in the path are not 86 - * currently blocking, it is going to complain. So, make really 87 - * really sure by forcing the path to blocking before we clear 88 - * the path blocking. 89 - */ 90 83 if (held) { 91 84 btrfs_set_lock_blocking_rw(held, held_rw); 92 85 if (held_rw == BTRFS_WRITE_LOCK) ··· 88 95 held_rw = BTRFS_READ_LOCK_BLOCKING; 89 96 } 90 97 btrfs_set_path_blocking(p); 91 - #endif 92 98 93 99 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { 94 100 if (p->nodes[i] && p->locks[i]) { ··· 99 107 } 100 108 } 101 109 102 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 103 110 if (held) 104 111 btrfs_clear_lock_blocking_rw(held, held_rw); 105 - #endif 106 112 } 107 113 108 114 /* this also releases the path */ ··· 2883 2893 } 2884 2894 p->locks[level] = BTRFS_WRITE_LOCK; 2885 2895 } else { 2886 - err = btrfs_try_tree_read_lock(b); 2896 + err = btrfs_tree_read_lock_atomic(b); 2887 2897 if (!err) { 2888 2898 btrfs_set_path_blocking(p); 2889 2899 btrfs_tree_read_lock(b); ··· 3015 3025 } 3016 3026 3017 3027 level = btrfs_header_level(b); 3018 - err = btrfs_try_tree_read_lock(b); 3028 + err = btrfs_tree_read_lock_atomic(b); 3019 3029 if (!err) { 3020 3030 btrfs_set_path_blocking(p); 3021 3031 btrfs_tree_read_lock(b);
+21 -3
fs/btrfs/locking.c
··· 128 128 } 129 129 130 130 /* 131 + * take a spinning read lock. 132 + * returns 1 if we get the read lock and 0 if we don't 133 + * this won't wait for blocking writers 134 + */ 135 + int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) 136 + { 137 + if (atomic_read(&eb->blocking_writers)) 138 + return 0; 139 + 140 + read_lock(&eb->lock); 141 + if (atomic_read(&eb->blocking_writers)) { 142 + read_unlock(&eb->lock); 143 + return 0; 144 + } 145 + atomic_inc(&eb->read_locks); 146 + atomic_inc(&eb->spinning_readers); 147 + return 1; 148 + } 149 + 150 + /* 131 151 * returns 1 if we get the read lock and 0 if we don't 132 152 * this won't wait for blocking writers 133 153 */ ··· 178 158 atomic_read(&eb->blocking_readers)) 179 159 return 0; 180 160 181 - if (!write_trylock(&eb->lock)) 182 - return 0; 183 - 161 + write_lock(&eb->lock); 184 162 if (atomic_read(&eb->blocking_writers) || 185 163 atomic_read(&eb->blocking_readers)) { 186 164 write_unlock(&eb->lock);
+2
fs/btrfs/locking.h
··· 35 35 void btrfs_assert_tree_locked(struct extent_buffer *eb); 36 36 int btrfs_try_tree_read_lock(struct extent_buffer *eb); 37 37 int btrfs_try_tree_write_lock(struct extent_buffer *eb); 38 + int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); 39 + 38 40 39 41 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) 40 42 {