Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'bcachefs-2024-08-24' of git://evilpiepirate.org/bcachefs

Pull bcachefs fixes from Kent Overstreet:

- assorted syzbot fixes

- some upgrade fixes for old (pre 1.0) filesystems

- fix for moving data off a device that was switched to durability=0
after data had been written to it.

- nocow deadlock fix

- fix for new rebalance_work accounting

* tag 'bcachefs-2024-08-24' of git://evilpiepirate.org/bcachefs: (28 commits)
bcachefs: Fix rebalance_work accounting
bcachefs: Fix failure to flush moves before sleeping in copygc
bcachefs: don't use rht_bucket() in btree_key_cache_scan()
bcachefs: add missing inode_walker_exit()
bcachefs: clear path->should_be_locked in bch2_btree_key_cache_drop()
bcachefs: Fix double assignment in check_dirent_to_subvol()
bcachefs: Fix refcounting in discard path
bcachefs: Fix compat issue with old alloc_v4 keys
bcachefs: Fix warning in bch2_fs_journal_stop()
fs/super.c: improve get_tree() error message
bcachefs: Fix missing validation in bch2_sb_journal_v2_validate()
bcachefs: Fix replay_now_at() assert
bcachefs: Fix locking in bch2_ioc_setlabel()
bcachefs: fix failure to relock in btree_node_fill()
bcachefs: fix failure to relock in bch2_btree_node_mem_alloc()
bcachefs: unlock_long() before resort in journal replay
bcachefs: fix missing bch2_err_str()
bcachefs: fix time_stats_to_text()
bcachefs: Fix bch2_bucket_gens_init()
bcachefs: Fix bch2_trigger_alloc assert
...

Total diffstat: +387 -192
fs/bcachefs/alloc_background.c (+34 -32)

···
 int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
 			   enum bch_validate_flags flags)
 {
-	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+	struct bch_alloc_v4 a;
 	int ret = 0;
 
-	bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k),
+	bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k));
+
+	bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k),
 			 c, alloc_v4_val_size_bad,
 			 "bad val size (%u > %zu)",
-			 alloc_v4_u64s_noerror(a.v), bkey_val_u64s(k.k));
+			 alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k));
 
-	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
-			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v),
+	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) &&
+			 BCH_ALLOC_V4_NR_BACKPOINTERS(&a),
 			 c, alloc_v4_backpointers_start_bad,
 			 "invalid backpointers_start");
 
-	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type,
+	bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type,
 			 c, alloc_key_data_type_bad,
 			 "invalid data type (got %u should be %u)",
-			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
+			 a.data_type, alloc_data_type(a, a.data_type));
 
 	for (unsigned i = 0; i < 2; i++)
-		bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX,
+		bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX,
 				 c, alloc_key_io_time_bad,
 				 "invalid io_time[%s]: %llu, max %llu",
 				 i == READ ? "read" : "write",
-				 a.v->io_time[i], LRU_TIME_MAX);
+				 a.io_time[i], LRU_TIME_MAX);
 
-	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(a.v) * sizeof(u64) >
+	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) >
 		offsetof(struct bch_alloc_v4, stripe_sectors)
-		? a.v->stripe_sectors
+		? a.stripe_sectors
 		: 0;
 
-	switch (a.v->data_type) {
+	switch (a.data_type) {
 	case BCH_DATA_free:
 	case BCH_DATA_need_gc_gens:
 	case BCH_DATA_need_discard:
 		bkey_fsck_err_on(stripe_sectors ||
-				 a.v->dirty_sectors ||
-				 a.v->cached_sectors ||
-				 a.v->stripe,
+				 a.dirty_sectors ||
+				 a.cached_sectors ||
+				 a.stripe,
 				 c, alloc_key_empty_but_have_data,
 				 "empty data type free but have data %u.%u.%u %u",
 				 stripe_sectors,
-				 a.v->dirty_sectors,
-				 a.v->cached_sectors,
-				 a.v->stripe);
+				 a.dirty_sectors,
+				 a.cached_sectors,
+				 a.stripe);
 		break;
 	case BCH_DATA_sb:
 	case BCH_DATA_journal:
 	case BCH_DATA_btree:
 	case BCH_DATA_user:
 	case BCH_DATA_parity:
-		bkey_fsck_err_on(!a.v->dirty_sectors &&
+		bkey_fsck_err_on(!a.dirty_sectors &&
 				 !stripe_sectors,
 				 c, alloc_key_dirty_sectors_0,
 				 "data_type %s but dirty_sectors==0",
-				 bch2_data_type_str(a.v->data_type));
+				 bch2_data_type_str(a.data_type));
 		break;
 	case BCH_DATA_cached:
-		bkey_fsck_err_on(!a.v->cached_sectors ||
-				 a.v->dirty_sectors ||
+		bkey_fsck_err_on(!a.cached_sectors ||
+				 a.dirty_sectors ||
 				 stripe_sectors ||
-				 a.v->stripe,
+				 a.stripe,
 				 c, alloc_key_cached_inconsistency,
 				 "data type inconsistency");
 
-		bkey_fsck_err_on(!a.v->io_time[READ] &&
+		bkey_fsck_err_on(!a.io_time[READ] &&
 				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
 				 c, alloc_key_cached_but_read_time_zero,
 				 "cached bucket with read_time == 0");
···
 		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
 		int ret2 = 0;
 
-		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
+		if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
 			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
 				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
 			if (ret2)
···
 	if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
 		new_a = bkey_s_to_alloc_v4(new).v;
 	} else {
-		BUG_ON(!(flags & BTREE_TRIGGER_gc));
+		BUG_ON(!(flags & (BTREE_TRIGGER_gc|BTREE_TRIGGER_check_repair)));
 
 		struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
 		ret = PTR_ERR_OR_ZERO(new_ka);
···
 	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
 			      bch2_err_str(ret));
 
-	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 	percpu_ref_put(&ca->io_ref);
+	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 }
 
 void bch2_dev_do_discards(struct bch_dev *ca)
 {
 	struct bch_fs *c = ca->fs;
 
-	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
 		return;
 
-	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
-		goto put_ioref;
+	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+		goto put_write_ref;
 
 	if (queue_work(c->write_ref_wq, &ca->discard_work))
 		return;
 
-	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
-put_ioref:
 	percpu_ref_put(&ca->io_ref);
+put_write_ref:
+	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 }
 
 void bch2_do_discards(struct bch_fs *c)
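The last hunk above is a pure ordering fix: the filesystem-wide write ref is now taken before the per-device io ref, and both are dropped in exact reverse order. A standalone sketch of that acquire/release discipline (stub names, not the bcachefs API):

/* Minimal illustration of acquire-outer-then-inner, release-inner-then-outer.
 * All functions here are hypothetical stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static bool write_ref_tryget(void) { puts("get write ref"); return true; }
static void write_ref_put(void)    { puts("put write ref"); }
static bool io_ref_tryget(void)    { puts("get io ref");    return true; }
static void io_ref_put(void)       { puts("put io ref"); }
static bool queue_work_stub(void)  { return false; /* force the unwind path */ }

static void do_discards(void)
{
	if (!write_ref_tryget())	/* outer ref first */
		return;

	if (!io_ref_tryget())		/* inner ref second */
		goto put_write_ref;

	if (queue_work_stub())		/* on success the worker owns both refs */
		return;

	io_ref_put();			/* unwind inner to outer */
put_write_ref:
	write_ref_put();
}

int main(void) { do_discards(); return 0; }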
fs/bcachefs/alloc_background_format.h (+1)

···
 	__u64			io_time[2];
 	__u32			stripe;
 	__u32			nr_external_backpointers;
+	/* end of fields in original version of alloc_v4 */
 	__u64			fragmentation_lru;
 	__u32			stripe_sectors;
 	__u32			pad;
fs/bcachefs/bcachefs_format.h (+2 -1)

···
 	x(bucket_stripe_sectors,	BCH_VERSION(1,  8))	\
 	x(disk_accounting_v2,		BCH_VERSION(1,  9))	\
 	x(disk_accounting_v3,		BCH_VERSION(1, 10))	\
-	x(disk_accounting_inum,		BCH_VERSION(1, 11))
+	x(disk_accounting_inum,		BCH_VERSION(1, 11))	\
+	x(rebalance_work_acct_fix,	BCH_VERSION(1, 12))
 
 enum bcachefs_metadata_version {
 	bcachefs_metadata_version_min = 9,
fs/bcachefs/btree_cache.c (+25)

···
 	return b;
 }
 
+void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
+{
+	mutex_lock(&c->btree_cache.lock);
+	list_move(&b->list, &c->btree_cache.freeable);
+	mutex_unlock(&c->btree_cache.lock);
+
+	six_unlock_write(&b->c.lock);
+	six_unlock_intent(&b->c.lock);
+}
+
 /* Btree in memory cache - hash table */
 
 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
···
 			       start_time);
 
 	memalloc_nofs_restore(flags);
+
+	int ret = bch2_trans_relock(trans);
+	if (unlikely(ret)) {
+		bch2_btree_node_to_freelist(c, b);
+		return ERR_PTR(ret);
+	}
+
 	return b;
 err:
 	mutex_lock(&bc->lock);
···
 	bch2_btree_node_read(trans, b, sync);
 
+	int ret = bch2_trans_relock(trans);
+	if (ret)
+		return ERR_PTR(ret);
+
 	if (!sync)
 		return NULL;
···
 		need_relock = true;
 
 		bch2_btree_node_wait_on_read(b);
+
+		ret = bch2_trans_relock(trans);
+		if (ret)
+			return ERR_PTR(ret);
 
 		/*
 		 * should_be_locked is not set on this path yet, so we need to
fs/bcachefs/btree_cache.h (+2)

···
 
 void bch2_recalc_btree_reserve(struct bch_fs *);
 
+void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *);
+
 void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
 int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
 int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
fs/bcachefs/btree_iter.h (+9)

···
 	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,		\
 				_btree_id, _pos, _flags, KEY_TYPE_##_type))
 
+#define bkey_val_copy(_dst_v, _src_k)					\
+do {									\
+	unsigned b = min_t(unsigned, sizeof(*_dst_v),			\
+			   bkey_val_bytes(_src_k.k));			\
+	memcpy(_dst_v, _src_k.v, b);					\
+	if (b < sizeof(*_dst_v))					\
+		memset((void *) (_dst_v) + b, 0, sizeof(*_dst_v) - b);	\
+} while (0)
+
 static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
 				unsigned btree_id, struct bpos pos,
 				unsigned flags, unsigned type,
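A userspace sketch of the guarantee this macro provides, and of what the alloc_background.c hunk above relies on: a stored value shorter than the destination struct is zero-extended, so fields added in newer format versions read as 0 instead of out-of-bounds garbage. The struct below is illustrative, not the real bch_alloc_v4 layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct alloc_val {
	uint32_t dirty_sectors;
	uint32_t cached_sectors;
	uint32_t stripe_sectors;	/* field added in a later format version */
};

static void val_copy(struct alloc_val *dst, const void *src, size_t val_bytes)
{
	size_t b = val_bytes < sizeof(*dst) ? val_bytes : sizeof(*dst);

	memcpy(dst, src, b);
	if (b < sizeof(*dst))		/* old, shorter value: zero the tail */
		memset((unsigned char *) dst + b, 0, sizeof(*dst) - b);
}

int main(void)
{
	/* An old-format value that ends before stripe_sectors: */
	uint32_t old_val[2] = { 123, 456 };
	struct alloc_val a;

	val_copy(&a, old_val, sizeof(old_val));
	printf("%u %u %u\n", a.dirty_sectors, a.cached_sectors, a.stripe_sectors);
	/* prints: 123 456 0 -- the missing field reads as zero */
	return 0;
}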
fs/bcachefs/btree_key_cache.c (+28 -3)

···
 
 	mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
 	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+	path->should_be_locked = false;
 }
 
 static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
···
 	rcu_read_lock();
 	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
+
+	/*
+	 * Scanning is expensive while a rehash is in progress - most elements
+	 * will be on the new hashtable, if it's in progress
+	 *
+	 * A rehash could still start while we're scanning - that's ok, we'll
+	 * still see most elements.
+	 */
+	if (unlikely(tbl->nest)) {
+		rcu_read_unlock();
+		srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
+		return SHRINK_STOP;
+	}
+
 	if (bc->shrink_iter >= tbl->size)
 		bc->shrink_iter = 0;
 	start = bc->shrink_iter;
···
 	do {
 		struct rhash_head *pos, *next;
 
-		pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
+		pos = rht_ptr_rcu(&tbl->buckets[bc->shrink_iter]);
 
 		while (!rht_is_a_nulls(pos)) {
 			next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
···
 	while (atomic_long_read(&bc->nr_keys)) {
 		rcu_read_lock();
 		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
-		if (tbl)
+		if (tbl) {
+			if (tbl->nest) {
+				/* wait for in progress rehash */
+				rcu_read_unlock();
+				mutex_lock(&bc->table.mutex);
+				mutex_unlock(&bc->table.mutex);
+				rcu_read_lock();
+				continue;
+			}
 			for (i = 0; i < tbl->size; i++)
-				rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+				while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
+					ck = container_of(pos, struct bkey_cached, hash);
 					bkey_cached_evict(bc, ck);
 					list_add(&ck->list, &items);
 				}
+		}
 		rcu_read_unlock();
 	}
fs/bcachefs/btree_update_interior.c (+25 -21)

···
 		: 0;
 	int ret;
 
+	b = bch2_btree_node_mem_alloc(trans, interior_node);
+	if (IS_ERR(b))
+		return b;
+
+	BUG_ON(b->ob.nr);
+
 	mutex_lock(&c->btree_reserve_cache_lock);
 	if (c->btree_reserve_cache_nr > nr_reserve) {
 		struct btree_alloc *a =
···
 		obs = a->ob;
 		bkey_copy(&tmp.k, &a->k);
 		mutex_unlock(&c->btree_reserve_cache_lock);
-		goto mem_alloc;
+		goto out;
 	}
 	mutex_unlock(&c->btree_reserve_cache_lock);
-
 retry:
 	ret = bch2_alloc_sectors_start_trans(trans,
 				      c->opts.metadata_target ?:
···
 				      c->opts.metadata_replicas_required),
 				      watermark, 0, cl, &wp);
 	if (unlikely(ret))
-		return ERR_PTR(ret);
+		goto err;
 
 	if (wp->sectors_free < btree_sectors(c)) {
 		struct open_bucket *ob;
···
 
 	bch2_open_bucket_get(c, wp, &obs);
 	bch2_alloc_sectors_done(c, wp);
-mem_alloc:
-	b = bch2_btree_node_mem_alloc(trans, interior_node);
+out:
+	bkey_copy(&b->key, &tmp.k);
+	b->ob = obs;
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
 
-	/* we hold cannibalize_lock: */
-	BUG_ON(IS_ERR(b));
-	BUG_ON(b->ob.nr);
-
-	bkey_copy(&b->key, &tmp.k);
-	b->ob = obs;
-
 	return b;
+err:
+	bch2_btree_node_to_freelist(c, b);
+	return ERR_PTR(ret);
 }
 
 static struct btree *bch2_btree_node_alloc(struct btree_update *as,
···
 		}
 
 		new_hash = bch2_btree_node_mem_alloc(trans, false);
+		ret = PTR_ERR_OR_ZERO(new_hash);
+		if (ret)
+			goto err;
 	}
 
 	path->intent_ref++;
···
 			       commit_flags, skip_triggers);
 	--path->intent_ref;
 
-	if (new_hash) {
-		mutex_lock(&c->btree_cache.lock);
-		list_move(&new_hash->list, &c->btree_cache.freeable);
-		mutex_unlock(&c->btree_cache.lock);
-
-		six_unlock_write(&new_hash->c.lock);
-		six_unlock_intent(&new_hash->c.lock);
-	}
+	if (new_hash)
+		bch2_btree_node_to_freelist(c, new_hash);
+err:
 	closure_sync(&cl);
 	bch2_btree_cache_cannibalize_unlock(trans);
 	return ret;
···
 	b = bch2_btree_node_mem_alloc(trans, false);
 	bch2_btree_cache_cannibalize_unlock(trans);
 
+	ret = PTR_ERR_OR_ZERO(b);
+	if (ret)
+		return ret;
+
 	set_btree_node_fake(b);
 	set_btree_node_need_rewrite(b);
 	b->c.level	= level;
···
 
 void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
 {
-	bch2_trans_run(c, bch2_btree_root_alloc_fake_trans(trans, id, level));
+	bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level)));
 }
 
 static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
fs/bcachefs/buckets.c (+49 -25)

···
 static int __trigger_extent(struct btree_trans *trans,
 			    enum btree_id btree_id, unsigned level,
 			    struct bkey_s_c k,
-			    enum btree_iter_update_trigger_flags flags)
+			    enum btree_iter_update_trigger_flags flags,
+			    s64 *replicas_sectors)
 {
 	bool gc = flags & BTREE_TRIGGER_gc;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
···
 	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
 		? BCH_DATA_btree
 		: BCH_DATA_user;
-	s64 replicas_sectors = 0;
 	int ret = 0;
 
 	struct disk_accounting_pos acc_replicas_key = {
···
 			if (ret)
 				return ret;
 		} else if (!p.has_ec) {
-			replicas_sectors       += disk_sectors;
+			*replicas_sectors      += disk_sectors;
 			acc_replicas_key.replicas.devs[acc_replicas_key.replicas.nr_devs++] = p.ptr.dev;
 		} else {
 			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
···
 	}
 
 	if (acc_replicas_key.replicas.nr_devs) {
-		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, &replicas_sectors, 1, gc);
+		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc);
 		if (ret)
 			return ret;
 	}
···
 			.type			= BCH_DISK_ACCOUNTING_snapshot,
 			.snapshot.id		= k.k->p.snapshot,
 		};
-		ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, &replicas_sectors, 1, gc);
+		ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, replicas_sectors, 1, gc);
 		if (ret)
 			return ret;
 	}
···
 			.type		= BCH_DISK_ACCOUNTING_btree,
 			.btree.id	= btree_id,
 		};
-		ret = bch2_disk_accounting_mod(trans, &acc_btree_key, &replicas_sectors, 1, gc);
+		ret = bch2_disk_accounting_mod(trans, &acc_btree_key, replicas_sectors, 1, gc);
 		if (ret)
 			return ret;
 	} else {
···
 		s64 v[3] = {
 			insert ? 1 : -1,
 			insert ? k.k->size : -((s64) k.k->size),
-			replicas_sectors,
+			*replicas_sectors,
 		};
 		ret = bch2_disk_accounting_mod(trans, &acc_inum_key, v, ARRAY_SIZE(v), gc);
-		if (ret)
-			return ret;
-	}
-
-	if (bch2_bkey_rebalance_opts(k)) {
-		struct disk_accounting_pos acc = {
-			.type = BCH_DISK_ACCOUNTING_rebalance_work,
-		};
-		ret = bch2_disk_accounting_mod(trans, &acc, &replicas_sectors, 1, gc);
 		if (ret)
 			return ret;
 	}
···
 		  struct bkey_s_c old, struct bkey_s new,
 		  enum btree_iter_update_trigger_flags flags)
 {
+	struct bch_fs *c = trans->c;
 	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
 	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
 	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
···
 		    new_ptrs_bytes))
 		return 0;
 
-	if (flags & BTREE_TRIGGER_transactional) {
-		struct bch_fs *c = trans->c;
-		int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
-			  (int) bch2_bkey_needs_rebalance(c, old);
+	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
+		s64 old_replicas_sectors = 0, new_replicas_sectors = 0;
 
-		if (mod) {
+		if (old.k->type) {
+			int ret = __trigger_extent(trans, btree, level, old,
+						   flags & ~BTREE_TRIGGER_insert,
+						   &old_replicas_sectors);
+			if (ret)
+				return ret;
+		}
+
+		if (new.k->type) {
+			int ret = __trigger_extent(trans, btree, level, new.s_c,
+						   flags & ~BTREE_TRIGGER_overwrite,
+						   &new_replicas_sectors);
+			if (ret)
+				return ret;
+		}
+
+		int need_rebalance_delta = 0;
+		s64 need_rebalance_sectors_delta = 0;
+
+		s64 s = bch2_bkey_sectors_need_rebalance(c, old);
+		need_rebalance_delta -= s != 0;
+		need_rebalance_sectors_delta -= s;
+
+		s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
+		need_rebalance_delta += s != 0;
+		need_rebalance_sectors_delta += s;
+
+		if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
 			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
-							  new.k->p, mod > 0);
+							      new.k->p, need_rebalance_delta > 0);
+			if (ret)
+				return ret;
+		}
+
+		if (need_rebalance_sectors_delta) {
+			struct disk_accounting_pos acc = {
+				.type = BCH_DISK_ACCOUNTING_rebalance_work,
+			};
+			int ret = bch2_disk_accounting_mod(trans, &acc, &need_rebalance_sectors_delta, 1,
+							   flags & BTREE_TRIGGER_gc);
 			if (ret)
 				return ret;
 		}
 	}
-
-	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc))
-		return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree, level, old, new, flags);
 
 	return 0;
 }
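This is the heart of the rebalance_work accounting fix: instead of running the trigger once for the overwrite and once for the insert, the new code evaluates the metric for both versions of the key and applies a signed delta. A minimal standalone sketch of that pattern, with hypothetical types standing in for the rebalance_work bitset and the accounting counter:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct acct {
	bool	in_rebalance_btree;	/* stands in for the rebalance_work bit */
	int64_t	rebalance_sectors;	/* stands in for the accounting counter */
};

static void trigger(struct acct *a, int64_t old_sectors, int64_t new_sectors)
{
	int membership_delta = (new_sectors != 0) - (old_sectors != 0);

	if (membership_delta)		/* flip only on an actual transition */
		a->in_rebalance_btree = membership_delta > 0;

	a->rebalance_sectors += new_sectors - old_sectors;
}

int main(void)
{
	struct acct a = { 0 };

	trigger(&a, 0, 100);	/* extent written: 100 sectors need rebalance */
	trigger(&a, 100, 30);	/* partially rebalanced */
	trigger(&a, 30, 0);	/* done: counter returns exactly to zero */
	printf("%d %lld\n", a.in_rebalance_btree, (long long) a.rebalance_sectors);
	return 0;
}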
fs/bcachefs/buckets_waiting_for_journal.c (+3 -1)

···
 		nr_elements += t->d[i].journal_seq > flushed_seq;
 
 	new_bits = ilog2(roundup_pow_of_two(nr_elements * 3));
-
+realloc:
 	n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
 	if (!n) {
 		ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
···
 	if (nr_rehashes_this_size == 3) {
 		new_bits++;
 		nr_rehashes_this_size = 0;
+		kvfree(n);
+		goto realloc;
 	}
 
 	nr_rehashes++;
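The bug here was that when rehashing kept failing, the retry loop doubled new_bits but kept rehashing into the buffer allocated for the smaller size. A compact sketch of the corrected shape, with the hash logic stubbed out (every name below is illustrative):

#include <stdbool.h>
#include <stdlib.h>

static bool try_rehash(void *table, unsigned bits)
{
	static int attempts;
	(void) table; (void) bits;
	return ++attempts > 3;		/* simulate collisions on early tries */
}

static void *grow_table(size_t elem_size, unsigned *bits_out)
{
	unsigned bits = *bits_out, tries = 0;
	void *n;
realloc:
	n = calloc(1u << bits, elem_size);
	if (!n)
		return NULL;

	while (!try_rehash(n, bits)) {
		if (++tries == 3) {	/* too many collisions at this size */
			bits++;
			tries = 0;
			free(n);	/* the old buffer is now too small... */
			goto realloc;	/* ...so allocate a bigger one */
		}
	}

	*bits_out = bits;
	return n;
}

int main(void)
{
	unsigned bits = 4;
	void *t = grow_table(sizeof(long), &bits);

	free(t);
	return bits == 5 ? 0 : 1;	/* grew once, as expected */
}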
fs/bcachefs/data_update.c (+115 -94)

···
 #include "subvolume.h"
 #include "trace.h"
 
+static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr)
+		bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
+}
+
+static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		if (!bch2_dev_tryget(c, ptr->dev)) {
+			bkey_for_each_ptr(ptrs, ptr2) {
+				if (ptr2 == ptr)
+					break;
+				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
+			}
+			return false;
+		}
+	}
+	return true;
+}
+
+static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
+		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
+
+		bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
+	}
+}
+
+static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
+		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
+
+		if (ctxt) {
+			bool locked;
+
+			move_ctxt_wait_event(ctxt,
+				(locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
+				list_empty(&ctxt->ios));
+
+			if (!locked)
+				bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
+		} else {
+			if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
+				bkey_for_each_ptr(ptrs, ptr2) {
+					if (ptr2 == ptr)
+						break;
+
+					bucket = PTR_BUCKET_POS(ca, ptr2);
+					bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
+				}
+				return false;
+			}
+		}
+	}
+	return true;
+}
+
 static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
 {
 	if (trace_move_extent_finish_enabled()) {
···
 void bch2_data_update_exit(struct data_update *update)
 {
 	struct bch_fs *c = update->op.c;
-	struct bkey_ptrs_c ptrs =
-		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
+	struct bkey_s_c k = bkey_i_to_s_c(update->k.k);
 
-	bkey_for_each_ptr(ptrs, ptr) {
-		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
-		if (c->opts.nocow_enabled)
-			bch2_bucket_nocow_unlock(&c->nocow_locks,
-						 PTR_BUCKET_POS(ca, ptr), 0);
-		bch2_dev_put(ca);
-	}
-
+	if (c->opts.nocow_enabled)
+		bkey_nocow_unlock(c, k);
+	bkey_put_dev_refs(c, k);
 	bch2_bkey_buf_exit(&update->k, c);
 	bch2_disk_reservation_put(c, &update->op.res);
 	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
···
 	bch2_compression_opt_to_text(out, background_compression(*io_opts));
 	prt_newline(out);
 
+	prt_str(out, "opts.replicas:\t");
+	prt_u64(out, io_opts->data_replicas);
+
 	prt_str(out, "extra replicas:\t");
 	prt_u64(out, data_opts->extra_replicas);
 }
···
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
 	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
-	unsigned ptrs_locked = 0;
 	int ret = 0;
 
 	/*
···
 	 */
 	if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
 		return -BCH_ERR_data_update_done;
+
+	if (!bkey_get_dev_refs(c, k))
+		return -BCH_ERR_data_update_done;
+
+	if (c->opts.nocow_enabled &&
+	    !bkey_nocow_lock(c, ctxt, k)) {
+		bkey_put_dev_refs(c, k);
+		return -BCH_ERR_nocow_lock_blocked;
+	}
 
 	bch2_bkey_buf_init(&m->k);
 	bch2_bkey_buf_reassemble(&m->k, c, k);
···
 	m->op.compression_opt	= background_compression(io_opts);
 	m->op.watermark		= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
 
-	bkey_for_each_ptr(ptrs, ptr) {
-		if (!bch2_dev_tryget(c, ptr->dev)) {
-			bkey_for_each_ptr(ptrs, ptr2) {
-				if (ptr2 == ptr)
-					break;
-				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
-			}
-			return -BCH_ERR_data_update_done;
-		}
-	}
-
 	unsigned durability_have = 0, durability_removing = 0;
 
 	i = 0;
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
-		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
-		bool locked;
-
-		rcu_read_lock();
-		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
-			BUG_ON(p.ptr.cached);
-
-			if (crc_is_compressed(p.crc))
-				reserve_sectors += k.k->size;
-
-			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
-			durability_removing += bch2_extent_ptr_desired_durability(c, &p);
-		} else if (!p.ptr.cached &&
-			   !((1U << i) & m->data_opts.kill_ptrs)) {
-			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
-			durability_have += bch2_extent_ptr_durability(c, &p);
+		if (!p.ptr.cached) {
+			rcu_read_lock();
+			if (BIT(i) & m->data_opts.rewrite_ptrs) {
+				if (crc_is_compressed(p.crc))
+					reserve_sectors += k.k->size;
+
+				m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
+				durability_removing += bch2_extent_ptr_desired_durability(c, &p);
+			} else if (!(BIT(i) & m->data_opts.kill_ptrs)) {
+				bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+				durability_have += bch2_extent_ptr_durability(c, &p);
+			}
+			rcu_read_unlock();
 		}
-		rcu_read_unlock();
 
 		/*
 		 * op->csum_type is normally initialized from the fs/file's
···
 		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
 			m->op.incompressible = true;
 
-		if (c->opts.nocow_enabled) {
-			if (ctxt) {
-				move_ctxt_wait_event(ctxt,
-					(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
-									    bucket, 0)) ||
-					list_empty(&ctxt->ios));
-
-				if (!locked)
-					bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
-			} else {
-				if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
-					ret = -BCH_ERR_nocow_lock_blocked;
-					goto err;
-				}
-			}
-			ptrs_locked |= (1U << i);
-		}
-
 		i++;
 	}
···
 	 * Increasing replication is an explicit operation triggered by
 	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
 	 */
-	if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
-	    !durability_required) {
-		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
-		m->data_opts.rewrite_ptrs = 0;
-		/* if iter == NULL, it's just a promote */
-		if (iter)
-			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
-		goto done;
-	}
-
 	m->op.nr_replicas = min(durability_removing, durability_required) +
 		m->data_opts.extra_replicas;
···
 	if (!(durability_have + durability_removing))
 		m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);
 
-	if (!m->op.nr_replicas) {
-		struct printbuf buf = PRINTBUF;
-
-		bch2_data_update_to_text(&buf, m);
-		WARN(1, "trying to move an extent, but nr_replicas=0\n%s", buf.buf);
-		printbuf_exit(&buf);
-		ret = -BCH_ERR_data_update_done;
-		goto done;
-	}
-
 	m->op.nr_replicas_required = m->op.nr_replicas;
+
+	/*
+	 * It might turn out that we don't need any new replicas, if the
+	 * replicas or durability settings have been changed since the extent
+	 * was written:
+	 */
+	if (!m->op.nr_replicas) {
+		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
+		m->data_opts.rewrite_ptrs = 0;
+		/* if iter == NULL, it's just a promote */
+		if (iter)
+			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
+		goto out;
+	}
 
 	if (reserve_sectors) {
 		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
···
 				? 0
 				: BCH_DISK_RESERVATION_NOFAIL);
 		if (ret)
-			goto err;
+			goto out;
 	}
 
 	if (bkey_extent_is_unwritten(k)) {
 		bch2_update_unwritten_extent(trans, m);
-		goto done;
+		goto out;
 	}
 
 	return 0;
-err:
-	i = 0;
-	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
-		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
-		if ((1U << i) & ptrs_locked)
-			bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
-		bch2_dev_put(ca);
-		i++;
-	}
-
-	bch2_bkey_buf_exit(&m->k, c);
-	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
-	return ret;
-done:
+out:
 	bch2_data_update_exit(m);
 	return ret ?: -BCH_ERR_data_update_done;
 }
fs/bcachefs/extents.c (+41)

···
 
 		prt_printf(out, "ptr: %u:%llu:%u gen %u",
 			   ptr->dev, b, offset, ptr->gen);
+		if (ca->mi.durability != 1)
+			prt_printf(out, " d=%u", ca->mi.durability);
 		if (ptr->cached)
 			prt_str(out, " cached");
 		if (ptr->unwritten)
···
 		r = NULL;
 
 	return r != NULL;
 }
 
+static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
+					      unsigned target, unsigned compression)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
+	u64 sectors = 0;
+
+	if (compression) {
+		unsigned compression_type = bch2_compression_opt_to_type(compression);
+
+		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
+			    p.ptr.unwritten) {
+				sectors = 0;
+				goto incompressible;
+			}
+
+			if (!p.ptr.cached && p.crc.compression_type != compression_type)
+				sectors += p.crc.compressed_size;
+		}
+	}
+incompressible:
+	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
+		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+			if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target))
+				sectors += p.crc.compressed_size;
+	}
+
+	return sectors;
+}
+
+u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
+{
+	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
+
+	return r ? __bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0;
+}
+
 int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
fs/bcachefs/extents.h (+1)

···
 unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
 				       unsigned, unsigned);
 bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);
+u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c);
 
 int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
 				  struct bch_io_opts *);
fs/bcachefs/fs-io-buffered.c (+1 -1)

···
 
 	if (f_sectors > w->tmp_sectors) {
 		kfree(w->tmp);
-		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), __GFP_NOFAIL);
+		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), GFP_NOFS|__GFP_NOFAIL);
 		w->tmp_sectors = f_sectors;
 	}
fs/bcachefs/fs-ioctl.c (+1 -2)

···
 
 	mutex_lock(&c->sb_lock);
 	strscpy(c->disk_sb.sb->label, label, BCH_SB_LABEL_SIZE);
-	mutex_unlock(&c->sb_lock);
-
 	ret = bch2_write_super(c);
+	mutex_unlock(&c->sb_lock);
 
 	mnt_drop_write_file(file);
 	return ret;
fs/bcachefs/fsck.c (+3 -3)

···
 	if (ret) {
 		bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
 		ret = -BCH_ERR_fsck_repair_unimplemented;
-		ret = 0;
 		goto err;
 	}
···
 			NULL, NULL,
 			BCH_TRANS_COMMIT_no_enospc,
 		check_xattr(trans, &iter, k, &hash_info, &inode)));
+
+	inode_walker_exit(&inode);
 	bch_err_fn(c, ret);
 	return ret;
 }
···
 			: bch2_inode_unpack(inode_k, &inode);
 		if (ret) {
 			/* Should have been caught in dirents pass */
-			if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
-				bch_err(c, "error looking up parent directory: %i", ret);
+			bch_err_msg(c, ret, "error looking up parent directory");
 			break;
 		}
fs/bcachefs/journal.c (+1 -1)

···
 	}
 
 	if (!had_entries)
-		j->last_empty_seq = cur_seq;
+		j->last_empty_seq = cur_seq - 1; /* to match j->seq */
 
 	spin_lock(&j->lock);
fs/bcachefs/journal_sb.c (+15)

···
 	struct bch_sb_field_journal_v2 *journal = field_to_type(f, journal_v2);
 	struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
 	int ret = -BCH_ERR_invalid_sb_journal;
+	u64 sum = 0;
 	unsigned nr;
 	unsigned i;
 	struct u64_range *b;
···
 	for (i = 0; i < nr; i++) {
 		b[i].start = le64_to_cpu(journal->d[i].start);
 		b[i].end = b[i].start + le64_to_cpu(journal->d[i].nr);
+
+		if (b[i].end <= b[i].start) {
+			prt_printf(err, "journal buckets entry with bad nr: %llu+%llu",
+				   le64_to_cpu(journal->d[i].start),
+				   le64_to_cpu(journal->d[i].nr));
+			goto err;
+		}
+
+		sum += le64_to_cpu(journal->d[i].nr);
 	}
 
 	sort(b, nr, sizeof(*b), u64_range_cmp, NULL);
···
 				   b[i].start, b[i].end, b[i + 1].start, b[i + 1].end);
 			goto err;
 		}
+	}
+
+	if (sum > UINT_MAX) {
+		prt_printf(err, "too many journal buckets: %llu > %u", sum, UINT_MAX);
+		goto err;
 	}
 
 	ret = 0;
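The per-entry check is worth a second look: because end is computed as start + nr in 64-bit arithmetic, the single comparison end <= start rejects both empty entries (nr == 0) and entries whose range wraps past U64_MAX. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

static int range_valid(uint64_t start, uint64_t nr)
{
	uint64_t end = start + nr;	/* may wrap around */

	return end > start;		/* false for nr == 0 and for wraparound */
}

int main(void)
{
	printf("%d\n", range_valid(128, 0));			/* 0: empty entry */
	printf("%d\n", range_valid(UINT64_MAX - 1, 16));	/* 0: wraps */
	printf("%d\n", range_valid(128, 512));			/* 1: ok */
	return 0;
}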
fs/bcachefs/movinggc.c (+1 -1)

···
 		if (min_member_capacity == U64_MAX)
 			min_member_capacity = 128 * 2048;
 
-		bch2_trans_unlock_long(ctxt.trans);
+		move_buckets_wait(&ctxt, buckets, true);
 		bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
 					   MAX_SCHEDULE_TIMEOUT);
 	}
fs/bcachefs/recovery.c (+8 -1)

···
 	const struct journal_key *l = *((const struct journal_key **)_l);
 	const struct journal_key *r = *((const struct journal_key **)_r);
 
-	return cmp_int(l->journal_seq, r->journal_seq);
+	/*
+	 * Map 0 to U64_MAX, so that keys with journal_seq === 0 come last
+	 *
+	 * journal_seq == 0 means that the key comes from early repair, and
+	 * should be inserted last so as to avoid overflowing the journal
+	 */
+	return cmp_int(l->journal_seq - 1, r->journal_seq - 1);
 }
 
 int bch2_journal_replay(struct bch_fs *c)
···
 		}
 	}
 
+	bch2_trans_unlock_long(trans);
 	/*
 	 * Now, replay any remaining keys in the order in which they appear in
 	 * the journal, unpinning those journal entries as we go:
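The comparator change leans on unsigned wraparound: subtracting 1 maps journal_seq == 0 to U64_MAX, so early-repair keys sort after everything else without a special case. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

static int cmp_u64(uint64_t l, uint64_t r)
{
	return (l > r) - (l < r);	/* same idea as the kernel's cmp_int() */
}

int main(void)
{
	uint64_t real_seq = 12345, repair_seq = 0;

	/* Naive compare: repair keys (seq 0) would sort first */
	printf("%d\n", cmp_u64(repair_seq, real_seq));		/* prints -1 */

	/* With the -1 bias, 0 wraps to UINT64_MAX and sorts last */
	printf("%d\n", cmp_u64(repair_seq - 1, real_seq - 1));	/* prints 1 */
	return 0;
}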
fs/bcachefs/replicas.c (+2 -1)

···
 		.type = BCH_DISK_ACCOUNTING_replicas,
 	};
 
-	memcpy(&k.replicas, e, replicas_entry_bytes(e));
+	unsafe_memcpy(&k.replicas, e, replicas_entry_bytes(e),
+		      "embedded variable length struct");
 
 	struct bpos p = disk_accounting_pos_to_bpos(&k);
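unsafe_memcpy() is an annotation, not a behavior change: replicas entries are variable length, so the copy intentionally runs past the fixed-size replicas field into trailing space the enclosing key reserves, which FORTIFY_SOURCE would otherwise flag as an overflow. A rough userspace illustration of the layout problem (names and layout are illustrative, not the real bcachefs structures):

#include <stddef.h>
#include <string.h>

struct replicas_entry {
	unsigned char	data_type;
	unsigned char	nr_devs;
	unsigned char	devs[2];	/* really variable length on disk */
};

struct accounting_key {
	unsigned		type;
	struct replicas_entry	replicas;	/* fixed-size head... */
	unsigned char		pad[32];	/* ...but devs[] may spill here */
};

static size_t entry_bytes(const struct replicas_entry *e)
{
	return offsetof(struct replicas_entry, devs) + e->nr_devs;
}

void key_set_replicas(struct accounting_key *k, const struct replicas_entry *e)
{
	/* When entry_bytes(e) > sizeof(k->replicas), a fortified memcpy()
	 * warns even though pad[] guarantees the space; in the kernel,
	 * unsafe_memcpy(dst, src, len, justification) marks exactly this
	 * kind of overrun as intentional. */
	memcpy(&k->replicas, e, entry_bytes(e));
}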
fs/bcachefs/sb-downgrade.c (+7 -1)

···
 	  BCH_FSCK_ERR_accounting_key_junk_at_end)		\
 	x(disk_accounting_inum,					\
 	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
+	  BCH_FSCK_ERR_accounting_mismatch)			\
+	x(rebalance_work_acct_fix,				\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
 	  BCH_FSCK_ERR_accounting_mismatch)
 
 #define DOWNGRADE_TABLE()					\
···
 	  BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong,	\
 	  BCH_FSCK_ERR_fs_usage_replicas_wrong,			\
 	  BCH_FSCK_ERR_accounting_replicas_not_marked,		\
-	  BCH_FSCK_ERR_bkey_version_in_future)
+	  BCH_FSCK_ERR_bkey_version_in_future)			\
+	x(rebalance_work_acct_fix,				\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
+	  BCH_FSCK_ERR_accounting_mismatch)
 
 struct upgrade_downgrade_entry {
 	u64 recovery_passes;
fs/bcachefs/util.c (-1)

···
 	printbuf_tabstop_push(out, TABSTOP_SIZE + 2);
 
 	prt_printf(out, "\tsince mount\r\trecent\r\n");
-	prt_printf(out, "recent");
 
 	printbuf_tabstops_reset(out);
 	printbuf_tabstop_push(out, out->indent + 20);
fs/bcachefs/xattr.c (+11 -1)

···
 				     name, buffer, size, true);
 }
 
+/* Noop - xattrs in the bcachefs_effective namespace are inherited */
+static int bch2_xattr_bcachefs_set_effective(const struct xattr_handler *handler,
+					     struct mnt_idmap *idmap,
+					     struct dentry *dentry, struct inode *vinode,
+					     const char *name, const void *value,
+					     size_t size, int flags)
+{
+	return 0;
+}
+
 static const struct xattr_handler bch_xattr_bcachefs_effective_handler = {
 	.prefix	= "bcachefs_effective.",
 	.get	= bch2_xattr_bcachefs_get_effective,
-	.set	= bch2_xattr_bcachefs_set,
+	.set	= bch2_xattr_bcachefs_set_effective,
 };
 
 #endif /* NO_BCACHEFS_FS */
fs/super.c (+2 -2)

···
 		return error;
 
 	if (!fc->root) {
-		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
-		       fc->fs_type->name);
+		pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
+		       fc->fs_type->name, error);
 		/* We don't know what the locking state of the superblock is -
 		 * if there is a superblock.
 		 */