Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'bcachefs-2024-02-25' of https://evilpiepirate.org/git/bcachefs

Pull bcachefs fixes from Kent Overstreet:
"Some more mostly boring fixes, but some not

User reported ones:

- the BTREE_ITER_FILTER_SNAPSHOTS one fixes a really nasty
performance bug; user reported an untar initially taking two
seconds and then ~2 minutes

- kill a __GFP_NOFAIL in the buffered read path; this was a leftover
from the trickier fix to kill __GFP_NOFAIL in readahead, where we
can't return errors (and have to silently truncate the read
ourselves).

bcachefs can't use GFP_NOFAIL for folio state unlike iomap based
filesystems because our folio state is just barely too big, 2MB
hugepages cause us to exceed the 2 page threshold for GFP_NOFAIL.

additionally, the flags argument was just buggy, we weren't
supplying GFP_KERNEL previously (!)"

* tag 'bcachefs-2024-02-25' of https://evilpiepirate.org/git/bcachefs:
bcachefs: fix bch2_save_backtrace()
bcachefs: Fix check_snapshot() memcpy
bcachefs: Fix bch2_journal_flush_device_pins()
bcachefs: fix iov_iter count underflow on sub-block dio read
bcachefs: Fix BTREE_ITER_FILTER_SNAPSHOTS on inodes btree
bcachefs: Kill __GFP_NOFAIL in buffered read path
bcachefs: fix backpointer_to_text() when dev does not exist

+25 -22
+5 -3
fs/bcachefs/backpointers.c
··· 68 68 69 69 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) 70 70 { 71 - prt_str(out, "bucket="); 72 - bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p)); 73 - prt_str(out, " "); 71 + if (bch2_dev_exists2(c, k.k->p.inode)) { 72 + prt_str(out, "bucket="); 73 + bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p)); 74 + prt_str(out, " "); 75 + } 74 76 75 77 bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v); 76 78 }
+3 -1
fs/bcachefs/btree_iter.c
··· 2156 2156 * isn't monotonically increasing before FILTER_SNAPSHOTS, and 2157 2157 * that's what we check against in extents mode: 2158 2158 */ 2159 - if (k.k->p.inode > end.inode) 2159 + if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS) 2160 + ? bkey_gt(k.k->p, end) 2161 + : k.k->p.inode > end.inode)) 2160 2162 goto end; 2161 2163 2162 2164 if (iter->update_path &&
+8 -13
fs/bcachefs/fs-io-buffered.c
··· 303 303 darray_exit(&readpages_iter.folios); 304 304 } 305 305 306 - static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio, 307 - subvol_inum inum, struct folio *folio) 308 - { 309 - bch2_folio_create(folio, __GFP_NOFAIL); 310 - 311 - rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC; 312 - rbio->bio.bi_iter.bi_sector = folio_sector(folio); 313 - BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0)); 314 - 315 - bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0)); 316 - } 317 - 318 306 static void bch2_read_single_folio_end_io(struct bio *bio) 319 307 { 320 308 complete(bio->bi_private); ··· 317 329 int ret; 318 330 DECLARE_COMPLETION_ONSTACK(done); 319 331 332 + if (!bch2_folio_create(folio, GFP_KERNEL)) 333 + return -ENOMEM; 334 + 320 335 bch2_inode_opts_get(&opts, c, &inode->ei_inode); 321 336 322 337 rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read), ··· 327 336 rbio->bio.bi_private = &done; 328 337 rbio->bio.bi_end_io = bch2_read_single_folio_end_io; 329 338 330 - __bchfs_readfolio(c, rbio, inode_inum(inode), folio); 339 + rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC; 340 + rbio->bio.bi_iter.bi_sector = folio_sector(folio); 341 + BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0)); 342 + 343 + bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0)); 331 344 wait_for_completion(&done); 332 345 333 346 ret = blk_status_to_errno(rbio->bio.bi_status);
+2
fs/bcachefs/fs-io-direct.c
··· 88 88 return ret; 89 89 90 90 shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c)); 91 + if (shorten >= iter->count) 92 + shorten = 0; 91 93 iter->count -= shorten; 92 94 93 95 bio = bio_alloc_bioset(NULL,
+5 -3
fs/bcachefs/journal_reclaim.c
··· 892 892 journal_seq_pin(j, seq)->devs); 893 893 seq++; 894 894 895 - spin_unlock(&j->lock); 896 - ret = bch2_mark_replicas(c, &replicas.e); 897 - spin_lock(&j->lock); 895 + if (replicas.e.nr_devs) { 896 + spin_unlock(&j->lock); 897 + ret = bch2_mark_replicas(c, &replicas.e); 898 + spin_lock(&j->lock); 899 + } 898 900 } 899 901 spin_unlock(&j->lock); 900 902 err:
+1 -1
fs/bcachefs/snapshot.c
··· 728 728 return 0; 729 729 730 730 memset(&s, 0, sizeof(s)); 731 - memcpy(&s, k.v, bkey_val_bytes(k.k)); 731 + memcpy(&s, k.v, min(sizeof(s), bkey_val_bytes(k.k))); 732 732 733 733 id = le32_to_cpu(s.parent); 734 734 if (id) {
+1 -1
fs/bcachefs/util.c
··· 289 289 do { 290 290 nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1); 291 291 } while (nr_entries == stack->size && 292 - !(ret = darray_make_room(stack, stack->size * 2))); 292 + !(ret = darray_make_room_gfp(stack, stack->size * 2, gfp))); 293 293 294 294 stack->nr = nr_entries; 295 295 up_read(&task->signal->exec_update_lock);