// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads.  SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/folio_batch.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information
 * is stale.  If any of the buffers are locked, it is assumed they are
 * locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	put_bh(bh);
	__end_buffer_read_notouch(bh, uptodate);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	/*
	 * Folio lock protects the buffers.  Callers that cannot block
	 * will fall back to serializing vs try_to_free_buffers() via
	 * the i_private_lock.
	 */
	if (atomic)
		spin_lock(&bd_mapping->i_private_lock);
	else
		folio_lock(folio);

	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	/*
	 * Upon a noref migration, the folio lock serializes here;
	 * otherwise bail.
	 */
	if (test_bit_acquire(BH_Migrate, &head->b_state)) {
		WARN_ON(!atomic);
		goto out_unlock;
	}

	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between file I/O on
	 * the block device and getblk.  It gets dealt with elsewhere;
	 * don't complain if we had some unmapped buffers.
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << blkbits);
	}
out_unlock:
	if (atomic)
		spin_unlock(&bd_mapping->i_private_lock);
	else
		folio_unlock(folio);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
	}

	/*
	 * Be _very_ careful from here on.  Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
	struct fsverity_info *vi;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(ctx->vi, bh->b_folio, bh->b_size,
				       bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && ctx->vi) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	struct fsverity_info *vi = NULL;

	/* needed by ext4 */
	if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
		vi = fsverity_get_info(inode);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || vi)) {
		struct postprocess_bh_ctx *ctx = kmalloc_obj(*ctx, GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			ctx->vi = vi;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address spaces'
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mmb_mark_buffer_dirty(), mmb_sync(), mmb_has_buffers()
 * and mmb_invalidate() are provided for the management of a list of
 * dependent buffers in a mapping_metadata_bhs struct.
 *
 * The locking is a little subtle: the list of buffer heads is protected by
 * the lock in mapping_metadata_bhs, so functions coming from the bdev
 * mapping (such as try_to_free_buffers()) need to safely get to
 * mapping_metadata_bhs using RCU, grab the lock, verify we didn't race with
 * somebody detaching the bh or moving it to a different inode, and only
 * then proceed.
 */

void mmb_init(struct mapping_metadata_bhs *mmb, struct address_space *mapping)
{
	spin_lock_init(&mmb->lock);
	INIT_LIST_HEAD(&mmb->list);
	mmb->mapping = mapping;
}
EXPORT_SYMBOL(mmb_init);
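
/*
 * Illustrative sketch (not kernel code): a simple filesystem would
 * typically embed a mapping_metadata_bhs in its per-inode info and
 * initialize it when the inode is set up.  The "myfs" names below are
 * hypothetical, and which address_space to pass is an assumption on our
 * part: mark_buffer_write_io_error() reports write errors against
 * mmb->mapping, so the file's own mapping is used here.
 *
 *	struct myfs_inode_info {
 *		struct mapping_metadata_bhs	i_mmb;
 *		struct inode			vfs_inode;
 *	};
 *
 *	static void myfs_init_inode(struct inode *inode)
 *	{
 *		struct myfs_inode_info *mi =
 *			container_of(inode, struct myfs_inode_info, vfs_inode);
 *
 *		mmb_init(&mi->i_mmb, inode->i_mapping);
 *	}
 */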

static void __remove_assoc_queue(struct mapping_metadata_bhs *mmb,
				 struct buffer_head *bh)
{
	lockdep_assert_held(&mmb->lock);
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_mmb);
	bh->b_mmb = NULL;
}

static void remove_assoc_queue(struct buffer_head *bh)
{
	struct mapping_metadata_bhs *mmb;

	/*
	 * The locking dance is ugly here.  We need to acquire the lock
	 * protecting the metadata bh list while possibly racing with the bh
	 * being removed from the list or moved to a different one.  We
	 * use RCU to pin mapping_metadata_bhs in memory to
	 * opportunistically acquire the lock, and then recheck that the bh
	 * didn't move under us.
	 */
	while (bh->b_mmb) {
		rcu_read_lock();
		mmb = READ_ONCE(bh->b_mmb);
		if (mmb) {
			spin_lock(&mmb->lock);
			if (bh->b_mmb == mmb)
				__remove_assoc_queue(mmb, bh);
			spin_unlock(&mmb->lock);
		}
		rcu_read_unlock();
	}
}

bool mmb_has_buffers(struct mapping_metadata_bhs *mmb)
{
	return !list_empty(&mmb->list);
}
EXPORT_SYMBOL_GPL(mmb_has_buffers);

/**
 * mmb_sync - write out & wait upon all buffers in a list
 * @mmb: the list of buffers to write
 *
 * Starts I/O against the buffers in the given list and waits upon
 * that I/O.  Basically, this is a convenience function for fsync().  @mmb is
 * for a file or directory which needs those buffers to be written for a
 * successful fsync().
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.  mark_buffer_dirty_inode()
 * doesn't touch the b_assoc_buffers list if b_mmb is not NULL, so we are
 * sure the buffer stays on our list until IO completes (at which point it
 * can be reaped).
 */
int mmb_sync(struct mapping_metadata_bhs *mmb)
{
	struct buffer_head *bh;
	int err = 0;
	struct blk_plug plug;
	LIST_HEAD(tmp);

	if (!mmb_has_buffers(mmb))
		return 0;

	blk_start_plug(&plug);

	spin_lock(&mmb->lock);
	while (!list_empty(&mmb->list)) {
		bh = BH_ENTRY(mmb->list.next);
		WARN_ON_ONCE(bh->b_mmb != mmb);
		__remove_assoc_queue(mmb, bh);
		/*
		 * Avoid a race with mark_buffer_dirty_inode(), which does a
		 * lockless check; we rely on seeing the dirty bit.
		 */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_mmb = mmb;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(&mmb->lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping.  Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(&mmb->lock);
			}
		}
	}

	spin_unlock(&mmb->lock);
	blk_finish_plug(&plug);
	spin_lock(&mmb->lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		__remove_assoc_queue(mmb, bh);
		/*
		 * Avoid a race with mark_buffer_dirty_inode(), which does a
		 * lockless check; we rely on seeing the dirty bit.
		 */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers, &mmb->list);
			bh->b_mmb = mmb;
		}
		spin_unlock(&mmb->lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(&mmb->lock);
	}
	spin_unlock(&mmb->lock);
	return err;
}
EXPORT_SYMBOL(mmb_sync);

/**
 * mmb_fsync_noflush - fsync implementation for simple filesystems with
 * metadata buffers list
 *
 * @file: file to synchronize
 * @mmb: list of metadata bhs to flush
 * @start: start offset in bytes
 * @end: end offset in bytes (inclusive)
 * @datasync: only synchronize essential metadata if true
 *
 * This is an implementation of the fsync method for simple filesystems which
 * track all non-inode metadata in the buffers list hanging off the @mmb
 * structure.
 */
int mmb_fsync_noflush(struct file *file, struct mapping_metadata_bhs *mmb,
		      loff_t start, loff_t end, bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret = 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	if (mmb)
		ret = mmb_sync(mmb);
	if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* Check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(mmb_fsync_noflush);

/**
 * mmb_fsync - fsync implementation for simple filesystems with metadata
 * buffers list
 *
 * @file: file to synchronize
 * @mmb: list of metadata bhs to flush
 * @start: start offset in bytes
 * @end: end offset in bytes (inclusive)
 * @datasync: only synchronize essential metadata if true
 *
 * This is an implementation of the fsync method for simple filesystems which
 * track all non-inode metadata in the buffers list hanging off the @mmb
 * structure.  This also makes sure that a device cache flush operation is
 * called at the end.
 */
int mmb_fsync(struct file *file, struct mapping_metadata_bhs *mmb,
	      loff_t start, loff_t end, bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = mmb_fsync_noflush(file, mmb, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(mmb_fsync);
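
/*
 * Illustrative sketch (hypothetical "myfs" names): a filesystem that keeps
 * its metadata buffers on an mmb list can often implement ->fsync as a
 * thin wrapper around mmb_fsync():
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		struct myfs_inode_info *mi = MYFS_I(file_inode(file));
 *
 *		return mmb_fsync(file, &mi->i_mmb, start, end, datasync);
 *	}
 *
 * A filesystem that issues its own device cache flush would call
 * mmb_fsync_noflush() instead.
 */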

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO so that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh;

	bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mmb_mark_buffer_dirty(struct buffer_head *bh,
			   struct mapping_metadata_bhs *mmb)
{
	mark_buffer_dirty(bh);
	if (!bh->b_mmb) {
		spin_lock(&mmb->lock);
		/*
		 * For a corrupted filesystem with multiply claimed blocks this
		 * can fail.  Avoid corrupting the linked list in that case.
		 */
		if (cmpxchg(&bh->b_mmb, NULL, mmb) != NULL) {
			spin_unlock(&mmb->lock);
			return;
		}
		list_move_tail(&bh->b_assoc_buffers, &mmb->list);
		spin_unlock(&mmb->lock);
	}
}
EXPORT_SYMBOL(mmb_mark_buffer_dirty);
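
/*
 * Illustrative sketch: after modifying a metadata block (say, an ext2-style
 * indirect block), the buffer is dirtied and queued on the owning inode's
 * mmb list so that a later mmb_sync()/mmb_fsync() writes it out.  The
 * helper and the MYFS_I() accessor below are hypothetical:
 *
 *	static void myfs_set_indirect(struct inode *inode,
 *				      struct buffer_head *bh,
 *				      int slot, __le32 blocknr)
 *	{
 *		((__le32 *)bh->b_data)[slot] = blocknr;
 *		mmb_mark_buffer_dirty(bh, &MYFS_I(inode)->i_mmb);
 *	}
 */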

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
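
/*
 * Illustrative sketch: buffer_head based filesystems normally wire this up
 * directly in their address_space_operations; the myfs_* methods named
 * below are hypothetical, the other entries are real helpers:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.writepages		= myfs_writepages,
 *		.release_folio		= myfs_release_folio,
 *	};
 */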

/*
 * Invalidate any and all dirty buffers on a given buffers list.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 */
void mmb_invalidate(struct mapping_metadata_bhs *mmb)
{
	if (mmb_has_buffers(mmb)) {
		spin_lock(&mmb->lock);
		while (!list_empty(&mmb->list))
			__remove_assoc_queue(mmb, BH_ENTRY(mmb->list.next));
		spin_unlock(&mmb->lock);
	}
}
EXPORT_SYMBOL(mmb_invalidate);

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which must not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	memcg = get_mem_cgroup_from_folio(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
				    struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);

		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct address_space *mapping = bdev->bd_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned.  This should not
		 * happen very often; maybe we have old buffers attached to
		 * this blockdev's page cache and we're trying to change
		 * the block size?
		 */
		if (!try_to_free_buffers(folio)) {
			end_block = ~0ULL;
			goto unlock;
		}
	}

	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto unlock;

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&mapping->i_private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev, size);
	spin_unlock(&mapping->i_private_lock);
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	loff_t pos;

	/*
	 * Check for a block which lies outside our maximum possible
	 * pagecache index.
	 */
	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return false;
	}

	/* Create a folio with the proper size buffers */
	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	      unsigned size, gfp_t gfp)
{
	bool blocking = gfpflags_allow_blocking(gfp);

	if (WARN_ON_ONCE(!IS_ALIGNED(size, bdev_logical_block_size(bdev)))) {
		printk(KERN_ERR "getblk(): block size %d not aligned to logical block size %d\n",
			size, bdev_logical_block_size(bdev));
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;

		if (!grow_buffers(bdev, block, size, gfp))
			return NULL;

		if (blocking)
			bh = __find_get_block_nonatomic(bdev, block, size);
		else
			bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
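
/*
 * Illustrative sketch of the usual read-modify-write pattern for a metadata
 * block; "sb" is the filesystem's super_block, and the MYFS_MAGIC_OFF and
 * magic names are hypothetical:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + MYFS_MAGIC_OFF, magic, sizeof(magic));
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */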

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_mmb)
		mapping_set_error(bh->b_mmb->mapping, -EIO);
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
	if (atomic_read(&bh->b_count)) {
		put_bh(bh);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	remove_assoc_queue(bh);
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation to reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents dropping the
	 * attached page (i.e., defeats try_to_free_buffers), so it could make
	 * page migration fail.  Skip putting an upcoming bh into the bh_lru
	 * until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.  Atomic context callers may also get NULL if the buffer is being
 * migrated; in that case the page is not marked accessed either.
 */
static struct buffer_head *
find_get_block_common(struct block_device *bdev, sector_t block,
			unsigned size, bool atomic)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block, atomic);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}

struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	return find_get_block_common(bdev, block, size, true);
}
EXPORT_SYMBOL(__find_get_block);

/* Same as __find_get_block() but allows sleeping contexts */
struct buffer_head *
__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
			   unsigned size)
{
	return find_get_block_common(bdev, block, size, false);
}
EXPORT_SYMBOL(__find_get_block_nonatomic);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked.  The caller should call brelse() when it has finished
 * with the buffer.  The buffer may not be uptodate.  If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	if (gfpflags_allow_blocking(gfp))
		bh = __find_get_block_nonatomic(bdev, block, size);
	else
		bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);
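
/*
 * Illustrative sketch: a caller that will overwrite a whole block need not
 * read it first; it can take the buffer from bdev_getblk(), fill it, and
 * mark it dirty (the GFP_NOFS choice here is just an example):
 *
 *	struct buffer_head *bh = bdev_getblk(bdev, block, size, GFP_NOFS);
 *
 *	if (!bh)
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */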

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread_gfp() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 * @gfp: Not page allocation flags; see below.
 *
 * You are not expected to call this function.  You should use one of
 * sb_bread(), sb_bread_unmovable() or __bread().
 *
 * Read a specified block, and return the buffer head that refers to it.
 * If @gfp is 0, the memory will be allocated using the block device's
 * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
 * allocated from a movable area.  Do not pass in a complete set of
 * GFP flags.
 *
 * The returned buffer head has its refcount increased.  The caller should
 * call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs on each CPU either in irq context
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head *bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg_relaxed(&bh->b_state, &b_state,
				      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;

		next = bh->b_this_page;

		/*
		 * Are we still fully in range?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * Is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	folio_clear_mappedtodisk(folio);
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->i_private_lock);

	return head;
}
EXPORT_SYMBOL(create_empty_buffers);
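
/*
 * Illustrative sketch: a ->read_folio style helper typically makes sure
 * buffers exist before walking them (error handling omitted; the mapping
 * step depends on the filesystem):
 *
 *	struct buffer_head *head, *bh;
 *
 *	head = folio_buffers(folio);
 *	if (!head)
 *		head = create_empty_buffers(folio,
 *				1 << inode->i_blkbits, 0);
 *	bh = head;
 *	do {
 *		... map or read bh as needed ...
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */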

/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from return from this function until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we free that block ;-) We don't even need to mark it
 * not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway.  We used to use unmap_buffer() for such invalidation, but that was
 * wrong.  We definitely don't want to mark the alias unmapped, for example -
 * it would confuse anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct folio_batch fbatch;
	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use the folio lock instead of
			 * bd_mapping->i_private_lock to pin buffers here,
			 * since we can afford to sleep and it scales better
			 * than a global spinlock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);

static struct buffer_head *folio_create_buffers(struct folio *folio,
						struct inode *inode,
						unsigned int b_state)
{
	struct buffer_head *bh;

	BUG_ON(!folio_test_locked(folio));

	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio,
				1 << READ_ONCE(inode->i_blkbits), b_state);
	return bh;
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_folio is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_folio() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This can only happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_folio() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	size_t blocksize;
	int nr_underway = 0;
	blk_opf_t write_flags = wbc_to_write_flags(wbc);

	head = folio_create_buffers(folio, inode,
				    (1 << BH_Dirty) | (1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from block_dirty_folio
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the folio stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */

	bh = head;
	blocksize = bh->b_size;

	block = div_u64(folio_pos(folio), blocksize);
	last_block = div_u64(i_size_read(inode) - 1, blocksize);

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this folio can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_folio()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				clean_bdev_bh_alias(bh);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the folio.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			folio_redirty_for_writepage(wbc, folio);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh,
				end_buffer_async_write);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The folio and its buffers are protected by the writeback flag,
	 * so we can drop the bh refcounts early.
	 */
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (buffer_async_write(bh)) {
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
				      inode->i_write_hint, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The folio was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * write_dirty_buffer/submit_bh.  A rare case.
		 */
		folio_end_writeback(folio);

		/*
		 * The folio and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The folio is currently locked and not marked for writeback.
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write_endio(bh,
				end_buffer_async_write);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty folio.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	BUG_ON(folio_test_writeback(folio));
	mapping_set_error(folio->mapping, err);
	folio_start_writeback(folio);
	do {
		struct buffer_head *next = bh->b_this_page;

		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
				      inode->i_write_hint, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);
	goto done;
}
EXPORT_SYMBOL(__block_write_full_folio);
1895
1896/*
1897 * If a folio has any new buffers, zero them out here, and mark them uptodate
1898 * and dirty so they'll be written out (in order to prevent uninitialised
1899 * block data from leaking). And clear the new bit.
1900 */
1901void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1902{
1903 size_t block_start, block_end;
1904 struct buffer_head *head, *bh;
1905
1906 BUG_ON(!folio_test_locked(folio));
1907 head = folio_buffers(folio);
1908 if (!head)
1909 return;
1910
1911 bh = head;
1912 block_start = 0;
1913 do {
1914 block_end = block_start + bh->b_size;
1915
1916 if (buffer_new(bh)) {
1917 if (block_end > from && block_start < to) {
1918 if (!folio_test_uptodate(folio)) {
1919 size_t start, xend;
1920
1921 start = max(from, block_start);
1922 xend = min(to, block_end);
1923
1924 folio_zero_segment(folio, start, xend);
1925 set_buffer_uptodate(bh);
1926 }
1927
1928 clear_buffer_new(bh);
1929 mark_buffer_dirty(bh);
1930 }
1931 }
1932
1933 block_start = block_end;
1934 bh = bh->b_this_page;
1935 } while (bh != head);
1936}
1937EXPORT_SYMBOL(folio_zero_new_buffers);
1938
1939static int
1940iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1941 const struct iomap *iomap)
1942{
1943 loff_t offset = (loff_t)block << inode->i_blkbits;
1944
1945 bh->b_bdev = iomap->bdev;
1946
1947 /*
1948 * Block points to offset in file we need to map, iomap contains
1949 * the offset at which the map starts. If the map ends before the
1950 * current block, then do not map the buffer and let the caller
1951 * handle it.
1952 */
1953 if (offset >= iomap->offset + iomap->length)
1954 return -EIO;
1955
1956 switch (iomap->type) {
1957 case IOMAP_HOLE:
1958 /*
1959 * If the buffer is not up to date or beyond the current EOF,
1960 * we need to mark it as new to ensure sub-block zeroing is
1961 * executed if necessary.
1962 */
1963 if (!buffer_uptodate(bh) ||
1964 (offset >= i_size_read(inode)))
1965 set_buffer_new(bh);
1966 return 0;
1967 case IOMAP_DELALLOC:
1968 if (!buffer_uptodate(bh) ||
1969 (offset >= i_size_read(inode)))
1970 set_buffer_new(bh);
1971 set_buffer_uptodate(bh);
1972 set_buffer_mapped(bh);
1973 set_buffer_delay(bh);
1974 return 0;
1975 case IOMAP_UNWRITTEN:
1976 /*
1977 * For unwritten regions, we always need to ensure that regions
1978 * in the block we are not writing to are zeroed. Mark the
1979 * buffer as new to ensure this.
1980 */
1981 set_buffer_new(bh);
1982 set_buffer_unwritten(bh);
1983 fallthrough;
1984 case IOMAP_MAPPED:
1985 if ((iomap->flags & IOMAP_F_NEW) ||
1986 offset >= i_size_read(inode)) {
1987 /*
1988 * This can happen if truncating the block device races
1989 * with the check in the caller as i_size updates on
1990 * block devices aren't synchronized by i_rwsem for
1991 * block devices.
1992 */
1993 if (S_ISBLK(inode->i_mode))
1994 return -EIO;
1995 set_buffer_new(bh);
1996 }
1997 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
1998 inode->i_blkbits;
1999 set_buffer_mapped(bh);
2000 return 0;
2001 default:
2002 WARN_ON_ONCE(1);
2003 return -EIO;
2004 }
2005}
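
/*
 * Worked example for the IOMAP_MAPPED arithmetic above (illustrative, not
 * from the original source): with 4096-byte blocks (i_blkbits == 12), an
 * extent starting at file offset 0x100000 whose disk address is
 * iomap->addr == 0x800000, a buffer for file offset 0x104000 gets
 *
 *	b_blocknr = (0x800000 + 0x104000 - 0x100000) >> 12 = 0x804
 *
 * i.e. the buffer is mapped to disk block 0x804.
 */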
2006
2007int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2008 get_block_t *get_block, const struct iomap *iomap)
2009{
2010 size_t from = offset_in_folio(folio, pos);
2011 size_t to = from + len;
2012 struct inode *inode = folio->mapping->host;
2013 size_t block_start, block_end;
2014 sector_t block;
2015 int err = 0;
2016 size_t blocksize;
2017	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
2018
2019 BUG_ON(!folio_test_locked(folio));
2020 BUG_ON(to > folio_size(folio));
2021 BUG_ON(from > to);
2022
2023 head = folio_create_buffers(folio, inode, 0);
2024 blocksize = head->b_size;
2025 block = div_u64(folio_pos(folio), blocksize);
2026
2027	for (bh = head, block_start = 0; bh != head || !block_start;
2028	     block++, block_start = block_end, bh = bh->b_this_page) {
2029 block_end = block_start + blocksize;
2030 if (block_end <= from || block_start >= to) {
2031 if (folio_test_uptodate(folio)) {
2032 if (!buffer_uptodate(bh))
2033 set_buffer_uptodate(bh);
2034 }
2035 continue;
2036 }
2037 if (buffer_new(bh))
2038 clear_buffer_new(bh);
2039 if (!buffer_mapped(bh)) {
2040 WARN_ON(bh->b_size != blocksize);
2041 if (get_block)
2042 err = get_block(inode, block, bh, 1);
2043 else
2044 err = iomap_to_bh(inode, block, bh, iomap);
2045 if (err)
2046 break;
2047
2048 if (buffer_new(bh)) {
2049 clean_bdev_bh_alias(bh);
2050 if (folio_test_uptodate(folio)) {
2051 clear_buffer_new(bh);
2052 set_buffer_uptodate(bh);
2053 mark_buffer_dirty(bh);
2054 continue;
2055 }
2056 if (block_end > to || block_start < from)
2057 folio_zero_segments(folio,
2058 to, block_end,
2059 block_start, from);
2060 continue;
2061 }
2062 }
2063 if (folio_test_uptodate(folio)) {
2064 if (!buffer_uptodate(bh))
2065 set_buffer_uptodate(bh);
2066 continue;
2067 }
2068 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2069 !buffer_unwritten(bh) &&
2070 (block_start < from || block_end > to)) {
2071 bh_read_nowait(bh, 0);
2072			*wait_bh++ = bh;
2073 }
2074 }
2075	/*
2076	 * If we issued read requests, let them complete.
2077	 */
2078	while (wait_bh > wait) {
2079 wait_on_buffer(*--wait_bh);
2080 if (!buffer_uptodate(*wait_bh))
2081 err = -EIO;
2082 }
2083 if (unlikely(err))
2084 folio_zero_new_buffers(folio, from, to);
2085 return err;
2086}
2087
2088int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2089 get_block_t *get_block)
2090{
2091 return __block_write_begin_int(folio, pos, len, get_block, NULL);
2092}
2093EXPORT_SYMBOL(__block_write_begin);
2094
2095void block_commit_write(struct folio *folio, size_t from, size_t to)
2096{
2097 size_t block_start, block_end;
2098 bool partial = false;
2099 unsigned blocksize;
2100 struct buffer_head *bh, *head;
2101
2102 bh = head = folio_buffers(folio);
2103 if (!bh)
2104 return;
2105 blocksize = bh->b_size;
2106
2107 block_start = 0;
2108 do {
2109 block_end = block_start + blocksize;
2110 if (block_end <= from || block_start >= to) {
2111 if (!buffer_uptodate(bh))
2112 partial = true;
2113 } else {
2114 set_buffer_uptodate(bh);
2115 mark_buffer_dirty(bh);
2116 }
2117 if (buffer_new(bh))
2118 clear_buffer_new(bh);
2119
2120 block_start = block_end;
2121 bh = bh->b_this_page;
2122 } while (bh != head);
2123
2124 /*
2125 * If this is a partial write which happened to make all buffers
2126 * uptodate then we can optimize away a bogus read_folio() for
2127 * the next read(). Here we 'discover' whether the folio went
2128 * uptodate as a result of this (potentially partial) write.
2129 */
2130 if (!partial)
2131 folio_mark_uptodate(folio);
2132}
2133EXPORT_SYMBOL(block_commit_write);
2134
2135/*
2136 * block_write_begin takes care of the basic task of block allocation and
2137 * bringing partial write blocks uptodate first.
2138 *
2139 * The filesystem needs to handle block truncation upon failure.
2140 */
2141int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2142 struct folio **foliop, get_block_t *get_block)
2143{
2144 pgoff_t index = pos >> PAGE_SHIFT;
2145 struct folio *folio;
2146 int status;
2147
2148 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2149 mapping_gfp_mask(mapping));
2150 if (IS_ERR(folio))
2151 return PTR_ERR(folio);
2152
2153 status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2154 if (unlikely(status)) {
2155 folio_unlock(folio);
2156 folio_put(folio);
2157 folio = NULL;
2158 }
2159
2160 *foliop = folio;
2161 return status;
2162}
2163EXPORT_SYMBOL(block_write_begin);
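
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->write_begin for a hypothetical filesystem that already has a
 * get_block routine. All myfs_* names are assumptions.
 */
#if 0	/* example only */
static int myfs_write_begin(const struct kiocb *iocb,
		struct address_space *mapping, loff_t pos, unsigned len,
		struct folio **foliop, void **fsdata)
{
	/* block_write_begin() allocates blocks and reads in partial ones */
	return block_write_begin(mapping, pos, len, foliop, myfs_get_block);
}
#endif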
2164
2165int block_write_end(loff_t pos, unsigned len, unsigned copied,
2166 struct folio *folio)
2167{
2168 size_t start = pos - folio_pos(folio);
2169
2170 if (unlikely(copied < len)) {
2171 /*
2172 * The buffers that were written will now be uptodate, so
2173 * we don't have to worry about a read_folio reading them
2174 * and overwriting a partial write. However if we have
2175 * encountered a short write and only partially written
2176 * into a buffer, it will not be marked uptodate, so a
2177 * read_folio might come in and destroy our partial write.
2178 *
2179 * Do the simplest thing, and just treat any short write to a
2180 * non uptodate folio as a zero-length write, and force the
2181 * caller to redo the whole thing.
2182 */
2183 if (!folio_test_uptodate(folio))
2184 copied = 0;
2185
2186 folio_zero_new_buffers(folio, start+copied, start+len);
2187 }
2188 flush_dcache_folio(folio);
2189
2190 /* This could be a short (even 0-length) commit */
2191 block_commit_write(folio, start, start + copied);
2192
2193 return copied;
2194}
2195EXPORT_SYMBOL(block_write_end);
2196
2197int generic_write_end(const struct kiocb *iocb, struct address_space *mapping,
2198 loff_t pos, unsigned len, unsigned copied,
2199 struct folio *folio, void *fsdata)
2200{
2201 struct inode *inode = mapping->host;
2202 loff_t old_size = inode->i_size;
2203 bool i_size_changed = false;
2204
2205 copied = block_write_end(pos, len, copied, folio);
2206
2207 /*
2208 * No need to use i_size_read() here, the i_size cannot change under us
2209 * because we hold i_rwsem.
2210 *
2211 * But it's important to update i_size while still holding folio lock:
2212 * page writeout could otherwise come in and zero beyond i_size.
2213 */
2214 if (pos + copied > inode->i_size) {
2215 i_size_write(inode, pos + copied);
2216 i_size_changed = true;
2217 }
2218
2219 folio_unlock(folio);
2220 folio_put(folio);
2221
2222 if (old_size < pos)
2223 pagecache_isize_extended(inode, old_size, pos);
2224 /*
2225	 * Don't mark the inode dirty under page lock. First, that would
2226	 * needlessly lengthen the time the page lock is held. Second, it forces lock
2227 * ordering of page lock and transaction start for journaling
2228 * filesystems.
2229 */
2230 if (i_size_changed)
2231 mark_inode_dirty(inode);
2232 return copied;
2233}
2234EXPORT_SYMBOL(generic_write_end);
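
/*
 * Illustrative sketch: how the exported helpers in this file are typically
 * wired into an address_space_operations table. The myfs_* entries are
 * hypothetical wrappers (sketched near the corresponding helpers below);
 * the block_* and generic_* entries are the real exported functions.
 */
#if 0	/* example only */
static const struct address_space_operations myfs_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio		= myfs_read_folio,
	.write_begin		= myfs_write_begin,
	.write_end		= generic_write_end,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.release_folio		= myfs_release_folio,
	.bmap			= myfs_bmap,
};
#endif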
2235
2236/*
2237 * block_is_partially_uptodate checks whether buffers within a folio are
2238 * uptodate or not.
2239 *
2240 * Returns true if all buffers which correspond to the specified part
2241 * of the folio are uptodate.
2242 */
2243bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2244{
2245 unsigned block_start, block_end, blocksize;
2246 unsigned to;
2247 struct buffer_head *bh, *head;
2248 bool ret = true;
2249
2250 head = folio_buffers(folio);
2251 if (!head)
2252 return false;
2253 blocksize = head->b_size;
2254 to = min(folio_size(folio) - from, count);
2255 to = from + to;
2256 if (from < blocksize && to > folio_size(folio) - blocksize)
2257 return false;
2258
2259 bh = head;
2260 block_start = 0;
2261 do {
2262 block_end = block_start + blocksize;
2263 if (block_end > from && block_start < to) {
2264 if (!buffer_uptodate(bh)) {
2265 ret = false;
2266 break;
2267 }
2268 if (block_end >= to)
2269 break;
2270 }
2271 block_start = block_end;
2272 bh = bh->b_this_page;
2273 } while (bh != head);
2274
2275 return ret;
2276}
2277EXPORT_SYMBOL(block_is_partially_uptodate);
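
/*
 * Worked example (illustrative): on a 4096-byte folio carrying four
 * 1024-byte buffers, a read of bytes 100..900 only touches the first
 * buffer, so the folio counts as partially uptodate for that range as
 * soon as buffer 0 is uptodate, even while buffers 1-3 are not.
 */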
2278
2279/*
2280 * Generic "read_folio" function for block devices that have the normal
2281 * get_block functionality. This is most of the block device filesystems.
2282 * Reads the folio asynchronously --- the unlock_buffer() and
2283 * set/clear_buffer_uptodate() functions propagate buffer state into the
2284 * folio once IO has completed.
2285 */
2286int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2287{
2288 struct inode *inode = folio->mapping->host;
2289 sector_t iblock, lblock;
2290 struct buffer_head *bh, *head, *prev = NULL;
2291 size_t blocksize;
2292 int fully_mapped = 1;
2293 bool page_error = false;
2294 loff_t limit = i_size_read(inode);
2295
2296 /* This is needed for ext4. */
2297 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2298 limit = inode->i_sb->s_maxbytes;
2299
2300 head = folio_create_buffers(folio, inode, 0);
2301 blocksize = head->b_size;
2302
2303 iblock = div_u64(folio_pos(folio), blocksize);
2304 lblock = div_u64(limit + blocksize - 1, blocksize);
2305 bh = head;
2306
2307 do {
2308 if (buffer_uptodate(bh))
2309 continue;
2310
2311 if (!buffer_mapped(bh)) {
2312 int err = 0;
2313
2314 fully_mapped = 0;
2315 if (iblock < lblock) {
2316 WARN_ON(bh->b_size != blocksize);
2317 err = get_block(inode, iblock, bh, 0);
2318 if (err)
2319 page_error = true;
2320 }
2321 if (!buffer_mapped(bh)) {
2322 folio_zero_range(folio, bh_offset(bh),
2323 blocksize);
2324 if (!err)
2325 set_buffer_uptodate(bh);
2326 continue;
2327 }
2328 /*
2329 * get_block() might have updated the buffer
2330 * synchronously
2331 */
2332 if (buffer_uptodate(bh))
2333 continue;
2334 }
2335
2336 lock_buffer(bh);
2337 if (buffer_uptodate(bh)) {
2338 unlock_buffer(bh);
2339 continue;
2340 }
2341
2342 mark_buffer_async_read(bh);
2343 if (prev)
2344 submit_bh(REQ_OP_READ, prev);
2345 prev = bh;
2346 } while (iblock++, (bh = bh->b_this_page) != head);
2347
2348 if (fully_mapped)
2349 folio_set_mappedtodisk(folio);
2350
2351 /*
2352 * All buffers are uptodate or get_block() returned an error
2353 * when trying to map them - we must finish the read because
2354 * end_buffer_async_read() will never be called on any buffer
2355 * in this folio.
2356 */
2357 if (prev)
2358 submit_bh(REQ_OP_READ, prev);
2359 else
2360 folio_end_read(folio, !page_error);
2361
2362 return 0;
2363}
2364EXPORT_SYMBOL(block_read_full_folio);
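
/*
 * Illustrative sketch: a filesystem's ->read_folio is usually just a thin
 * wrapper around this helper (myfs_get_block is hypothetical).
 */
#if 0	/* example only */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}
#endif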
2365
2366/*
2367 * Utility function for filesystems that need to do work on expanding
2368 * truncates. Uses pagecache writes to let the filesystem handle the hole.
2369 */
2370int generic_cont_expand_simple(struct inode *inode, loff_t size)
2371{
2372 struct address_space *mapping = inode->i_mapping;
2373 const struct address_space_operations *aops = mapping->a_ops;
2374 struct folio *folio;
2375 void *fsdata = NULL;
2376 int err;
2377
2378 err = inode_newsize_ok(inode, size);
2379 if (err)
2380 goto out;
2381
2382 err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2383 if (err)
2384 goto out;
2385
2386 err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2387 BUG_ON(err > 0);
2388
2389out:
2390 return err;
2391}
2392EXPORT_SYMBOL(generic_cont_expand_simple);
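
/*
 * Illustrative sketch: an expanding truncate in a filesystem's ->setattr
 * can use the helper above to instantiate the new EOF block. The myfs_*
 * names are hypothetical.
 */
#if 0	/* example only */
static int myfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	/* ... handle the remaining attribute changes ... */
	return 0;
}
#endif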
2393
2394static int cont_expand_zero(const struct kiocb *iocb,
2395 struct address_space *mapping,
2396 loff_t pos, loff_t *bytes)
2397{
2398 struct inode *inode = mapping->host;
2399 const struct address_space_operations *aops = mapping->a_ops;
2400 unsigned int blocksize = i_blocksize(inode);
2401 struct folio *folio;
2402 void *fsdata = NULL;
2403 pgoff_t index, curidx;
2404 loff_t curpos;
2405 unsigned zerofrom, offset, len;
2406 int err = 0;
2407
2408 index = pos >> PAGE_SHIFT;
2409 offset = pos & ~PAGE_MASK;
2410
2411	while (index > (curidx = (curpos = *bytes) >> PAGE_SHIFT)) {
2412 zerofrom = curpos & ~PAGE_MASK;
2413 if (zerofrom & (blocksize-1)) {
2414 *bytes |= (blocksize-1);
2415 (*bytes)++;
2416 }
2417 len = PAGE_SIZE - zerofrom;
2418
2419 err = aops->write_begin(iocb, mapping, curpos, len,
2420 &folio, &fsdata);
2421 if (err)
2422 goto out;
2423 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2424 err = aops->write_end(iocb, mapping, curpos, len, len,
2425 folio, fsdata);
2426 if (err < 0)
2427 goto out;
2428 BUG_ON(err != len);
2429 err = 0;
2430
2431 balance_dirty_pages_ratelimited(mapping);
2432
2433 if (fatal_signal_pending(current)) {
2434 err = -EINTR;
2435 goto out;
2436 }
2437 }
2438
2439 /* page covers the boundary, find the boundary offset */
2440 if (index == curidx) {
2441 zerofrom = curpos & ~PAGE_MASK;
2442		/* if the write begins at or before the current EOF offset,
2443		 * the gap is empty and there is nothing to zero */
2444		if (offset <= zerofrom)
2445			goto out;
2446 if (zerofrom & (blocksize-1)) {
2447 *bytes |= (blocksize-1);
2448 (*bytes)++;
2449 }
2450 len = offset - zerofrom;
2451
2452 err = aops->write_begin(iocb, mapping, curpos, len,
2453 &folio, &fsdata);
2454 if (err)
2455 goto out;
2456 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2457 err = aops->write_end(iocb, mapping, curpos, len, len,
2458 folio, fsdata);
2459 if (err < 0)
2460 goto out;
2461 BUG_ON(err != len);
2462 err = 0;
2463 }
2464out:
2465 return err;
2466}
2467
2468/*
2469 * For moronic filesystems that do not allow holes in files.
2470 * We may have to extend the file.
2471 */
2472int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping,
2473 loff_t pos, unsigned len, struct folio **foliop,
2474 void **fsdata, get_block_t *get_block, loff_t *bytes)
2475{
2476 struct inode *inode = mapping->host;
2477 unsigned int blocksize = i_blocksize(inode);
2478 unsigned int zerofrom;
2479 int err;
2480
2481 err = cont_expand_zero(iocb, mapping, pos, bytes);
2482 if (err)
2483 return err;
2484
2485 zerofrom = *bytes & ~PAGE_MASK;
2486 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2487 *bytes |= (blocksize-1);
2488 (*bytes)++;
2489 }
2490
2491 return block_write_begin(mapping, pos, len, foliop, get_block);
2492}
2493EXPORT_SYMBOL(cont_write_begin);
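
/*
 * Illustrative sketch, modelled on how fat uses this helper: the
 * filesystem keeps a hole-free high-water mark per inode (mmu_private
 * below, a hypothetical field) and passes its address as @bytes.
 */
#if 0	/* example only */
static int myfs_cont_write_begin(const struct kiocb *iocb,
		struct address_space *mapping, loff_t pos, unsigned len,
		struct folio **foliop, void **fsdata)
{
	return cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}
#endif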
2494
2495/*
2496 * block_page_mkwrite() is not allowed to change the file size as it gets
2497 * called from a page fault handler when a page is first dirtied. Hence we must
2498 * be careful to check for EOF conditions here. We set the page up correctly
2499 * for a written page which means we get ENOSPC checking when writing into
2500 * holes and correct delalloc and unwritten extent mapping on filesystems that
2501 * support these features.
2502 *
2503 * We are not allowed to take the i_rwsem here so we have to play games to
2504 * protect against truncate races as the page could now be beyond EOF. Because
2505 * truncate writes the inode size before removing pages, once we have the
2506 * page lock we can determine safely if the page is beyond EOF. If it is not
2507 * beyond EOF, then the page is guaranteed safe against truncation until we
2508 * unlock the page.
2509 *
2510 * Direct callers of this function should protect against filesystem freezing
2511 * using sb_start_pagefault() - sb_end_pagefault() functions.
2512 */
2513int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2514 get_block_t get_block)
2515{
2516 struct folio *folio = page_folio(vmf->page);
2517 struct inode *inode = file_inode(vma->vm_file);
2518 unsigned long end;
2519 loff_t size;
2520 int ret;
2521
2522 folio_lock(folio);
2523 size = i_size_read(inode);
2524 if ((folio->mapping != inode->i_mapping) ||
2525 (folio_pos(folio) >= size)) {
2526 /* We overload EFAULT to mean page got truncated */
2527 ret = -EFAULT;
2528 goto out_unlock;
2529 }
2530
2531 end = folio_size(folio);
2532 /* folio is wholly or partially inside EOF */
2533 if (folio_pos(folio) + end > size)
2534 end = size - folio_pos(folio);
2535
2536 ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2537 if (unlikely(ret))
2538 goto out_unlock;
2539
2540 block_commit_write(folio, 0, end);
2541
2542 folio_mark_dirty(folio);
2543 folio_wait_stable(folio);
2544 return 0;
2545out_unlock:
2546 folio_unlock(folio);
2547 return ret;
2548}
2549EXPORT_SYMBOL(block_page_mkwrite);
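
/*
 * Illustrative sketch: wiring this into a vm_operations_struct with the
 * freeze protection the comment above calls for; block_page_mkwrite_return()
 * (from buffer_head.h) converts the errno into a vm_fault_t. myfs_get_block
 * is hypothetical.
 */
#if 0	/* example only */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
	int err;

	sb_start_pagefault(sb);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(sb);
	return block_page_mkwrite_return(err);
}
#endif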
2550
2551int block_truncate_page(struct address_space *mapping,
2552 loff_t from, get_block_t *get_block)
2553{
2554 pgoff_t index = from >> PAGE_SHIFT;
2555 unsigned blocksize;
2556 sector_t iblock;
2557 size_t offset, length, pos;
2558 struct inode *inode = mapping->host;
2559 struct folio *folio;
2560 struct buffer_head *bh;
2561 int err = 0;
2562
2563 blocksize = i_blocksize(inode);
2564 length = from & (blocksize - 1);
2565
2566 /* Block boundary? Nothing to do */
2567 if (!length)
2568 return 0;
2569
2570 length = blocksize - length;
2571 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2572
2573 folio = filemap_grab_folio(mapping, index);
2574 if (IS_ERR(folio))
2575 return PTR_ERR(folio);
2576
2577 bh = folio_buffers(folio);
2578 if (!bh)
2579 bh = create_empty_buffers(folio, blocksize, 0);
2580
2581 /* Find the buffer that contains "offset" */
2582 offset = offset_in_folio(folio, from);
2583 pos = blocksize;
2584 while (offset >= pos) {
2585 bh = bh->b_this_page;
2586 iblock++;
2587 pos += blocksize;
2588 }
2589
2590 if (!buffer_mapped(bh)) {
2591 WARN_ON(bh->b_size != blocksize);
2592 err = get_block(inode, iblock, bh, 0);
2593 if (err)
2594 goto unlock;
2595 /* unmapped? It's a hole - nothing to do */
2596 if (!buffer_mapped(bh))
2597 goto unlock;
2598 }
2599
2600 /* Ok, it's mapped. Make sure it's up-to-date */
2601 if (folio_test_uptodate(folio))
2602 set_buffer_uptodate(bh);
2603
2604 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2605 err = bh_read(bh, 0);
2606 /* Uhhuh. Read error. Complain and punt. */
2607 if (err < 0)
2608 goto unlock;
2609 }
2610
2611 folio_zero_range(folio, offset, length);
2612 mark_buffer_dirty(bh);
2613
2614unlock:
2615 folio_unlock(folio);
2616 folio_put(folio);
2617
2618 return err;
2619}
2620EXPORT_SYMBOL(block_truncate_page);
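
/*
 * Illustrative sketch: a shrinking truncate typically zeroes the tail of
 * the new last block with this helper before releasing the on-disk blocks
 * past the new size (the myfs_* names are hypothetical).
 */
#if 0	/* example only */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... free the on-disk blocks beyond the new i_size ... */
}
#endif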
2621
2622/*
2623 * The generic write folio function for buffer-backed address_spaces
2624 */
2625int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2626 void *get_block)
2627{
2628 struct inode * const inode = folio->mapping->host;
2629 loff_t i_size = i_size_read(inode);
2630
2631 /* Is the folio fully inside i_size? */
2632 if (folio_next_pos(folio) <= i_size)
2633 return __block_write_full_folio(inode, folio, get_block, wbc);
2634
2635 /* Is the folio fully outside i_size? (truncate in progress) */
2636 if (folio_pos(folio) >= i_size) {
2637 folio_unlock(folio);
2638 return 0; /* don't care */
2639 }
2640
2641 /*
2642 * The folio straddles i_size. It must be zeroed out on each and every
2643 * writeback invocation because it may be mmapped. "A file is mapped
2644 * in multiples of the page size. For a file that is not a multiple of
2645 * the page size, the remaining memory is zeroed when mapped, and
2646 * writes to that region are not written out to the file."
2647 */
2648 folio_zero_segment(folio, offset_in_folio(folio, i_size),
2649 folio_size(folio));
2650 return __block_write_full_folio(inode, folio, get_block, wbc);
2651}
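
/*
 * Illustrative sketch: block_write_full_folio() deliberately matches
 * writepage_t (the get_block routine travels through the void *data
 * cookie), so a filesystem's ->writepages can hand it straight to
 * write_cache_pages(). myfs_get_block is hypothetical.
 */
#if 0	/* example only */
static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, block_write_full_folio,
				 myfs_get_block);
}
#endif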
2652
2653sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2654 get_block_t *get_block)
2655{
2656 struct inode *inode = mapping->host;
2657 struct buffer_head tmp = {
2658 .b_size = i_blocksize(inode),
2659 };
2660
2661 get_block(inode, block, &tmp, 0);
2662 return tmp.b_blocknr;
2663}
2664EXPORT_SYMBOL(generic_block_bmap);
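
/*
 * Illustrative sketch: the usual ->bmap wrapper (myfs_get_block is
 * hypothetical).
 */
#if 0	/* example only */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif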
2665
2666static void end_bio_bh_io_sync(struct bio *bio)
2667{
2668 struct buffer_head *bh = bio->bi_private;
2669
2670 if (unlikely(bio_flagged(bio, BIO_QUIET)))
2671 set_bit(BH_Quiet, &bh->b_state);
2672
2673 bh->b_end_io(bh, !bio->bi_status);
2674 bio_put(bio);
2675}
2676
2677static void buffer_set_crypto_ctx(struct bio *bio, const struct buffer_head *bh,
2678 gfp_t gfp_mask)
2679{
2680 const struct address_space *mapping = folio_mapping(bh->b_folio);
2681
2682 /*
2683 * The ext4 journal (jbd2) can submit a buffer_head it directly created
2684 * for a non-pagecache page. fscrypt doesn't care about these.
2685 */
2686 if (!mapping)
2687 return;
2688 fscrypt_set_bio_crypt_ctx(bio, mapping->host,
2689 folio_pos(bh->b_folio) + bh_offset(bh), gfp_mask);
2690}
2691
2692static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2693 enum rw_hint write_hint,
2694 struct writeback_control *wbc)
2695{
2696 const enum req_op op = opf & REQ_OP_MASK;
2697 struct bio *bio;
2698
2699 BUG_ON(!buffer_locked(bh));
2700 BUG_ON(!buffer_mapped(bh));
2701 BUG_ON(!bh->b_end_io);
2702 BUG_ON(buffer_delay(bh));
2703 BUG_ON(buffer_unwritten(bh));
2704
2705 /*
2706 * Only clear out a write error when rewriting
2707 */
2708 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2709 clear_buffer_write_io_error(bh);
2710
2711 if (buffer_meta(bh))
2712 opf |= REQ_META;
2713 if (buffer_prio(bh))
2714 opf |= REQ_PRIO;
2715
2716 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2717
2718 if (IS_ENABLED(CONFIG_FS_ENCRYPTION))
2719 buffer_set_crypto_ctx(bio, bh, GFP_NOIO);
2720
2721 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2722 bio->bi_write_hint = write_hint;
2723
2724 bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
2725
2726 bio->bi_end_io = end_bio_bh_io_sync;
2727 bio->bi_private = bh;
2728
2729 /* Take care of bh's that straddle the end of the device */
2730 guard_bio_eod(bio);
2731
2732 if (wbc) {
2733 wbc_init_bio(wbc, bio);
2734 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2735 }
2736
2737 blk_crypto_submit_bio(bio);
2738}
2739
2740void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2741{
2742 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2743}
2744EXPORT_SYMBOL(submit_bh);
2745
2746void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2747{
2748 lock_buffer(bh);
2749 if (!test_clear_buffer_dirty(bh)) {
2750 unlock_buffer(bh);
2751 return;
2752 }
2753 bh->b_end_io = end_buffer_write_sync;
2754 get_bh(bh);
2755 submit_bh(REQ_OP_WRITE | op_flags, bh);
2756}
2757EXPORT_SYMBOL(write_dirty_buffer);
2758
2759/*
2760 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2761 * and then start new I/O and then wait upon it. The caller must have a ref on
2762 * the buffer_head.
2763 */
2764int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2765{
2766 WARN_ON(atomic_read(&bh->b_count) < 1);
2767 lock_buffer(bh);
2768 if (test_clear_buffer_dirty(bh)) {
2769 /*
2770 * The bh should be mapped, but it might not be if the
2771 * device was hot-removed. Not much we can do but fail the I/O.
2772 */
2773 if (!buffer_mapped(bh)) {
2774 unlock_buffer(bh);
2775 return -EIO;
2776 }
2777
2778 get_bh(bh);
2779 bh->b_end_io = end_buffer_write_sync;
2780 submit_bh(REQ_OP_WRITE | op_flags, bh);
2781 wait_on_buffer(bh);
2782 if (!buffer_uptodate(bh))
2783 return -EIO;
2784 } else {
2785 unlock_buffer(bh);
2786 }
2787 return 0;
2788}
2789EXPORT_SYMBOL(__sync_dirty_buffer);
2790
2791int sync_dirty_buffer(struct buffer_head *bh)
2792{
2793 return __sync_dirty_buffer(bh, REQ_SYNC);
2794}
2795EXPORT_SYMBOL(sync_dirty_buffer);
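
/*
 * Illustrative sketch: the classic read-modify-write cycle for a metadata
 * block, finishing with a data-integrity writeout. myfs_update_meta is a
 * hypothetical caller, not a kernel API.
 */
#if 0	/* example only */
static int myfs_update_meta(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, block);	/* read (or find) the block */
	if (!bh)
		return -EIO;
	/* ... modify bh->b_data ... */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* waits for old and new I/O */
	brelse(bh);
	return err;
}
#endif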
2796
2797static inline int buffer_busy(struct buffer_head *bh)
2798{
2799 return atomic_read(&bh->b_count) |
2800 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2801}
2802
2803static bool
2804drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2805{
2806 struct buffer_head *head = folio_buffers(folio);
2807 struct buffer_head *bh;
2808
2809 bh = head;
2810 do {
2811 if (buffer_busy(bh))
2812 goto failed;
2813 bh = bh->b_this_page;
2814 } while (bh != head);
2815
2816 do {
2817 struct buffer_head *next = bh->b_this_page;
2818
2819 remove_assoc_queue(bh);
2820 bh = next;
2821 } while (bh != head);
2822 *buffers_to_free = head;
2823 folio_detach_private(folio);
2824 return true;
2825failed:
2826 return false;
2827}
2828
2829/**
2830 * try_to_free_buffers - Release buffers attached to this folio.
2831 * @folio: The folio.
2832 *
2833 * If any buffers are in use (dirty, under writeback, elevated refcount),
2834 * no buffers will be freed.
2835 *
2836 * If the folio is dirty but all the buffers are clean then we need to
2837 * be sure to mark the folio clean as well. This is because the folio
2838 * may be against a block device, and a later reattachment of buffers
2839 * to a dirty folio will set *all* buffers dirty. Which would corrupt
2840 * filesystem data on the same device.
2841 *
2842 * The same applies to regular filesystem folios: if all the buffers are
2843 * clean then we set the folio clean and proceed. To do that, we require
2844 * total exclusion from block_dirty_folio(). That is obtained with
2845 * i_private_lock.
2846 *
2847 * Exclusion against try_to_free_buffers may be obtained by either
2848 * locking the folio or by holding its mapping's i_private_lock.
2849 *
2850 * Context: Process context. @folio must be locked. Will not sleep.
2851 * Return: true if all buffers attached to this folio were freed.
2852 */
2853bool try_to_free_buffers(struct folio *folio)
2854{
2855 struct address_space * const mapping = folio->mapping;
2856 struct buffer_head *buffers_to_free = NULL;
2857	bool ret = false;
2858
2859 BUG_ON(!folio_test_locked(folio));
2860 if (folio_test_writeback(folio))
2861 return false;
2862
2863 /* Misconfigured folio check */
2864 if (WARN_ON_ONCE(!folio_buffers(folio)))
2865 return true;
2866
2867 if (mapping == NULL) { /* can this still happen? */
2868 ret = drop_buffers(folio, &buffers_to_free);
2869 goto out;
2870 }
2871
2872 spin_lock(&mapping->i_private_lock);
2873 ret = drop_buffers(folio, &buffers_to_free);
2874
2875 /*
2876 * If the filesystem writes its buffers by hand (eg ext3)
2877 * then we can have clean buffers against a dirty folio. We
2878 * clean the folio here; otherwise the VM will never notice
2879 * that the filesystem did any IO at all.
2880 *
2881 * Also, during truncate, discard_buffer will have marked all
2882 * the folio's buffers clean. We discover that here and clean
2883 * the folio also.
2884 *
2885 * i_private_lock must be held over this entire operation in order
2886 * to synchronise against block_dirty_folio and prevent the
2887 * dirty bit from being lost.
2888 */
2889 if (ret)
2890 folio_cancel_dirty(folio);
2891 spin_unlock(&mapping->i_private_lock);
2892out:
2893 if (buffers_to_free) {
2894 struct buffer_head *bh = buffers_to_free;
2895
2896 do {
2897 struct buffer_head *next = bh->b_this_page;
2898 free_buffer_head(bh);
2899 bh = next;
2900 } while (bh != buffers_to_free);
2901 }
2902 return ret;
2903}
2904EXPORT_SYMBOL(try_to_free_buffers);
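
/*
 * Illustrative sketch: buffer-backed filesystems usually expose this
 * through ->release_folio (the gfp argument is not needed by this helper).
 */
#if 0	/* example only */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	return try_to_free_buffers(folio);
}
#endif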
2905
2906/*
2907 * Buffer-head allocation
2908 */
2909static struct kmem_cache *bh_cachep __ro_after_init;
2910
2911/*
2912 * Once the number of bh's in the machine exceeds this level, we start
2913 * stripping them in writeback.
2914 */
2915static unsigned long max_buffer_heads __ro_after_init;
2916
2917int buffer_heads_over_limit;
2918
2919struct bh_accounting {
2920 int nr; /* Number of live bh's */
2921 int ratelimit; /* Limit cacheline bouncing */
2922};
2923
2924static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2925
2926static void recalc_bh_state(void)
2927{
2928 int i;
2929 int tot = 0;
2930
2931 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
2932 return;
2933 __this_cpu_write(bh_accounting.ratelimit, 0);
2934 for_each_online_cpu(i)
2935 tot += per_cpu(bh_accounting, i).nr;
2936 buffer_heads_over_limit = (tot > max_buffer_heads);
2937}
2938
2939struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2940{
2941 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2942 if (ret) {
2943 INIT_LIST_HEAD(&ret->b_assoc_buffers);
2944 spin_lock_init(&ret->b_uptodate_lock);
2945 preempt_disable();
2946 __this_cpu_inc(bh_accounting.nr);
2947 recalc_bh_state();
2948 preempt_enable();
2949 }
2950 return ret;
2951}
2952EXPORT_SYMBOL(alloc_buffer_head);
2953
2954void free_buffer_head(struct buffer_head *bh)
2955{
2956 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2957 kmem_cache_free(bh_cachep, bh);
2958 preempt_disable();
2959 __this_cpu_dec(bh_accounting.nr);
2960 recalc_bh_state();
2961 preempt_enable();
2962}
2963EXPORT_SYMBOL(free_buffer_head);
2964
2965static int buffer_exit_cpu_dead(unsigned int cpu)
2966{
2967 int i;
2968 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2969
2970 for (i = 0; i < BH_LRU_SIZE; i++) {
2971 brelse(b->bhs[i]);
2972 b->bhs[i] = NULL;
2973 }
2974 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
2975 per_cpu(bh_accounting, cpu).nr = 0;
2976 return 0;
2977}
2978
2979/**
2980 * bh_uptodate_or_lock - Test whether the buffer is uptodate
2981 * @bh: struct buffer_head
2982 *
2983 * Return true if the buffer is up to date. Return false, with the
2984 * buffer locked, if it is not.
2985 */
2986int bh_uptodate_or_lock(struct buffer_head *bh)
2987{
2988 if (!buffer_uptodate(bh)) {
2989 lock_buffer(bh);
2990 if (!buffer_uptodate(bh))
2991 return 0;
2992 unlock_buffer(bh);
2993 }
2994 return 1;
2995}
2996EXPORT_SYMBOL(bh_uptodate_or_lock);
2997
2998/**
2999 * __bh_read - Submit read for a locked buffer
3000 * @bh: struct buffer_head
3001 * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3002 * @wait: wait until the read completes
3003 *
3004 * Returns zero on success or when not waiting, and -EIO if the read failed.
3005 */
3006int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3007{
3008 int ret = 0;
3009
3010 BUG_ON(!buffer_locked(bh));
3011
3012 get_bh(bh);
3013 bh->b_end_io = end_buffer_read_sync;
3014 submit_bh(REQ_OP_READ | op_flags, bh);
3015 if (wait) {
3016 wait_on_buffer(bh);
3017 if (!buffer_uptodate(bh))
3018 ret = -EIO;
3019 }
3020 return ret;
3021}
3022EXPORT_SYMBOL(__bh_read);
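
/*
 * Illustrative sketch: the usual pattern for bringing a single buffer
 * uptodate with the two helpers above: cheap test first, then a waiting
 * read on the already-locked buffer. myfs_read_block is hypothetical.
 */
#if 0	/* example only */
static int myfs_read_block(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, bh not locked */
	return __bh_read(bh, 0, true);	/* reads and unlocks bh */
}
#endif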
3023
3024/**
3025 * __bh_read_batch - Submit read for a batch of unlocked buffers
3026 * @nr: number of buffers in the batch
3027 * @bhs: a batch of struct buffer_head
3028 * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3029 * @force_lock: if set, wait to acquire each buffer's lock; otherwise skip
3030 * any buffer that cannot be locked immediately.
3031 *
3032 * Returns nothing; reads complete asynchronously via end_buffer_read_sync().
3033 */
3034void __bh_read_batch(int nr, struct buffer_head *bhs[],
3035 blk_opf_t op_flags, bool force_lock)
3036{
3037 int i;
3038
3039 for (i = 0; i < nr; i++) {
3040 struct buffer_head *bh = bhs[i];
3041
3042 if (buffer_uptodate(bh))
3043 continue;
3044
3045		if (force_lock)
3046			lock_buffer(bh);
3047		else if (!trylock_buffer(bh))
3048			continue;
3050
3051 if (buffer_uptodate(bh)) {
3052 unlock_buffer(bh);
3053 continue;
3054 }
3055
3056 bh->b_end_io = end_buffer_read_sync;
3057 get_bh(bh);
3058 submit_bh(REQ_OP_READ | op_flags, bh);
3059 }
3060}
3061EXPORT_SYMBOL(__bh_read_batch);
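
/*
 * Illustrative sketch: opportunistic readahead of a run of metadata
 * blocks. With force_lock false, buffers that are already uptodate or
 * locked are simply skipped. myfs_readahead_blocks is hypothetical.
 */
#if 0	/* example only */
static void myfs_readahead_blocks(struct super_block *sb, sector_t first,
		int nr)
{
	struct buffer_head *bhs[16];
	int i;

	nr = min(nr, 16);
	for (i = 0; i < nr; i++) {
		bhs[i] = sb_getblk(sb, first + i);
		if (!bhs[i]) {
			nr = i;
			break;
		}
	}
	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
	for (i = 0; i < nr; i++)
		brelse(bhs[i]);
}
#endif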
3062
3063void __init buffer_init(void)
3064{
3065 unsigned long nrpages;
3066 int ret;
3067
3068 bh_cachep = KMEM_CACHE(buffer_head,
3069 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3070 /*
3071 * Limit the bh occupancy to 10% of ZONE_NORMAL
3072 */
3073 nrpages = (nr_free_buffer_pages() * 10) / 100;
3074 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3075 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3076 NULL, buffer_exit_cpu_dead);
3077 WARN_ON(ret < 0);
3078}