// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files. It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>

#include "ext4.h"
#include <trace/events/ext4.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct fsverity_info *vi;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

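/*
 * Complete read I/O on every folio in the bio: mark each folio uptodate if
 * the bio completed successfully, unlock it, and free any attached
 * post-read context before dropping the bio.
 */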
static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == 0);
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

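/*
 * Decrypt the bio's data from a workqueue, then either continue with the
 * remaining post-read steps or, if decryption failed, complete the read
 * immediately.
 */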
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

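/*
 * Verify the bio's data against the fs-verity Merkle tree from a
 * workqueue, then complete the read.
 */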
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	struct fsverity_info *vi = ctx->vi;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated. So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(vi, bio);

	__read_end_io(bio);
}

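/*
 * Advance to the next enabled post-read step (decryption, then verity) and
 * hand the bio off to the corresponding workqueue; once no steps remain,
 * complete the read.
 */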
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (IS_ENABLED(CONFIG_FS_VERITY) &&
		    ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

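/* True if a post-read context is attached and the bio completed without error. */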
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

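/*
 * Attach a post-read context to the bio if its data will need decryption
 * and/or verity verification once the read completes.
 */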
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       struct fsverity_info *vi)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (vi)
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->vi = vi;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

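/*
 * Verity inodes may be read past i_size (up to s_maxbytes), since their
 * verity metadata is stored beyond EOF; all other inodes are limited to
 * i_size.
 */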
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

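/*
 * Read data into the page cache.  Exactly one of @rac and @folio is
 * non-NULL: with @rac this services readahead, otherwise it reads the
 * single @folio.  @vi is the inode's fs-verity info, or NULL when no
 * verification is needed.
 */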
static int ext4_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t first_block;
	loff_t pos;
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages, folio_pages;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	nr_pages = rac ? readahead_count(rac) : folio_nr_pages(folio);
	for (; nr_pages; nr_pages -= folio_pages) {
		int fully_mapped = 1;
		unsigned int first_hole;
		unsigned int blocks_per_folio;

		if (rac)
			folio = readahead_folio(rac);

		folio_pages = folio_nr_pages(folio);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		blocks_per_folio = folio_size(folio) >> blkbits;
		first_hole = blocks_per_folio;
		pos = folio_pos(folio);
		block_in_file = pos >> blkbits;
		last_block = EXT4_PG_TO_LBLK(inode, folio->index + nr_pages);
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			first_block = map.m_pblk + map_offset;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_folio)
					break;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_folio) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_zero_segment(folio, 0,
							   folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_folio)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_folio)
				goto confused;	/* hole -> non-hole */

			/* Contiguous blocks? */
			if (!page_block)
				first_block = map.m_pblk;
			else if (first_block + page_block != map.m_pblk)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_folio)
					break;
				page_block++;
				block_in_file++;
			}
		}
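		/*
		 * Zero the part of the folio past the last mapped block.  A
		 * folio that turned out to be entirely a hole is finished
		 * right here, without any I/O, after an fs-verity check if
		 * one is required.
		 */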
		if (first_hole != blocks_per_folio) {
			folio_zero_segment(folio, first_hole << blkbits,
					   folio_size(folio));
			if (first_hole == 0) {
				if (vi && !fsverity_verify_folio(vi, folio))
					goto set_error_page;
				folio_end_read(folio, true);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != first_block - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, pos))) {
		submit_and_realloc:
			blk_crypto_submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, vi);
			bio->bi_iter.bi_sector = first_block << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_folio)) {
			blk_crypto_submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = first_block + blocks_per_folio - 1;
		continue;
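	/*
	 * Fall back to the buffer_head-based read path: this folio either
	 * already had buffers, has data after a hole, or maps to
	 * non-contiguous blocks.
	 */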
	confused:
		if (bio) {
			blk_crypto_submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		blk_crypto_submit_bio(bio);
	return 0;
}

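/*
 * ->read_folio address_space operation.  Inline-data inodes are handled by
 * ext4_readpage_inline(); otherwise the inode's fs-verity info is looked up
 * (only for folios below i_size), fs-verity gets a chance to read ahead,
 * and the folio is read via ext4_mpage_readpages().
 */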
int ext4_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct fsverity_info *vi = NULL;
	int ret;

	trace_ext4_read_folio(inode, folio);

	if (ext4_has_inline_data(inode)) {
		ret = ext4_readpage_inline(inode, folio);
		if (ret != -EAGAIN)
			return ret;
	}

	if (folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
		vi = fsverity_get_info(inode);
	if (vi)
		fsverity_readahead(vi, folio->index, folio_nr_pages(folio));
	return ext4_mpage_readpages(inode, vi, NULL, folio);
}

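/*
 * ->readahead address_space operation.  Inodes with inline data are skipped;
 * everything else goes through ext4_mpage_readpages(), with fs-verity info
 * looked up first when the readahead window starts below i_size.
 */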
void ext4_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fsverity_info *vi = NULL;

	/* If the file has inline data, no need to do readahead. */
	if (ext4_has_inline_data(inode))
		return;

	if (readahead_index(rac) < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
		vi = fsverity_get_info(inode);
	if (vi)
		fsverity_readahead(vi, readahead_index(rac),
				   readahead_count(rac));
	ext4_mpage_readpages(inode, vi, rac, NULL);
}

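/*
 * Create the slab cache and mempool used for post-read contexts.  The
 * mempool keeps NUM_PREALLOC_POST_READ_CTXS contexts in reserve so that
 * allocating a context in the read path never fails.
 */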
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}