Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

btrfs: zero out left over bytes after processing compression streams

Don Bailey noticed that our page zeroing for compression at end-io time
isn't complete. This reworks a patch from Linus to push the zeroing
into the zlib and lzo specific functions instead of trying to handle the
corners inside btrfs_decompress_buf2page

Signed-off-by: Chris Mason <clm@fb.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Reported-by: Don A. Bailey <donb@securitymouse.com>
cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Chris Mason; committed by Linus Torvalds.
2f19cad9 7a5a4f97

+67 -5
+31 -2
fs/btrfs/compression.c
···
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-		if (*pg_index == (vcnt - 1) && *pg_offset == 0)
-			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
···
 	}
 
 	return 1;
+}
+
+/*
+ * When uncompressing data, we need to make sure and zero any parts of
+ * the biovec that were not filled in by the decompression code.  pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in.  This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+			    unsigned long pg_index,
+			    unsigned long pg_offset)
+{
+	while (pg_index < vcnt) {
+		struct page *page = bvec[pg_index].bv_page;
+		unsigned long off = bvec[pg_index].bv_offset;
+		unsigned long len = bvec[pg_index].bv_len;
+
+		if (pg_offset < off)
+			pg_offset = off;
+		if (pg_offset < off + len) {
+			unsigned long bytes = off + len - pg_offset;
+			char *kaddr;
+
+			kaddr = kmap_atomic(page);
+			memset(kaddr + pg_offset, 0, bytes);
+			kunmap_atomic(kaddr);
+		}
+		pg_index++;
+		pg_offset = 0;
+	}
 }
+3 -1
fs/btrfs/compression.h
···
 			 unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+			    unsigned long pg_index,
+			    unsigned long pg_offset);
 struct btrfs_compress_op {
 	struct list_head *(*alloc_workspace)(void);
···
+15
fs/btrfs/lzo.c
···
 	}
 done:
 	kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
···
 		goto out;
 	}
 
+	/*
+	 * the caller is already checking against PAGE_SIZE, but lets
+	 * move this check closer to the memcpy/memset
+	 */
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
 	kaddr = kmap_atomic(dest_page);
 	memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+	/*
+	 * btrfs_getblock is doing a zero on the tail of the page too,
+	 * but this will cover anything missing from the decompressed
+	 * data.
+	 */
+	if (bytes < destlen)
+		memset(kaddr+bytes, 0, destlen-bytes);
 	kunmap_atomic(kaddr);
 out:
 	return ret;
+18 -2
fs/btrfs/zlib.c
···
 	zlib_inflateEnd(&workspace->strm);
 	if (data_in)
 		kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
···
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	unsigned long bytes_left = destlen;
+	unsigned long bytes_left;
 	unsigned long total_out = 0;
+	unsigned long pg_offset = 0;
 	char *kaddr;
+
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+	bytes_left = destlen;
 
 	workspace->strm.next_in = data_in;
 	workspace->strm.avail_in = srclen;
···
 		unsigned long buf_start;
 		unsigned long buf_offset;
 		unsigned long bytes;
-		unsigned long pg_offset = 0;
 
 		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
 		if (ret != Z_OK && ret != Z_STREAM_END)
···
 	ret = 0;
 
 	zlib_inflateEnd(&workspace->strm);
+
+	/*
+	 * this should only happen if zlib returned fewer bytes than we
+	 * expected.  btrfs_get_block is responsible for zeroing from the
+	 * end of the inline extent (destlen) to the end of the page
+	 */
+	if (pg_offset < destlen) {
+		kaddr = kmap_atomic(dest_page);
+		memset(kaddr + pg_offset, 0, destlen - pg_offset);
+		kunmap_atomic(kaddr);
+	}
 	return ret;
 }