Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/joern/logfs

* git://git.kernel.org/pub/scm/linux/kernel/git/joern/logfs:
[LogFS] Erase new journal segments
[LogFS] Move reserved segments with journal
[LogFS] Clear PagePrivate when moving journal
Simplify and fix pad_wbuf
Prevent data corruption in logfs_rewrite_block()
Use deactivate_locked_super
Fix logfs_get_sb_final error path
Write out both superblocks on mismatch
Prevent schedule while atomic in __logfs_readdir
Plug memory leak in writeseg_end_io
Limit max_pages for insane devices
Open segment file before using it

+67 -36
+7 -2
fs/logfs/dev_bdev.c
··· 80 80 prefetchw(&bvec->bv_page->flags); 81 81 82 82 end_page_writeback(page); 83 + page_cache_release(page); 83 84 } while (bvec >= bio->bi_io_vec); 84 85 bio_put(bio); 85 86 if (atomic_dec_and_test(&super->s_pending_writes)) ··· 98 97 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); 99 98 int i; 100 99 100 + if (max_pages > BIO_MAX_PAGES) 101 + max_pages = BIO_MAX_PAGES; 101 102 bio = bio_alloc(GFP_NOFS, max_pages); 102 - BUG_ON(!bio); /* FIXME: handle this */ 103 + BUG_ON(!bio); 103 104 104 105 for (i = 0; i < nr_pages; i++) { 105 106 if (i >= max_pages) { ··· 194 191 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); 195 192 int i; 196 193 194 + if (max_pages > BIO_MAX_PAGES) 195 + max_pages = BIO_MAX_PAGES; 197 196 bio = bio_alloc(GFP_NOFS, max_pages); 198 - BUG_ON(!bio); /* FIXME: handle this */ 197 + BUG_ON(!bio); 199 198 200 199 for (i = 0; i < nr_pages; i++) { 201 200 if (i >= max_pages) {
+2 -2
fs/logfs/dir.c
··· 303 303 (filler_t *)logfs_readpage, NULL); 304 304 if (IS_ERR(page)) 305 305 return PTR_ERR(page); 306 - dd = kmap_atomic(page, KM_USER0); 306 + dd = kmap(page); 307 307 BUG_ON(dd->namelen == 0); 308 308 309 309 full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), 310 310 pos, be64_to_cpu(dd->ino), dd->type); 311 - kunmap_atomic(dd, KM_USER0); 311 + kunmap(page); 312 312 page_cache_release(page); 313 313 if (full) 314 314 break;
+7
fs/logfs/journal.c
··· 800 800 { 801 801 struct logfs_super *super = logfs_super(sb); 802 802 struct logfs_area *area = super->s_journal_area; 803 + struct btree_head32 *head = &super->s_reserved_segments; 803 804 u32 segno, ec; 804 805 int i, err; 805 806 ··· 808 807 /* Drop old segments */ 809 808 journal_for_each(i) 810 809 if (super->s_journal_seg[i]) { 810 + btree_remove32(head, super->s_journal_seg[i]); 811 811 logfs_set_segment_unreserved(sb, 812 812 super->s_journal_seg[i], 813 813 super->s_journal_ec[i]); ··· 821 819 super->s_journal_seg[i] = segno; 822 820 super->s_journal_ec[i] = ec; 823 821 logfs_set_segment_reserved(sb, segno); 822 + err = btree_insert32(head, segno, (void *)1, GFP_KERNEL); 823 + BUG_ON(err); /* mempool should prevent this */ 824 + err = logfs_erase_segment(sb, segno, 1); 825 + BUG_ON(err); /* FIXME: remount-ro would be nicer */ 824 826 } 825 827 /* Manually move journal_area */ 828 + freeseg(sb, area->a_segno); 826 829 area->a_segno = super->s_journal_seg[0]; 827 830 area->a_is_open = 0; 828 831 area->a_used_bytes = 0;
+1
fs/logfs/logfs.h
··· 587 587 int logfs_init_mapping(struct super_block *sb); 588 588 void logfs_sync_area(struct logfs_area *area); 589 589 void logfs_sync_segments(struct super_block *sb); 590 + void freeseg(struct super_block *sb, u32 segno); 590 591 591 592 /* area handling */ 592 593 int logfs_init_areas(struct super_block *sb);
+12 -1
fs/logfs/readwrite.c
··· 1594 1594 return ret; 1595 1595 } 1596 1596 1597 - /* Rewrite cannot mark the inode dirty but has to write it immediatly. */ 1598 1597 int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs, 1599 1598 gc_level_t gc_level, long flags) 1600 1599 { ··· 1610 1611 if (level != 0) 1611 1612 alloc_indirect_block(inode, page, 0); 1612 1613 err = logfs_write_buf(inode, page, flags); 1614 + if (!err && shrink_level(gc_level) == 0) { 1615 + /* Rewrite cannot mark the inode dirty but has to 1616 + * write it immediatly. 1617 + * Q: Can't we just create an alias for the inode 1618 + * instead? And if not, why not? 1619 + */ 1620 + if (inode->i_ino == LOGFS_INO_MASTER) 1621 + logfs_write_anchor(inode->i_sb); 1622 + else { 1623 + err = __logfs_write_inode(inode, flags); 1624 + } 1625 + } 1613 1626 } 1614 1627 logfs_put_write_page(page); 1615 1628 return err;
+31 -23
fs/logfs/segment.c
··· 93 93 } while (len); 94 94 } 95 95 96 - /* 97 - * bdev_writeseg will write full pages. Memset the tail to prevent data leaks. 98 - */ 99 - static void pad_wbuf(struct logfs_area *area, int final) 96 + static void pad_partial_page(struct logfs_area *area) 100 97 { 101 98 struct super_block *sb = area->a_sb; 102 - struct logfs_super *super = logfs_super(sb); 103 99 struct page *page; 104 100 u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); 105 101 pgoff_t index = ofs >> PAGE_SHIFT; 106 102 long offset = ofs & (PAGE_SIZE-1); 107 103 u32 len = PAGE_SIZE - offset; 108 104 109 - if (len == PAGE_SIZE) { 110 - /* The math in this function can surely use some love */ 111 - len = 0; 112 - } 113 - if (len) { 114 - BUG_ON(area->a_used_bytes >= super->s_segsize); 115 - 116 - page = get_mapping_page(area->a_sb, index, 0); 105 + if (len % PAGE_SIZE) { 106 + page = get_mapping_page(sb, index, 0); 117 107 BUG_ON(!page); /* FIXME: reserve a pool */ 118 108 memset(page_address(page) + offset, 0xff, len); 119 109 SetPagePrivate(page); 120 110 page_cache_release(page); 121 111 } 112 } 122 113 123 - if (!final) 124 - return; 114 + static void pad_full_pages(struct logfs_area *area) 115 + { 116 + struct super_block *sb = area->a_sb; 117 + struct logfs_super *super = logfs_super(sb); 118 + u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); 119 + u32 len = super->s_segsize - area->a_used_bytes; 120 + pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT; 121 + pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT; 122 + struct page *page; 125 123 126 - area->a_used_bytes += len; 127 - for ( ; area->a_used_bytes < super->s_segsize; 128 - area->a_used_bytes += PAGE_SIZE) { 129 - /* Memset another page */ 130 - index++; 131 - page = get_mapping_page(area->a_sb, index, 0); 124 + while (no_indizes) { 125 + page = get_mapping_page(sb, index, 0); 132 126 BUG_ON(!page); /* FIXME: reserve a pool */ 133 - memset(page_address(page), 0xff, PAGE_SIZE); 127 + SetPageUptodate(page); 128 + memset(page_address(page), 0xff, PAGE_CACHE_SIZE); 134 129 SetPagePrivate(page); 135 130 page_cache_release(page); 131 + index++; 132 + no_indizes--; 136 133 } 134 + } 135 + 136 + /* 137 + * bdev_writeseg will write full pages. Memset the tail to prevent data leaks. 138 + * Also make sure we allocate (and memset) all pages for final writeout. 139 + */ 140 + static void pad_wbuf(struct logfs_area *area, int final) 141 + { 142 + pad_partial_page(area); 143 + if (final) 144 + pad_full_pages(area); 137 145 } 138 146 139 147 /* ··· 691 683 return 0; 692 684 } 693 685 694 - static void freeseg(struct super_block *sb, u32 segno) 686 + void freeseg(struct super_block *sb, u32 segno) 695 687 { 696 688 struct logfs_super *super = logfs_super(sb); 697 689 struct address_space *mapping = super->s_mapping_inode->i_mapping;
+7 -8
fs/logfs/super.c
··· 277 277 } 278 278 if (valid0 && valid1 && ds_cmp(ds0, ds1)) { 279 279 printk(KERN_INFO"Superblocks don't match - fixing.\n"); 280 - return write_one_sb(sb, super->s_devops->find_last_sb); 280 + return logfs_write_sb(sb); 281 281 } 282 282 /* If neither is valid now, something's wrong. Didn't we properly 283 283 * check them before?!? */ ··· 289 289 { 290 290 int err; 291 291 292 + err = logfs_open_segfile(sb); 293 + if (err) 294 + return err; 295 + 292 296 /* Repair any broken superblock copies */ 293 297 err = logfs_recover_sb(sb); 294 298 if (err) ··· 300 296 301 297 /* Check areas for trailing unaccounted data */ 302 298 err = logfs_check_areas(sb); 303 - if (err) 304 - return err; 305 - 306 - err = logfs_open_segfile(sb); 307 299 if (err) 308 300 return err; 309 301 ··· 328 328 329 329 sb->s_root = d_alloc_root(rootdir); 330 330 if (!sb->s_root) 331 - goto fail; 331 + goto fail2; 332 332 333 333 super->s_erase_page = alloc_pages(GFP_KERNEL, 0); 334 334 if (!super->s_erase_page) ··· 572 572 return 0; 573 573 574 574 err1: 575 - up_write(&sb->s_umount); 576 - deactivate_super(sb); 575 + deactivate_locked_super(sb); 577 576 return err; 578 577 err0: 579 578 kfree(super);