Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'dm-3.9-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm

Pull device-mapper fixes from Alasdair Kergon:
"A pair of patches to fix the writethrough mode of the device-mapper
cache target when the device being cached is not itself wrapped with
device-mapper."

* tag 'dm-3.9-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm:
dm cache: reduce bio front_pad size in writeback mode
dm cache: fix writes to cache device in writethrough mode

+38 −13 (1 file changed)
drivers/md/dm-cache-target.c
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -6,6 +6,7 @@
 
 #include "dm.h"
 #include "dm-bio-prison.h"
+#include "dm-bio-record.h"
 #include "dm-cache-metadata.h"
 
 #include <linux/dm-io.h>
@@ -202,10 +201,15 @@
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
 
-	/* writethrough fields */
+	/*
+	 * writethrough fields. These MUST remain at the end of this
+	 * structure and the 'cache' member must be the first as it
+	 * is used to determine the offsetof the writethrough fields.
+	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
 	bio_end_io_t *saved_bi_end_io;
+	struct dm_bio_details bio_details;
 };
 
 struct dm_cache_migration {
@@ -519,16 +513,28 @@
 /*----------------------------------------------------------------
  * Per bio data
  *--------------------------------------------------------------*/
-static struct per_bio_data *get_per_bio_data(struct bio *bio)
+
+/*
+ * If using writeback, leave out struct per_bio_data's writethrough fields.
+ */
+#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
+#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+
+static size_t get_per_bio_data_size(struct cache *cache)
+{
+	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+}
+
+static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
 {
-	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
 	BUG_ON(!pb);
 	return pb;
 }
 
-static struct per_bio_data *init_per_bio_data(struct bio *bio)
+static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, data_size);
 
 	pb->tick = false;
 	pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -574,7 +556,8 @@
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	spin_lock_irqsave(&cache->lock, flags);
 	if (cache->need_tick_bio &&
@@ -654,7 +635,7 @@
 
 static void writethrough_endio(struct bio *bio, int err)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 	bio->bi_end_io = pb->saved_bi_end_io;
 
 	if (err) {
@@ -662,6 +643,7 @@
 		return;
 	}
 
+	dm_bio_restore(&pb->bio_details, bio);
 	remap_to_cache(pb->cache, bio, pb->cblock);
 
 	/*
@@ -682,11 +662,12 @@
 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
 				       dm_oblock_t oblock, dm_cblock_t cblock)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 
 	pb->cache = cache;
 	pb->cblock = cblock;
 	pb->saved_bi_end_io = bio->bi_end_io;
+	dm_bio_record(&pb->bio_details, bio);
 	bio->bi_end_io = writethrough_endio;
 
 	remap_to_origin_clear_discard(pb->cache, bio, oblock);
@@ -1056,7 +1035,8 @@
 
 static void process_flush_bio(struct cache *cache, struct bio *bio)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	BUG_ON(bio->bi_size);
 	if (!pb->req_nr)
@@ -1129,7 +1107,8 @@
 	dm_oblock_t block = get_bio_block(cache, bio);
 	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
 	struct policy_result lookup_result;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	bool discarded_block = is_discarded_oblock(cache, block);
 	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
 
@@ -1904,7 +1881,6 @@
 
 	cache->ti = ca->ti;
 	ti->private = cache;
-	ti->per_bio_data_size = sizeof(struct per_bio_data);
 	ti->num_flush_bios = 2;
 	ti->flush_supported = true;
 
@@ -1912,6 +1890,7 @@
 	ti->discard_zeroes_data_unsupported = true;
 
 	memcpy(&cache->features, &ca->features, sizeof(cache->features));
+	ti->per_bio_data_size = get_per_bio_data_size(cache);
 
 	cache->callbacks.congested_fn = cache_is_congested;
 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2115,6 +2092,7 @@
 
 	int r;
 	dm_oblock_t block = get_bio_block(cache, bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
 	bool discarded_block;
 	struct dm_bio_prison_cell *cell;
@@ -2132,7 +2108,7 @@
 		return DM_MAPIO_REMAPPED;
 	}
 
-	pb = init_per_bio_data(bio);
+	pb = init_per_bio_data(bio, pb_data_size);
 
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
 		defer_bio(cache, bio);
@@ -2217,7 +2193,8 @@
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;
-	struct per_bio_data *pb = get_per_bio_data(bio);
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	if (pb->tick) {
 		policy_tick(cache->policy);