Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'md-3.6' of git://neil.brown.name/md

Pull additional md update from NeilBrown:
"This contains a few patches that depend on plugging changes in the
block layer so needed to wait for those.

It also contains a Kconfig fix for the new RAID10 support in dm-raid."

* tag 'md-3.6' of git://neil.brown.name/md:
md/dm-raid: DM_RAID should select MD_RAID10
md/raid1: submit IO from originating thread instead of md thread.
raid5: raid5d handle stripe in batch way
raid5: make_request use batch stripe release
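
For background (not part of this pull): the raid1 and raid5 patches below lean on the block layer's per-task plug callbacks merged earlier in the 3.6 cycle. blk_check_plugged() attaches a driver-owned callback structure to the submitting task's plug, and the callback fires when the plug is flushed. A minimal sketch of the pattern, with my_plug_cb/my_unplug/my_submit as made-up names for illustration:

/*
 * Sketch only: my_plug_cb, my_unplug and my_submit are hypothetical,
 * illustrating the 3.6 blk_check_plugged() pattern used below.
 */
struct my_plug_cb {
	struct blk_plug_cb	cb;	/* must be embedded; block layer tracks it */
	struct bio_list		pending;	/* driver-private deferred work */
};

/* Runs when the task's plug is flushed: explicitly via blk_finish_plug()
 * (from_schedule == false), or implicitly when the task sleeps
 * (from_schedule == true: defer heavy work, don't submit inline). */
static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);

	/* ... submit or hand off everything on plug->pending ... */
	kfree(plug);	/* the callback owns the allocation */
}

static void my_submit(struct bio *bio, void *data)
{
	/* Returns this task's existing callback for (my_unplug, data),
	 * allocates a zeroed one if absent, or NULL if the task is not
	 * plugged. A zeroed bio_list is a valid empty list. */
	struct blk_plug_cb *cb =
		blk_check_plugged(my_unplug, data, sizeof(struct my_plug_cb));

	if (cb) {
		struct my_plug_cb *plug =
			container_of(cb, struct my_plug_cb, cb);
		bio_list_add(&plug->pending, bio);	/* defer until unplug */
	} else {
		generic_make_request(bio);		/* not plugged: submit now */
	}
}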

5 files changed: +150 -22
drivers/md/Kconfig (+3 -2)

···
 	  needed for live data migration tools such as 'pvmove'.
 
 config DM_RAID
-	tristate "RAID 1/4/5/6 target"
+	tristate "RAID 1/4/5/6/10 target"
 	depends on BLK_DEV_DM
 	select MD_RAID1
+	select MD_RAID10
 	select MD_RAID456
 	select BLK_DEV_MD
 	---help---
-	  A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings
+	  A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings
 
 	  A RAID-5 set of N drives with a capacity of C MB per drive provides
 	  the capacity of C * (N - 1) MB, and protects against a failure
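This is the Kconfig fix mentioned in the pull message: dm-raid gained a raid10 mapping, so a config with DM_RAID enabled but MD_RAID10 disabled would leave out the raid10 personality that dm-raid now calls into. DM_RAID therefore selects MD_RAID10, just as it already selects MD_RAID1 and MD_RAID456.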
drivers/md/bitmap.c (+1 -1)

···
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->counts.lock);
-			io_schedule();
+			schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
 		}
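Why this one-liner matters with the new plugging (a reading of the code, not stated in the merge message): io_schedule() flushes the current task's plug synchronously before sleeping, while plain schedule() flushes it through the scheduler hook with from_schedule == true. A task napping here is waiting for bitmap counters to drop, not for its own device I/O, so any plugged raid1 writes get handed to the md thread rather than submitted from this context, and the sleep is no longer accounted as iowait.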
drivers/md/raid1.c (+54 -3)

···
 		pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
+struct raid1_plug_cb {
+	struct blk_plug_cb	cb;
+	struct bio_list		pending;
+	int			pending_cnt;
+};
+
+static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
+						  cb);
+	struct mddev *mddev = plug->cb.data;
+	struct r1conf *conf = mddev->private;
+	struct bio *bio;
+
+	if (from_schedule) {
+		spin_lock_irq(&conf->device_lock);
+		bio_list_merge(&conf->pending_bio_list, &plug->pending);
+		conf->pending_count += plug->pending_cnt;
+		spin_unlock_irq(&conf->device_lock);
+		md_wakeup_thread(mddev->thread);
+		kfree(plug);
+		return;
+	}
+
+	/* we aren't scheduling, so we can do the write-out directly. */
+	bio = bio_list_get(&plug->pending);
+	bitmap_unplug(mddev->bitmap);
+	wake_up(&conf->wait_barrier);
+
+	while (bio) { /* submit pending writes */
+		struct bio *next = bio->bi_next;
+		bio->bi_next = NULL;
+		generic_make_request(bio);
+		bio = next;
+	}
+	kfree(plug);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r1conf *conf = mddev->private;
···
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	struct md_rdev *blocked_rdev;
+	struct blk_plug_cb *cb;
+	struct raid1_plug_cb *plug = NULL;
 	int first_clone;
 	int sectors_handled;
 	int max_sectors;
···
 		mbio->bi_private = r1_bio;
 
 		atomic_inc(&r1_bio->remaining);
+
+		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
+		if (cb)
+			plug = container_of(cb, struct raid1_plug_cb, cb);
+		else
+			plug = NULL;
 		spin_lock_irqsave(&conf->device_lock, flags);
-		bio_list_add(&conf->pending_bio_list, mbio);
-		conf->pending_count++;
+		if (plug) {
+			bio_list_add(&plug->pending, mbio);
+			plug->pending_cnt++;
+		} else {
+			bio_list_add(&conf->pending_bio_list, mbio);
+			conf->pending_count++;
+		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
-		if (!mddev_check_plugged(mddev))
+		if (!plug)
 			md_wakeup_thread(mddev->thread);
 	}
 	/* Mustn't call r1_bio_write_done before this next test,
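What the raid1 hunks buy, from the submitting task's point of view (hypothetical caller; bio1 and bio2 stand for WRITE bios built elsewhere):

	struct blk_plug plug;
	struct bio *bio1, *bio2;	/* assume: prepared WRITE bios */

	blk_start_plug(&plug);
	/* each write through raid1's make_request() now lands on the
	 * task-local raid1_plug_cb->pending list instead of
	 * conf->pending_bio_list, so no per-bio md-thread wakeup */
	submit_bio(WRITE, bio1);
	submit_bio(WRITE, bio2);
	/* flushing the plug runs raid1_unplug(cb, false): bitmap_unplug()
	 * and generic_make_request() happen on *this* thread, i.e. IO is
	 * submitted from the originating thread. Had the task slept
	 * instead, the scheduler's flush (from_schedule == true) would
	 * splice the bios back onto conf->pending_bio_list and wake the
	 * md thread, preserving the old behaviour. */
	blk_finish_plug(&plug);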
drivers/md/raid5.c (+91 -16)

···
 	} else {
 		if (atomic_read(&sh->count)) {
 			BUG_ON(!list_empty(&sh->lru)
-			    && !test_bit(STRIPE_EXPANDING, &sh->state));
+			    && !test_bit(STRIPE_EXPANDING, &sh->state)
+			    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
 		} else {
 			if (!test_bit(STRIPE_HANDLE, &sh->state))
 				atomic_inc(&conf->active_stripes);
···
 	return sh;
 }
 
+struct raid5_plug_cb {
+	struct blk_plug_cb	cb;
+	struct list_head	list;
+};
+
+static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
+{
+	struct raid5_plug_cb *cb = container_of(
+		blk_cb, struct raid5_plug_cb, cb);
+	struct stripe_head *sh;
+	struct mddev *mddev = cb->cb.data;
+	struct r5conf *conf = mddev->private;
+
+	if (cb->list.next && !list_empty(&cb->list)) {
+		spin_lock_irq(&conf->device_lock);
+		while (!list_empty(&cb->list)) {
+			sh = list_first_entry(&cb->list, struct stripe_head, lru);
+			list_del_init(&sh->lru);
+			/*
+			 * avoid race release_stripe_plug() sees
+			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
+			 * is still in our list
+			 */
+			smp_mb__before_clear_bit();
+			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+			__release_stripe(conf, sh);
+		}
+		spin_unlock_irq(&conf->device_lock);
+	}
+	kfree(cb);
+}
+
+static void release_stripe_plug(struct mddev *mddev,
+				struct stripe_head *sh)
+{
+	struct blk_plug_cb *blk_cb = blk_check_plugged(
+		raid5_unplug, mddev,
+		sizeof(struct raid5_plug_cb));
+	struct raid5_plug_cb *cb;
+
+	if (!blk_cb) {
+		release_stripe(sh);
+		return;
+	}
+
+	cb = container_of(blk_cb, struct raid5_plug_cb, cb);
+
+	if (cb->list.next == NULL)
+		INIT_LIST_HEAD(&cb->list);
+
+	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
+		list_add_tail(&sh->lru, &cb->list);
+	else
+		release_stripe(sh);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bi)
 {
 	struct r5conf *conf = mddev->private;
···
 			if ((bi->bi_rw & REQ_NOIDLE) &&
 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
-			mddev_check_plugged(mddev);
-			release_stripe(sh);
+			release_stripe_plug(mddev, sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
···
 	return handled;
 }
 
+#define MAX_STRIPE_BATCH 8
+static int handle_active_stripes(struct r5conf *conf)
+{
+	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
+	int i, batch_size = 0;
+
+	while (batch_size < MAX_STRIPE_BATCH &&
+			(sh = __get_priority_stripe(conf)) != NULL)
+		batch[batch_size++] = sh;
+
+	if (batch_size == 0)
+		return batch_size;
+	spin_unlock_irq(&conf->device_lock);
+
+	for (i = 0; i < batch_size; i++)
+		handle_stripe(batch[i]);
+
+	cond_resched();
+
+	spin_lock_irq(&conf->device_lock);
+	for (i = 0; i < batch_size; i++)
+		__release_stripe(conf, batch[i]);
+	return batch_size;
+}
 
 /*
  * This is our raid5 kernel thread.
···
  */
 static void raid5d(struct mddev *mddev)
 {
-	struct stripe_head *sh;
 	struct r5conf *conf = mddev->private;
 	int handled;
 	struct blk_plug plug;
···
 	spin_lock_irq(&conf->device_lock);
 	while (1) {
 		struct bio *bio;
+		int batch_size;
 
 		if (
 		    !list_empty(&conf->bitmap_list)) {
···
 			handled++;
 		}
 
-		sh = __get_priority_stripe(conf);
-
-		if (!sh)
+		batch_size = handle_active_stripes(conf);
+		if (!batch_size)
 			break;
-		spin_unlock_irq(&conf->device_lock);
-
-		handled++;
-		handle_stripe(sh);
-		release_stripe(sh);
-		cond_resched();
+		handled += batch_size;
 
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+		if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+			spin_unlock_irq(&conf->device_lock);
 			md_check_recovery(mddev);
-
-		spin_lock_irq(&conf->device_lock);
+			spin_lock_irq(&conf->device_lock);
+		}
 	}
 	pr_debug("%d stripes handled\n", handled);
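Design note on the raid5d() rework: __get_priority_stripe() and __release_stripe() both need conf->device_lock, so the old loop paid an unlock/relock cycle per stripe. handle_active_stripes() is entered with the lock held, drains up to MAX_STRIPE_BATCH (8) stripes in one go, drops the lock once for the heavy handle_stripe() calls plus a cond_resched(), then retakes it once to release the whole batch. The new STRIPE_ON_UNPLUG_LIST flag marks a stripe parked on a plug's list so the BUG_ON in the first hunk does not treat it as misplaced, and md_check_recovery() now gets an explicit unlock/relock because the batch path returns with the lock held.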
drivers/md/raid5.h (+1 -0)

···
 	STRIPE_BIOFILL_RUN,
 	STRIPE_COMPUTE_RUN,
 	STRIPE_OPS_REQ_PENDING,
+	STRIPE_ON_UNPLUG_LIST,
 };
 
 /*