Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md:
md: raid5 crash during degradation
md/raid5: never wait for bad-block acks on failed device.
md: ensure new badblocks are handled promptly.
md: bad blocks shouldn't cause a Blocked status on a Faulty device.
md: take a reference to mddev during sysfs access.
md: refine interpretation of "hold_active == UNTIL_IOCTL".
md/bitmap: ensure updates to page_attrs are properly locked.

+32 -7
+4
drivers/md/bitmap.c
··· 1106 1106 */ 1107 1107 int i; 1108 1108 1109 + spin_lock_irq(&bitmap->lock); 1109 1110 for (i = 0; i < bitmap->file_pages; i++) 1110 1111 set_page_attr(bitmap, bitmap->filemap[i], 1111 1112 BITMAP_PAGE_NEEDWRITE); 1112 1113 bitmap->allclean = 0; 1114 + spin_unlock_irq(&bitmap->lock); 1113 1115 } 1114 1116 1115 1117 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) ··· 1607 1605 for (chunk = s; chunk <= e; chunk++) { 1608 1606 sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap); 1609 1607 bitmap_set_memory_bits(bitmap, sec, 1); 1608 + spin_lock_irq(&bitmap->lock); 1610 1609 bitmap_file_set_bit(bitmap, sec); 1610 + spin_unlock_irq(&bitmap->lock); 1611 1611 if (sec < bitmap->mddev->recovery_cp) 1612 1612 /* We are asserting that the array is dirty, 1613 1613 * so move the recovery_cp address back so
+23 -4
drivers/md/md.c
··· 570 570 mddev->ctime == 0 && !mddev->hold_active) { 571 571 /* Array is not configured at all, and not held active, 572 572 * so destroy it */ 573 - list_del(&mddev->all_mddevs); 573 + list_del_init(&mddev->all_mddevs); 574 574 bs = mddev->bio_set; 575 575 mddev->bio_set = NULL; 576 576 if (mddev->gendisk) { ··· 2546 2546 sep = ","; 2547 2547 } 2548 2548 if (test_bit(Blocked, &rdev->flags) || 2549 - rdev->badblocks.unacked_exist) { 2549 + (rdev->badblocks.unacked_exist 2550 + && !test_bit(Faulty, &rdev->flags))) { 2550 2551 len += sprintf(page+len, "%sblocked", sep); 2551 2552 sep = ","; 2552 2553 } ··· 3789 3788 if (err) 3790 3789 return err; 3791 3790 else { 3791 + if (mddev->hold_active == UNTIL_IOCTL) 3792 + mddev->hold_active = 0; 3792 3793 sysfs_notify_dirent_safe(mddev->sysfs_state); 3793 3794 return len; 3794 3795 } ··· 4490 4487 4491 4488 if (!entry->show) 4492 4489 return -EIO; 4490 + spin_lock(&all_mddevs_lock); 4491 + if (list_empty(&mddev->all_mddevs)) { 4492 + spin_unlock(&all_mddevs_lock); 4493 + return -EBUSY; 4494 + } 4495 + mddev_get(mddev); 4496 + spin_unlock(&all_mddevs_lock); 4497 + 4493 4498 rv = mddev_lock(mddev); 4494 4499 if (!rv) { 4495 4500 rv = entry->show(mddev, page); 4496 4501 mddev_unlock(mddev); 4497 4502 } 4503 + mddev_put(mddev); 4498 4504 return rv; 4499 4505 } 4500 4506 ··· 4519 4507 return -EIO; 4520 4508 if (!capable(CAP_SYS_ADMIN)) 4521 4509 return -EACCES; 4510 + spin_lock(&all_mddevs_lock); 4511 + if (list_empty(&mddev->all_mddevs)) { 4512 + spin_unlock(&all_mddevs_lock); 4513 + return -EBUSY; 4514 + } 4515 + mddev_get(mddev); 4516 + spin_unlock(&all_mddevs_lock); 4522 4517 rv = mddev_lock(mddev); 4523 - if (mddev->hold_active == UNTIL_IOCTL) 4524 - mddev->hold_active = 0; 4525 4518 if (!rv) { 4526 4519 rv = entry->store(mddev, page, length); 4527 4520 mddev_unlock(mddev); 4528 4521 } 4522 + mddev_put(mddev); 4529 4523 return rv; 4530 4524 } 4531 4525 ··· 7858 7840 s + rdev->data_offset, sectors, acknowledged); 7859 
7841 if (rv) { 7860 7842 /* Make sure they get written out promptly */ 7843 + sysfs_notify_dirent_safe(rdev->sysfs_state); 7861 7844 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); 7862 7845 md_wakeup_thread(rdev->mddev->thread); 7863 7846 }
+5 -3
drivers/md/raid5.c
··· 3036 3036 if (dev->written) 3037 3037 s->written++; 3038 3038 rdev = rcu_dereference(conf->disks[i].rdev); 3039 + if (rdev && test_bit(Faulty, &rdev->flags)) 3040 + rdev = NULL; 3039 3041 if (rdev) { 3040 3042 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3041 3043 &first_bad, &bad_sectors); ··· 3065 3063 } 3066 3064 } else if (test_bit(In_sync, &rdev->flags)) 3067 3065 set_bit(R5_Insync, &dev->flags); 3068 - else if (!test_bit(Faulty, &rdev->flags)) { 3066 + else { 3069 3067 /* in sync if before recovery_offset */ 3070 3068 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3071 3069 set_bit(R5_Insync, &dev->flags); 3072 3070 } 3073 - if (test_bit(R5_WriteError, &dev->flags)) { 3071 + if (rdev && test_bit(R5_WriteError, &dev->flags)) { 3074 3072 clear_bit(R5_Insync, &dev->flags); 3075 3073 if (!test_bit(Faulty, &rdev->flags)) { 3076 3074 s->handle_bad_blocks = 1; ··· 3078 3076 } else 3079 3077 clear_bit(R5_WriteError, &dev->flags); 3080 3078 } 3081 - if (test_bit(R5_MadeGood, &dev->flags)) { 3079 + if (rdev && test_bit(R5_MadeGood, &dev->flags)) { 3082 3080 if (!test_bit(Faulty, &rdev->flags)) { 3083 3081 s->handle_bad_blocks = 1; 3084 3082 atomic_inc(&rdev->nr_pending);