aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/md.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--drivers/md/md.c413
1 files changed, 322 insertions, 91 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5f154ef1e4be..f4f5f82f9f53 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,11 +39,13 @@
39#include <linux/buffer_head.h> /* for invalidate_bdev */ 39#include <linux/buffer_head.h> /* for invalidate_bdev */
40#include <linux/poll.h> 40#include <linux/poll.h>
41#include <linux/ctype.h> 41#include <linux/ctype.h>
42#include <linux/string.h>
42#include <linux/hdreg.h> 43#include <linux/hdreg.h>
43#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
44#include <linux/random.h> 45#include <linux/random.h>
45#include <linux/reboot.h> 46#include <linux/reboot.h>
46#include <linux/file.h> 47#include <linux/file.h>
48#include <linux/compat.h>
47#include <linux/delay.h> 49#include <linux/delay.h>
48#include <linux/raid/md_p.h> 50#include <linux/raid/md_p.h>
49#include <linux/raid/md_u.h> 51#include <linux/raid/md_u.h>
@@ -68,6 +70,12 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
68#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } 70#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
69 71
70/* 72/*
73 * Default number of read corrections we'll attempt on an rdev
74 * before ejecting it from the array. We divide the read error
75 * count by 2 for every hour elapsed between read errors.
76 */
77#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
78/*
71 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 79 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
72 * is 1000 KB/sec, so the extra system load does not show up that much. 80 * is 1000 KB/sec, so the extra system load does not show up that much.
73 * Increase it if you want to have more _guaranteed_ speed. Note that 81 * Increase it if you want to have more _guaranteed_ speed. Note that
@@ -213,12 +221,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
213 return 0; 221 return 0;
214 } 222 }
215 rcu_read_lock(); 223 rcu_read_lock();
216 if (mddev->suspended) { 224 if (mddev->suspended || mddev->barrier) {
217 DEFINE_WAIT(__wait); 225 DEFINE_WAIT(__wait);
218 for (;;) { 226 for (;;) {
219 prepare_to_wait(&mddev->sb_wait, &__wait, 227 prepare_to_wait(&mddev->sb_wait, &__wait,
220 TASK_UNINTERRUPTIBLE); 228 TASK_UNINTERRUPTIBLE);
221 if (!mddev->suspended) 229 if (!mddev->suspended && !mddev->barrier)
222 break; 230 break;
223 rcu_read_unlock(); 231 rcu_read_unlock();
224 schedule(); 232 schedule();
@@ -260,10 +268,110 @@ static void mddev_resume(mddev_t *mddev)
260 268
261int mddev_congested(mddev_t *mddev, int bits) 269int mddev_congested(mddev_t *mddev, int bits)
262{ 270{
271 if (mddev->barrier)
272 return 1;
263 return mddev->suspended; 273 return mddev->suspended;
264} 274}
265EXPORT_SYMBOL(mddev_congested); 275EXPORT_SYMBOL(mddev_congested);
266 276
277/*
278 * Generic barrier handling for md
279 */
280
281#define POST_REQUEST_BARRIER ((void*)1)
282
283static void md_end_barrier(struct bio *bio, int err)
284{
285 mdk_rdev_t *rdev = bio->bi_private;
286 mddev_t *mddev = rdev->mddev;
287 if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
288 set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
289
290 rdev_dec_pending(rdev, mddev);
291
292 if (atomic_dec_and_test(&mddev->flush_pending)) {
293 if (mddev->barrier == POST_REQUEST_BARRIER) {
294 /* This was a post-request barrier */
295 mddev->barrier = NULL;
296 wake_up(&mddev->sb_wait);
297 } else
298 /* The pre-request barrier has finished */
299 schedule_work(&mddev->barrier_work);
300 }
301 bio_put(bio);
302}
303
304static void submit_barriers(mddev_t *mddev)
305{
306 mdk_rdev_t *rdev;
307
308 rcu_read_lock();
309 list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
310 if (rdev->raid_disk >= 0 &&
311 !test_bit(Faulty, &rdev->flags)) {
312 /* Take two references, one is dropped
313 * when request finishes, one after
314 * we reclaim rcu_read_lock
315 */
316 struct bio *bi;
317 atomic_inc(&rdev->nr_pending);
318 atomic_inc(&rdev->nr_pending);
319 rcu_read_unlock();
320 bi = bio_alloc(GFP_KERNEL, 0);
321 bi->bi_end_io = md_end_barrier;
322 bi->bi_private = rdev;
323 bi->bi_bdev = rdev->bdev;
324 atomic_inc(&mddev->flush_pending);
325 submit_bio(WRITE_BARRIER, bi);
326 rcu_read_lock();
327 rdev_dec_pending(rdev, mddev);
328 }
329 rcu_read_unlock();
330}
331
332static void md_submit_barrier(struct work_struct *ws)
333{
334 mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
335 struct bio *bio = mddev->barrier;
336
337 atomic_set(&mddev->flush_pending, 1);
338
339 if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
340 bio_endio(bio, -EOPNOTSUPP);
341 else if (bio->bi_size == 0)
342 /* an empty barrier - all done */
343 bio_endio(bio, 0);
344 else {
345 bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
346 if (mddev->pers->make_request(mddev->queue, bio))
347 generic_make_request(bio);
348 mddev->barrier = POST_REQUEST_BARRIER;
349 submit_barriers(mddev);
350 }
351 if (atomic_dec_and_test(&mddev->flush_pending)) {
352 mddev->barrier = NULL;
353 wake_up(&mddev->sb_wait);
354 }
355}
356
357void md_barrier_request(mddev_t *mddev, struct bio *bio)
358{
359 spin_lock_irq(&mddev->write_lock);
360 wait_event_lock_irq(mddev->sb_wait,
361 !mddev->barrier,
362 mddev->write_lock, /*nothing*/);
363 mddev->barrier = bio;
364 spin_unlock_irq(&mddev->write_lock);
365
366 atomic_set(&mddev->flush_pending, 1);
367 INIT_WORK(&mddev->barrier_work, md_submit_barrier);
368
369 submit_barriers(mddev);
370
371 if (atomic_dec_and_test(&mddev->flush_pending))
372 schedule_work(&mddev->barrier_work);
373}
374EXPORT_SYMBOL(md_barrier_request);
267 375
268static inline mddev_t *mddev_get(mddev_t *mddev) 376static inline mddev_t *mddev_get(mddev_t *mddev)
269{ 377{
@@ -363,6 +471,7 @@ static mddev_t * mddev_find(dev_t unit)
363 471
364 mutex_init(&new->open_mutex); 472 mutex_init(&new->open_mutex);
365 mutex_init(&new->reconfig_mutex); 473 mutex_init(&new->reconfig_mutex);
474 mutex_init(&new->bitmap_info.mutex);
366 INIT_LIST_HEAD(&new->disks); 475 INIT_LIST_HEAD(&new->disks);
367 INIT_LIST_HEAD(&new->all_mddevs); 476 INIT_LIST_HEAD(&new->all_mddevs);
368 init_timer(&new->safemode_timer); 477 init_timer(&new->safemode_timer);
@@ -370,6 +479,7 @@ static mddev_t * mddev_find(dev_t unit)
370 atomic_set(&new->openers, 0); 479 atomic_set(&new->openers, 0);
371 atomic_set(&new->active_io, 0); 480 atomic_set(&new->active_io, 0);
372 spin_lock_init(&new->write_lock); 481 spin_lock_init(&new->write_lock);
482 atomic_set(&new->flush_pending, 0);
373 init_waitqueue_head(&new->sb_wait); 483 init_waitqueue_head(&new->sb_wait);
374 init_waitqueue_head(&new->recovery_wait); 484 init_waitqueue_head(&new->recovery_wait);
375 new->reshape_position = MaxSector; 485 new->reshape_position = MaxSector;
@@ -748,7 +858,7 @@ struct super_type {
748 */ 858 */
749int md_check_no_bitmap(mddev_t *mddev) 859int md_check_no_bitmap(mddev_t *mddev)
750{ 860{
751 if (!mddev->bitmap_file && !mddev->bitmap_offset) 861 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
752 return 0; 862 return 0;
753 printk(KERN_ERR "%s: bitmaps are not supported for %s\n", 863 printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
754 mdname(mddev), mddev->pers->name); 864 mdname(mddev), mddev->pers->name);
@@ -876,8 +986,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
876 mddev->raid_disks = sb->raid_disks; 986 mddev->raid_disks = sb->raid_disks;
877 mddev->dev_sectors = sb->size * 2; 987 mddev->dev_sectors = sb->size * 2;
878 mddev->events = ev1; 988 mddev->events = ev1;
879 mddev->bitmap_offset = 0; 989 mddev->bitmap_info.offset = 0;
880 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 990 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
881 991
882 if (mddev->minor_version >= 91) { 992 if (mddev->minor_version >= 91) {
883 mddev->reshape_position = sb->reshape_position; 993 mddev->reshape_position = sb->reshape_position;
@@ -911,8 +1021,9 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
911 mddev->max_disks = MD_SB_DISKS; 1021 mddev->max_disks = MD_SB_DISKS;
912 1022
913 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 1023 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
914 mddev->bitmap_file == NULL) 1024 mddev->bitmap_info.file == NULL)
915 mddev->bitmap_offset = mddev->default_bitmap_offset; 1025 mddev->bitmap_info.offset =
1026 mddev->bitmap_info.default_offset;
916 1027
917 } else if (mddev->pers == NULL) { 1028 } else if (mddev->pers == NULL) {
918 /* Insist on good event counter while assembling */ 1029 /* Insist on good event counter while assembling */
@@ -1029,7 +1140,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1029 sb->layout = mddev->layout; 1140 sb->layout = mddev->layout;
1030 sb->chunk_size = mddev->chunk_sectors << 9; 1141 sb->chunk_size = mddev->chunk_sectors << 9;
1031 1142
1032 if (mddev->bitmap && mddev->bitmap_file == NULL) 1143 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1033 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1144 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1034 1145
1035 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1146 sb->disks[0].state = (1<<MD_DISK_REMOVED);
@@ -1107,7 +1218,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1107{ 1218{
1108 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1219 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1109 return 0; /* component must fit device */ 1220 return 0; /* component must fit device */
1110 if (rdev->mddev->bitmap_offset) 1221 if (rdev->mddev->bitmap_info.offset)
1111 return 0; /* can't move bitmap */ 1222 return 0; /* can't move bitmap */
1112 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 1223 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
1113 if (!num_sectors || num_sectors > rdev->sb_start) 1224 if (!num_sectors || num_sectors > rdev->sb_start)
@@ -1286,8 +1397,8 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1286 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1397 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1287 mddev->dev_sectors = le64_to_cpu(sb->size); 1398 mddev->dev_sectors = le64_to_cpu(sb->size);
1288 mddev->events = ev1; 1399 mddev->events = ev1;
1289 mddev->bitmap_offset = 0; 1400 mddev->bitmap_info.offset = 0;
1290 mddev->default_bitmap_offset = 1024 >> 9; 1401 mddev->bitmap_info.default_offset = 1024 >> 9;
1291 1402
1292 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1403 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1293 memcpy(mddev->uuid, sb->set_uuid, 16); 1404 memcpy(mddev->uuid, sb->set_uuid, 16);
@@ -1295,8 +1406,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1295 mddev->max_disks = (4096-256)/2; 1406 mddev->max_disks = (4096-256)/2;
1296 1407
1297 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1408 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1298 mddev->bitmap_file == NULL ) 1409 mddev->bitmap_info.file == NULL )
1299 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1410 mddev->bitmap_info.offset =
1411 (__s32)le32_to_cpu(sb->bitmap_offset);
1300 1412
1301 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1413 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1302 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1414 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
@@ -1390,19 +1502,17 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1390 sb->level = cpu_to_le32(mddev->level); 1502 sb->level = cpu_to_le32(mddev->level);
1391 sb->layout = cpu_to_le32(mddev->layout); 1503 sb->layout = cpu_to_le32(mddev->layout);
1392 1504
1393 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1505 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1394 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1506 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1395 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1507 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1396 } 1508 }
1397 1509
1398 if (rdev->raid_disk >= 0 && 1510 if (rdev->raid_disk >= 0 &&
1399 !test_bit(In_sync, &rdev->flags)) { 1511 !test_bit(In_sync, &rdev->flags)) {
1400 if (rdev->recovery_offset > 0) { 1512 sb->feature_map |=
1401 sb->feature_map |= 1513 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1402 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1514 sb->recovery_offset =
1403 sb->recovery_offset = 1515 cpu_to_le64(rdev->recovery_offset);
1404 cpu_to_le64(rdev->recovery_offset);
1405 }
1406 } 1516 }
1407 1517
1408 if (mddev->reshape_position != MaxSector) { 1518 if (mddev->reshape_position != MaxSector) {
@@ -1436,7 +1546,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1436 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1546 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1437 else if (test_bit(In_sync, &rdev2->flags)) 1547 else if (test_bit(In_sync, &rdev2->flags))
1438 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1548 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1439 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0) 1549 else if (rdev2->raid_disk >= 0)
1440 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1550 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1441 else 1551 else
1442 sb->dev_roles[i] = cpu_to_le16(0xffff); 1552 sb->dev_roles[i] = cpu_to_le16(0xffff);
@@ -1458,7 +1568,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1458 max_sectors -= rdev->data_offset; 1568 max_sectors -= rdev->data_offset;
1459 if (!num_sectors || num_sectors > max_sectors) 1569 if (!num_sectors || num_sectors > max_sectors)
1460 num_sectors = max_sectors; 1570 num_sectors = max_sectors;
1461 } else if (rdev->mddev->bitmap_offset) { 1571 } else if (rdev->mddev->bitmap_info.offset) {
1462 /* minor version 0 with bitmap we can't move */ 1572 /* minor version 0 with bitmap we can't move */
1463 return 0; 1573 return 0;
1464 } else { 1574 } else {
@@ -1826,15 +1936,11 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1826 1936
1827 uuid = sb->set_uuid; 1937 uuid = sb->set_uuid;
1828 printk(KERN_INFO 1938 printk(KERN_INFO
1829 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" 1939 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
1830 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
1831 "md: Name: \"%s\" CT:%llu\n", 1940 "md: Name: \"%s\" CT:%llu\n",
1832 le32_to_cpu(sb->major_version), 1941 le32_to_cpu(sb->major_version),
1833 le32_to_cpu(sb->feature_map), 1942 le32_to_cpu(sb->feature_map),
1834 uuid[0], uuid[1], uuid[2], uuid[3], 1943 uuid,
1835 uuid[4], uuid[5], uuid[6], uuid[7],
1836 uuid[8], uuid[9], uuid[10], uuid[11],
1837 uuid[12], uuid[13], uuid[14], uuid[15],
1838 sb->set_name, 1944 sb->set_name,
1839 (unsigned long long)le64_to_cpu(sb->ctime) 1945 (unsigned long long)le64_to_cpu(sb->ctime)
1840 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 1946 & MD_SUPERBLOCK_1_TIME_SEC_MASK);
@@ -1843,8 +1949,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1843 printk(KERN_INFO 1949 printk(KERN_INFO
1844 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 1950 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
1845 " RO:%llu\n" 1951 " RO:%llu\n"
1846 "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" 1952 "md: Dev:%08x UUID: %pU\n"
1847 ":%02x%02x%02x%02x%02x%02x\n"
1848 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 1953 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
1849 "md: (MaxDev:%u) \n", 1954 "md: (MaxDev:%u) \n",
1850 le32_to_cpu(sb->level), 1955 le32_to_cpu(sb->level),
@@ -1857,10 +1962,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1857 (unsigned long long)le64_to_cpu(sb->super_offset), 1962 (unsigned long long)le64_to_cpu(sb->super_offset),
1858 (unsigned long long)le64_to_cpu(sb->recovery_offset), 1963 (unsigned long long)le64_to_cpu(sb->recovery_offset),
1859 le32_to_cpu(sb->dev_number), 1964 le32_to_cpu(sb->dev_number),
1860 uuid[0], uuid[1], uuid[2], uuid[3], 1965 uuid,
1861 uuid[4], uuid[5], uuid[6], uuid[7],
1862 uuid[8], uuid[9], uuid[10], uuid[11],
1863 uuid[12], uuid[13], uuid[14], uuid[15],
1864 sb->devflags, 1966 sb->devflags,
1865 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, 1967 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
1866 (unsigned long long)le64_to_cpu(sb->events), 1968 (unsigned long long)le64_to_cpu(sb->events),
@@ -2442,12 +2544,49 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2442static struct rdev_sysfs_entry rdev_size = 2544static struct rdev_sysfs_entry rdev_size =
2443__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2545__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2444 2546
2547
2548static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
2549{
2550 unsigned long long recovery_start = rdev->recovery_offset;
2551
2552 if (test_bit(In_sync, &rdev->flags) ||
2553 recovery_start == MaxSector)
2554 return sprintf(page, "none\n");
2555
2556 return sprintf(page, "%llu\n", recovery_start);
2557}
2558
2559static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2560{
2561 unsigned long long recovery_start;
2562
2563 if (cmd_match(buf, "none"))
2564 recovery_start = MaxSector;
2565 else if (strict_strtoull(buf, 10, &recovery_start))
2566 return -EINVAL;
2567
2568 if (rdev->mddev->pers &&
2569 rdev->raid_disk >= 0)
2570 return -EBUSY;
2571
2572 rdev->recovery_offset = recovery_start;
2573 if (recovery_start == MaxSector)
2574 set_bit(In_sync, &rdev->flags);
2575 else
2576 clear_bit(In_sync, &rdev->flags);
2577 return len;
2578}
2579
2580static struct rdev_sysfs_entry rdev_recovery_start =
2581__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2582
2445static struct attribute *rdev_default_attrs[] = { 2583static struct attribute *rdev_default_attrs[] = {
2446 &rdev_state.attr, 2584 &rdev_state.attr,
2447 &rdev_errors.attr, 2585 &rdev_errors.attr,
2448 &rdev_slot.attr, 2586 &rdev_slot.attr,
2449 &rdev_offset.attr, 2587 &rdev_offset.attr,
2450 &rdev_size.attr, 2588 &rdev_size.attr,
2589 &rdev_recovery_start.attr,
2451 NULL, 2590 NULL,
2452}; 2591};
2453static ssize_t 2592static ssize_t
@@ -2549,6 +2688,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2549 rdev->flags = 0; 2688 rdev->flags = 0;
2550 rdev->data_offset = 0; 2689 rdev->data_offset = 0;
2551 rdev->sb_events = 0; 2690 rdev->sb_events = 0;
2691 rdev->last_read_error.tv_sec = 0;
2692 rdev->last_read_error.tv_nsec = 0;
2552 atomic_set(&rdev->nr_pending, 0); 2693 atomic_set(&rdev->nr_pending, 0);
2553 atomic_set(&rdev->read_errors, 0); 2694 atomic_set(&rdev->read_errors, 0);
2554 atomic_set(&rdev->corrected_errors, 0); 2695 atomic_set(&rdev->corrected_errors, 0);
@@ -2659,6 +2800,47 @@ static void analyze_sbs(mddev_t * mddev)
2659 } 2800 }
2660} 2801}
2661 2802
2803/* Read a fixed-point number.
2804 * Numbers in sysfs attributes should be in "standard" units where
2805 * possible, so time should be in seconds.
2806 * However we internally use a much smaller unit such as
2807 * milliseconds or jiffies.
2808 * This function takes a decimal number with a possible fractional
2809 * component, and produces an integer which is the result of
2810 * multiplying that number by 10^'scale'.
2811 * all without any floating-point arithmetic.
2812 */
2813int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
2814{
2815 unsigned long result = 0;
2816 long decimals = -1;
2817 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
2818 if (*cp == '.')
2819 decimals = 0;
2820 else if (decimals < scale) {
2821 unsigned int value;
2822 value = *cp - '0';
2823 result = result * 10 + value;
2824 if (decimals >= 0)
2825 decimals++;
2826 }
2827 cp++;
2828 }
2829 if (*cp == '\n')
2830 cp++;
2831 if (*cp)
2832 return -EINVAL;
2833 if (decimals < 0)
2834 decimals = 0;
2835 while (decimals < scale) {
2836 result *= 10;
2837 decimals ++;
2838 }
2839 *res = result;
2840 return 0;
2841}
2842
2843
2662static void md_safemode_timeout(unsigned long data); 2844static void md_safemode_timeout(unsigned long data);
2663 2845
2664static ssize_t 2846static ssize_t
@@ -2670,31 +2852,10 @@ safe_delay_show(mddev_t *mddev, char *page)
2670static ssize_t 2852static ssize_t
2671safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 2853safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2672{ 2854{
2673 int scale=1;
2674 int dot=0;
2675 int i;
2676 unsigned long msec; 2855 unsigned long msec;
2677 char buf[30];
2678 2856
2679 /* remove a period, and count digits after it */ 2857 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
2680 if (len >= sizeof(buf))
2681 return -EINVAL;
2682 strlcpy(buf, cbuf, sizeof(buf));
2683 for (i=0; i<len; i++) {
2684 if (dot) {
2685 if (isdigit(buf[i])) {
2686 buf[i-1] = buf[i];
2687 scale *= 10;
2688 }
2689 buf[i] = 0;
2690 } else if (buf[i] == '.') {
2691 dot=1;
2692 buf[i] = 0;
2693 }
2694 }
2695 if (strict_strtoul(buf, 10, &msec) < 0)
2696 return -EINVAL; 2858 return -EINVAL;
2697 msec = (msec * 1000) / scale;
2698 if (msec == 0) 2859 if (msec == 0)
2699 mddev->safemode_delay = 0; 2860 mddev->safemode_delay = 0;
2700 else { 2861 else {
@@ -2970,7 +3131,9 @@ resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2970 3131
2971 if (mddev->pers) 3132 if (mddev->pers)
2972 return -EBUSY; 3133 return -EBUSY;
2973 if (!*buf || (*e && *e != '\n')) 3134 if (cmd_match(buf, "none"))
3135 n = MaxSector;
3136 else if (!*buf || (*e && *e != '\n'))
2974 return -EINVAL; 3137 return -EINVAL;
2975 3138
2976 mddev->recovery_cp = n; 3139 mddev->recovery_cp = n;
@@ -3166,6 +3329,29 @@ static struct md_sysfs_entry md_array_state =
3166__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3329__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3167 3330
3168static ssize_t 3331static ssize_t
3332max_corrected_read_errors_show(mddev_t *mddev, char *page) {
3333 return sprintf(page, "%d\n",
3334 atomic_read(&mddev->max_corr_read_errors));
3335}
3336
3337static ssize_t
3338max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
3339{
3340 char *e;
3341 unsigned long n = simple_strtoul(buf, &e, 10);
3342
3343 if (*buf && (*e == 0 || *e == '\n')) {
3344 atomic_set(&mddev->max_corr_read_errors, n);
3345 return len;
3346 }
3347 return -EINVAL;
3348}
3349
3350static struct md_sysfs_entry max_corr_read_errors =
3351__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3352 max_corrected_read_errors_store);
3353
3354static ssize_t
3169null_show(mddev_t *mddev, char *page) 3355null_show(mddev_t *mddev, char *page)
3170{ 3356{
3171 return -EINVAL; 3357 return -EINVAL;
@@ -3246,8 +3432,7 @@ bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3246 } 3432 }
3247 if (*end && !isspace(*end)) break; 3433 if (*end && !isspace(*end)) break;
3248 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3434 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3249 buf = end; 3435 buf = skip_spaces(end);
3250 while (isspace(*buf)) buf++;
3251 } 3436 }
3252 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3437 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3253out: 3438out:
@@ -3790,6 +3975,7 @@ static struct attribute *md_default_attrs[] = {
3790 &md_array_state.attr, 3975 &md_array_state.attr,
3791 &md_reshape_position.attr, 3976 &md_reshape_position.attr,
3792 &md_array_size.attr, 3977 &md_array_size.attr,
3978 &max_corr_read_errors.attr,
3793 NULL, 3979 NULL,
3794}; 3980};
3795 3981
@@ -3894,6 +4080,7 @@ static void mddev_delayed_delete(struct work_struct *ws)
3894 mddev->sysfs_action = NULL; 4080 mddev->sysfs_action = NULL;
3895 mddev->private = NULL; 4081 mddev->private = NULL;
3896 } 4082 }
4083 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
3897 kobject_del(&mddev->kobj); 4084 kobject_del(&mddev->kobj);
3898 kobject_put(&mddev->kobj); 4085 kobject_put(&mddev->kobj);
3899} 4086}
@@ -3985,6 +4172,8 @@ static int md_alloc(dev_t dev, char *name)
3985 disk->disk_name); 4172 disk->disk_name);
3986 error = 0; 4173 error = 0;
3987 } 4174 }
4175 if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4176 printk(KERN_DEBUG "pointless warning\n");
3988 abort: 4177 abort:
3989 mutex_unlock(&disks_mutex); 4178 mutex_unlock(&disks_mutex);
3990 if (!error) { 4179 if (!error) {
@@ -4206,6 +4395,8 @@ static int do_md_run(mddev_t * mddev)
4206 mddev->ro = 0; 4395 mddev->ro = 0;
4207 4396
4208 atomic_set(&mddev->writes_pending,0); 4397 atomic_set(&mddev->writes_pending,0);
4398 atomic_set(&mddev->max_corr_read_errors,
4399 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
4209 mddev->safemode = 0; 4400 mddev->safemode = 0;
4210 mddev->safemode_timer.function = md_safemode_timeout; 4401 mddev->safemode_timer.function = md_safemode_timeout;
4211 mddev->safemode_timer.data = (unsigned long) mddev; 4402 mddev->safemode_timer.data = (unsigned long) mddev;
@@ -4310,7 +4501,7 @@ static int deny_bitmap_write_access(struct file * file)
4310 return 0; 4501 return 0;
4311} 4502}
4312 4503
4313static void restore_bitmap_write_access(struct file *file) 4504void restore_bitmap_write_access(struct file *file)
4314{ 4505{
4315 struct inode *inode = file->f_mapping->host; 4506 struct inode *inode = file->f_mapping->host;
4316 4507
@@ -4405,12 +4596,12 @@ out:
4405 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 4596 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4406 4597
4407 bitmap_destroy(mddev); 4598 bitmap_destroy(mddev);
4408 if (mddev->bitmap_file) { 4599 if (mddev->bitmap_info.file) {
4409 restore_bitmap_write_access(mddev->bitmap_file); 4600 restore_bitmap_write_access(mddev->bitmap_info.file);
4410 fput(mddev->bitmap_file); 4601 fput(mddev->bitmap_info.file);
4411 mddev->bitmap_file = NULL; 4602 mddev->bitmap_info.file = NULL;
4412 } 4603 }
4413 mddev->bitmap_offset = 0; 4604 mddev->bitmap_info.offset = 0;
4414 4605
4415 /* make sure all md_delayed_delete calls have finished */ 4606 /* make sure all md_delayed_delete calls have finished */
4416 flush_scheduled_work(); 4607 flush_scheduled_work();
@@ -4451,6 +4642,11 @@ out:
4451 mddev->degraded = 0; 4642 mddev->degraded = 0;
4452 mddev->barriers_work = 0; 4643 mddev->barriers_work = 0;
4453 mddev->safemode = 0; 4644 mddev->safemode = 0;
4645 mddev->bitmap_info.offset = 0;
4646 mddev->bitmap_info.default_offset = 0;
4647 mddev->bitmap_info.chunksize = 0;
4648 mddev->bitmap_info.daemon_sleep = 0;
4649 mddev->bitmap_info.max_write_behind = 0;
4454 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4650 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4455 if (mddev->hold_active == UNTIL_STOP) 4651 if (mddev->hold_active == UNTIL_STOP)
4456 mddev->hold_active = 0; 4652 mddev->hold_active = 0;
@@ -4636,7 +4832,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
4636 info.state = 0; 4832 info.state = 0;
4637 if (mddev->in_sync) 4833 if (mddev->in_sync)
4638 info.state = (1<<MD_SB_CLEAN); 4834 info.state = (1<<MD_SB_CLEAN);
4639 if (mddev->bitmap && mddev->bitmap_offset) 4835 if (mddev->bitmap && mddev->bitmap_info.offset)
4640 info.state = (1<<MD_SB_BITMAP_PRESENT); 4836 info.state = (1<<MD_SB_BITMAP_PRESENT);
4641 info.active_disks = insync; 4837 info.active_disks = insync;
4642 info.working_disks = working; 4838 info.working_disks = working;
@@ -4994,23 +5190,23 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
4994 if (fd >= 0) { 5190 if (fd >= 0) {
4995 if (mddev->bitmap) 5191 if (mddev->bitmap)
4996 return -EEXIST; /* cannot add when bitmap is present */ 5192 return -EEXIST; /* cannot add when bitmap is present */
4997 mddev->bitmap_file = fget(fd); 5193 mddev->bitmap_info.file = fget(fd);
4998 5194
4999 if (mddev->bitmap_file == NULL) { 5195 if (mddev->bitmap_info.file == NULL) {
5000 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 5196 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
5001 mdname(mddev)); 5197 mdname(mddev));
5002 return -EBADF; 5198 return -EBADF;
5003 } 5199 }
5004 5200
5005 err = deny_bitmap_write_access(mddev->bitmap_file); 5201 err = deny_bitmap_write_access(mddev->bitmap_info.file);
5006 if (err) { 5202 if (err) {
5007 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5203 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
5008 mdname(mddev)); 5204 mdname(mddev));
5009 fput(mddev->bitmap_file); 5205 fput(mddev->bitmap_info.file);
5010 mddev->bitmap_file = NULL; 5206 mddev->bitmap_info.file = NULL;
5011 return err; 5207 return err;
5012 } 5208 }
5013 mddev->bitmap_offset = 0; /* file overrides offset */ 5209 mddev->bitmap_info.offset = 0; /* file overrides offset */
5014 } else if (mddev->bitmap == NULL) 5210 } else if (mddev->bitmap == NULL)
5015 return -ENOENT; /* cannot remove what isn't there */ 5211 return -ENOENT; /* cannot remove what isn't there */
5016 err = 0; 5212 err = 0;
@@ -5025,11 +5221,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
5025 mddev->pers->quiesce(mddev, 0); 5221 mddev->pers->quiesce(mddev, 0);
5026 } 5222 }
5027 if (fd < 0) { 5223 if (fd < 0) {
5028 if (mddev->bitmap_file) { 5224 if (mddev->bitmap_info.file) {
5029 restore_bitmap_write_access(mddev->bitmap_file); 5225 restore_bitmap_write_access(mddev->bitmap_info.file);
5030 fput(mddev->bitmap_file); 5226 fput(mddev->bitmap_info.file);
5031 } 5227 }
5032 mddev->bitmap_file = NULL; 5228 mddev->bitmap_info.file = NULL;
5033 } 5229 }
5034 5230
5035 return err; 5231 return err;
@@ -5096,8 +5292,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5096 mddev->flags = 0; 5292 mddev->flags = 0;
5097 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5293 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5098 5294
5099 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 5295 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
5100 mddev->bitmap_offset = 0; 5296 mddev->bitmap_info.offset = 0;
5101 5297
5102 mddev->reshape_position = MaxSector; 5298 mddev->reshape_position = MaxSector;
5103 5299
@@ -5197,7 +5393,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5197 int state = 0; 5393 int state = 0;
5198 5394
5199 /* calculate expected state,ignoring low bits */ 5395 /* calculate expected state,ignoring low bits */
5200 if (mddev->bitmap && mddev->bitmap_offset) 5396 if (mddev->bitmap && mddev->bitmap_info.offset)
5201 state |= (1 << MD_SB_BITMAP_PRESENT); 5397 state |= (1 << MD_SB_BITMAP_PRESENT);
5202 5398
5203 if (mddev->major_version != info->major_version || 5399 if (mddev->major_version != info->major_version ||
@@ -5256,9 +5452,10 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5256 /* add the bitmap */ 5452 /* add the bitmap */
5257 if (mddev->bitmap) 5453 if (mddev->bitmap)
5258 return -EEXIST; 5454 return -EEXIST;
5259 if (mddev->default_bitmap_offset == 0) 5455 if (mddev->bitmap_info.default_offset == 0)
5260 return -EINVAL; 5456 return -EINVAL;
5261 mddev->bitmap_offset = mddev->default_bitmap_offset; 5457 mddev->bitmap_info.offset =
5458 mddev->bitmap_info.default_offset;
5262 mddev->pers->quiesce(mddev, 1); 5459 mddev->pers->quiesce(mddev, 1);
5263 rv = bitmap_create(mddev); 5460 rv = bitmap_create(mddev);
5264 if (rv) 5461 if (rv)
@@ -5273,7 +5470,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5273 mddev->pers->quiesce(mddev, 1); 5470 mddev->pers->quiesce(mddev, 1);
5274 bitmap_destroy(mddev); 5471 bitmap_destroy(mddev);
5275 mddev->pers->quiesce(mddev, 0); 5472 mddev->pers->quiesce(mddev, 0);
5276 mddev->bitmap_offset = 0; 5473 mddev->bitmap_info.offset = 0;
5277 } 5474 }
5278 } 5475 }
5279 md_update_sb(mddev, 1); 5476 md_update_sb(mddev, 1);
@@ -5524,6 +5721,25 @@ done:
5524abort: 5721abort:
5525 return err; 5722 return err;
5526} 5723}
5724#ifdef CONFIG_COMPAT
5725static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
5726 unsigned int cmd, unsigned long arg)
5727{
5728 switch (cmd) {
5729 case HOT_REMOVE_DISK:
5730 case HOT_ADD_DISK:
5731 case SET_DISK_FAULTY:
5732 case SET_BITMAP_FILE:
5733 /* These take in integer arg, do not convert */
5734 break;
5735 default:
5736 arg = (unsigned long)compat_ptr(arg);
5737 break;
5738 }
5739
5740 return md_ioctl(bdev, mode, cmd, arg);
5741}
5742#endif /* CONFIG_COMPAT */
5527 5743
5528static int md_open(struct block_device *bdev, fmode_t mode) 5744static int md_open(struct block_device *bdev, fmode_t mode)
5529{ 5745{
@@ -5589,6 +5805,9 @@ static const struct block_device_operations md_fops =
5589 .open = md_open, 5805 .open = md_open,
5590 .release = md_release, 5806 .release = md_release,
5591 .ioctl = md_ioctl, 5807 .ioctl = md_ioctl,
5808#ifdef CONFIG_COMPAT
5809 .compat_ioctl = md_compat_ioctl,
5810#endif
5592 .getgeo = md_getgeo, 5811 .getgeo = md_getgeo,
5593 .media_changed = md_media_changed, 5812 .media_changed = md_media_changed,
5594 .revalidate_disk= md_revalidate, 5813 .revalidate_disk= md_revalidate,
@@ -5982,14 +6201,14 @@ static int md_seq_show(struct seq_file *seq, void *v)
5982 unsigned long chunk_kb; 6201 unsigned long chunk_kb;
5983 unsigned long flags; 6202 unsigned long flags;
5984 spin_lock_irqsave(&bitmap->lock, flags); 6203 spin_lock_irqsave(&bitmap->lock, flags);
5985 chunk_kb = bitmap->chunksize >> 10; 6204 chunk_kb = mddev->bitmap_info.chunksize >> 10;
5986 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 6205 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5987 "%lu%s chunk", 6206 "%lu%s chunk",
5988 bitmap->pages - bitmap->missing_pages, 6207 bitmap->pages - bitmap->missing_pages,
5989 bitmap->pages, 6208 bitmap->pages,
5990 (bitmap->pages - bitmap->missing_pages) 6209 (bitmap->pages - bitmap->missing_pages)
5991 << (PAGE_SHIFT - 10), 6210 << (PAGE_SHIFT - 10),
5992 chunk_kb ? chunk_kb : bitmap->chunksize, 6211 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
5993 chunk_kb ? "KB" : "B"); 6212 chunk_kb ? "KB" : "B");
5994 if (bitmap->file) { 6213 if (bitmap->file) {
5995 seq_printf(seq, ", file: "); 6214 seq_printf(seq, ", file: ");
@@ -6338,12 +6557,14 @@ void md_do_sync(mddev_t *mddev)
6338 /* recovery follows the physical size of devices */ 6557 /* recovery follows the physical size of devices */
6339 max_sectors = mddev->dev_sectors; 6558 max_sectors = mddev->dev_sectors;
6340 j = MaxSector; 6559 j = MaxSector;
6341 list_for_each_entry(rdev, &mddev->disks, same_set) 6560 rcu_read_lock();
6561 list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6342 if (rdev->raid_disk >= 0 && 6562 if (rdev->raid_disk >= 0 &&
6343 !test_bit(Faulty, &rdev->flags) && 6563 !test_bit(Faulty, &rdev->flags) &&
6344 !test_bit(In_sync, &rdev->flags) && 6564 !test_bit(In_sync, &rdev->flags) &&
6345 rdev->recovery_offset < j) 6565 rdev->recovery_offset < j)
6346 j = rdev->recovery_offset; 6566 j = rdev->recovery_offset;
6567 rcu_read_unlock();
6347 } 6568 }
6348 6569
6349 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 6570 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
@@ -6380,6 +6601,7 @@ void md_do_sync(mddev_t *mddev)
6380 desc, mdname(mddev)); 6601 desc, mdname(mddev));
6381 mddev->curr_resync = j; 6602 mddev->curr_resync = j;
6382 } 6603 }
6604 mddev->curr_resync_completed = mddev->curr_resync;
6383 6605
6384 while (j < max_sectors) { 6606 while (j < max_sectors) {
6385 sector_t sectors; 6607 sector_t sectors;
@@ -6512,22 +6734,29 @@ void md_do_sync(mddev_t *mddev)
6512 } else { 6734 } else {
6513 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6735 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6514 mddev->curr_resync = MaxSector; 6736 mddev->curr_resync = MaxSector;
6515 list_for_each_entry(rdev, &mddev->disks, same_set) 6737 rcu_read_lock();
6738 list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6516 if (rdev->raid_disk >= 0 && 6739 if (rdev->raid_disk >= 0 &&
6517 !test_bit(Faulty, &rdev->flags) && 6740 !test_bit(Faulty, &rdev->flags) &&
6518 !test_bit(In_sync, &rdev->flags) && 6741 !test_bit(In_sync, &rdev->flags) &&
6519 rdev->recovery_offset < mddev->curr_resync) 6742 rdev->recovery_offset < mddev->curr_resync)
6520 rdev->recovery_offset = mddev->curr_resync; 6743 rdev->recovery_offset = mddev->curr_resync;
6744 rcu_read_unlock();
6521 } 6745 }
6522 } 6746 }
6523 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6747 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6524 6748
6525 skip: 6749 skip:
6750 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6751 /* We completed so min/max setting can be forgotten if used. */
6752 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6753 mddev->resync_min = 0;
6754 mddev->resync_max = MaxSector;
6755 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6756 mddev->resync_min = mddev->curr_resync_completed;
6526 mddev->curr_resync = 0; 6757 mddev->curr_resync = 0;
6527 mddev->curr_resync_completed = 0;
6528 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6758 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6529 /* We completed so max setting can be forgotten. */ 6759 mddev->curr_resync_completed = 0;
6530 mddev->resync_max = MaxSector;
6531 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6760 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6532 wake_up(&resync_wait); 6761 wake_up(&resync_wait);
6533 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 6762 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
@@ -6590,6 +6819,7 @@ static int remove_and_add_spares(mddev_t *mddev)
6590 nm, mdname(mddev)); 6819 nm, mdname(mddev));
6591 spares++; 6820 spares++;
6592 md_new_event(mddev); 6821 md_new_event(mddev);
6822 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6593 } else 6823 } else
6594 break; 6824 break;
6595 } 6825 }
@@ -6625,7 +6855,7 @@ void md_check_recovery(mddev_t *mddev)
6625 6855
6626 6856
6627 if (mddev->bitmap) 6857 if (mddev->bitmap)
6628 bitmap_daemon_work(mddev->bitmap); 6858 bitmap_daemon_work(mddev);
6629 6859
6630 if (mddev->ro) 6860 if (mddev->ro)
6631 return; 6861 return;
@@ -6995,5 +7225,6 @@ EXPORT_SYMBOL(md_unregister_thread);
6995EXPORT_SYMBOL(md_wakeup_thread); 7225EXPORT_SYMBOL(md_wakeup_thread);
6996EXPORT_SYMBOL(md_check_recovery); 7226EXPORT_SYMBOL(md_check_recovery);
6997MODULE_LICENSE("GPL"); 7227MODULE_LICENSE("GPL");
7228MODULE_DESCRIPTION("MD RAID framework");
6998MODULE_ALIAS("md"); 7229MODULE_ALIAS("md");
6999MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 7230MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);