aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2010-05-03 00:09:02 -0400
committerNeilBrown <neilb@suse.de>2010-05-18 01:27:58 -0400
commit0c55e02259115c151e4835dd417cf41467bb02e2 (patch)
treebcc12cd6c9b2d40b8bc31a4d85cae13103168493 /drivers/md/raid5.c
parent08fb730ca346ff16598ef31911c88fbca6133bf5 (diff)
md/raid5: improve consistency of error messages.
Many 'printk' messages from the raid456 module mention 'raid5' even though it may be a 'raid6' or even 'raid4' array. This can cause confusion. Also, the actual array name is not always reported, and when it is, it is not reported consistently. So change all the messages to start: md/raid:%s: where '%s' becomes e.g. md3 to identify the particular array. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c149
1 files changed, 69 insertions, 80 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 81563b7c0357..cee9f93b35c4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1509,7 +1509,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1509 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1509 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1510 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1510 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1511 rdev = conf->disks[i].rdev; 1511 rdev = conf->disks[i].rdev;
1512 printk_rl(KERN_INFO "raid5:%s: read error corrected" 1512 printk_rl(KERN_INFO "md/raid:%s: read error corrected"
1513 " (%lu sectors at %llu on %s)\n", 1513 " (%lu sectors at %llu on %s)\n",
1514 mdname(conf->mddev), STRIPE_SECTORS, 1514 mdname(conf->mddev), STRIPE_SECTORS,
1515 (unsigned long long)(sh->sector 1515 (unsigned long long)(sh->sector
@@ -1529,7 +1529,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1529 atomic_inc(&rdev->read_errors); 1529 atomic_inc(&rdev->read_errors);
1530 if (conf->mddev->degraded) 1530 if (conf->mddev->degraded)
1531 printk_rl(KERN_WARNING 1531 printk_rl(KERN_WARNING
1532 "raid5:%s: read error not correctable " 1532 "md/raid:%s: read error not correctable "
1533 "(sector %llu on %s).\n", 1533 "(sector %llu on %s).\n",
1534 mdname(conf->mddev), 1534 mdname(conf->mddev),
1535 (unsigned long long)(sh->sector 1535 (unsigned long long)(sh->sector
@@ -1538,7 +1538,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1538 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1538 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1539 /* Oh, no!!! */ 1539 /* Oh, no!!! */
1540 printk_rl(KERN_WARNING 1540 printk_rl(KERN_WARNING
1541 "raid5:%s: read error NOT corrected!! " 1541 "md/raid:%s: read error NOT corrected!! "
1542 "(sector %llu on %s).\n", 1542 "(sector %llu on %s).\n",
1543 mdname(conf->mddev), 1543 mdname(conf->mddev),
1544 (unsigned long long)(sh->sector 1544 (unsigned long long)(sh->sector
@@ -1547,7 +1547,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1547 else if (atomic_read(&rdev->read_errors) 1547 else if (atomic_read(&rdev->read_errors)
1548 > conf->max_nr_stripes) 1548 > conf->max_nr_stripes)
1549 printk(KERN_WARNING 1549 printk(KERN_WARNING
1550 "raid5:%s: Too many read errors, failing device %s.\n", 1550 "md/raid:%s: Too many read errors, failing device %s.\n",
1551 mdname(conf->mddev), bdn); 1551 mdname(conf->mddev), bdn);
1552 else 1552 else
1553 retry = 1; 1553 retry = 1;
@@ -1620,7 +1620,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1620{ 1620{
1621 char b[BDEVNAME_SIZE]; 1621 char b[BDEVNAME_SIZE];
1622 raid5_conf_t *conf = mddev->private; 1622 raid5_conf_t *conf = mddev->private;
1623 pr_debug("raid5: error called\n"); 1623 pr_debug("raid456: error called\n");
1624 1624
1625 if (!test_bit(Faulty, &rdev->flags)) { 1625 if (!test_bit(Faulty, &rdev->flags)) {
1626 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1626 set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1636,9 +1636,13 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1636 } 1636 }
1637 set_bit(Faulty, &rdev->flags); 1637 set_bit(Faulty, &rdev->flags);
1638 printk(KERN_ALERT 1638 printk(KERN_ALERT
1639 "raid5: Disk failure on %s, disabling device.\n" 1639 "md/raid:%s: Disk failure on %s, disabling device.\n"
1640 "raid5: Operation continuing on %d devices.\n", 1640 KERN_ALERT
1641 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1641 "md/raid:%s: Operation continuing on %d devices.\n",
1642 mdname(mddev),
1643 bdevname(rdev->bdev, b),
1644 mdname(mddev),
1645 conf->raid_disks - mddev->degraded);
1642 } 1646 }
1643} 1647}
1644 1648
@@ -1719,8 +1723,6 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1719 pd_idx = data_disks; 1723 pd_idx = data_disks;
1720 break; 1724 break;
1721 default: 1725 default:
1722 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1723 algorithm);
1724 BUG(); 1726 BUG();
1725 } 1727 }
1726 break; 1728 break;
@@ -1836,10 +1838,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1836 qd_idx = raid_disks - 1; 1838 qd_idx = raid_disks - 1;
1837 break; 1839 break;
1838 1840
1839
1840 default: 1841 default:
1841 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1842 algorithm);
1843 BUG(); 1842 BUG();
1844 } 1843 }
1845 break; 1844 break;
@@ -1902,8 +1901,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1902 case ALGORITHM_PARITY_N: 1901 case ALGORITHM_PARITY_N:
1903 break; 1902 break;
1904 default: 1903 default:
1905 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1906 algorithm);
1907 BUG(); 1904 BUG();
1908 } 1905 }
1909 break; 1906 break;
@@ -1962,8 +1959,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1962 i -= 1; 1959 i -= 1;
1963 break; 1960 break;
1964 default: 1961 default:
1965 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1966 algorithm);
1967 BUG(); 1962 BUG();
1968 } 1963 }
1969 break; 1964 break;
@@ -1976,7 +1971,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1976 previous, &dummy1, &sh2); 1971 previous, &dummy1, &sh2);
1977 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 1972 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
1978 || sh2.qd_idx != sh->qd_idx) { 1973 || sh2.qd_idx != sh->qd_idx) {
1979 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1974 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
1975 mdname(conf->mddev));
1980 return 0; 1976 return 0;
1981 } 1977 }
1982 return r_sector; 1978 return r_sector;
@@ -3942,7 +3938,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
3942 new_sector = raid5_compute_sector(conf, logical_sector, 3938 new_sector = raid5_compute_sector(conf, logical_sector,
3943 previous, 3939 previous,
3944 &dd_idx, NULL); 3940 &dd_idx, NULL);
3945 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3941 pr_debug("raid456: make_request, sector %llu logical %llu\n",
3946 (unsigned long long)new_sector, 3942 (unsigned long long)new_sector,
3947 (unsigned long long)logical_sector); 3943 (unsigned long long)logical_sector);
3948 3944
@@ -4721,7 +4717,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4721 if (mddev->new_level != 5 4717 if (mddev->new_level != 5
4722 && mddev->new_level != 4 4718 && mddev->new_level != 4
4723 && mddev->new_level != 6) { 4719 && mddev->new_level != 6) {
4724 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4720 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4725 mdname(mddev), mddev->new_level); 4721 mdname(mddev), mddev->new_level);
4726 return ERR_PTR(-EIO); 4722 return ERR_PTR(-EIO);
4727 } 4723 }
@@ -4729,12 +4725,12 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4729 && !algorithm_valid_raid5(mddev->new_layout)) || 4725 && !algorithm_valid_raid5(mddev->new_layout)) ||
4730 (mddev->new_level == 6 4726 (mddev->new_level == 6
4731 && !algorithm_valid_raid6(mddev->new_layout))) { 4727 && !algorithm_valid_raid6(mddev->new_layout))) {
4732 printk(KERN_ERR "raid5: %s: layout %d not supported\n", 4728 printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4733 mdname(mddev), mddev->new_layout); 4729 mdname(mddev), mddev->new_layout);
4734 return ERR_PTR(-EIO); 4730 return ERR_PTR(-EIO);
4735 } 4731 }
4736 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4732 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4737 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4733 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4738 mdname(mddev), mddev->raid_disks); 4734 mdname(mddev), mddev->raid_disks);
4739 return ERR_PTR(-EINVAL); 4735 return ERR_PTR(-EINVAL);
4740 } 4736 }
@@ -4742,8 +4738,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4742 if (!mddev->new_chunk_sectors || 4738 if (!mddev->new_chunk_sectors ||
4743 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 4739 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4744 !is_power_of_2(mddev->new_chunk_sectors)) { 4740 !is_power_of_2(mddev->new_chunk_sectors)) {
4745 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4741 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4746 mddev->new_chunk_sectors << 9, mdname(mddev)); 4742 mdname(mddev), mddev->new_chunk_sectors << 9);
4747 return ERR_PTR(-EINVAL); 4743 return ERR_PTR(-EINVAL);
4748 } 4744 }
4749 4745
@@ -4785,7 +4781,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4785 if (raid5_alloc_percpu(conf) != 0) 4781 if (raid5_alloc_percpu(conf) != 0)
4786 goto abort; 4782 goto abort;
4787 4783
4788 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4784 pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4789 4785
4790 list_for_each_entry(rdev, &mddev->disks, same_set) { 4786 list_for_each_entry(rdev, &mddev->disks, same_set) {
4791 raid_disk = rdev->raid_disk; 4787 raid_disk = rdev->raid_disk;
@@ -4798,9 +4794,9 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4798 4794
4799 if (test_bit(In_sync, &rdev->flags)) { 4795 if (test_bit(In_sync, &rdev->flags)) {
4800 char b[BDEVNAME_SIZE]; 4796 char b[BDEVNAME_SIZE];
4801 printk(KERN_INFO "raid5: device %s operational as raid" 4797 printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4802 " disk %d\n", bdevname(rdev->bdev,b), 4798 " disk %d\n",
4803 raid_disk); 4799 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4804 } else 4800 } else
4805 /* Cannot rely on bitmap to complete recovery */ 4801 /* Cannot rely on bitmap to complete recovery */
4806 conf->fullsync = 1; 4802 conf->fullsync = 1;
@@ -4824,16 +4820,17 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4824 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4820 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4825 if (grow_stripes(conf, conf->max_nr_stripes)) { 4821 if (grow_stripes(conf, conf->max_nr_stripes)) {
4826 printk(KERN_ERR 4822 printk(KERN_ERR
4827 "raid5: couldn't allocate %dkB for buffers\n", memory); 4823 "md/raid:%s: couldn't allocate %dkB for buffers\n",
4824 mdname(mddev), memory);
4828 goto abort; 4825 goto abort;
4829 } else 4826 } else
4830 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4827 printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
4831 memory, mdname(mddev)); 4828 mdname(mddev), memory);
4832 4829
4833 conf->thread = md_register_thread(raid5d, mddev, NULL); 4830 conf->thread = md_register_thread(raid5d, mddev, NULL);
4834 if (!conf->thread) { 4831 if (!conf->thread) {
4835 printk(KERN_ERR 4832 printk(KERN_ERR
4836 "raid5: couldn't allocate thread for %s\n", 4833 "md/raid:%s: couldn't allocate thread.\n",
4837 mdname(mddev)); 4834 mdname(mddev));
4838 goto abort; 4835 goto abort;
4839 } 4836 }
@@ -4884,7 +4881,7 @@ static int run(mddev_t *mddev)
4884 sector_t reshape_offset = 0; 4881 sector_t reshape_offset = 0;
4885 4882
4886 if (mddev->recovery_cp != MaxSector) 4883 if (mddev->recovery_cp != MaxSector)
4887 printk(KERN_NOTICE "raid5: %s is not clean" 4884 printk(KERN_NOTICE "md/raid:%s: not clean"
4888 " -- starting background reconstruction\n", 4885 " -- starting background reconstruction\n",
4889 mdname(mddev)); 4886 mdname(mddev));
4890 if (mddev->reshape_position != MaxSector) { 4887 if (mddev->reshape_position != MaxSector) {
@@ -4898,7 +4895,7 @@ static int run(mddev_t *mddev)
4898 int max_degraded = (mddev->level == 6 ? 2 : 1); 4895 int max_degraded = (mddev->level == 6 ? 2 : 1);
4899 4896
4900 if (mddev->new_level != mddev->level) { 4897 if (mddev->new_level != mddev->level) {
4901 printk(KERN_ERR "raid5: %s: unsupported reshape " 4898 printk(KERN_ERR "md/raid:%s: unsupported reshape "
4902 "required - aborting.\n", 4899 "required - aborting.\n",
4903 mdname(mddev)); 4900 mdname(mddev));
4904 return -EINVAL; 4901 return -EINVAL;
@@ -4911,8 +4908,8 @@ static int run(mddev_t *mddev)
4911 here_new = mddev->reshape_position; 4908 here_new = mddev->reshape_position;
4912 if (sector_div(here_new, mddev->new_chunk_sectors * 4909 if (sector_div(here_new, mddev->new_chunk_sectors *
4913 (mddev->raid_disks - max_degraded))) { 4910 (mddev->raid_disks - max_degraded))) {
4914 printk(KERN_ERR "raid5: reshape_position not " 4911 printk(KERN_ERR "md/raid:%s: reshape_position not "
4915 "on a stripe boundary\n"); 4912 "on a stripe boundary\n", mdname(mddev));
4916 return -EINVAL; 4913 return -EINVAL;
4917 } 4914 }
4918 reshape_offset = here_new * mddev->new_chunk_sectors; 4915 reshape_offset = here_new * mddev->new_chunk_sectors;
@@ -4933,8 +4930,9 @@ static int run(mddev_t *mddev)
4933 if ((here_new * mddev->new_chunk_sectors != 4930 if ((here_new * mddev->new_chunk_sectors !=
4934 here_old * mddev->chunk_sectors) || 4931 here_old * mddev->chunk_sectors) ||
4935 mddev->ro == 0) { 4932 mddev->ro == 0) {
4936 printk(KERN_ERR "raid5: in-place reshape must be started" 4933 printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
4937 " in read-only mode - aborting\n"); 4934 " in read-only mode - aborting\n",
4935 mdname(mddev));
4938 return -EINVAL; 4936 return -EINVAL;
4939 } 4937 }
4940 } else if (mddev->delta_disks < 0 4938 } else if (mddev->delta_disks < 0
@@ -4943,11 +4941,13 @@ static int run(mddev_t *mddev)
4943 : (here_new * mddev->new_chunk_sectors >= 4941 : (here_new * mddev->new_chunk_sectors >=
4944 here_old * mddev->chunk_sectors)) { 4942 here_old * mddev->chunk_sectors)) {
4945 /* Reading from the same stripe as writing to - bad */ 4943 /* Reading from the same stripe as writing to - bad */
4946 printk(KERN_ERR "raid5: reshape_position too early for " 4944 printk(KERN_ERR "md/raid:%s: reshape_position too early for "
4947 "auto-recovery - aborting.\n"); 4945 "auto-recovery - aborting.\n",
4946 mdname(mddev));
4948 return -EINVAL; 4947 return -EINVAL;
4949 } 4948 }
4950 printk(KERN_INFO "raid5: reshape will continue\n"); 4949 printk(KERN_INFO "md/raid:%s: reshape will continue\n",
4950 mdname(mddev));
4951 /* OK, we should be able to continue; */ 4951 /* OK, we should be able to continue; */
4952 } else { 4952 } else {
4953 BUG_ON(mddev->level != mddev->new_level); 4953 BUG_ON(mddev->level != mddev->new_level);
@@ -4989,18 +4989,6 @@ static int run(mddev_t *mddev)
4989 mddev->minor_version > 90) 4989 mddev->minor_version > 90)
4990 rdev->recovery_offset = reshape_offset; 4990 rdev->recovery_offset = reshape_offset;
4991 4991
4992 printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
4993 rdev->raid_disk, working_disks, conf->prev_algo,
4994 conf->previous_raid_disks, conf->max_degraded,
4995 conf->algorithm, conf->raid_disks,
4996 only_parity(rdev->raid_disk,
4997 conf->prev_algo,
4998 conf->previous_raid_disks,
4999 conf->max_degraded),
5000 only_parity(rdev->raid_disk,
5001 conf->algorithm,
5002 conf->raid_disks,
5003 conf->max_degraded));
5004 if (rdev->recovery_offset < reshape_offset) { 4992 if (rdev->recovery_offset < reshape_offset) {
5005 /* We need to check old and new layout */ 4993 /* We need to check old and new layout */
5006 if (!only_parity(rdev->raid_disk, 4994 if (!only_parity(rdev->raid_disk,
@@ -5021,7 +5009,7 @@ static int run(mddev_t *mddev)
5021 - working_disks); 5009 - working_disks);
5022 5010
5023 if (mddev->degraded > conf->max_degraded) { 5011 if (mddev->degraded > conf->max_degraded) {
5024 printk(KERN_ERR "raid5: not enough operational devices for %s" 5012 printk(KERN_ERR "md/raid:%s: not enough operational devices"
5025 " (%d/%d failed)\n", 5013 " (%d/%d failed)\n",
5026 mdname(mddev), mddev->degraded, conf->raid_disks); 5014 mdname(mddev), mddev->degraded, conf->raid_disks);
5027 goto abort; 5015 goto abort;
@@ -5035,32 +5023,32 @@ static int run(mddev_t *mddev)
5035 mddev->recovery_cp != MaxSector) { 5023 mddev->recovery_cp != MaxSector) {
5036 if (mddev->ok_start_degraded) 5024 if (mddev->ok_start_degraded)
5037 printk(KERN_WARNING 5025 printk(KERN_WARNING
5038 "raid5: starting dirty degraded array: %s" 5026 "md/raid:%s: starting dirty degraded array"
5039 "- data corruption possible.\n", 5027 " - data corruption possible.\n",
5040 mdname(mddev)); 5028 mdname(mddev));
5041 else { 5029 else {
5042 printk(KERN_ERR 5030 printk(KERN_ERR
5043 "raid5: cannot start dirty degraded array for %s\n", 5031 "md/raid:%s: cannot start dirty degraded array.\n",
5044 mdname(mddev)); 5032 mdname(mddev));
5045 goto abort; 5033 goto abort;
5046 } 5034 }
5047 } 5035 }
5048 5036
5049 if (mddev->degraded == 0) 5037 if (mddev->degraded == 0)
5050 printk("raid5: raid level %d set %s active with %d out of %d" 5038 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
5051 " devices, algorithm %d\n", conf->level, mdname(mddev), 5039 " devices, algorithm %d\n", mdname(mddev), conf->level,
5052 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 5040 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
5053 mddev->new_layout); 5041 mddev->new_layout);
5054 else 5042 else
5055 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 5043 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
5056 " out of %d devices, algorithm %d\n", conf->level, 5044 " out of %d devices, algorithm %d\n",
5057 mdname(mddev), mddev->raid_disks - mddev->degraded, 5045 mdname(mddev), conf->level,
5058 mddev->raid_disks, mddev->new_layout); 5046 mddev->raid_disks - mddev->degraded,
5047 mddev->raid_disks, mddev->new_layout);
5059 5048
5060 print_raid5_conf(conf); 5049 print_raid5_conf(conf);
5061 5050
5062 if (conf->reshape_progress != MaxSector) { 5051 if (conf->reshape_progress != MaxSector) {
5063 printk("...ok start reshape thread\n");
5064 conf->reshape_safe = conf->reshape_progress; 5052 conf->reshape_safe = conf->reshape_progress;
5065 atomic_set(&conf->reshape_stripes, 0); 5053 atomic_set(&conf->reshape_stripes, 0);
5066 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5054 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -5087,7 +5075,7 @@ static int run(mddev_t *mddev)
5087 mddev->to_remove = NULL; 5075 mddev->to_remove = NULL;
5088 else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 5076 else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
5089 printk(KERN_WARNING 5077 printk(KERN_WARNING
5090 "raid5: failed to create sysfs attributes for %s\n", 5078 "md/raid:%s: failed to create sysfs attributes.\n",
5091 mdname(mddev)); 5079 mdname(mddev));
5092 5080
5093 mddev->queue->queue_lock = &conf->device_lock; 5081 mddev->queue->queue_lock = &conf->device_lock;
@@ -5117,12 +5105,10 @@ abort:
5117 free_conf(conf); 5105 free_conf(conf);
5118 } 5106 }
5119 mddev->private = NULL; 5107 mddev->private = NULL;
5120 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 5108 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
5121 return -EIO; 5109 return -EIO;
5122} 5110}
5123 5111
5124
5125
5126static int stop(mddev_t *mddev) 5112static int stop(mddev_t *mddev)
5127{ 5113{
5128 raid5_conf_t *conf = mddev->private; 5114 raid5_conf_t *conf = mddev->private;
@@ -5196,21 +5182,22 @@ static void print_raid5_conf (raid5_conf_t *conf)
5196 int i; 5182 int i;
5197 struct disk_info *tmp; 5183 struct disk_info *tmp;
5198 5184
5199 printk("RAID5 conf printout:\n"); 5185 printk(KERN_DEBUG "RAID conf printout:\n");
5200 if (!conf) { 5186 if (!conf) {
5201 printk("(conf==NULL)\n"); 5187 printk("(conf==NULL)\n");
5202 return; 5188 return;
5203 } 5189 }
5204 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 5190 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
5205 conf->raid_disks - conf->mddev->degraded); 5191 conf->raid_disks,
5192 conf->raid_disks - conf->mddev->degraded);
5206 5193
5207 for (i = 0; i < conf->raid_disks; i++) { 5194 for (i = 0; i < conf->raid_disks; i++) {
5208 char b[BDEVNAME_SIZE]; 5195 char b[BDEVNAME_SIZE];
5209 tmp = conf->disks + i; 5196 tmp = conf->disks + i;
5210 if (tmp->rdev) 5197 if (tmp->rdev)
5211 printk(" disk %d, o:%d, dev:%s\n", 5198 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
5212 i, !test_bit(Faulty, &tmp->rdev->flags), 5199 i, !test_bit(Faulty, &tmp->rdev->flags),
5213 bdevname(tmp->rdev->bdev,b)); 5200 bdevname(tmp->rdev->bdev, b));
5214 } 5201 }
5215} 5202}
5216 5203
@@ -5358,7 +5345,8 @@ static int check_stripe_cache(mddev_t *mddev)
5358 > conf->max_nr_stripes || 5345 > conf->max_nr_stripes ||
5359 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 5346 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5360 > conf->max_nr_stripes) { 5347 > conf->max_nr_stripes) {
5361 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 5348 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
5349 mdname(mddev),
5362 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 5350 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5363 / STRIPE_SIZE)*4); 5351 / STRIPE_SIZE)*4);
5364 return 0; 5352 return 0;
@@ -5429,7 +5417,7 @@ static int raid5_start_reshape(mddev_t *mddev)
5429 */ 5417 */
5430 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 5418 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5431 < mddev->array_sectors) { 5419 < mddev->array_sectors) {
5432 printk(KERN_ERR "md: %s: array size must be reduced " 5420 printk(KERN_ERR "md/raid:%s: array size must be reduced "
5433 "before number of disks\n", mdname(mddev)); 5421 "before number of disks\n", mdname(mddev));
5434 return -EINVAL; 5422 return -EINVAL;
5435 } 5423 }
@@ -5467,9 +5455,9 @@ static int raid5_start_reshape(mddev_t *mddev)
5467 if (sysfs_create_link(&mddev->kobj, 5455 if (sysfs_create_link(&mddev->kobj,
5468 &rdev->kobj, nm)) 5456 &rdev->kobj, nm))
5469 printk(KERN_WARNING 5457 printk(KERN_WARNING
5470 "raid5: failed to create " 5458 "md/raid:%s: failed to create "
5471 " link %s for %s\n", 5459 " link %s\n",
5472 nm, mdname(mddev)); 5460 mdname(mddev), nm);
5473 } else 5461 } else
5474 break; 5462 break;
5475 } 5463 }
@@ -5616,7 +5604,8 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
5616 5604
5617 /* for raid0 takeover only one zone is supported */ 5605 /* for raid0 takeover only one zone is supported */
5618 if (raid0_priv->nr_strip_zones > 1) { 5606 if (raid0_priv->nr_strip_zones > 1) {
5619 printk(KERN_ERR "md: cannot takeover raid0 with more than one zone.\n"); 5607 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
5608 mdname(mddev));
5620 return ERR_PTR(-EINVAL); 5609 return ERR_PTR(-EINVAL);
5621 } 5610 }
5622 5611