path: root/drivers/md/raid5.c
author    NeilBrown <neilb@suse.de>    2010-05-21 18:31:36 -0400
committer NeilBrown <neilb@suse.de>    2010-05-21 18:31:36 -0400
commit    19fdb9eefb21b72edbc365b838502780c392bad6 (patch)
tree      deae04c48532d6eab64ed4b0396737bb854b5506 /drivers/md/raid5.c
parent    be6800a73aa2f3dc14744c3b80e676d189789f04 (diff)
parent    3ff195b011d7decf501a4d55aeed312731094796 (diff)
Merge commit '3ff195b011d7decf501a4d55aeed312731094796' into for-linus
Conflicts:
	drivers/md/md.c
	- Resolved conflict in md_update_sb
	- Added extra 'NULL' arg to new instance of sysfs_get_dirent.

Signed-off-by: NeilBrown <neilb@suse.de>
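Note: the conflict comes from the merged sysfs work (3ff195b011d7), which adds a
namespace-tag parameter to sysfs_get_dirent(). A minimal sketch of the kind of
fix-up described above, assuming the pre-merge two-argument call shape; the
attribute name and surrounding md_update_sb code are illustrative, not the
actual md.c hunk:

	/* before the merge: parent dirent + attribute name
	 * ("array_state" is only an example attribute) */
	sd = sysfs_get_dirent(mddev->kobj.sd, "array_state");

	/* after the merge: sysfs_get_dirent() takes a namespace tag,
	 * which md does not use, so NULL is passed */
	sd = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");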
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	53
1 file changed, 25 insertions(+), 28 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index eacf02a6ec5f..9ea17d6c799b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -50,6 +50,7 @@
 #include <linux/async.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
+#include <linux/slab.h>
 #include "md.h"
 #include "raid5.h"
 #include "raid0.h"
@@ -1654,8 +1655,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
 {
-	long stripe;
-	unsigned long chunk_number;
+	sector_t stripe, stripe2;
+	sector_t chunk_number;
 	unsigned int chunk_offset;
 	int pd_idx, qd_idx;
 	int ddf_layout = 0;
@@ -1675,18 +1676,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 	 */
 	chunk_offset = sector_div(r_sector, sectors_per_chunk);
 	chunk_number = r_sector;
-	BUG_ON(r_sector != chunk_number);
 
 	/*
 	 * Compute the stripe number
 	 */
-	stripe = chunk_number / data_disks;
-
-	/*
-	 * Compute the data disk and parity disk indexes inside the stripe
-	 */
-	*dd_idx = chunk_number % data_disks;
-
+	stripe = chunk_number;
+	*dd_idx = sector_div(stripe, data_disks);
+	stripe2 = stripe;
 	/*
 	 * Select the parity disk based on the user selected algorithm.
 	 */
@@ -1698,21 +1694,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 	case 5:
 		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
-			pd_idx = data_disks - stripe % raid_disks;
+			pd_idx = data_disks - sector_div(stripe2, raid_disks);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			break;
 		case ALGORITHM_RIGHT_ASYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			break;
 		case ALGORITHM_LEFT_SYMMETRIC:
-			pd_idx = data_disks - stripe % raid_disks;
+			pd_idx = data_disks - sector_div(stripe2, raid_disks);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_RIGHT_SYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_PARITY_0:
@@ -1730,7 +1726,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
 		switch (algorithm) {
 		case ALGORITHM_LEFT_ASYMMETRIC:
-			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1739,7 +1735,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_RIGHT_ASYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1748,12 +1744,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 				(*dd_idx) += 2; /* D D P Q D */
 			break;
 		case ALGORITHM_LEFT_SYMMETRIC:
-			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = (pd_idx + 1) % raid_disks;
 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
 			break;
 		case ALGORITHM_RIGHT_SYMMETRIC:
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			qd_idx = (pd_idx + 1) % raid_disks;
 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
 			break;
@@ -1772,7 +1768,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 			/* Exactly the same as RIGHT_ASYMMETRIC, but or
 			 * of blocks for computing Q is different.
 			 */
-			pd_idx = stripe % raid_disks;
+			pd_idx = sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1787,7 +1783,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 			 * D D D P Q  rather than
 			 * Q D D D P
 			 */
-			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+			stripe2 += 1;
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = pd_idx + 1;
 			if (pd_idx == raid_disks-1) {
 				(*dd_idx)++;	/* Q D D D P */
@@ -1799,7 +1796,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
 		case ALGORITHM_ROTATING_N_CONTINUE:
 			/* Same as left_symmetric but Q is before P */
-			pd_idx = raid_disks - 1 - (stripe % raid_disks);
+			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
 			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
 			ddf_layout = 1;
@@ -1807,27 +1804,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
 		case ALGORITHM_LEFT_ASYMMETRIC_6:
 			/* RAID5 left_asymmetric, with Q on last device */
-			pd_idx = data_disks - stripe % (raid_disks-1);
+			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			qd_idx = raid_disks - 1;
 			break;
 
 		case ALGORITHM_RIGHT_ASYMMETRIC_6:
-			pd_idx = stripe % (raid_disks-1);
+			pd_idx = sector_div(stripe2, raid_disks-1);
 			if (*dd_idx >= pd_idx)
 				(*dd_idx)++;
 			qd_idx = raid_disks - 1;
 			break;
 
 		case ALGORITHM_LEFT_SYMMETRIC_6:
-			pd_idx = data_disks - stripe % (raid_disks-1);
+			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
 			qd_idx = raid_disks - 1;
 			break;
 
 		case ALGORITHM_RIGHT_SYMMETRIC_6:
-			pd_idx = stripe % (raid_disks-1);
+			pd_idx = sector_div(stripe2, raid_disks-1);
 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
 			qd_idx = raid_disks - 1;
 			break;
@@ -1869,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 					 : conf->algorithm;
 	sector_t stripe;
 	int chunk_offset;
-	int chunk_number, dummy1, dd_idx = i;
+	sector_t chunk_number;
+	int dummy1, dd_idx = i;
 	sector_t r_sector;
 	struct stripe_head sh2;
 
 
 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
 	stripe = new_sector;
-	BUG_ON(new_sector != stripe);
 
 	if (i == sh->pd_idx)
 		return 0;
@@ -1965,7 +1962,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 	}
 
 	chunk_number = stripe * data_disks + i;
-	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
+	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
 
 	check = raid5_compute_sector(conf, r_sector,
 				     previous, &dummy1, &sh2);
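Note on the recurring change above: with 'stripe' and 'chunk_number' now sector_t
(64-bit even on 32-bit hosts), a plain '%' or '/' on them would require 64-bit
division helpers that 32-bit kernels avoid in this path; sector_div() divides a
sector_t in place and returns the remainder, which is why each 'stripe % raid_disks'
becomes 'sector_div(stripe2, raid_disks)' applied to a scratch copy. A rough
user-space sketch of that usage pattern, not the kernel's arch-optimized macro:

	#include <stdint.h>

	typedef uint64_t sector_t;

	/* divide *n by base in place and return the remainder,
	 * mirroring how sector_div() is used in the hunks above */
	static inline uint32_t sketch_sector_div(sector_t *n, uint32_t base)
	{
		uint32_t rem = (uint32_t)(*n % base);
		*n /= base;
		return rem;
	}

	/* usage, shaped like the raid5 code (names are illustrative):
	 *   pd_idx = data_disks - sketch_sector_div(&stripe2, raid_disks);
	 */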