about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c | 40
1 file changed, 14 insertions(+), 26 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9fc50487e2ed..6e4db95cebb1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -35,12 +35,10 @@
 #define	STRIPE_SHIFT		(PAGE_SHIFT - 9)
 #define	STRIPE_SECTORS		(STRIPE_SIZE>>9)
 #define	IO_THRESHOLD		1
-#define HASH_PAGES		1
-#define HASH_PAGES_ORDER	0
-#define NR_HASH			(HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
+#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
 #define	HASH_MASK		(NR_HASH - 1)
 
-#define stripe_hash(conf, sect)	((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
+#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
 
 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
  * order without overlap.  There may be several bio's per stripe+device, and
@@ -113,29 +111,21 @@ static void release_stripe(struct stripe_head *sh)
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 }
 
-static void remove_hash(struct stripe_head *sh)
+static inline void remove_hash(struct stripe_head *sh)
 {
 	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
 
-	if (sh->hash_pprev) {
-		if (sh->hash_next)
-			sh->hash_next->hash_pprev = sh->hash_pprev;
-		*sh->hash_pprev = sh->hash_next;
-		sh->hash_pprev = NULL;
-	}
+	hlist_del_init(&sh->hash);
 }
 
-static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
+static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
-	struct stripe_head **shp = &stripe_hash(conf, sh->sector);
+	struct hlist_head *hp = stripe_hash(conf, sh->sector);
 
 	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
 
 	CHECK_DEVLOCK();
-	if ((sh->hash_next = *shp) != NULL)
-		(*shp)->hash_pprev = &sh->hash_next;
-	*shp = sh;
-	sh->hash_pprev = shp;
+	hlist_add_head(&sh->hash, hp);
 }
 
 
@@ -228,10 +218,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
 {
 	struct stripe_head *sh;
+	struct hlist_node *hn;
 
 	CHECK_DEVLOCK();
 	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
-	for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
+	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
 		if (sh->sector == sector)
 			return sh;
 	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -1835,9 +1826,8 @@ static int run(mddev_t *mddev)
 
 	conf->mddev = mddev;
 
-	if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
+	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
 		goto abort;
-	memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
 
 	spin_lock_init(&conf->device_lock);
 	init_waitqueue_head(&conf->wait_for_stripe);
@@ -1972,9 +1962,7 @@ static int run(mddev_t *mddev)
 abort:
 	if (conf) {
 		print_raid5_conf(conf);
-		if (conf->stripe_hashtbl)
-			free_pages((unsigned long) conf->stripe_hashtbl,
-							HASH_PAGES_ORDER);
+		kfree(conf->stripe_hashtbl);
 		kfree(conf);
 	}
 	mddev->private = NULL;
@@ -1991,7 +1979,7 @@ static int stop(mddev_t *mddev)
 	md_unregister_thread(mddev->thread);
 	mddev->thread = NULL;
 	shrink_stripes(conf);
-	free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
+	kfree(conf->stripe_hashtbl);
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
 	kfree(conf);
@@ -2019,12 +2007,12 @@ static void print_sh (struct stripe_head *sh)
 static void printall (raid5_conf_t *conf)
 {
 	struct stripe_head *sh;
+	struct hlist_node *hn;
 	int i;
 
 	spin_lock_irq(&conf->device_lock);
 	for (i = 0; i < NR_HASH; i++) {
-		sh = conf->stripe_hashtbl[i];
-		for (; sh; sh = sh->hash_next) {
+		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
 			if (sh->raid_conf != conf)
 				continue;
 			print_sh(sh);