author     Andre Noll <maan@systemlinux.org>            2009-01-08 16:31:08 -0500
committer  NeilBrown <neilb@suse.de>                    2009-01-08 16:31:08 -0500
commit     ccacc7d2cf03114a24ab903f710118e9e5d43273 (patch)
tree       c856e2a17f6c6a26996a8cfba87680a2375061d5 /drivers
parent     83838ed87898e0a8ff8dbf001e54e6c017f0a011 (diff)
md: raid0: make hash_spacing and preshift sector-based.
This patch renames the hash_spacing and preshift members of struct
raid0_private_data to spacing and sector_shift respectively and
changes the semantics as follows:
We always have spacing = 2 * hash_spacing. In case
sizeof(sector_t) > sizeof(u32) we also have sector_shift = preshift + 1,
while sector_shift = preshift = 0 otherwise.
Note that the values of nb_zone and zone are unaffected by these changes
because both arguments of the sector_div() preceding the assignment of
these two variables double.
Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
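
The note about nb_zone can be checked with a small userspace sketch
(illustration only, not part of the patch; the array size and spacing
values below are made up). Since mddev->array_sectors is accumulated as
rdev->size * 2 and is therefore even, halving it is exact, and doubling
both arguments of the round-up division leaves the quotient, i.e.
nb_zone, unchanged:

#include <stdio.h>
#include <stdint.h>

/* Round-up division, standing in for the sector_div()-based
 * computation of nb_zone in raid0_run(). */
static unsigned nb_zone(uint64_t size, uint64_t spacing)
{
        return (unsigned)(size / spacing + (size % spacing ? 1 : 0));
}

int main(void)
{
        uint64_t array_sectors = 1000000;      /* made-up array size in sectors (even by construction) */
        uint64_t hash_spacing = 12345;         /* old unit: 1K blocks, i.e. sectors / 2 */
        uint64_t spacing = 2 * hash_spacing;   /* new unit: sectors */

        /* old semantics: array size and spacing both in 1K blocks */
        unsigned old_nb = nb_zone(array_sectors / 2, hash_spacing);
        /* new semantics: array size and spacing both in sectors */
        unsigned new_nb = nb_zone(array_sectors, spacing);

        printf("old nb_zone = %u, new nb_zone = %u\n", old_nb, new_nb);
        return 0;
}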
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/raid0.c  |  58
1 file changed, 29 insertions, 29 deletions
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6e85e88bbae9..90f5b24f6e6c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -213,16 +213,16 @@ static int create_strip_zones (mddev_t *mddev)
          * strip though as it's size has no bearing on the efficacy of the hash
          * table.
          */
-        conf->hash_spacing = curr_zone_start / 2;
-        min_spacing = curr_zone_start / 2;
+        conf->spacing = curr_zone_start;
+        min_spacing = curr_zone_start;
         sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
         for (i=0; i < conf->nr_strip_zones-1; i++) {
-                sector_t sz = 0;
-                for (j=i; j<conf->nr_strip_zones-1 &&
-                        sz < min_spacing ; j++)
-                        sz += conf->strip_zone[j].sectors / 2;
-                if (sz >= min_spacing && sz < conf->hash_spacing)
-                        conf->hash_spacing = sz;
+                sector_t s = 0;
+                for (j = i; j < conf->nr_strip_zones - 1 &&
+                        s < min_spacing; j++)
+                        s += conf->strip_zone[j].sectors;
+                if (s >= min_spacing && s < conf->spacing)
+                        conf->spacing = s;
         }
 
         mddev->queue->unplug_fn = raid0_unplug;
@@ -265,7 +265,7 @@ static int raid0_mergeable_bvec(struct request_queue *q,
 static int raid0_run (mddev_t *mddev)
 {
         unsigned cur=0, i=0, nb_zone;
-        s64 size;
+        s64 sectors;
         raid0_conf_t *conf;
         mdk_rdev_t *rdev;
         struct list_head *tmp;
@@ -297,51 +297,51 @@ static int raid0_run (mddev_t *mddev)
         rdev_for_each(rdev, tmp, mddev)
                 mddev->array_sectors += rdev->size * 2;
 
-        printk("raid0 : md_size is %llu blocks.\n",
-                (unsigned long long)mddev->array_sectors / 2);
-        printk("raid0 : conf->hash_spacing is %llu blocks.\n",
-                (unsigned long long)conf->hash_spacing);
+        printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
+                (unsigned long long)mddev->array_sectors);
+        printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
+                (unsigned long long)conf->spacing);
         {
-                sector_t s = mddev->array_sectors / 2;
-                sector_t space = conf->hash_spacing;
+                sector_t s = mddev->array_sectors;
+                sector_t space = conf->spacing;
                 int round;
-                conf->preshift = 0;
+                conf->sector_shift = 0;
                 if (sizeof(sector_t) > sizeof(u32)) {
                         /*shift down space and s so that sector_div will work */
                         while (space > (sector_t) (~(u32)0)) {
                                 s >>= 1;
                                 space >>= 1;
                                 s += 1; /* force round-up */
-                                conf->preshift++;
+                                conf->sector_shift++;
                         }
                 }
                 round = sector_div(s, (u32)space) ? 1 : 0;
                 nb_zone = s + round;
         }
-        printk("raid0 : nb_zone is %d.\n", nb_zone);
+        printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
 
-        printk("raid0 : Allocating %Zd bytes for hash.\n",
+        printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
                 nb_zone*sizeof(struct strip_zone*));
         conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
         if (!conf->hash_table)
                 goto out_free_conf;
-        size = conf->strip_zone[cur].sectors / 2;
+        sectors = conf->strip_zone[cur].sectors;
 
         conf->hash_table[0] = conf->strip_zone + cur;
         for (i=1; i< nb_zone; i++) {
-                while (size <= conf->hash_spacing) {
+                while (sectors <= conf->spacing) {
                         cur++;
-                        size += conf->strip_zone[cur].sectors / 2;
+                        sectors += conf->strip_zone[cur].sectors;
                 }
-                size -= conf->hash_spacing;
+                sectors -= conf->spacing;
                 conf->hash_table[i] = conf->strip_zone + cur;
         }
-        if (conf->preshift) {
-                conf->hash_spacing >>= conf->preshift;
-                /* round hash_spacing up so when we divide by it, we
+        if (conf->sector_shift) {
+                conf->spacing >>= conf->sector_shift;
+                /* round spacing up so when we divide by it, we
                  * err on the side of too-low, which is safest
                  */
-                conf->hash_spacing++;
+                conf->spacing++;
         }
 
         /* calculate the max read-ahead size.
@@ -435,8 +435,8 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 
 
         {
-                sector_t x = sector >> (conf->preshift + 1);
-                sector_div(x, (u32)conf->hash_spacing);
+                sector_t x = sector >> conf->sector_shift;
+                sector_div(x, (u32)conf->spacing);
                 zone = conf->hash_table[x];
         }
 