author	Andre Noll <maan@systemlinux.org>	2009-06-16 02:47:10 -0400
committer	NeilBrown <neilb@suse.de>	2009-06-16 02:47:10 -0400
commit	8f79cfcdb65472f1504ade2f53e5f2bfdaeb95da (patch)
tree	381814b9d62a338156ada3fbd92daca908c724eb /drivers/md
parent	09770e0b6ee649313611a2d6a9b44f456072dbd6 (diff)
md: raid0: Remove hash spacing and sector shift.
The "sector_shift" and "spacing" fields of struct raid0_private_data were only used for the hash table lookups. So the removal of the hash table allows get rid of these fields as well which simplifies create_strip_zones() and raid0_run() quite a bit. Signed-off-by: Andre Noll <maan@systemlinux.org> Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid0.c	63
-rw-r--r--	drivers/md/raid0.h	3
2 files changed, 1 insertion(+), 65 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d4c9c5d5d7f5..edffc4940b49 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -56,7 +56,6 @@ static int create_strip_zones (mddev_t *mddev)
 {
 	int i, c, j;
 	sector_t curr_zone_end;
-	sector_t min_spacing;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
 	struct strip_zone *zone;
@@ -202,28 +201,7 @@ static int create_strip_zones (mddev_t *mddev)
 		printk(KERN_INFO "raid0: current zone start: %llu\n",
 			(unsigned long long)smallest->sectors);
 	}
-	/* Now find appropriate hash spacing.
-	 * We want a number which causes most hash entries to cover
-	 * at most two strips, but the hash table must be at most
-	 * 1 PAGE. We choose the smallest strip, or contiguous collection
-	 * of strips, that has big enough size. We never consider the last
-	 * strip though as it's size has no bearing on the efficacy of the hash
-	 * table.
-	 */
-	conf->spacing = curr_zone_end;
-	min_spacing = curr_zone_end;
-	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
-	for (i=0; i < conf->nr_strip_zones-1; i++) {
-		sector_t s = 0;
-		for (j = i; j < conf->nr_strip_zones - 1 &&
-			     s < min_spacing; j++)
-			s += conf->strip_zone[j].sectors;
-		if (s >= min_spacing && s < conf->spacing)
-			conf->spacing = s;
-	}
-
 	mddev->queue->unplug_fn = raid0_unplug;
-
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
@@ -273,10 +251,8 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 	return array_sectors;
 }
 
-static int raid0_run (mddev_t *mddev)
+static int raid0_run(mddev_t *mddev)
 {
-	unsigned cur=0, i=0, nb_zone;
-	s64 sectors;
 	raid0_conf_t *conf;
 
 	if (mddev->chunk_size == 0) {
@@ -306,43 +282,6 @@ static int raid0_run (mddev_t *mddev)
 
 	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
 		(unsigned long long)mddev->array_sectors);
-	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
-		(unsigned long long)conf->spacing);
-	{
-		sector_t s = raid0_size(mddev, 0, 0);
-		sector_t space = conf->spacing;
-		int round;
-		conf->sector_shift = 0;
-		if (sizeof(sector_t) > sizeof(u32)) {
-			/*shift down space and s so that sector_div will work */
-			while (space > (sector_t) (~(u32)0)) {
-				s >>= 1;
-				space >>= 1;
-				s += 1; /* force round-up */
-				conf->sector_shift++;
-			}
-		}
-		round = sector_div(s, (u32)space) ? 1 : 0;
-		nb_zone = s + round;
-	}
-	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
-	sectors = conf->strip_zone[cur].sectors;
-
-	for (i=1; i< nb_zone; i++) {
-		while (sectors <= conf->spacing) {
-			cur++;
-			sectors += conf->strip_zone[cur].sectors;
-		}
-		sectors -= conf->spacing;
-	}
-	if (conf->sector_shift) {
-		conf->spacing >>= conf->sector_shift;
-		/* round spacing up so when we divide by it, we
-		 * err on the side of too-low, which is safest
-		 */
-		conf->spacing++;
-	}
-
 	/* calculate the max read-ahead size.
 	 * For read-ahead of large files to be effective, we need to
 	 * readahead at least twice a whole stripe. i.e. number of devices
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index a14630a25aa4..dbcf1da916b7 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -15,9 +15,6 @@ struct raid0_private_data
 	struct strip_zone *strip_zone;
 	mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
 	int nr_strip_zones;
-
-	sector_t spacing;
-	int sector_shift; /* shift this before divide by spacing */
 };
 
 typedef struct raid0_private_data raid0_conf_t;