Diffstat (limited to 'drivers/md'):

 drivers/md/bitmap.c    | 44 ++++++++++++++++++++++----------------------
 drivers/md/md.c        |  2 +-
 drivers/md/raid0.c     |  2 +-
 drivers/md/raid1.c     | 10 +++++-----
 drivers/md/raid10.c    |  8 ++++----
 drivers/md/raid5.c     |  4 ++--
 drivers/md/raid6main.c |  6 +++---
 7 files changed, 38 insertions(+), 38 deletions(-)
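The substitution is mechanical: md's uses of the page-cache reference helpers and size constants are replaced by the bare page helpers, and the raid1/raid10 buffer-release paths switch from __free_page() to put_page(), which drops a reference and frees the page once the last reference is gone. For orientation, the page-cache names were at the time simple aliases of the bare page API; the sketch below paraphrases the historical definitions from include/linux/pagemap.h and is illustrative, not an exact quote of the header:

    /* Paraphrased historical aliases (include/linux/pagemap.h).  The
     * page-cache names carried no extra semantics, so code that is not
     * actually dealing with the page cache can use the bare page API. */
    #define page_cache_get(page)        get_page(page)
    #define page_cache_release(page)    put_page(page)
    #define PAGE_CACHE_SHIFT            PAGE_SHIFT
    #define PAGE_CACHE_SIZE             PAGE_SIZE
    #define PAGE_CACHE_MASK             PAGE_MASK

With these aliases in place the change is intended as a cleanup rather than a change in behaviour; it only makes the page reference counting explicit at the call sites.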
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b65c36d9e240..fc05d1205aa0 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -341,7 +341,7 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
 		/* add to list to be waited for by daemon */
 		struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
 		item->page = page;
-		page_cache_get(page);
+		get_page(page);
 		spin_lock(&bitmap->write_lock);
 		list_add(&item->list, &bitmap->complete_pages);
 		spin_unlock(&bitmap->write_lock);
@@ -357,10 +357,10 @@ static struct page *read_page(struct file *file, unsigned long index,
 	struct inode *inode = file->f_mapping->host;
 	struct page *page = NULL;
 	loff_t isize = i_size_read(inode);
-	unsigned long end_index = isize >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = isize >> PAGE_SHIFT;
 
-	PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE,
-			(unsigned long long)index << PAGE_CACHE_SHIFT);
+	PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
+			(unsigned long long)index << PAGE_SHIFT);
 
 	page = read_cache_page(inode->i_mapping, index,
 			(filler_t *)inode->i_mapping->a_ops->readpage, file);
@@ -368,7 +368,7 @@ static struct page *read_page(struct file *file, unsigned long index,
 		goto out;
 	wait_on_page_locked(page);
 	if (!PageUptodate(page) || PageError(page)) {
-		page_cache_release(page);
+		put_page(page);
 		page = ERR_PTR(-EIO);
 		goto out;
 	}
@@ -376,14 +376,14 @@ static struct page *read_page(struct file *file, unsigned long index,
 	if (index > end_index) /* we have read beyond EOF */
 		*bytes_read = 0;
 	else if (index == end_index) /* possible short read */
-		*bytes_read = isize & ~PAGE_CACHE_MASK;
+		*bytes_read = isize & ~PAGE_MASK;
 	else
-		*bytes_read = PAGE_CACHE_SIZE; /* got a full page */
+		*bytes_read = PAGE_SIZE; /* got a full page */
 out:
 	if (IS_ERR(page))
 		printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
-			(int)PAGE_CACHE_SIZE,
-			(unsigned long long)index << PAGE_CACHE_SHIFT,
+			(int)PAGE_SIZE,
+			(unsigned long long)index << PAGE_SHIFT,
 			PTR_ERR(page));
 	return page;
 }
@@ -558,7 +558,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 		return;
 	}
-	page_cache_get(bitmap->sb_page);
+	get_page(bitmap->sb_page);
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
 	switch (op) {
@@ -569,7 +569,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 		default: BUG();
 	}
 	kunmap(bitmap->sb_page);
-	page_cache_release(bitmap->sb_page);
+	put_page(bitmap->sb_page);
 }
 
 /*
@@ -622,12 +622,12 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
 
 	while (pages--)
 		if (map[pages]->index != 0) /* 0 is sb_page, release it below */
-			page_cache_release(map[pages]);
+			put_page(map[pages]);
 	kfree(map);
 	kfree(attr);
 
 	if (sb_page)
-		page_cache_release(sb_page);
+		put_page(sb_page);
 }
 
 static void bitmap_stop_daemon(struct bitmap *bitmap);
@@ -654,7 +654,7 @@ static void drain_write_queues(struct bitmap *bitmap)
 
 	while ((item = dequeue_page(bitmap))) {
 		/* don't bother to wait */
-		page_cache_release(item->page);
+		put_page(item->page);
 		mempool_free(item, bitmap->write_pool);
 	}
 
@@ -763,7 +763,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 
 	/* make sure the page stays cached until it gets written out */
 	if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
-		page_cache_get(page);
+		get_page(page);
 
 	/* set the bit */
 	kaddr = kmap_atomic(page, KM_USER0);
@@ -938,7 +938,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 			if (ret) {
 				kunmap(page);
 				/* release, page not in filemap yet */
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 		}
@@ -1043,7 +1043,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 		/* skip this page unless it's marked as needing cleaning */
 		if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
 			if (attr & BITMAP_PAGE_NEEDWRITE) {
-				page_cache_get(page);
+				get_page(page);
 				clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
 			}
 			spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1057,13 +1057,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 				default:
 					bitmap_file_kick(bitmap);
 				}
-				page_cache_release(page);
+				put_page(page);
 			}
 			continue;
 		}
 
 		/* grab the new page, sync and release the old */
-		page_cache_get(page);
+		get_page(page);
 		if (lastpage != NULL) {
 			if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
 				clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
@@ -1078,7 +1078,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 				spin_unlock_irqrestore(&bitmap->lock, flags);
 			}
 			kunmap(lastpage);
-			page_cache_release(lastpage);
+			put_page(lastpage);
 			if (err)
 				bitmap_file_kick(bitmap);
 		} else
@@ -1133,7 +1133,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 			spin_unlock_irqrestore(&bitmap->lock, flags);
 		}
 
-		page_cache_release(lastpage);
+		put_page(lastpage);
 	}
 
 	return err;
@@ -1184,7 +1184,7 @@ static void bitmap_writeback_daemon(mddev_t *mddev)
 		PRINTK("finished page writeback: %p\n", page);
 
 		err = PageError(page);
-		page_cache_release(page);
+		put_page(page);
 		if (err) {
 			printk(KERN_WARNING "%s: bitmap file writeback "
 				"failed (page %lu): %d\n",
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6101879a730f..c3ac67cffe62 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -339,7 +339,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
 static void free_disk_sb(mdk_rdev_t * rdev)
 {
 	if (rdev->sb_page) {
-		page_cache_release(rdev->sb_page);
+		put_page(rdev->sb_page);
 		rdev->sb_loaded = 0;
 		rdev->sb_page = NULL;
 		rdev->sb_offset = 0;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index fece3277c2a5..a2c2e184c0ac 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,7 @@ static int raid0_run (mddev_t *mddev)
 	 * chunksize should be used in that case.
 	 */
 	{
-		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
+		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6c10f28bc25e..bbe0b817572b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -139,7 +139,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 out_free_pages:
 	for (i=0; i < RESYNC_PAGES ; i++)
 		for (j=0 ; j < pi->raid_disks; j++)
-			__free_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
+			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
 	j = -1;
 out_free_bio:
 	while ( ++j < pi->raid_disks )
@@ -159,7 +159,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 		if (j == 0 ||
 		    r1bio->bios[j]->bi_io_vec[i].bv_page !=
 		    r1bio->bios[0]->bi_io_vec[i].bv_page)
-			__free_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
+			put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
 	}
 	for (i=0 ; i < pi->raid_disks; i++)
 		bio_put(r1bio->bios[i]);
@@ -384,7 +384,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 			/* free extra copy of the data pages */
 			int i = bio->bi_vcnt;
 			while (i--)
-				__free_page(bio->bi_io_vec[i].bv_page);
+				put_page(bio->bi_io_vec[i].bv_page);
 		}
 		/* clear the bitmap if all writes complete successfully */
 		bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
@@ -733,7 +733,7 @@ static struct page **alloc_behind_pages(struct bio *bio)
 do_sync_io:
 	if (pages)
 		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
-			__free_page(pages[i]);
+			put_page(pages[i]);
 	kfree(pages);
 	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 	return NULL;
@@ -1893,7 +1893,7 @@ out_free_conf:
 		if (conf->r1bio_pool)
 			mempool_destroy(conf->r1bio_pool);
 		kfree(conf->mirrors);
-		__free_page(conf->tmppage);
+		put_page(conf->tmppage);
 		kfree(conf->poolinfo);
 		kfree(conf);
 		mddev->private = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3f8df2ecbae3..ce729d6daf78 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -134,10 +134,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 
 out_free_pages:
 	for ( ; i > 0 ; i--)
-		__free_page(bio->bi_io_vec[i-1].bv_page);
+		put_page(bio->bi_io_vec[i-1].bv_page);
 	while (j--)
 		for (i = 0; i < RESYNC_PAGES ; i++)
-			__free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
+			put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
 	j = -1;
 out_free_bio:
 	while ( ++j < nalloc )
@@ -157,7 +157,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
 		struct bio *bio = r10bio->devs[j].bio;
 		if (bio) {
 			for (i = 0; i < RESYNC_PAGES; i++) {
-				__free_page(bio->bi_io_vec[i].bv_page);
+				put_page(bio->bi_io_vec[i].bv_page);
 				bio->bi_io_vec[i].bv_page = NULL;
 			}
 			bio_put(bio);
@@ -2015,7 +2015,7 @@ static int run(mddev_t *mddev)
 	 * maybe...
 	 */
 	{
-		int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
+		int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
 		stripe /= conf->near_copies;
 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0222ba1a6d35..ec5186fd510a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -167,7 +167,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
 		if (!p)
 			continue;
 		sh->dev[i].page = NULL;
-		page_cache_release(p);
+		put_page(p);
 	}
 }
 
@@ -1956,7 +1956,7 @@ static int run(mddev_t *mddev)
 	 */
 	{
 		int stripe = (mddev->raid_disks-1) * mddev->chunk_size
-			/ PAGE_CACHE_SIZE;
+			/ PAGE_SIZE;
 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
 	}
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index b5b7a8d0b165..4062fc16ac2b 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -186,7 +186,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
 		if (!p)
 			continue;
 		sh->dev[i].page = NULL;
-		page_cache_release(p);
+		put_page(p);
 	}
 }
 
@@ -2069,7 +2069,7 @@ static int run(mddev_t *mddev)
 	 */
 	{
 		int stripe = (mddev->raid_disks-2) * mddev->chunk_size
-			/ PAGE_CACHE_SIZE;
+			/ PAGE_SIZE;
 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
 	}
@@ -2084,7 +2084,7 @@ abort:
 	if (conf) {
 		print_raid6_conf(conf);
 		if (conf->spare_page)
-			page_cache_release(conf->spare_page);
+			put_page(conf->spare_page);
 		if (conf->stripe_hashtbl)
 			free_pages((unsigned long) conf->stripe_hashtbl,
 				   HASH_PAGES_ORDER);