author		Milan Broz <mbroz@redhat.com>	2007-05-09 05:33:05 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:47 -0400
commit		bf17ce3a604d943f29bf1bc1a66a4e0d2ad4ec96 (patch)
tree		531058ab5ad63fb2e104bfadddbdbde080a0289b /drivers/md
parent		88be163abb5324bab09f5eff9646590eec5314eb (diff)
dm io: remove old interface
Remove old dm-io interface.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
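For callers migrating off the removed wrappers, each old entry point corresponds to a dm_io() call with the matching dm_io_request memory type and an optional completion callback. Below is a minimal conversion sketch, assuming the 2.6.22-era dm-io API; the function read_region_vm() and its variables are illustrative, not part of this commit.

/*
 * Hypothetical caller: an old dm_io_sync_vm() call rewritten against the
 * surviving dm_io() interface. Assumes the 2.6.22-era API.
 */
#include "dm-io.h"

static int read_region_vm(struct dm_io_client *client,
			  struct io_region *where, void *data)
{
	unsigned long error_bits = 0;
	struct dm_io_request io_req = {
		.bi_rw = READ,		/* READ or WRITE, not READA */
		.mem.type = DM_IO_VMA,	/* vmalloc'd/kernel memory */
		.mem.ptr.vaddr = data,
		.notify.fn = NULL,	/* NULL notify.fn => synchronous */
		.client = client,	/* private pools, no module globals */
	};

	/* Old interface: dm_io_sync_vm(1, where, READ, data, &error_bits); */
	return dm_io(&io_req, 1, where, &error_bits);
}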
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-io.c	131
-rw-r--r--	drivers/md/dm-io.h	51
2 files changed, 7 insertions(+), 175 deletions(-)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0c63809ab70e..352c6fbeac53 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -13,8 +13,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-static struct bio_set *_bios;
-
 struct dm_io_client {
 	mempool_t *pool;
 	struct bio_set *bios;
@@ -35,74 +33,12 @@ struct io {
  * io. Since async io is likely to be the majority of io we'll
  * have the same number of io contexts as bios! (FIXME: must reduce this).
  */
-static unsigned _num_ios;
-static mempool_t *_io_pool;
-
-/*
- * Temporary functions to allow old and new interfaces to co-exist.
- */
-static struct bio_set *bios(struct dm_io_client *client)
-{
-	return client ? client->bios : _bios;
-}
-
-static mempool_t *io_pool(struct dm_io_client *client)
-{
-	return client ? client->pool : _io_pool;
-}
 
 static unsigned int pages_to_ios(unsigned int pages)
 {
 	return 4 * pages;	/* too many ? */
 }
 
-static int resize_pool(unsigned int new_ios)
-{
-	int r = 0;
-
-	if (_io_pool) {
-		if (new_ios == 0) {
-			/* free off the pool */
-			mempool_destroy(_io_pool);
-			_io_pool = NULL;
-			bioset_free(_bios);
-
-		} else {
-			/* resize the pool */
-			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
-		}
-
-	} else {
-		/* create new pool */
-		_io_pool = mempool_create_kmalloc_pool(new_ios,
-						       sizeof(struct io));
-		if (!_io_pool)
-			return -ENOMEM;
-
-		_bios = bioset_create(16, 16);
-		if (!_bios) {
-			mempool_destroy(_io_pool);
-			_io_pool = NULL;
-			return -ENOMEM;
-		}
-	}
-
-	if (!r)
-		_num_ios = new_ios;
-
-	return r;
-}
-
-int dm_io_get(unsigned int num_pages)
-{
-	return resize_pool(_num_ios + pages_to_ios(num_pages));
-}
-
-void dm_io_put(unsigned int num_pages)
-{
-	resize_pool(_num_ios - pages_to_ios(num_pages));
-}
-
 /*
  * Create a client with mempool and bioset.
  */
@@ -182,7 +118,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
 			io_notify_fn fn = io->callback;
 			void *context = io->context;
 
-			mempool_free(io, io_pool(io->client));
+			mempool_free(io, io->client->pool);
 			fn(r, context);
 		}
 	}
@@ -310,7 +246,7 @@ static void dm_bio_destructor(struct bio *bio)
 {
 	struct io *io = bio->bi_private;
 
-	bio_free(bio, bios(io->client));
+	bio_free(bio, io->client->bios);
 }
 
 /*
@@ -358,7 +294,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 		 * to hide it from bio_add_page().
 		 */
 		num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
-		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, bios(io->client));
+		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
 		bio->bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
 		bio->bi_end_io = endio;
@@ -462,7 +398,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 		return -EIO;
 	}
 
-	io = mempool_alloc(io_pool(client), GFP_NOIO);
+	io = mempool_alloc(client->pool, GFP_NOIO);
 	io->error = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = NULL;
@@ -474,56 +410,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 	return 0;
 }
 
-int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
-	       struct page_list *pl, unsigned int offset,
-	       unsigned long *error_bits)
-{
-	struct dpages dp;
-	list_dp_init(&dp, pl, offset);
-	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
-}
-
-int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		    struct bio_vec *bvec, unsigned long *error_bits)
-{
-	struct dpages dp;
-	bvec_dp_init(&dp, bvec);
-	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
-}
-
-int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
-		  void *data, unsigned long *error_bits)
-{
-	struct dpages dp;
-	vm_dp_init(&dp, data);
-	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
-}
-
-int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
-		struct page_list *pl, unsigned int offset,
-		io_notify_fn fn, void *context)
-{
-	struct dpages dp;
-	list_dp_init(&dp, pl, offset);
-	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
-}
-
-int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		     struct bio_vec *bvec, io_notify_fn fn, void *context)
-{
-	struct dpages dp;
-	bvec_dp_init(&dp, bvec);
-	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
-}
-
-int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
-		   void *data, io_notify_fn fn, void *context)
-{
-	struct dpages dp;
-	vm_dp_init(&dp, data);
-	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
-}
-
 static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 {
 	/* Set up dpages based on memory type */
@@ -572,12 +458,3 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 			&dp, io_req->notify.fn, io_req->notify.context);
 }
 EXPORT_SYMBOL(dm_io);
-
-EXPORT_SYMBOL(dm_io_get);
-EXPORT_SYMBOL(dm_io_put);
-EXPORT_SYMBOL(dm_io_sync);
-EXPORT_SYMBOL(dm_io_async);
-EXPORT_SYMBOL(dm_io_sync_bvec);
-EXPORT_SYMBOL(dm_io_async_bvec);
-EXPORT_SYMBOL(dm_io_sync_vm);
-EXPORT_SYMBOL(dm_io_async_vm);
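With the wrappers gone, dm-io.c no longer carries the module-global _io_pool and _bios that dm_io_get()/dm_io_put() sized; each user now owns private pools through the client API kept in dm-io.h below. A lifecycle sketch under the same era assumptions (error handling abbreviated; num_pages is the caller's estimate of pages under io concurrently):

	/* Hypothetical setup replacing dm_io_get()/dm_io_put() accounting. */
	struct dm_io_client *client;

	client = dm_io_client_create(num_pages);  /* was dm_io_get(num_pages) */
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* ... issue io via dm_io() with io_req.client = client ... */

	dm_io_client_destroy(client);             /* was dm_io_put(num_pages) */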
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index 05b133825580..f647e2cceaa6 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -12,7 +12,7 @@
 struct io_region {
 	struct block_device *bdev;
 	sector_t sector;
-	sector_t count;
+	sector_t count;		/* If this is zero the region is ignored. */
 };
 
 struct page_list {
@@ -20,10 +20,6 @@ struct page_list {
 	struct page *page;
 };
 
-/*
- * 'error' is a bitset, with each bit indicating whether an error
- * occurred doing io to the corresponding region.
- */
 typedef void (*io_notify_fn)(unsigned long error, void *context);
 
 enum dm_io_mem_type {
@@ -63,16 +59,6 @@ struct dm_io_request {
 };
 
 /*
- * Before anyone uses the IO interface they should call
- * dm_io_get(), specifying roughly how many pages they are
- * expecting to perform io on concurrently.
- *
- * This function may block.
- */
-int dm_io_get(unsigned int num_pages);
-void dm_io_put(unsigned int num_pages);
-
-/*
  * For async io calls, users can alternatively use the dm_io() function below
  * and dm_io_client_create() to create private mempools for the client.
  *
@@ -83,40 +69,9 @@ int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
 void dm_io_client_destroy(struct dm_io_client *client);
 
 /*
- * Synchronous IO.
- *
- * Please ensure that the rw flag in the next two functions is
- * either READ or WRITE, ie. we don't take READA. Any
- * regions with a zero count field will be ignored.
- */
-int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
-	       struct page_list *pl, unsigned int offset,
-	       unsigned long *error_bits);
-
-int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		    struct bio_vec *bvec, unsigned long *error_bits);
-
-int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
-		  void *data, unsigned long *error_bits);
-
-/*
- * Aynchronous IO.
- *
- * The 'where' array may be safely allocated on the stack since
- * the function takes a copy.
- */
-int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
-		struct page_list *pl, unsigned int offset,
-		io_notify_fn fn, void *context);
-
-int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		     struct bio_vec *bvec, io_notify_fn fn, void *context);
-
-int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
-		   void *data, io_notify_fn fn, void *context);
-
-/*
  * IO interface using private per-client pools.
+ * Each bit in the optional 'sync_error_bits' bitset indicates whether an
+ * error occurred doing io to the corresponding region.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	  struct io_region *region, unsigned long *sync_error_bits);
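A non-NULL notify.fn selects the asynchronous path, in which case the final sync_error_bits argument is unused and the callback receives the per-region error bitset instead. A sketch of an asynchronous write equivalent to the removed dm_io_async_bvec(), again with hypothetical names:

/* Hypothetical async caller; each set bit in 'error' flags a failed region. */
static void my_io_complete(unsigned long error, void *context)
{
	/* inspect 'error', release 'context', etc. */
}

static int write_bvec_async(struct dm_io_client *client,
			    struct io_region *where, struct bio_vec *bvec,
			    void *context)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bvec,
		.notify.fn = my_io_complete,	/* non-NULL => asynchronous */
		.notify.context = context,
		.client = client,
	};

	return dm_io(&io_req, 1, where, NULL);	/* error bits go to callback */
}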