author		Heinz Mauelshagen <hjm@redhat.com>	2007-05-09 05:33:01 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:47 -0400
commit		c8b03afe3d38a635861e4bfa5c563d844e754a91 (patch)
tree		d23d2aa8d6cec93bab6e23ffd2199509e7d85113 /drivers
parent		891ce207011d3d9219f79fd5114c8594bbacc653 (diff)
dm io: new interface
Add a new API to dm-io.c that uses a private mempool and bio_set for each
client.

The new functions to use are dm_io_client_create(), dm_io_client_destroy(),
dm_io_client_resize() and dm_io().

Signed-off-by: Heinz Mauelshagen <hjm@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Milan Broz <mbroz@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-io.c	122
-rw-r--r--	drivers/md/dm-io.h	52
2 files changed, 173 insertions(+), 1 deletion(-)
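As an illustration of the new interface described in the commit message (not part of the patch itself), the following minimal sketch shows how a caller might issue a synchronous one-page read into kernel memory using a private client. The function example_sync_read(), its arguments and the sector count are hypothetical; the io_region fields (bdev, sector, count) and the extra includes are assumed to match the existing dm-io/dm headers.

#include <linux/err.h>
#include <linux/fs.h>		/* READ/WRITE */
#include "dm-io.h"

/* Hypothetical caller: read one page from sector 0 of bdev into buf. */
static int example_sync_read(struct block_device *bdev, void *buf)
{
	struct dm_io_client *client;
	unsigned long error_bits = 0;
	int r;
	struct io_region where = {
		.bdev = bdev,
		.sector = 0,
		.count = 8,			/* one 4K page = 8 x 512-byte sectors */
	};
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem = {
			.type = DM_IO_KMEM,	/* buf is plain kernel memory */
			.ptr.addr = buf,
		},
		.notify = { .fn = NULL },	/* NULL fn => dm_io() runs synchronously */
	};

	/* Private mempool and bio_set sized for one page of concurrent io. */
	client = dm_io_client_create(1);
	if (IS_ERR(client))
		return PTR_ERR(client);
	io_req.client = client;

	r = dm_io(&io_req, 1, &where, &error_bits);

	dm_io_client_destroy(client);
	if (r)
		return r;
	return error_bits ? -EIO : 0;		/* any set bit means that region failed */
}

With a non-NULL notify.fn the same request structure drives the asynchronous path; a second sketch of that variant follows the diff below.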
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 66db79208c1d..0c63809ab70e 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -103,6 +103,51 @@ void dm_io_put(unsigned int num_pages)
 	resize_pool(_num_ios - pages_to_ios(num_pages));
 }
 
+/*
+ * Create a client with mempool and bioset.
+ */
+struct dm_io_client *dm_io_client_create(unsigned num_pages)
+{
+	unsigned ios = pages_to_ios(num_pages);
+	struct dm_io_client *client;
+
+	client = kmalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
+	if (!client->pool)
+		goto bad;
+
+	client->bios = bioset_create(16, 16);
+	if (!client->bios)
+		goto bad;
+
+	return client;
+
+   bad:
+	if (client->pool)
+		mempool_destroy(client->pool);
+	kfree(client);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(dm_io_client_create);
+
+int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
+{
+	return mempool_resize(client->pool, pages_to_ios(num_pages),
+			      GFP_KERNEL);
+}
+EXPORT_SYMBOL(dm_io_client_resize);
+
+void dm_io_client_destroy(struct dm_io_client *client)
+{
+	mempool_destroy(client->pool);
+	bioset_free(client->bios);
+	kfree(client);
+}
+EXPORT_SYMBOL(dm_io_client_destroy);
+
 /*-----------------------------------------------------------------
  * We need to keep track of which region a bio is doing io for.
  * In order to save a memory allocation we store this the last
@@ -236,6 +281,9 @@ static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
 	dp->context_ptr = bvec;
 }
 
+/*
+ * Functions for getting the pages from a VMA.
+ */
 static void vm_get_page(struct dpages *dp,
 		 struct page **p, unsigned long *len, unsigned *offset)
 {
@@ -265,6 +313,31 @@ static void dm_bio_destructor(struct bio *bio)
 	bio_free(bio, bios(io->client));
 }
 
+/*
+ * Functions for getting the pages from kernel memory.
+ */
+static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
+			unsigned *offset)
+{
+	*p = virt_to_page(dp->context_ptr);
+	*offset = dp->context_u;
+	*len = PAGE_SIZE - dp->context_u;
+}
+
+static void km_next_page(struct dpages *dp)
+{
+	dp->context_ptr += PAGE_SIZE - dp->context_u;
+	dp->context_u = 0;
+}
+
+static void km_dp_init(struct dpages *dp, void *data)
+{
+	dp->get_page = km_get_page;
+	dp->next_page = km_next_page;
+	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
+	dp->context_ptr = data;
+}
+
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
@@ -451,6 +524,55 @@ int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
 	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
 }
 
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+{
+	/* Set up dpages based on memory type */
+	switch (io_req->mem.type) {
+	case DM_IO_PAGE_LIST:
+		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
+		break;
+
+	case DM_IO_BVEC:
+		bvec_dp_init(dp, io_req->mem.ptr.bvec);
+		break;
+
+	case DM_IO_VMA:
+		vm_dp_init(dp, io_req->mem.ptr.vma);
+		break;
+
+	case DM_IO_KMEM:
+		km_dp_init(dp, io_req->mem.ptr.addr);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * New collapsed (a)synchronous interface
+ */
+int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+	  struct io_region *where, unsigned long *sync_error_bits)
+{
+	int r;
+	struct dpages dp;
+
+	r = dp_init(io_req, &dp);
+	if (r)
+		return r;
+
+	if (!io_req->notify.fn)
+		return sync_io(io_req->client, num_regions, where,
+			       io_req->bi_rw, &dp, sync_error_bits);
+
+	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
+			&dp, io_req->notify.fn, io_req->notify.context);
+}
+EXPORT_SYMBOL(dm_io);
+
 EXPORT_SYMBOL(dm_io_get);
 EXPORT_SYMBOL(dm_io_put);
 EXPORT_SYMBOL(dm_io_sync);
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index f9035bfd1a9f..05b133825580 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -20,13 +20,47 @@ struct page_list {
 	struct page *page;
 };
 
-
 /*
  * 'error' is a bitset, with each bit indicating whether an error
  * occurred doing io to the corresponding region.
  */
 typedef void (*io_notify_fn)(unsigned long error, void *context);
 
+enum dm_io_mem_type {
+	DM_IO_PAGE_LIST,/* Page list */
+	DM_IO_BVEC,	/* Bio vector */
+	DM_IO_VMA,	/* Virtual memory area */
+	DM_IO_KMEM,	/* Kernel memory */
+};
+
+struct dm_io_memory {
+	enum dm_io_mem_type type;
+
+	union {
+		struct page_list *pl;
+		struct bio_vec *bvec;
+		void *vma;
+		void *addr;
+	} ptr;
+
+	unsigned offset;
+};
+
+struct dm_io_notify {
+	io_notify_fn fn;	/* Callback for asynchronous requests */
+	void *context;		/* Passed to callback */
+};
+
+/*
+ * IO request structure
+ */
+struct dm_io_client;
+struct dm_io_request {
+	int bi_rw;			/* READ|WRITE - not READA */
+	struct dm_io_memory mem;	/* Memory to use for io */
+	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
+	struct dm_io_client *client;	/* Client memory handler */
+};
 
 /*
  * Before anyone uses the IO interface they should call
@@ -39,6 +73,16 @@ int dm_io_get(unsigned int num_pages);
 void dm_io_put(unsigned int num_pages);
 
 /*
+ * For async io calls, users can alternatively use the dm_io() function below
+ * and dm_io_client_create() to create private mempools for the client.
+ *
+ * Create/destroy may block.
+ */
+struct dm_io_client *dm_io_client_create(unsigned num_pages);
+int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
+void dm_io_client_destroy(struct dm_io_client *client);
+
+/*
  * Synchronous IO.
  *
  * Please ensure that the rw flag in the next two functions is
@@ -71,4 +115,10 @@ int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
 int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
 		   void *data, io_notify_fn fn, void *context);
 
+/*
+ * IO interface using private per-client pools.
+ */
+int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+	  struct io_region *region, unsigned long *sync_error_bits);
+
 #endif
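For completeness, a second hedged sketch (again not part of the patch) of the asynchronous path: a non-NULL notify.fn makes dm_io() return immediately, and the callback later receives the per-region error bitset. example_complete(), example_async_write() and the caller-supplied page_list are hypothetical names introduced only for this example.

/* Hypothetical completion callback: 'error' has one bit set per failed region. */
static void example_complete(unsigned long error, void *context)
{
	if (error)
		printk(KERN_ERR "dm-io example: error bits 0x%lx\n", error);
}

/* Hypothetical caller: write a page_list to num_regions regions, asynchronously. */
static int example_async_write(struct dm_io_client *client,
			       struct io_region *where, unsigned num_regions,
			       struct page_list *pl)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem = {
			.type = DM_IO_PAGE_LIST,
			.ptr.pl = pl,
			.offset = 0,
		},
		.notify = {
			.fn = example_complete,		/* non-NULL fn => asynchronous */
			.context = NULL,
		},
		.client = client,
	};

	/* sync_error_bits is only consulted on the synchronous path, so pass NULL. */
	return dm_io(&io_req, num_regions, where, NULL);
}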