author     Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/md/dm-io.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/md/dm-io.c')
 -rw-r--r--   drivers/md/dm-io.c   49
 1 files changed, 10 insertions, 39 deletions
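
The hunks below remove dm-io's page-count based mempool sizing (pages_to_ios(), the num_pages argument, dm_io_client_resize()) and its write-barrier handling (eopnotsupp_bits, REQ_HARDBARRIER, the retry-without-barrier loop), replacing them with fixed MIN_IOS/MIN_BIOS pool sizes and REQ_FLUSH. A rough before/after view of the only exported-API change, from a hypothetical caller's side (the page estimate of 64 is made up; any caller-supplied value worked the same way):

	/* Before: caller supplied a page estimate and could resize later */
	client = dm_io_client_create(64);
	dm_io_client_resize(64, client);

	/* After: pool sizes are fixed at MIN_IOS/MIN_BIOS inside dm-io */
	client = dm_io_client_create();

The full diff follows.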
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0590c75b0ab6..2067288f61f9 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
 #define DM_MSG_PREFIX "io"
 
 #define DM_IO_MAX_REGIONS	BITS_PER_LONG
+#define MIN_IOS		16
+#define MIN_BIOS	16
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -31,7 +33,6 @@ struct dm_io_client {
  */
 struct io {
 	unsigned long error_bits;
-	unsigned long eopnotsupp_bits;
 	atomic_t count;
 	struct task_struct *sleeper;
 	struct dm_io_client *client;
@@ -42,33 +43,21 @@ struct io {
 static struct kmem_cache *_dm_io_cache;
 
 /*
- * io contexts are only dynamically allocated for asynchronous
- * io.  Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
-	return 4 * pages;	/* too many ? */
-}
-
-/*
  * Create a client with mempool and bioset.
  */
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
 {
-	unsigned ios = pages_to_ios(num_pages);
 	struct dm_io_client *client;
 
 	client = kmalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return ERR_PTR(-ENOMEM);
 
-	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(16, 0);
+	client->bios = bioset_create(MIN_BIOS, 0);
 	if (!client->bios)
 		goto bad;
 
@@ -82,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
 }
 EXPORT_SYMBOL(dm_io_client_create);
 
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
-	return mempool_resize(client->pool, pages_to_ios(num_pages),
-			      GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
 void dm_io_client_destroy(struct dm_io_client *client)
 {
 	mempool_destroy(client->pool);
@@ -130,11 +112,8 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
  *---------------------------------------------------------------*/
 static void dec_count(struct io *io, unsigned int region, int error)
 {
-	if (error) {
+	if (error)
 		set_bit(region, &io->error_bits);
-		if (error == -EOPNOTSUPP)
-			set_bit(region, &io->eopnotsupp_bits);
-	}
 
 	if (atomic_dec_and_test(&io->count)) {
 		if (io->sleeper)
@@ -310,8 +289,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	sector_t remaining = where->count;
 
 	/*
-	 * where->count may be zero if rw holds a write barrier and we
-	 * need to send a zero-sized barrier.
+	 * where->count may be zero if rw holds a flush and we need to
+	 * send a zero-sized flush.
 	 */
 	do {
 		/*
@@ -356,7 +335,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= REQ_SYNC | REQ_UNPLUG;
+		rw |= REQ_SYNC;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -364,7 +343,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (rw & REQ_HARDBARRIER))
+		if (where[i].count || (rw & REQ_FLUSH))
 			do_region(rw, i, where + i, dp, io);
 	}
 
@@ -393,9 +372,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 		return -EIO;
 	}
 
-retry:
 	io->error_bits = 0;
-	io->eopnotsupp_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = current;
 	io->client = client;
@@ -412,11 +389,6 @@ retry:
 	}
 	set_current_state(TASK_RUNNING);
 
-	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
-		rw &= ~REQ_HARDBARRIER;
-		goto retry;
-	}
-
 	if (error_bits)
 		*error_bits = io->error_bits;
 
@@ -437,7 +409,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 
 	io = mempool_alloc(client->pool, GFP_NOIO);
 	io->error_bits = 0;
-	io->eopnotsupp_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = NULL;
 	io->client = client;
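
A minimal caller sketch, not part of this commit, showing how a device-mapper target would use the reworked interface: the client is created without a page count, and a zero-sized flush is requested with REQ_FLUSH rather than a barrier. The example_* names and the constructor/destructor framing are hypothetical; the dm-io calls themselves (dm_io_client_create, dm_io, dm_io_client_destroy) match the post-patch API.

	#include <linux/device-mapper.h>
	#include <linux/dm-io.h>
	#include <linux/err.h>

	static struct dm_io_client *example_io_client;	/* hypothetical */

	static int example_ctr(void)
	{
		/* No page-count hint any more: the client is always backed by a
		 * MIN_IOS-entry io mempool and a MIN_BIOS-entry bioset. */
		example_io_client = dm_io_client_create();
		if (IS_ERR(example_io_client))
			return PTR_ERR(example_io_client);
		return 0;
	}

	static int example_flush(struct block_device *bdev)
	{
		/* A zero-sized region plus REQ_FLUSH makes dm-io issue an empty
		 * flush; before this patch the same effect used REQ_HARDBARRIER. */
		struct dm_io_region region = {
			.bdev   = bdev,
			.sector = 0,
			.count  = 0,
		};
		struct dm_io_request io_req = {
			.bi_rw        = WRITE | REQ_FLUSH,
			.mem.type     = DM_IO_KMEM,
			.mem.ptr.addr = NULL,
			.notify.fn    = NULL,	/* NULL => synchronous dm_io() */
			.client       = example_io_client,
		};
		unsigned long error_bits = 0;

		return dm_io(&io_req, 1, &region, &error_bits);
	}

	static void example_dtr(void)
	{
		dm_io_client_destroy(example_io_client);
	}

With barriers gone, an unsupported empty flush no longer comes back as -EOPNOTSUPP the way a barrier could, which is presumably why sync_io() drops its retry-without-barrier loop and struct io loses eopnotsupp_bits.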