author		Linus Torvalds <torvalds@linux-foundation.org>	2011-05-29 14:20:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-29 14:20:48 -0400
commit		b11b06d90a41766c2d31f0acb8a87aa0f2a7188f (patch)
tree		be326cde997bcc38b2a9e0a6d6bc7d7ee7b4cbed /drivers
parent		f1d1c9fa8f360990e263bdcb73e35ab6fbdc41fe (diff)
parent		fa34ce73072f90ecd90dcc43f29d82e70e5f8676 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
dm kcopyd: return client directly and not through a pointer
dm kcopyd: reserve fewer pages
dm io: use fixed initial mempool size
dm kcopyd: alloc pages from the main page allocator
dm kcopyd: add gfp parm to alloc_pl
dm kcopyd: remove superfluous page allocation spinlock
dm kcopyd: preallocate sub jobs to avoid deadlock
dm kcopyd: avoid pointless job splitting
dm mpath: do not fail paths after integrity errors
dm table: reject devices without request fns
dm table: allow targets to support discards internally
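
Taken together, the first two commits change the shape of two exported APIs: dm_io_client_create() and dm_kcopyd_client_create() no longer take a page count, dm_io_client_resize() is removed, and kcopyd now returns its client directly via the ERR_PTR convention instead of through an output parameter. A minimal caller-side sketch of the migration follows; the example_ctx structure and function names are hypothetical, used only for illustration.

#include <linux/err.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* Hypothetical per-target context, for illustration only. */
struct example_ctx {
	struct dm_io_client *io_client;
	struct dm_kcopyd_client *kcopyd_client;
};

static int example_create_clients(struct example_ctx *ctx)
{
	/*
	 * Old style:
	 *	ctx->io_client = dm_io_client_create(nr_pages);
	 *	r = dm_kcopyd_client_create(nr_pages, &ctx->kcopyd_client);
	 * Callers used to size the pools; both clients now size themselves.
	 */
	ctx->io_client = dm_io_client_create();
	if (IS_ERR(ctx->io_client))
		return PTR_ERR(ctx->io_client);

	ctx->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(ctx->kcopyd_client)) {
		dm_io_client_destroy(ctx->io_client);
		return PTR_ERR(ctx->kcopyd_client);
	}

	return 0;
}

The dm-raid1 and dm-snapshot hunks below perform exactly this conversion in tree.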
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-io.c			 27
-rw-r--r--	drivers/md/dm-kcopyd.c			168
-rw-r--r--	drivers/md/dm-log.c			  3
-rw-r--r--	drivers/md/dm-mpath.c			  2
-rw-r--r--	drivers/md/dm-raid1.c			 10
-rw-r--r--	drivers/md/dm-snap-persistent.c		 13
-rw-r--r--	drivers/md/dm-snap.c			 10
-rw-r--r--	drivers/md/dm-table.c			 23
8 files changed, 136 insertions(+), 120 deletions(-)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 76a5af00a26b..2067288f61f9 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
 #define DM_MSG_PREFIX "io"
 
 #define DM_IO_MAX_REGIONS	BITS_PER_LONG
+#define MIN_IOS		16
+#define MIN_BIOS	16
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -41,33 +43,21 @@ struct io {
 static struct kmem_cache *_dm_io_cache;
 
 /*
- * io contexts are only dynamically allocated for asynchronous
- * io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
-	return 4 * pages;	/* too many ? */
-}
-
-/*
  * Create a client with mempool and bioset.
  */
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
 {
-	unsigned ios = pages_to_ios(num_pages);
 	struct dm_io_client *client;
 
 	client = kmalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return ERR_PTR(-ENOMEM);
 
-	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(16, 0);
+	client->bios = bioset_create(MIN_BIOS, 0);
 	if (!client->bios)
 		goto bad;
 
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
 }
 EXPORT_SYMBOL(dm_io_client_create);
 
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
-	return mempool_resize(client->pool, pages_to_ios(num_pages),
-			      GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
 void dm_io_client_destroy(struct dm_io_client *client)
 {
 	mempool_destroy(client->pool);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bb73a13ca40..819e37eaaeba 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,15 +27,19 @@
 
 #include "dm.h"
 
+#define SUB_JOB_SIZE	128
+#define SPLIT_COUNT	8
+#define MIN_JOBS	8
+#define RESERVE_PAGES	(DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
  *---------------------------------------------------------------*/
 struct dm_kcopyd_client {
-	spinlock_t lock;
 	struct page_list *pages;
-	unsigned int nr_pages;
-	unsigned int nr_free_pages;
+	unsigned nr_reserved_pages;
+	unsigned nr_free_pages;
 
 	struct dm_io_client *io_client;
 
@@ -67,15 +71,18 @@ static void wake(struct dm_kcopyd_client *kc)
 	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
 }
 
-static struct page_list *alloc_pl(void)
+/*
+ * Obtain one page for the use of kcopyd.
+ */
+static struct page_list *alloc_pl(gfp_t gfp)
 {
 	struct page_list *pl;
 
-	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+	pl = kmalloc(sizeof(*pl), gfp);
 	if (!pl)
 		return NULL;
 
-	pl->page = alloc_page(GFP_KERNEL);
+	pl->page = alloc_page(gfp);
 	if (!pl->page) {
 		kfree(pl);
 		return NULL;
@@ -90,41 +97,56 @@ static void free_pl(struct page_list *pl)
 	kfree(pl);
 }
 
-static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
-			    unsigned int nr, struct page_list **pages)
+/*
+ * Add the provided pages to a client's free page list, releasing
+ * back to the system any beyond the reserved_pages limit.
+ */
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
-	struct page_list *pl;
-
-	spin_lock(&kc->lock);
-	if (kc->nr_free_pages < nr) {
-		spin_unlock(&kc->lock);
-		return -ENOMEM;
-	}
-
-	kc->nr_free_pages -= nr;
-	for (*pages = pl = kc->pages; --nr; pl = pl->next)
-		;
+	struct page_list *next;
 
-	kc->pages = pl->next;
-	pl->next = NULL;
+	do {
+		next = pl->next;
 
-	spin_unlock(&kc->lock);
+		if (kc->nr_free_pages >= kc->nr_reserved_pages)
+			free_pl(pl);
+		else {
+			pl->next = kc->pages;
+			kc->pages = pl;
+			kc->nr_free_pages++;
+		}
 
-	return 0;
+		pl = next;
+	} while (pl);
 }
 
-static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
+			    unsigned int nr, struct page_list **pages)
 {
-	struct page_list *cursor;
+	struct page_list *pl;
+
+	*pages = NULL;
+
+	do {
+		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+		if (unlikely(!pl)) {
+			/* Use reserved pages */
+			pl = kc->pages;
+			if (unlikely(!pl))
+				goto out_of_memory;
+			kc->pages = pl->next;
+			kc->nr_free_pages--;
+		}
+		pl->next = *pages;
+		*pages = pl;
+	} while (--nr);
 
-	spin_lock(&kc->lock);
-	for (cursor = pl; cursor->next; cursor = cursor->next)
-		kc->nr_free_pages++;
+	return 0;
 
-	kc->nr_free_pages++;
-	cursor->next = kc->pages;
-	kc->pages = pl;
-	spin_unlock(&kc->lock);
+out_of_memory:
+	if (*pages)
+		kcopyd_put_pages(kc, *pages);
+	return -ENOMEM;
 }
 
 /*
@@ -141,13 +163,16 @@ static void drop_pages(struct page_list *pl)
 	}
 }
 
-static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
+/*
+ * Allocate and reserve nr_pages for the use of a specific client.
+ */
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
 {
-	unsigned int i;
+	unsigned i;
 	struct page_list *pl = NULL, *next;
 
-	for (i = 0; i < nr; i++) {
-		next = alloc_pl();
+	for (i = 0; i < nr_pages; i++) {
+		next = alloc_pl(GFP_KERNEL);
 		if (!next) {
 			if (pl)
 				drop_pages(pl);
@@ -157,17 +182,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 		pl = next;
 	}
 
+	kc->nr_reserved_pages += nr_pages;
 	kcopyd_put_pages(kc, pl);
-	kc->nr_pages += nr;
+
 	return 0;
 }
 
 static void client_free_pages(struct dm_kcopyd_client *kc)
 {
-	BUG_ON(kc->nr_free_pages != kc->nr_pages);
+	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
 	drop_pages(kc->pages);
 	kc->pages = NULL;
-	kc->nr_free_pages = kc->nr_pages = 0;
+	kc->nr_free_pages = kc->nr_reserved_pages = 0;
 }
 
 /*-----------------------------------------------------------------
@@ -216,16 +242,17 @@ struct kcopyd_job {
 	struct mutex lock;
 	atomic_t sub_jobs;
 	sector_t progress;
-};
 
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+	struct kcopyd_job *master_job;
+};
 
 static struct kmem_cache *_job_cache;
 
 int __init dm_kcopyd_init(void)
 {
-	_job_cache = KMEM_CACHE(kcopyd_job, 0);
+	_job_cache = kmem_cache_create("kcopyd_job",
+				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+				__alignof__(struct kcopyd_job), 0, NULL);
 	if (!_job_cache)
 		return -ENOMEM;
 
@@ -299,7 +326,12 @@ static int run_complete_job(struct kcopyd_job *job)
 
 	if (job->pages)
 		kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, kc->job_pool);
+	/*
+	 * If this is the master job, the sub jobs have already
+	 * completed so we can free everything.
+	 */
+	if (job->master_job == job)
+		mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +492,14 @@ static void dispatch_job(struct kcopyd_job *job)
 	wake(kc);
 }
 
-#define SUB_JOB_SIZE 128
 static void segment_complete(int read_err, unsigned long write_err,
 			     void *context)
 {
 	/* FIXME: tidy this function */
 	sector_t progress = 0;
 	sector_t count = 0;
-	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+	struct kcopyd_job *job = sub_job->master_job;
 	struct dm_kcopyd_client *kc = job->kc;
 
 	mutex_lock(&job->lock);
@@ -498,8 +530,6 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
-							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -511,7 +541,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 		}
 
 		sub_job->fn = segment_complete;
-		sub_job->context = job;
+		sub_job->context = sub_job;
 		dispatch_job(sub_job);
 
 	} else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +561,19 @@ static void segment_complete(int read_err, unsigned long write_err,
 }
 
 /*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
  */
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
 {
 	int i;
 
-	atomic_inc(&job->kc->nr_jobs);
+	atomic_inc(&master_job->kc->nr_jobs);
 
-	atomic_set(&job->sub_jobs, SPLIT_COUNT);
-	for (i = 0; i < SPLIT_COUNT; i++)
-		segment_complete(0, 0u, job);
+	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+	for (i = 0; i < SPLIT_COUNT; i++) {
+		master_job[i + 1].master_job = master_job;
+		segment_complete(0, 0u, &master_job[i + 1]);
+	}
 }
 
 int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
549 | int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, | 579 | int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, |
@@ -553,7 +583,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, | |||
553 | struct kcopyd_job *job; | 583 | struct kcopyd_job *job; |
554 | 584 | ||
555 | /* | 585 | /* |
556 | * Allocate a new job. | 586 | * Allocate an array of jobs consisting of one master job |
587 | * followed by SPLIT_COUNT sub jobs. | ||
557 | */ | 588 | */ |
558 | job = mempool_alloc(kc->job_pool, GFP_NOIO); | 589 | job = mempool_alloc(kc->job_pool, GFP_NOIO); |
559 | 590 | ||
@@ -577,10 +608,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 
 	job->fn = fn;
 	job->context = context;
+	job->master_job = job;
 
-	if (job->source.count < SUB_JOB_SIZE)
+	if (job->source.count <= SUB_JOB_SIZE)
 		dispatch_job(job);
-
 	else {
 		mutex_init(&job->lock);
 		job->progress = 0;
@@ -606,17 +637,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 /*-----------------------------------------------------------------
  * Client setup
  *---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned int nr_pages,
-			    struct dm_kcopyd_client **result)
+struct dm_kcopyd_client *dm_kcopyd_client_create(void)
 {
 	int r = -ENOMEM;
 	struct dm_kcopyd_client *kc;
 
 	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
 	if (!kc)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&kc->lock);
 	spin_lock_init(&kc->job_lock);
 	INIT_LIST_HEAD(&kc->complete_jobs);
 	INIT_LIST_HEAD(&kc->io_jobs);
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 		goto bad_workqueue;
 
 	kc->pages = NULL;
-	kc->nr_pages = kc->nr_free_pages = 0;
-	r = client_alloc_pages(kc, nr_pages);
+	kc->nr_reserved_pages = kc->nr_free_pages = 0;
+	r = client_reserve_pages(kc, RESERVE_PAGES);
 	if (r)
 		goto bad_client_pages;
 
-	kc->io_client = dm_io_client_create(nr_pages);
+	kc->io_client = dm_io_client_create();
 	if (IS_ERR(kc->io_client)) {
 		r = PTR_ERR(kc->io_client);
 		goto bad_io_client;
@@ -647,8 +676,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	init_waitqueue_head(&kc->destroyq);
 	atomic_set(&kc->nr_jobs, 0);
 
-	*result = kc;
-	return 0;
+	return kc;
 
 bad_io_client:
 	client_free_pages(kc);
@@ -659,7 +687,7 @@ bad_workqueue:
 bad_slab:
 	kfree(kc);
 
-	return r;
+	return ERR_PTR(r);
 }
 EXPORT_SYMBOL(dm_kcopyd_client_create);
 
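Two numbers in the dm-kcopyd diff above are worth working through. Assuming a typical 4 KiB PAGE_SIZE (the constants themselves come straight from the diff), the new per-client reserve is:

	SUB_JOB_SIZE << SECTOR_SHIFT = 128 * 512        = 65536 bytes
	RESERVE_PAGES = DIV_ROUND_UP(65536, PAGE_SIZE)  = 16 pages

So each client keeps only a 64 KiB emergency reserve; kcopyd_get_pages() first tries the main page allocator with __GFP_NOWARN | __GFP_NORETRY and dips into the reserve only when that fails. Similarly, _job_cache objects are now sized at sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1), so every mempool allocation yields an array of nine jobs: one master followed by eight sub jobs. That is what lets split_job() hand segment_complete() the preallocated &master_job[i + 1] slots instead of allocating sub jobs with mempool_alloc(GFP_NOIO) from a completion path, closing the deadlock named in the "preallocate sub jobs" commit.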
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a1f321889676..948e3f4925bf 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 
 	lc->io_req.mem.type = DM_IO_VMA;
 	lc->io_req.notify.fn = NULL;
-	lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
-							   PAGE_SIZE));
+	lc->io_req.client = dm_io_client_create();
 	if (IS_ERR(lc->io_req.client)) {
 		r = PTR_ERR(lc->io_req.client);
 		DMWARN("couldn't allocate disk io client");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a550a057d991..aa4e570c2cb5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (!error && !clone->errors)
 		return 0;	/* I/O complete */
 
-	if (error == -EOPNOTSUPP || error == -EREMOTEIO)
+	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
 		return error;
 
 	if (mpio->pgpath)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 976ad4688afc..9bfd057be686 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
 #define DM_MSG_PREFIX "raid1"
 
 #define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
-#define DM_KCOPYD_PAGES 64
 
 #define DM_RAID1_HANDLE_ERRORS 0x01
 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -887,7 +885,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 		return NULL;
 	}
 
-	ms->io_client = dm_io_client_create(DM_IO_PAGES);
+	ms->io_client = dm_io_client_create();
 	if (IS_ERR(ms->io_client)) {
 		ti->error = "Error creating dm_io client";
 		mempool_destroy(ms->read_record_pool);
@@ -1117,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_destroy_wq;
 	}
 
-	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
-	if (r)
+	ms->kcopyd_client = dm_kcopyd_client_create();
+	if (IS_ERR(ms->kcopyd_client)) {
+		r = PTR_ERR(ms->kcopyd_client);
 		goto err_destroy_wq;
+	}
 
 	wakeup_mirrord(ms);
 	return 0;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 95891dfcbca0..135c2f1fdbfc 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
 	struct workqueue_struct *metadata_wq;
 };
 
-static unsigned sectors_to_pages(unsigned sectors)
-{
-	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
-}
-
 static int alloc_area(struct pstore *ps)
 {
 	int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		chunk_size_supplied = 0;
 	}
 
-	ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
-							     chunk_size));
+	ps->io_client = dm_io_client_create();
 	if (IS_ERR(ps->io_client))
 		return PTR_ERR(ps->io_client);
 
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		return r;
 	}
 
-	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
-				ps->io_client);
-	if (r)
-		return r;
-
 	r = alloc_area(ps);
 	return r;
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2d330942cb2..9ecff5f3023a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,11 +40,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
 #define SNAPSHOT_COPY_PRIORITY 2
 
 /*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
-/*
  * The size of the mempool used to track chunks in use.
  */
 #define MIN_IOS 256
@@ -1116,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_hash_tables;
 	}
 
-	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
-	if (r) {
+	s->kcopyd_client = dm_kcopyd_client_create();
+	if (IS_ERR(s->kcopyd_client)) {
+		r = PTR_ERR(s->kcopyd_client);
 		ti->error = "Could not create kcopyd client";
 		goto bad_kcopyd;
 	}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cb8380c9767f..451c3bb176d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
+	struct request_queue *q;
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
 	sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		limits->logical_block_size >> SECTOR_SHIFT;
 	char b[BDEVNAME_SIZE];
 
+	/*
+	 * Some devices exist without request functions,
+	 * such as loop devices not yet bound to backing files.
+	 * Forbid the use of such devices.
+	 */
+	q = bdev_get_queue(bdev);
+	if (!q || !q->make_request_fn) {
+		DMWARN("%s: %s is not yet initialised: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
+	}
+
 	if (!dev_size)
 		return 0;
 
@@ -1346,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
 		return 0;
 
 	/*
-	 * Ensure that at least one underlying device supports discards.
+	 * Unless any target used by the table set discards_supported,
+	 * require at least one underlying device to support discards.
 	 * t->devices includes internal dm devices such as mirror logs
 	 * so we need to use iterate_devices here, which targets
 	 * supporting discard must provide.
@@ -1354,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
+		if (ti->discards_supported)
+			return 1;
+
 		if (ti->type->iterate_devices &&
 		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
 			return 1;
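
The last two dm-table hunks let a target advertise discard support even when no underlying device provides it. A minimal sketch of how a target would opt in from its constructor; the example_ctr name is hypothetical, and only the ti->discards_supported field comes from this merge:

#include <linux/device-mapper.h>

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/*
	 * This target completes discard bios itself, so
	 * dm_table_supports_discards() now returns true for the table
	 * even if no underlying device is discard-capable.
	 */
	ti->discards_supported = 1;

	return 0;
}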