Diffstat (limited to 'drivers/md')
 drivers/md/Kconfig              |   6
 drivers/md/Makefile             |   2
 drivers/md/dm-bio-list.h        |   4
 drivers/md/dm-crypt.c           |  33
 drivers/md/dm-delay.c           |  23
 drivers/md/dm-exception-store.c |  76
 drivers/md/dm-io.c              |   5
 drivers/md/dm-mpath-rdac.c      | 700
 drivers/md/dm-mpath.c           |  34
 drivers/md/dm-raid1.c           |  75
 drivers/md/dm-round-robin.c     |   2
 drivers/md/dm-snap.c            | 116
 drivers/md/dm-snap.h            |   6
 drivers/md/dm.c                 |  33
 drivers/md/dm.h                 |  40
 drivers/md/kcopyd.c             |  11
 16 files changed, 984 insertions(+), 182 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index bfd9b9c6252c..64bf3a81db93 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -264,6 +264,12 @@ config DM_MULTIPATH_EMC
 	---help---
 	  Multipath support for EMC CX/AX series hardware.
 
+config DM_MULTIPATH_RDAC
+	tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
+	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+	---help---
+	  Multipath support for LSI/Engenio RDAC.
+
 config DM_DELAY
 	tristate "I/O delaying target (EXPERIMENTAL)"
 	depends on BLK_DEV_DM && EXPERIMENTAL
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 71eb45f74171..c49366cdc05d 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -7,6 +7,7 @@ dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
 dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
 dm-mirror-objs	:= dm-log.o dm-raid1.o
+dm-rdac-objs	:= dm-mpath-rdac.o
 md-mod-objs	:= md.o bitmap.o
 raid456-objs	:= raid5.o raid6algos.o raid6recov.o raid6tables.o \
 		   raid6int1.o raid6int2.o raid6int4.o \
@@ -34,6 +35,7 @@ obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
 obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_MULTIPATH)	+= dm-multipath.o dm-round-robin.o
 obj-$(CONFIG_DM_MULTIPATH_EMC)	+= dm-emc.o
+obj-$(CONFIG_DM_MULTIPATH_RDAC)	+= dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
 obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index c6be88826fae..16ee3b018b3a 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -8,7 +8,6 @@
 #define DM_BIO_LIST_H
 
 #include <linux/bio.h>
-#include <linux/prefetch.h>
 
 struct bio_list {
 	struct bio *head;
@@ -31,8 +30,7 @@ static inline void bio_list_init(struct bio_list *bl)
 }
 
 #define bio_list_for_each(bio, bl) \
-	for (bio = (bl)->head; bio && ({ prefetch(bio->bi_next); 1; }); \
-	     bio = bio->bi_next)
+	for (bio = (bl)->head; bio; bio = bio->bi_next)
 
 static inline unsigned bio_list_size(const struct bio_list *bl)
 {
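The simplified iterator above is now a plain singly linked walk over bi_next. For reference, a minimal user-space sketch of how a caller consumes bio_list_for_each; the struct definitions here are stand-ins for the kernel's own, kept only so the sketch compiles on its own:

    /* Minimal user-space sketch of the simplified iterator; the real
     * struct bio and bio_list live in the kernel headers patched above. */
    #include <stdio.h>

    struct bio {
        struct bio *bi_next;    /* the only field the macro touches */
        int id;
    };

    struct bio_list {
        struct bio *head;
        struct bio *tail;
    };

    #define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

    int main(void)
    {
        struct bio b2 = { NULL, 2 }, b1 = { &b2, 1 }, b0 = { &b1, 0 };
        struct bio_list bl = { &b0, &b2 };
        struct bio *bio;

        bio_list_for_each(bio, &bl)     /* visits b0, b1, b2 in order */
            printf("bio %d\n", bio->id);
        return 0;
    }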
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7b0fcfc9eaa5..ba952a032598 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -30,7 +30,7 @@
 /*
  * per bio private data
  */
-struct crypt_io {
+struct dm_crypt_io {
 	struct dm_target *target;
 	struct bio *base_bio;
 	struct work_struct work;
@@ -106,7 +106,7 @@ struct crypt_config {
 
 static struct kmem_cache *_crypt_io_pool;
 
-static void clone_init(struct crypt_io *, struct bio *);
+static void clone_init(struct dm_crypt_io *, struct bio *);
 
 /*
  * Different IV generation algorithms:
@@ -382,7 +382,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 static void dm_crypt_bio_destructor(struct bio *bio)
 {
-	struct crypt_io *io = bio->bi_private;
+	struct dm_crypt_io *io = bio->bi_private;
 	struct crypt_config *cc = io->target->private;
 
 	bio_free(bio, cc->bs);
@@ -393,7 +393,7 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -479,7 +479,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc,
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
  */
-static void dec_pending(struct crypt_io *io, int error)
+static void dec_pending(struct dm_crypt_io *io, int error)
 {
 	struct crypt_config *cc = (struct crypt_config *) io->target->private;
 
@@ -503,7 +503,7 @@ static void dec_pending(struct crypt_io *io, int error)
 static struct workqueue_struct *_kcryptd_workqueue;
 static void kcryptd_do_work(struct work_struct *work);
 
-static void kcryptd_queue_io(struct crypt_io *io)
+static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
 	INIT_WORK(&io->work, kcryptd_do_work);
 	queue_work(_kcryptd_workqueue, &io->work);
@@ -511,7 +511,7 @@ static void kcryptd_queue_io(struct crypt_io *io)
 
 static int crypt_endio(struct bio *clone, unsigned int done, int error)
 {
-	struct crypt_io *io = clone->bi_private;
+	struct dm_crypt_io *io = clone->bi_private;
 	struct crypt_config *cc = io->target->private;
 	unsigned read_io = bio_data_dir(clone) == READ;
 
@@ -545,7 +545,7 @@ out:
 	return error;
 }
 
-static void clone_init(struct crypt_io *io, struct bio *clone)
+static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 {
 	struct crypt_config *cc = io->target->private;
 
@@ -556,7 +556,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void process_read(struct crypt_io *io)
+static void process_read(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
@@ -587,7 +587,7 @@ static void process_read(struct crypt_io *io)
 	generic_make_request(clone);
 }
 
-static void process_write(struct crypt_io *io)
+static void process_write(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
@@ -644,7 +644,7 @@ static void process_write(struct crypt_io *io)
 	}
 }
 
-static void process_read_endio(struct crypt_io *io)
+static void process_read_endio(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct convert_context ctx;
@@ -657,7 +657,7 @@ static void process_read_endio(struct crypt_io *io)
 
 static void kcryptd_do_work(struct work_struct *work)
 {
-	struct crypt_io *io = container_of(work, struct crypt_io, work);
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
 	if (io->post_process)
 		process_read_endio(io);
@@ -939,10 +939,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_io *io;
-
-	if (bio_barrier(bio))
-		return -EOPNOTSUPP;
+	struct dm_crypt_io *io;
 
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
@@ -1062,9 +1059,7 @@ static int __init dm_crypt_init(void)
 {
 	int r;
 
-	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
-					   sizeof(struct crypt_io),
-					   0, 0, NULL, NULL);
+	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
 	if (!_crypt_io_pool)
 		return -ENOMEM;
 
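The dm_crypt_init() hunk is the first of three in this commit that replace an open-coded kmem_cache_create() with KMEM_CACHE(). A sketch of that helper as defined in <linux/slab.h> around 2.6.22 (the trailing constructor/destructor arguments disappeared in later kernels): the struct tag is stringized into the cache name and supplies size and alignment, which is why the crypt_io, delay_info, and mpath_io structs had to grow dm_ prefixes first.

    /* Approximate 2.6.22-era shape of KMEM_CACHE(): the struct tag
     * becomes the visible slab name (e.g. "dm_crypt_io" in
     * /proc/slabinfo) and supplies the object size and alignment. */
    #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct, \
            sizeof(struct __struct), __alignof__(struct __struct), \
            (__flags), NULL, NULL)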
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 52c7cf9e5803..6928c136d3c5 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -20,7 +20,7 @@
 
 struct delay_c {
 	struct timer_list delay_timer;
-	struct semaphore timer_lock;
+	struct mutex timer_lock;
 	struct work_struct flush_expired_bios;
 	struct list_head delayed_bios;
 	atomic_t may_delay;
@@ -37,7 +37,7 @@ struct delay_c {
 	unsigned writes;
 };
 
-struct delay_info {
+struct dm_delay_info {
 	struct delay_c *context;
 	struct list_head list;
 	struct bio *bio;
@@ -58,12 +58,12 @@ static void handle_delayed_timer(unsigned long data)
 
 static void queue_timeout(struct delay_c *dc, unsigned long expires)
 {
-	down(&dc->timer_lock);
+	mutex_lock(&dc->timer_lock);
 
 	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
 		mod_timer(&dc->delay_timer, expires);
 
-	up(&dc->timer_lock);
+	mutex_unlock(&dc->timer_lock);
 }
 
 static void flush_bios(struct bio *bio)
@@ -80,7 +80,7 @@ static void flush_bios(struct bio *bio)
 
 static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
 {
-	struct delay_info *delayed, *next;
+	struct dm_delay_info *delayed, *next;
 	unsigned long next_expires = 0;
 	int start_timer = 0;
 	BIO_LIST(flush_bios);
@@ -193,13 +193,11 @@ out:
 		goto bad;
 	}
 
-	init_timer(&dc->delay_timer);
-	dc->delay_timer.function = handle_delayed_timer;
-	dc->delay_timer.data = (unsigned long)dc;
+	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
 
 	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
 	INIT_LIST_HEAD(&dc->delayed_bios);
-	init_MUTEX(&dc->timer_lock);
+	mutex_init(&dc->timer_lock);
 	atomic_set(&dc->may_delay, 1);
 
 	ti->private = dc;
@@ -227,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
 
 static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
 {
-	struct delay_info *delayed;
+	struct dm_delay_info *delayed;
 	unsigned long expires = 0;
 
 	if (!delay || !atomic_read(&dc->may_delay))
@@ -338,10 +336,7 @@ static int __init dm_delay_init(void)
 		goto bad_queue;
 	}
 
-	delayed_cache = kmem_cache_create("dm-delay",
-					  sizeof(struct delay_info),
-					  __alignof__(struct delay_info),
-					  0, NULL, NULL);
+	delayed_cache = KMEM_CACHE(dm_delay_info, 0);
 	if (!delayed_cache) {
 		DMERR("Couldn't create delayed bio cache.");
 		goto bad_memcache;
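The timer_lock conversion works because init_MUTEX() declared a counting semaphore with an initial count of one, i.e. a mutex in all but name, so every down()/up() pair maps one-to-one onto mutex_lock()/mutex_unlock(). A kernel-style sketch of the mapping under the 2.6.22-era APIs (a fragment, not compilable outside the tree):

    /* Kernel-style fragment: the semaphore-as-mutex idiom being removed
     * versus the real mutex replacing it. The mutex additionally tracks
     * its owner, enabling debug checks a bare semaphore cannot provide. */
    #include <linux/mutex.h>

    static struct mutex timer_lock;

    static void timer_lock_example(void)
    {
        mutex_init(&timer_lock);        /* was: init_MUTEX(&sem)  */

        mutex_lock(&timer_lock);        /* was: down(&sem)        */
        /* ... re-arm the shared delay timer ... */
        mutex_unlock(&timer_lock);      /* was: up(&sem)          */
    }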
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 07e0a0c84f6e..3d65917a1bbb 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -125,9 +125,11 @@ struct pstore {
 	uint32_t callback_count;
 	struct commit_callback *callbacks;
 	struct dm_io_client *io_client;
+
+	struct workqueue_struct *metadata_wq;
 };
 
-static inline unsigned int sectors_to_pages(unsigned int sectors)
+static unsigned sectors_to_pages(unsigned sectors)
 {
 	return sectors / (PAGE_SIZE >> 9);
 }
@@ -156,10 +158,24 @@ static void free_area(struct pstore *ps)
 	ps->area = NULL;
 }
 
+struct mdata_req {
+	struct io_region *where;
+	struct dm_io_request *io_req;
+	struct work_struct work;
+	int result;
+};
+
+static void do_metadata(struct work_struct *work)
+{
+	struct mdata_req *req = container_of(work, struct mdata_req, work);
+
+	req->result = dm_io(req->io_req, 1, req->where, NULL);
+}
+
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
+static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 {
 	struct io_region where = {
 		.bdev = ps->snap->cow->bdev,
@@ -173,8 +189,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
 		.client = ps->io_client,
 		.notify.fn = NULL,
 	};
+	struct mdata_req req;
+
+	if (!metadata)
+		return dm_io(&io_req, 1, &where, NULL);
+
+	req.where = &where;
+	req.io_req = &io_req;
 
-	return dm_io(&io_req, 1, &where, NULL);
+	/*
+	 * Issue the synchronous I/O from a different thread
+	 * to avoid generic_make_request recursion.
+	 */
+	INIT_WORK(&req.work, do_metadata);
+	queue_work(ps->metadata_wq, &req.work);
+	flush_workqueue(ps->metadata_wq);
+
+	return req.result;
 }
 
 /*
@@ -189,7 +220,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
 	/* convert a metadata area index to a chunk index */
 	chunk = 1 + ((ps->exceptions_per_area + 1) * area);
 
-	r = chunk_io(ps, chunk, rw);
+	r = chunk_io(ps, chunk, rw, 0);
 	if (r)
 		return r;
 
@@ -230,7 +261,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	if (r)
 		return r;
 
-	r = chunk_io(ps, 0, READ);
+	r = chunk_io(ps, 0, READ, 1);
 	if (r)
 		goto bad;
 
@@ -292,7 +323,7 @@ static int write_header(struct pstore *ps)
 	dh->version = cpu_to_le32(ps->version);
 	dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
 
-	return chunk_io(ps, 0, WRITE);
+	return chunk_io(ps, 0, WRITE, 1);
 }
 
 /*
@@ -393,7 +424,7 @@ static int read_exceptions(struct pstore *ps)
 	return 0;
 }
 
-static inline struct pstore *get_info(struct exception_store *store)
+static struct pstore *get_info(struct exception_store *store)
 {
 	return (struct pstore *) store->context;
 }
@@ -409,6 +440,7 @@ static void persistent_destroy(struct exception_store *store)
 {
 	struct pstore *ps = get_info(store);
 
+	destroy_workqueue(ps->metadata_wq);
 	dm_io_client_destroy(ps->io_client);
 	vfree(ps->callbacks);
 	free_area(ps);
@@ -457,11 +489,6 @@ static int persistent_read_metadata(struct exception_store *store)
 	/*
 	 * Sanity checks.
 	 */
-	if (!ps->valid) {
-		DMWARN("snapshot is marked invalid");
-		return -EINVAL;
-	}
-
 	if (ps->version != SNAPSHOT_DISK_VERSION) {
 		DMWARN("unable to handle snapshot disk version %d",
 		       ps->version);
@@ -469,6 +496,12 @@ static int persistent_read_metadata(struct exception_store *store)
 	}
 
 	/*
+	 * Metadata are valid, but snapshot is invalidated
+	 */
+	if (!ps->valid)
+		return 1;
+
+	/*
 	 * Read the metadata.
 	 */
 	r = read_exceptions(ps);
@@ -480,7 +513,7 @@ static int persistent_read_metadata(struct exception_store *store)
 }
 
 static int persistent_prepare(struct exception_store *store,
-			      struct exception *e)
+			      struct dm_snap_exception *e)
 {
 	struct pstore *ps = get_info(store);
 	uint32_t stride;
@@ -505,7 +538,7 @@ static int persistent_prepare(struct exception_store *store,
 }
 
 static void persistent_commit(struct exception_store *store,
-			      struct exception *e,
+			      struct dm_snap_exception *e,
 			      void (*callback) (void *, int success),
 			      void *callback_context)
 {
@@ -588,6 +621,12 @@ int dm_create_persistent(struct exception_store *store)
 	atomic_set(&ps->pending_count, 0);
 	ps->callbacks = NULL;
 
+	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
+	if (!ps->metadata_wq) {
+		DMERR("couldn't start header metadata update thread");
+		return -ENOMEM;
+	}
+
 	store->destroy = persistent_destroy;
 	store->read_metadata = persistent_read_metadata;
 	store->prepare_exception = persistent_prepare;
@@ -616,7 +655,8 @@ static int transient_read_metadata(struct exception_store *store)
 	return 0;
 }
 
-static int transient_prepare(struct exception_store *store, struct exception *e)
+static int transient_prepare(struct exception_store *store,
+			     struct dm_snap_exception *e)
 {
 	struct transient_c *tc = (struct transient_c *) store->context;
 	sector_t size = get_dev_size(store->snap->cow->bdev);
@@ -631,9 +671,9 @@ static int transient_prepare(struct exception_store *store, struct exception *e)
 }
 
 static void transient_commit(struct exception_store *store,
-			     struct exception *e,
+			     struct dm_snap_exception *e,
 			     void (*callback) (void *, int success),
 			     void *callback_context)
 {
 	/* Just succeed */
 	callback(callback_context, 1);
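The reworked chunk_io() builds the request on the caller's stack, queues it on the single-threaded ksnaphd workqueue, and blocks in flush_workqueue(), so the synchronous dm_io() runs on a worker stack that is not already inside generic_make_request(). The same hand-off-and-wait shape, reduced to a runnable user-space sketch where a plain thread join stands in for queue_work() plus flush_workqueue(); all names here are illustrative:

    /* User-space sketch of the pattern: package a request on the
     * caller's stack, hand it to a worker, block until the worker
     * stores the result. Compile with -lpthread. */
    #include <pthread.h>
    #include <stdio.h>

    struct mdata_req {
        int chunk;      /* stands in for the io_region/dm_io_request */
        int result;
    };

    static void *do_metadata(void *arg)
    {
        struct mdata_req *req = arg;

        /* the "synchronous I/O", issued from the worker's own stack */
        req->result = req->chunk >= 0 ? 0 : -1;
        return NULL;
    }

    static int chunk_io(int chunk)
    {
        struct mdata_req req = { .chunk = chunk };
        pthread_t worker;

        pthread_create(&worker, NULL, do_metadata, &req);
        pthread_join(&worker, NULL);    /* ~ flush_workqueue() */
        return req.result;
    }

    int main(void)
    {
        printf("chunk_io(0) = %d\n", chunk_io(0));
        return 0;
    }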
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 352c6fbeac53..f3a772486437 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -293,7 +293,10 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 		 * bvec for bio_get/set_region() and decrement bi_max_vecs
 		 * to hide it from bio_add_page().
 		 */
-		num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
+		num_bvecs = dm_sector_div_up(remaining,
+					     (PAGE_SIZE >> SECTOR_SHIFT));
+		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
+				      num_bvecs);
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
 		bio->bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
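The new sizing rounds the remaining sector count up to whole pages, clamps it to what the queue will merge, and reserves one extra bvec for the region encoding described in the comment. A worked example in plain C, assuming 4 KiB pages (eight 512-byte sectors per page) and a queue limit of 128 bvecs; div_up() mimics dm_sector_div_up(), and the constant stands in for bio_get_nr_vecs():

    #include <stdio.h>

    #define SECTORS_PER_PAGE 8   /* PAGE_SIZE >> SECTOR_SHIFT on 4K pages */
    #define QUEUE_MAX_VECS 128   /* stand-in for bio_get_nr_vecs() */

    static unsigned div_up(unsigned n, unsigned d)  /* ~ dm_sector_div_up */
    {
        return (n + d - 1) / d;
    }

    int main(void)
    {
        unsigned remaining = 20;    /* sectors left in the region */
        unsigned num_bvecs = div_up(remaining, SECTORS_PER_PAGE); /* 3 pages */

        /* clamp to the queue limit, +1 spare bvec for the region code */
        num_bvecs = 1 + (QUEUE_MAX_VECS < num_bvecs ? QUEUE_MAX_VECS
                                                    : num_bvecs);
        printf("%u\n", num_bvecs);  /* 4: three data pages + one spare */
        return 0;
    }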
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
new file mode 100644
index 000000000000..8b776b8cb7f7
--- /dev/null
+++ b/drivers/md/dm-mpath-rdac.c
@@ -0,0 +1,700 @@
+/*
+ * Engenio/LSI RDAC DM HW handler
+ *
+ * Copyright (C) 2005 Mike Christie. All rights reserved.
+ * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+
+#define DM_MSG_PREFIX "multipath rdac"
+
+#include "dm.h"
+#include "dm-hw-handler.h"
+
+#define RDAC_DM_HWH_NAME "rdac"
+#define RDAC_DM_HWH_VER "0.4"
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+#define RDAC_QUIESCENCE_TIME 20
+/*
+ * Page Codes
+ */
+#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_MODE_TRANSFER_ALL_LUNS		0x01
+#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02
+
+/*
+ * RDAC Options field
+ */
+#define RDAC_FORCED_QUIESENCE 0x02
+
+#define RDAC_FAILOVER_TIMEOUT (60 * HZ)
+
+struct rdac_mode_6_hdr {
+	u8	data_len;
+	u8	medium_type;
+	u8	device_params;
+	u8	block_desc_len;
+};
+
+struct rdac_mode_10_hdr {
+	u16	data_len;
+	u8	medium_type;
+	u8	device_params;
+	u16	reserved;
+	u16	block_desc_len;
+};
+
+struct rdac_mode_common {
+	u8	controller_serial[16];
+	u8	alt_controller_serial[16];
+	u8	rdac_mode[2];
+	u8	alt_rdac_mode[2];
+	u8	quiescence_timeout;
+	u8	rdac_options;
+};
+
+struct rdac_pg_legacy {
+	struct rdac_mode_6_hdr hdr;
+	u8	page_code;
+	u8	page_len;
+	struct rdac_mode_common common;
+#define MODE6_MAX_LUN	32
+	u8	lun_table[MODE6_MAX_LUN];
+	u8	reserved2[32];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+struct rdac_pg_expanded {
+	struct rdac_mode_10_hdr hdr;
+	u8	page_code;
+	u8	subpage_code;
+	u8	page_len[2];
+	struct rdac_mode_common common;
+	u8	lun_table[256];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+struct c9_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC9 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "vace" */
+	u8	avte_cvp;
+	u8	path_prio;
+	u8	reserved2[38];
+};
+
+#define SUBSYS_ID_LEN	16
+#define SLOT_ID_LEN	2
+
+struct c4_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC4 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "subs" */
+	u8	subsys_id[SUBSYS_ID_LEN];
+	u8	revision[4];
+	u8	slot_id[SLOT_ID_LEN];
+	u8	reserved[2];
+};
+
+struct rdac_controller {
+	u8			subsys_id[SUBSYS_ID_LEN];
+	u8			slot_id[SLOT_ID_LEN];
+	int			use_10_ms;
+	struct kref		kref;
+	struct list_head	node; /* list of all controllers */
+	spinlock_t		lock;
+	int			submitted;
+	struct list_head	cmd_list; /* list of commands to be submitted */
+	union {
+		struct rdac_pg_legacy legacy;
+		struct rdac_pg_expanded expanded;
+	} mode_select;
+};
+struct c8_inquiry {
+	u8	peripheral_info;
+	u8	page_code; /* 0xC8 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4]; /* "edid" */
+	u8	reserved2[3];
+	u8	vol_uniq_id_len;
+	u8	vol_uniq_id[16];
+	u8	vol_user_label_len;
+	u8	vol_user_label[60];
+	u8	array_uniq_id_len;
+	u8	array_unique_id[16];
+	u8	array_user_label_len;
+	u8	array_user_label[60];
+	u8	lun[8];
+};
+
+struct c2_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC2 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "swr4" */
+	u8	sw_version[3];
+	u8	sw_date[3];
+	u8	features_enabled;
+	u8	max_lun_supported;
+	u8	partitions[239]; /* Total allocation length should be 0xFF */
+};
+
+struct rdac_handler {
+	struct list_head	entry; /* list waiting to submit MODE SELECT */
+	unsigned		timeout;
+	struct rdac_controller	*ctlr;
+#define UNINITIALIZED_LUN	(1 << 8)
+	unsigned		lun;
+	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
+	struct dm_path		*path;
+	struct work_struct	work;
+#define	SEND_C2_INQUIRY		1
+#define	SEND_C4_INQUIRY		2
+#define	SEND_C8_INQUIRY		3
+#define	SEND_C9_INQUIRY		4
+#define	SEND_MODE_SELECT	5
+	int			cmd_to_send;
+	union {
+		struct c2_inquiry c2;
+		struct c4_inquiry c4;
+		struct c8_inquiry c8;
+		struct c9_inquiry c9;
+	} inq;
+};
+
+static LIST_HEAD(ctlr_list);
+static DEFINE_SPINLOCK(list_lock);
+static struct workqueue_struct *rdac_wkqd;
+
+static inline int had_failures(struct request *req, int error)
+{
+	return (error || host_byte(req->errors) != DID_OK ||
+			msg_byte(req->errors) != COMMAND_COMPLETE);
+}
+
+static void rdac_resubmit_all(struct rdac_handler *h)
+{
+	struct rdac_controller *ctlr = h->ctlr;
+	struct rdac_handler *tmp, *h1;
+
+	spin_lock(&ctlr->lock);
+	list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
+		h1->cmd_to_send = SEND_C9_INQUIRY;
+		queue_work(rdac_wkqd, &h1->work);
+		list_del(&h1->entry);
+	}
+	ctlr->submitted = 0;
+	spin_unlock(&ctlr->lock);
+}
+
+static void mode_select_endio(struct request *req, int error)
+{
+	struct rdac_handler *h = req->end_io_data;
+	struct scsi_sense_hdr sense_hdr;
+	int sense = 0, fail = 0;
+
+	if (had_failures(req, error)) {
+		fail = 1;
+		goto failed;
+	}
+
+	if (status_byte(req->errors) == CHECK_CONDITION) {
+		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
+				&sense_hdr);
+		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
+				sense_hdr.ascq;
+		/* If it is retryable failure, submit the c9 inquiry again */
+		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
+		    sense == 0x62900) {
+			/* 0x59136 - Command lock contention
+			 * 0x[6b]8b02 - Quiescence in progress or achieved
+			 * 0x62900 - Power On, Reset, or Bus Device Reset
+			 */
+			h->cmd_to_send = SEND_C9_INQUIRY;
+			queue_work(rdac_wkqd, &h->work);
+			goto done;
+		}
+		if (sense)
+			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
+					h->path->dev->name, sense);
+	}
+failed:
+	if (fail || sense)
+		dm_pg_init_complete(h->path, MP_FAIL_PATH);
+	else
+		dm_pg_init_complete(h->path, 0);
+
+done:
+	rdac_resubmit_all(h);
+	__blk_put_request(req->q, req);
+}
+
+static struct request *get_rdac_req(struct rdac_handler *h,
+			void *buffer, unsigned buflen, int rw)
+{
+	struct request *rq;
+	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
+
+	rq = blk_get_request(q, rw, GFP_KERNEL);
+
+	if (!rq) {
+		DMINFO("get_rdac_req: blk_get_request failed");
+		return NULL;
+	}
+
+	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+		blk_put_request(rq);
+		DMINFO("get_rdac_req: blk_rq_map_kern failed");
+		return NULL;
+	}
+
+	memset(&rq->cmd, 0, BLK_MAX_CDB);
+	rq->sense = h->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = 0;
+
+	rq->end_io_data = h;
+	rq->timeout = h->timeout;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags = REQ_FAILFAST | REQ_NOMERGE;
+	return rq;
+}
+
+static struct request *rdac_failover_get(struct rdac_handler *h)
+{
+	struct request *rq;
+	struct rdac_mode_common *common;
+	unsigned data_size;
+
+	if (h->ctlr->use_10_ms) {
+		struct rdac_pg_expanded *rdac_pg;
+
+		data_size = sizeof(struct rdac_pg_expanded);
+		rdac_pg = &h->ctlr->mode_select.expanded;
+		memset(rdac_pg, 0, data_size);
+		common = &rdac_pg->common;
+		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
+		rdac_pg->subpage_code = 0x1;
+		rdac_pg->page_len[0] = 0x01;
+		rdac_pg->page_len[1] = 0x28;
+		rdac_pg->lun_table[h->lun] = 0x81;
+	} else {
+		struct rdac_pg_legacy *rdac_pg;
+
+		data_size = sizeof(struct rdac_pg_legacy);
+		rdac_pg = &h->ctlr->mode_select.legacy;
+		memset(rdac_pg, 0, data_size);
+		common = &rdac_pg->common;
+		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
+		rdac_pg->page_len = 0x68;
+		rdac_pg->lun_table[h->lun] = 0x81;
+	}
+	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
+	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+	common->rdac_options = RDAC_FORCED_QUIESENCE;
+
+	/* get request for block layer packet command */
+	rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
+	if (!rq) {
+		DMERR("rdac_failover_get: no rq");
+		return NULL;
+	}
+
+	/* Prepare the command. */
+	if (h->ctlr->use_10_ms) {
+		rq->cmd[0] = MODE_SELECT_10;
+		rq->cmd[7] = data_size >> 8;
+		rq->cmd[8] = data_size & 0xff;
+	} else {
+		rq->cmd[0] = MODE_SELECT;
+		rq->cmd[4] = data_size;
+	}
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+	return rq;
+}
+
+/* Acquires h->ctlr->lock */
+static void submit_mode_select(struct rdac_handler *h)
+{
+	struct request *rq;
+	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
+
+	spin_lock(&h->ctlr->lock);
+	if (h->ctlr->submitted) {
+		list_add(&h->entry, &h->ctlr->cmd_list);
+		goto drop_lock;
+	}
+
+	if (!q) {
+		DMINFO("submit_mode_select: no queue");
+		goto fail_path;
+	}
+
+	rq = rdac_failover_get(h);
+	if (!rq) {
+		DMERR("submit_mode_select: no rq");
+		goto fail_path;
+	}
+
+	DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);
+
+	blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
+	h->ctlr->submitted = 1;
+	goto drop_lock;
+fail_path:
+	dm_pg_init_complete(h->path, MP_FAIL_PATH);
+drop_lock:
+	spin_unlock(&h->ctlr->lock);
+}
+
+static void release_ctlr(struct kref *kref)
+{
+	struct rdac_controller *ctlr;
+	ctlr = container_of(kref, struct rdac_controller, kref);
+
+	spin_lock(&list_lock);
+	list_del(&ctlr->node);
+	spin_unlock(&list_lock);
+	kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+{
+	struct rdac_controller *ctlr, *tmp;
+
+	spin_lock(&list_lock);
+
+	list_for_each_entry(tmp, &ctlr_list, node) {
+		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
+		    (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+			kref_get(&tmp->kref);
+			spin_unlock(&list_lock);
+			return tmp;
+		}
+	}
+	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+	if (!ctlr)
+		goto done;
+
+	/* initialize fields of controller */
+	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
+	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+	kref_init(&ctlr->kref);
+	spin_lock_init(&ctlr->lock);
+	ctlr->submitted = 0;
+	ctlr->use_10_ms = -1;
+	INIT_LIST_HEAD(&ctlr->cmd_list);
+	list_add(&ctlr->node, &ctlr_list);
+done:
+	spin_unlock(&list_lock);
+	return ctlr;
+}
+
+static void c4_endio(struct request *req, int error)
+{
+	struct rdac_handler *h = req->end_io_data;
+	struct c4_inquiry *sp;
+
+	if (had_failures(req, error)) {
+		dm_pg_init_complete(h->path, MP_FAIL_PATH);
+		goto done;
+	}
+
+	sp = &h->inq.c4;
+
+	h->ctlr = get_controller(sp->subsys_id, sp->slot_id);
+
+	if (h->ctlr) {
+		h->cmd_to_send = SEND_C9_INQUIRY;
+		queue_work(rdac_wkqd, &h->work);
+	} else
+		dm_pg_init_complete(h->path, MP_FAIL_PATH);
+done:
+	__blk_put_request(req->q, req);
+}
+
+static void c2_endio(struct request *req, int error)
+{
+	struct rdac_handler *h = req->end_io_data;
+	struct c2_inquiry *sp;
+
+	if (had_failures(req, error)) {
+		dm_pg_init_complete(h->path, MP_FAIL_PATH);
+		goto done;
+	}
+
+	sp = &h->inq.c2;
+
+	/* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
+	if (sp->max_lun_supported >= MODE6_MAX_LUN)
+		h->ctlr->use_10_ms = 1;
+	else
+		h->ctlr->use_10_ms = 0;
+
+	h->cmd_to_send = SEND_MODE_SELECT;
+	queue_work(rdac_wkqd, &h->work);
+done:
+	__blk_put_request(req->q, req);
+}
+
+static void c9_endio(struct request *req, int error)
+{
+	struct rdac_handler *h = req->end_io_data;
+	struct c9_inquiry *sp;
+
+	if (had_failures(req, error)) {
+		dm_pg_init_complete(h->path, MP_FAIL_PATH);
+		goto done;
+	}
+
+	/* We need to look at the sense keys here to take clear action.
+	 * For now simple logic: If the host is in AVT mode or if controller
+	 * owns the lun, return dm_pg_init_complete(), otherwise submit
+	 * MODE SELECT.
+	 */
+	sp = &h->inq.c9;
+
+	/* If in AVT mode, return success */
+	if ((sp->avte_cvp >> 7) == 0x1) {
+		dm_pg_init_complete(h->path, 0);
+		goto done;
+	}
+
+	/* If the controller on this path owns the LUN, return success */
+	if (sp->avte_cvp & 0x1) {
+		dm_pg_init_complete(h->path, 0);
+		goto done;
+	}
+
+	if (h->ctlr) {
+		if (h->ctlr->use_10_ms == -1)
+			h->cmd_to_send = SEND_C2_INQUIRY;
+		else
+			h->cmd_to_send = SEND_MODE_SELECT;
+	} else
+		h->cmd_to_send = SEND_C4_INQUIRY;
+	queue_work(rdac_wkqd, &h->work);
+done:
+	__blk_put_request(req->q, req);
+}
+
+static void c8_endio(struct request *req, int error)
+{
+	struct rdac_handler *h = req->end_io_data;
+	struct c8_inquiry *sp;
+
+	if (had_failures(req, error)) {
+		dm_pg_init_complete(h->path, MP_FAIL_PATH);
+		goto done;
+	}
+
+	/* We need to look at the sense keys here to take clear action.
+	 * For now simple logic: Get the lun from the inquiry page.
+	 */
+	sp = &h->inq.c8;
+	h->lun = sp->lun[7]; /* currently it uses only one byte */
+	h->cmd_to_send = SEND_C9_INQUIRY;
+	queue_work(rdac_wkqd, &h->work);
+done:
+	__blk_put_request(req->q, req);
+}
+
+static void submit_inquiry(struct rdac_handler *h, int page_code,
+		unsigned int len, rq_end_io_fn endio)
+{
+	struct request *rq;
+	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
+
+	if (!q)
+		goto fail_path;
+
+	rq = get_rdac_req(h, &h->inq, len, READ);
+	if (!rq)
+		goto fail_path;
+
+	/* Prepare the command. */
+	rq->cmd[0] = INQUIRY;
+	rq->cmd[1] = 1;
+	rq->cmd[2] = page_code;
+	rq->cmd[4] = len;
+	rq->cmd_len = COMMAND_SIZE(INQUIRY);
+	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
+	return;
+
+fail_path:
+	dm_pg_init_complete(h->path, MP_FAIL_PATH);
+}
+
+static void service_wkq(struct work_struct *work)
+{
+	struct rdac_handler *h = container_of(work, struct rdac_handler, work);
+
+	switch (h->cmd_to_send) {
+	case SEND_C2_INQUIRY:
+		submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
+		break;
+	case SEND_C4_INQUIRY:
+		submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
+		break;
+	case SEND_C8_INQUIRY:
+		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
+		break;
+	case SEND_C9_INQUIRY:
+		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
+		break;
+	case SEND_MODE_SELECT:
+		submit_mode_select(h);
+		break;
+	default:
+		BUG();
+	}
+}
+/*
+ * only support subpage2c until we confirm that this is just a matter
+ * of updating firmware or not, and RDAC (basic AVT works already) for
+ * now, but we can add these in when we get time and testers
+ */
+static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
+{
+	struct rdac_handler *h;
+	unsigned timeout;
+
+	if (argc == 0) {
+		/* No arguments: use defaults */
+		timeout = RDAC_FAILOVER_TIMEOUT;
+	} else if (argc != 1) {
+		DMWARN("incorrect number of arguments");
+		return -EINVAL;
+	} else {
+		if (sscanf(argv[0], "%u", &timeout) != 1) {
+			DMWARN("invalid timeout value");
+			return -EINVAL;
+		}
+	}
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	hwh->context = h;
+	h->timeout = timeout;
+	h->lun = UNINITIALIZED_LUN;
+	INIT_WORK(&h->work, service_wkq);
+	DMWARN("using RDAC command with timeout %u", h->timeout);
+
+	return 0;
+}
+
+static void rdac_destroy(struct hw_handler *hwh)
+{
+	struct rdac_handler *h = hwh->context;
+
+	if (h->ctlr)
+		kref_put(&h->ctlr->kref, release_ctlr);
+	kfree(h);
+	hwh->context = NULL;
+}
+
+static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
+{
+	/* Try default handler */
+	return dm_scsi_err_handler(hwh, bio);
+}
+
+static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
+			struct dm_path *path)
+{
+	struct rdac_handler *h = hwh->context;
+
+	h->path = path;
+	switch (h->lun) {
+	case UNINITIALIZED_LUN:
+		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
+		break;
+	default:
+		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
+	}
+}
+
+static struct hw_handler_type rdac_handler = {
+	.name = RDAC_DM_HWH_NAME,
+	.module = THIS_MODULE,
+	.create = rdac_create,
+	.destroy = rdac_destroy,
+	.pg_init = rdac_pg_init,
+	.error = rdac_error,
+};
+
+static int __init rdac_init(void)
+{
+	int r = dm_register_hw_handler(&rdac_handler);
+
+	if (r < 0) {
+		DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
+		return r;
+	}
+
+	rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
+	if (!rdac_wkqd) {
+		DMERR("Failed to create workqueue rdac_wkqd.");
+		dm_unregister_hw_handler(&rdac_handler);
+		return -ENOMEM;
+	}
+
+	DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
+	return 0;
+}
+
+static void __exit rdac_exit(void)
+{
+	int r = dm_unregister_hw_handler(&rdac_handler);
+
+	destroy_workqueue(rdac_wkqd);
+	if (r < 0)
+		DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
+}
+
+module_init(rdac_init);
+module_exit(rdac_exit);
+
+MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
+MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RDAC_DM_HWH_VER);
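get_controller() and release_ctlr() in the new handler implement a lookup-or-create cache of controller objects, keyed by subsystem and slot IDs and shared between paths via a kref. The skeleton of that idiom, as a kernel-style fragment with generic names rather than the rdac structures: the reference is taken under the list lock on lookup, and the final kref_put() unpublishes the object from the list before freeing it.

    /* Kernel-style sketch of a kref'd lookup list (generic names). */
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct cached_obj {
        struct kref kref;
        struct list_head node;
    };

    static LIST_HEAD(cache);
    static DEFINE_SPINLOCK(cache_lock);

    static void cached_obj_release(struct kref *kref)   /* final put */
    {
        struct cached_obj *o = container_of(kref, struct cached_obj, kref);

        spin_lock(&cache_lock);
        list_del(&o->node);             /* unpublish, then free */
        spin_unlock(&cache_lock);
        kfree(o);
    }

    static void cached_obj_put(struct cached_obj *o)
    {
        kref_put(&o->kref, cached_obj_release);
    }

    static struct cached_obj *cached_obj_get(void)
    {
        struct cached_obj *o;

        spin_lock(&cache_lock);
        list_for_each_entry(o, &cache, node) {
            kref_get(&o->kref);         /* found: share existing object */
            spin_unlock(&cache_lock);
            return o;
        }
        spin_unlock(&cache_lock);

        o = kzalloc(sizeof(*o), GFP_KERNEL);
        if (o) {
            kref_init(&o->kref);        /* refcount starts at 1 */
            spin_lock(&cache_lock);
            list_add(&o->node, &cache);
            spin_unlock(&cache_lock);
        }
        return o;
    }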
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de54b39e6ffe..d6ca9d0a6fd1 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -83,7 +83,7 @@ struct multipath {
 	struct work_struct trigger_event;
 
 	/*
-	 * We must use a mempool of mpath_io structs so that we
+	 * We must use a mempool of dm_mpath_io structs so that we
 	 * can resubmit bios on error.
 	 */
 	mempool_t *mpio_pool;
@@ -92,7 +92,7 @@ struct multipath {
 /*
  * Context information attached to each bio we process.
  */
-struct mpath_io {
+struct dm_mpath_io {
 	struct pgpath *pgpath;
 	struct dm_bio_details details;
 };
@@ -122,7 +122,7 @@ static struct pgpath *alloc_pgpath(void)
 	return pgpath;
 }
 
-static inline void free_pgpath(struct pgpath *pgpath)
+static void free_pgpath(struct pgpath *pgpath)
 {
 	kfree(pgpath);
 }
@@ -299,8 +299,8 @@ static int __must_push_back(struct multipath *m)
 		 dm_noflush_suspending(m->ti));
 }
 
-static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
-		  unsigned was_queued)
+static int map_io(struct multipath *m, struct bio *bio,
+		  struct dm_mpath_io *mpio, unsigned was_queued)
 {
 	int r = DM_MAPIO_REMAPPED;
 	unsigned long flags;
@@ -374,7 +374,7 @@ static void dispatch_queued_ios(struct multipath *m)
 	int r;
 	unsigned long flags;
 	struct bio *bio = NULL, *next;
-	struct mpath_io *mpio;
+	struct dm_mpath_io *mpio;
 	union map_info *info;
 
 	spin_lock_irqsave(&m->lock, flags);
@@ -795,12 +795,9 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
 			 union map_info *map_context)
 {
 	int r;
-	struct mpath_io *mpio;
+	struct dm_mpath_io *mpio;
 	struct multipath *m = (struct multipath *) ti->private;
 
-	if (bio_barrier(bio))
-		return -EOPNOTSUPP;
-
 	mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
 	dm_bio_record(&mpio->details, bio);
 
@@ -1014,7 +1011,7 @@ void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
  * end_io handling
  */
 static int do_end_io(struct multipath *m, struct bio *bio,
-		     int error, struct mpath_io *mpio)
+		     int error, struct dm_mpath_io *mpio)
 {
 	struct hw_handler *hwh = &m->hw_handler;
 	unsigned err_flags = MP_FAIL_PATH;	/* Default behavior */
@@ -1075,8 +1072,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 static int multipath_end_io(struct dm_target *ti, struct bio *bio,
 			    int error, union map_info *map_context)
 {
-	struct multipath *m = (struct multipath *) ti->private;
-	struct mpath_io *mpio = (struct mpath_io *) map_context->ptr;
+	struct multipath *m = ti->private;
+	struct dm_mpath_io *mpio = map_context->ptr;
 	struct pgpath *pgpath = mpio->pgpath;
 	struct path_selector *ps;
 	int r;
@@ -1346,22 +1343,20 @@ static int __init dm_multipath_init(void)
 	int r;
 
 	/* allocate a slab for the dm_ios */
-	_mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io),
-					0, 0, NULL, NULL);
+	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
 	if (!_mpio_cache)
 		return -ENOMEM;
 
 	r = dm_register_target(&multipath_target);
 	if (r < 0) {
-		DMERR("%s: register failed %d", multipath_target.name, r);
+		DMERR("register failed %d", r);
 		kmem_cache_destroy(_mpio_cache);
 		return -EINVAL;
 	}
 
 	kmultipathd = create_workqueue("kmpathd");
 	if (!kmultipathd) {
-		DMERR("%s: failed to create workqueue kmpathd",
-		      multipath_target.name);
+		DMERR("failed to create workqueue kmpathd");
 		dm_unregister_target(&multipath_target);
 		kmem_cache_destroy(_mpio_cache);
 		return -ENOMEM;
@@ -1382,8 +1377,7 @@ static void __exit dm_multipath_exit(void)
 
 	r = dm_unregister_target(&multipath_target);
 	if (r < 0)
-		DMERR("%s: target unregister failed %d",
-		      multipath_target.name, r);
+		DMERR("target unregister failed %d", r);
 	kmem_cache_destroy(_mpio_cache);
 }
 
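As the comment in struct multipath notes, the per-bio contexts come from a mempool precisely so they can be allocated on the error-resubmission path: mempool_alloc() with GFP_NOIO does not fail, it sleeps until a previously freed element returns. A kernel-style sketch of the setup, with 256 mirroring MIN_IOS in dm-mpath.c:

    /* Kernel-style sketch: a slab-backed mempool guarantees forward
     * progress for per-bio contexts. mempool_create_slab_pool() keeps
     * min_nr objects in reserve; mempool_alloc() with GFP_NOIO waits
     * for a freed element instead of failing. */
    #include <linux/mempool.h>
    #include <linux/slab.h>

    static struct kmem_cache *io_cache;   /* KMEM_CACHE(dm_mpath_io, 0) */
    static mempool_t *io_pool;

    static int io_pool_create(void)
    {
        io_pool = mempool_create_slab_pool(256 /* MIN_IOS */, io_cache);
        return io_pool ? 0 : -ENOMEM;
    }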
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ef124b71ccc8..1a876f9965e0 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -24,6 +24,7 @@
 #define DM_IO_PAGES 64
 
 #define DM_RAID1_HANDLE_ERRORS 0x01
+#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
 
 static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
 
@@ -85,6 +86,7 @@ struct region_hash {
 	struct list_head clean_regions;
 	struct list_head quiesced_regions;
 	struct list_head recovered_regions;
+	struct list_head failed_recovered_regions;
 };
 
 enum {
@@ -132,6 +134,7 @@ struct mirror_set {
 	/* recovery */
 	region_t nr_regions;
 	int in_sync;
+	int log_failure;
 
 	struct mirror *default_mirror;	/* Default mirror */
 
@@ -204,6 +207,7 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
 	INIT_LIST_HEAD(&rh->clean_regions);
 	INIT_LIST_HEAD(&rh->quiesced_regions);
 	INIT_LIST_HEAD(&rh->recovered_regions);
+	INIT_LIST_HEAD(&rh->failed_recovered_regions);
 
 	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
 						      sizeof(struct region));
@@ -368,6 +372,7 @@ static void rh_update_states(struct region_hash *rh)
 
 	LIST_HEAD(clean);
 	LIST_HEAD(recovered);
+	LIST_HEAD(failed_recovered);
 
 	/*
 	 * Quickly grab the lists.
@@ -378,10 +383,8 @@ static void rh_update_states(struct region_hash *rh)
 		list_splice(&rh->clean_regions, &clean);
 		INIT_LIST_HEAD(&rh->clean_regions);
 
-		list_for_each_entry (reg, &clean, list) {
-			rh->log->type->clear_region(rh->log, reg->key);
+		list_for_each_entry(reg, &clean, list)
 			list_del(&reg->hash_list);
-		}
 	}
 
 	if (!list_empty(&rh->recovered_regions)) {
@@ -391,6 +394,15 @@ static void rh_update_states(struct region_hash *rh)
 		list_for_each_entry (reg, &recovered, list)
 			list_del(&reg->hash_list);
 	}
+
+	if (!list_empty(&rh->failed_recovered_regions)) {
+		list_splice(&rh->failed_recovered_regions, &failed_recovered);
+		INIT_LIST_HEAD(&rh->failed_recovered_regions);
+
+		list_for_each_entry(reg, &failed_recovered, list)
+			list_del(&reg->hash_list);
+	}
+
 	spin_unlock(&rh->region_lock);
 	write_unlock_irq(&rh->hash_lock);
 
@@ -405,10 +417,17 @@ static void rh_update_states(struct region_hash *rh)
 		mempool_free(reg, rh->region_pool);
 	}
 
-	rh->log->type->flush(rh->log);
+	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
+		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
422 | mempool_free(reg, rh->region_pool); | ||
423 | } | ||
409 | 424 | ||
410 | list_for_each_entry_safe (reg, next, &clean, list) | 425 | list_for_each_entry_safe(reg, next, &clean, list) { |
426 | rh->log->type->clear_region(rh->log, reg->key); | ||
411 | mempool_free(reg, rh->region_pool); | 427 | mempool_free(reg, rh->region_pool); |
428 | } | ||
429 | |||
430 | rh->log->type->flush(rh->log); | ||
412 | } | 431 | } |
413 | 432 | ||
414 | static void rh_inc(struct region_hash *rh, region_t region) | 433 | static void rh_inc(struct region_hash *rh, region_t region) |
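rh_update_states() now drains three lists instead of two, and the per-region clear_region() calls move after the recovered-region processing so a single flush() at the end covers everything in one pass. The list handling follows the usual splice-under-the-lock idiom; a generic sketch of that idiom under stated assumptions (names and types are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head list;
};

static void drain_queue(spinlock_t *lock, struct list_head *queue)
{
	struct work_item *item, *next;
	LIST_HEAD(local);

	/* Steal the whole queue in O(1) while holding the lock ... */
	spin_lock_irq(lock);
	list_splice(queue, &local);
	INIT_LIST_HEAD(queue);
	spin_unlock_irq(lock);

	/* ... then walk it at leisure with no lock held.  The _safe
	 * variant is required because each entry is freed as we go. */
	list_for_each_entry_safe(item, next, &local, list) {
		list_del(&item->list);
		kfree(item);
	}
}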
@@ -555,21 +574,25 @@ static struct region *rh_recovery_start(struct region_hash *rh) | |||
555 | return reg; | 574 | return reg; |
556 | } | 575 | } |
557 | 576 | ||
558 | /* FIXME: success ignored for now */ | ||
559 | static void rh_recovery_end(struct region *reg, int success) | 577 | static void rh_recovery_end(struct region *reg, int success) |
560 | { | 578 | { |
561 | struct region_hash *rh = reg->rh; | 579 | struct region_hash *rh = reg->rh; |
562 | 580 | ||
563 | spin_lock_irq(&rh->region_lock); | 581 | spin_lock_irq(&rh->region_lock); |
564 | list_add(®->list, ®->rh->recovered_regions); | 582 | if (success) |
583 | list_add(®->list, ®->rh->recovered_regions); | ||
584 | else { | ||
585 | reg->state = RH_NOSYNC; | ||
586 | list_add(®->list, ®->rh->failed_recovered_regions); | ||
587 | } | ||
565 | spin_unlock_irq(&rh->region_lock); | 588 | spin_unlock_irq(&rh->region_lock); |
566 | 589 | ||
567 | wake(rh->ms); | 590 | wake(rh->ms); |
568 | } | 591 | } |
569 | 592 | ||
570 | static void rh_flush(struct region_hash *rh) | 593 | static int rh_flush(struct region_hash *rh) |
571 | { | 594 | { |
572 | rh->log->type->flush(rh->log); | 595 | return rh->log->type->flush(rh->log); |
573 | } | 596 | } |
574 | 597 | ||
575 | static void rh_delay(struct region_hash *rh, struct bio *bio) | 598 | static void rh_delay(struct region_hash *rh, struct bio *bio) |
@@ -633,7 +656,14 @@ static void recovery_complete(int read_err, unsigned int write_err, | |||
633 | { | 656 | { |
634 | struct region *reg = (struct region *) context; | 657 | struct region *reg = (struct region *) context; |
635 | 658 | ||
636 | /* FIXME: better error handling */ | 659 | if (read_err) |
660 | /* Read error means the failure of default mirror. */ | ||
661 | DMERR_LIMIT("Unable to read primary mirror during recovery"); | ||
662 | |||
663 | if (write_err) | ||
664 | DMERR_LIMIT("Write error during recovery (error = 0x%x)", | ||
665 | write_err); | ||
666 | |||
637 | rh_recovery_end(reg, !(read_err || write_err)); | 667 | rh_recovery_end(reg, !(read_err || write_err)); |
638 | } | 668 | } |
639 | 669 | ||
@@ -863,12 +893,15 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
863 | */ | 893 | */ |
864 | rh_inc_pending(&ms->rh, &sync); | 894 | rh_inc_pending(&ms->rh, &sync); |
865 | rh_inc_pending(&ms->rh, &nosync); | 895 | rh_inc_pending(&ms->rh, &nosync); |
866 | rh_flush(&ms->rh); | 896 | ms->log_failure = rh_flush(&ms->rh) ? 1 : 0; |
867 | 897 | ||
868 | /* | 898 | /* |
869 | * Dispatch io. | 899 | * Dispatch io. |
870 | */ | 900 | */ |
871 | while ((bio = bio_list_pop(&sync))) | 901 | if (unlikely(ms->log_failure)) |
902 | while ((bio = bio_list_pop(&sync))) | ||
903 | bio_endio(bio, bio->bi_size, -EIO); | ||
904 | else while ((bio = bio_list_pop(&sync))) | ||
872 | do_write(ms, bio); | 905 | do_write(ms, bio); |
873 | 906 | ||
874 | while ((bio = bio_list_pop(&recover))) | 907 | while ((bio = bio_list_pop(&recover))) |
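With rh_flush() now returning the dirty log's flush status, do_writes() records it in ms->log_failure and refuses to dispatch synchronous writes it can no longer journal. A condensed sketch of that branch, using stand-in types and the three-argument bio_endio() of this kernel as seen in the hunk (do_write_stub() is hypothetical):

#include <linux/bio.h>
#include "dm-bio-list.h"

struct mirror_set_stub {
	int log_failure;	/* set from rh_flush()'s return value */
};

static void do_write_stub(struct mirror_set_stub *ms, struct bio *bio)
{
	/* hypothetical: would queue the bio to every mirror leg */
}

static void dispatch_sync_writes(struct mirror_set_stub *ms,
				 struct bio_list *sync)
{
	struct bio *bio;

	if (unlikely(ms->log_failure)) {
		/* The dirty log could not be flushed, so the on-disk
		 * region state is unreliable: fail the writes rather
		 * than complete them without a record. */
		while ((bio = bio_list_pop(sync)))
			bio_endio(bio, bio->bi_size, -EIO);
		return;
	}

	while ((bio = bio_list_pop(sync)))
		do_write_stub(ms, bio);
}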
@@ -1145,6 +1178,15 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1145 | argv += args_used; | 1178 | argv += args_used; |
1146 | argc -= args_used; | 1179 | argc -= args_used; |
1147 | 1180 | ||
1181 | /* | ||
1182 | * Any read-balancing addition depends on the | ||
1183 | * DM_RAID1_HANDLE_ERRORS flag being present. | ||
1184 | * This is because the decision to balance depends | ||
1185 | * on the sync state of a region. If the above | ||
1186 | * flag is not present, we ignore errors; and | ||
1187 | * the sync state may be inaccurate. | ||
1188 | */ | ||
1189 | |||
1148 | if (argc) { | 1190 | if (argc) { |
1149 | ti->error = "Too many mirror arguments"; | 1191 | ti->error = "Too many mirror arguments"; |
1150 | free_context(ms, ti, ms->nr_mirrors); | 1192 | free_context(ms, ti, ms->nr_mirrors); |
@@ -1288,12 +1330,12 @@ static int mirror_status(struct dm_target *ti, status_type_t type, | |||
1288 | for (m = 0; m < ms->nr_mirrors; m++) | 1330 | for (m = 0; m < ms->nr_mirrors; m++) |
1289 | DMEMIT("%s ", ms->mirror[m].dev->name); | 1331 | DMEMIT("%s ", ms->mirror[m].dev->name); |
1290 | 1332 | ||
1291 | DMEMIT("%llu/%llu", | 1333 | DMEMIT("%llu/%llu 0 ", |
1292 | (unsigned long long)ms->rh.log->type-> | 1334 | (unsigned long long)ms->rh.log->type-> |
1293 | get_sync_count(ms->rh.log), | 1335 | get_sync_count(ms->rh.log), |
1294 | (unsigned long long)ms->nr_regions); | 1336 | (unsigned long long)ms->nr_regions); |
1295 | 1337 | ||
1296 | sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen); | 1338 | sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz); |
1297 | 1339 | ||
1298 | break; | 1340 | break; |
1299 | 1341 | ||
@@ -1335,8 +1377,7 @@ static int __init dm_mirror_init(void) | |||
1335 | 1377 | ||
1336 | r = dm_register_target(&mirror_target); | 1378 | r = dm_register_target(&mirror_target); |
1337 | if (r < 0) { | 1379 | if (r < 0) { |
1338 | DMERR("%s: Failed to register mirror target", | 1380 | DMERR("Failed to register mirror target"); |
1339 | mirror_target.name); | ||
1340 | dm_dirty_log_exit(); | 1381 | dm_dirty_log_exit(); |
1341 | } | 1382 | } |
1342 | 1383 | ||
@@ -1349,7 +1390,7 @@ static void __exit dm_mirror_exit(void) | |||
1349 | 1390 | ||
1350 | r = dm_unregister_target(&mirror_target); | 1391 | r = dm_unregister_target(&mirror_target); |
1351 | if (r < 0) | 1392 | if (r < 0) |
1352 | DMERR("%s: unregister failed %d", mirror_target.name, r); | 1393 | DMERR("unregister failed %d", r); |
1353 | 1394 | ||
1354 | dm_dirty_log_exit(); | 1395 | dm_dirty_log_exit(); |
1355 | } | 1396 | } |
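The mirror_status() change fixes a real bug alongside the new field: assigning the dirty log's status length to sz discarded everything DMEMIT() had already placed in the buffer. A sketch of the accumulate-and-offset convention, assuming the DMEMIT() definition in dm.h (only the first line of which appears in the dm.h hunk below); device names and counts are placeholders:

#include <linux/kernel.h>

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

static int log_status_stub(char *result, unsigned maxlen)
{
	int sz = 0;

	DMEMIT("core 2 64 nosync");	/* stand-in dirty-log status */
	return sz;
}

static int mirror_status_sketch(char *result, unsigned maxlen)
{
	int sz = 0;

	DMEMIT("2 253:0 253:1 ");		/* mirror legs */
	DMEMIT("%llu/%llu 0 ", 100ULL, 128ULL);	/* sync count + new field */

	/*
	 * The bug: "sz = log_status_stub(result, maxlen);" restarted
	 * at offset 0 and clobbered everything emitted above.  The
	 * fix appends at result+sz with the space actually left:
	 */
	sz += log_status_stub(result + sz, maxlen - sz);

	return sz;
}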
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index a348a97b65af..391dfa2ad434 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c | |||
@@ -205,7 +205,7 @@ static void __exit dm_rr_exit(void) | |||
205 | int r = dm_unregister_path_selector(&rr_ps); | 205 | int r = dm_unregister_path_selector(&rr_ps); |
206 | 206 | ||
207 | if (r < 0) | 207 | if (r < 0) |
208 | DMERR("round-robin: unregister failed %d", r); | 208 | DMERR("unregister failed %d", r); |
209 | } | 209 | } |
210 | 210 | ||
211 | module_init(dm_rr_init); | 211 | module_init(dm_rr_init); |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 0821a2b68a73..83ddbfe6b8a4 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -42,8 +42,8 @@ | |||
42 | static struct workqueue_struct *ksnapd; | 42 | static struct workqueue_struct *ksnapd; |
43 | static void flush_queued_bios(struct work_struct *work); | 43 | static void flush_queued_bios(struct work_struct *work); |
44 | 44 | ||
45 | struct pending_exception { | 45 | struct dm_snap_pending_exception { |
46 | struct exception e; | 46 | struct dm_snap_exception e; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * Origin buffers waiting for this to complete are held | 49 | * Origin buffers waiting for this to complete are held |
@@ -63,7 +63,7 @@ struct pending_exception { | |||
63 | * group of pending_exceptions. It is always last to get freed. | 63 | * group of pending_exceptions. It is always last to get freed. |
64 | * These fields get set up when writing to the origin. | 64 | * These fields get set up when writing to the origin. |
65 | */ | 65 | */ |
66 | struct pending_exception *primary_pe; | 66 | struct dm_snap_pending_exception *primary_pe; |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * Number of pending_exceptions processing this chunk. | 69 | * Number of pending_exceptions processing this chunk. |
@@ -137,7 +137,7 @@ static void exit_origin_hash(void) | |||
137 | kfree(_origins); | 137 | kfree(_origins); |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline unsigned int origin_hash(struct block_device *bdev) | 140 | static unsigned origin_hash(struct block_device *bdev) |
141 | { | 141 | { |
142 | return bdev->bd_dev & ORIGIN_MASK; | 142 | return bdev->bd_dev & ORIGIN_MASK; |
143 | } | 143 | } |
@@ -231,7 +231,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size) | |||
231 | static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) | 231 | static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) |
232 | { | 232 | { |
233 | struct list_head *slot; | 233 | struct list_head *slot; |
234 | struct exception *ex, *next; | 234 | struct dm_snap_exception *ex, *next; |
235 | int i, size; | 235 | int i, size; |
236 | 236 | ||
237 | size = et->hash_mask + 1; | 237 | size = et->hash_mask + 1; |
@@ -245,18 +245,19 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache * | |||
245 | vfree(et->table); | 245 | vfree(et->table); |
245 | vfree(et->table); | 245 | vfree(et->table); |
246 | } | 246 | } |
247 | 247 | ||
248 | static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk) | 248 | static uint32_t exception_hash(struct exception_table *et, chunk_t chunk) |
249 | { | 249 | { |
250 | return chunk & et->hash_mask; | 250 | return chunk & et->hash_mask; |
251 | } | 251 | } |
252 | 252 | ||
253 | static void insert_exception(struct exception_table *eh, struct exception *e) | 253 | static void insert_exception(struct exception_table *eh, |
254 | struct dm_snap_exception *e) | ||
254 | { | 255 | { |
255 | struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; | 256 | struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; |
256 | list_add(&e->hash_list, l); | 257 | list_add(&e->hash_list, l); |
257 | } | 258 | } |
258 | 259 | ||
259 | static inline void remove_exception(struct exception *e) | 260 | static void remove_exception(struct dm_snap_exception *e) |
260 | { | 261 | { |
261 | list_del(&e->hash_list); | 262 | list_del(&e->hash_list); |
262 | } | 263 | } |
@@ -265,11 +266,11 @@ static inline void remove_exception(struct exception *e) | |||
265 | * Return the exception data for a sector, or NULL if not | 266 | * Return the exception data for a sector, or NULL if not |
266 | * remapped. | 267 | * remapped. |
267 | */ | 268 | */ |
268 | static struct exception *lookup_exception(struct exception_table *et, | 269 | static struct dm_snap_exception *lookup_exception(struct exception_table *et, |
269 | chunk_t chunk) | 270 | chunk_t chunk) |
270 | { | 271 | { |
271 | struct list_head *slot; | 272 | struct list_head *slot; |
272 | struct exception *e; | 273 | struct dm_snap_exception *e; |
273 | 274 | ||
274 | slot = &et->table[exception_hash(et, chunk)]; | 275 | slot = &et->table[exception_hash(et, chunk)]; |
275 | list_for_each_entry (e, slot, hash_list) | 276 | list_for_each_entry (e, slot, hash_list) |
@@ -279,9 +280,9 @@ static struct exception *lookup_exception(struct exception_table *et, | |||
279 | return NULL; | 280 | return NULL; |
280 | } | 281 | } |
281 | 282 | ||
282 | static inline struct exception *alloc_exception(void) | 283 | static struct dm_snap_exception *alloc_exception(void) |
283 | { | 284 | { |
284 | struct exception *e; | 285 | struct dm_snap_exception *e; |
285 | 286 | ||
286 | e = kmem_cache_alloc(exception_cache, GFP_NOIO); | 287 | e = kmem_cache_alloc(exception_cache, GFP_NOIO); |
287 | if (!e) | 288 | if (!e) |
@@ -290,24 +291,24 @@ static inline struct exception *alloc_exception(void) | |||
290 | return e; | 291 | return e; |
291 | } | 292 | } |
292 | 293 | ||
293 | static inline void free_exception(struct exception *e) | 294 | static void free_exception(struct dm_snap_exception *e) |
294 | { | 295 | { |
295 | kmem_cache_free(exception_cache, e); | 296 | kmem_cache_free(exception_cache, e); |
296 | } | 297 | } |
297 | 298 | ||
298 | static inline struct pending_exception *alloc_pending_exception(void) | 299 | static struct dm_snap_pending_exception *alloc_pending_exception(void) |
299 | { | 300 | { |
300 | return mempool_alloc(pending_pool, GFP_NOIO); | 301 | return mempool_alloc(pending_pool, GFP_NOIO); |
301 | } | 302 | } |
302 | 303 | ||
303 | static inline void free_pending_exception(struct pending_exception *pe) | 304 | static void free_pending_exception(struct dm_snap_pending_exception *pe) |
304 | { | 305 | { |
305 | mempool_free(pe, pending_pool); | 306 | mempool_free(pe, pending_pool); |
306 | } | 307 | } |
307 | 308 | ||
308 | int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new) | 309 | int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new) |
309 | { | 310 | { |
310 | struct exception *e; | 311 | struct dm_snap_exception *e; |
311 | 312 | ||
312 | e = alloc_exception(); | 313 | e = alloc_exception(); |
313 | if (!e) | 314 | if (!e) |
@@ -334,7 +335,7 @@ static int calc_max_buckets(void) | |||
334 | /* | 335 | /* |
335 | * Rounds a number down to a power of 2. | 336 | * Rounds a number down to a power of 2. |
336 | */ | 337 | */ |
337 | static inline uint32_t round_down(uint32_t n) | 338 | static uint32_t round_down(uint32_t n) |
338 | { | 339 | { |
339 | while (n & (n - 1)) | 340 | while (n & (n - 1)) |
340 | n &= (n - 1); | 341 | n &= (n - 1); |
@@ -384,7 +385,7 @@ static int init_hash_tables(struct dm_snapshot *s) | |||
384 | * Round a number up to the nearest 'size' boundary. size must | 385 | * Round a number up to the nearest 'size' boundary. size must |
385 | * be a power of 2. | 386 | * be a power of 2. |
386 | */ | 387 | */ |
387 | static inline ulong round_up(ulong n, ulong size) | 388 | static ulong round_up(ulong n, ulong size) |
388 | { | 389 | { |
389 | size--; | 390 | size--; |
390 | return (n + size) & ~size; | 391 | return (n + size) & ~size; |
@@ -522,9 +523,12 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
522 | 523 | ||
523 | /* Metadata must only be loaded into one table at once */ | 524 | /* Metadata must only be loaded into one table at once */ |
524 | r = s->store.read_metadata(&s->store); | 525 | r = s->store.read_metadata(&s->store); |
525 | if (r) { | 526 | if (r < 0) { |
526 | ti->error = "Failed to read snapshot metadata"; | 527 | ti->error = "Failed to read snapshot metadata"; |
527 | goto bad6; | 528 | goto bad6; |
529 | } else if (r > 0) { | ||
530 | s->valid = 0; | ||
531 | DMWARN("Snapshot is marked invalid."); | ||
528 | } | 532 | } |
529 | 533 | ||
530 | bio_list_init(&s->queued_bios); | 534 | bio_list_init(&s->queued_bios); |
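snapshot_ctr() now distinguishes three outcomes from read_metadata(): a negative return still fails the constructor, zero is a clean load, and a positive return means the on-disk header says the snapshot was previously invalidated, so the target is kept but marked invalid. A hypothetical store-side sketch of that contract (simplified header, not the real exception store):

struct disk_header_stub {
	unsigned valid;		/* 0 => snapshot was invalidated */
};

static int read_metadata_stub(struct disk_header_stub *hdr, int io_error)
{
	if (io_error)
		return io_error;	/* < 0: constructor fails */

	if (!hdr->valid)
		return 1;		/* > 0: load, but mark invalid */

	return 0;			/* clean load */
}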
@@ -577,7 +581,7 @@ static void __free_exceptions(struct dm_snapshot *s) | |||
577 | 581 | ||
578 | static void snapshot_dtr(struct dm_target *ti) | 582 | static void snapshot_dtr(struct dm_target *ti) |
579 | { | 583 | { |
580 | struct dm_snapshot *s = (struct dm_snapshot *) ti->private; | 584 | struct dm_snapshot *s = ti->private; |
581 | 585 | ||
582 | flush_workqueue(ksnapd); | 586 | flush_workqueue(ksnapd); |
583 | 587 | ||
@@ -655,14 +659,14 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) | |||
655 | dm_table_event(s->table); | 659 | dm_table_event(s->table); |
656 | } | 660 | } |
657 | 661 | ||
658 | static void get_pending_exception(struct pending_exception *pe) | 662 | static void get_pending_exception(struct dm_snap_pending_exception *pe) |
659 | { | 663 | { |
660 | atomic_inc(&pe->ref_count); | 664 | atomic_inc(&pe->ref_count); |
661 | } | 665 | } |
662 | 666 | ||
663 | static struct bio *put_pending_exception(struct pending_exception *pe) | 667 | static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) |
664 | { | 668 | { |
665 | struct pending_exception *primary_pe; | 669 | struct dm_snap_pending_exception *primary_pe; |
666 | struct bio *origin_bios = NULL; | 670 | struct bio *origin_bios = NULL; |
667 | 671 | ||
668 | primary_pe = pe->primary_pe; | 672 | primary_pe = pe->primary_pe; |
@@ -692,9 +696,9 @@ static struct bio *put_pending_exception(struct pending_exception *pe) | |||
692 | return origin_bios; | 696 | return origin_bios; |
693 | } | 697 | } |
694 | 698 | ||
695 | static void pending_complete(struct pending_exception *pe, int success) | 699 | static void pending_complete(struct dm_snap_pending_exception *pe, int success) |
696 | { | 700 | { |
697 | struct exception *e; | 701 | struct dm_snap_exception *e; |
698 | struct dm_snapshot *s = pe->snap; | 702 | struct dm_snapshot *s = pe->snap; |
699 | struct bio *origin_bios = NULL; | 703 | struct bio *origin_bios = NULL; |
700 | struct bio *snapshot_bios = NULL; | 704 | struct bio *snapshot_bios = NULL; |
@@ -748,7 +752,8 @@ static void pending_complete(struct pending_exception *pe, int success) | |||
748 | 752 | ||
749 | static void commit_callback(void *context, int success) | 753 | static void commit_callback(void *context, int success) |
750 | { | 754 | { |
751 | struct pending_exception *pe = (struct pending_exception *) context; | 755 | struct dm_snap_pending_exception *pe = context; |
756 | |||
752 | pending_complete(pe, success); | 757 | pending_complete(pe, success); |
753 | } | 758 | } |
754 | 759 | ||
@@ -758,7 +763,7 @@ static void commit_callback(void *context, int success) | |||
758 | */ | 763 | */ |
759 | static void copy_callback(int read_err, unsigned int write_err, void *context) | 764 | static void copy_callback(int read_err, unsigned int write_err, void *context) |
760 | { | 765 | { |
761 | struct pending_exception *pe = (struct pending_exception *) context; | 766 | struct dm_snap_pending_exception *pe = context; |
762 | struct dm_snapshot *s = pe->snap; | 767 | struct dm_snapshot *s = pe->snap; |
763 | 768 | ||
764 | if (read_err || write_err) | 769 | if (read_err || write_err) |
@@ -773,7 +778,7 @@ static void copy_callback(int read_err, unsigned int write_err, void *context) | |||
773 | /* | 778 | /* |
774 | * Dispatches the copy operation to kcopyd. | 779 | * Dispatches the copy operation to kcopyd. |
775 | */ | 780 | */ |
776 | static void start_copy(struct pending_exception *pe) | 781 | static void start_copy(struct dm_snap_pending_exception *pe) |
777 | { | 782 | { |
778 | struct dm_snapshot *s = pe->snap; | 783 | struct dm_snapshot *s = pe->snap; |
779 | struct io_region src, dest; | 784 | struct io_region src, dest; |
@@ -803,11 +808,11 @@ static void start_copy(struct pending_exception *pe) | |||
803 | * NOTE: a write lock must be held on snap->lock before calling | 808 | * NOTE: a write lock must be held on snap->lock before calling |
804 | * this. | 809 | * this. |
805 | */ | 810 | */ |
806 | static struct pending_exception * | 811 | static struct dm_snap_pending_exception * |
807 | __find_pending_exception(struct dm_snapshot *s, struct bio *bio) | 812 | __find_pending_exception(struct dm_snapshot *s, struct bio *bio) |
808 | { | 813 | { |
809 | struct exception *e; | 814 | struct dm_snap_exception *e; |
810 | struct pending_exception *pe; | 815 | struct dm_snap_pending_exception *pe; |
811 | chunk_t chunk = sector_to_chunk(s, bio->bi_sector); | 816 | chunk_t chunk = sector_to_chunk(s, bio->bi_sector); |
812 | 817 | ||
813 | /* | 818 | /* |
@@ -816,7 +821,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) | |||
816 | e = lookup_exception(&s->pending, chunk); | 821 | e = lookup_exception(&s->pending, chunk); |
817 | if (e) { | 822 | if (e) { |
818 | /* cast the exception to a pending exception */ | 823 | /* cast the exception to a pending exception */ |
819 | pe = container_of(e, struct pending_exception, e); | 824 | pe = container_of(e, struct dm_snap_pending_exception, e); |
820 | goto out; | 825 | goto out; |
821 | } | 826 | } |
822 | 827 | ||
@@ -836,7 +841,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) | |||
836 | e = lookup_exception(&s->pending, chunk); | 841 | e = lookup_exception(&s->pending, chunk); |
837 | if (e) { | 842 | if (e) { |
838 | free_pending_exception(pe); | 843 | free_pending_exception(pe); |
839 | pe = container_of(e, struct pending_exception, e); | 844 | pe = container_of(e, struct dm_snap_pending_exception, e); |
840 | goto out; | 845 | goto out; |
841 | } | 846 | } |
842 | 847 | ||
@@ -860,8 +865,8 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) | |||
860 | return pe; | 865 | return pe; |
861 | } | 866 | } |
862 | 867 | ||
863 | static inline void remap_exception(struct dm_snapshot *s, struct exception *e, | 868 | static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, |
864 | struct bio *bio) | 869 | struct bio *bio) |
865 | { | 870 | { |
866 | bio->bi_bdev = s->cow->bdev; | 871 | bio->bi_bdev = s->cow->bdev; |
867 | bio->bi_sector = chunk_to_sector(s, e->new_chunk) + | 872 | bio->bi_sector = chunk_to_sector(s, e->new_chunk) + |
@@ -871,11 +876,11 @@ static inline void remap_exception(struct dm_snapshot *s, struct exception *e, | |||
871 | static int snapshot_map(struct dm_target *ti, struct bio *bio, | 876 | static int snapshot_map(struct dm_target *ti, struct bio *bio, |
872 | union map_info *map_context) | 877 | union map_info *map_context) |
873 | { | 878 | { |
874 | struct exception *e; | 879 | struct dm_snap_exception *e; |
875 | struct dm_snapshot *s = (struct dm_snapshot *) ti->private; | 880 | struct dm_snapshot *s = ti->private; |
876 | int r = DM_MAPIO_REMAPPED; | 881 | int r = DM_MAPIO_REMAPPED; |
877 | chunk_t chunk; | 882 | chunk_t chunk; |
878 | struct pending_exception *pe = NULL; | 883 | struct dm_snap_pending_exception *pe = NULL; |
879 | 884 | ||
880 | chunk = sector_to_chunk(s, bio->bi_sector); | 885 | chunk = sector_to_chunk(s, bio->bi_sector); |
881 | 886 | ||
@@ -884,9 +889,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, | |||
884 | if (!s->valid) | 889 | if (!s->valid) |
885 | return -EIO; | 890 | return -EIO; |
886 | 891 | ||
887 | if (unlikely(bio_barrier(bio))) | ||
888 | return -EOPNOTSUPP; | ||
889 | |||
890 | /* FIXME: should only take write lock if we need | 892 | /* FIXME: should only take write lock if we need |
891 | * to copy an exception */ | 893 | * to copy an exception */ |
892 | down_write(&s->lock); | 894 | down_write(&s->lock); |
@@ -945,7 +947,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, | |||
945 | 947 | ||
946 | static void snapshot_resume(struct dm_target *ti) | 948 | static void snapshot_resume(struct dm_target *ti) |
947 | { | 949 | { |
948 | struct dm_snapshot *s = (struct dm_snapshot *) ti->private; | 950 | struct dm_snapshot *s = ti->private; |
949 | 951 | ||
950 | down_write(&s->lock); | 952 | down_write(&s->lock); |
951 | s->active = 1; | 953 | s->active = 1; |
@@ -955,7 +957,7 @@ static void snapshot_resume(struct dm_target *ti) | |||
955 | static int snapshot_status(struct dm_target *ti, status_type_t type, | 957 | static int snapshot_status(struct dm_target *ti, status_type_t type, |
956 | char *result, unsigned int maxlen) | 958 | char *result, unsigned int maxlen) |
957 | { | 959 | { |
958 | struct dm_snapshot *snap = (struct dm_snapshot *) ti->private; | 960 | struct dm_snapshot *snap = ti->private; |
959 | 961 | ||
960 | switch (type) { | 962 | switch (type) { |
961 | case STATUSTYPE_INFO: | 963 | case STATUSTYPE_INFO: |
@@ -999,8 +1001,8 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) | |||
999 | { | 1001 | { |
1000 | int r = DM_MAPIO_REMAPPED, first = 0; | 1002 | int r = DM_MAPIO_REMAPPED, first = 0; |
1001 | struct dm_snapshot *snap; | 1003 | struct dm_snapshot *snap; |
1002 | struct exception *e; | 1004 | struct dm_snap_exception *e; |
1003 | struct pending_exception *pe, *next_pe, *primary_pe = NULL; | 1005 | struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; |
1004 | chunk_t chunk; | 1006 | chunk_t chunk; |
1005 | LIST_HEAD(pe_queue); | 1007 | LIST_HEAD(pe_queue); |
1006 | 1008 | ||
@@ -1147,19 +1149,16 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1147 | 1149 | ||
1148 | static void origin_dtr(struct dm_target *ti) | 1150 | static void origin_dtr(struct dm_target *ti) |
1149 | { | 1151 | { |
1150 | struct dm_dev *dev = (struct dm_dev *) ti->private; | 1152 | struct dm_dev *dev = ti->private; |
1151 | dm_put_device(ti, dev); | 1153 | dm_put_device(ti, dev); |
1152 | } | 1154 | } |
1153 | 1155 | ||
1154 | static int origin_map(struct dm_target *ti, struct bio *bio, | 1156 | static int origin_map(struct dm_target *ti, struct bio *bio, |
1155 | union map_info *map_context) | 1157 | union map_info *map_context) |
1156 | { | 1158 | { |
1157 | struct dm_dev *dev = (struct dm_dev *) ti->private; | 1159 | struct dm_dev *dev = ti->private; |
1158 | bio->bi_bdev = dev->bdev; | 1160 | bio->bi_bdev = dev->bdev; |
1159 | 1161 | ||
1160 | if (unlikely(bio_barrier(bio))) | ||
1161 | return -EOPNOTSUPP; | ||
1162 | |||
1163 | /* Only tell snapshots if this is a write */ | 1162 | /* Only tell snapshots if this is a write */ |
1164 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; | 1163 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; |
1165 | } | 1164 | } |
@@ -1172,7 +1171,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio, | |||
1172 | */ | 1171 | */ |
1173 | static void origin_resume(struct dm_target *ti) | 1172 | static void origin_resume(struct dm_target *ti) |
1174 | { | 1173 | { |
1175 | struct dm_dev *dev = (struct dm_dev *) ti->private; | 1174 | struct dm_dev *dev = ti->private; |
1176 | struct dm_snapshot *snap; | 1175 | struct dm_snapshot *snap; |
1177 | struct origin *o; | 1176 | struct origin *o; |
1178 | chunk_t chunk_size = 0; | 1177 | chunk_t chunk_size = 0; |
@@ -1190,7 +1189,7 @@ static void origin_resume(struct dm_target *ti) | |||
1190 | static int origin_status(struct dm_target *ti, status_type_t type, char *result, | 1189 | static int origin_status(struct dm_target *ti, status_type_t type, char *result, |
1191 | unsigned int maxlen) | 1190 | unsigned int maxlen) |
1192 | { | 1191 | { |
1193 | struct dm_dev *dev = (struct dm_dev *) ti->private; | 1192 | struct dm_dev *dev = ti->private; |
1194 | 1193 | ||
1195 | switch (type) { | 1194 | switch (type) { |
1196 | case STATUSTYPE_INFO: | 1195 | case STATUSTYPE_INFO: |
@@ -1249,21 +1248,14 @@ static int __init dm_snapshot_init(void) | |||
1249 | goto bad2; | 1248 | goto bad2; |
1250 | } | 1249 | } |
1251 | 1250 | ||
1252 | exception_cache = kmem_cache_create("dm-snapshot-ex", | 1251 | exception_cache = KMEM_CACHE(dm_snap_exception, 0); |
1253 | sizeof(struct exception), | ||
1254 | __alignof__(struct exception), | ||
1255 | 0, NULL, NULL); | ||
1256 | if (!exception_cache) { | 1252 | if (!exception_cache) { |
1257 | DMERR("Couldn't create exception cache."); | 1253 | DMERR("Couldn't create exception cache."); |
1258 | r = -ENOMEM; | 1254 | r = -ENOMEM; |
1259 | goto bad3; | 1255 | goto bad3; |
1260 | } | 1256 | } |
1261 | 1257 | ||
1262 | pending_cache = | 1258 | pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); |
1263 | kmem_cache_create("dm-snapshot-in", | ||
1264 | sizeof(struct pending_exception), | ||
1265 | __alignof__(struct pending_exception), | ||
1266 | 0, NULL, NULL); | ||
1267 | if (!pending_cache) { | 1259 | if (!pending_cache) { |
1268 | DMERR("Couldn't create pending cache."); | 1260 | DMERR("Couldn't create pending cache."); |
1269 | r = -ENOMEM; | 1261 | r = -ENOMEM; |
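Throughout dm-snap.c the renamed dm_snap_pending_exception embeds a plain dm_snap_exception as its first member, which is why the lookup paths can hash pending entries in the same style of table and recover the outer object with container_of(). A simplified sketch of the embedding, with the types reduced to essentials:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/types.h>

struct dm_snap_exception {
	struct list_head hash_list;	/* lives in the hash table */
	sector_t old_chunk;
	sector_t new_chunk;
};

struct dm_snap_pending_exception {
	struct dm_snap_exception e;	/* embedded, hashed like any other */
	int started;			/* pending-only state */
};

static struct dm_snap_pending_exception *
to_pending(struct dm_snap_exception *e)
{
	/* Valid only for entries known to come from the pending table. */
	return container_of(e, struct dm_snap_pending_exception, e);
}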
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h index 15fa2ae6cdc2..650e0f1f51d8 100644 --- a/drivers/md/dm-snap.h +++ b/drivers/md/dm-snap.h | |||
@@ -30,7 +30,7 @@ typedef sector_t chunk_t; | |||
30 | * An exception is used where an old chunk of data has been | 30 | * An exception is used where an old chunk of data has been |
31 | * replaced by a new one. | 31 | * replaced by a new one. |
32 | */ | 32 | */ |
33 | struct exception { | 33 | struct dm_snap_exception { |
34 | struct list_head hash_list; | 34 | struct list_head hash_list; |
35 | 35 | ||
36 | chunk_t old_chunk; | 36 | chunk_t old_chunk; |
@@ -58,13 +58,13 @@ struct exception_store { | |||
58 | * Find somewhere to store the next exception. | 58 | * Find somewhere to store the next exception. |
59 | */ | 59 | */ |
60 | int (*prepare_exception) (struct exception_store *store, | 60 | int (*prepare_exception) (struct exception_store *store, |
61 | struct exception *e); | 61 | struct dm_snap_exception *e); |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * Update the metadata with this exception. | 64 | * Update the metadata with this exception. |
65 | */ | 65 | */ |
66 | void (*commit_exception) (struct exception_store *store, | 66 | void (*commit_exception) (struct exception_store *store, |
67 | struct exception *e, | 67 | struct dm_snap_exception *e, |
68 | void (*callback) (void *, int success), | 68 | void (*callback) (void *, int success), |
69 | void *callback_context); | 69 | void *callback_context); |
70 | 70 | ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2717a355dc5b..f4f7d35561ab 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -45,7 +45,7 @@ struct dm_io { | |||
45 | * One of these is allocated per target within a bio. Hopefully | 45 | * One of these is allocated per target within a bio. Hopefully |
46 | * this will be simplified out one day. | 46 | * this will be simplified out one day. |
47 | */ | 47 | */ |
48 | struct target_io { | 48 | struct dm_target_io { |
49 | struct dm_io *io; | 49 | struct dm_io *io; |
50 | struct dm_target *ti; | 50 | struct dm_target *ti; |
51 | union map_info info; | 51 | union map_info info; |
@@ -54,7 +54,7 @@ struct target_io { | |||
54 | union map_info *dm_get_mapinfo(struct bio *bio) | 54 | union map_info *dm_get_mapinfo(struct bio *bio) |
55 | { | 55 | { |
56 | if (bio && bio->bi_private) | 56 | if (bio && bio->bi_private) |
57 | return &((struct target_io *)bio->bi_private)->info; | 57 | return &((struct dm_target_io *)bio->bi_private)->info; |
58 | return NULL; | 58 | return NULL; |
59 | } | 59 | } |
60 | 60 | ||
@@ -132,14 +132,12 @@ static int __init local_init(void) | |||
132 | int r; | 132 | int r; |
133 | 133 | ||
134 | /* allocate a slab for the dm_ios */ | 134 | /* allocate a slab for the dm_ios */ |
135 | _io_cache = kmem_cache_create("dm_io", | 135 | _io_cache = KMEM_CACHE(dm_io, 0); |
136 | sizeof(struct dm_io), 0, 0, NULL, NULL); | ||
137 | if (!_io_cache) | 136 | if (!_io_cache) |
138 | return -ENOMEM; | 137 | return -ENOMEM; |
139 | 138 | ||
140 | /* allocate a slab for the target ios */ | 139 | /* allocate a slab for the target ios */ |
141 | _tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io), | 140 | _tio_cache = KMEM_CACHE(dm_target_io, 0); |
142 | 0, 0, NULL, NULL); | ||
143 | if (!_tio_cache) { | 141 | if (!_tio_cache) { |
144 | kmem_cache_destroy(_io_cache); | 142 | kmem_cache_destroy(_io_cache); |
145 | return -ENOMEM; | 143 | return -ENOMEM; |
@@ -325,22 +323,22 @@ out: | |||
325 | return r; | 323 | return r; |
326 | } | 324 | } |
327 | 325 | ||
328 | static inline struct dm_io *alloc_io(struct mapped_device *md) | 326 | static struct dm_io *alloc_io(struct mapped_device *md) |
329 | { | 327 | { |
330 | return mempool_alloc(md->io_pool, GFP_NOIO); | 328 | return mempool_alloc(md->io_pool, GFP_NOIO); |
331 | } | 329 | } |
332 | 330 | ||
333 | static inline void free_io(struct mapped_device *md, struct dm_io *io) | 331 | static void free_io(struct mapped_device *md, struct dm_io *io) |
334 | { | 332 | { |
335 | mempool_free(io, md->io_pool); | 333 | mempool_free(io, md->io_pool); |
336 | } | 334 | } |
337 | 335 | ||
338 | static inline struct target_io *alloc_tio(struct mapped_device *md) | 336 | static struct dm_target_io *alloc_tio(struct mapped_device *md) |
339 | { | 337 | { |
340 | return mempool_alloc(md->tio_pool, GFP_NOIO); | 338 | return mempool_alloc(md->tio_pool, GFP_NOIO); |
341 | } | 339 | } |
342 | 340 | ||
343 | static inline void free_tio(struct mapped_device *md, struct target_io *tio) | 341 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) |
344 | { | 342 | { |
345 | mempool_free(tio, md->tio_pool); | 343 | mempool_free(tio, md->tio_pool); |
346 | } | 344 | } |
@@ -498,7 +496,7 @@ static void dec_pending(struct dm_io *io, int error) | |||
498 | static int clone_endio(struct bio *bio, unsigned int done, int error) | 496 | static int clone_endio(struct bio *bio, unsigned int done, int error) |
499 | { | 497 | { |
500 | int r = 0; | 498 | int r = 0; |
501 | struct target_io *tio = bio->bi_private; | 499 | struct dm_target_io *tio = bio->bi_private; |
502 | struct mapped_device *md = tio->io->md; | 500 | struct mapped_device *md = tio->io->md; |
503 | dm_endio_fn endio = tio->ti->type->end_io; | 501 | dm_endio_fn endio = tio->ti->type->end_io; |
504 | 502 | ||
@@ -558,7 +556,7 @@ static sector_t max_io_len(struct mapped_device *md, | |||
558 | } | 556 | } |
559 | 557 | ||
560 | static void __map_bio(struct dm_target *ti, struct bio *clone, | 558 | static void __map_bio(struct dm_target *ti, struct bio *clone, |
561 | struct target_io *tio) | 559 | struct dm_target_io *tio) |
562 | { | 560 | { |
563 | int r; | 561 | int r; |
564 | sector_t sector; | 562 | sector_t sector; |
@@ -672,7 +670,7 @@ static void __clone_and_map(struct clone_info *ci) | |||
672 | struct bio *clone, *bio = ci->bio; | 670 | struct bio *clone, *bio = ci->bio; |
673 | struct dm_target *ti = dm_table_find_target(ci->map, ci->sector); | 671 | struct dm_target *ti = dm_table_find_target(ci->map, ci->sector); |
674 | sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti); | 672 | sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti); |
675 | struct target_io *tio; | 673 | struct dm_target_io *tio; |
676 | 674 | ||
677 | /* | 675 | /* |
678 | * Allocate a target io object. | 676 | * Allocate a target io object. |
@@ -802,6 +800,15 @@ static int dm_request(request_queue_t *q, struct bio *bio) | |||
802 | int rw = bio_data_dir(bio); | 800 | int rw = bio_data_dir(bio); |
803 | struct mapped_device *md = q->queuedata; | 801 | struct mapped_device *md = q->queuedata; |
804 | 802 | ||
803 | /* | ||
804 | * There is no use in forwarding any barrier request since we can't | ||
805 | * guarantee it is (or can be) handled by the targets correctly. | ||
806 | */ | ||
807 | if (unlikely(bio_barrier(bio))) { | ||
808 | bio_endio(bio, bio->bi_size, -EOPNOTSUPP); | ||
809 | return 0; | ||
810 | } | ||
811 | |||
805 | down_read(&md->io_lock); | 812 | down_read(&md->io_lock); |
806 | 813 | ||
807 | disk_stat_inc(dm_disk(md), ios[rw]); | 814 | disk_stat_inc(dm_disk(md), ios[rw]); |
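The snapshot and origin targets no longer check bio_barrier() themselves because dm_request() now rejects barriers once, before a bio is ever cloned or mapped. A sketch of that early-reject shape under this kernel's block API (dm_forward() is a hypothetical stand-in for the normal submission path):

#include <linux/bio.h>
#include <linux/blkdev.h>

struct mapped_device;				/* opaque, as in dm.c */
static int dm_forward(struct mapped_device *md, struct bio *bio);

static int example_make_request(request_queue_t *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	/*
	 * No target can promise ordering across a remap, so barriers
	 * are refused here, once, instead of in every target's map().
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	return dm_forward(md, bio);	/* hypothetical normal path */
}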
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 2f796b1436b2..462ee652a890 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -18,13 +18,45 @@ | |||
18 | 18 | ||
19 | #define DM_NAME "device-mapper" | 19 | #define DM_NAME "device-mapper" |
20 | 20 | ||
21 | #define DMERR(f, arg...) printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) | 21 | #define DMERR(f, arg...) \ |
22 | #define DMWARN(f, arg...) printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) | 22 | printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) |
23 | #define DMINFO(f, arg...) printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) | 23 | #define DMERR_LIMIT(f, arg...) \ |
24 | do { \ | ||
25 | if (printk_ratelimit()) \ | ||
26 | printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \ | ||
27 | f "\n", ## arg); \ | ||
28 | } while (0) | ||
29 | |||
30 | #define DMWARN(f, arg...) \ | ||
31 | printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) | ||
32 | #define DMWARN_LIMIT(f, arg...) \ | ||
33 | do { \ | ||
34 | if (printk_ratelimit()) \ | ||
35 | printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \ | ||
36 | f "\n", ## arg); \ | ||
37 | } while (0) | ||
38 | |||
39 | #define DMINFO(f, arg...) \ | ||
40 | printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) | ||
41 | #define DMINFO_LIMIT(f, arg...) \ | ||
42 | do { \ | ||
43 | if (printk_ratelimit()) \ | ||
44 | printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \ | ||
45 | "\n", ## arg); \ | ||
46 | } while (0) | ||
47 | |||
24 | #ifdef CONFIG_DM_DEBUG | 48 | #ifdef CONFIG_DM_DEBUG |
25 | # define DMDEBUG(f, arg...) printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg) | 49 | # define DMDEBUG(f, arg...) \ |
50 | printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg) | ||
51 | # define DMDEBUG_LIMIT(f, arg...) \ | ||
52 | do { \ | ||
53 | if (printk_ratelimit()) \ | ||
54 | printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \ | ||
55 | "\n", ## arg); \ | ||
56 | } while (0) | ||
26 | #else | 57 | #else |
27 | # define DMDEBUG(f, arg...) do {} while (0) | 58 | # define DMDEBUG(f, arg...) do {} while (0) |
59 | # define DMDEBUG_LIMIT(f, arg...) do {} while (0) | ||
28 | #endif | 60 | #endif |
29 | 61 | ||
30 | #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ | 62 | #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ |
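The new DMERR_LIMIT()/DMWARN_LIMIT()/DMINFO_LIMIT() variants wrap their printk in printk_ratelimit(), so a per-bio error in a hot path (such as the recovery_complete() messages added earlier) cannot flood the log. A minimal usage sketch, assuming the macro definitions in this hunk; note that printk_ratelimit() is a single kernel-wide limit shared by all callers:

#define DM_MSG_PREFIX "example"	/* must be set before including dm.h */
#include "dm.h"

static void note_write_error(unsigned err)
{
	/* Unbounded: one line per failing I/O. */
	DMERR("write error 0x%x", err);

	/* Bounded: silently dropped once the global rate limit trips. */
	DMERR_LIMIT("write error 0x%x", err);
}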
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index dbc234e3c69f..7e052378c47e 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c | |||
@@ -29,7 +29,7 @@ | |||
29 | static struct workqueue_struct *_kcopyd_wq; | 29 | static struct workqueue_struct *_kcopyd_wq; |
30 | static struct work_struct _kcopyd_work; | 30 | static struct work_struct _kcopyd_work; |
31 | 31 | ||
32 | static inline void wake(void) | 32 | static void wake(void) |
33 | { | 33 | { |
34 | queue_work(_kcopyd_wq, &_kcopyd_work); | 34 | queue_work(_kcopyd_wq, &_kcopyd_work); |
35 | } | 35 | } |
@@ -226,10 +226,7 @@ static LIST_HEAD(_pages_jobs); | |||
226 | 226 | ||
227 | static int jobs_init(void) | 227 | static int jobs_init(void) |
228 | { | 228 | { |
229 | _job_cache = kmem_cache_create("kcopyd-jobs", | 229 | _job_cache = KMEM_CACHE(kcopyd_job, 0); |
230 | sizeof(struct kcopyd_job), | ||
231 | __alignof__(struct kcopyd_job), | ||
232 | 0, NULL, NULL); | ||
233 | if (!_job_cache) | 230 | if (!_job_cache) |
234 | return -ENOMEM; | 231 | return -ENOMEM; |
235 | 232 | ||
@@ -258,7 +255,7 @@ static void jobs_exit(void) | |||
258 | * Functions to push and pop a job onto the head of a given job | 255 | * Functions to push and pop a job onto the head of a given job |
259 | * list. | 256 | * list. |
260 | */ | 257 | */ |
261 | static inline struct kcopyd_job *pop(struct list_head *jobs) | 258 | static struct kcopyd_job *pop(struct list_head *jobs) |
262 | { | 259 | { |
263 | struct kcopyd_job *job = NULL; | 260 | struct kcopyd_job *job = NULL; |
264 | unsigned long flags; | 261 | unsigned long flags; |
@@ -274,7 +271,7 @@ static inline struct kcopyd_job *pop(struct list_head *jobs) | |||
274 | return job; | 271 | return job; |
275 | } | 272 | } |
276 | 273 | ||
277 | static inline void push(struct list_head *jobs, struct kcopyd_job *job) | 274 | static void push(struct list_head *jobs, struct kcopyd_job *job) |
278 | { | 275 | { |
279 | unsigned long flags; | 276 | unsigned long flags; |
280 | 277 | ||
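kcopyd's de-inlined push()/pop() helpers are a textbook irqsave-protected job queue; the _irqsave variants matter because completion callbacks can queue jobs from interrupt context. A generic sketch of the same locking discipline (list and lock names are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(job_lock);

struct job {
	struct list_head list;
};

static struct job *pop(struct list_head *jobs)
{
	struct job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&job_lock, flags);
	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&job_lock, flags);
}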