Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-crypt.c            |  30
-rw-r--r--  drivers/md/dm-delay.c            |  11
-rw-r--r--  drivers/md/dm-exception-store.c  |  17
-rw-r--r--  drivers/md/dm-mpath.c            |  23
-rw-r--r--  drivers/md/dm-snap.c             | 105
-rw-r--r--  drivers/md/dm-snap.h             |   6
-rw-r--r--  drivers/md/dm.c                  |  24
-rw-r--r--  drivers/md/kcopyd.c              |  11
8 files changed, 106 insertions(+), 121 deletions(-)
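
Two mechanical transformations dominate the diff below: private struct types whose names leak into the global namespace (slab cache names, debugging symbols) gain a dm_ prefix, and the open-coded kmem_cache_create() calls are collapsed onto the KMEM_CACHE() helper, which derives the cache name, object size and alignment from the struct tag itself. A third, smaller cleanup drops `inline` from static functions and removes redundant casts of void pointers. A rough sketch of the helper, assuming the six-argument 2.6.22-era kmem_cache_create() signature (see <linux/slab.h> for the real definition):

    #define KMEM_CACHE(__struct, __flags)                             \
            kmem_cache_create(#__struct, sizeof(struct __struct),     \
                              __alignof__(struct __struct),           \
                              (__flags), NULL, NULL)

    /* So the call in dm_crypt_init() below,
     *
     *     _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
     *
     * expands to roughly:
     *
     *     _crypt_io_pool = kmem_cache_create("dm_crypt_io",
     *                                        sizeof(struct dm_crypt_io),
     *                                        __alignof__(struct dm_crypt_io),
     *                                        0, NULL, NULL);
     *
     * which is why the struct tags need unique dm_-prefixed names:
     * the stringized tag becomes the globally visible slab name.
     */
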
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7b0fcfc9eaa5..ece7c7c4cdbb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -30,7 +30,7 @@
 /*
  * per bio private data
  */
-struct crypt_io {
+struct dm_crypt_io {
 	struct dm_target *target;
 	struct bio *base_bio;
 	struct work_struct work;
@@ -106,7 +106,7 @@ struct crypt_config {
 
 static struct kmem_cache *_crypt_io_pool;
 
-static void clone_init(struct crypt_io *, struct bio *);
+static void clone_init(struct dm_crypt_io *, struct bio *);
 
 /*
  * Different IV generation algorithms:
@@ -382,7 +382,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 static void dm_crypt_bio_destructor(struct bio *bio)
 {
-	struct crypt_io *io = bio->bi_private;
+	struct dm_crypt_io *io = bio->bi_private;
 	struct crypt_config *cc = io->target->private;
 
 	bio_free(bio, cc->bs);
@@ -393,7 +393,7 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -479,7 +479,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc,
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
  */
-static void dec_pending(struct crypt_io *io, int error)
+static void dec_pending(struct dm_crypt_io *io, int error)
 {
 	struct crypt_config *cc = (struct crypt_config *) io->target->private;
 
@@ -503,7 +503,7 @@ static void dec_pending(struct crypt_io *io, int error)
 static struct workqueue_struct *_kcryptd_workqueue;
 static void kcryptd_do_work(struct work_struct *work);
 
-static void kcryptd_queue_io(struct crypt_io *io)
+static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
 	INIT_WORK(&io->work, kcryptd_do_work);
 	queue_work(_kcryptd_workqueue, &io->work);
@@ -511,7 +511,7 @@ static void kcryptd_queue_io(struct crypt_io *io)
 
 static int crypt_endio(struct bio *clone, unsigned int done, int error)
 {
-	struct crypt_io *io = clone->bi_private;
+	struct dm_crypt_io *io = clone->bi_private;
 	struct crypt_config *cc = io->target->private;
 	unsigned read_io = bio_data_dir(clone) == READ;
 
@@ -545,7 +545,7 @@ out:
 	return error;
 }
 
-static void clone_init(struct crypt_io *io, struct bio *clone)
+static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 {
 	struct crypt_config *cc = io->target->private;
 
@@ -556,7 +556,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void process_read(struct crypt_io *io)
+static void process_read(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
@@ -587,7 +587,7 @@ static void process_read(struct crypt_io *io)
 	generic_make_request(clone);
 }
 
-static void process_write(struct crypt_io *io)
+static void process_write(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
@@ -644,7 +644,7 @@ static void process_write(struct crypt_io *io)
 	}
 }
 
-static void process_read_endio(struct crypt_io *io)
+static void process_read_endio(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct convert_context ctx;
@@ -657,7 +657,7 @@ static void process_read_endio(struct crypt_io *io)
 
 static void kcryptd_do_work(struct work_struct *work)
 {
-	struct crypt_io *io = container_of(work, struct crypt_io, work);
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
 	if (io->post_process)
 		process_read_endio(io);
@@ -939,7 +939,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_io *io;
+	struct dm_crypt_io *io;
 
 	if (bio_barrier(bio))
 		return -EOPNOTSUPP;
@@ -1062,9 +1062,7 @@ static int __init dm_crypt_init(void)
 {
 	int r;
 
-	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
-					   sizeof(struct crypt_io),
-					   0, 0, NULL, NULL);
+	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
 	if (!_crypt_io_pool)
 		return -ENOMEM;
 
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 52c7cf9e5803..eb218266cbf3 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -37,7 +37,7 @@ struct delay_c {
 	unsigned writes;
 };
 
-struct delay_info {
+struct dm_delay_info {
 	struct delay_c *context;
 	struct list_head list;
 	struct bio *bio;
@@ -80,7 +80,7 @@ static void flush_bios(struct bio *bio)
 
 static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
 {
-	struct delay_info *delayed, *next;
+	struct dm_delay_info *delayed, *next;
 	unsigned long next_expires = 0;
 	int start_timer = 0;
 	BIO_LIST(flush_bios);
@@ -227,7 +227,7 @@ static void delay_dtr(struct dm_target *ti)
 
 static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
 {
-	struct delay_info *delayed;
+	struct dm_delay_info *delayed;
 	unsigned long expires = 0;
 
 	if (!delay || !atomic_read(&dc->may_delay))
@@ -338,10 +338,7 @@ static int __init dm_delay_init(void)
 		goto bad_queue;
 	}
 
-	delayed_cache = kmem_cache_create("dm-delay",
-					  sizeof(struct delay_info),
-					  __alignof__(struct delay_info),
-					  0, NULL, NULL);
+	delayed_cache = KMEM_CACHE(dm_delay_info, 0);
 	if (!delayed_cache) {
 		DMERR("Couldn't create delayed bio cache.");
 		goto bad_memcache;
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 07e0a0c84f6e..cb05b744deaa 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -127,7 +127,7 @@ struct pstore {
 	struct dm_io_client *io_client;
 };
 
-static inline unsigned int sectors_to_pages(unsigned int sectors)
+static unsigned sectors_to_pages(unsigned sectors)
 {
 	return sectors / (PAGE_SIZE >> 9);
 }
@@ -393,7 +393,7 @@ static int read_exceptions(struct pstore *ps)
 	return 0;
 }
 
-static inline struct pstore *get_info(struct exception_store *store)
+static struct pstore *get_info(struct exception_store *store)
 {
 	return (struct pstore *) store->context;
 }
@@ -480,7 +480,7 @@ static int persistent_read_metadata(struct exception_store *store)
 }
 
 static int persistent_prepare(struct exception_store *store,
-			      struct exception *e)
+			      struct dm_snap_exception *e)
 {
 	struct pstore *ps = get_info(store);
 	uint32_t stride;
@@ -505,7 +505,7 @@ static int persistent_prepare(struct exception_store *store,
 }
 
 static void persistent_commit(struct exception_store *store,
-			      struct exception *e,
+			      struct dm_snap_exception *e,
 			      void (*callback) (void *, int success),
 			      void *callback_context)
 {
@@ -616,7 +616,8 @@ static int transient_read_metadata(struct exception_store *store)
 	return 0;
 }
 
-static int transient_prepare(struct exception_store *store, struct exception *e)
+static int transient_prepare(struct exception_store *store,
+			     struct dm_snap_exception *e)
 {
 	struct transient_c *tc = (struct transient_c *) store->context;
 	sector_t size = get_dev_size(store->snap->cow->bdev);
@@ -631,9 +632,9 @@ static int transient_prepare(struct exception_store *store, struct exception *e)
 }
 
 static void transient_commit(struct exception_store *store,
-			     struct exception *e,
+			     struct dm_snap_exception *e,
 			     void (*callback) (void *, int success),
 			     void *callback_context)
 {
 	/* Just succeed */
 	callback(callback_context, 1);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de54b39e6ffe..c0950a0b5312 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -83,7 +83,7 @@ struct multipath {
 	struct work_struct trigger_event;
 
 	/*
-	 * We must use a mempool of mpath_io structs so that we
+	 * We must use a mempool of dm_mpath_io structs so that we
 	 * can resubmit bios on error.
 	 */
 	mempool_t *mpio_pool;
@@ -92,7 +92,7 @@ struct multipath {
 /*
  * Context information attached to each bio we process.
  */
-struct mpath_io {
+struct dm_mpath_io {
 	struct pgpath *pgpath;
 	struct dm_bio_details details;
 };
@@ -122,7 +122,7 @@ static struct pgpath *alloc_pgpath(void)
 	return pgpath;
 }
 
-static inline void free_pgpath(struct pgpath *pgpath)
+static void free_pgpath(struct pgpath *pgpath)
 {
 	kfree(pgpath);
 }
@@ -299,8 +299,8 @@ static int __must_push_back(struct multipath *m)
 		dm_noflush_suspending(m->ti));
 }
 
-static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
-		  unsigned was_queued)
+static int map_io(struct multipath *m, struct bio *bio,
+		  struct dm_mpath_io *mpio, unsigned was_queued)
 {
 	int r = DM_MAPIO_REMAPPED;
 	unsigned long flags;
@@ -374,7 +374,7 @@ static void dispatch_queued_ios(struct multipath *m)
 	int r;
 	unsigned long flags;
 	struct bio *bio = NULL, *next;
-	struct mpath_io *mpio;
+	struct dm_mpath_io *mpio;
 	union map_info *info;
 
 	spin_lock_irqsave(&m->lock, flags);
@@ -795,7 +795,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
 			 union map_info *map_context)
 {
 	int r;
-	struct mpath_io *mpio;
+	struct dm_mpath_io *mpio;
 	struct multipath *m = (struct multipath *) ti->private;
 
 	if (bio_barrier(bio))
@@ -1014,7 +1014,7 @@ void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
 * end_io handling
 */
 static int do_end_io(struct multipath *m, struct bio *bio,
-		     int error, struct mpath_io *mpio)
+		     int error, struct dm_mpath_io *mpio)
 {
 	struct hw_handler *hwh = &m->hw_handler;
 	unsigned err_flags = MP_FAIL_PATH;	/* Default behavior */
@@ -1075,8 +1075,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 static int multipath_end_io(struct dm_target *ti, struct bio *bio,
 			    int error, union map_info *map_context)
 {
-	struct multipath *m = (struct multipath *) ti->private;
-	struct mpath_io *mpio = (struct mpath_io *) map_context->ptr;
+	struct multipath *m = ti->private;
+	struct dm_mpath_io *mpio = map_context->ptr;
 	struct pgpath *pgpath = mpio->pgpath;
 	struct path_selector *ps;
 	int r;
@@ -1346,8 +1346,7 @@ static int __init dm_multipath_init(void)
 	int r;
 
 	/* allocate a slab for the dm_ios */
-	_mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io),
-					0, 0, NULL, NULL);
+	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
 	if (!_mpio_cache)
 		return -ENOMEM;
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 0821a2b68a73..1da41229fbf2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -42,8 +42,8 @@
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
 
-struct pending_exception {
-	struct exception e;
+struct dm_snap_pending_exception {
+	struct dm_snap_exception e;
 
 	/*
 	 * Origin buffers waiting for this to complete are held
@@ -63,7 +63,7 @@ struct pending_exception {
 	 * group of pending_exceptions. It is always last to get freed.
 	 * These fields get set up when writing to the origin.
 	 */
-	struct pending_exception *primary_pe;
+	struct dm_snap_pending_exception *primary_pe;
 
 	/*
 	 * Number of pending_exceptions processing this chunk.
@@ -137,7 +137,7 @@ static void exit_origin_hash(void)
 	kfree(_origins);
}
 
-static inline unsigned int origin_hash(struct block_device *bdev)
+static unsigned origin_hash(struct block_device *bdev)
 {
 	return bdev->bd_dev & ORIGIN_MASK;
 }
@@ -231,7 +231,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
 static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
 {
 	struct list_head *slot;
-	struct exception *ex, *next;
+	struct dm_snap_exception *ex, *next;
 	int i, size;
 
 	size = et->hash_mask + 1;
@@ -245,18 +245,19 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
 	vfree(et->table);
 }
 
-static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
 {
 	return chunk & et->hash_mask;
 }
 
-static void insert_exception(struct exception_table *eh, struct exception *e)
+static void insert_exception(struct exception_table *eh,
+			     struct dm_snap_exception *e)
 {
 	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
 	list_add(&e->hash_list, l);
 }
 
-static inline void remove_exception(struct exception *e)
+static void remove_exception(struct dm_snap_exception *e)
 {
 	list_del(&e->hash_list);
 }
@@ -265,11 +266,11 @@ static inline void remove_exception(struct exception *e)
  * Return the exception data for a sector, or NULL if not
  * remapped.
  */
-static struct exception *lookup_exception(struct exception_table *et,
-					  chunk_t chunk)
+static struct dm_snap_exception *lookup_exception(struct exception_table *et,
+						  chunk_t chunk)
 {
 	struct list_head *slot;
-	struct exception *e;
+	struct dm_snap_exception *e;
 
 	slot = &et->table[exception_hash(et, chunk)];
 	list_for_each_entry (e, slot, hash_list)
@@ -279,9 +280,9 @@ static struct exception *lookup_exception(struct exception_table *et,
 	return NULL;
 }
 
-static inline struct exception *alloc_exception(void)
+static struct dm_snap_exception *alloc_exception(void)
 {
-	struct exception *e;
+	struct dm_snap_exception *e;
 
 	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
 	if (!e)
@@ -290,24 +291,24 @@ static inline struct exception *alloc_exception(void)
 	return e;
 }
 
-static inline void free_exception(struct exception *e)
+static void free_exception(struct dm_snap_exception *e)
 {
 	kmem_cache_free(exception_cache, e);
 }
 
-static inline struct pending_exception *alloc_pending_exception(void)
+static struct dm_snap_pending_exception *alloc_pending_exception(void)
 {
 	return mempool_alloc(pending_pool, GFP_NOIO);
 }
 
-static inline void free_pending_exception(struct pending_exception *pe)
+static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
 	mempool_free(pe, pending_pool);
 }
 
 int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
 {
-	struct exception *e;
+	struct dm_snap_exception *e;
 
 	e = alloc_exception();
 	if (!e)
@@ -334,7 +335,7 @@ static int calc_max_buckets(void)
 /*
  * Rounds a number down to a power of 2.
  */
-static inline uint32_t round_down(uint32_t n)
+static uint32_t round_down(uint32_t n)
 {
 	while (n & (n - 1))
 		n &= (n - 1);
@@ -384,7 +385,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 * Round a number up to the nearest 'size' boundary. size must
 * be a power of 2.
 */
-static inline ulong round_up(ulong n, ulong size)
+static ulong round_up(ulong n, ulong size)
 {
 	size--;
 	return (n + size) & ~size;
@@ -577,7 +578,7 @@ static void __free_exceptions(struct dm_snapshot *s)
 
 static void snapshot_dtr(struct dm_target *ti)
 {
-	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+	struct dm_snapshot *s = ti->private;
 
 	flush_workqueue(ksnapd);
 
@@ -655,14 +656,14 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
 	dm_table_event(s->table);
 }
 
-static void get_pending_exception(struct pending_exception *pe)
+static void get_pending_exception(struct dm_snap_pending_exception *pe)
 {
 	atomic_inc(&pe->ref_count);
 }
 
-static struct bio *put_pending_exception(struct pending_exception *pe)
+static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
 {
-	struct pending_exception *primary_pe;
+	struct dm_snap_pending_exception *primary_pe;
 	struct bio *origin_bios = NULL;
 
 	primary_pe = pe->primary_pe;
@@ -692,9 +693,9 @@ static struct bio *put_pending_exception(struct pending_exception *pe)
 	return origin_bios;
 }
 
-static void pending_complete(struct pending_exception *pe, int success)
+static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 {
-	struct exception *e;
+	struct dm_snap_exception *e;
 	struct dm_snapshot *s = pe->snap;
 	struct bio *origin_bios = NULL;
 	struct bio *snapshot_bios = NULL;
@@ -748,7 +749,8 @@ static void pending_complete(struct pending_exception *pe, int success)
 
 static void commit_callback(void *context, int success)
 {
-	struct pending_exception *pe = (struct pending_exception *) context;
+	struct dm_snap_pending_exception *pe = context;
+
 	pending_complete(pe, success);
 }
 
@@ -758,7 +760,7 @@ static void commit_callback(void *context, int success)
 */
 static void copy_callback(int read_err, unsigned int write_err, void *context)
 {
-	struct pending_exception *pe = (struct pending_exception *) context;
+	struct dm_snap_pending_exception *pe = context;
 	struct dm_snapshot *s = pe->snap;
 
 	if (read_err || write_err)
@@ -773,7 +775,7 @@ static void copy_callback(int read_err, unsigned int write_err, void *context)
 /*
  * Dispatches the copy operation to kcopyd.
  */
-static void start_copy(struct pending_exception *pe)
+static void start_copy(struct dm_snap_pending_exception *pe)
 {
 	struct dm_snapshot *s = pe->snap;
 	struct io_region src, dest;
@@ -803,11 +805,11 @@ static void start_copy(struct pending_exception *pe)
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
-static struct pending_exception *
+static struct dm_snap_pending_exception *
 __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 {
-	struct exception *e;
-	struct pending_exception *pe;
+	struct dm_snap_exception *e;
+	struct dm_snap_pending_exception *pe;
 	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
 
 	/*
@@ -816,7 +818,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 	e = lookup_exception(&s->pending, chunk);
 	if (e) {
 		/* cast the exception to a pending exception */
-		pe = container_of(e, struct pending_exception, e);
+		pe = container_of(e, struct dm_snap_pending_exception, e);
 		goto out;
 	}
 
@@ -836,7 +838,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 	e = lookup_exception(&s->pending, chunk);
 	if (e) {
 		free_pending_exception(pe);
-		pe = container_of(e, struct pending_exception, e);
+		pe = container_of(e, struct dm_snap_pending_exception, e);
 		goto out;
 	}
 
@@ -860,8 +862,8 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 	return pe;
 }
 
-static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
-				   struct bio *bio)
+static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
+			    struct bio *bio)
 {
 	bio->bi_bdev = s->cow->bdev;
 	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
@@ -871,11 +873,11 @@ static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
 static int snapshot_map(struct dm_target *ti, struct bio *bio,
 			union map_info *map_context)
 {
-	struct exception *e;
-	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+	struct dm_snap_exception *e;
+	struct dm_snapshot *s = ti->private;
 	int r = DM_MAPIO_REMAPPED;
 	chunk_t chunk;
-	struct pending_exception *pe = NULL;
+	struct dm_snap_pending_exception *pe = NULL;
 
 	chunk = sector_to_chunk(s, bio->bi_sector);
 
@@ -945,7 +947,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 
 static void snapshot_resume(struct dm_target *ti)
 {
-	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+	struct dm_snapshot *s = ti->private;
 
 	down_write(&s->lock);
 	s->active = 1;
@@ -955,7 +957,7 @@ static void snapshot_resume(struct dm_target *ti)
 static int snapshot_status(struct dm_target *ti, status_type_t type,
 			   char *result, unsigned int maxlen)
 {
-	struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;
+	struct dm_snapshot *snap = ti->private;
 
 	switch (type) {
 	case STATUSTYPE_INFO:
@@ -999,8 +1001,8 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
 {
 	int r = DM_MAPIO_REMAPPED, first = 0;
 	struct dm_snapshot *snap;
-	struct exception *e;
-	struct pending_exception *pe, *next_pe, *primary_pe = NULL;
+	struct dm_snap_exception *e;
+	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
 	chunk_t chunk;
 	LIST_HEAD(pe_queue);
 
@@ -1147,14 +1149,14 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 static void origin_dtr(struct dm_target *ti)
 {
-	struct dm_dev *dev = (struct dm_dev *) ti->private;
+	struct dm_dev *dev = ti->private;
 	dm_put_device(ti, dev);
 }
 
 static int origin_map(struct dm_target *ti, struct bio *bio,
 		      union map_info *map_context)
 {
-	struct dm_dev *dev = (struct dm_dev *) ti->private;
+	struct dm_dev *dev = ti->private;
 	bio->bi_bdev = dev->bdev;
 
 	if (unlikely(bio_barrier(bio)))
@@ -1172,7 +1174,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
 */
 static void origin_resume(struct dm_target *ti)
 {
-	struct dm_dev *dev = (struct dm_dev *) ti->private;
+	struct dm_dev *dev = ti->private;
 	struct dm_snapshot *snap;
 	struct origin *o;
 	chunk_t chunk_size = 0;
@@ -1190,7 +1192,7 @@ static void origin_resume(struct dm_target *ti)
 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
 			 unsigned int maxlen)
 {
-	struct dm_dev *dev = (struct dm_dev *) ti->private;
+	struct dm_dev *dev = ti->private;
 
 	switch (type) {
 	case STATUSTYPE_INFO:
@@ -1249,21 +1251,14 @@ static int __init dm_snapshot_init(void)
 		goto bad2;
 	}
 
-	exception_cache = kmem_cache_create("dm-snapshot-ex",
-					    sizeof(struct exception),
-					    __alignof__(struct exception),
-					    0, NULL, NULL);
+	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
 	if (!exception_cache) {
 		DMERR("Couldn't create exception cache.");
 		r = -ENOMEM;
 		goto bad3;
 	}
 
-	pending_cache =
-	    kmem_cache_create("dm-snapshot-in",
-			      sizeof(struct pending_exception),
-			      __alignof__(struct pending_exception),
-			      0, NULL, NULL);
+	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
 	if (!pending_cache) {
 		DMERR("Couldn't create pending cache.");
 		r = -ENOMEM;
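
Throughout dm-snap.c a pending exception is found by looking up its embedded struct dm_snap_exception in the hash table and then recovering the containing struct dm_snap_pending_exception with container_of(), as in __find_pending_exception() above. A minimal userspace sketch of that pattern (the struct names and fields here are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified kernel container_of(): recover the outer struct from a
     * pointer to one of its members, via the member's compile-time offset. */
    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct exception { long old_chunk; };

    struct pending_exception {
    	struct exception e;	/* what the hash table actually links */
    	int ref_count;		/* state only the outer struct carries */
    };

    int main(void)
    {
    	struct pending_exception pe = { .e = { .old_chunk = 42 }, .ref_count = 1 };
    	struct exception *e = &pe.e;	/* what a hash lookup hands back */
    	struct pending_exception *outer =
    		container_of(e, struct pending_exception, e);

    	printf("chunk=%ld refs=%d\n", outer->e.old_chunk, outer->ref_count);
    	return 0;
    }
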
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 15fa2ae6cdc2..650e0f1f51d8 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -30,7 +30,7 @@ typedef sector_t chunk_t;
 * An exception is used where an old chunk of data has been
 * replaced by a new one.
 */
-struct exception {
+struct dm_snap_exception {
 	struct list_head hash_list;
 
 	chunk_t old_chunk;
@@ -58,13 +58,13 @@ struct exception_store {
 	 * Find somewhere to store the next exception.
 	 */
 	int (*prepare_exception) (struct exception_store *store,
-				  struct exception *e);
+				  struct dm_snap_exception *e);
 
 	/*
 	 * Update the metadata with this exception.
 	 */
 	void (*commit_exception) (struct exception_store *store,
-				  struct exception *e,
+				  struct dm_snap_exception *e,
 				  void (*callback) (void *, int success),
 				  void *callback_context);
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2717a355dc5b..b5e56af8f85a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -45,7 +45,7 @@ struct dm_io {
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
-struct target_io {
+struct dm_target_io {
 	struct dm_io *io;
 	struct dm_target *ti;
 	union map_info info;
@@ -54,7 +54,7 @@ struct target_io {
 union map_info *dm_get_mapinfo(struct bio *bio)
 {
 	if (bio && bio->bi_private)
-		return &((struct target_io *)bio->bi_private)->info;
+		return &((struct dm_target_io *)bio->bi_private)->info;
 	return NULL;
 }
 
@@ -132,14 +132,12 @@ static int __init local_init(void)
 	int r;
 
 	/* allocate a slab for the dm_ios */
-	_io_cache = kmem_cache_create("dm_io",
-				      sizeof(struct dm_io), 0, 0, NULL, NULL);
+	_io_cache = KMEM_CACHE(dm_io, 0);
 	if (!_io_cache)
 		return -ENOMEM;
 
 	/* allocate a slab for the target ios */
-	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
-				       0, 0, NULL, NULL);
+	_tio_cache = KMEM_CACHE(dm_target_io, 0);
 	if (!_tio_cache) {
 		kmem_cache_destroy(_io_cache);
 		return -ENOMEM;
@@ -325,22 +323,22 @@ out:
 	return r;
 }
 
-static inline struct dm_io *alloc_io(struct mapped_device *md)
+static struct dm_io *alloc_io(struct mapped_device *md)
 {
 	return mempool_alloc(md->io_pool, GFP_NOIO);
 }
 
-static inline void free_io(struct mapped_device *md, struct dm_io *io)
+static void free_io(struct mapped_device *md, struct dm_io *io)
 {
 	mempool_free(io, md->io_pool);
 }
 
-static inline struct target_io *alloc_tio(struct mapped_device *md)
+static struct dm_target_io *alloc_tio(struct mapped_device *md)
 {
 	return mempool_alloc(md->tio_pool, GFP_NOIO);
 }
 
-static inline void free_tio(struct mapped_device *md, struct target_io *tio)
+static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 {
 	mempool_free(tio, md->tio_pool);
 }
@@ -498,7 +496,7 @@ static void dec_pending(struct dm_io *io, int error)
 static int clone_endio(struct bio *bio, unsigned int done, int error)
 {
 	int r = 0;
-	struct target_io *tio = bio->bi_private;
+	struct dm_target_io *tio = bio->bi_private;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
@@ -558,7 +556,7 @@ static sector_t max_io_len(struct mapped_device *md,
 }
 
 static void __map_bio(struct dm_target *ti, struct bio *clone,
-		      struct target_io *tio)
+		      struct dm_target_io *tio)
 {
 	int r;
 	sector_t sector;
@@ -672,7 +670,7 @@ static void __clone_and_map(struct clone_info *ci)
 	struct bio *clone, *bio = ci->bio;
 	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
 	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
-	struct target_io *tio;
+	struct dm_target_io *tio;
 
 	/*
 	 * Allocate a target io object.
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index dbc234e3c69f..7e052378c47e 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -29,7 +29,7 @@
 static struct workqueue_struct *_kcopyd_wq;
 static struct work_struct _kcopyd_work;
 
-static inline void wake(void)
+static void wake(void)
 {
 	queue_work(_kcopyd_wq, &_kcopyd_work);
 }
@@ -226,10 +226,7 @@ static LIST_HEAD(_pages_jobs);
 
 static int jobs_init(void)
 {
-	_job_cache = kmem_cache_create("kcopyd-jobs",
-				       sizeof(struct kcopyd_job),
-				       __alignof__(struct kcopyd_job),
-				       0, NULL, NULL);
+	_job_cache = KMEM_CACHE(kcopyd_job, 0);
 	if (!_job_cache)
 		return -ENOMEM;
 
@@ -258,7 +255,7 @@ static void jobs_exit(void)
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
-static inline struct kcopyd_job *pop(struct list_head *jobs)
+static struct kcopyd_job *pop(struct list_head *jobs)
 {
 	struct kcopyd_job *job = NULL;
 	unsigned long flags;
@@ -274,7 +271,7 @@ static inline struct kcopyd_job *pop(struct list_head *jobs)
 	return job;
 }
 
-static inline void push(struct list_head *jobs, struct kcopyd_job *job)
+static void push(struct list_head *jobs, struct kcopyd_job *job)
 {
 	unsigned long flags;
 