Diffstat:
-rw-r--r--  Documentation/device-mapper/dm-crypt.txt                         |  52
-rw-r--r--  drivers/md/Makefile                                              |   6
-rw-r--r--  drivers/md/dm-exception-store.c                                  |  10
-rw-r--r--  drivers/md/dm-io.c                                               |  38
-rw-r--r--  drivers/md/dm-kcopyd.c (renamed from drivers/md/kcopyd.c)        | 298
-rw-r--r--  drivers/md/dm-log.c                                              | 254
-rw-r--r--  drivers/md/dm-raid1.c                                            | 132
-rw-r--r--  drivers/md/dm-snap.c                                             |  22
-rw-r--r--  drivers/md/dm-snap.h                                             |   4
-rw-r--r--  drivers/md/dm-table.c                                            |  42
-rw-r--r--  drivers/md/dm.c                                                  |  16
-rw-r--r--  drivers/md/dm.h                                                  |  98
-rw-r--r--  drivers/md/kcopyd.h                                              |  42
-rw-r--r--  include/linux/device-mapper.h                                    |  96
-rw-r--r--  include/linux/dm-dirty-log.h (renamed from drivers/md/dm-log.h)  |  84
-rw-r--r--  include/linux/dm-io.h (renamed from drivers/md/dm-io.h)          |  18
-rw-r--r--  include/linux/dm-kcopyd.h                                        |  47
17 files changed, 659 insertions(+), 600 deletions(-)
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
new file mode 100644
index 000000000000..6680cab2c705
--- /dev/null
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -0,0 +1,52 @@
+dm-crypt
+=========
+
+Device-Mapper's "crypt" target provides transparent encryption of block devices
+using the kernel crypto API.
+
+Parameters: <cipher> <key> <iv_offset> <device path> <offset>
+
+<cipher>
+    Encryption cipher and an optional IV generation mode.
+    (In format cipher-chainmode-ivopts:ivmode).
+    Examples:
+       des
+       aes-cbc-essiv:sha256
+       twofish-ecb
+
+    /proc/crypto contains supported crypto modes
+
+<key>
+    Key used for encryption. It is encoded as a hexadecimal number.
+    You can only use key sizes that are valid for the selected cipher.
+
+<iv_offset>
+    The IV offset is a sector count that is added to the sector number
+    before creating the IV.
+
+<device path>
+    This is the device that is going to be used as backend and contains the
+    encrypted data.  You can specify it as a path like /dev/xxx or a device
+    number <major>:<minor>.
+
+<offset>
+    Starting sector within the device where the encrypted data begins.
+
+Example scripts
+===============
+LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
+encryption with dm-crypt using the 'cryptsetup' utility, see
+http://luks.endorphin.org/
+
+[[
+#!/bin/sh
+# Create a crypt device using dmsetup
+dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+]]
+
+[[
+#!/bin/sh
+# Create a crypt device using cryptsetup and LUKS header with default cipher
+cryptsetup luksFormat $1
+cryptsetup luksOpen $1 crypt1
+]]
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d9aa7edb8780..7be09eeea293 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,10 +3,10 @@
 #
 
 dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-		   dm-ioctl.o dm-io.o kcopyd.o
+		   dm-ioctl.o dm-io.o dm-kcopyd.o
 dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
-dm-mirror-objs	:= dm-log.o dm-raid1.o
+dm-mirror-objs	:= dm-raid1.o
 dm-rdac-objs	:= dm-mpath-rdac.o
 dm-hp-sw-objs	:= dm-mpath-hp-sw.o
 md-mod-objs     := md.o bitmap.o
@@ -39,7 +39,7 @@ obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
 obj-$(CONFIG_DM_MULTIPATH_HP)	+= dm-hp-sw.o
 obj-$(CONFIG_DM_MULTIPATH_RDAC)	+= dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o
+obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
 
 quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 5bbce29f143a..41f408068a7c 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -9,13 +9,13 @@
 
 #include "dm.h"
 #include "dm-snap.h"
-#include "dm-io.h"
-#include "kcopyd.h"
 
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
 
 #define DM_MSG_PREFIX "snapshots"
 #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
@@ -131,7 +131,7 @@ struct pstore {
 
 static unsigned sectors_to_pages(unsigned sectors)
 {
-	return sectors / (PAGE_SIZE >> 9);
+	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
 }
 
 static int alloc_area(struct pstore *ps)
@@ -159,7 +159,7 @@ static void free_area(struct pstore *ps)
 }
 
 struct mdata_req {
-	struct io_region *where;
+	struct dm_io_region *where;
 	struct dm_io_request *io_req;
 	struct work_struct work;
 	int result;
@@ -177,7 +177,7 @@ static void do_metadata(struct work_struct *work)
  */
 static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 {
-	struct io_region where = {
+	struct dm_io_region where = {
 		.bdev = ps->snap->cow->bdev,
 		.sector = ps->snap->chunk_size * chunk,
 		.count = ps->snap->chunk_size,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 8f25f628ef16..4789c42d9a3a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,13 +5,14 @@
  * This file is released under the GPL.
  */
 
-#include "dm-io.h"
+#include "dm.h"
 
 #include <linux/bio.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -20,7 +21,7 @@ struct dm_io_client {
 
 /* FIXME: can we shrink this ? */
 struct io {
-	unsigned long error;
+	unsigned long error_bits;
 	atomic_t count;
 	struct task_struct *sleeper;
 	struct dm_io_client *client;
@@ -107,14 +108,14 @@ static inline unsigned bio_get_region(struct bio *bio)
 static void dec_count(struct io *io, unsigned int region, int error)
 {
 	if (error)
-		set_bit(region, &io->error);
+		set_bit(region, &io->error_bits);
 
 	if (atomic_dec_and_test(&io->count)) {
 		if (io->sleeper)
 			wake_up_process(io->sleeper);
 
 		else {
-			unsigned long r = io->error;
+			unsigned long r = io->error_bits;
 			io_notify_fn fn = io->callback;
 			void *context = io->context;
 
@@ -271,7 +272,7 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int rw, unsigned int region, struct io_region *where,
+static void do_region(int rw, unsigned region, struct dm_io_region *where,
 		      struct dpages *dp, struct io *io)
 {
 	struct bio *bio;
@@ -320,7 +321,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 }
 
 static void dispatch_io(int rw, unsigned int num_regions,
-			struct io_region *where, struct dpages *dp,
+			struct dm_io_region *where, struct dpages *dp,
 			struct io *io, int sync)
 {
 	int i;
@@ -347,17 +348,17 @@ static void dispatch_io(int rw, unsigned int num_regions,
 }
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-		   struct io_region *where, int rw, struct dpages *dp,
+		   struct dm_io_region *where, int rw, struct dpages *dp,
 		   unsigned long *error_bits)
 {
 	struct io io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		return -EIO;
 	}
 
-	io.error = 0;
+	io.error_bits = 0;
 	atomic_set(&io.count, 1); /* see dispatch_io() */
 	io.sleeper = current;
 	io.client = client;
@@ -378,25 +379,25 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 		return -EINTR;
 
 	if (error_bits)
-		*error_bits = io.error;
+		*error_bits = io.error_bits;
 
-	return io.error ? -EIO : 0;
+	return io.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-		    struct io_region *where, int rw, struct dpages *dp,
+		    struct dm_io_region *where, int rw, struct dpages *dp,
 		    io_notify_fn fn, void *context)
 {
 	struct io *io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		fn(1, context);
 		return -EIO;
 	}
 
 	io = mempool_alloc(client->pool, GFP_NOIO);
-	io->error = 0;
+	io->error_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = NULL;
 	io->client = client;
@@ -435,10 +436,15 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 }
 
 /*
- * New collapsed (a)synchronous interface
+ * New collapsed (a)synchronous interface.
+ *
+ * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
+ * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct io_region *where, unsigned long *sync_error_bits)
+	  struct dm_io_region *where, unsigned long *sync_error_bits)
 {
 	int r;
 	struct dpages dp;
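
For context (not part of the patch): a minimal sketch of an asynchronous
dm_io() call against the renamed interface, following the unplug rule in the
new comment above. The callback, device, and region values are hypothetical,
and the DM_IO_KMEM memory type is assumed from dm-io.h; filling in notify.fn
is what selects the async path.

	static void io_done(unsigned long error_bits, void *context)
	{
		/* error_bits has one bit set per failed region */
	}

	static void example_write(struct dm_io_client *client,
				  struct block_device *bdev, void *data)
	{
		struct dm_io_region where = {
			.bdev = bdev,
			.sector = 0,
			.count = 8,		/* length in sectors */
		};
		struct dm_io_request io_req = {
			/* BIO_RW_SYNC so the IO is not held back by the
			 * queue's unplug timer (see comment above) */
			.bi_rw = WRITE | (1 << BIO_RW_SYNC),
			.mem.type = DM_IO_KMEM,	/* kernel-mapped buffer */
			.mem.ptr.addr = data,
			.notify.fn = io_done,	/* async completion */
			.notify.context = NULL,
			.client = client,	/* from dm_io_client_create() */
		};

		dm_io(&io_req, 1, &where, NULL);
	}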
diff --git a/drivers/md/kcopyd.c b/drivers/md/dm-kcopyd.c
index e76b52ade690..996802b8a452 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -9,9 +9,8 @@
  * completion notification.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/atomic.h>
-
 #include <linux/blkdev.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -23,24 +22,15 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/dm-kcopyd.h>
 
-#include "kcopyd.h"
-
-static struct workqueue_struct *_kcopyd_wq;
-static struct work_struct _kcopyd_work;
-
-static void wake(void)
-{
-	queue_work(_kcopyd_wq, &_kcopyd_work);
-}
+#include "dm.h"
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
  *---------------------------------------------------------------*/
-struct kcopyd_client {
-	struct list_head list;
-
+struct dm_kcopyd_client {
 	spinlock_t lock;
 	struct page_list *pages;
 	unsigned int nr_pages;
@@ -50,8 +40,32 @@ struct kcopyd_client {
 
 	wait_queue_head_t destroyq;
 	atomic_t nr_jobs;
+
+	mempool_t *job_pool;
+
+	struct workqueue_struct *kcopyd_wq;
+	struct work_struct kcopyd_work;
+
+/*
+ * We maintain three lists of jobs:
+ *
+ * i) jobs waiting for pages
+ * ii) jobs that have pages, and are waiting for the io to be issued.
+ * iii) jobs that have completed.
+ *
+ * All three of these are protected by job_lock.
+ */
+	spinlock_t job_lock;
+	struct list_head complete_jobs;
+	struct list_head io_jobs;
+	struct list_head pages_jobs;
 };
 
+static void wake(struct dm_kcopyd_client *kc)
+{
+	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
+}
+
 static struct page_list *alloc_pl(void)
 {
 	struct page_list *pl;
@@ -75,7 +89,7 @@ static void free_pl(struct page_list *pl)
 	kfree(pl);
 }
 
-static int kcopyd_get_pages(struct kcopyd_client *kc,
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
 			    unsigned int nr, struct page_list **pages)
 {
 	struct page_list *pl;
@@ -98,7 +112,7 @@ static int kcopyd_get_pages(struct kcopyd_client *kc,
 	return 0;
 }
 
-static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
 	struct page_list *cursor;
 
@@ -126,7 +140,7 @@ static void drop_pages(struct page_list *pl)
 	}
 }
 
-static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
+static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 {
 	unsigned int i;
 	struct page_list *pl = NULL, *next;
@@ -147,7 +161,7 @@ static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
 	return 0;
 }
 
-static void client_free_pages(struct kcopyd_client *kc)
+static void client_free_pages(struct dm_kcopyd_client *kc)
 {
 	BUG_ON(kc->nr_free_pages != kc->nr_pages);
 	drop_pages(kc->pages);
@@ -161,7 +175,7 @@ static void client_free_pages(struct kcopyd_client *kc)
  * ever having to do io (which could cause a deadlock).
  *---------------------------------------------------------------*/
 struct kcopyd_job {
-	struct kcopyd_client *kc;
+	struct dm_kcopyd_client *kc;
 	struct list_head list;
 	unsigned long flags;
 
@@ -175,13 +189,13 @@ struct kcopyd_job {
 	 * Either READ or WRITE
 	 */
 	int rw;
-	struct io_region source;
+	struct dm_io_region source;
 
 	/*
 	 * The destinations for the transfer.
 	 */
 	unsigned int num_dests;
-	struct io_region dests[KCOPYD_MAX_REGIONS];
+	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
 
 	sector_t offset;
 	unsigned int nr_pages;
@@ -191,7 +205,7 @@ struct kcopyd_job {
 	 * Set this to ensure you are notified when the job has
 	 * completed. 'context' is for callback to use.
 	 */
-	kcopyd_notify_fn fn;
+	dm_kcopyd_notify_fn fn;
 	void *context;
 
 	/*
@@ -207,47 +221,19 @@ struct kcopyd_job {
 #define MIN_JOBS 512
 
 static struct kmem_cache *_job_cache;
-static mempool_t *_job_pool;
 
-/*
- * We maintain three lists of jobs:
- *
- * i) jobs waiting for pages
- * ii) jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
- *
- * All three of these are protected by job_lock.
- */
-static DEFINE_SPINLOCK(_job_lock);
-
-static LIST_HEAD(_complete_jobs);
-static LIST_HEAD(_io_jobs);
-static LIST_HEAD(_pages_jobs);
-
-static int jobs_init(void)
+int __init dm_kcopyd_init(void)
 {
 	_job_cache = KMEM_CACHE(kcopyd_job, 0);
 	if (!_job_cache)
 		return -ENOMEM;
 
-	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
-	if (!_job_pool) {
-		kmem_cache_destroy(_job_cache);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
-static void jobs_exit(void)
+void dm_kcopyd_exit(void)
 {
-	BUG_ON(!list_empty(&_complete_jobs));
-	BUG_ON(!list_empty(&_io_jobs));
-	BUG_ON(!list_empty(&_pages_jobs));
-
-	mempool_destroy(_job_pool);
 	kmem_cache_destroy(_job_cache);
-	_job_pool = NULL;
 	_job_cache = NULL;
 }
 
@@ -255,18 +241,19 @@ static void jobs_exit(void)
  * Functions to push and pop a job onto the head of a given job
  * list.
  */
-static struct kcopyd_job *pop(struct list_head *jobs)
+static struct kcopyd_job *pop(struct list_head *jobs,
+			      struct dm_kcopyd_client *kc)
 {
 	struct kcopyd_job *job = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
 
 	if (!list_empty(jobs)) {
 		job = list_entry(jobs->next, struct kcopyd_job, list);
 		list_del(&job->list);
 	}
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 
 	return job;
 }
@@ -274,10 +261,11 @@ static struct kcopyd_job *pop(struct list_head *jobs)
 static void push(struct list_head *jobs, struct kcopyd_job *job)
 {
 	unsigned long flags;
+	struct dm_kcopyd_client *kc = job->kc;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
 	list_add_tail(&job->list, jobs);
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 }
 
 /*
@@ -294,11 +282,11 @@ static int run_complete_job(struct kcopyd_job *job)
 	void *context = job->context;
 	int read_err = job->read_err;
 	unsigned long write_err = job->write_err;
-	kcopyd_notify_fn fn = job->fn;
-	struct kcopyd_client *kc = job->kc;
+	dm_kcopyd_notify_fn fn = job->fn;
+	struct dm_kcopyd_client *kc = job->kc;
 
 	kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, _job_pool);
+	mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -310,6 +298,7 @@ static int run_complete_job(struct kcopyd_job *job)
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct dm_kcopyd_client *kc = job->kc;
 
 	if (error) {
 		if (job->rw == WRITE)
@@ -317,22 +306,22 @@ static void complete_io(unsigned long error, void *context)
 		else
 			job->read_err = 1;
 
-		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
-			push(&_complete_jobs, job);
-			wake();
+		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+			push(&kc->complete_jobs, job);
+			wake(kc);
 			return;
 		}
 	}
 
 	if (job->rw == WRITE)
-		push(&_complete_jobs, job);
+		push(&kc->complete_jobs, job);
 
 	else {
 		job->rw = WRITE;
-		push(&_io_jobs, job);
+		push(&kc->io_jobs, job);
 	}
 
-	wake();
+	wake(kc);
 }
 
 /*
@@ -343,7 +332,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw,
+		.bi_rw = job->rw | (1 << BIO_RW_SYNC),
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
@@ -369,7 +358,7 @@ static int run_pages_job(struct kcopyd_job *job)
 	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
 	if (!r) {
 		/* this job is ready for io */
-		push(&_io_jobs, job);
+		push(&job->kc->io_jobs, job);
 		return 0;
 	}
 
@@ -384,12 +373,13 @@ static int run_pages_job(struct kcopyd_job *job)
  * Run through a list for as long as possible. Returns the count
  * of successful jobs.
  */
-static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
+static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
+			int (*fn) (struct kcopyd_job *))
 {
 	struct kcopyd_job *job;
 	int r, count = 0;
 
-	while ((job = pop(jobs))) {
+	while ((job = pop(jobs, kc))) {
 
 		r = fn(job);
 
@@ -399,7 +389,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 				job->write_err = (unsigned long) -1L;
 			else
 				job->read_err = 1;
-			push(&_complete_jobs, job);
+			push(&kc->complete_jobs, job);
 			break;
 		}
 
@@ -421,8 +411,11 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(struct work_struct *ignored)
+static void do_work(struct work_struct *work)
 {
+	struct dm_kcopyd_client *kc = container_of(work,
+					struct dm_kcopyd_client, kcopyd_work);
+
 	/*
 	 * The order that these are called is *very* important.
 	 * complete jobs can free some pages for pages jobs.
@@ -430,9 +423,9 @@ static void do_work(struct work_struct *ignored)
 	 * list. io jobs call wake when they complete and it all
 	 * starts again.
 	 */
-	process_jobs(&_complete_jobs, run_complete_job);
-	process_jobs(&_pages_jobs, run_pages_job);
-	process_jobs(&_io_jobs, run_io_job);
+	process_jobs(&kc->complete_jobs, kc, run_complete_job);
+	process_jobs(&kc->pages_jobs, kc, run_pages_job);
+	process_jobs(&kc->io_jobs, kc, run_io_job);
 }
 
 /*
@@ -442,9 +435,10 @@ static void do_work(struct work_struct *ignored)
  */
 static void dispatch_job(struct kcopyd_job *job)
 {
-	atomic_inc(&job->kc->nr_jobs);
-	push(&_pages_jobs, job);
-	wake();
+	struct dm_kcopyd_client *kc = job->kc;
+	atomic_inc(&kc->nr_jobs);
+	push(&kc->pages_jobs, job);
+	wake(kc);
 }
 
 #define SUB_JOB_SIZE 128
@@ -469,7 +463,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 	 * Only dispatch more work if there hasn't been an error.
 	 */
 	if ((!job->read_err && !job->write_err) ||
-	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
+	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
 		/* get the next chunk of work */
 		progress = job->progress;
 		count = job->source.count - progress;
@@ -484,7 +478,8 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
+		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -508,7 +503,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 	 * after we've completed.
 	 */
 	job->fn(read_err, write_err, job->context);
-	mempool_free(job, _job_pool);
+	mempool_free(job, job->kc->job_pool);
 	}
 }
 
@@ -526,16 +521,16 @@ static void split_job(struct kcopyd_job *job)
 	segment_complete(0, 0u, job);
 }
 
-int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
-		unsigned int num_dests, struct io_region *dests,
-		unsigned int flags, kcopyd_notify_fn fn, void *context)
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+		   unsigned int num_dests, struct dm_io_region *dests,
+		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
 {
 	struct kcopyd_job *job;
 
 	/*
 	 * Allocate a new job.
 	 */
-	job = mempool_alloc(_job_pool, GFP_NOIO);
+	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
 	/*
 	 * set up for the read.
@@ -569,6 +564,7 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
 
 	return 0;
 }
+EXPORT_SYMBOL(dm_kcopyd_copy);
 
 /*
  * Cancels a kcopyd job, eg. someone might be deactivating a
@@ -583,126 +579,76 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 #endif /*  0  */
 
 /*-----------------------------------------------------------------
- * Unit setup
+ * Client setup
  *---------------------------------------------------------------*/
-static DEFINE_MUTEX(_client_lock);
-static LIST_HEAD(_clients);
-
-static void client_add(struct kcopyd_client *kc)
+int dm_kcopyd_client_create(unsigned int nr_pages,
+			    struct dm_kcopyd_client **result)
 {
-	mutex_lock(&_client_lock);
-	list_add(&kc->list, &_clients);
-	mutex_unlock(&_client_lock);
-}
-
-static void client_del(struct kcopyd_client *kc)
-{
-	mutex_lock(&_client_lock);
-	list_del(&kc->list);
-	mutex_unlock(&_client_lock);
-}
-
-static DEFINE_MUTEX(kcopyd_init_lock);
-static int kcopyd_clients = 0;
+	int r = -ENOMEM;
+	struct dm_kcopyd_client *kc;
 
-static int kcopyd_init(void)
-{
-	int r;
-
-	mutex_lock(&kcopyd_init_lock);
-
-	if (kcopyd_clients) {
-		/* Already initialized. */
-		kcopyd_clients++;
-		mutex_unlock(&kcopyd_init_lock);
-		return 0;
-	}
-
-	r = jobs_init();
-	if (r) {
-		mutex_unlock(&kcopyd_init_lock);
-		return r;
-	}
-
-	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
-	if (!_kcopyd_wq) {
-		jobs_exit();
-		mutex_unlock(&kcopyd_init_lock);
+	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
+	if (!kc)
 		return -ENOMEM;
-	}
-
-	kcopyd_clients++;
-	INIT_WORK(&_kcopyd_work, do_work);
-	mutex_unlock(&kcopyd_init_lock);
-	return 0;
-}
 
-static void kcopyd_exit(void)
-{
-	mutex_lock(&kcopyd_init_lock);
-	kcopyd_clients--;
-	if (!kcopyd_clients) {
-		jobs_exit();
-		destroy_workqueue(_kcopyd_wq);
-		_kcopyd_wq = NULL;
-	}
-	mutex_unlock(&kcopyd_init_lock);
-}
-
-int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
-{
-	int r = 0;
-	struct kcopyd_client *kc;
+	spin_lock_init(&kc->lock);
+	spin_lock_init(&kc->job_lock);
+	INIT_LIST_HEAD(&kc->complete_jobs);
+	INIT_LIST_HEAD(&kc->io_jobs);
+	INIT_LIST_HEAD(&kc->pages_jobs);
 
-	r = kcopyd_init();
-	if (r)
-		return r;
+	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
+	if (!kc->job_pool)
+		goto bad_slab;
 
-	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
-	if (!kc) {
-		kcopyd_exit();
-		return -ENOMEM;
-	}
+	INIT_WORK(&kc->kcopyd_work, do_work);
+	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
+	if (!kc->kcopyd_wq)
+		goto bad_workqueue;
 
-	spin_lock_init(&kc->lock);
 	kc->pages = NULL;
 	kc->nr_pages = kc->nr_free_pages = 0;
 	r = client_alloc_pages(kc, nr_pages);
-	if (r) {
-		kfree(kc);
-		kcopyd_exit();
-		return r;
-	}
+	if (r)
+		goto bad_client_pages;
 
 	kc->io_client = dm_io_client_create(nr_pages);
 	if (IS_ERR(kc->io_client)) {
 		r = PTR_ERR(kc->io_client);
-		client_free_pages(kc);
-		kfree(kc);
-		kcopyd_exit();
-		return r;
+		goto bad_io_client;
 	}
 
 	init_waitqueue_head(&kc->destroyq);
 	atomic_set(&kc->nr_jobs, 0);
 
-	client_add(kc);
 	*result = kc;
 	return 0;
+
+bad_io_client:
+	client_free_pages(kc);
+bad_client_pages:
+	destroy_workqueue(kc->kcopyd_wq);
+bad_workqueue:
+	mempool_destroy(kc->job_pool);
+bad_slab:
+	kfree(kc);
+
+	return r;
 }
+EXPORT_SYMBOL(dm_kcopyd_client_create);
 
-void kcopyd_client_destroy(struct kcopyd_client *kc)
+void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
 {
 	/* Wait for completion of all jobs submitted by this client. */
 	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
 
+	BUG_ON(!list_empty(&kc->complete_jobs));
+	BUG_ON(!list_empty(&kc->io_jobs));
+	BUG_ON(!list_empty(&kc->pages_jobs));
+	destroy_workqueue(kc->kcopyd_wq);
 	dm_io_client_destroy(kc->io_client);
 	client_free_pages(kc);
-	client_del(kc);
+	mempool_destroy(kc->job_pool);
 	kfree(kc);
-	kcopyd_exit();
 }
-
-EXPORT_SYMBOL(kcopyd_client_create);
-EXPORT_SYMBOL(kcopyd_client_destroy);
-EXPORT_SYMBOL(kcopyd_copy);
+EXPORT_SYMBOL(dm_kcopyd_client_destroy);
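
For context (not part of the patch): the renamed per-client interface in
outline, as a hypothetical caller might use it. The callback, page count, and
regions are made up; dm_kcopyd_client_destroy() waits for outstanding jobs
via the wait_event() above before tearing the client down.

	static void copy_done(int read_err, unsigned long write_err,
			      void *context)
	{
		/* read_err is a boolean; write_err has one bit per
		 * destination, as in run_complete_job() above */
	}

	static int example_copy(struct dm_io_region *from,
				struct dm_io_region *to)
	{
		struct dm_kcopyd_client *kc;
		int r;

		r = dm_kcopyd_client_create(64, &kc);	/* 64 preallocated pages */
		if (r)
			return r;

		r = dm_kcopyd_copy(kc, from, 1, to, 0, copy_done, NULL);

		/* ... wait until copy_done has run for all jobs ... */
		dm_kcopyd_client_destroy(kc);
		return r;
	}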
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 2a74b2142f50..67a6f31b7fc3 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the LGPL.
  */
@@ -8,64 +9,58 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
 
-#include "dm-log.h"
-#include "dm-io.h"
+#include "dm.h"
 
-#define DM_MSG_PREFIX "mirror log"
+#define DM_MSG_PREFIX "dirty region log"
 
-static LIST_HEAD(_log_types);
-static DEFINE_SPINLOCK(_lock);
+struct dm_dirty_log_internal {
+	struct dm_dirty_log_type *type;
 
-int dm_register_dirty_log_type(struct dirty_log_type *type)
-{
-	spin_lock(&_lock);
-	type->use_count = 0;
-	list_add(&type->list, &_log_types);
-	spin_unlock(&_lock);
+	struct list_head list;
+	long use;
+};
 
-	return 0;
-}
+static LIST_HEAD(_log_types);
+static DEFINE_SPINLOCK(_lock);
 
-int dm_unregister_dirty_log_type(struct dirty_log_type *type)
+static struct dm_dirty_log_internal *__find_dirty_log_type(const char *name)
 {
-	spin_lock(&_lock);
-
-	if (type->use_count)
-		DMWARN("Attempt to unregister a log type that is still in use");
-	else
-		list_del(&type->list);
+	struct dm_dirty_log_internal *log_type;
 
-	spin_unlock(&_lock);
+	list_for_each_entry(log_type, &_log_types, list)
+		if (!strcmp(name, log_type->type->name))
+			return log_type;
 
-	return 0;
+	return NULL;
 }
 
-static struct dirty_log_type *_get_type(const char *type_name)
+static struct dm_dirty_log_internal *_get_dirty_log_type(const char *name)
 {
-	struct dirty_log_type *type;
+	struct dm_dirty_log_internal *log_type;
 
 	spin_lock(&_lock);
-	list_for_each_entry (type, &_log_types, list)
-		if (!strcmp(type_name, type->name)) {
-			if (!type->use_count && !try_module_get(type->module)){
-				spin_unlock(&_lock);
-				return NULL;
-			}
-			type->use_count++;
-			spin_unlock(&_lock);
-			return type;
-		}
+
+	log_type = __find_dirty_log_type(name);
+	if (log_type) {
+		if (!log_type->use && !try_module_get(log_type->type->module))
+			log_type = NULL;
+		else
+			log_type->use++;
+	}
 
 	spin_unlock(&_lock);
-	return NULL;
+
+	return log_type;
 }
 
 /*
  * get_type
  * @type_name
  *
- * Attempt to retrieve the dirty_log_type by name. If not already
+ * Attempt to retrieve the dm_dirty_log_type by name. If not already
  * available, attempt to load the appropriate module.
  *
  * Log modules are named "dm-log-" followed by the 'type_name'.
@@ -78,14 +73,17 @@ static struct dirty_log_type *_get_type(const char *type_name)
  *
  * Returns: dirty_log_type* on success, NULL on failure
  */
-static struct dirty_log_type *get_type(const char *type_name)
+static struct dm_dirty_log_type *get_type(const char *type_name)
 {
 	char *p, *type_name_dup;
-	struct dirty_log_type *type;
+	struct dm_dirty_log_internal *log_type;
+
+	if (!type_name)
+		return NULL;
 
-	type = _get_type(type_name);
-	if (type)
-		return type;
+	log_type = _get_dirty_log_type(type_name);
+	if (log_type)
+		return log_type->type;
 
 	type_name_dup = kstrdup(type_name, GFP_KERNEL);
 	if (!type_name_dup) {
@@ -95,34 +93,106 @@ static struct dirty_log_type *get_type(const char *type_name)
 	}
 
 	while (request_module("dm-log-%s", type_name_dup) ||
-	       !(type = _get_type(type_name))) {
+	       !(log_type = _get_dirty_log_type(type_name))) {
 		p = strrchr(type_name_dup, '-');
 		if (!p)
 			break;
 		p[0] = '\0';
 	}
 
-	if (!type)
+	if (!log_type)
 		DMWARN("Module for logging type \"%s\" not found.", type_name);
 
 	kfree(type_name_dup);
 
-	return type;
+	return log_type ? log_type->type : NULL;
 }
 
-static void put_type(struct dirty_log_type *type)
+static void put_type(struct dm_dirty_log_type *type)
 {
+	struct dm_dirty_log_internal *log_type;
+
+	if (!type)
+		return;
+
 	spin_lock(&_lock);
-	if (!--type->use_count)
+	log_type = __find_dirty_log_type(type->name);
+	if (!log_type)
+		goto out;
+
+	if (!--log_type->use)
 		module_put(type->module);
+
+	BUG_ON(log_type->use < 0);
+
+out:
 	spin_unlock(&_lock);
 }
 
-struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
-				      unsigned int argc, char **argv)
+static struct dm_dirty_log_internal *_alloc_dirty_log_type(struct dm_dirty_log_type *type)
 {
-	struct dirty_log_type *type;
-	struct dirty_log *log;
+	struct dm_dirty_log_internal *log_type = kzalloc(sizeof(*log_type),
+							 GFP_KERNEL);
+
+	if (log_type)
+		log_type->type = type;
+
+	return log_type;
+}
+
+int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
+{
+	struct dm_dirty_log_internal *log_type = _alloc_dirty_log_type(type);
+	int r = 0;
+
+	if (!log_type)
+		return -ENOMEM;
+
+	spin_lock(&_lock);
+	if (!__find_dirty_log_type(type->name))
+		list_add(&log_type->list, &_log_types);
+	else {
+		kfree(log_type);
+		r = -EEXIST;
+	}
+	spin_unlock(&_lock);
+
+	return r;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_register);
+
+int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
+{
+	struct dm_dirty_log_internal *log_type;
+
+	spin_lock(&_lock);
+
+	log_type = __find_dirty_log_type(type->name);
+	if (!log_type) {
+		spin_unlock(&_lock);
+		return -EINVAL;
+	}
+
+	if (log_type->use) {
+		spin_unlock(&_lock);
+		return -ETXTBSY;
+	}
+
+	list_del(&log_type->list);
+
+	spin_unlock(&_lock);
+	kfree(log_type);
+
+	return 0;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_unregister);
+
+struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+					 struct dm_target *ti,
+					 unsigned int argc, char **argv)
+{
+	struct dm_dirty_log_type *type;
+	struct dm_dirty_log *log;
 
 	log = kmalloc(sizeof(*log), GFP_KERNEL);
 	if (!log)
@@ -143,13 +213,15 @@ struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *t
 
 	return log;
 }
+EXPORT_SYMBOL(dm_dirty_log_create);
 
-void dm_destroy_dirty_log(struct dirty_log *log)
+void dm_dirty_log_destroy(struct dm_dirty_log *log)
 {
 	log->type->dtr(log);
 	put_type(log->type);
 	kfree(log);
 }
+EXPORT_SYMBOL(dm_dirty_log_destroy);
 
 /*-----------------------------------------------------------------
  * Persistent and core logs share a lot of their implementation.
@@ -207,7 +279,7 @@ struct log_c {
 	struct dm_dev *log_dev;
 	struct log_header header;
 
-	struct io_region header_location;
+	struct dm_io_region header_location;
 	struct log_header *disk_header;
 };
 
@@ -215,7 +287,7 @@ struct log_c {
  * The touched member needs to be updated every time we access
  * one of the bitsets.
  */
 static inline int log_test_bit(uint32_t *bs, unsigned bit)
 {
 	return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
 }
@@ -302,7 +374,7 @@ static inline int write_header(struct log_c *log)
  * argv contains region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
 #define BYTE_SHIFT 3
-static int create_log_context(struct dirty_log *log, struct dm_target *ti,
+static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 			      unsigned int argc, char **argv,
 			      struct dm_dev *dev)
 {
@@ -315,7 +387,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
 	int r;
 
 	if (argc < 1 || argc > 2) {
-		DMWARN("wrong number of arguments to mirror log");
+		DMWARN("wrong number of arguments to dirty region log");
 		return -EINVAL;
 	}
 
@@ -325,8 +397,8 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
 		else if (!strcmp(argv[1], "nosync"))
 			sync = NOSYNC;
 		else {
-			DMWARN("unrecognised sync argument to mirror log: %s",
-			       argv[1]);
+			DMWARN("unrecognised sync argument to "
+			       "dirty region log: %s", argv[1]);
 			return -EINVAL;
 		}
 	}
@@ -434,7 +506,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
 	return 0;
 }
 
-static int core_ctr(struct dirty_log *log, struct dm_target *ti,
+static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 		    unsigned int argc, char **argv)
 {
 	return create_log_context(log, ti, argc, argv, NULL);
@@ -447,7 +519,7 @@ static void destroy_log_context(struct log_c *lc)
 	kfree(lc);
 }
 
-static void core_dtr(struct dirty_log *log)
+static void core_dtr(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -460,14 +532,14 @@ static void core_dtr(struct dirty_log *log)
  *
  * argv contains log_device region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
-static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
+static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 		    unsigned int argc, char **argv)
 {
 	int r;
 	struct dm_dev *dev;
 
 	if (argc < 2 || argc > 3) {
-		DMWARN("wrong number of arguments to disk mirror log");
+		DMWARN("wrong number of arguments to disk dirty region log");
 		return -EINVAL;
 	}
 
@@ -485,7 +557,7 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
 	return 0;
 }
 
-static void disk_dtr(struct dirty_log *log)
+static void disk_dtr(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -514,7 +586,7 @@ static void fail_log_device(struct log_c *lc)
 	dm_table_event(lc->ti->table);
 }
 
-static int disk_resume(struct dirty_log *log)
+static int disk_resume(struct dm_dirty_log *log)
 {
 	int r;
 	unsigned i;
@@ -524,7 +596,7 @@ static int disk_resume(struct dirty_log *log)
 	/* read the disk header */
 	r = read_header(lc);
 	if (r) {
-		DMWARN("%s: Failed to read header on mirror log device",
+		DMWARN("%s: Failed to read header on dirty region log device",
 		       lc->log_dev->name);
 		fail_log_device(lc);
 		/*
@@ -562,7 +634,7 @@ static int disk_resume(struct dirty_log *log)
 	/* write the new header */
 	r = write_header(lc);
 	if (r) {
-		DMWARN("%s: Failed to write header on mirror log device",
+		DMWARN("%s: Failed to write header on dirty region log device",
 		       lc->log_dev->name);
 		fail_log_device(lc);
 	}
@@ -570,38 +642,38 @@ static int disk_resume(struct dirty_log *log)
 	return r;
 }
 
-static uint32_t core_get_region_size(struct dirty_log *log)
+static uint32_t core_get_region_size(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	return lc->region_size;
 }
 
-static int core_resume(struct dirty_log *log)
+static int core_resume(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	lc->sync_search = 0;
 	return 0;
 }
 
-static int core_is_clean(struct dirty_log *log, region_t region)
+static int core_is_clean(struct dm_dirty_log *log, region_t region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	return log_test_bit(lc->clean_bits, region);
 }
 
-static int core_in_sync(struct dirty_log *log, region_t region, int block)
+static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	return log_test_bit(lc->sync_bits, region);
 }
 
-static int core_flush(struct dirty_log *log)
+static int core_flush(struct dm_dirty_log *log)
 {
 	/* no op */
 	return 0;
 }
 
-static int disk_flush(struct dirty_log *log)
+static int disk_flush(struct dm_dirty_log *log)
 {
 	int r;
 	struct log_c *lc = (struct log_c *) log->context;
@@ -619,19 +691,19 @@ static int disk_flush(struct dirty_log *log)
 	return r;
 }
 
-static void core_mark_region(struct dirty_log *log, region_t region)
+static void core_mark_region(struct dm_dirty_log *log, region_t region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	log_clear_bit(lc, lc->clean_bits, region);
 }
 
-static void core_clear_region(struct dirty_log *log, region_t region)
+static void core_clear_region(struct dm_dirty_log *log, region_t region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 	log_set_bit(lc, lc->clean_bits, region);
 }
 
-static int core_get_resync_work(struct dirty_log *log, region_t *region)
+static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -654,7 +726,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
 	return 1;
 }
 
-static void core_set_region_sync(struct dirty_log *log, region_t region,
+static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
 				 int in_sync)
 {
 	struct log_c *lc = (struct log_c *) log->context;
@@ -669,7 +741,7 @@ static void core_set_region_sync(struct dirty_log *log, region_t region,
 	}
 }
 
-static region_t core_get_sync_count(struct dirty_log *log)
+static region_t core_get_sync_count(struct dm_dirty_log *log)
 {
 	struct log_c *lc = (struct log_c *) log->context;
 
@@ -680,7 +752,7 @@ static region_t core_get_sync_count(struct dirty_log *log)
 	if (lc->sync != DEFAULTSYNC) \
 		DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
 
-static int core_status(struct dirty_log *log, status_type_t status,
+static int core_status(struct dm_dirty_log *log, status_type_t status,
 		       char *result, unsigned int maxlen)
 {
 	int sz = 0;
@@ -700,7 +772,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
 	return sz;
 }
 
-static int disk_status(struct dirty_log *log, status_type_t status,
+static int disk_status(struct dm_dirty_log *log, status_type_t status,
 		       char *result, unsigned int maxlen)
 {
 	int sz = 0;
@@ -722,7 +794,7 @@ static int disk_status(struct dirty_log *log, status_type_t status,
 	return sz;
 }
 
-static struct dirty_log_type _core_type = {
+static struct dm_dirty_log_type _core_type = {
 	.name = "core",
 	.module = THIS_MODULE,
 	.ctr = core_ctr,
@@ -740,7 +812,7 @@ static struct dirty_log_type _core_type = {
 	.status = core_status,
 };
 
-static struct dirty_log_type _disk_type = {
+static struct dm_dirty_log_type _disk_type = {
 	.name = "disk",
 	.module = THIS_MODULE,
 	.ctr = disk_ctr,
@@ -763,26 +835,28 @@ int __init dm_dirty_log_init(void)
 {
 	int r;
 
-	r = dm_register_dirty_log_type(&_core_type);
+	r = dm_dirty_log_type_register(&_core_type);
 	if (r)
 		DMWARN("couldn't register core log");
 
-	r = dm_register_dirty_log_type(&_disk_type);
+	r = dm_dirty_log_type_register(&_disk_type);
 	if (r) {
 		DMWARN("couldn't register disk type");
-		dm_unregister_dirty_log_type(&_core_type);
+		dm_dirty_log_type_unregister(&_core_type);
 	}
 
 	return r;
 }
 
-void dm_dirty_log_exit(void)
+void __exit dm_dirty_log_exit(void)
 {
-	dm_unregister_dirty_log_type(&_disk_type);
-	dm_unregister_dirty_log_type(&_core_type);
+	dm_dirty_log_type_unregister(&_disk_type);
+	dm_dirty_log_type_unregister(&_core_type);
 }
 
-EXPORT_SYMBOL(dm_register_dirty_log_type);
-EXPORT_SYMBOL(dm_unregister_dirty_log_type);
-EXPORT_SYMBOL(dm_create_dirty_log);
-EXPORT_SYMBOL(dm_destroy_dirty_log);
+module_init(dm_dirty_log_init);
+module_exit(dm_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " dirty region log");
+MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
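
For context (not part of the patch): the shape of an out-of-tree log module
under the renamed registration API. The "example" type name and its
example_ctr/example_dtr methods are hypothetical; for this name, get_type()
above would issue request_module("dm-log-example").

	static struct dm_dirty_log_type _example_type = {
		.name = "example",
		.module = THIS_MODULE,
		.ctr = example_ctr,	/* hypothetical constructor */
		.dtr = example_dtr,	/* hypothetical destructor */
		/* ...remaining dm_dirty_log_type methods... */
	};

	static int __init dm_example_log_init(void)
	{
		/* -EEXIST if a type of this name is already registered */
		return dm_dirty_log_type_register(&_example_type);
	}

	static void __exit dm_example_log_exit(void)
	{
		/* returns -ETXTBSY while the log type is still in use */
		dm_dirty_log_type_unregister(&_example_type);
	}

	module_init(dm_example_log_init);
	module_exit(dm_example_log_exit);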
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 762cb086bb7f..ff05fe893083 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -7,9 +7,6 @@
 #include "dm.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
-#include "dm-io.h"
-#include "dm-log.h"
-#include "kcopyd.h"
 
 #include <linux/ctype.h>
 #include <linux/init.h>
@@ -22,6 +19,9 @@
 #include <linux/workqueue.h>
 #include <linux/log2.h>
 #include <linux/hardirq.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
+#include <linux/dm-kcopyd.h>
 
 #define DM_MSG_PREFIX "raid1"
 #define DM_IO_PAGES 64
@@ -74,7 +74,7 @@ struct region_hash {
 	unsigned region_shift;
 
 	/* holds persistent region state */
-	struct dirty_log *log;
+	struct dm_dirty_log *log;
 
 	/* hash table */
 	rwlock_t hash_lock;
@@ -133,7 +133,7 @@ struct mirror_set {
 	struct dm_target *ti;
 	struct list_head list;
 	struct region_hash rh;
-	struct kcopyd_client *kcopyd_client;
+	struct dm_kcopyd_client *kcopyd_client;
 	uint64_t features;
 
 	spinlock_t lock;	/* protects the lists */
@@ -154,6 +154,9 @@ struct mirror_set {
 
 	struct workqueue_struct *kmirrord_wq;
 	struct work_struct kmirrord_work;
+	struct timer_list timer;
+	unsigned long timer_pending;
+
 	struct work_struct trigger_event;
 
 	unsigned int nr_mirrors;
@@ -178,13 +181,32 @@ static void wake(struct mirror_set *ms)
 	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
 }
 
+static void delayed_wake_fn(unsigned long data)
+{
+	struct mirror_set *ms = (struct mirror_set *) data;
+
+	clear_bit(0, &ms->timer_pending);
+	wake(ms);
+}
+
+static void delayed_wake(struct mirror_set *ms)
+{
+	if (test_and_set_bit(0, &ms->timer_pending))
+		return;
+
+	ms->timer.expires = jiffies + HZ / 5;
+	ms->timer.data = (unsigned long) ms;
+	ms->timer.function = delayed_wake_fn;
+	add_timer(&ms->timer);
+}
+
 /* FIXME move this */
 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
 
 #define MIN_REGIONS 64
 #define MAX_RECOVERY 1
 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
-		   struct dirty_log *log, uint32_t region_size,
+		   struct dm_dirty_log *log, uint32_t region_size,
 		   region_t nr_regions)
 {
 	unsigned int nr_buckets, max_buckets;
@@ -249,7 +271,7 @@ static void rh_exit(struct region_hash *rh)
 	}
 
 	if (rh->log)
-		dm_destroy_dirty_log(rh->log);
+		dm_dirty_log_destroy(rh->log);
 	if (rh->region_pool)
 		mempool_destroy(rh->region_pool);
 	vfree(rh->buckets);
@@ -405,24 +427,22 @@ static void rh_update_states(struct region_hash *rh)
 	write_lock_irq(&rh->hash_lock);
 	spin_lock(&rh->region_lock);
 	if (!list_empty(&rh->clean_regions)) {
-		list_splice(&rh->clean_regions, &clean);
-		INIT_LIST_HEAD(&rh->clean_regions);
+		list_splice_init(&rh->clean_regions, &clean);
 
 		list_for_each_entry(reg, &clean, list)
 			list_del(&reg->hash_list);
 	}
 
 	if (!list_empty(&rh->recovered_regions)) {
-		list_splice(&rh->recovered_regions, &recovered);
-		INIT_LIST_HEAD(&rh->recovered_regions);
+		list_splice_init(&rh->recovered_regions, &recovered);
 
 		list_for_each_entry (reg, &recovered, list)
 			list_del(&reg->hash_list);
 	}
 
 	if (!list_empty(&rh->failed_recovered_regions)) {
-		list_splice(&rh->failed_recovered_regions, &failed_recovered);
-		INIT_LIST_HEAD(&rh->failed_recovered_regions);
+		list_splice_init(&rh->failed_recovered_regions,
+				 &failed_recovered);
 
 		list_for_each_entry(reg, &failed_recovered, list)
 			list_del(&reg->hash_list);
@@ -790,7 +810,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
 {
 	int r;
 	unsigned int i;
-	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
+	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
 	struct mirror *m;
 	unsigned long flags = 0;
 
@@ -822,9 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
 	}
 
 	/* hand to kcopyd */
-	set_bit(KCOPYD_IGNORE_ERROR, &flags);
-	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
-			recovery_complete, reg);
+	set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
+			   flags, recovery_complete, reg);
828 848
829 return r; 849 return r;
830} 850}
@@ -833,7 +853,7 @@ static void do_recovery(struct mirror_set *ms)
833{ 853{
834 int r; 854 int r;
835 struct region *reg; 855 struct region *reg;
836 struct dirty_log *log = ms->rh.log; 856 struct dm_dirty_log *log = ms->rh.log;
837 857
838 /* 858 /*
839 * Start quiescing some regions. 859 * Start quiescing some regions.
@@ -909,7 +929,7 @@ static void map_bio(struct mirror *m, struct bio *bio)
909 bio->bi_sector = map_sector(m, bio); 929 bio->bi_sector = map_sector(m, bio);
910} 930}
911 931
912static void map_region(struct io_region *io, struct mirror *m, 932static void map_region(struct dm_io_region *io, struct mirror *m,
913 struct bio *bio) 933 struct bio *bio)
914{ 934{
915 io->bdev = m->dev->bdev; 935 io->bdev = m->dev->bdev;
@@ -951,7 +971,7 @@ static void read_callback(unsigned long error, void *context)
951/* Asynchronous read. */ 971/* Asynchronous read. */
952static void read_async_bio(struct mirror *m, struct bio *bio) 972static void read_async_bio(struct mirror *m, struct bio *bio)
953{ 973{
954 struct io_region io; 974 struct dm_io_region io;
955 struct dm_io_request io_req = { 975 struct dm_io_request io_req = {
956 .bi_rw = READ, 976 .bi_rw = READ,
957 .mem.type = DM_IO_BVEC, 977 .mem.type = DM_IO_BVEC,
@@ -1019,7 +1039,7 @@ static void __bio_mark_nosync(struct mirror_set *ms,
1019{ 1039{
1020 unsigned long flags; 1040 unsigned long flags;
1021 struct region_hash *rh = &ms->rh; 1041 struct region_hash *rh = &ms->rh;
1022 struct dirty_log *log = ms->rh.log; 1042 struct dm_dirty_log *log = ms->rh.log;
1023 struct region *reg; 1043 struct region *reg;
1024 region_t region = bio_to_region(rh, bio); 1044 region_t region = bio_to_region(rh, bio);
1025 int recovering = 0; 1045 int recovering = 0;
@@ -1107,7 +1127,7 @@ out:
1107static void do_write(struct mirror_set *ms, struct bio *bio) 1127static void do_write(struct mirror_set *ms, struct bio *bio)
1108{ 1128{
1109 unsigned int i; 1129 unsigned int i;
1110 struct io_region io[ms->nr_mirrors], *dest = io; 1130 struct dm_io_region io[ms->nr_mirrors], *dest = io;
1111 struct mirror *m; 1131 struct mirror *m;
1112 struct dm_io_request io_req = { 1132 struct dm_io_request io_req = {
1113 .bi_rw = WRITE, 1133 .bi_rw = WRITE,
@@ -1182,6 +1202,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
1182 spin_lock_irq(&ms->lock); 1202 spin_lock_irq(&ms->lock);
1183 bio_list_merge(&ms->failures, &sync); 1203 bio_list_merge(&ms->failures, &sync);
1184 spin_unlock_irq(&ms->lock); 1204 spin_unlock_irq(&ms->lock);
1205 wake(ms);
1185 } else 1206 } else
1186 while ((bio = bio_list_pop(&sync))) 1207 while ((bio = bio_list_pop(&sync)))
1187 do_write(ms, bio); 1208 do_write(ms, bio);
@@ -1241,7 +1262,7 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
1241 bio_list_merge(&ms->failures, failures); 1262 bio_list_merge(&ms->failures, failures);
1242 spin_unlock_irq(&ms->lock); 1263 spin_unlock_irq(&ms->lock);
1243 1264
1244 wake(ms); 1265 delayed_wake(ms);
1245} 1266}
1246 1267
1247static void trigger_event(struct work_struct *work) 1268static void trigger_event(struct work_struct *work)
@@ -1255,7 +1276,7 @@ static void trigger_event(struct work_struct *work)
1255/*----------------------------------------------------------------- 1276/*-----------------------------------------------------------------
1256 * kmirrord 1277 * kmirrord
1257 *---------------------------------------------------------------*/ 1278 *---------------------------------------------------------------*/
1258static int _do_mirror(struct work_struct *work) 1279static void do_mirror(struct work_struct *work)
1259{ 1280{
1260 struct mirror_set *ms = container_of(work, struct mirror_set, 1281 struct mirror_set *ms = container_of(work, struct mirror_set,
1261 kmirrord_work); 1282 kmirrord_work);
@@ -1277,23 +1298,7 @@ static int _do_mirror(struct work_struct *work)
1277 do_writes(ms, &writes); 1298 do_writes(ms, &writes);
1278 do_failures(ms, &failures); 1299 do_failures(ms, &failures);
1279 1300
1280 return (ms->failures.head) ? 1 : 0; 1301 dm_table_unplug_all(ms->ti->table);
1281}
1282
1283static void do_mirror(struct work_struct *work)
1284{
1285 /*
1286 * If _do_mirror returns 1, we give it
1287 * another shot. This helps for cases like
1288 * 'suspend' where we call flush_workqueue
1289 * and expect all work to be finished. If
1290 * a failure happens during a suspend, we
1291 * couldn't issue a 'wake' because it would
1292 * not be honored. Therefore, we return '1'
1293 * from _do_mirror, and retry here.
1294 */
1295 while (_do_mirror(work))
1296 schedule();
1297} 1302}
1298 1303
1299 1304
@@ -1303,7 +1308,7 @@ static void do_mirror(struct work_struct *work)
1303static struct mirror_set *alloc_context(unsigned int nr_mirrors, 1308static struct mirror_set *alloc_context(unsigned int nr_mirrors,
1304 uint32_t region_size, 1309 uint32_t region_size,
1305 struct dm_target *ti, 1310 struct dm_target *ti,
1306 struct dirty_log *dl) 1311 struct dm_dirty_log *dl)
1307{ 1312{
1308 size_t len; 1313 size_t len;
1309 struct mirror_set *ms = NULL; 1314 struct mirror_set *ms = NULL;
@@ -1403,12 +1408,12 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
1403/* 1408/*
1404 * Create dirty log: log_type #log_params <log_params> 1409 * Create dirty log: log_type #log_params <log_params>
1405 */ 1410 */
1406static struct dirty_log *create_dirty_log(struct dm_target *ti, 1411static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
1407 unsigned int argc, char **argv, 1412 unsigned int argc, char **argv,
1408 unsigned int *args_used) 1413 unsigned int *args_used)
1409{ 1414{
1410 unsigned int param_count; 1415 unsigned int param_count;
1411 struct dirty_log *dl; 1416 struct dm_dirty_log *dl;
1412 1417
1413 if (argc < 2) { 1418 if (argc < 2) {
1414 ti->error = "Insufficient mirror log arguments"; 1419 ti->error = "Insufficient mirror log arguments";
@@ -1427,7 +1432,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
1427 return NULL; 1432 return NULL;
1428 } 1433 }
1429 1434
1430 dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2); 1435 dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
1431 if (!dl) { 1436 if (!dl) {
1432 ti->error = "Error creating mirror dirty log"; 1437 ti->error = "Error creating mirror dirty log";
1433 return NULL; 1438 return NULL;
@@ -1435,7 +1440,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
1435 1440
1436 if (!_check_region_size(ti, dl->type->get_region_size(dl))) { 1441 if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
1437 ti->error = "Invalid region size"; 1442 ti->error = "Invalid region size";
1438 dm_destroy_dirty_log(dl); 1443 dm_dirty_log_destroy(dl);
1439 return NULL; 1444 return NULL;
1440 } 1445 }
1441 1446
@@ -1496,7 +1501,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1496 int r; 1501 int r;
1497 unsigned int nr_mirrors, m, args_used; 1502 unsigned int nr_mirrors, m, args_used;
1498 struct mirror_set *ms; 1503 struct mirror_set *ms;
1499 struct dirty_log *dl; 1504 struct dm_dirty_log *dl;
1500 1505
1501 dl = create_dirty_log(ti, argc, argv, &args_used); 1506 dl = create_dirty_log(ti, argc, argv, &args_used);
1502 if (!dl) 1507 if (!dl)
@@ -1506,9 +1511,9 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1506 argc -= args_used; 1511 argc -= args_used;
1507 1512
1508 if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 || 1513 if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
1509 nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) { 1514 nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
1510 ti->error = "Invalid number of mirrors"; 1515 ti->error = "Invalid number of mirrors";
1511 dm_destroy_dirty_log(dl); 1516 dm_dirty_log_destroy(dl);
1512 return -EINVAL; 1517 return -EINVAL;
1513 } 1518 }
1514 1519
@@ -1516,13 +1521,13 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1516 1521
1517 if (argc < nr_mirrors * 2) { 1522 if (argc < nr_mirrors * 2) {
1518 ti->error = "Too few mirror arguments"; 1523 ti->error = "Too few mirror arguments";
1519 dm_destroy_dirty_log(dl); 1524 dm_dirty_log_destroy(dl);
1520 return -EINVAL; 1525 return -EINVAL;
1521 } 1526 }
1522 1527
1523 ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); 1528 ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1524 if (!ms) { 1529 if (!ms) {
1525 dm_destroy_dirty_log(dl); 1530 dm_dirty_log_destroy(dl);
1526 return -ENOMEM; 1531 return -ENOMEM;
1527 } 1532 }
1528 1533
@@ -1547,6 +1552,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1547 goto err_free_context; 1552 goto err_free_context;
1548 } 1553 }
1549 INIT_WORK(&ms->kmirrord_work, do_mirror); 1554 INIT_WORK(&ms->kmirrord_work, do_mirror);
1555 init_timer(&ms->timer);
1556 ms->timer_pending = 0;
1550 INIT_WORK(&ms->trigger_event, trigger_event); 1557 INIT_WORK(&ms->trigger_event, trigger_event);
1551 1558
1552 r = parse_features(ms, argc, argv, &args_used); 1559 r = parse_features(ms, argc, argv, &args_used);
@@ -1571,7 +1578,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1571 goto err_destroy_wq; 1578 goto err_destroy_wq;
1572 } 1579 }
1573 1580
1574 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); 1581 r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
1575 if (r) 1582 if (r)
1576 goto err_destroy_wq; 1583 goto err_destroy_wq;
1577 1584
@@ -1589,8 +1596,9 @@ static void mirror_dtr(struct dm_target *ti)
1589{ 1596{
1590 struct mirror_set *ms = (struct mirror_set *) ti->private; 1597 struct mirror_set *ms = (struct mirror_set *) ti->private;
1591 1598
1599 del_timer_sync(&ms->timer);
1592 flush_workqueue(ms->kmirrord_wq); 1600 flush_workqueue(ms->kmirrord_wq);
1593 kcopyd_client_destroy(ms->kcopyd_client); 1601 dm_kcopyd_client_destroy(ms->kcopyd_client);
1594 destroy_workqueue(ms->kmirrord_wq); 1602 destroy_workqueue(ms->kmirrord_wq);
1595 free_context(ms, ti, ms->nr_mirrors); 1603 free_context(ms, ti, ms->nr_mirrors);
1596} 1604}
@@ -1734,7 +1742,7 @@ out:
1734static void mirror_presuspend(struct dm_target *ti) 1742static void mirror_presuspend(struct dm_target *ti)
1735{ 1743{
1736 struct mirror_set *ms = (struct mirror_set *) ti->private; 1744 struct mirror_set *ms = (struct mirror_set *) ti->private;
1737 struct dirty_log *log = ms->rh.log; 1745 struct dm_dirty_log *log = ms->rh.log;
1738 1746
1739 atomic_set(&ms->suspend, 1); 1747 atomic_set(&ms->suspend, 1);
1740 1748
@@ -1763,7 +1771,7 @@ static void mirror_presuspend(struct dm_target *ti)
1763static void mirror_postsuspend(struct dm_target *ti) 1771static void mirror_postsuspend(struct dm_target *ti)
1764{ 1772{
1765 struct mirror_set *ms = ti->private; 1773 struct mirror_set *ms = ti->private;
1766 struct dirty_log *log = ms->rh.log; 1774 struct dm_dirty_log *log = ms->rh.log;
1767 1775
1768 if (log->type->postsuspend && log->type->postsuspend(log)) 1776 if (log->type->postsuspend && log->type->postsuspend(log))
1769 /* FIXME: need better error handling */ 1777 /* FIXME: need better error handling */
@@ -1773,7 +1781,7 @@ static void mirror_postsuspend(struct dm_target *ti)
1773static void mirror_resume(struct dm_target *ti) 1781static void mirror_resume(struct dm_target *ti)
1774{ 1782{
1775 struct mirror_set *ms = ti->private; 1783 struct mirror_set *ms = ti->private;
1776 struct dirty_log *log = ms->rh.log; 1784 struct dm_dirty_log *log = ms->rh.log;
1777 1785
1778 atomic_set(&ms->suspend, 0); 1786 atomic_set(&ms->suspend, 0);
1779 if (log->type->resume && log->type->resume(log)) 1787 if (log->type->resume && log->type->resume(log))
@@ -1811,7 +1819,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1811{ 1819{
1812 unsigned int m, sz = 0; 1820 unsigned int m, sz = 0;
1813 struct mirror_set *ms = (struct mirror_set *) ti->private; 1821 struct mirror_set *ms = (struct mirror_set *) ti->private;
1814 struct dirty_log *log = ms->rh.log; 1822 struct dm_dirty_log *log = ms->rh.log;
1815 char buffer[ms->nr_mirrors + 1]; 1823 char buffer[ms->nr_mirrors + 1];
1816 1824
1817 switch (type) { 1825 switch (type) {
@@ -1864,15 +1872,9 @@ static int __init dm_mirror_init(void)
1864{ 1872{
1865 int r; 1873 int r;
1866 1874
1867 r = dm_dirty_log_init();
1868 if (r)
1869 return r;
1870
1871 r = dm_register_target(&mirror_target); 1875 r = dm_register_target(&mirror_target);
1872 if (r < 0) { 1876 if (r < 0)
1873 DMERR("Failed to register mirror target"); 1877 DMERR("Failed to register mirror target");
1874 dm_dirty_log_exit();
1875 }
1876 1878
1877 return r; 1879 return r;
1878} 1880}
@@ -1884,8 +1886,6 @@ static void __exit dm_mirror_exit(void)
1884 r = dm_unregister_target(&mirror_target); 1886 r = dm_unregister_target(&mirror_target);
1885 if (r < 0) 1887 if (r < 0)
1886 DMERR("unregister failed %d", r); 1888 DMERR("unregister failed %d", r);
1887
1888 dm_dirty_log_exit();
1889} 1889}
1890 1890
1891/* Module hooks */ 1891/* Module hooks */
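The raid1 hunks above replace the _do_mirror() retry loop with a timer-driven debounce: do_failures() now calls delayed_wake(), which coalesces bursts of failing bios into one kmirrord wakeup roughly every 200ms instead of rescheduling in a tight loop. A minimal sketch of that debounce pattern using the classic timer API seen in this diff (the struct and function names here are illustrative, not the dm-raid1 ones):

[[
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>

struct debounced_wake {
	struct workqueue_struct *wq;
	struct work_struct work;
	struct timer_list timer;
	unsigned long pending;		/* bit 0 set while a timer is armed */
};

static void debounce_fire(unsigned long data)
{
	struct debounced_wake *d = (struct debounced_wake *) data;

	clear_bit(0, &d->pending);	/* allow the next delayed wake */
	queue_work(d->wq, &d->work);
}

static void debounce_wake(struct debounced_wake *d)
{
	/* Only the first caller in a burst arms the timer. */
	if (test_and_set_bit(0, &d->pending))
		return;

	d->timer.expires = jiffies + HZ / 5;	/* ~200ms */
	d->timer.data = (unsigned long) d;
	d->timer.function = debounce_fire;
	add_timer(&d->timer);
}
]]

As in mirror_ctr()/mirror_dtr() above, the timer must be prepared with init_timer() before first use and stopped with del_timer_sync() before the workqueue is flushed and destroyed.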
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4dc8a43c034b..1ba8a47d61b1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -18,10 +18,10 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/dm-kcopyd.h>
21 22
22#include "dm-snap.h" 23#include "dm-snap.h"
23#include "dm-bio-list.h" 24#include "dm-bio-list.h"
24#include "kcopyd.h"
25 25
26#define DM_MSG_PREFIX "snapshots" 26#define DM_MSG_PREFIX "snapshots"
27 27
@@ -36,9 +36,9 @@
36#define SNAPSHOT_COPY_PRIORITY 2 36#define SNAPSHOT_COPY_PRIORITY 2
37 37
38/* 38/*
39 * Each snapshot reserves this many pages for io 39 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
40 */ 40 */
41#define SNAPSHOT_PAGES 256 41#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
42 42
43static struct workqueue_struct *ksnapd; 43static struct workqueue_struct *ksnapd;
44static void flush_queued_bios(struct work_struct *work); 44static void flush_queued_bios(struct work_struct *work);
@@ -536,7 +536,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
536 s->last_percent = 0; 536 s->last_percent = 0;
537 init_rwsem(&s->lock); 537 init_rwsem(&s->lock);
538 spin_lock_init(&s->pe_lock); 538 spin_lock_init(&s->pe_lock);
539 s->table = ti->table; 539 s->ti = ti;
540 540
541 /* Allocate hash table for COW data */ 541 /* Allocate hash table for COW data */
542 if (init_hash_tables(s)) { 542 if (init_hash_tables(s)) {
@@ -558,7 +558,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
558 goto bad4; 558 goto bad4;
559 } 559 }
560 560
561 r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client); 561 r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
562 if (r) { 562 if (r) {
563 ti->error = "Could not create kcopyd client"; 563 ti->error = "Could not create kcopyd client";
564 goto bad5; 564 goto bad5;
@@ -591,7 +591,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
591 return 0; 591 return 0;
592 592
593 bad6: 593 bad6:
594 kcopyd_client_destroy(s->kcopyd_client); 594 dm_kcopyd_client_destroy(s->kcopyd_client);
595 595
596 bad5: 596 bad5:
597 s->store.destroy(&s->store); 597 s->store.destroy(&s->store);
@@ -613,7 +613,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
613 613
614static void __free_exceptions(struct dm_snapshot *s) 614static void __free_exceptions(struct dm_snapshot *s)
615{ 615{
616 kcopyd_client_destroy(s->kcopyd_client); 616 dm_kcopyd_client_destroy(s->kcopyd_client);
617 s->kcopyd_client = NULL; 617 s->kcopyd_client = NULL;
618 618
619 exit_exception_table(&s->pending, pending_cache); 619 exit_exception_table(&s->pending, pending_cache);
@@ -699,7 +699,7 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
699 699
700 s->valid = 0; 700 s->valid = 0;
701 701
702 dm_table_event(s->table); 702 dm_table_event(s->ti->table);
703} 703}
704 704
705static void get_pending_exception(struct dm_snap_pending_exception *pe) 705static void get_pending_exception(struct dm_snap_pending_exception *pe)
@@ -824,7 +824,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
824static void start_copy(struct dm_snap_pending_exception *pe) 824static void start_copy(struct dm_snap_pending_exception *pe)
825{ 825{
826 struct dm_snapshot *s = pe->snap; 826 struct dm_snapshot *s = pe->snap;
827 struct io_region src, dest; 827 struct dm_io_region src, dest;
828 struct block_device *bdev = s->origin->bdev; 828 struct block_device *bdev = s->origin->bdev;
829 sector_t dev_size; 829 sector_t dev_size;
830 830
@@ -839,7 +839,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
839 dest.count = src.count; 839 dest.count = src.count;
840 840
841 /* Hand over to kcopyd */ 841 /* Hand over to kcopyd */
842 kcopyd_copy(s->kcopyd_client, 842 dm_kcopyd_copy(s->kcopyd_client,
843 &src, 1, &dest, 0, copy_callback, pe); 843 &src, 1, &dest, 0, copy_callback, pe);
844} 844}
845 845
@@ -1060,7 +1060,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1060 goto next_snapshot; 1060 goto next_snapshot;
1061 1061
1062 /* Nothing to do if writing beyond end of snapshot */ 1062 /* Nothing to do if writing beyond end of snapshot */
1063 if (bio->bi_sector >= dm_table_get_size(snap->table)) 1063 if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
1064 goto next_snapshot; 1064 goto next_snapshot;
1065 1065
1066 /* 1066 /*
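The new SNAPSHOT_PAGES definition above sizes each snapshot's kcopyd client at 1MB worth of pages instead of a hard-coded 256, so the reservation stays 1MB regardless of page size; the GNU `?:` extension supplies a one-page floor in case the shift ever yields zero. A quick user-space check of the expression, with PAGE_SHIFT values supplied by hand (illustrative only, builds with gcc):

[[
#include <stdio.h>

int main(void)
{
	unsigned long shift;

	/* 12 = 4KB pages, 14 = 16KB pages, 16 = 64KB pages */
	for (shift = 12; shift <= 16; shift += 2) {
		unsigned long pages = ((1UL << 20) >> shift) ? : 1;
		printf("PAGE_SHIFT=%lu -> %lu pages\n", shift, pages);
	}
	return 0;
}
]]

With 4KB pages this prints 256, matching the old constant.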
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 93bce5d49742..24f9fb73b982 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -132,7 +132,7 @@ struct exception_store {
132 132
133struct dm_snapshot { 133struct dm_snapshot {
134 struct rw_semaphore lock; 134 struct rw_semaphore lock;
135 struct dm_table *table; 135 struct dm_target *ti;
136 136
137 struct dm_dev *origin; 137 struct dm_dev *origin;
138 struct dm_dev *cow; 138 struct dm_dev *cow;
@@ -169,7 +169,7 @@ struct dm_snapshot {
169 /* The on disk metadata handler */ 169 /* The on disk metadata handler */
170 struct exception_store store; 170 struct exception_store store;
171 171
172 struct kcopyd_client *kcopyd_client; 172 struct dm_kcopyd_client *kcopyd_client;
173 173
174 /* Queue of snapshot writes for ksnapd to flush */ 174 /* Queue of snapshot writes for ksnapd to flush */
175 struct bio_list queued_bios; 175 struct bio_list queued_bios;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e75b1437b58b..51be53344214 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -245,44 +245,6 @@ int dm_table_create(struct dm_table **result, int mode,
245 return 0; 245 return 0;
246} 246}
247 247
248int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
249{
250 struct dm_table *t;
251 sector_t dev_size = 1;
252 int r;
253
254 /*
255 * Find current size of device.
256 * Default to 1 sector if inactive.
257 */
258 t = dm_get_table(md);
259 if (t) {
260 dev_size = dm_table_get_size(t);
261 dm_table_put(t);
262 }
263
264 r = dm_table_create(&t, FMODE_READ, 1, md);
265 if (r)
266 return r;
267
268 r = dm_table_add_target(t, "error", 0, dev_size, NULL);
269 if (r)
270 goto out;
271
272 r = dm_table_complete(t);
273 if (r)
274 goto out;
275
276 *result = t;
277
278out:
279 if (r)
280 dm_table_put(t);
281
282 return r;
283}
284EXPORT_SYMBOL_GPL(dm_create_error_table);
285
286static void free_devices(struct list_head *devices) 248static void free_devices(struct list_head *devices)
287{ 249{
288 struct list_head *tmp, *next; 250 struct list_head *tmp, *next;
@@ -954,7 +916,7 @@ void dm_table_presuspend_targets(struct dm_table *t)
954 if (!t) 916 if (!t)
955 return; 917 return;
956 918
957 return suspend_targets(t, 0); 919 suspend_targets(t, 0);
958} 920}
959 921
960void dm_table_postsuspend_targets(struct dm_table *t) 922void dm_table_postsuspend_targets(struct dm_table *t)
@@ -962,7 +924,7 @@ void dm_table_postsuspend_targets(struct dm_table *t)
962 if (!t) 924 if (!t)
963 return; 925 return;
964 926
965 return suspend_targets(t, 1); 927 suspend_targets(t, 1);
966} 928}
967 929
968int dm_table_resume_targets(struct dm_table *t) 930int dm_table_resume_targets(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6617ce4af095..372369b1cc20 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -204,6 +204,7 @@ static int (*_inits[])(void) __initdata = {
204 dm_target_init, 204 dm_target_init,
205 dm_linear_init, 205 dm_linear_init,
206 dm_stripe_init, 206 dm_stripe_init,
207 dm_kcopyd_init,
207 dm_interface_init, 208 dm_interface_init,
208}; 209};
209 210
@@ -212,6 +213,7 @@ static void (*_exits[])(void) = {
212 dm_target_exit, 213 dm_target_exit,
213 dm_linear_exit, 214 dm_linear_exit,
214 dm_stripe_exit, 215 dm_stripe_exit,
216 dm_kcopyd_exit,
215 dm_interface_exit, 217 dm_interface_exit,
216}; 218};
217 219
@@ -922,7 +924,7 @@ static void free_minor(int minor)
922/* 924/*
923 * See if the device with a specific minor # is free. 925 * See if the device with a specific minor # is free.
924 */ 926 */
925static int specific_minor(struct mapped_device *md, int minor) 927static int specific_minor(int minor)
926{ 928{
927 int r, m; 929 int r, m;
928 930
@@ -955,7 +957,7 @@ out:
955 return r; 957 return r;
956} 958}
957 959
958static int next_free_minor(struct mapped_device *md, int *minor) 960static int next_free_minor(int *minor)
959{ 961{
960 int r, m; 962 int r, m;
961 963
@@ -966,9 +968,8 @@ static int next_free_minor(struct mapped_device *md, int *minor)
966 spin_lock(&_minor_lock); 968 spin_lock(&_minor_lock);
967 969
968 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m); 970 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
969 if (r) { 971 if (r)
970 goto out; 972 goto out;
971 }
972 973
973 if (m >= (1 << MINORBITS)) { 974 if (m >= (1 << MINORBITS)) {
974 idr_remove(&_minor_idr, m); 975 idr_remove(&_minor_idr, m);
@@ -991,7 +992,7 @@ static struct block_device_operations dm_blk_dops;
991static struct mapped_device *alloc_dev(int minor) 992static struct mapped_device *alloc_dev(int minor)
992{ 993{
993 int r; 994 int r;
994 struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); 995 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
995 void *old_md; 996 void *old_md;
996 997
997 if (!md) { 998 if (!md) {
@@ -1004,13 +1005,12 @@ static struct mapped_device *alloc_dev(int minor)
1004 1005
1005 /* get a minor number for the dev */ 1006 /* get a minor number for the dev */
1006 if (minor == DM_ANY_MINOR) 1007 if (minor == DM_ANY_MINOR)
1007 r = next_free_minor(md, &minor); 1008 r = next_free_minor(&minor);
1008 else 1009 else
1009 r = specific_minor(md, minor); 1010 r = specific_minor(minor);
1010 if (r < 0) 1011 if (r < 0)
1011 goto bad_minor; 1012 goto bad_minor;
1012 1013
1013 memset(md, 0, sizeof(*md));
1014 init_rwsem(&md->io_lock); 1014 init_rwsem(&md->io_lock);
1015 mutex_init(&md->suspend_lock); 1015 mutex_init(&md->suspend_lock);
1016 spin_lock_init(&md->pushback_lock); 1016 spin_lock_init(&md->pushback_lock);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index b4584a39383b..8c03b634e62e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -16,67 +16,6 @@
16#include <linux/blkdev.h> 16#include <linux/blkdev.h>
17#include <linux/hdreg.h> 17#include <linux/hdreg.h>
18 18
19#define DM_NAME "device-mapper"
20
21#define DMERR(f, arg...) \
22 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
23#define DMERR_LIMIT(f, arg...) \
24 do { \
25 if (printk_ratelimit()) \
26 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
27 f "\n", ## arg); \
28 } while (0)
29
30#define DMWARN(f, arg...) \
31 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
32#define DMWARN_LIMIT(f, arg...) \
33 do { \
34 if (printk_ratelimit()) \
35 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
36 f "\n", ## arg); \
37 } while (0)
38
39#define DMINFO(f, arg...) \
40 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
41#define DMINFO_LIMIT(f, arg...) \
42 do { \
43 if (printk_ratelimit()) \
44 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
45 "\n", ## arg); \
46 } while (0)
47
48#ifdef CONFIG_DM_DEBUG
49# define DMDEBUG(f, arg...) \
50 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
51# define DMDEBUG_LIMIT(f, arg...) \
52 do { \
53 if (printk_ratelimit()) \
54 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
55 "\n", ## arg); \
56 } while (0)
57#else
58# define DMDEBUG(f, arg...) do {} while (0)
59# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
60#endif
61
62#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
63 0 : scnprintf(result + sz, maxlen - sz, x))
64
65#define SECTOR_SHIFT 9
66
67/*
68 * Definitions of return values from target end_io function.
69 */
70#define DM_ENDIO_INCOMPLETE 1
71#define DM_ENDIO_REQUEUE 2
72
73/*
74 * Definitions of return values from target map function.
75 */
76#define DM_MAPIO_SUBMITTED 0
77#define DM_MAPIO_REMAPPED 1
78#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
79
80/* 19/*
81 * Suspend feature flags 20 * Suspend feature flags
82 */ 21 */
@@ -136,34 +75,6 @@ static inline int array_too_big(unsigned long fixed, unsigned long obj,
136 return (num > (ULONG_MAX - fixed) / obj); 75 return (num > (ULONG_MAX - fixed) / obj);
137} 76}
138 77
139/*
140 * Ceiling(n / sz)
141 */
142#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
143
144#define dm_sector_div_up(n, sz) ( \
145{ \
146 sector_t _r = ((n) + (sz) - 1); \
147 sector_div(_r, (sz)); \
148 _r; \
149} \
150)
151
152/*
153 * ceiling(n / size) * size
154 */
155#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
156
157static inline sector_t to_sector(unsigned long n)
158{
159 return (n >> 9);
160}
161
162static inline unsigned long to_bytes(sector_t n)
163{
164 return (n << 9);
165}
166
167int dm_split_args(int *argc, char ***argvp, char *input); 78int dm_split_args(int *argc, char ***argvp, char *input);
168 79
169/* 80/*
@@ -189,4 +100,13 @@ int dm_lock_for_deletion(struct mapped_device *md);
189 100
190void dm_kobject_uevent(struct mapped_device *md); 101void dm_kobject_uevent(struct mapped_device *md);
191 102
103/*
104 * Dirty log
105 */
106int dm_dirty_log_init(void);
107void dm_dirty_log_exit(void);
108
109int dm_kcopyd_init(void);
110void dm_kcopyd_exit(void);
111
192#endif 112#endif
diff --git a/drivers/md/kcopyd.h b/drivers/md/kcopyd.h
deleted file mode 100644
index 4845f2a0c676..000000000000
--- a/drivers/md/kcopyd.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (C) 2001 Sistina Software
3 *
4 * This file is released under the GPL.
5 *
6 * Kcopyd provides a simple interface for copying an area of one
7 * block-device to one or more other block-devices, with an asynchronous
8 * completion notification.
9 */
10
11#ifndef DM_KCOPYD_H
12#define DM_KCOPYD_H
13
14#include "dm-io.h"
15
16/* FIXME: make this configurable */
17#define KCOPYD_MAX_REGIONS 8
18
19#define KCOPYD_IGNORE_ERROR 1
20
21/*
22 * To use kcopyd you must first create a kcopyd client object.
23 */
24struct kcopyd_client;
25int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result);
26void kcopyd_client_destroy(struct kcopyd_client *kc);
27
28/*
29 * Submit a copy job to kcopyd. This is built on top of the
30 * previous three fns.
31 *
32 * read_err is a boolean,
33 * write_err is a bitset, with 1 bit for each destination region
34 */
35typedef void (*kcopyd_notify_fn)(int read_err, unsigned long write_err,
36 void *context);
37
38int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
39 unsigned int num_dests, struct io_region *dests,
40 unsigned int flags, kcopyd_notify_fn fn, void *context);
41
42#endif
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index cb784579956b..ad3b787479a4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2001 Sistina Software (UK) Limited. 2 * Copyright (C) 2001 Sistina Software (UK) Limited.
3 * Copyright (C) 2004 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the LGPL. 5 * This file is released under the LGPL.
6 */ 6 */
@@ -10,6 +10,8 @@
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13#include <linux/bio.h>
14
13struct dm_target; 15struct dm_target;
14struct dm_table; 16struct dm_table;
15struct dm_dev; 17struct dm_dev;
@@ -250,11 +252,97 @@ void dm_table_event(struct dm_table *t);
250 */ 252 */
251int dm_swap_table(struct mapped_device *md, struct dm_table *t); 253int dm_swap_table(struct mapped_device *md, struct dm_table *t);
252 254
255/*-----------------------------------------------------------------
256 * Macros.
257 *---------------------------------------------------------------*/
258#define DM_NAME "device-mapper"
259
260#define DMERR(f, arg...) \
261 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
262#define DMERR_LIMIT(f, arg...) \
263 do { \
264 if (printk_ratelimit()) \
265 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
266 f "\n", ## arg); \
267 } while (0)
268
269#define DMWARN(f, arg...) \
270 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
271#define DMWARN_LIMIT(f, arg...) \
272 do { \
273 if (printk_ratelimit()) \
274 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
275 f "\n", ## arg); \
276 } while (0)
277
278#define DMINFO(f, arg...) \
279 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
280#define DMINFO_LIMIT(f, arg...) \
281 do { \
282 if (printk_ratelimit()) \
283 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
284 "\n", ## arg); \
285 } while (0)
286
287#ifdef CONFIG_DM_DEBUG
288# define DMDEBUG(f, arg...) \
289 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
290# define DMDEBUG_LIMIT(f, arg...) \
291 do { \
292 if (printk_ratelimit()) \
293 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
294 "\n", ## arg); \
295 } while (0)
296#else
297# define DMDEBUG(f, arg...) do {} while (0)
298# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
299#endif
300
301#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
302 0 : scnprintf(result + sz, maxlen - sz, x))
303
304#define SECTOR_SHIFT 9
305
306/*
307 * Definitions of return values from target end_io function.
308 */
309#define DM_ENDIO_INCOMPLETE 1
310#define DM_ENDIO_REQUEUE 2
311
312/*
313 * Definitions of return values from target map function.
314 */
315#define DM_MAPIO_SUBMITTED 0
316#define DM_MAPIO_REMAPPED 1
317#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
318
319/*
320 * Ceiling(n / sz)
321 */
322#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
323
324#define dm_sector_div_up(n, sz) ( \
325{ \
326 sector_t _r = ((n) + (sz) - 1); \
327 sector_div(_r, (sz)); \
328 _r; \
329} \
330)
331
253/* 332/*
254 * Prepare a table for a device that will error all I/O. 333 * ceiling(n / size) * size
255 * To make it active, call dm_suspend(), dm_swap_table() then dm_resume().
256 */ 334 */
257int dm_create_error_table(struct dm_table **result, struct mapped_device *md); 335#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
336
337static inline sector_t to_sector(unsigned long n)
338{
339 return (n >> SECTOR_SHIFT);
340}
341
342static inline unsigned long to_bytes(sector_t n)
343{
344 return (n << SECTOR_SHIFT);
345}
258 346
259#endif /* __KERNEL__ */ 347#endif /* __KERNEL__ */
260#endif /* _LINUX_DEVICE_MAPPER_H */ 348#endif /* _LINUX_DEVICE_MAPPER_H */
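Moving these macros into include/linux/device-mapper.h lets targets built outside drivers/md use the DM logging helpers and sector arithmetic without reaching into the private dm.h. A minimal sketch of the intended usage (target name and sizes invented):

[[
#define DM_MSG_PREFIX "example"	/* expanded at each DM*() use site */

#include <linux/device-mapper.h>

static void example_report(unsigned long bytes)
{
	sector_t len = to_sector(bytes);	/* bytes >> SECTOR_SHIFT */

	DMINFO("%lu bytes is %llu sectors",
	       bytes, (unsigned long long) len);

	/* ceil(len / 8): dm_div_up rounds the division up */
	DMDEBUG("8-sector chunks needed: %llu",
		(unsigned long long) dm_div_up(len, 8));
}
]]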
diff --git a/drivers/md/dm-log.h b/include/linux/dm-dirty-log.h
index 3fae87eb5963..600c5fb2daad 100644
--- a/drivers/md/dm-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -1,52 +1,56 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software 2 * Copyright (C) 2003 Sistina Software
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * Device-Mapper dirty region log.
3 * 6 *
4 * This file is released under the LGPL. 7 * This file is released under the LGPL.
5 */ 8 */
6 9
7#ifndef DM_DIRTY_LOG 10#ifndef _LINUX_DM_DIRTY_LOG
8#define DM_DIRTY_LOG 11#define _LINUX_DM_DIRTY_LOG
12
13#ifdef __KERNEL__
9 14
10#include "dm.h" 15#include <linux/types.h>
16#include <linux/device-mapper.h>
11 17
12typedef sector_t region_t; 18typedef sector_t region_t;
13 19
14struct dirty_log_type; 20struct dm_dirty_log_type;
15 21
16struct dirty_log { 22struct dm_dirty_log {
17 struct dirty_log_type *type; 23 struct dm_dirty_log_type *type;
18 void *context; 24 void *context;
19}; 25};
20 26
21struct dirty_log_type { 27struct dm_dirty_log_type {
22 struct list_head list;
23 const char *name; 28 const char *name;
24 struct module *module; 29 struct module *module;
25 unsigned int use_count;
26 30
27 int (*ctr)(struct dirty_log *log, struct dm_target *ti, 31 int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
28 unsigned int argc, char **argv); 32 unsigned argc, char **argv);
29 void (*dtr)(struct dirty_log *log); 33 void (*dtr)(struct dm_dirty_log *log);
30 34
31 /* 35 /*
32 * There are times when we don't want the log to touch 36 * There are times when we don't want the log to touch
33 * the disk. 37 * the disk.
34 */ 38 */
35 int (*presuspend)(struct dirty_log *log); 39 int (*presuspend)(struct dm_dirty_log *log);
36 int (*postsuspend)(struct dirty_log *log); 40 int (*postsuspend)(struct dm_dirty_log *log);
37 int (*resume)(struct dirty_log *log); 41 int (*resume)(struct dm_dirty_log *log);
38 42
39 /* 43 /*
40 * Retrieves the smallest size of region that the log can 44 * Retrieves the smallest size of region that the log can
41 * deal with. 45 * deal with.
42 */ 46 */
43 uint32_t (*get_region_size)(struct dirty_log *log); 47 uint32_t (*get_region_size)(struct dm_dirty_log *log);
44 48
45 /* 49 /*
46 * A predicate to say whether a region is clean or not. 50 * A predicate to say whether a region is clean or not.
47 * May block. 51 * May block.
48 */ 52 */
49 int (*is_clean)(struct dirty_log *log, region_t region); 53 int (*is_clean)(struct dm_dirty_log *log, region_t region);
50 54
51 /* 55 /*
52 * Returns: 0, 1, -EWOULDBLOCK, < 0 56 * Returns: 0, 1, -EWOULDBLOCK, < 0
@@ -59,13 +63,14 @@ struct dirty_log_type {
59 * passed to a daemon to deal with, since a daemon is 63 * passed to a daemon to deal with, since a daemon is
60 * allowed to block. 64 * allowed to block.
61 */ 65 */
62 int (*in_sync)(struct dirty_log *log, region_t region, int can_block); 66 int (*in_sync)(struct dm_dirty_log *log, region_t region,
67 int can_block);
63 68
64 /* 69 /*
65 * Flush the current log state (eg, to disk). This 70 * Flush the current log state (eg, to disk). This
66 * function may block. 71 * function may block.
67 */ 72 */
68 int (*flush)(struct dirty_log *log); 73 int (*flush)(struct dm_dirty_log *log);
69 74
70 /* 75 /*
71 * Mark an area as clean or dirty. These functions may 76 * Mark an area as clean or dirty. These functions may
@@ -73,8 +78,8 @@ struct dirty_log_type {
73 * be extremely rare (eg, allocating another chunk of 78 * be extremely rare (eg, allocating another chunk of
74 * memory for some reason). 79 * memory for some reason).
75 */ 80 */
76 void (*mark_region)(struct dirty_log *log, region_t region); 81 void (*mark_region)(struct dm_dirty_log *log, region_t region);
77 void (*clear_region)(struct dirty_log *log, region_t region); 82 void (*clear_region)(struct dm_dirty_log *log, region_t region);
78 83
79 /* 84 /*
80 * Returns: <0 (error), 0 (no region), 1 (region) 85 * Returns: <0 (error), 0 (no region), 1 (region)
@@ -88,44 +93,39 @@ struct dirty_log_type {
88 * tells you if an area is synchronised, the other 93 * tells you if an area is synchronised, the other
89 * assigns recovery work. 94 * assigns recovery work.
90 */ 95 */
91 int (*get_resync_work)(struct dirty_log *log, region_t *region); 96 int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
92 97
93 /* 98 /*
94 * This notifies the log that the resync status of a region 99 * This notifies the log that the resync status of a region
95 * has changed. It also clears the region from the recovering 100 * has changed. It also clears the region from the recovering
96 * list (if present). 101 * list (if present).
97 */ 102 */
98 void (*set_region_sync)(struct dirty_log *log, 103 void (*set_region_sync)(struct dm_dirty_log *log,
99 region_t region, int in_sync); 104 region_t region, int in_sync);
100 105
101 /* 106 /*
102 * Returns the number of regions that are in sync. 107 * Returns the number of regions that are in sync.
103 */ 108 */
104 region_t (*get_sync_count)(struct dirty_log *log); 109 region_t (*get_sync_count)(struct dm_dirty_log *log);
105 110
106 /* 111 /*
107 * Support function for mirror status requests. 112 * Support function for mirror status requests.
108 */ 113 */
109 int (*status)(struct dirty_log *log, status_type_t status_type, 114 int (*status)(struct dm_dirty_log *log, status_type_t status_type,
110 char *result, unsigned int maxlen); 115 char *result, unsigned maxlen);
111}; 116};
112 117
113int dm_register_dirty_log_type(struct dirty_log_type *type); 118int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
114int dm_unregister_dirty_log_type(struct dirty_log_type *type); 119int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
115
116 120
117/* 121/*
118 * Make sure you use these two functions, rather than calling 122 * Make sure you use these two functions, rather than calling
119 * type->constructor/destructor() directly. 123 * type->constructor/destructor() directly.
120 */ 124 */
121struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti, 125struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
122 unsigned int argc, char **argv); 126 struct dm_target *ti,
123void dm_destroy_dirty_log(struct dirty_log *log); 127 unsigned argc, char **argv);
124 128void dm_dirty_log_destroy(struct dm_dirty_log *log);
125/*
126 * init/exit functions.
127 */
128int dm_dirty_log_init(void);
129void dm_dirty_log_exit(void);
130 129
131#endif 130#endif /* __KERNEL__ */
131#endif /* _LINUX_DM_DIRTY_LOG_H */
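With the header now public, a log implementation registers a dm_dirty_log_type under the reversed dm_dirty_log_* names; note that the list head and use_count fields have been dropped from the public struct, with module pinning handled through type->module. A skeleton under the new interface (everything named example_* is illustrative, and a real type must fill in all the callbacks, not just the constructor pair):

[[
#include <linux/module.h>
#include <linux/dm-dirty-log.h>

static int example_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		       unsigned argc, char **argv)
{
	log->context = NULL;	/* per-log private state would live here */
	return 0;
}

static void example_dtr(struct dm_dirty_log *log)
{
	/* free whatever ctr allocated into log->context */
}

static struct dm_dirty_log_type example_log_type = {
	.name = "example",
	.module = THIS_MODULE,
	.ctr = example_ctr,
	.dtr = example_dtr,
	/* is_clean, in_sync, flush, mark/clear_region, ... omitted */
};

static int __init example_log_init(void)
{
	return dm_dirty_log_type_register(&example_log_type);
}

static void __exit example_log_exit(void)
{
	dm_dirty_log_type_unregister(&example_log_type);
}

module_init(example_log_init);
module_exit(example_log_exit);
MODULE_LICENSE("GPL");
]]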
diff --git a/drivers/md/dm-io.h b/include/linux/dm-io.h
index f647e2cceaa6..b6bf17ee2f61 100644
--- a/drivers/md/dm-io.h
+++ b/include/linux/dm-io.h
@@ -1,15 +1,20 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software 2 * Copyright (C) 2003 Sistina Software
3 * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
4 *
5 * Device-Mapper low-level I/O.
3 * 6 *
4 * This file is released under the GPL. 7 * This file is released under the GPL.
5 */ 8 */
6 9
7#ifndef _DM_IO_H 10#ifndef _LINUX_DM_IO_H
8#define _DM_IO_H 11#define _LINUX_DM_IO_H
12
13#ifdef __KERNEL__
9 14
10#include "dm.h" 15#include <linux/types.h>
11 16
12struct io_region { 17struct dm_io_region {
13 struct block_device *bdev; 18 struct block_device *bdev;
14 sector_t sector; 19 sector_t sector;
15 sector_t count; /* If this is zero the region is ignored. */ 20 sector_t count; /* If this is zero the region is ignored. */
@@ -74,6 +79,7 @@ void dm_io_client_destroy(struct dm_io_client *client);
74 * error occurred doing io to the corresponding region. 79 * error occurred doing io to the corresponding region.
75 */ 80 */
76int dm_io(struct dm_io_request *io_req, unsigned num_regions, 81int dm_io(struct dm_io_request *io_req, unsigned num_regions,
77 struct io_region *region, unsigned long *sync_error_bits); 82 struct dm_io_region *region, unsigned long *sync_error_bits);
78 83
79#endif 84#endif /* __KERNEL__ */
85#endif /* _LINUX_DM_IO_H */
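The dm-io interface itself is unchanged by the move: a caller fills in a dm_io_request, points it at an array of dm_io_region, and a NULL notify function makes dm_io() synchronous, reporting per-region failures through the sync_error_bits mask. A hedged sketch of a one-region synchronous read into a kernel buffer (the bdev, buffer and helper name are assumptions):

[[
#include <linux/fs.h>		/* READ */
#include <linux/dm-io.h>

/* Read 8 sectors from the start of bdev into buf; returns 0 or -Exxx. */
static int example_read(struct dm_io_client *client,
			struct block_device *bdev, void *buf)
{
	unsigned long error_bits = 0;
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = 0,
		.count = 8,	/* a zero count makes dm-io skip the region */
	};
	struct dm_io_request req = {
		.bi_rw = READ,
		.mem.type = DM_IO_KMEM,	/* plain kernel buffer */
		.mem.ptr.addr = buf,
		.notify.fn = NULL,	/* NULL callback -> synchronous */
		.client = client,
	};

	return dm_io(&req, 1, &where, &error_bits);
}
]]

The client comes from dm_io_client_create() and is released with dm_io_client_destroy(), as the declarations kept in this header show.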
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
new file mode 100644
index 000000000000..5db216311695
--- /dev/null
+++ b/include/linux/dm-kcopyd.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2001 - 2003 Sistina Software
3 * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
4 *
5 * kcopyd provides a simple interface for copying an area of one
6 * block-device to one or more other block-devices, either
7 * synchronously or with an asynchronous completion notification.
8 *
9 * This file is released under the GPL.
10 */
11
12#ifndef _LINUX_DM_KCOPYD_H
13#define _LINUX_DM_KCOPYD_H
14
15#ifdef __KERNEL__
16
17#include <linux/dm-io.h>
18
19/* FIXME: make this configurable */
20#define DM_KCOPYD_MAX_REGIONS 8
21
22#define DM_KCOPYD_IGNORE_ERROR 1
23
24/*
25 * To use kcopyd you must first create a dm_kcopyd_client object.
26 */
27struct dm_kcopyd_client;
28int dm_kcopyd_client_create(unsigned num_pages,
29 struct dm_kcopyd_client **result);
30void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
31
32/*
33 * Submit a copy job to kcopyd, built on top of the client
34 * functions above.
35 *
36 * read_err is a boolean,
37 * write_err is a bitset, with 1 bit for each destination region
38 */
39typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
40 void *context);
41
42int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
43 unsigned num_dests, struct dm_io_region *dests,
44 unsigned flags, dm_kcopyd_notify_fn fn, void *context);
45
46#endif /* __KERNEL__ */
47#endif /* _LINUX_DM_KCOPYD_H */
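Consumers of the new header follow the same shape as dm-snap's start_copy() above: create a client sized in pages, then submit source and destination regions with a completion callback. A minimal sketch that waits synchronously on the callback (region geometry and names are invented):

[[
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/dm-kcopyd.h>

struct copy_result {
	struct completion done;
	int read_err;			/* boolean */
	unsigned long write_err;	/* one bit per destination region */
};

static void copy_done(int read_err, unsigned long write_err, void *context)
{
	struct copy_result *res = context;

	res->read_err = read_err;
	res->write_err = write_err;
	complete(&res->done);
}

/* Copy 'count' sectors from the start of src to the start of dst. */
static int example_copy(struct block_device *src, struct block_device *dst,
			sector_t count)
{
	struct dm_kcopyd_client *kc;
	struct dm_io_region from, to;
	struct copy_result res;
	int r;

	r = dm_kcopyd_client_create(64, &kc);	/* 64 pages, as dm-raid1 uses */
	if (r)
		return r;

	from.bdev = src;
	from.sector = 0;
	from.count = count;
	to = from;
	to.bdev = dst;

	init_completion(&res.done);
	r = dm_kcopyd_copy(kc, &from, 1, &to, 0 /* flags */, copy_done, &res);
	if (!r) {
		wait_for_completion(&res.done);
		if (res.read_err || res.write_err)
			r = -EIO;
	}

	dm_kcopyd_client_destroy(kc);
	return r;
}
]]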