author    Mikulas Patocka <mpatocka@redhat.com>    2011-08-02 07:32:01 -0400
committer Alasdair G Kergon <agk@redhat.com>       2011-08-02 07:32:01 -0400
commit    bb91bc7bacb906c9f3a9b22744c53fa7564b51ba (patch)
tree      8acd6ee46ab5a557afcd1c491b55a68830252301 /drivers/md/dm-io.c
parent    286f367dad40beb3234a18c17391d03ba939a7f3 (diff)
dm io: flush cpu cache with vmapped io
For normal kernel pages, the CPU cache is synchronized by the DMA layer. However, this is not done for pages allocated with vmalloc. If we do I/O to or from vmalloc'ed pages, we must synchronize the CPU cache explicitly.

Prior to doing I/O on a vmalloc'ed page we must call flush_kernel_vmap_range to write back dirty cache lines covering the virtual address range. After a read has finished we must call invalidate_kernel_vmap_range to invalidate the cache over that range, so that subsequent accesses through the virtual address return the newly read data and not stale data from the CPU cache.

This patch fixes metadata corruption on dm-snapshots on PA-RISC and possibly other architectures with virtually indexed caches.

Cc: stable <stable@kernel.org>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
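For illustration, a minimal sketch of the cache-maintenance pattern the patch applies. flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are the real kernel helpers (generic stubs live in linux/highmem.h; architectures that need them provide real implementations); do_block_io() is a hypothetical stand-in for submitting the block I/O and waiting for it to complete:

	#include <linux/vmalloc.h>
	#include <linux/highmem.h>	/* flush/invalidate_kernel_vmap_range() */

	/* Hypothetical helper: read 'size' bytes from disk into a
	 * vmalloc'ed buffer, with explicit cache maintenance. */
	static int read_into_vmalloc_buf(void *buf, int size)
	{
		/* Before any I/O: write back dirty cache lines covering
		 * the vmalloc'ed virtual range so the device sees the
		 * current contents of the buffer. */
		flush_kernel_vmap_range(buf, size);

		do_block_io(READ, buf, size);	/* hypothetical */

		/* After the read: discard cache lines over the same range
		 * so loads through the vmalloc alias return the freshly
		 * read data, not stale lines cached under that virtual
		 * address. */
		invalidate_kernel_vmap_range(buf, size);
		return 0;
	}

Both calls take the vmalloc'ed virtual address, not the underlying physical pages, because on VIVT machines the cache lines are indexed by exactly that alias.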
Diffstat (limited to 'drivers/md/dm-io.c')
-rw-r--r--    drivers/md/dm-io.c    29
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2067288f61f9..ad2eba40e319 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -38,6 +38,8 @@ struct io {
 	struct dm_io_client *client;
 	io_notify_fn callback;
 	void *context;
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 } __attribute__((aligned(DM_IO_MAX_REGIONS)));
 
 static struct kmem_cache *_dm_io_cache;
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error)
 		set_bit(region, &io->error_bits);
 
 	if (atomic_dec_and_test(&io->count)) {
+		if (io->vma_invalidate_size)
+			invalidate_kernel_vmap_range(io->vma_invalidate_address,
+						     io->vma_invalidate_size);
+
 		if (io->sleeper)
 			wake_up_process(io->sleeper);
 
@@ -159,6 +165,9 @@ struct dpages {
 
 	unsigned context_u;
 	void *context_ptr;
+
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 };
 
 /*
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 	io->sleeper = current;
 	io->client = client;
 
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
+
 	dispatch_io(rw, num_regions, where, dp, io, 1);
 
 	while (1) {
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 	io->callback = fn;
 	io->context = context;
 
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
+
 	dispatch_io(rw, num_regions, where, dp, io, 0);
 	return 0;
 }
 
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+		   unsigned long size)
 {
 	/* Set up dpages based on memory type */
+
+	dp->vma_invalidate_address = NULL;
+	dp->vma_invalidate_size = 0;
+
 	switch (io_req->mem.type) {
 	case DM_IO_PAGE_LIST:
 		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 		break;
 
 	case DM_IO_VMA:
+		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
+		if ((io_req->bi_rw & RW_MASK) == READ) {
+			dp->vma_invalidate_address = io_req->mem.ptr.vma;
+			dp->vma_invalidate_size = size;
+		}
 		vm_dp_init(dp, io_req->mem.ptr.vma);
 		break;
 
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	int r;
 	struct dpages dp;
 
-	r = dp_init(io_req, &dp);
+	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
 	if (r)
 		return r;
 
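For context, dm-io callers reach the new code through DM_IO_VMA memory. A hedged sketch of such a caller (bdev, metadata_buf and client are hypothetical names; dm_io(), struct dm_io_request, struct dm_io_region and DM_IO_VMA are the existing dm-io API from linux/dm-io.h):

	struct dm_io_region where = {
		.bdev   = bdev,			/* hypothetical target device */
		.sector = 0,
		.count  = 8,			/* 8 sectors == 4 KiB */
	};
	struct dm_io_request io_req = {
		.bi_rw       = READ,
		.mem.type    = DM_IO_VMA,
		.mem.ptr.vma = metadata_buf,	/* vmalloc'ed buffer */
		.notify.fn   = NULL,		/* NULL => synchronous dm_io() */
		.client      = client,		/* from dm_io_client_create() */
	};
	int r = dm_io(&io_req, 1, &where, NULL);

With the patch applied, dp_init() flushes the CPU cache over metadata_buf (where->count << SECTOR_SHIFT bytes) before the I/O is dispatched, and because this is a READ, dec_count() invalidates the same range when the last region completes. This is the path through which dm-snapshot metadata kept in vmalloc'ed memory was being corrupted on PA-RISC.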