author	Mikulas Patocka <mpatocka@redhat.com>	2008-10-10 08:37:01 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2008-10-10 08:37:01 -0400
commit	fd14acf6fc9f4635be201960004d847b14236a20 (patch)
tree	5c856ce7353374d784e7546769b74a3eea724c54 /drivers/md
parent	a481db784682b33d078c7bf8a1d0581dc09946c1 (diff)
dm exception store: use chunk_t for_areas
Change uint32_t into chunk_t to remove the 32-bit limitation on the number
of chunks on systems with 64-bit sector numbers.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
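Why widening to chunk_t removes the limit: chunk_t follows the width of the
block layer's sector numbers. A minimal sketch of the type relationship
assumed here (definitions as in dm-snap.h and linux/types.h of this kernel
generation; simplified, not part of the patch):

	/* drivers/md/dm-snap.h (assumed): chunks are counted in sector_t units. */
	typedef sector_t chunk_t;

	/* include/linux/types.h (assumed, simplified): sector_t is 64-bit on
	 * 64-bit systems, and on 32-bit systems only with CONFIG_LBD enabled. */
	#ifdef CONFIG_LBD
	typedef u64 sector_t;
	#else
	typedef unsigned long sector_t;
	#endif

With current_area and next_free held as chunk_t rather than uint32_t, the
number of addressable chunks is no longer capped at 2^32 wherever sector
numbers are 64-bit.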
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-exception-store.c	18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 824cf31967c5..769ab677f8e0 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -108,12 +108,12 @@ struct pstore {
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
-	uint32_t current_area;
+	chunk_t current_area;
 
 	/*
 	 * The next free chunk for an exception.
 	 */
-	uint32_t next_free;
+	chunk_t next_free;
 
 	/*
 	 * The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
 {
 	struct dm_io_region where = {
 		.bdev = ps->snap->cow->bdev,
@@ -220,10 +220,10 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
  * Read or write a metadata area. Remembering to skip the first
  * chunk which holds the header.
  */
-static int area_io(struct pstore *ps, uint32_t area, int rw)
+static int area_io(struct pstore *ps, chunk_t area, int rw)
 {
 	int r;
-	uint32_t chunk;
+	chunk_t chunk;
 
 	chunk = area_location(ps, area);
 
@@ -235,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
 	return 0;
 }
 
-static int zero_area(struct pstore *ps, uint32_t area)
+static int zero_area(struct pstore *ps, chunk_t area)
 {
 	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
 	return area_io(ps, area, WRITE);
@@ -411,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)
 
 static int read_exceptions(struct pstore *ps)
 {
-	uint32_t area;
+	chunk_t area;
 	int r, full = 1;
 
 	/*
@@ -524,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
 {
 	struct pstore *ps = get_info(store);
 	uint32_t stride;
+	chunk_t next_free;
 	sector_t size = get_dev_size(store->snap->cow->bdev);
 
 	/* Is there enough room ? */
@@ -537,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
 	 * into account the location of the metadata chunks.
 	 */
 	stride = (ps->exceptions_per_area + 1);
-	if ((++ps->next_free % stride) == 1)
+	next_free = ++ps->next_free;
+	if (sector_div(next_free, stride) == 1)
 		ps->next_free++;
 
 	atomic_inc(&ps->pending_count);
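A note on that last hunk: with next_free now a chunk_t, a plain '%' would be a
64-bit modulo, which 32-bit architectures cannot do directly without a libgcc
helper the kernel does not link, hence the switch to sector_div(). sector_div(n, base)
divides n in place and returns the remainder. A sketch of the same
skip-the-metadata-chunk logic as a standalone helper (the helper name and the
header noted for sector_div() are illustrative assumptions, not from the patch):

	#include <linux/kernel.h>	/* sector_div() in kernels of this era (assumed) */

	/* Illustrative helper mirroring the persistent_prepare() logic above. */
	static void skip_metadata_chunk(struct pstore *ps)
	{
		uint32_t stride = ps->exceptions_per_area + 1;
		chunk_t next_free = ++ps->next_free;	/* copy: sector_div() modifies its first argument */

		/*
		 * sector_div() leaves next_free / stride in next_free and returns
		 * next_free % stride; a remainder of 1 means next_free has landed
		 * on a metadata chunk, so step past it.
		 */
		if (sector_div(next_free, stride) == 1)
			ps->next_free++;
	}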