diff options
author:    Alasdair G Kergon <agk@redhat.com>  2011-08-02 07:32:01 -0400
committer: Alasdair G Kergon <agk@redhat.com>  2011-08-02 07:32:01 -0400
commit:    283a8328ca5b987e547848de8ff0e28edcfb9e08 (patch)
tree:      b500dbe90c0ebad2ff96ba1b7f0db16089bb3560 /drivers/md
parent:    d15b774c2920d55e3d58275c97fbe3adc3afde38 (diff)
dm: suppress endian warnings
Suppress sparse warnings about cpu_to_le32() by using __le32 types for
on-disk data etc.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-crypt.c           |  6 |
-rw-r--r-- | drivers/md/dm-log.c             | 20 |
-rw-r--r-- | drivers/md/dm-snap-persistent.c | 71 |
3 files changed, 54 insertions, 43 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bae6c4e23d3f..f5406766ece3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -239,7 +239,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, | |||
239 | struct dm_crypt_request *dmreq) | 239 | struct dm_crypt_request *dmreq) |
240 | { | 240 | { |
241 | memset(iv, 0, cc->iv_size); | 241 | memset(iv, 0, cc->iv_size); |
242 | *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); | 242 | *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); |
243 | 243 | ||
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
@@ -248,7 +248,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, | |||
248 | struct dm_crypt_request *dmreq) | 248 | struct dm_crypt_request *dmreq) |
249 | { | 249 | { |
250 | memset(iv, 0, cc->iv_size); | 250 | memset(iv, 0, cc->iv_size); |
251 | *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); | 251 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
252 | 252 | ||
253 | return 0; | 253 | return 0; |
254 | } | 254 | } |
@@ -415,7 +415,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, | |||
415 | struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; | 415 | struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; |
416 | 416 | ||
417 | memset(iv, 0, cc->iv_size); | 417 | memset(iv, 0, cc->iv_size); |
418 | *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); | 418 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
419 | crypto_cipher_encrypt_one(essiv_tfm, iv, iv); | 419 | crypto_cipher_encrypt_one(essiv_tfm, iv, iv); |
420 | 420 | ||
421 | return 0; | 421 | return 0; |
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 948e3f4925bf..5f06fb687408 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy); | |||
197 | #define MIRROR_DISK_VERSION 2 | 197 | #define MIRROR_DISK_VERSION 2 |
198 | #define LOG_OFFSET 2 | 198 | #define LOG_OFFSET 2 |
199 | 199 | ||
200 | struct log_header { | 200 | struct log_header_disk { |
201 | uint32_t magic; | 201 | __le32 magic; |
202 | 202 | ||
203 | /* | 203 | /* |
204 | * Simple, incrementing version. no backward | 204 | * Simple, incrementing version. no backward |
205 | * compatibility. | 205 | * compatibility. |
206 | */ | 206 | */ |
207 | __le32 version; | ||
208 | __le64 nr_regions; | ||
209 | } __packed; | ||
210 | |||
211 | struct log_header_core { | ||
212 | uint32_t magic; | ||
207 | uint32_t version; | 213 | uint32_t version; |
208 | sector_t nr_regions; | 214 | uint64_t nr_regions; |
209 | }; | 215 | }; |
210 | 216 | ||
211 | struct log_c { | 217 | struct log_c { |
@@ -239,10 +245,10 @@ struct log_c { | |||
239 | int log_dev_failed; | 245 | int log_dev_failed; |
240 | int log_dev_flush_failed; | 246 | int log_dev_flush_failed; |
241 | struct dm_dev *log_dev; | 247 | struct dm_dev *log_dev; |
242 | struct log_header header; | 248 | struct log_header_core header; |
243 | 249 | ||
244 | struct dm_io_region header_location; | 250 | struct dm_io_region header_location; |
245 | struct log_header *disk_header; | 251 | struct log_header_disk *disk_header; |
246 | }; | 252 | }; |
247 | 253 | ||
248 | /* | 254 | /* |
@@ -271,14 +277,14 @@ static inline void log_clear_bit(struct log_c *l, | |||
271 | /*---------------------------------------------------------------- | 277 | /*---------------------------------------------------------------- |
272 | * Header IO | 278 | * Header IO |
273 | *--------------------------------------------------------------*/ | 279 | *--------------------------------------------------------------*/ |
274 | static void header_to_disk(struct log_header *core, struct log_header *disk) | 280 | static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk) |
275 | { | 281 | { |
276 | disk->magic = cpu_to_le32(core->magic); | 282 | disk->magic = cpu_to_le32(core->magic); |
277 | disk->version = cpu_to_le32(core->version); | 283 | disk->version = cpu_to_le32(core->version); |
278 | disk->nr_regions = cpu_to_le64(core->nr_regions); | 284 | disk->nr_regions = cpu_to_le64(core->nr_regions); |
279 | } | 285 | } |
280 | 286 | ||
281 | static void header_from_disk(struct log_header *core, struct log_header *disk) | 287 | static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk) |
282 | { | 288 | { |
283 | core->magic = le32_to_cpu(disk->magic); | 289 | core->magic = le32_to_cpu(disk->magic); |
284 | core->version = le32_to_cpu(disk->version); | 290 | core->version = le32_to_cpu(disk->version); |
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e4ecadf0548a..39becbec4dfe 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -58,25 +58,30 @@ | |||
58 | #define NUM_SNAPSHOT_HDR_CHUNKS 1 | 58 | #define NUM_SNAPSHOT_HDR_CHUNKS 1 |
59 | 59 | ||
60 | struct disk_header { | 60 | struct disk_header { |
61 | uint32_t magic; | 61 | __le32 magic; |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * Is this snapshot valid. There is no way of recovering | 64 | * Is this snapshot valid. There is no way of recovering |
65 | * an invalid snapshot. | 65 | * an invalid snapshot. |
66 | */ | 66 | */ |
67 | uint32_t valid; | 67 | __le32 valid; |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * Simple, incrementing version. no backward | 70 | * Simple, incrementing version. no backward |
71 | * compatibility. | 71 | * compatibility. |
72 | */ | 72 | */ |
73 | uint32_t version; | 73 | __le32 version; |
74 | 74 | ||
75 | /* In sectors */ | 75 | /* In sectors */ |
76 | uint32_t chunk_size; | 76 | __le32 chunk_size; |
77 | }; | 77 | } __packed; |
78 | 78 | ||
79 | struct disk_exception { | 79 | struct disk_exception { |
80 | __le64 old_chunk; | ||
81 | __le64 new_chunk; | ||
82 | } __packed; | ||
83 | |||
84 | struct core_exception { | ||
80 | uint64_t old_chunk; | 85 | uint64_t old_chunk; |
81 | uint64_t new_chunk; | 86 | uint64_t new_chunk; |
82 | }; | 87 | }; |
@@ -396,32 +401,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) | |||
396 | } | 401 | } |
397 | 402 | ||
398 | static void read_exception(struct pstore *ps, | 403 | static void read_exception(struct pstore *ps, |
399 | uint32_t index, struct disk_exception *result) | 404 | uint32_t index, struct core_exception *result) |
400 | { | 405 | { |
401 | struct disk_exception *e = get_exception(ps, index); | 406 | struct disk_exception *de = get_exception(ps, index); |
402 | 407 | ||
403 | /* copy it */ | 408 | /* copy it */ |
404 | result->old_chunk = le64_to_cpu(e->old_chunk); | 409 | result->old_chunk = le64_to_cpu(de->old_chunk); |
405 | result->new_chunk = le64_to_cpu(e->new_chunk); | 410 | result->new_chunk = le64_to_cpu(de->new_chunk); |
406 | } | 411 | } |
407 | 412 | ||
408 | static void write_exception(struct pstore *ps, | 413 | static void write_exception(struct pstore *ps, |
409 | uint32_t index, struct disk_exception *de) | 414 | uint32_t index, struct core_exception *e) |
410 | { | 415 | { |
411 | struct disk_exception *e = get_exception(ps, index); | 416 | struct disk_exception *de = get_exception(ps, index); |
412 | 417 | ||
413 | /* copy it */ | 418 | /* copy it */ |
414 | e->old_chunk = cpu_to_le64(de->old_chunk); | 419 | de->old_chunk = cpu_to_le64(e->old_chunk); |
415 | e->new_chunk = cpu_to_le64(de->new_chunk); | 420 | de->new_chunk = cpu_to_le64(e->new_chunk); |
416 | } | 421 | } |
417 | 422 | ||
418 | static void clear_exception(struct pstore *ps, uint32_t index) | 423 | static void clear_exception(struct pstore *ps, uint32_t index) |
419 | { | 424 | { |
420 | struct disk_exception *e = get_exception(ps, index); | 425 | struct disk_exception *de = get_exception(ps, index); |
421 | 426 | ||
422 | /* clear it */ | 427 | /* clear it */ |
423 | e->old_chunk = 0; | 428 | de->old_chunk = 0; |
424 | e->new_chunk = 0; | 429 | de->new_chunk = 0; |
425 | } | 430 | } |
426 | 431 | ||
427 | /* | 432 | /* |
@@ -437,13 +442,13 @@ static int insert_exceptions(struct pstore *ps, | |||
437 | { | 442 | { |
438 | int r; | 443 | int r; |
439 | unsigned int i; | 444 | unsigned int i; |
440 | struct disk_exception de; | 445 | struct core_exception e; |
441 | 446 | ||
442 | /* presume the area is full */ | 447 | /* presume the area is full */ |
443 | *full = 1; | 448 | *full = 1; |
444 | 449 | ||
445 | for (i = 0; i < ps->exceptions_per_area; i++) { | 450 | for (i = 0; i < ps->exceptions_per_area; i++) { |
446 | read_exception(ps, i, &de); | 451 | read_exception(ps, i, &e); |
447 | 452 | ||
448 | /* | 453 | /* |
449 | * If the new_chunk is pointing at the start of | 454 | * If the new_chunk is pointing at the start of |
@@ -451,7 +456,7 @@ static int insert_exceptions(struct pstore *ps, | |||
451 | * is we know that we've hit the end of the | 456 | * is we know that we've hit the end of the |
452 | * exceptions. Therefore the area is not full. | 457 | * exceptions. Therefore the area is not full. |
453 | */ | 458 | */ |
454 | if (de.new_chunk == 0LL) { | 459 | if (e.new_chunk == 0LL) { |
455 | ps->current_committed = i; | 460 | ps->current_committed = i; |
456 | *full = 0; | 461 | *full = 0; |
457 | break; | 462 | break; |
@@ -460,13 +465,13 @@ static int insert_exceptions(struct pstore *ps, | |||
460 | /* | 465 | /* |
461 | * Keep track of the start of the free chunks. | 466 | * Keep track of the start of the free chunks. |
462 | */ | 467 | */ |
463 | if (ps->next_free <= de.new_chunk) | 468 | if (ps->next_free <= e.new_chunk) |
464 | ps->next_free = de.new_chunk + 1; | 469 | ps->next_free = e.new_chunk + 1; |
465 | 470 | ||
466 | /* | 471 | /* |
467 | * Otherwise we add the exception to the snapshot. | 472 | * Otherwise we add the exception to the snapshot. |
468 | */ | 473 | */ |
469 | r = callback(callback_context, de.old_chunk, de.new_chunk); | 474 | r = callback(callback_context, e.old_chunk, e.new_chunk); |
470 | if (r) | 475 | if (r) |
471 | return r; | 476 | return r; |
472 | } | 477 | } |
@@ -641,12 +646,12 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
641 | { | 646 | { |
642 | unsigned int i; | 647 | unsigned int i; |
643 | struct pstore *ps = get_info(store); | 648 | struct pstore *ps = get_info(store); |
644 | struct disk_exception de; | 649 | struct core_exception ce; |
645 | struct commit_callback *cb; | 650 | struct commit_callback *cb; |
646 | 651 | ||
647 | de.old_chunk = e->old_chunk; | 652 | ce.old_chunk = e->old_chunk; |
648 | de.new_chunk = e->new_chunk; | 653 | ce.new_chunk = e->new_chunk; |
649 | write_exception(ps, ps->current_committed++, &de); | 654 | write_exception(ps, ps->current_committed++, &ce); |
650 | 655 | ||
651 | /* | 656 | /* |
652 | * Add the callback to the back of the array. This code | 657 | * Add the callback to the back of the array. This code |
@@ -701,7 +706,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
701 | chunk_t *last_new_chunk) | 706 | chunk_t *last_new_chunk) |
702 | { | 707 | { |
703 | struct pstore *ps = get_info(store); | 708 | struct pstore *ps = get_info(store); |
704 | struct disk_exception de; | 709 | struct core_exception ce; |
705 | int nr_consecutive; | 710 | int nr_consecutive; |
706 | int r; | 711 | int r; |
707 | 712 | ||
@@ -722,9 +727,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
722 | ps->current_committed = ps->exceptions_per_area; | 727 | ps->current_committed = ps->exceptions_per_area; |
723 | } | 728 | } |
724 | 729 | ||
725 | read_exception(ps, ps->current_committed - 1, &de); | 730 | read_exception(ps, ps->current_committed - 1, &ce); |
726 | *last_old_chunk = de.old_chunk; | 731 | *last_old_chunk = ce.old_chunk; |
727 | *last_new_chunk = de.new_chunk; | 732 | *last_new_chunk = ce.new_chunk; |
728 | 733 | ||
729 | /* | 734 | /* |
730 | * Find number of consecutive chunks within the current area, | 735 | * Find number of consecutive chunks within the current area, |
@@ -733,9 +738,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
733 | for (nr_consecutive = 1; nr_consecutive < ps->current_committed; | 738 | for (nr_consecutive = 1; nr_consecutive < ps->current_committed; |
734 | nr_consecutive++) { | 739 | nr_consecutive++) { |
735 | read_exception(ps, ps->current_committed - 1 - nr_consecutive, | 740 | read_exception(ps, ps->current_committed - 1 - nr_consecutive, |
736 | &de); | 741 | &ce); |
737 | if (de.old_chunk != *last_old_chunk - nr_consecutive || | 742 | if (ce.old_chunk != *last_old_chunk - nr_consecutive || |
738 | de.new_chunk != *last_new_chunk - nr_consecutive) | 743 | ce.new_chunk != *last_new_chunk - nr_consecutive) |
739 | break; | 744 | break; |
740 | } | 745 | } |
741 | 746 | ||