author	Jason Yan <yanaijie@huawei.com>	2017-03-24 21:44:39 -0400
committer	Shaohua Li <shli@fb.com>	2017-03-25 12:38:22 -0400
commit	1ad45a9bc4e0cd5a6e6fb0e6c5d35d6c87f14c76 (patch)
tree	c5953666b7983147f1660fd5a50ec8f56f001545 /drivers/md
parent	41743c1f046a14c6749fd1808bb3793c08e47a3e (diff)
md/raid5-cache: fix payload endianness problem in raid5-cache
The payload->header.type and payload->size are little-endian, so just
convert them to the right byte order.

Signed-off-by: Jason Yan <yanaijie@huawei.com>
Cc: <stable@vger.kernel.org> #v4.10+
Signed-off-by: Shaohua Li <shli@fb.com>
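For context: the r5l log's on-disk fields are stored little-endian regardless of host byte order, so comparing payload->header.type directly against a CPU-order constant only happens to work on little-endian hosts. Below is a minimal userspace sketch of the rule this patch enforces; the struct, field names, and constant value are illustrative stand-ins rather than the real on-disk layout, and le16toh()/htole32() from <endian.h> play the role of the kernel's le16_to_cpu()/cpu_to_le32().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define R5LOG_PAYLOAD_DATA 0	/* illustrative value, not the kernel's enum */

struct payload_header {
	uint16_t type;	/* stored little-endian on disk */
	uint16_t flags;	/* stored little-endian on disk */
};

int main(void)
{
	struct payload_header h;
	uint32_t size;

	/* Writer side: convert CPU-order values before storing. */
	h.type = htole16(R5LOG_PAYLOAD_DATA);
	size = htole32(8);

	/* Reader side: convert back before comparing or doing arithmetic. */
	if (le16toh(h.type) == R5LOG_PAYLOAD_DATA)
		printf("data payload, %u sectors\n", (unsigned)le32toh(size));

	return 0;
}

When such fields are declared with the kernel's __le16/__le32 annotated types, sparse can flag exactly this kind of direct comparison, which is how this class of bug is usually caught.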
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid5-cache.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 25eb048298fe..b6194e082e48 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2002,12 +2002,12 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
 		payload = (void *)mb + mb_offset;
 		payload_flush = (void *)mb + mb_offset;
 
-		if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
 			if (r5l_recovery_verify_data_checksum(
 				    log, ctx, page, log_offset,
 				    payload->checksum[0]) < 0)
 				goto mismatch;
-		} else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
 			if (r5l_recovery_verify_data_checksum(
 				    log, ctx, page, log_offset,
 				    payload->checksum[0]) < 0)
@@ -2019,12 +2019,12 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
 						 BLOCK_SECTORS),
 				    payload->checksum[1]) < 0)
 				goto mismatch;
-		} else if (payload->header.type == R5LOG_PAYLOAD_FLUSH) {
+		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
 			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
 		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
 			goto mismatch;
 
-		if (payload->header.type == R5LOG_PAYLOAD_FLUSH) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
 			mb_offset += sizeof(struct r5l_payload_flush) +
 				le32_to_cpu(payload_flush->size);
 		} else {
@@ -2091,7 +2091,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 		payload = (void *)mb + mb_offset;
 		payload_flush = (void *)mb + mb_offset;
 
-		if (payload->header.type == R5LOG_PAYLOAD_FLUSH) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
 			int i, count;
 
 			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
@@ -2113,7 +2113,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 		}
 
 		/* DATA or PARITY payload */
-		stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
 			raid5_compute_sector(
 				conf, le64_to_cpu(payload->location), 0, &dd,
 				NULL)
@@ -2151,7 +2151,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 			list_add_tail(&sh->lru, cached_stripe_list);
 		}
 
-		if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
 			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
 			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
 				r5l_recovery_replay_one_stripe(conf, sh, ctx);
@@ -2159,7 +2159,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 			}
 			r5l_recovery_load_data(log, sh, ctx, payload,
 					       log_offset);
-		} else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
 			r5l_recovery_load_parity(log, sh, ctx, payload,
 						 log_offset);
 		else
@@ -2361,7 +2361,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 			payload = (void *)mb + offset;
 			payload->header.type = cpu_to_le16(
 				R5LOG_PAYLOAD_DATA);
-			payload->size = BLOCK_SECTORS;
+			payload->size = cpu_to_le32(BLOCK_SECTORS);
 			payload->location = cpu_to_le64(
 				raid5_compute_blocknr(sh, i, 0));
 			addr = kmap_atomic(dev->page);