about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/scsi/hpsa.c
diff options
context:
space:
mode:
author:    Scott Teel <scott.teel@hp.com>  2014-02-18 14:56:55 -0500
committer: James Bottomley <JBottomley@Parallels.com>  2014-03-15 13:19:07 -0400
commit:    6b80b18fe51540baf7f0c76b7d68df02f69db58c (patch)
tree:      f94a392c56acc9a7a138542fe178d0eef82f9613 /drivers/scsi/hpsa.c
parent:    9fb0de2d12be813294174127efaa14b8046b1411 (diff)
[SCSI] hpsa: complete the ioaccel raidmap code
Load balance across members of a N-way mirror set, and handle the
meta-RAID levels: R10, R50, R60.

Signed-off-by: Scott Teel <scott.teel@hp.com>
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--  drivers/scsi/hpsa.c | 174
1 file changed, 168 insertions(+), 6 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 41c717310f90..7cd28714d8dd 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -423,6 +423,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
         "1(ADM)", "UNKNOWN"
 };
+#define HPSA_RAID_0	0
+#define HPSA_RAID_4	1
+#define HPSA_RAID_1	2	/* also used for RAID 10 */
+#define HPSA_RAID_5	3	/* also used for RAID 50 */
+#define HPSA_RAID_51	4
+#define HPSA_RAID_6	5	/* also used for RAID 60 */
+#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
 
 static ssize_t raid_level_show(struct device *dev,
@@ -2941,6 +2948,31 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
                 cdb, cdb_len, scsi3addr);
 }
 
+static void raid_map_helper(struct raid_map_data *map,
+                int offload_to_mirror, u32 *map_index, u32 *current_group)
+{
+        if (offload_to_mirror == 0) {
+                /* use physical disk in the first mirrored group. */
+                *map_index %= map->data_disks_per_row;
+                return;
+        }
+        do {
+                /* determine mirror group that *map_index indicates */
+                *current_group = *map_index / map->data_disks_per_row;
+                if (offload_to_mirror == *current_group)
+                        continue;
+                if (*current_group < (map->layout_map_count - 1)) {
+                        /* select map index from next group */
+                        *map_index += map->data_disks_per_row;
+                        (*current_group)++;
+                } else {
+                        /* select map index from first group */
+                        *map_index %= map->data_disks_per_row;
+                        *current_group = 0;
+                }
+        } while (offload_to_mirror != *current_group);
+}
+
 /*
  * Attempt to perform offload RAID mapping for a logical volume I/O.
  */
@@ -2959,6 +2991,14 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
         u64 first_row, last_row;
         u32 first_row_offset, last_row_offset;
         u32 first_column, last_column;
+        u64 r0_first_row, r0_last_row;
+        u32 r5or6_blocks_per_row;
+        u64 r5or6_first_row, r5or6_last_row;
+        u32 r5or6_first_row_offset, r5or6_last_row_offset;
+        u32 r5or6_first_column, r5or6_last_column;
+        u32 total_disks_per_row;
+        u32 stripesize;
+        u32 first_group, last_group, current_group;
         u32 map_row;
         u32 disk_handle;
         u64 disk_block;
@@ -2968,6 +3008,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 #if BITS_PER_LONG == 32
         u64 tmpdiv;
 #endif
+        int offload_to_mirror;
 
         BUG_ON(!(dev->offload_config && dev->offload_enabled));
 
@@ -3070,19 +3111,140 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
                 return IO_ACCEL_INELIGIBLE;
 
         /* proceeding with driver mapping */
+        total_disks_per_row = map->data_disks_per_row +
+                                map->metadata_disks_per_row;
         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
                                 map->row_cnt;
-        map_index = (map_row * (map->data_disks_per_row +
-                                map->metadata_disks_per_row)) + first_column;
-        if (dev->raid_level == 2) {
-                /* simple round-robin balancing of RAID 1+0 reads across
-                 * primary and mirror members. this is appropriate for SSD
-                 * but not optimal for HDD.
+        map_index = (map_row * total_disks_per_row) + first_column;
+
+        switch (dev->raid_level) {
+        case HPSA_RAID_0:
+                break; /* nothing special to do */
+        case HPSA_RAID_1:
+                /* Handles load balance across RAID 1 members.
+                 * (2-drive R1 and R10 with even # of drives.)
+                 * Appropriate for SSDs, not optimal for HDDs
                  */
+                BUG_ON(map->layout_map_count != 2);
                 if (dev->offload_to_mirror)
                         map_index += map->data_disks_per_row;
                 dev->offload_to_mirror = !dev->offload_to_mirror;
+                break;
+        case HPSA_RAID_ADM:
+                /* Handles N-way mirrors (R1-ADM)
+                 * and R10 with # of drives divisible by 3.)
+                 */
+                BUG_ON(map->layout_map_count != 3);
+
+                offload_to_mirror = dev->offload_to_mirror;
+                raid_map_helper(map, offload_to_mirror,
+                                &map_index, &current_group);
+                /* set mirror group to use next time */
+                offload_to_mirror =
+                        (offload_to_mirror >= map->layout_map_count - 1)
+                        ? 0 : offload_to_mirror + 1;
+                /* FIXME: remove after debug/dev */
+                BUG_ON(offload_to_mirror >= map->layout_map_count);
+                dev_warn(&h->pdev->dev,
+                        "DEBUG: Using physical disk map index %d from mirror group %d\n",
+                        map_index, offload_to_mirror);
+                dev->offload_to_mirror = offload_to_mirror;
+                /* Avoid direct use of dev->offload_to_mirror within this
+                 * function since multiple threads might simultaneously
+                 * increment it beyond the range of dev->layout_map_count -1.
+                 */
+                break;
+        case HPSA_RAID_5:
+        case HPSA_RAID_6:
+                if (map->layout_map_count <= 1)
+                        break;
+
+                /* Verify first and last block are in same RAID group */
+                r5or6_blocks_per_row =
+                        map->strip_size * map->data_disks_per_row;
+                BUG_ON(r5or6_blocks_per_row == 0);
+                stripesize = r5or6_blocks_per_row * map->layout_map_count;
+#if BITS_PER_LONG == 32
+                tmpdiv = first_block;
+                first_group = do_div(tmpdiv, stripesize);
+                tmpdiv = first_group;
+                (void) do_div(tmpdiv, r5or6_blocks_per_row);
+                first_group = tmpdiv;
+                tmpdiv = last_block;
+                last_group = do_div(tmpdiv, stripesize);
+                tmpdiv = last_group;
+                (void) do_div(tmpdiv, r5or6_blocks_per_row);
+                last_group = tmpdiv;
+#else
+                first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+                last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+                if (first_group != last_group)
+#endif
+                        return IO_ACCEL_INELIGIBLE;
+
+                /* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+                tmpdiv = first_block;
+                (void) do_div(tmpdiv, stripesize);
+                first_row = r5or6_first_row = r0_first_row = tmpdiv;
+                tmpdiv = last_block;
+                (void) do_div(tmpdiv, stripesize);
+                r5or6_last_row = r0_last_row = tmpdiv;
+#else
+                first_row = r5or6_first_row = r0_first_row =
+                        first_block / stripesize;
+                r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+                if (r5or6_first_row != r5or6_last_row)
+                        return IO_ACCEL_INELIGIBLE;
+
+
+                /* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+                tmpdiv = first_block;
+                first_row_offset = do_div(tmpdiv, stripesize);
+                tmpdiv = first_row_offset;
+                first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
+                r5or6_first_row_offset = first_row_offset;
+                tmpdiv = last_block;
+                r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+                tmpdiv = r5or6_last_row_offset;
+                r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+                tmpdiv = r5or6_first_row_offset;
+                (void) do_div(tmpdiv, map->strip_size);
+                first_column = r5or6_first_column = tmpdiv;
+                tmpdiv = r5or6_last_row_offset;
+                (void) do_div(tmpdiv, map->strip_size);
+                r5or6_last_column = tmpdiv;
+#else
+                first_row_offset = r5or6_first_row_offset =
+                        (u32)((first_block % stripesize) %
+                                r5or6_blocks_per_row);
+
+                r5or6_last_row_offset =
+                        (u32)((last_block % stripesize) %
+                                r5or6_blocks_per_row);
+
+                first_column = r5or6_first_column =
+                        r5or6_first_row_offset / map->strip_size;
+                r5or6_last_column =
+                        r5or6_last_row_offset / map->strip_size;
+#endif
+                if (r5or6_first_column != r5or6_last_column)
+                        return IO_ACCEL_INELIGIBLE;
+
+                /* Request is eligible */
+                map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+                        map->row_cnt;
+
+                map_index = (first_group *
+                        (map->row_cnt * total_disks_per_row)) +
+                        (map_row * total_disks_per_row) + first_column;
+                break;
+        default:
+                return IO_ACCEL_INELIGIBLE;
         }
+
         disk_handle = dd[map_index].ioaccel_handle;
         disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
                         (first_row_offset - (first_column * map->strip_size));