aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/ipr.c
diff options
context:
space:
mode:
authorWayne Boyer <wayneb@linux.vnet.ibm.com>2010-02-19 16:23:59 -0500
committerJames Bottomley <James.Bottomley@suse.de>2010-03-03 05:34:57 -0500
commit3e7ebdfa58ddaef361f9538219e66a7226fb1e5d (patch)
tree949b86d83da2dc36bb8b0d8bd01cb4059bf93834 /drivers/scsi/ipr.c
parenta74c16390a47dcb6c96b20b572ffc9936073d4b1 (diff)
[SCSI] ipr: update the configuration table code for the next generation chip
This patch changes the configuration table structures and related code such that both 32 bit and 64 bit based adapters can work with the driver. This patch also implements the code to generate the virtual bus/id/lun values for devices connected to the new adapters. It also implements support for the new device resource path. Signed-off-by: Wayne Boyer <wayneb@linux.vnet.ibm.com> Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--drivers/scsi/ipr.c512
1 file changed, 416 insertions, 96 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e6bab3fb6945..91e330a12721 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -72,6 +72,7 @@
72#include <linux/moduleparam.h> 72#include <linux/moduleparam.h>
73#include <linux/libata.h> 73#include <linux/libata.h>
74#include <linux/hdreg.h> 74#include <linux/hdreg.h>
75#include <linux/stringify.h>
75#include <asm/io.h> 76#include <asm/io.h>
76#include <asm/irq.h> 77#include <asm/irq.h>
77#include <asm/processor.h> 78#include <asm/processor.h>
@@ -93,6 +94,7 @@ static unsigned int ipr_fastfail = 0;
93static unsigned int ipr_transop_timeout = 0; 94static unsigned int ipr_transop_timeout = 0;
94static unsigned int ipr_enable_cache = 1; 95static unsigned int ipr_enable_cache = 1;
95static unsigned int ipr_debug = 0; 96static unsigned int ipr_debug = 0;
97static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
96static unsigned int ipr_dual_ioa_raid = 1; 98static unsigned int ipr_dual_ioa_raid = 1;
97static DEFINE_SPINLOCK(ipr_driver_lock); 99static DEFINE_SPINLOCK(ipr_driver_lock);
98 100
@@ -177,6 +179,9 @@ module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
177MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 179MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
178module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); 180module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
179MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)"); 181MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
182module_param_named(max_devs, ipr_max_devs, int, 0);
183MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
184 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
180MODULE_LICENSE("GPL"); 185MODULE_LICENSE("GPL");
181MODULE_VERSION(IPR_DRIVER_VERSION); 186MODULE_VERSION(IPR_DRIVER_VERSION);
182 187
@@ -921,14 +926,46 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
921} 926}
922 927
923/** 928/**
929 * ipr_update_ata_class - Update the ata class in the resource entry
930 * @res: resource entry struct
931 * @proto: cfgte device bus protocol value
932 *
933 * Return value:
934 * none
935 **/
936static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
937{
938 switch(proto) {
939 case IPR_PROTO_SATA:
940 case IPR_PROTO_SAS_STP:
941 res->ata_class = ATA_DEV_ATA;
942 break;
943 case IPR_PROTO_SATA_ATAPI:
944 case IPR_PROTO_SAS_STP_ATAPI:
945 res->ata_class = ATA_DEV_ATAPI;
946 break;
947 default:
948 res->ata_class = ATA_DEV_UNKNOWN;
949 break;
950 };
951}
952
953/**
924 * ipr_init_res_entry - Initialize a resource entry struct. 954 * ipr_init_res_entry - Initialize a resource entry struct.
925 * @res: resource entry struct 955 * @res: resource entry struct
956 * @cfgtew: config table entry wrapper struct
926 * 957 *
927 * Return value: 958 * Return value:
928 * none 959 * none
929 **/ 960 **/
930static void ipr_init_res_entry(struct ipr_resource_entry *res) 961static void ipr_init_res_entry(struct ipr_resource_entry *res,
962 struct ipr_config_table_entry_wrapper *cfgtew)
931{ 963{
964 int found = 0;
965 unsigned int proto;
966 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
967 struct ipr_resource_entry *gscsi_res = NULL;
968
932 res->needs_sync_complete = 0; 969 res->needs_sync_complete = 0;
933 res->in_erp = 0; 970 res->in_erp = 0;
934 res->add_to_ml = 0; 971 res->add_to_ml = 0;
@@ -936,6 +973,205 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
936 res->resetting_device = 0; 973 res->resetting_device = 0;
937 res->sdev = NULL; 974 res->sdev = NULL;
938 res->sata_port = NULL; 975 res->sata_port = NULL;
976
977 if (ioa_cfg->sis64) {
978 proto = cfgtew->u.cfgte64->proto;
979 res->res_flags = cfgtew->u.cfgte64->res_flags;
980 res->qmodel = IPR_QUEUEING_MODEL64(res);
981 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
982
983 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
984 sizeof(res->res_path));
985
986 res->bus = 0;
987 res->lun = scsilun_to_int(&res->dev_lun);
988
989 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
990 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
991 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
992 found = 1;
993 res->target = gscsi_res->target;
994 break;
995 }
996 }
997 if (!found) {
998 res->target = find_first_zero_bit(ioa_cfg->target_ids,
999 ioa_cfg->max_devs_supported);
1000 set_bit(res->target, ioa_cfg->target_ids);
1001 }
1002
1003 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1004 sizeof(res->dev_lun.scsi_lun));
1005 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1006 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1007 res->target = 0;
1008 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1009 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1010 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1011 ioa_cfg->max_devs_supported);
1012 set_bit(res->target, ioa_cfg->array_ids);
1013 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1014 res->bus = IPR_VSET_VIRTUAL_BUS;
1015 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1016 ioa_cfg->max_devs_supported);
1017 set_bit(res->target, ioa_cfg->vset_ids);
1018 } else {
1019 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1020 ioa_cfg->max_devs_supported);
1021 set_bit(res->target, ioa_cfg->target_ids);
1022 }
1023 } else {
1024 proto = cfgtew->u.cfgte->proto;
1025 res->qmodel = IPR_QUEUEING_MODEL(res);
1026 res->flags = cfgtew->u.cfgte->flags;
1027 if (res->flags & IPR_IS_IOA_RESOURCE)
1028 res->type = IPR_RES_TYPE_IOAFP;
1029 else
1030 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1031
1032 res->bus = cfgtew->u.cfgte->res_addr.bus;
1033 res->target = cfgtew->u.cfgte->res_addr.target;
1034 res->lun = cfgtew->u.cfgte->res_addr.lun;
1035 }
1036
1037 ipr_update_ata_class(res, proto);
1038}
1039
1040/**
1041 * ipr_is_same_device - Determine if two devices are the same.
1042 * @res: resource entry struct
1043 * @cfgtew: config table entry wrapper struct
1044 *
1045 * Return value:
1046 * 1 if the devices are the same / 0 otherwise
1047 **/
1048static int ipr_is_same_device(struct ipr_resource_entry *res,
1049 struct ipr_config_table_entry_wrapper *cfgtew)
1050{
1051 if (res->ioa_cfg->sis64) {
1052 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1053 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1054 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1055 sizeof(cfgtew->u.cfgte64->lun))) {
1056 return 1;
1057 }
1058 } else {
1059 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1060 res->target == cfgtew->u.cfgte->res_addr.target &&
1061 res->lun == cfgtew->u.cfgte->res_addr.lun)
1062 return 1;
1063 }
1064
1065 return 0;
1066}
1067
1068/**
1069 * ipr_format_resource_path - Format the resource path for printing.
1070 * @res_path: resource path
1071 * @buf: buffer
1072 *
1073 * Return value:
1074 * pointer to buffer
1075 **/
1076static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1077{
1078 int i;
1079
1080 sprintf(buffer, "%02X", res_path[0]);
1081 for (i=1; res_path[i] != 0xff; i++)
1082 sprintf(buffer, "%s:%02X", buffer, res_path[i]);
1083
1084 return buffer;
1085}
1086
1087/**
1088 * ipr_update_res_entry - Update the resource entry.
1089 * @res: resource entry struct
1090 * @cfgtew: config table entry wrapper struct
1091 *
1092 * Return value:
1093 * none
1094 **/
1095static void ipr_update_res_entry(struct ipr_resource_entry *res,
1096 struct ipr_config_table_entry_wrapper *cfgtew)
1097{
1098 char buffer[IPR_MAX_RES_PATH_LENGTH];
1099 unsigned int proto;
1100 int new_path = 0;
1101
1102 if (res->ioa_cfg->sis64) {
1103 res->flags = cfgtew->u.cfgte64->flags;
1104 res->res_flags = cfgtew->u.cfgte64->res_flags;
1105 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1106
1107 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1108 sizeof(struct ipr_std_inq_data));
1109
1110 res->qmodel = IPR_QUEUEING_MODEL64(res);
1111 proto = cfgtew->u.cfgte64->proto;
1112 res->res_handle = cfgtew->u.cfgte64->res_handle;
1113 res->dev_id = cfgtew->u.cfgte64->dev_id;
1114
1115 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1116 sizeof(res->dev_lun.scsi_lun));
1117
1118 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1119 sizeof(res->res_path))) {
1120 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1121 sizeof(res->res_path));
1122 new_path = 1;
1123 }
1124
1125 if (res->sdev && new_path)
1126 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1127 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1128 } else {
1129 res->flags = cfgtew->u.cfgte->flags;
1130 if (res->flags & IPR_IS_IOA_RESOURCE)
1131 res->type = IPR_RES_TYPE_IOAFP;
1132 else
1133 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1134
1135 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1136 sizeof(struct ipr_std_inq_data));
1137
1138 res->qmodel = IPR_QUEUEING_MODEL(res);
1139 proto = cfgtew->u.cfgte->proto;
1140 res->res_handle = cfgtew->u.cfgte->res_handle;
1141 }
1142
1143 ipr_update_ata_class(res, proto);
1144}
1145
1146/**
1147 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1148 * for the resource.
1149 * @res: resource entry struct
1150 * @cfgtew: config table entry wrapper struct
1151 *
1152 * Return value:
1153 * none
1154 **/
1155static void ipr_clear_res_target(struct ipr_resource_entry *res)
1156{
1157 struct ipr_resource_entry *gscsi_res = NULL;
1158 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1159
1160 if (!ioa_cfg->sis64)
1161 return;
1162
1163 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1164 clear_bit(res->target, ioa_cfg->array_ids);
1165 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1166 clear_bit(res->target, ioa_cfg->vset_ids);
1167 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1168 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1169 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1170 return;
1171 clear_bit(res->target, ioa_cfg->target_ids);
1172
1173 } else if (res->bus == 0)
1174 clear_bit(res->target, ioa_cfg->target_ids);
939} 1175}
940 1176
941/** 1177/**
@@ -947,17 +1183,24 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
947 * none 1183 * none
948 **/ 1184 **/
949static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, 1185static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
950 struct ipr_hostrcb *hostrcb) 1186 struct ipr_hostrcb *hostrcb)
951{ 1187{
952 struct ipr_resource_entry *res = NULL; 1188 struct ipr_resource_entry *res = NULL;
953 struct ipr_config_table_entry *cfgte; 1189 struct ipr_config_table_entry_wrapper cfgtew;
1190 __be32 cc_res_handle;
1191
954 u32 is_ndn = 1; 1192 u32 is_ndn = 1;
955 1193
956 cfgte = &hostrcb->hcam.u.ccn.cfgte; 1194 if (ioa_cfg->sis64) {
1195 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1196 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1197 } else {
1198 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1199 cc_res_handle = cfgtew.u.cfgte->res_handle;
1200 }
957 1201
958 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 1202 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
959 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr, 1203 if (res->res_handle == cc_res_handle) {
960 sizeof(cfgte->res_addr))) {
961 is_ndn = 0; 1204 is_ndn = 0;
962 break; 1205 break;
963 } 1206 }
@@ -975,20 +1218,22 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
975 struct ipr_resource_entry, queue); 1218 struct ipr_resource_entry, queue);
976 1219
977 list_del(&res->queue); 1220 list_del(&res->queue);
978 ipr_init_res_entry(res); 1221 ipr_init_res_entry(res, &cfgtew);
979 list_add_tail(&res->queue, &ioa_cfg->used_res_q); 1222 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
980 } 1223 }
981 1224
982 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 1225 ipr_update_res_entry(res, &cfgtew);
983 1226
984 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { 1227 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
985 if (res->sdev) { 1228 if (res->sdev) {
986 res->del_from_ml = 1; 1229 res->del_from_ml = 1;
987 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 1230 res->res_handle = IPR_INVALID_RES_HANDLE;
988 if (ioa_cfg->allow_ml_add_del) 1231 if (ioa_cfg->allow_ml_add_del)
989 schedule_work(&ioa_cfg->work_q); 1232 schedule_work(&ioa_cfg->work_q);
990 } else 1233 } else {
1234 ipr_clear_res_target(res);
991 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 1235 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1236 }
992 } else if (!res->sdev) { 1237 } else if (!res->sdev) {
993 res->add_to_ml = 1; 1238 res->add_to_ml = 1;
994 if (ioa_cfg->allow_ml_add_del) 1239 if (ioa_cfg->allow_ml_add_del)
@@ -1941,12 +2186,14 @@ static const struct ipr_ses_table_entry *
1941ipr_find_ses_entry(struct ipr_resource_entry *res) 2186ipr_find_ses_entry(struct ipr_resource_entry *res)
1942{ 2187{
1943 int i, j, matches; 2188 int i, j, matches;
2189 struct ipr_std_inq_vpids *vpids;
1944 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2190 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1945 2191
1946 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2192 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1947 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2193 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1948 if (ste->compare_product_id_byte[j] == 'X') { 2194 if (ste->compare_product_id_byte[j] == 'X') {
1949 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j]) 2195 vpids = &res->std_inq_data.vpids;
2196 if (vpids->product_id[j] == ste->product_id[j])
1950 matches++; 2197 matches++;
1951 else 2198 else
1952 break; 2199 break;
@@ -1981,10 +2228,10 @@ static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_wi
1981 2228
1982 /* Loop through each config table entry in the config table buffer */ 2229 /* Loop through each config table entry in the config table buffer */
1983 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2230 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1984 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data))) 2231 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1985 continue; 2232 continue;
1986 2233
1987 if (bus != res->cfgte.res_addr.bus) 2234 if (bus != res->bus)
1988 continue; 2235 continue;
1989 2236
1990 if (!(ste = ipr_find_ses_entry(res))) 2237 if (!(ste = ipr_find_ses_entry(res)))
@@ -2518,9 +2765,9 @@ restart:
2518 2765
2519 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2766 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2520 if (res->add_to_ml) { 2767 if (res->add_to_ml) {
2521 bus = res->cfgte.res_addr.bus; 2768 bus = res->bus;
2522 target = res->cfgte.res_addr.target; 2769 target = res->target;
2523 lun = res->cfgte.res_addr.lun; 2770 lun = res->lun;
2524 res->add_to_ml = 0; 2771 res->add_to_ml = 0;
2525 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2772 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2526 scsi_add_device(ioa_cfg->host, bus, target, lun); 2773 scsi_add_device(ioa_cfg->host, bus, target, lun);
@@ -3578,7 +3825,7 @@ static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribu
3578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3825 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3579 res = (struct ipr_resource_entry *)sdev->hostdata; 3826 res = (struct ipr_resource_entry *)sdev->hostdata;
3580 if (res) 3827 if (res)
3581 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); 3828 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
3582 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3829 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3583 return len; 3830 return len;
3584} 3831}
@@ -3591,8 +3838,43 @@ static struct device_attribute ipr_adapter_handle_attr = {
3591 .show = ipr_show_adapter_handle 3838 .show = ipr_show_adapter_handle
3592}; 3839};
3593 3840
3841/**
3842 * ipr_show_resource_path - Show the resource path for this device.
3843 * @dev: device struct
3844 * @buf: buffer
3845 *
3846 * Return value:
3847 * number of bytes printed to buffer
3848 **/
3849static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
3850{
3851 struct scsi_device *sdev = to_scsi_device(dev);
3852 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3853 struct ipr_resource_entry *res;
3854 unsigned long lock_flags = 0;
3855 ssize_t len = -ENXIO;
3856 char buffer[IPR_MAX_RES_PATH_LENGTH];
3857
3858 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3859 res = (struct ipr_resource_entry *)sdev->hostdata;
3860 if (res)
3861 len = snprintf(buf, PAGE_SIZE, "%s\n",
3862 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
3863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3864 return len;
3865}
3866
3867static struct device_attribute ipr_resource_path_attr = {
3868 .attr = {
3869 .name = "resource_path",
3870 .mode = S_IRUSR,
3871 },
3872 .show = ipr_show_resource_path
3873};
3874
3594static struct device_attribute *ipr_dev_attrs[] = { 3875static struct device_attribute *ipr_dev_attrs[] = {
3595 &ipr_adapter_handle_attr, 3876 &ipr_adapter_handle_attr,
3877 &ipr_resource_path_attr,
3596 NULL, 3878 NULL,
3597}; 3879};
3598 3880
@@ -3645,9 +3927,9 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3645 struct ipr_resource_entry *res; 3927 struct ipr_resource_entry *res;
3646 3928
3647 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3929 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3648 if ((res->cfgte.res_addr.bus == starget->channel) && 3930 if ((res->bus == starget->channel) &&
3649 (res->cfgte.res_addr.target == starget->id) && 3931 (res->target == starget->id) &&
3650 (res->cfgte.res_addr.lun == 0)) { 3932 (res->lun == 0)) {
3651 return res; 3933 return res;
3652 } 3934 }
3653 } 3935 }
@@ -3717,6 +3999,17 @@ static int ipr_target_alloc(struct scsi_target *starget)
3717static void ipr_target_destroy(struct scsi_target *starget) 3999static void ipr_target_destroy(struct scsi_target *starget)
3718{ 4000{
3719 struct ipr_sata_port *sata_port = starget->hostdata; 4001 struct ipr_sata_port *sata_port = starget->hostdata;
4002 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4003 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4004
4005 if (ioa_cfg->sis64) {
4006 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4007 clear_bit(starget->id, ioa_cfg->array_ids);
4008 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4009 clear_bit(starget->id, ioa_cfg->vset_ids);
4010 else if (starget->channel == 0)
4011 clear_bit(starget->id, ioa_cfg->target_ids);
4012 }
3720 4013
3721 if (sata_port) { 4014 if (sata_port) {
3722 starget->hostdata = NULL; 4015 starget->hostdata = NULL;
@@ -3738,9 +4031,9 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3738 struct ipr_resource_entry *res; 4031 struct ipr_resource_entry *res;
3739 4032
3740 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4033 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3741 if ((res->cfgte.res_addr.bus == sdev->channel) && 4034 if ((res->bus == sdev->channel) &&
3742 (res->cfgte.res_addr.target == sdev->id) && 4035 (res->target == sdev->id) &&
3743 (res->cfgte.res_addr.lun == sdev->lun)) 4036 (res->lun == sdev->lun))
3744 return res; 4037 return res;
3745 } 4038 }
3746 4039
@@ -3789,6 +4082,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3789 struct ipr_resource_entry *res; 4082 struct ipr_resource_entry *res;
3790 struct ata_port *ap = NULL; 4083 struct ata_port *ap = NULL;
3791 unsigned long lock_flags = 0; 4084 unsigned long lock_flags = 0;
4085 char buffer[IPR_MAX_RES_PATH_LENGTH];
3792 4086
3793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4087 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3794 res = sdev->hostdata; 4088 res = sdev->hostdata;
@@ -3815,6 +4109,9 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3815 ata_sas_slave_configure(sdev, ap); 4109 ata_sas_slave_configure(sdev, ap);
3816 } else 4110 } else
3817 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 4111 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4112 if (ioa_cfg->sis64)
4113 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4114 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
3818 return 0; 4115 return 0;
3819 } 4116 }
3820 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4117 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3963,7 +4260,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3963 } else 4260 } else
3964 regs = &ioarcb->u.add_data.u.regs; 4261 regs = &ioarcb->u.add_data.u.regs;
3965 4262
3966 ioarcb->res_handle = res->cfgte.res_handle; 4263 ioarcb->res_handle = res->res_handle;
3967 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4264 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3968 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 4265 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3969 if (ipr_is_gata(res)) { 4266 if (ipr_is_gata(res)) {
@@ -4013,19 +4310,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4013 res = sata_port->res; 4310 res = sata_port->res;
4014 if (res) { 4311 if (res) {
4015 rc = ipr_device_reset(ioa_cfg, res); 4312 rc = ipr_device_reset(ioa_cfg, res);
4016 switch(res->cfgte.proto) { 4313 *classes = res->ata_class;
4017 case IPR_PROTO_SATA:
4018 case IPR_PROTO_SAS_STP:
4019 *classes = ATA_DEV_ATA;
4020 break;
4021 case IPR_PROTO_SATA_ATAPI:
4022 case IPR_PROTO_SAS_STP_ATAPI:
4023 *classes = ATA_DEV_ATAPI;
4024 break;
4025 default:
4026 *classes = ATA_DEV_UNKNOWN;
4027 break;
4028 };
4029 } 4314 }
4030 4315
4031 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4070,7 +4355,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4070 return FAILED; 4355 return FAILED;
4071 4356
4072 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4357 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4073 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4358 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4074 if (ipr_cmd->scsi_cmd) 4359 if (ipr_cmd->scsi_cmd)
4075 ipr_cmd->done = ipr_scsi_eh_done; 4360 ipr_cmd->done = ipr_scsi_eh_done;
4076 if (ipr_cmd->qc) 4361 if (ipr_cmd->qc)
@@ -4092,7 +4377,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4092 spin_lock_irq(scsi_cmd->device->host->host_lock); 4377 spin_lock_irq(scsi_cmd->device->host->host_lock);
4093 4378
4094 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4379 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4095 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4380 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4096 rc = -EIO; 4381 rc = -EIO;
4097 break; 4382 break;
4098 } 4383 }
@@ -4131,13 +4416,13 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4131 struct ipr_resource_entry *res; 4416 struct ipr_resource_entry *res;
4132 4417
4133 ENTER; 4418 ENTER;
4134 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4419 if (!ioa_cfg->sis64)
4135 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, 4420 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4136 sizeof(res->cfgte.res_handle))) { 4421 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4137 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); 4422 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4138 break; 4423 break;
4424 }
4139 } 4425 }
4140 }
4141 4426
4142 /* 4427 /*
4143 * If abort has not completed, indicate the reset has, else call the 4428 * If abort has not completed, indicate the reset has, else call the
@@ -4235,7 +4520,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4235 return SUCCESS; 4520 return SUCCESS;
4236 4521
4237 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4522 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4238 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 4523 ipr_cmd->ioarcb.res_handle = res->res_handle;
4239 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 4524 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4240 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4525 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4241 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 4526 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
@@ -5071,9 +5356,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5071 5356
5072 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 5357 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5073 ipr_cmd->scsi_cmd = scsi_cmd; 5358 ipr_cmd->scsi_cmd = scsi_cmd;
5074 ioarcb->res_handle = res->cfgte.res_handle; 5359 ioarcb->res_handle = res->res_handle;
5075 ipr_cmd->done = ipr_scsi_done; 5360 ipr_cmd->done = ipr_scsi_done;
5076 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5361 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5077 5362
5078 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 5363 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5079 if (scsi_cmd->underflow == 0) 5364 if (scsi_cmd->underflow == 0)
@@ -5216,20 +5501,9 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
5216 goto out_unlock; 5501 goto out_unlock;
5217 } 5502 }
5218 5503
5219 switch(res->cfgte.proto) { 5504 ap->link.device[0].class = res->ata_class;
5220 case IPR_PROTO_SATA: 5505 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5221 case IPR_PROTO_SAS_STP:
5222 ap->link.device[0].class = ATA_DEV_ATA;
5223 break;
5224 case IPR_PROTO_SATA_ATAPI:
5225 case IPR_PROTO_SAS_STP_ATAPI:
5226 ap->link.device[0].class = ATA_DEV_ATAPI;
5227 break;
5228 default:
5229 ap->link.device[0].class = ATA_DEV_UNKNOWN;
5230 ata_port_disable(ap); 5506 ata_port_disable(ap);
5231 break;
5232 };
5233 5507
5234out_unlock: 5508out_unlock:
5235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 5509 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -5315,8 +5589,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5315 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 5589 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5316 5590
5317 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 5591 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5318 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus, 5592 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5319 res->cfgte.res_addr.target);
5320 5593
5321 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 5594 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5322 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5595 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
@@ -5452,7 +5725,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5452 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 5725 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5453 ipr_cmd->qc = qc; 5726 ipr_cmd->qc = qc;
5454 ipr_cmd->done = ipr_sata_done; 5727 ipr_cmd->done = ipr_sata_done;
5455 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 5728 ipr_cmd->ioarcb.res_handle = res->res_handle;
5456 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 5729 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5457 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5730 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5458 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5731 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
@@ -5466,7 +5739,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5466 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5739 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5467 ipr_copy_sata_tf(regs, &qc->tf); 5740 ipr_copy_sata_tf(regs, &qc->tf);
5468 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 5741 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5469 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5742 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5470 5743
5471 switch (qc->tf.protocol) { 5744 switch (qc->tf.protocol) {
5472 case ATA_PROT_NODATA: 5745 case ATA_PROT_NODATA:
@@ -5715,13 +5988,14 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5715 continue; 5988 continue;
5716 5989
5717 ipr_cmd->u.res = res; 5990 ipr_cmd->u.res = res;
5718 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); 5991 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
5719 5992
5720 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 5993 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5721 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5994 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5722 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 5995 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5723 5996
5724 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 5997 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5998 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
5725 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 5999 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5726 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 6000 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5727 6001
@@ -5734,7 +6008,8 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5734 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 6008 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5735 IPR_SET_SUP_DEVICE_TIMEOUT); 6009 IPR_SET_SUP_DEVICE_TIMEOUT);
5736 6010
5737 ipr_cmd->job_step = ipr_set_supported_devs; 6011 if (!ioa_cfg->sis64)
6012 ipr_cmd->job_step = ipr_set_supported_devs;
5738 return IPR_RC_JOB_RETURN; 6013 return IPR_RC_JOB_RETURN;
5739 } 6014 }
5740 6015
@@ -6182,24 +6457,36 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6182{ 6457{
6183 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6458 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6184 struct ipr_resource_entry *res, *temp; 6459 struct ipr_resource_entry *res, *temp;
6185 struct ipr_config_table_entry *cfgte; 6460 struct ipr_config_table_entry_wrapper cfgtew;
6186 int found, i; 6461 int entries, found, flag, i;
6187 LIST_HEAD(old_res); 6462 LIST_HEAD(old_res);
6188 6463
6189 ENTER; 6464 ENTER;
6190 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) 6465 if (ioa_cfg->sis64)
6466 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6467 else
6468 flag = ioa_cfg->u.cfg_table->hdr.flags;
6469
6470 if (flag & IPR_UCODE_DOWNLOAD_REQ)
6191 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 6471 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6192 6472
6193 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 6473 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6194 list_move_tail(&res->queue, &old_res); 6474 list_move_tail(&res->queue, &old_res);
6195 6475
6196 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { 6476 if (ioa_cfg->sis64)
6197 cfgte = &ioa_cfg->cfg_table->dev[i]; 6477 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6478 else
6479 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6480
6481 for (i = 0; i < entries; i++) {
6482 if (ioa_cfg->sis64)
6483 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6484 else
6485 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6198 found = 0; 6486 found = 0;
6199 6487
6200 list_for_each_entry_safe(res, temp, &old_res, queue) { 6488 list_for_each_entry_safe(res, temp, &old_res, queue) {
6201 if (!memcmp(&res->cfgte.res_addr, 6489 if (ipr_is_same_device(res, &cfgtew)) {
6202 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
6203 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6490 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6204 found = 1; 6491 found = 1;
6205 break; 6492 break;
@@ -6216,24 +6503,27 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6216 res = list_entry(ioa_cfg->free_res_q.next, 6503 res = list_entry(ioa_cfg->free_res_q.next,
6217 struct ipr_resource_entry, queue); 6504 struct ipr_resource_entry, queue);
6218 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6505 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6219 ipr_init_res_entry(res); 6506 ipr_init_res_entry(res, &cfgtew);
6220 res->add_to_ml = 1; 6507 res->add_to_ml = 1;
6221 } 6508 }
6222 6509
6223 if (found) 6510 if (found)
6224 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 6511 ipr_update_res_entry(res, &cfgtew);
6225 } 6512 }
6226 6513
6227 list_for_each_entry_safe(res, temp, &old_res, queue) { 6514 list_for_each_entry_safe(res, temp, &old_res, queue) {
6228 if (res->sdev) { 6515 if (res->sdev) {
6229 res->del_from_ml = 1; 6516 res->del_from_ml = 1;
6230 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 6517 res->res_handle = IPR_INVALID_RES_HANDLE;
6231 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6518 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6232 } else {
6233 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6234 } 6519 }
6235 } 6520 }
6236 6521
6522 list_for_each_entry_safe(res, temp, &old_res, queue) {
6523 ipr_clear_res_target(res);
6524 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6525 }
6526
6237 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 6527 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6238 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 6528 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6239 else 6529 else
@@ -6270,11 +6560,10 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6270 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6560 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6271 6561
6272 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 6562 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6273 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; 6563 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6274 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; 6564 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6275 6565
6276 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, 6566 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6277 sizeof(struct ipr_config_table),
6278 IPR_IOADL_FLAGS_READ_LAST); 6567 IPR_IOADL_FLAGS_READ_LAST);
6279 6568
6280 ipr_cmd->job_step = ipr_init_res_table; 6569 ipr_cmd->job_step = ipr_init_res_table;
@@ -6567,7 +6856,7 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6567 ioa_cfg->toggle_bit = 1; 6856 ioa_cfg->toggle_bit = 1;
6568 6857
6569 /* Zero out config table */ 6858 /* Zero out config table */
6570 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); 6859 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
6571} 6860}
6572 6861
6573/** 6862/**
@@ -7370,8 +7659,8 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7370 ipr_free_cmd_blks(ioa_cfg); 7659 ipr_free_cmd_blks(ioa_cfg);
7371 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 7660 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7372 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 7661 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7373 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), 7662 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7374 ioa_cfg->cfg_table, 7663 ioa_cfg->u.cfg_table,
7375 ioa_cfg->cfg_table_dma); 7664 ioa_cfg->cfg_table_dma);
7376 7665
7377 for (i = 0; i < IPR_NUM_HCAMS; i++) { 7666 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7488,13 +7777,24 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7488 7777
7489 ENTER; 7778 ENTER;
7490 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * 7779 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7491 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); 7780 ioa_cfg->max_devs_supported, GFP_KERNEL);
7492 7781
7493 if (!ioa_cfg->res_entries) 7782 if (!ioa_cfg->res_entries)
7494 goto out; 7783 goto out;
7495 7784
7496 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) 7785 if (ioa_cfg->sis64) {
7786 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
7787 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
7788 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
7789 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
7790 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
7791 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
7792 }
7793
7794 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
7497 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 7795 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7796 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
7797 }
7498 7798
7499 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, 7799 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7500 sizeof(struct ipr_misc_cbs), 7800 sizeof(struct ipr_misc_cbs),
@@ -7513,11 +7813,11 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7513 if (!ioa_cfg->host_rrq) 7813 if (!ioa_cfg->host_rrq)
7514 goto out_ipr_free_cmd_blocks; 7814 goto out_ipr_free_cmd_blocks;
7515 7815
7516 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 7816 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7517 sizeof(struct ipr_config_table), 7817 ioa_cfg->cfg_table_size,
7518 &ioa_cfg->cfg_table_dma); 7818 &ioa_cfg->cfg_table_dma);
7519 7819
7520 if (!ioa_cfg->cfg_table) 7820 if (!ioa_cfg->u.cfg_table)
7521 goto out_free_host_rrq; 7821 goto out_free_host_rrq;
7522 7822
7523 for (i = 0; i < IPR_NUM_HCAMS; i++) { 7823 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7551,8 +7851,9 @@ out_free_hostrcb_dma:
7551 ioa_cfg->hostrcb[i], 7851 ioa_cfg->hostrcb[i],
7552 ioa_cfg->hostrcb_dma[i]); 7852 ioa_cfg->hostrcb_dma[i]);
7553 } 7853 }
7554 pci_free_consistent(pdev, sizeof(struct ipr_config_table), 7854 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
7555 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma); 7855 ioa_cfg->u.cfg_table,
7856 ioa_cfg->cfg_table_dma);
7556out_free_host_rrq: 7857out_free_host_rrq:
7557 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 7858 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7558 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 7859 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
@@ -7633,9 +7934,19 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7633 ioa_cfg->cache_state = CACHE_DISABLED; 7934 ioa_cfg->cache_state = CACHE_DISABLED;
7634 7935
7635 ipr_initialize_bus_attr(ioa_cfg); 7936 ipr_initialize_bus_attr(ioa_cfg);
7937 ioa_cfg->max_devs_supported = ipr_max_devs;
7636 7938
7637 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 7939 if (ioa_cfg->sis64) {
7638 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 7940 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
7941 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
7942 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
7943 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
7944 } else {
7945 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7946 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7947 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
7948 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
7949 }
7639 host->max_channel = IPR_MAX_BUS_TO_SCAN; 7950 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7640 host->unique_id = host->host_no; 7951 host->unique_id = host->host_no;
7641 host->max_cmd_len = IPR_MAX_CDB_LEN; 7952 host->max_cmd_len = IPR_MAX_CDB_LEN;
@@ -7896,6 +8207,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7896 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8207 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7897 goto cleanup_nomem; 8208 goto cleanup_nomem;
7898 8209
8210 if (ioa_cfg->sis64)
8211 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8212 + ((sizeof(struct ipr_config_table_entry64)
8213 * ioa_cfg->max_devs_supported)));
8214 else
8215 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8216 + ((sizeof(struct ipr_config_table_entry)
8217 * ioa_cfg->max_devs_supported)));
8218
7899 rc = ipr_alloc_mem(ioa_cfg); 8219 rc = ipr_alloc_mem(ioa_cfg);
7900 if (rc < 0) { 8220 if (rc < 0) {
7901 dev_err(&pdev->dev, 8221 dev_err(&pdev->dev,