author    nagalakshmi.nandigama@lsi.com <nagalakshmi.nandigama@lsi.com>  2011-09-07 20:48:50 -0400
committer James Bottomley <JBottomley@Parallels.com>  2011-09-22 07:08:35 -0400
commit    911ae9434f83e7355d343f6c2be3ef5b00ea7aed (patch)
tree      3129ff0724f0ae65a7e23ab1addba47fb5c4b634 /drivers/scsi/mpt2sas
parent    66195fc9fad98e00abf2cd1a141bbcf0994daaf7 (diff)
[SCSI] mpt2sas: Added NUMA IO support in driver which uses multi-reply queue support of the HBA
Support is added for controllers capable of multiple reply queues. The following modifications were made to the driver to support NUMA:

(1) Created the new structure adapter_reply_queue to contain the reply queue info for every MSI-X vector. This object holds a reply_post_host_index, a reply_post_free pool for each instance, the msix_index, and other parameters. All the reply queues are tracked on a linked list, ioc->reply_queue_list. Each reply queue is associated with one IRQ and is passed to the interrupt handler via the bus_id parameter.

(2) The driver now determines msix_vector_count from the PCIe MSI-X capability register instead of IOC Facts->MaxMSIxVectors, because the firmware does not fill in this field until the driver has already registered MSI-X support.

(3) If the IOC Facts capabilities report that the controller is MSI-X compatible, the driver requests multiple IRQs. This count is the minimum of the number of online CPUs and ioc->msix_vector_count, and it is reported to the firmware in the IOC Init request.

(4) New routines _base_free_irq and _base_request_irq were added so that MSI-X vectors are registered and freed through a simple function API.

(5) The new routine _base_assign_reply_queues aligns the MSI-X indexes across CPUs by initializing the ioc->cpu_msix_table array. This array is looked up on every MPI request so that MSIxIndex is set appropriately (a simplified model of this mapping is sketched below).

(6) A new shost sysfs attribute was added to report the reply_queue_count.

(7) The user needs to set the IRQ affinity CPU mask so that interrupts occur on the same CPU that sent the original request.

Signed-off-by: Nagalakshmi Nandigama <nagalakshmi.nandigama@lsi.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
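The CPU-to-MSI-X mapping described in points (3) and (5) can be pictured with the following standalone sketch. It is only an illustrative model, not driver code: the names cpu_msix_table and get_msix_index mirror the driver's field and helper, the CPU and vector counts are assumed values, and a plain modulo assignment stands in for the grouping heuristic used by _base_assign_reply_queues.

#include <stdio.h>

#define NR_CPUS_ONLINE	8	/* assumed number of online CPUs   */
#define REPLY_QUEUES	4	/* assumed number of MSI-X vectors */

static unsigned char cpu_msix_table[NR_CPUS_ONLINE];

/* Spread the online CPUs across the available reply queues (simple
 * round robin here; the driver groups neighbouring CPUs instead). */
static void assign_reply_queues(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS_ONLINE; cpu++)
		cpu_msix_table[cpu] = cpu % REPLY_QUEUES;
}

/* On submission, the MSIxIndex field of the request descriptor is taken
 * from the submitting CPU's table entry, so the completion is posted to
 * the reply queue whose vector belongs to that CPU. */
static unsigned char get_msix_index(int cpu)
{
	return cpu_msix_table[cpu];
}

int main(void)
{
	int cpu;

	assign_reply_queues();
	for (cpu = 0; cpu < NR_CPUS_ONLINE; cpu++)
		printf("cpu %d -> msix index %u\n", cpu, get_msix_index(cpu));
	return 0;
}

As noted in point (7), completions only land on the submitting CPU if the administrator also pins each vector, for example by writing a CPU mask to /proc/irq/<vector>/smp_affinity for the IRQs the driver registers under the names mpt2sas<id>-msix<index>.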
Diffstat (limited to 'drivers/scsi/mpt2sas')
 drivers/scsi/mpt2sas/mpt2sas_base.c  | 447
 drivers/scsi/mpt2sas/mpt2sas_base.h  |  42
 drivers/scsi/mpt2sas/mpt2sas_ctl.c   |  28
 drivers/scsi/mpt2sas/mpt2sas_scsih.c |   1
 4 files changed, 401 insertions, 117 deletions
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 83035bd1c489..ef323e9a3e19 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -834,25 +834,31 @@ union reply_descriptor {
 static irqreturn_t
 _base_interrupt(int irq, void *bus_id)
 {
+	struct adapter_reply_queue *reply_q = bus_id;
 	union reply_descriptor rd;
 	u32 completed_cmds;
 	u8 request_desript_type;
 	u16 smid;
 	u8 cb_idx;
 	u32 reply;
-	u8 msix_index;
-	struct MPT2SAS_ADAPTER *ioc = bus_id;
+	u8 msix_index = reply_q->msix_index;
+	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
 	Mpi2ReplyDescriptorsUnion_t *rpf;
 	u8 rc;
 
 	if (ioc->mask_interrupts)
 		return IRQ_NONE;
 
-	rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
+	if (!atomic_add_unless(&reply_q->busy, 1, 1))
+		return IRQ_NONE;
+
+	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
 	request_desript_type = rpf->Default.ReplyFlags
 	    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+		atomic_dec(&reply_q->busy);
 		return IRQ_NONE;
+	}
 
 	completed_cmds = 0;
 	cb_idx = 0xFF;
@@ -861,9 +867,7 @@ _base_interrupt(int irq, void *bus_id)
 		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
 			goto out;
 		reply = 0;
-		cb_idx = 0xFF;
 		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
-		msix_index = rpf->Default.MSIxIndex;
 		if (request_desript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
 			reply = le32_to_cpu
@@ -907,32 +911,86 @@ _base_interrupt(int irq, void *bus_id)
  next:
 
 		rpf->Words = cpu_to_le64(ULLONG_MAX);
-		ioc->reply_post_host_index = (ioc->reply_post_host_index ==
+		reply_q->reply_post_host_index =
+		    (reply_q->reply_post_host_index ==
 		    (ioc->reply_post_queue_depth - 1)) ? 0 :
-		    ioc->reply_post_host_index + 1;
+		    reply_q->reply_post_host_index + 1;
 		request_desript_type =
-		    ioc->reply_post_free[ioc->reply_post_host_index].Default.
-		    ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+		    reply_q->reply_post_free[reply_q->reply_post_host_index].
+		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 		completed_cmds++;
 		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 			goto out;
-		if (!ioc->reply_post_host_index)
-			rpf = ioc->reply_post_free;
+		if (!reply_q->reply_post_host_index)
+			rpf = reply_q->reply_post_free;
 		else
 			rpf++;
 	} while (1);
 
  out:
 
-	if (!completed_cmds)
+	if (!completed_cmds) {
+		atomic_dec(&reply_q->busy);
 		return IRQ_NONE;
-
+	}
 	wmb();
-	writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
+	if (ioc->is_warpdrive) {
+		writel(reply_q->reply_post_host_index,
+		    ioc->reply_post_host_index[msix_index]);
+		atomic_dec(&reply_q->busy);
+		return IRQ_HANDLED;
+	}
+	writel(reply_q->reply_post_host_index | (msix_index <<
+	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+	atomic_dec(&reply_q->busy);
 	return IRQ_HANDLED;
 }
 
 /**
+ * _base_is_controller_msix_enabled - does the controller support multi-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
+{
+	return (ioc->facts.IOCCapabilities &
+	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR context
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to OS before we process the TM completion.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q;
+
+	/* If MSIX capability is turned off
+	 * then multi-queues are not enabled
+	 */
+	if (!_base_is_controller_msix_enabled(ioc))
+		return;
+
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		if (ioc->shost_recovery)
+			return;
+		/* TMs are on msix_index == 0 */
+		if (reply_q->msix_index == 0)
+			continue;
+		_base_interrupt(reply_q->vector, (void *)reply_q);
+	}
+}
+
+/**
  * mpt2sas_base_release_callback_handler - clear interrupt callback handler
  * @cb_idx: callback index
  *
@@ -1082,74 +1140,171 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
 }
 
 /**
- * _base_save_msix_table - backup msix vector table
+ * _base_check_enable_msix - checks MSIX capable.
  * @ioc: per adapter object
  *
- * This address an errata where diag reset clears out the table
+ * Check to see if card is capable of MSIX, and set number
+ * of available msix vectors
  */
-static void
-_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
+static int
+_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
 {
-	int i;
+	int base;
+	u16 message_control;
 
-	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
-		return;
 
-	for (i = 0; i < ioc->msix_vector_count; i++)
-		ioc->msix_table_backup[i] = ioc->msix_table[i];
+	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+	if (!base) {
+		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
+		    "supported\n", ioc->name));
+		return -EINVAL;
+	}
+
+	/* get msix vector count */
+	/* NUMA_IO not supported for older controllers */
+	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
+		ioc->msix_vector_count = 1;
+	else {
+		pci_read_config_word(ioc->pdev, base + 2, &message_control);
+		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+	}
+	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
+	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
+
+	return 0;
 }
 
 /**
- * _base_restore_msix_table - this restores the msix vector table
+ * _base_free_irq - free irq
  * @ioc: per adapter object
  *
+ * Freeing respective reply_queue from the list.
  */
 static void
-_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
+_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
 {
-	int i;
+	struct adapter_reply_queue *reply_q, *next;
 
-	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+	if (list_empty(&ioc->reply_queue_list))
 		return;
 
-	for (i = 0; i < ioc->msix_vector_count; i++)
-		ioc->msix_table[i] = ioc->msix_table_backup[i];
+	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+		list_del(&reply_q->list);
+		synchronize_irq(reply_q->vector);
+		free_irq(reply_q->vector, reply_q);
+		kfree(reply_q);
+	}
 }
 
 /**
- * _base_check_enable_msix - checks MSIX capabable.
+ * _base_request_irq - request irq
  * @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
  *
- * Check to see if card is capable of MSIX, and set number
- * of available msix vectors
+ * Inserting respective reply_queue into the list.
  */
 static int
-_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
 {
-	int base;
-	u16 message_control;
-	u32 msix_table_offset;
+	struct adapter_reply_queue *reply_q;
+	int r;
 
-	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
-	if (!base) {
-		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
-		    "supported\n", ioc->name));
-		return -EINVAL;
+	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+	if (!reply_q) {
+		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
+		    ioc->name, (int)sizeof(struct adapter_reply_queue));
+		return -ENOMEM;
+	}
+	reply_q->ioc = ioc;
+	reply_q->msix_index = index;
+	reply_q->vector = vector;
+	atomic_set(&reply_q->busy, 0);
+	if (ioc->msix_enable)
+		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+		    MPT2SAS_DRIVER_NAME, ioc->id, index);
+	else
+		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+		    MPT2SAS_DRIVER_NAME, ioc->id);
+	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+	    reply_q);
+	if (r) {
+		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
+		    reply_q->name, vector);
+		kfree(reply_q);
+		return -EBUSY;
 	}
 
-	/* get msix vector count */
-	pci_read_config_word(ioc->pdev, base + 2, &message_control);
-	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+	INIT_LIST_HEAD(&reply_q->list);
+	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+	return 0;
+}
 
-	/* get msix table */
-	pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
-	msix_table_offset &= 0xFFFFFFF8;
-	ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The end user would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would be nice if we could call irq_set_affinity, however it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q;
+	int cpu_id;
+	int cpu_grouping, loop, grouping, grouping_mod;
 
-	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
-	    "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
-	    ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
-	return 0;
+	if (!_base_is_controller_msix_enabled(ioc))
+		return;
+
+	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+	/* when there are more cpus than available msix vectors,
+	 * then group cpus together on same irq
+	 */
+	if (ioc->cpu_count > ioc->msix_vector_count) {
+		grouping = ioc->cpu_count / ioc->msix_vector_count;
+		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+		if (grouping < 2 || (grouping == 2 && !grouping_mod))
+			cpu_grouping = 2;
+		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+			cpu_grouping = 4;
+		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+			cpu_grouping = 8;
+		else
+			cpu_grouping = 16;
+	} else
+		cpu_grouping = 0;
+
+	loop = 0;
+	reply_q = list_entry(ioc->reply_queue_list.next,
+	    struct adapter_reply_queue, list);
+	for_each_online_cpu(cpu_id) {
+		if (!cpu_grouping) {
+			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+			reply_q = list_entry(reply_q->list.next,
+			    struct adapter_reply_queue, list);
+		} else {
+			if (loop < cpu_grouping) {
+				ioc->cpu_msix_table[cpu_id] =
+				    reply_q->msix_index;
+				loop++;
+			} else {
+				reply_q = list_entry(reply_q->list.next,
+				    struct adapter_reply_queue, list);
+				ioc->cpu_msix_table[cpu_id] =
+				    reply_q->msix_index;
+				loop = 1;
+			}
+		}
+	}
 }
 
 /**
@@ -1162,8 +1317,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
 {
 	if (ioc->msix_enable) {
 		pci_disable_msix(ioc->pdev);
-		kfree(ioc->msix_table_backup);
-		ioc->msix_table_backup = NULL;
 		ioc->msix_enable = 0;
 	}
 }
@@ -1176,10 +1329,13 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
 static int
 _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
 {
-	struct msix_entry entries;
+	struct msix_entry *entries, *a;
 	int r;
+	int i;
 	u8 try_msix = 0;
 
+	INIT_LIST_HEAD(&ioc->reply_queue_list);
+
 	if (msix_disable == -1 || msix_disable == 0)
 		try_msix = 1;
 
@@ -1189,51 +1345,48 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
 	if (_base_check_enable_msix(ioc) != 0)
 		goto try_ioapic;
 
-	ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
-	    sizeof(u32), GFP_KERNEL);
-	if (!ioc->msix_table_backup) {
-		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
-		    "msix_table_backup failed!!!\n", ioc->name));
+	ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+	    ioc->msix_vector_count);
+
+	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+	    GFP_KERNEL);
+	if (!entries) {
+		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
+		    "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
+		    __LINE__, __func__));
 		goto try_ioapic;
 	}
 
-	memset(&entries, 0, sizeof(struct msix_entry));
-	r = pci_enable_msix(ioc->pdev, &entries, 1);
+	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+		a->entry = i;
+
+	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
 	if (r) {
 		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
 		    "failed (r=%d) !!!\n", ioc->name, r));
+		kfree(entries);
 		goto try_ioapic;
 	}
 
-	r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
-	    ioc->name, ioc);
-	if (r) {
-		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
-		    "interrupt %d !!!\n", ioc->name, entries.vector));
-		pci_disable_msix(ioc->pdev);
-		goto try_ioapic;
+	ioc->msix_enable = 1;
+	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+		r = _base_request_irq(ioc, i, a->vector);
+		if (r) {
+			_base_free_irq(ioc);
+			_base_disable_msix(ioc);
+			kfree(entries);
+			goto try_ioapic;
+		}
 	}
 
-	ioc->pci_irq = entries.vector;
-	ioc->msix_enable = 1;
+	kfree(entries);
 	return 0;
 
 /* failback to io_apic interrupt routing */
  try_ioapic:
 
-	r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
-	    ioc->name, ioc);
-	if (r) {
-		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
-		    ioc->name, ioc->pdev->irq);
-		r = -EBUSY;
-		goto out_fail;
-	}
+	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
 
-	ioc->pci_irq = ioc->pdev->irq;
-	return 0;
-
- out_fail:
 	return r;
 }
 
@@ -1252,6 +1405,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	int i, r = 0;
 	u64 pio_chip = 0;
 	u64 chip_phys = 0;
+	struct adapter_reply_queue *reply_q;
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
 	    ioc->name, __func__));
@@ -1314,9 +1468,11 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	if (r)
 		goto out_fail;
 
-	printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
-	    ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
-	    "IO-APIC enabled"), ioc->pci_irq);
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+		printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
+		    reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+		    "IO-APIC enabled"), reply_q->vector);
+
 	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
 	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
 	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
@@ -1331,7 +1487,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	if (ioc->chip_phys)
 		iounmap(ioc->chip);
 	ioc->chip_phys = 0;
-	ioc->pci_irq = -1;
 	pci_release_selected_regions(ioc->pdev, ioc->bars);
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
@@ -1578,6 +1733,12 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
 }
 #endif
 
+static inline u8
+_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
+{
+	return ioc->cpu_msix_table[smp_processor_id()];
+}
+
 /**
  * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
  * @ioc: per adapter object
@@ -1594,7 +1755,7 @@ mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
 
 
 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex = 0; /* TODO */
+	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -1618,7 +1779,7 @@ mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 
 	descriptor.HighPriority.RequestFlags =
 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
-	descriptor.HighPriority.MSIxIndex = 0; /* TODO */
+	descriptor.HighPriority.MSIxIndex = 0;
 	descriptor.HighPriority.SMID = cpu_to_le16(smid);
 	descriptor.HighPriority.LMID = 0;
 	descriptor.HighPriority.Reserved1 = 0;
@@ -1640,7 +1801,7 @@ mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 	u64 *request = (u64 *)&descriptor;
 
 	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
-	descriptor.Default.MSIxIndex = 0; /* TODO */
+	descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
 	descriptor.Default.SMID = cpu_to_le16(smid);
 	descriptor.Default.LMID = 0;
 	descriptor.Default.DescriptorTypeDependent = 0;
@@ -1665,7 +1826,7 @@ mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
 
 	descriptor.SCSITarget.RequestFlags =
 	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
-	descriptor.SCSITarget.MSIxIndex = 0; /* TODO */
+	descriptor.SCSITarget.MSIxIndex = _base_get_msix_index(ioc);
 	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
 	descriptor.SCSITarget.LMID = 0;
 	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
@@ -2172,7 +2333,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	u16 max_sge_elements;
 	u16 num_of_reply_frames;
 	u16 chains_needed_per_io;
-	u32 sz, total_sz;
+	u32 sz, total_sz, reply_post_free_sz;
 	u32 retry_sz;
 	u16 max_request_credit;
 	int i;
@@ -2499,7 +2660,12 @@ chain_done:
 	total_sz += sz;
 
 	/* reply post queue, 16 byte align */
-	sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t);
+	reply_post_free_sz = ioc->reply_post_queue_depth *
+	    sizeof(Mpi2DefaultReplyDescriptor_t);
+	if (_base_is_controller_msix_enabled(ioc))
+		sz = reply_post_free_sz * ioc->reply_queue_count;
+	else
+		sz = reply_post_free_sz;
 	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
 	    ioc->pdev, sz, 16, 0);
 	if (!ioc->reply_post_free_dma_pool) {
@@ -3187,6 +3353,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
 	facts->WhoInit = mpi_reply.WhoInit;
 	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
 	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
 	facts->MaxReplyDescriptorPostQueueDepth =
 	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
@@ -3244,7 +3411,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
 	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
 
-
+	if (_base_is_controller_msix_enabled(ioc))
+		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
 	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
 	mpi_request.ReplyDescriptorPostQueueDepth =
 	    cpu_to_le16(ioc->reply_post_queue_depth);
@@ -3513,9 +3681,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	u32 hcb_size;
 
 	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
-
-	_base_save_msix_table(ioc);
-
 	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
 	    ioc->name));
 
@@ -3611,7 +3776,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 		goto out;
 	}
 
-	_base_restore_msix_table(ioc);
 	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
 	return 0;
 
@@ -3692,6 +3856,9 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	u16 smid;
 	struct _tr_list *delayed_tr, *delayed_tr_next;
 	u8 hide_flag;
+	struct adapter_reply_queue *reply_q;
+	long reply_post_free;
+	u32 reply_post_free_sz;
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -3757,19 +3924,43 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	    ioc->reply_sz)
 		ioc->reply_free[i] = cpu_to_le32(reply_address);
 
+	/* initialize reply queues */
+	_base_assign_reply_queues(ioc);
+
 	/* initialize Reply Post Free Queue */
-	for (i = 0; i < ioc->reply_post_queue_depth; i++)
-		ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX);
+	reply_post_free = (long)ioc->reply_post_free;
+	reply_post_free_sz = ioc->reply_post_queue_depth *
+	    sizeof(Mpi2DefaultReplyDescriptor_t);
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		reply_q->reply_post_host_index = 0;
+		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+		    reply_post_free;
+		for (i = 0; i < ioc->reply_post_queue_depth; i++)
+			reply_q->reply_post_free[i].Words =
+			    cpu_to_le64(ULLONG_MAX);
+		if (!_base_is_controller_msix_enabled(ioc))
+			goto skip_init_reply_post_free_queue;
+		reply_post_free += reply_post_free_sz;
+	}
+ skip_init_reply_post_free_queue:
 
 	r = _base_send_ioc_init(ioc, sleep_flag);
 	if (r)
 		return r;
 
-	/* initialize the index's */
+	/* initialize reply free host index */
 	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
-	ioc->reply_post_host_index = 0;
 	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
-	writel(0, &ioc->chip->ReplyPostHostIndex);
+
+	/* initialize reply post host index */
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+		    &ioc->chip->ReplyPostHostIndex);
+		if (!_base_is_controller_msix_enabled(ioc))
+			goto skip_init_reply_post_host_index;
+	}
+
+ skip_init_reply_post_host_index:
 
 	_base_unmask_interrupts(ioc);
 	r = _base_event_notification(ioc, sleep_flag);
@@ -3820,14 +4011,10 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
 	ioc->shost_recovery = 1;
 	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
 	ioc->shost_recovery = 0;
-	if (ioc->pci_irq) {
-		synchronize_irq(pdev->irq);
-		free_irq(ioc->pci_irq, ioc);
-	}
+	_base_free_irq(ioc);
 	_base_disable_msix(ioc);
 	if (ioc->chip_phys)
 		iounmap(ioc->chip);
-	ioc->pci_irq = -1;
 	ioc->chip_phys = 0;
 	pci_release_selected_regions(ioc->pdev, ioc->bars);
 	pci_disable_pcie_error_reporting(pdev);
@@ -3845,14 +4032,50 @@ int
 mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 {
 	int r, i;
+	int cpu_id, last_cpu_id = 0;
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
 
+	/* setup cpu_msix_table */
+	ioc->cpu_count = num_online_cpus();
+	for_each_online_cpu(cpu_id)
+		last_cpu_id = cpu_id;
+	ioc->cpu_msix_table_sz = last_cpu_id + 1;
+	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+	ioc->reply_queue_count = 1;
+	if (!ioc->cpu_msix_table) {
+		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
+		    "cpu_msix_table failed!!!\n", ioc->name));
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+
+	if (ioc->is_warpdrive) {
+		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
+		    sizeof(resource_size_t *), GFP_KERNEL);
+		if (!ioc->reply_post_host_index) {
+			dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
+			    "for cpu_msix_table failed!!!\n", ioc->name));
+			r = -ENOMEM;
+			goto out_free_resources;
+		}
+	}
+
 	r = mpt2sas_base_map_resources(ioc);
 	if (r)
 		return r;
 
+	if (ioc->is_warpdrive) {
+		ioc->reply_post_host_index[0] =
+		    (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
+
+		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+			ioc->reply_post_host_index[i] = (resource_size_t *)
+			    ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+			    * 4)));
+	}
+
 	pci_set_drvdata(ioc->pdev, ioc->shost);
 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
 	if (r)
@@ -3973,6 +4196,9 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 	mpt2sas_base_free_resources(ioc);
 	_base_release_memory_pools(ioc);
 	pci_set_drvdata(ioc->pdev, NULL);
+	kfree(ioc->cpu_msix_table);
+	if (ioc->is_warpdrive)
+		kfree(ioc->reply_post_host_index);
 	kfree(ioc->pd_handles);
 	kfree(ioc->tm_cmds.reply);
 	kfree(ioc->transport_cmds.reply);
@@ -4010,6 +4236,9 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
 	mpt2sas_base_free_resources(ioc);
 	_base_release_memory_pools(ioc);
 	pci_set_drvdata(ioc->pdev, NULL);
+	kfree(ioc->cpu_msix_table);
+	if (ioc->is_warpdrive)
+		kfree(ioc->reply_post_host_index);
 	kfree(ioc->pd_handles);
 	kfree(ioc->pfacts);
 	kfree(ioc->ctl_cmds.reply);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 8d5be2120c63..051da4989d84 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -544,6 +544,28 @@ struct _tr_list {
 
 typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
 
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: this list
+ */
+struct adapter_reply_queue {
+	struct MPT2SAS_ADAPTER *ioc;
+	u8 msix_index;
+	unsigned int vector;
+	u32 reply_post_host_index;
+	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+	char name[MPT_NAME_LENGTH];
+	atomic_t busy;
+	struct list_head list;
+};
+
 /* IOC Facts and Port Facts converted from little endian to cpu */
 union mpi2_version_union {
 	MPI2_VERSION_STRUCT Struct;
@@ -606,7 +628,7 @@ enum mutex_type {
  * @list: ioc_list
  * @shost: shost object
  * @id: unique adapter id
- * @pci_irq: irq number
+ * @cpu_count: number of online cpus
  * @name: generic ioc string
  * @tmp_string: tmp string used for logging
  * @pdev: pci pdev object
@@ -636,8 +658,8 @@ enum mutex_type {
  * @wait_for_port_enable_to_complete:
  * @msix_enable: flag indicating msix is enabled
  * @msix_vector_count: number msix vectors
- * @msix_table: virt address to the msix table
- * @msix_table_backup: backup msix table
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
  * @scsih_cb_idx: scsih internal commands
@@ -728,7 +750,8 @@ enum mutex_type {
  * @reply_post_queue_depth: reply post queue depth
  * @reply_post_free: pool for reply post (64bit descriptor)
  * @reply_post_free_dma:
- * @reply_post_free_dma_pool:
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
  * @reply_post_host_index: head index in the pool where FW completes IO
  * @delayed_tr_list: target reset link list
  * @delayed_tr_volume_list: volume target reset link list
@@ -737,7 +760,7 @@ struct MPT2SAS_ADAPTER {
 	struct list_head list;
 	struct Scsi_Host *shost;
 	u8 id;
-	u32 pci_irq;
+	int cpu_count;
 	char name[MPT_NAME_LENGTH];
 	char tmp_string[MPT_STRING_LENGTH];
 	struct pci_dev *pdev;
@@ -779,8 +802,9 @@ struct MPT2SAS_ADAPTER {
 
 	u8 msix_enable;
 	u16 msix_vector_count;
-	u32 *msix_table;
-	u32 *msix_table_backup;
+	u8 *cpu_msix_table;
+	resource_size_t **reply_post_host_index;
+	u16 cpu_msix_table_sz;
 	u32 ioc_reset_count;
 
 	/* internal commands, callback index */
@@ -911,7 +935,8 @@ struct MPT2SAS_ADAPTER {
 	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
 	dma_addr_t reply_post_free_dma;
 	struct dma_pool *reply_post_free_dma_pool;
-	u32 reply_post_host_index;
+	u8 reply_queue_count;
+	struct list_head reply_queue_list;
 
 	struct list_head delayed_tr_list;
 	struct list_head delayed_tr_volume_list;
@@ -955,6 +980,7 @@ void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
 void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
 __le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
 	u16 smid);
+void mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc);
 
 /* hi-priority queue */
 u16 mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 38ed0260959d..bf70f95f19ce 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -2705,6 +2705,33 @@ _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
 static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
 	_ctl_ioc_reset_count_show, NULL);
 
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	u8 reply_queue_count;
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+	if ((ioc->facts.IOCCapabilities &
+	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+		reply_queue_count = ioc->reply_queue_count;
+	else
+		reply_queue_count = 1;
+	return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO,
+	_ctl_ioc_reply_queue_count_show, NULL);
+
 struct DIAG_BUFFER_START {
 	__le32 Size;
 	__le32 DiagVersion;
@@ -2915,6 +2942,7 @@ struct device_attribute *mpt2sas_host_attrs[] = {
 	&dev_attr_host_trace_buffer_size,
 	&dev_attr_host_trace_buffer,
 	&dev_attr_host_trace_buffer_enable,
+	&dev_attr_reply_queue_count,
 	NULL,
 };
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 6bb3986094d3..7c762b9dda54 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2162,6 +2162,7 @@ _scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 		return 1;
 	if (ioc->tm_cmds.smid != smid)
 		return 1;
+	mpt2sas_base_flush_reply_queues(ioc);
 	ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
 	if (mpi_reply) {