Diffstat (limited to 'drivers/scsi/stex.c')
-rw-r--r--	drivers/scsi/stex.c	288
1 file changed, 196 insertions(+), 92 deletions(-)
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 09fa8861fc58..9c73dbda3bbb 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/pci.h>
 #include <linux/blkdev.h>
@@ -36,11 +37,11 @@
 #include <scsi/scsi_eh.h>
 
 #define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "4.6.0000.3"
+#define ST_DRIVER_VERSION "4.6.0000.4"
 #define ST_VER_MAJOR 4
 #define ST_VER_MINOR 6
 #define ST_OEM 0
-#define ST_BUILD_VER 3
+#define ST_BUILD_VER 4
 
 enum {
 	/* MU register offset */
@@ -64,24 +65,24 @@ enum {
 	YH2I_REQ_HI = 0xc4,
 
 	/* MU register value */
-	MU_INBOUND_DOORBELL_HANDSHAKE = 1,
-	MU_INBOUND_DOORBELL_REQHEADCHANGED = 2,
-	MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4,
-	MU_INBOUND_DOORBELL_HMUSTOPPED = 8,
-	MU_INBOUND_DOORBELL_RESET = 16,
+	MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0),
+	MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1),
+	MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2),
+	MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3),
+	MU_INBOUND_DOORBELL_RESET = (1 << 4),
 
-	MU_OUTBOUND_DOORBELL_HANDSHAKE = 1,
-	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2,
-	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4,
-	MU_OUTBOUND_DOORBELL_BUSCHANGE = 8,
-	MU_OUTBOUND_DOORBELL_HASEVENT = 16,
+	MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0),
+	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1),
+	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2),
+	MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3),
+	MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4),
+	MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27),
 
 	/* MU status code */
 	MU_STATE_STARTING = 1,
-	MU_STATE_FMU_READY_FOR_HANDSHAKE = 2,
-	MU_STATE_SEND_HANDSHAKE_FRAME = 3,
-	MU_STATE_STARTED = 4,
-	MU_STATE_RESETTING = 5,
+	MU_STATE_STARTED = 2,
+	MU_STATE_RESETTING = 3,
+	MU_STATE_FAILED = 4,
 
 	MU_MAX_DELAY = 120,
 	MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
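Note on the hunk above: the doorbell values are now spelled as explicit bit positions instead of magic numbers, a new (1 << 27) bit lets the firmware ask the host to reset itself, and the MU state machine is trimmed to STARTING/STARTED/RESETTING plus a new FAILED state. As a rough, self-contained illustration (not taken from stex.c; the names below are made up), flags declared this way are combined and tested like so:

#include <stdio.h>

enum {
	DOORBELL_HANDSHAKE     = (1 << 0),
	DOORBELL_HASEVENT      = (1 << 4),
	DOORBELL_REQUEST_RESET = (1 << 27),	/* hypothetical stand-in */
};

int main(void)
{
	unsigned int data = DOORBELL_HASEVENT | DOORBELL_REQUEST_RESET;

	if (data & DOORBELL_REQUEST_RESET)
		printf("firmware asked the host to reset\n");
	return 0;
}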
@@ -111,6 +112,8 @@ enum {
 
 	SS_H2I_INT_RESET = 0x100,
 
+	SS_I2H_REQUEST_RESET = 0x2000,
+
 	SS_MU_OPERATIONAL = 0x80000000,
 
 	STEX_CDB_LENGTH = 16,
@@ -160,6 +163,7 @@ enum {
 	INQUIRY_EVPD = 0x01,
 
 	ST_ADDITIONAL_MEM = 0x200000,
+	ST_ADDITIONAL_MEM_MIN = 0x80000,
 };
 
 struct st_sgitem {
@@ -311,6 +315,10 @@ struct st_hba {
 	struct st_ccb *wait_ccb;
 	__le32 *scratch;
 
+	char work_q_name[20];
+	struct workqueue_struct *work_q;
+	struct work_struct reset_work;
+	wait_queue_head_t reset_waitq;
 	unsigned int mu_status;
 	unsigned int cardtype;
 	int msi_enabled;
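The new st_hba members form the deferred-reset plumbing used by the rest of the patch: a name buffer and a dedicated workqueue, the work item that the interrupt handlers queue, and a waitqueue that reset callers sleep on. A minimal sketch of how such a trio is typically declared and wired up, using a hypothetical example_hba that the later sketches on this page reuse (none of it is stex.c code):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hypothetical adapter structure carrying the same reset plumbing. */
struct example_hba {
	spinlock_t lock;			/* stands in for host->host_lock */
	unsigned int mu_status;			/* MU_STATE_* value */
	void __iomem *mmio_base;
	struct workqueue_struct *work_q;	/* dedicated reset workqueue */
	struct work_struct reset_work;		/* queued from hard-IRQ context */
	wait_queue_head_t reset_waitq;		/* resets in flight sleep here */
};

static void example_reset_work(struct work_struct *work)
{
	struct example_hba *hba =
		container_of(work, struct example_hba, reset_work);

	/* ... run the reset; finish by updating hba->mu_status under the
	 * lock and calling wake_up_all(&hba->reset_waitq) ... */
	(void)hba;
}

static int example_init_reset_path(struct example_hba *hba)
{
	spin_lock_init(&hba->lock);
	init_waitqueue_head(&hba->reset_waitq);
	INIT_WORK(&hba->reset_work, example_reset_work);
	hba->work_q = create_singlethread_workqueue("example_wq");
	return hba->work_q ? 0 : -ENOMEM;
}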
@@ -577,6 +585,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 	lun = cmd->device->lun;
 	hba = (struct st_hba *) &host->hostdata[0];
 
+	if (unlikely(hba->mu_status == MU_STATE_RESETTING))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	switch (cmd->cmnd[0]) {
 	case MODE_SENSE_10:
 	{
@@ -613,6 +624,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 	}
 		break;
 	case INQUIRY:
+		if (lun >= host->max_lun) {
+			cmd->result = DID_NO_CONNECT << 16;
+			done(cmd);
+			return 0;
+		}
 		if (id != host->max_id - 1)
 			break;
 		if (!lun && !cmd->device->channel &&
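Both additions are early exits from ->queuecommand(): while a reset is in flight the command is pushed back to the midlayer with SCSI_MLQUEUE_HOST_BUSY, and an INQUIRY aimed at a LUN beyond max_lun is completed immediately with DID_NO_CONNECT. A rough sketch of that shape, against the old two-argument queuecommand prototype used in this driver and the hypothetical example_hba sketched above:

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Sketch only: the two early-return shapes added above, in isolation. */
static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct example_hba *hba = (struct example_hba *)&host->hostdata[0];

	if (unlikely(hba->mu_status == MU_STATE_RESETTING))
		return SCSI_MLQUEUE_HOST_BUSY;	/* ask the midlayer to retry */

	if (cmd->cmnd[0] == INQUIRY && cmd->device->lun >= host->max_lun) {
		cmd->result = DID_NO_CONNECT << 16;	/* LUN cannot exist */
		done(cmd);
		return 0;
	}

	/* ... normal path: build the firmware request and post it ... */
	return 0;
}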
@@ -841,7 +857,6 @@ static irqreturn_t stex_intr(int irq, void *__hba)
 	void __iomem *base = hba->mmio_base;
 	u32 data;
 	unsigned long flags;
-	int handled = 0;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 
@@ -852,12 +867,16 @@ static irqreturn_t stex_intr(int irq, void *__hba)
 		writel(data, base + ODBL);
 		readl(base + ODBL); /* flush */
 		stex_mu_intr(hba, data);
-		handled = 1;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
+			hba->cardtype == st_shasta))
+			queue_work(hba->work_q, &hba->reset_work);
+		return IRQ_HANDLED;
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	return IRQ_RETVAL(handled);
+	return IRQ_NONE;
 }
 
 static void stex_ss_mu_intr(struct st_hba *hba)
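The interrupt handler now drops the host lock, queues the reset work and returns IRQ_HANDLED directly instead of funnelling everything through a local handled flag; the reset itself sleeps, so it cannot run in hard-IRQ context. A stripped-down sketch of that split (register offset and bit name are hypothetical, the example_hba is the one sketched earlier):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define EX_ODBL			0x20		/* hypothetical doorbell offset */
#define EX_DB_REQUEST_RESET	(1 << 27)	/* hypothetical reset-request bit */

/* Sketch: ack and complete requests under the lock, defer the reset. */
static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct example_hba *hba = dev_id;
	unsigned long flags;
	u32 data;

	spin_lock_irqsave(&hba->lock, flags);
	data = readl(hba->mmio_base + EX_ODBL);
	if (data == 0 || data == 0xffffffff) {
		spin_unlock_irqrestore(&hba->lock, flags);
		return IRQ_NONE;		/* interrupt was not ours */
	}
	writel(data, hba->mmio_base + EX_ODBL);	/* acknowledge */
	readl(hba->mmio_base + EX_ODBL);	/* flush the posted write */
	/* ... walk the status ring and complete finished commands ... */
	spin_unlock_irqrestore(&hba->lock, flags);

	if (unlikely(data & EX_DB_REQUEST_RESET))
		queue_work(hba->work_q, &hba->reset_work);

	return IRQ_HANDLED;
}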
@@ -939,7 +958,6 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba)
 	void __iomem *base = hba->mmio_base;
 	u32 data;
 	unsigned long flags;
-	int handled = 0;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 
@@ -948,12 +966,15 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba)
 		/* clear the interrupt */
 		writel(data, base + YI2H_INT_C);
 		stex_ss_mu_intr(hba);
-		handled = 1;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		if (unlikely(data & SS_I2H_REQUEST_RESET))
+			queue_work(hba->work_q, &hba->reset_work);
+		return IRQ_HANDLED;
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	return IRQ_RETVAL(handled);
+	return IRQ_NONE;
 }
 
 static int stex_common_handshake(struct st_hba *hba)
@@ -1001,7 +1022,7 @@ static int stex_common_handshake(struct st_hba *hba)
 	h->partner_type = HMU_PARTNER_TYPE;
 	if (hba->extra_offset) {
 		h->extra_offset = cpu_to_le32(hba->extra_offset);
-		h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
+		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
 	} else
 		h->extra_offset = h->extra_size = 0;
 
@@ -1046,7 +1067,7 @@ static int stex_ss_handshake(struct st_hba *hba)
 	struct st_msg_header *msg_h;
 	struct handshake_frame *h;
 	__le32 *scratch;
-	u32 data;
+	u32 data, scratch_size;
 	unsigned long before;
 	int ret = 0;
 
@@ -1074,13 +1095,16 @@ static int stex_ss_handshake(struct st_hba *hba)
 	stex_gettime(&h->hosttime);
 	h->partner_type = HMU_PARTNER_TYPE;
 	h->extra_offset = h->extra_size = 0;
-	h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32));
+	scratch_size = (hba->sts_count+1)*sizeof(u32);
+	h->scratch_size = cpu_to_le32(scratch_size);
 
 	data = readl(base + YINT_EN);
 	data &= ~4;
 	writel(data, base + YINT_EN);
 	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
+	readl(base + YH2I_REQ_HI);
 	writel(hba->dma_handle, base + YH2I_REQ);
+	readl(base + YH2I_REQ); /* flush */
 
 	scratch = hba->scratch;
 	before = jiffies;
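The added readl() calls are the usual way to flush posted MMIO writes: reading back from the same BAR forces the preceding writel() out to the device before the host starts polling the scratch area for the firmware's reply. A small, self-contained sketch of the idiom with hypothetical register offsets:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EX_REQ_HI	0xc4	/* hypothetical high-address register */
#define EX_REQ		0xc0	/* hypothetical low-address register */

/* Sketch: hand a 64-bit DMA address to the device through two 32-bit
 * registers, reading each back so the posted writes reach the hardware
 * before the caller starts polling for a reply. */
static void example_post_dma_addr(void __iomem *base, dma_addr_t handle)
{
	writel(upper_32_bits(handle), base + EX_REQ_HI);
	readl(base + EX_REQ_HI);
	writel(lower_32_bits(handle), base + EX_REQ);
	readl(base + EX_REQ);	/* flush */
}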
@@ -1096,7 +1120,7 @@ static int stex_ss_handshake(struct st_hba *hba)
 		msleep(1);
 	}
 
-	*scratch = 0;
+	memset(scratch, 0, scratch_size);
 	msg_h->flag = 0;
 	return ret;
 }
@@ -1105,19 +1129,24 @@ static int stex_handshake(struct st_hba *hba)
 {
 	int err;
 	unsigned long flags;
+	unsigned int mu_status;
 
 	err = (hba->cardtype == st_yel) ?
 		stex_ss_handshake(hba) : stex_common_handshake(hba);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	mu_status = hba->mu_status;
 	if (err == 0) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
 		hba->req_head = 0;
 		hba->req_tail = 0;
 		hba->status_head = 0;
 		hba->status_tail = 0;
 		hba->out_req_cnt = 0;
 		hba->mu_status = MU_STATE_STARTED;
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-	}
+	} else
+		hba->mu_status = MU_STATE_FAILED;
+	if (mu_status == MU_STATE_RESETTING)
+		wake_up_all(&hba->reset_waitq);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	return err;
 }
 
@@ -1137,17 +1166,11 @@ static int stex_abort(struct scsi_cmnd *cmd)
 
 	base = hba->mmio_base;
 	spin_lock_irqsave(host->host_lock, flags);
-	if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
+	if (tag < host->can_queue &&
+		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
 		hba->wait_ccb = &hba->ccb[tag];
-	else {
-		for (tag = 0; tag < host->can_queue; tag++)
-			if (hba->ccb[tag].cmd == cmd) {
-				hba->wait_ccb = &hba->ccb[tag];
-				break;
-			}
-		if (tag >= host->can_queue)
-			goto out;
-	}
+	else
+		goto out;
 
 	if (hba->cardtype == st_yel) {
 		data = readl(base + YI2H_INT);
@@ -1221,6 +1244,37 @@ static void stex_hard_reset(struct st_hba *hba)
 			hba->pdev->saved_config_space[i]);
 }
 
+static int stex_yos_reset(struct st_hba *hba)
+{
+	void __iomem *base;
+	unsigned long flags, before;
+	int ret = 0;
+
+	base = hba->mmio_base;
+	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
+	readl(base + IDBL); /* flush */
+	before = jiffies;
+	while (hba->out_req_cnt > 0) {
+		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): reset timeout\n", pci_name(hba->pdev));
+			ret = -1;
+			break;
+		}
+		msleep(1);
+	}
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (ret == -1)
+		hba->mu_status = MU_STATE_FAILED;
+	else
+		hba->mu_status = MU_STATE_STARTED;
+	wake_up_all(&hba->reset_waitq);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return ret;
+}
+
 static void stex_ss_reset(struct st_hba *hba)
 {
 	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
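The new stex_yos_reset() keeps the old yosemite behaviour, ring the reset doorbell and poll out_req_cnt down to zero, but now records the outcome in mu_status and wakes reset_waitq. Its polling loop is the standard jiffies-based timeout idiom; a generic, self-contained sketch of that idiom (condition and timeout are placeholders):

#include <linux/delay.h>
#include <linux/jiffies.h>

/* Sketch: poll some_condition() for up to timeout_secs seconds, sleeping
 * 1 ms between checks.  Returns 0 on success, -1 on timeout. */
static int example_poll(int (*some_condition)(void), unsigned int timeout_secs)
{
	unsigned long before = jiffies;

	while (!some_condition()) {
		if (time_after(jiffies, before + timeout_secs * HZ))
			return -1;	/* gave up */
		msleep(1);
	}
	return 0;
}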
@@ -1228,66 +1282,86 @@ static void stex_ss_reset(struct st_hba *hba)
 	ssleep(5);
 }
 
-static int stex_reset(struct scsi_cmnd *cmd)
+static int stex_do_reset(struct st_hba *hba)
 {
-	struct st_hba *hba;
-	void __iomem *base;
-	unsigned long flags, before;
+	struct st_ccb *ccb;
+	unsigned long flags;
+	unsigned int mu_status = MU_STATE_RESETTING;
+	u16 tag;
 
-	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->mu_status == MU_STATE_STARTING) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
+			pci_name(hba->pdev));
+		return 0;
+	}
+	while (hba->mu_status == MU_STATE_RESETTING) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		wait_event_timeout(hba->reset_waitq,
+				   hba->mu_status != MU_STATE_RESETTING,
+				   MU_MAX_DELAY * HZ);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		mu_status = hba->mu_status;
+	}
 
-	printk(KERN_INFO DRV_NAME
-		"(%s): resetting host\n", pci_name(hba->pdev));
-	scsi_print_command(cmd);
+	if (mu_status != MU_STATE_RESETTING) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
+	}
 
 	hba->mu_status = MU_STATE_RESETTING;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (hba->cardtype == st_yosemite)
+		return stex_yos_reset(hba);
 
 	if (hba->cardtype == st_shasta)
 		stex_hard_reset(hba);
 	else if (hba->cardtype == st_yel)
 		stex_ss_reset(hba);
 
-	if (hba->cardtype != st_yosemite) {
-		if (stex_handshake(hba)) {
-			printk(KERN_WARNING DRV_NAME
-				"(%s): resetting: handshake failed\n",
-				pci_name(hba->pdev));
-			return FAILED;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	for (tag = 0; tag < hba->host->can_queue; tag++) {
+		ccb = &hba->ccb[tag];
+		if (ccb->req == NULL)
+			continue;
+		ccb->req = NULL;
+		if (ccb->cmd) {
+			scsi_dma_unmap(ccb->cmd);
+			ccb->cmd->result = DID_RESET << 16;
+			ccb->cmd->scsi_done(ccb->cmd);
+			ccb->cmd = NULL;
 		}
-		return SUCCESS;
 	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	/* st_yosemite */
-	writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL);
-	readl(hba->mmio_base + IDBL); /* flush */
-	before = jiffies;
-	while (hba->out_req_cnt > 0) {
-		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
-			printk(KERN_WARNING DRV_NAME
-				"(%s): reset timeout\n", pci_name(hba->pdev));
-			return FAILED;
-		}
-		msleep(1);
-	}
+	if (stex_handshake(hba) == 0)
+		return 0;
 
-	base = hba->mmio_base;
-	writel(0, base + IMR0);
-	readl(base + IMR0);
-	writel(0, base + OMR0);
-	readl(base + OMR0);
-	writel(0, base + IMR1);
-	readl(base + IMR1);
-	writel(0, base + OMR1);
-	readl(base + OMR1); /* flush */
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->req_head = 0;
-	hba->req_tail = 0;
-	hba->status_head = 0;
-	hba->status_tail = 0;
-	hba->out_req_cnt = 0;
-	hba->mu_status = MU_STATE_STARTED;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	return SUCCESS;
+	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
+		pci_name(hba->pdev));
+	return -1;
+}
+
+static int stex_reset(struct scsi_cmnd *cmd)
+{
+	struct st_hba *hba;
+
+	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+
+	printk(KERN_INFO DRV_NAME
+		"(%s): resetting host\n", pci_name(hba->pdev));
+	scsi_print_command(cmd);
+
+	return stex_do_reset(hba) ? FAILED : SUCCESS;
+}
+
+static void stex_reset_work(struct work_struct *work)
+{
+	struct st_hba *hba = container_of(work, struct st_hba, reset_work);
+
+	stex_do_reset(hba);
 }
 
 static int stex_biosparam(struct scsi_device *sdev,
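stex_do_reset() becomes the single entry point for both the midlayer's host-reset handler and the firmware-requested reset queued from interrupt context, so concurrent callers must be serialized: a caller that finds mu_status already at MU_STATE_RESETTING sleeps on reset_waitq until the reset in flight finishes. A stripped-down sketch of that wait-or-claim logic, reusing the hypothetical example_hba and the driver's MU_STATE_* values (not the driver's actual code):

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Sketch: let only one caller own the reset; later callers wait for it. */
static int example_do_reset(struct example_hba *hba)
{
	unsigned long flags;
	bool waited = false;

	spin_lock_irqsave(&hba->lock, flags);
	while (hba->mu_status == MU_STATE_RESETTING) {
		spin_unlock_irqrestore(&hba->lock, flags);
		wait_event_timeout(hba->reset_waitq,
				   hba->mu_status != MU_STATE_RESETTING,
				   60 * HZ);
		waited = true;
		spin_lock_irqsave(&hba->lock, flags);
	}
	if (waited) {
		/* someone else ran the reset; just report its outcome */
		int ok = (hba->mu_status == MU_STATE_STARTED);

		spin_unlock_irqrestore(&hba->lock, flags);
		return ok ? 0 : -1;
	}
	hba->mu_status = MU_STATE_RESETTING;	/* we own the reset now */
	spin_unlock_irqrestore(&hba->lock, flags);

	/* ... do the reset; on completion set STARTED or FAILED under the
	 * lock and wake_up_all(&hba->reset_waitq) so the waiters see it ... */
	return 0;
}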
@@ -1420,8 +1494,8 @@ static int stex_set_dma_mask(struct pci_dev * pdev)
 {
 	int ret;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
 		return 0;
 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (!ret)
@@ -1528,10 +1602,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
 		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
 	if (!hba->dma_mem) {
-		err = -ENOMEM;
-		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
-			pci_name(pdev));
-		goto out_iounmap;
+		/* Retry minimum coherent mapping for st_seq and st_vsc */
+		if (hba->cardtype == st_seq ||
+		    (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): allocating min buffer for controller\n",
+				pci_name(pdev));
+			hba->dma_size = hba->extra_offset
+				+ ST_ADDITIONAL_MEM_MIN;
+			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
+				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
+		}
+
+		if (!hba->dma_mem) {
+			err = -ENOMEM;
+			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
+				pci_name(pdev));
+			goto out_iounmap;
+		}
 	}
 
 	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
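The retry only fires for st_seq, and for st_vsc parts whose subsystem ID has bit 0 set, and it shrinks the extra region from ST_ADDITIONAL_MEM (2 MiB) to ST_ADDITIONAL_MEM_MIN (512 KiB) before calling dma_alloc_coherent() a second time. A generic sketch of that try-big-then-fall-back allocation (names and sizes are placeholders, not stex.c code):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: try the preferred coherent buffer size first, then a minimum. */
static void *example_alloc_coherent(struct device *dev, size_t want,
				    size_t minimum, size_t *got,
				    dma_addr_t *handle)
{
	void *buf = dma_alloc_coherent(dev, want, handle, GFP_KERNEL);

	if (!buf && minimum < want) {
		dev_warn(dev, "falling back to %zu byte buffer\n", minimum);
		want = minimum;
		buf = dma_alloc_coherent(dev, want, handle, GFP_KERNEL);
	}
	if (buf)
		*got = want;	/* caller frees with dma_free_coherent(dev, *got, ...) */
	return buf;
}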
@@ -1568,12 +1656,24 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	hba->host = host;
 	hba->pdev = pdev;
+	init_waitqueue_head(&hba->reset_waitq);
+
+	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
+		 "stex_wq_%d", host->host_no);
+	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
+	if (!hba->work_q) {
+		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
+			pci_name(pdev));
+		err = -ENOMEM;
+		goto out_ccb_free;
+	}
+	INIT_WORK(&hba->reset_work, stex_reset_work);
 
 	err = stex_request_irq(hba);
 	if (err) {
 		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
 			pci_name(pdev));
-		goto out_ccb_free;
+		goto out_free_wq;
 	}
 
 	err = stex_handshake(hba);
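Because the workqueue is created after the ccb array but before the IRQ is requested, the unwind chain gains a matching out_free_wq label between out_free_irq and out_ccb_free (see the next hunk), and a request_irq failure now jumps to it. A minimal sketch of that ordering, with hypothetical helpers standing in for the driver's own:

#include <linux/errno.h>
#include <linux/workqueue.h>

int example_request_irq(struct example_hba *hba);	/* hypothetical */
int example_handshake(struct example_hba *hba);		/* hypothetical */
void example_free_irq(struct example_hba *hba);		/* hypothetical */

/* Sketch: acquire resources in order, undo them in reverse on failure. */
static int example_probe_tail(struct example_hba *hba)
{
	int err;

	hba->work_q = create_singlethread_workqueue("example_wq");
	if (!hba->work_q)
		return -ENOMEM;
	INIT_WORK(&hba->reset_work, example_reset_work);

	err = example_request_irq(hba);
	if (err)
		goto out_free_wq;

	err = example_handshake(hba);
	if (err)
		goto out_free_irq;

	return 0;

out_free_irq:
	example_free_irq(hba);		/* falls through: reverse order */
out_free_wq:
	destroy_workqueue(hba->work_q);
	return err;
}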
@@ -1602,6 +1702,8 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 out_free_irq:
 	stex_free_irq(hba);
+out_free_wq:
+	destroy_workqueue(hba->work_q);
 out_ccb_free:
 	kfree(hba->ccb);
 out_pci_free:
@@ -1669,6 +1771,8 @@ static void stex_hba_free(struct st_hba *hba)
 {
 	stex_free_irq(hba);
 
+	destroy_workqueue(hba->work_q);
+
 	iounmap(hba->mmio_base);
 
 	pci_release_regions(hba->pdev);