Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 10
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 2
-rw-r--r--  drivers/scsi/arm/acornscsi-io.S | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 14
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 11
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 13
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h | 1
-rw-r--r--  drivers/scsi/esp_scsi.h | 3
-rw-r--r--  drivers/scsi/gdth.c | 60
-rw-r--r--  drivers/scsi/gdth.h | 2
-rw-r--r--  drivers/scsi/gdth_proc.c | 66
-rw-r--r--  drivers/scsi/gdth_proc.h | 3
-rw-r--r--  drivers/scsi/hosts.c | 2
-rw-r--r--  drivers/scsi/hptiop.c | 8
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 37
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 4
-rw-r--r--  drivers/scsi/ide-scsi.c | 133
-rw-r--r--  drivers/scsi/ipr.c | 3
-rw-r--r--  drivers/scsi/ips.c | 3
-rw-r--r--  drivers/scsi/ips.h | 1
-rw-r--r--  drivers/scsi/libiscsi.c | 19
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 10
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 125
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 10
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 4
-rw-r--r--  drivers/scsi/nsp32.c | 1
-rw-r--r--  drivers/scsi/nsp32.h | 1
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 1
-rw-r--r--  drivers/scsi/qla1280.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 20
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 71
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 30
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 32
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 338
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 4
-rw-r--r--  drivers/scsi/qlogicpti.c | 147
-rw-r--r--  drivers/scsi/qlogicpti.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 105
-rw-r--r--  drivers/scsi/scsi_error.c | 92
-rw-r--r--  drivers/scsi/scsi_lib.c | 59
-rw-r--r--  drivers/scsi/scsi_netlink.c | 523
-rw-r--r--  drivers/scsi/scsi_priv.h | 7
-rw-r--r--  drivers/scsi/scsi_proc.c | 8
-rw-r--r--  drivers/scsi/scsi_scan.c | 23
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 8
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 8
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 60
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 8
-rw-r--r--  drivers/scsi/sd.c | 122
-rw-r--r--  drivers/scsi/ses.c | 18
-rw-r--r--  drivers/scsi/sg.c | 684
-rw-r--r--  drivers/scsi/sr.c | 7
-rw-r--r--  drivers/scsi/sun_esp.c | 267
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/scsi/tmscsim.c | 4
69 files changed, 1965 insertions(+), 1366 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c7f06298bd3c..d3b211af4e1c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -63,7 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
 config BLK_DEV_SD
 	tristate "SCSI disk support"
 	depends on SCSI
-	select CRC_T10DIF
+	select CRC_T10DIF if BLK_DEV_INTEGRITY
 	---help---
 	  If you want to use SCSI hard disks, Fibre Channel disks,
 	  Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
@@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
 	  To compile this driver as a module, choose M here: the
 	  module will be called qlogicfas.

-config SCSI_QLOGIC_FC_FIRMWARE
-	bool "Include loadable firmware in driver"
-	depends on SCSI_QLOGIC_FC
-	help
-	  Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
-	  expanded LUN addressing and FcTape (FCP-2) support, in the
-	  qlogicfc driver. This is required on some platforms.
-
 config SCSI_QLOGIC_1280
 	tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c25273..8abfd06b5a72 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
 	srbcmd->id   = cpu_to_le32(scmd_id(cmd));
 	srbcmd->lun  = cpu_to_le32(cmd->device->lun);
 	srbcmd->flags = cpu_to_le32(flag);
-	timeout = cmd->timeout_per_command/HZ;
+	timeout = cmd->request->timeout/HZ;
 	if (timeout == 0)
 		timeout = 1;
 	srbcmd->timeout = cpu_to_le32(timeout);  // timeout in seconds
diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S
index 5cebe3105260..22171b2110a8 100644
--- a/drivers/scsi/arm/acornscsi-io.S
+++ b/drivers/scsi/arm/acornscsi-io.S
@@ -8,7 +8,7 @@
 #include <linux/linkage.h>

 #include <asm/assembler.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>

 #if defined(__APCS_32__)
 #define LOADREGS(t,r,l...)	ldm##t	r, l
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fcdd73f25625..708e475896b9 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -425,7 +425,7 @@ static int alua_check_sense(struct scsi_device *sdev,
 		/*
 		 * LUN Not Accessible - ALUA state transition
 		 */
-		return NEEDS_RETRY;
+		return ADD_TO_MLQUEUE;
 	if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
 		/*
 		 * LUN Not Accessible -- Target port in standby state
@@ -447,18 +447,18 @@ static int alua_check_sense(struct scsi_device *sdev,
 		/*
 		 * Power On, Reset, or Bus Device Reset, just retry.
 		 */
-		return NEEDS_RETRY;
+		return ADD_TO_MLQUEUE;
 	if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
 		/*
 		 * ALUA state changed
 		 */
-		return NEEDS_RETRY;
+		return ADD_TO_MLQUEUE;
 	}
 	if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
 		/*
 		 * Implicit ALUA state transition failed
 		 */
-		return NEEDS_RETRY;
+		return ADD_TO_MLQUEUE;
 	}
 	break;
 }
@@ -490,7 +490,7 @@ static int alua_stpg(struct scsi_device *sdev, int state,
 	if (!err)
 		return SCSI_DH_IO;
 	err = alua_check_sense(sdev, &sense_hdr);
-	if (retry > 0 && err == NEEDS_RETRY) {
+	if (retry > 0 && err == ADD_TO_MLQUEUE) {
 		retry--;
 		goto retry;
 	}
@@ -535,7 +535,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 		return SCSI_DH_IO;

 	err = alua_check_sense(sdev, &sense_hdr);
-	if (err == NEEDS_RETRY)
+	if (err == ADD_TO_MLQUEUE)
 		goto retry;
 	sdev_printk(KERN_INFO, sdev,
 		    "%s: rtpg sense code %02x/%02x/%02x\n",
@@ -680,7 +680,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)

 }

-const struct scsi_dh_devlist alua_dev_list[] = {
+static const struct scsi_dh_devlist alua_dev_list[] = {
 	{"HP", "MSA VOLUME" },
 	{"HP", "HSV101" },
 	{"HP", "HSV111" },
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index aa46b131b20e..8f45570a8a01 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -84,7 +84,7 @@ struct clariion_dh_data {
 	/*
 	 * I/O buffer for both MODE_SELECT and INQUIRY commands.
 	 */
-	char buffer[CLARIION_BUFFER_SIZE];
+	unsigned char buffer[CLARIION_BUFFER_SIZE];
 	/*
 	 * SCSI sense buffer for commands -- assumes serial issuance
 	 * and completion sequence of all commands for same multipath.
@@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
 		err = SCSI_DH_DEV_TEMP_BUSY;
 		goto out;
 	}
-	if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
+	if (csdev->buffer[4] > 2) {
 		/* Invalid buffer format */
 		sdev_printk(KERN_NOTICE, sdev,
 			    "%s: invalid VPD page 0xC0 format\n",
@@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
 		return NULL;
 	}

-	memset(rq->cmd, 0, BLK_MAX_CDB);
 	rq->cmd_len = COMMAND_SIZE(cmd);
 	rq->cmd[0] = cmd;

@@ -439,7 +438,7 @@ static int clariion_check_sense(struct scsi_device *sdev,
 		 * Unit Attention Code. This is the first IO
 		 * to the new path, so just retry.
 		 */
-		return NEEDS_RETRY;
+		return ADD_TO_MLQUEUE;
 		break;
 	}

@@ -514,7 +513,7 @@ retry:
 		return SCSI_DH_IO;

 	err = clariion_check_sense(sdev, &sshdr);
-	if (retry > 0 && err == NEEDS_RETRY) {
+	if (retry > 0 && err == ADD_TO_MLQUEUE) {
 		retry--;
 		goto retry;
 	}
@@ -562,7 +561,7 @@ done:
 	return result;
 }

-const struct scsi_dh_devlist clariion_dev_list[] = {
+static const struct scsi_dh_devlist clariion_dev_list[] = {
 	{"DGC", "RAID"},
 	{"DGC", "DISK"},
 	{"DGC", "VRAID"},
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9c7a1f8ebb72..5e93c88ad66b 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -114,7 +114,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->cmd_flags |= REQ_FAILFAST;
 	req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
-	memset(req->cmd, 0, MAX_COMMAND_SIZE);
 	req->cmd[0] = TEST_UNIT_READY;
 	req->timeout = HP_SW_TIMEOUT;
 	req->sense = h->sense;
@@ -207,7 +206,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->cmd_flags |= REQ_FAILFAST;
 	req->cmd_len = COMMAND_SIZE(START_STOP);
-	memset(req->cmd, 0, MAX_COMMAND_SIZE);
 	req->cmd[0] = START_STOP;
 	req->cmd[4] = 1;	/* Start spin cycle */
 	req->timeout = HP_SW_TIMEOUT;
@@ -282,7 +280,7 @@ static int hp_sw_activate(struct scsi_device *sdev)
 	return ret;
 }

-const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
+static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
 	{"COMPAQ", "MSA1000 VOLUME"},
 	{"COMPAQ", "HSV110"},
 	{"HP", "HSV100"},
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b093a501f8ae..50bf95f3b5c4 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -225,8 +225,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 		return NULL;
 	}

-	memset(rq->cmd, 0, BLK_MAX_CDB);
-
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
 	rq->retries = RDAC_RETRIES;
@@ -376,7 +374,7 @@ static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
 		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
 		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
 			return SCSI_DH_NOSYS;
-		h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
+		h->lun = inqp->lun[7]; /* Uses only the last byte */
 	}
 	return err;
 }
@@ -386,6 +384,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
 	int err;
 	struct c9_inquiry *inqp;

+	h->lun_state = RDAC_LUN_UNOWNED;
 	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
 	if (err == SCSI_DH_OK) {
 		inqp = &h->inq.c9;
@@ -550,7 +549,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
 		 *
 		 * Just retry and wait.
 		 */
-		return NEEDS_RETRY;
+		return ADD_TO_MLQUEUE;
 		break;
 	case ILLEGAL_REQUEST:
 		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
@@ -567,14 +566,14 @@ static int rdac_check_sense(struct scsi_device *sdev,
 		/*
 		 * Power On, Reset, or Bus Device Reset, just retry.
 		 */
-		return NEEDS_RETRY;
+		return ADD_TO_MLQUEUE;
 		break;
 	}
 	/* success just means we do not care what scsi-ml does */
 	return SCSI_RETURN_NOT_HANDLED;
 }

-const struct scsi_dh_devlist rdac_dev_list[] = {
+static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"IBM", "1722"},
 	{"IBM", "1724"},
 	{"IBM", "1726"},
@@ -589,6 +588,8 @@ const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"STK", "OPENstorage D280"},
 	{"SUN", "CSM200_R"},
 	{"SUN", "LCSM100_F"},
+	{"DELL", "MD3000"},
+	{"DELL", "MD3000i"},
 	{NULL, NULL},
 };

diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 19406cea6d6a..179ad77f6cc9 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -21,7 +21,6 @@

 #include <linux/i2o-dev.h>

-#include <linux/version.h>
 #include <linux/notifier.h>
 #include <asm/atomic.h>

diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index bb43a1388188..28e22acf87ea 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -521,7 +521,8 @@ struct esp {

 	struct completion *eh_reset;

-	struct sbus_dma *dma;
+	void *dma;
+	int dmarev;
 };

 /* A front-end driver for the ESP chip should do the following in
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692b..c387c15a2128 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,

 	/* use request field to save the ptr. to completion struct. */
 	scp->request = (struct request *)&wait;
-	scp->timeout_per_command = timeout*HZ;
 	scp->cmd_len = 12;
 	scp->cmnd = cmnd;
 	cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
 	register Scsi_Cmnd *pscp;
 	register Scsi_Cmnd *nscp;
 	ulong flags;
-	unchar b, t;

 	TRACE(("gdth_putq() priority %d\n",priority));
 	spin_lock_irqsave(&ha->smp_lock, flags);

-	if (!cmndinfo->internal_command) {
+	if (!cmndinfo->internal_command)
 		cmndinfo->priority = priority;
-		b = scp->device->channel;
-		t = scp->device->id;
-		if (priority >= DEFAULT_PRI) {
-			if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
-			    (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
-				TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
-				cmndinfo->timeout = gdth_update_timeout(scp, 0);
-			}
-		}
-	}

 	if (ha->req_first==NULL) {
 		ha->req_first = scp;	/* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
 	return ((const char *)ha->binfo.type_string);
 }

+static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
+{
+	gdth_ha_str *ha = shost_priv(scp->device->host);
+	struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+	unchar b, t;
+	ulong flags;
+	enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+
+	TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
+	b = scp->device->channel;
+	t = scp->device->id;
+
+	/*
+	 * We don't really honor the command timeout, but we try to
+	 * honor 6 times of the actual command timeout! So reset the
+	 * timer if this is less than 6th timeout on this command!
+	 */
+	if (++cmndinfo->timeout_count < 6)
+		retval = BLK_EH_RESET_TIMER;
+
+	/* Reset the timeout if it is locked IO */
+	spin_lock_irqsave(&ha->smp_lock, flags);
+	if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
+	    (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
+		TRACE2(("%s(): locked IO, reset timeout\n", __func__));
+		retval = BLK_EH_RESET_TIMER;
+	}
+	spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+	return retval;
+}
+
+
 static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
 {
 	gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
 	BUG_ON(!cmndinfo);

 	scp->scsi_done = done;
-	gdth_update_timeout(scp, scp->timeout_per_command * 6);
+	cmndinfo->timeout_count = 0;
 	cmndinfo->priority = DEFAULT_PRI;

 	return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
 			ha->hdr[j].lock = 1;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
 			gdth_wait_completion(ha, ha->bus_cnt, j);
-			gdth_stop_timeout(ha, ha->bus_cnt, j);
 		} else {
 			spin_lock_irqsave(&ha->smp_lock, flags);
 			ha->hdr[j].lock = 0;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
-			gdth_start_timeout(ha, ha->bus_cnt, j);
 			gdth_next(ha);
 		}
 	}
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
 			spin_lock_irqsave(&ha->smp_lock, flags);
 			ha->raw[i].lock = 1;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
-			for (j = 0; j < ha->tid_cnt; ++j) {
+			for (j = 0; j < ha->tid_cnt; ++j)
 				gdth_wait_completion(ha, i, j);
-				gdth_stop_timeout(ha, i, j);
-			}
 		} else {
 			spin_lock_irqsave(&ha->smp_lock, flags);
 			ha->raw[i].lock = 0;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
-			for (j = 0; j < ha->tid_cnt; ++j) {
-				gdth_start_timeout(ha, i, j);
+			for (j = 0; j < ha->tid_cnt; ++j)
 				gdth_next(ha);
-			}
 		}
 	}
 	break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
 	.slave_configure	= gdth_slave_configure,
 	.bios_param		= gdth_bios_param,
 	.proc_info		= gdth_proc_info,
+	.eh_timed_out		= gdth_timed_out,
 	.proc_name		= "gdth",
 	.can_queue		= GDTH_MAXCMDS,
 	.this_id		= -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727cf..1646444e9bd5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
 	gdth_cmd_str *internal_cmd_str;	/* crier for internal messages*/
 	dma_addr_t sense_paddr;		/* sense dma-addr */
 	unchar priority;
-	int timeout;
+	int timeout_count;		/* # of timeout calls */
 	volatile int wait_for_completion;
 	ushort status;
 	ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26aec..59349a316e13 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
 	}
 	spin_unlock_irqrestore(&ha->smp_lock, flags);
 }
-
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-	ulong flags;
-	Scsi_Cmnd *scp;
-	unchar b, t;
-
-	spin_lock_irqsave(&ha->smp_lock, flags);
-
-	for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-		struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-		if (!cmndinfo->internal_command) {
-			b = scp->device->channel;
-			t = scp->device->id;
-			if (t == (unchar)id && b == (unchar)busnum) {
-				TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
-				cmndinfo->timeout = gdth_update_timeout(scp, 0);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-	ulong flags;
-	Scsi_Cmnd *scp;
-	unchar b, t;
-
-	spin_lock_irqsave(&ha->smp_lock, flags);
-
-	for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-		struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-		if (!cmndinfo->internal_command) {
-			b = scp->device->channel;
-			t = scp->device->id;
-			if (t == (unchar)id && b == (unchar)busnum) {
-				TRACE2(("gdth_start_timeout(): update_timeout()\n"));
-				gdth_update_timeout(scp, cmndinfo->timeout);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
-{
-	int oldto;
-
-	oldto = scp->timeout_per_command;
-	scp->timeout_per_command = timeout;
-
-	if (timeout == 0) {
-		del_timer(&scp->eh_timeout);
-		scp->eh_timeout.data = (unsigned long) NULL;
-		scp->eh_timeout.expires = 0;
-	} else {
-		if (scp->eh_timeout.data != (unsigned long) NULL)
-			del_timer(&scp->eh_timeout);
-		scp->eh_timeout.data = (unsigned long) scp;
-		scp->eh_timeout.expires = jiffies + timeout;
-		add_timer(&scp->eh_timeout);
-	}
-
-	return oldto;
-}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36e..9b900cc9ebe8 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
 			      ulong64 *paddr);
 static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);

 #endif

diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fed0b02ebc1d..3fdbb13e80a8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
 struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
 {
 	struct device *cdev;
-	struct Scsi_Host *shost = ERR_PTR(-ENXIO);
+	struct Scsi_Host *shost = NULL;

 	cdev = class_find_device(&shost_class, NULL, &hostnum,
 				 __scsi_host_match);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index da876d3924be..a48e4990fe12 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/spinlock.h>
-#include <linux/hdreg.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/div64.h>
@@ -1249,6 +1248,13 @@ static struct pci_device_id hptiop_id_table[] = {
 	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
 	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
 	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ae560bc04f9d..4e0b7c8eb32e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -556,11 +556,12 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
 /**
  * ibmvfc_init_host - Start host initialization
  * @vhost:		ibmvfc host struct
+ * @relogin:	is this a re-login?
  *
  * Return value:
  *	nothing
  **/
-static void ibmvfc_init_host(struct ibmvfc_host *vhost)
+static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
 {
 	struct ibmvfc_target *tgt;

@@ -574,6 +575,11 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
 	}

 	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+		if (!relogin) {
+			memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+			vhost->async_crq.cur = 0;
+		}
+
 		list_for_each_entry(tgt, &vhost->targets, queue)
 			tgt->need_login = 1;
 		scsi_block_requests(vhost->host);
@@ -1059,9 +1065,10 @@ static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
 {
 	long timeout = wait_event_timeout(vhost->init_wait_q,
-					  (vhost->state == IBMVFC_ACTIVE ||
+					  ((vhost->state == IBMVFC_ACTIVE ||
 					   vhost->state == IBMVFC_HOST_OFFLINE ||
-					   vhost->state == IBMVFC_LINK_DEAD),
+					    vhost->state == IBMVFC_LINK_DEAD) &&
+					   vhost->action == IBMVFC_HOST_ACTION_NONE),
 					  (init_timeout * HZ));

 	return timeout ? 0 : -EIO;
@@ -1450,8 +1457,8 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
 	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
 	struct scsi_cmnd *cmnd = evt->cmnd;
-	int rsp_len = 0;
-	int sense_len = rsp->fcp_sense_len;
+	u32 rsp_len = 0;
+	u32 sense_len = rsp->fcp_sense_len;

 	if (cmnd) {
 		if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
@@ -1468,7 +1475,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 			rsp_len = rsp->fcp_rsp_len;
 		if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
 			sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
-		if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
+		if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
 			memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);

 		ibmvfc_log_error(evt);
@@ -2077,17 +2084,18 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 {
 	const char *desc = ibmvfc_get_ae_desc(crq->event);

-	ibmvfc_log(vhost, 3, "%s event received\n", desc);
+	ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx,"
+		   " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);

 	switch (crq->event) {
 	case IBMVFC_AE_LINK_UP:
 	case IBMVFC_AE_RESUME:
 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
-		ibmvfc_init_host(vhost);
+		ibmvfc_init_host(vhost, 1);
 		break;
 	case IBMVFC_AE_SCN_FABRIC:
 		vhost->events_to_log |= IBMVFC_AE_RSCN;
-		ibmvfc_init_host(vhost);
+		ibmvfc_init_host(vhost, 1);
 		break;
 	case IBMVFC_AE_SCN_NPORT:
 	case IBMVFC_AE_SCN_GROUP:
@@ -2133,13 +2141,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
 		/* Send back a response */
 		rc = ibmvfc_send_crq_init_complete(vhost);
 		if (rc == 0)
-			ibmvfc_init_host(vhost);
+			ibmvfc_init_host(vhost, 0);
 		else
 			dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
 		break;
 	case IBMVFC_CRQ_INIT_COMPLETE:
 		dev_info(vhost->dev, "Partner initialization complete\n");
-		ibmvfc_init_host(vhost);
+		ibmvfc_init_host(vhost, 0);
 		break;
 	default:
 		dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
@@ -3357,8 +3365,6 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
 	mad->buffer.va = vhost->login_buf_dma;
 	mad->buffer.len = sizeof(*vhost->login_buf);

-	memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
-	vhost->async_crq.cur = 0;
 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);

 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
@@ -3601,8 +3607,9 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		}
 	}

-	if (vhost->reinit) {
+	if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
 		vhost->reinit = 0;
+		scsi_block_requests(vhost->host);
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
 	} else {
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 4bf6e374f076..fb3177ab6691 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.1" 32#define IBMVFC_DRIVER_VERSION "1.0.2"
33#define IBMVFC_DRIVER_DATE "(July 11, 2008)" 33#define IBMVFC_DRIVER_DATE "(August 14, 2008)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 15 35#define IBMVFC_DEFAULT_TIMEOUT 15
36#define IBMVFC_INIT_TIMEOUT 30 36#define IBMVFC_INIT_TIMEOUT 30
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 6b24b9cdb04c..87e09f35d3d4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	init_event_struct(evt_struct,
 			  handle_cmd_rsp,
 			  VIOSRP_SRP_FORMAT,
-			  cmnd->timeout_per_command/HZ);
+			  cmnd->request->timeout/HZ);

 	evt_struct->cmnd = cmnd;
 	evt_struct->cmnd_done = done;
@@ -1636,7 +1636,7 @@ static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
 	unsigned long desired_io = max_requests * sizeof(union viosrp_iu);

 	/* add io space for sg data */
-	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT *
+	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
 			IBMVSCSI_CMDS_PER_LUN_DEFAULT);

 	return desired_io;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index b40a673985aa..90212ac33be3 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -40,7 +40,6 @@
 #include <linux/ioport.h>
 #include <linux/blkdev.h>
 #include <linux/errno.h>
-#include <linux/hdreg.h>
 #include <linux/slab.h>
 #include <linux/ide.h>
 #include <linux/scatterlist.h>
@@ -102,11 +101,10 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)
 	mutex_lock(&idescsi_ref_mutex);
 	scsi = ide_scsi_g(disk);
 	if (scsi) {
-		scsi_host_get(scsi->host);
-		if (ide_device_get(scsi->drive)) {
-			scsi_host_put(scsi->host);
+		if (ide_device_get(scsi->drive))
 			scsi = NULL;
-		}
+		else
+			scsi_host_get(scsi->host);
 	}
 	mutex_unlock(&idescsi_ref_mutex);
 	return scsi;
@@ -114,9 +112,11 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)

 static void ide_scsi_put(struct ide_scsi_obj *scsi)
 {
+	ide_drive_t *drive = scsi->drive;
+
 	mutex_lock(&idescsi_ref_mutex);
-	ide_device_put(scsi->drive);
 	scsi_host_put(scsi->host);
+	ide_device_put(drive);
 	mutex_unlock(&idescsi_ref_mutex);
 }

@@ -130,50 +130,6 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
 	return scsihost_to_idescsi(ide_drive->driver_data);
 }

-/*
- * PIO data transfer routine using the scatter gather table.
- */
-static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
-				unsigned int bcount, int write)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
-	xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
-	char *buf;
-	int count;
-
-	while (bcount) {
-		count = min(pc->sg->length - pc->b_count, bcount);
-		if (PageHighMem(sg_page(pc->sg))) {
-			unsigned long flags;
-
-			local_irq_save(flags);
-			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
-					  pc->sg->offset;
-			xf(drive, NULL, buf + pc->b_count, count);
-			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
-			local_irq_restore(flags);
-		} else {
-			buf = sg_virt(pc->sg);
-			xf(drive, NULL, buf + pc->b_count, count);
-		}
-		bcount -= count; pc->b_count += count;
-		if (pc->b_count == pc->sg->length) {
-			if (!--pc->sg_cnt)
-				break;
-			pc->sg = sg_next(pc->sg);
-			pc->b_count = 0;
-		}
-	}
-
-	if (bcount) {
-		printk(KERN_ERR "%s: scatter gather table too small, %s\n",
-				drive->name, write ? "padding with zeros"
-						   : "discarding data");
-		ide_pad_transfer(drive, write, bcount);
-	}
-}
-
 static void ide_scsi_hex_dump(u8 *data, int len)
 {
 	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, data, len, 0);
@@ -243,9 +199,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
 {
 	ide_hwif_t *hwif = drive->hwif;

-	if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
+	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
 		/* force an abort */
-		hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
+		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

 	rq->errors++;

@@ -343,7 +299,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)

 	return ide_pc_intr(drive, pc, idescsi_pc_intr, get_timeout(pc),
 			   idescsi_expiry, NULL, NULL, NULL,
-			   ide_scsi_io_buffers);
+			   ide_io_buffers);
 }

 static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
@@ -429,21 +385,41 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
 }

 #ifdef CONFIG_IDE_PROC_FS
-static void idescsi_add_settings(ide_drive_t *drive)
-{
-	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
-
-/*
- *	drive setting name	read/write	data type	min	max	mul_factor	div_factor	data pointer		set function
- */
-	ide_add_setting(drive, "bios_cyl",  SETTING_RW, TYPE_INT,  0, 1023, 1, 1, &drive->bios_cyl,  NULL);
-	ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0,  255, 1, 1, &drive->bios_head, NULL);
-	ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0,   63, 1, 1, &drive->bios_sect, NULL);
-	ide_add_setting(drive, "transform", SETTING_RW, TYPE_INT,  0,    3, 1, 1, &scsi->transform,  NULL);
-	ide_add_setting(drive, "log",       SETTING_RW, TYPE_INT,  0,    1, 1, 1, &scsi->log,        NULL);
-}
-#else
-static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
+#define ide_scsi_devset_get(name, field) \
+static int get_##name(ide_drive_t *drive) \
+{ \
+	idescsi_scsi_t *scsi = drive_to_idescsi(drive); \
+	return scsi->field; \
+}
+
+#define ide_scsi_devset_set(name, field) \
+static int set_##name(ide_drive_t *drive, int arg) \
+{ \
+	idescsi_scsi_t *scsi = drive_to_idescsi(drive); \
+	scsi->field = arg; \
+	return 0; \
+}
+
+#define ide_scsi_devset_rw_field(_name, _field) \
+ide_scsi_devset_get(_name, _field); \
+ide_scsi_devset_set(_name, _field); \
+IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name);
+
+ide_devset_rw_field(bios_cyl, bios_cyl);
+ide_devset_rw_field(bios_head, bios_head);
+ide_devset_rw_field(bios_sect, bios_sect);
+
+ide_scsi_devset_rw_field(transform, transform);
+ide_scsi_devset_rw_field(log, log);
+
+static const struct ide_proc_devset idescsi_settings[] = {
+	IDE_PROC_DEVSET(bios_cyl,  0, 1023),
+	IDE_PROC_DEVSET(bios_head, 0,  255),
+	IDE_PROC_DEVSET(bios_sect, 0,   63),
+	IDE_PROC_DEVSET(log,       0,    1),
+	IDE_PROC_DEVSET(transform, 0,    3),
+	{ 0 },
+};
 #endif

 /*
@@ -451,7 +427,7 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
  */
 static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
 {
-	if (drive->id && (drive->id->config & 0x0060) == 0x20)
+	if ((drive->id[ATA_ID_CONFIG] & 0x0060) == 0x20)
 		set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
 	clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
 #if IDESCSI_DEBUG_LOG
@@ -460,7 +436,7 @@ static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)

 	drive->pc_callback = ide_scsi_callback;

-	idescsi_add_settings(drive);
+	ide_proc_register_driver(drive, scsi->driver);
 }

 static void ide_scsi_remove(ide_drive_t *drive)
@@ -502,12 +478,12 @@ static ide_driver_t idescsi_driver = {
 	.remove			= ide_scsi_remove,
 	.version		= IDESCSI_VERSION,
 	.media			= ide_scsi,
-	.supports_dsc_overlap	= 0,
 	.do_request		= idescsi_do_request,
 	.end_request		= idescsi_end_request,
 	.error			= idescsi_atapi_error,
 #ifdef CONFIG_IDE_PROC_FS
 	.proc			= idescsi_proc,
+	.settings		= idescsi_settings,
 #endif
 };

@@ -611,7 +587,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
 	pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
 	pc->scsi_cmd = cmd;
 	pc->done = done;
-	pc->timeout = jiffies + cmd->timeout_per_command;
+	pc->timeout = jiffies + cmd->request->timeout;

 	if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
 		printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -810,6 +786,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
 	struct gendisk *g;
 	static int warned;
 	int err = -ENOMEM;
+	u16 last_lun;

 	if (!warned && drive->media == ide_cdrom) {
 		printk(KERN_WARNING "ide-scsi is deprecated for cd burning! Use ide-cd and give dev=/dev/hdX as device\n");
@@ -820,7 +797,6 @@ static int ide_scsi_probe(ide_drive_t *drive)
 		return -ENODEV;

 	if (!strstr("ide-scsi", drive->driver_req) ||
-	    !drive->present ||
 	    drive->media == ide_disk ||
 	    !(host = scsi_host_alloc(&idescsi_template,sizeof(idescsi_scsi_t))))
 		return -ENODEV;
@@ -835,12 +811,12 @@ static int ide_scsi_probe(ide_drive_t *drive)

 	host->max_id = 1;

-	if (drive->id->last_lun)
-		debug_log("%s: id->last_lun=%u\n", drive->name,
-			  drive->id->last_lun);
+	last_lun = drive->id[ATA_ID_LAST_LUN];
+	if (last_lun)
+		debug_log("%s: last_lun=%u\n", drive->name, last_lun);

-	if ((drive->id->last_lun & 0x7) != 7)
-		host->max_lun = (drive->id->last_lun & 0x7) + 1;
+	if ((last_lun & 7) != 7)
+		host->max_lun = (last_lun & 7) + 1;
 	else
 		host->max_lun = 1;

@@ -851,7 +827,6 @@ static int ide_scsi_probe(ide_drive_t *drive)
 	idescsi->host = host;
 	idescsi->disk = g;
 	g->private_data = &idescsi->driver;
-	ide_proc_register_driver(drive, &idescsi_driver);
 	err = 0;
 	idescsi_setup(drive, idescsi);
 	g->fops = &idescsi_ops;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a6554425..d30eb7ba018e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
 		sdev->no_uld_attach = 1;
 	}
 	if (ipr_is_vset_device(res)) {
-		sdev->timeout = IPR_VSET_RW_TIMEOUT;
+		blk_queue_rq_timeout(sdev->request_queue,
+				     IPR_VSET_RW_TIMEOUT);
 		blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
 	}
 	if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 7c615c70ec5c..ef683f0d2b5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -165,7 +165,6 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <linux/stddef.h>
-#include <linux/version.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -3819,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
 		scb->cmd.dcdb.segment_4G = 0;
 		scb->cmd.dcdb.enhanced_sg = 0;

-		TimeOut = scb->scsi_cmd->timeout_per_command;
+		TimeOut = scb->scsi_cmd->request->timeout;

 		if (ha->subsys->param[4] & 0x00100000) {	/* If NEW Tape DCDB is Supported */
 			if (!scb->sg_len) {
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index e0657b6f009c..4e49fbcfe8af 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -50,7 +50,6 @@
 #ifndef _IPS_H_
    #define _IPS_H_

-#include <linux/version.h>
 #include <linux/nmi.h>
    #include <asm/uaccess.h>
    #include <asm/io.h>
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b34..da7b67d30d9a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1456,7 +1456,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
 		if (lun == task->sc->device->lun || lun == -1) {
 			debug_scsi("failing in progress sc %p itt 0x%x\n",
 				   task->sc, task->itt);
-			fail_command(conn, task, DID_BUS_BUSY << 16);
+			fail_command(conn, task, error << 16);
 		}
 	}
 }
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 }

-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
-	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;

 	cls_session = starget_to_session(scsi_target(scmd->device));
 	session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 		 * We are probably in the middle of iscsi recovery so let
 		 * that complete and handle the error.
 		 */
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 		goto done;
 	}

 	conn = session->leadconn;
 	if (!conn) {
 		/* In the middle of shuting down */
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 		goto done;
 	}

@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 	 */
 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
 			    (conn->ping_timeout * HZ), jiffies))
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 	/*
 	 * if we are about to check the transport then give the command
 	 * more time
 	 */
 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
 			   jiffies))
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 	/* if in the middle of checking the transport then give us more time */
 	if (conn->ping_task)
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 done:
 	spin_unlock(&session->lock);
-	debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
+		   "timer reset" : "nh");
 	return rc;
 }

diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bdd..e15501170698 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -294,10 +294,10 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 	}
 }

-static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
 			      u32 val)
 {
-	struct domain_device *dev = ap->private_data;
+	struct domain_device *dev = link->ap->private_data;

 	SAS_DPRINTK("STUB %s\n", __func__);
 	switch (sc_reg_in) {
@@ -319,10 +319,10 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
 	return 0;
 }

-static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
 			    u32 *val)
 {
-	struct domain_device *dev = ap->private_data;
+	struct domain_device *dev = link->ap->private_data;

 	SAS_DPRINTK("STUB %s\n", __func__);
 	switch (sc_reg_in) {
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)

 	/* Bounce SCSI-initiated commands to the SCSI EH */
 	if (qc->scsicmd) {
-		scsi_req_abort_cmd(qc->scsicmd);
+		blk_abort_request(qc->scsicmd->request);
 		scsi_schedule_eh(qc->scsicmd->device->host);
 		return;
 	}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116a..0001374bd6b2 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
55int sas_register_ports(struct sas_ha_struct *sas_ha); 55int sas_register_ports(struct sas_ha_struct *sas_ha);
56void sas_unregister_ports(struct sas_ha_struct *sas_ha); 56void sas_unregister_ports(struct sas_ha_struct *sas_ha);
57 57
58enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); 58enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
59 59
60int sas_init_queue(struct sas_ha_struct *sas_ha); 60int sas_init_queue(struct sas_ha_struct *sas_ha);
61int sas_init_events(struct sas_ha_struct *sas_ha); 61int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef309070..744838780ada 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
673 return; 673 return;
674} 674}
675 675
676enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 676enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
677{ 677{
678 struct sas_task *task = TO_SAS_TASK(cmd); 678 struct sas_task *task = TO_SAS_TASK(cmd);
679 unsigned long flags; 679 unsigned long flags;
680 680
681 if (!task) { 681 if (!task) {
682 cmd->timeout_per_command /= 2; 682 cmd->request->timeout /= 2;
683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", 683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
684 cmd, task, (cmd->timeout_per_command ? 684 cmd, task, (cmd->request->timeout ?
685 "EH_RESET_TIMER" : "EH_NOT_HANDLED")); 685 "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
686 if (!cmd->timeout_per_command) 686 if (!cmd->request->timeout)
687 return EH_NOT_HANDLED; 687 return BLK_EH_NOT_HANDLED;
688 return EH_RESET_TIMER; 688 return BLK_EH_RESET_TIMER;
689 } 689 }
690 690
691 spin_lock_irqsave(&task->task_state_lock, flags); 691 spin_lock_irqsave(&task->task_state_lock, flags);
692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); 692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
693 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 693 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
694 spin_unlock_irqrestore(&task->task_state_lock, flags); 694 spin_unlock_irqrestore(&task->task_state_lock, flags);
695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
696 cmd, task); 696 "BLK_EH_HANDLED\n", cmd, task);
697 return EH_HANDLED; 697 return BLK_EH_HANDLED;
698 } 698 }
699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { 699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
700 spin_unlock_irqrestore(&task->task_state_lock, flags); 700 spin_unlock_irqrestore(&task->task_state_lock, flags);
701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " 701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
702 "EH_RESET_TIMER\n", 702 "BLK_EH_RESET_TIMER\n",
703 cmd, task); 703 cmd, task);
704 return EH_RESET_TIMER; 704 return BLK_EH_RESET_TIMER;
705 } 705 }
706 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 706 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
707 spin_unlock_irqrestore(&task->task_state_lock, flags); 707 spin_unlock_irqrestore(&task->task_state_lock, flags);
708 708
709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", 709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
710 cmd, task); 710 cmd, task);
711 711
712 return EH_NOT_HANDLED; 712 return BLK_EH_NOT_HANDLED;
713} 713}
714 714
715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
1039 return; 1039 return;
1040 } 1040 }
1041 1041
1042 scsi_req_abort_cmd(sc); 1042 blk_abort_request(sc->request);
1043 scsi_schedule_eh(sc->device->host); 1043 scsi_schedule_eh(sc->device->host);
1044} 1044}
1045 1045
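The sas_scsi_timed_out() conversion above keeps the same state machine and only switches to the block layer's return codes. A compact sketch of that mapping, using local stand-ins for the kernel enum and task flags rather than the real definitions, might look like:

#include <stdio.h>

enum eh_ret { EH_HANDLED, EH_RESET_TIMER, EH_NOT_HANDLED };

#define TASK_DONE          0x1
#define TASK_AT_INITIATOR  0x2

static enum eh_ret sas_timeout_decision(int have_task, unsigned int flags,
                                        unsigned long *req_timeout)
{
    if (!have_task) {
        *req_timeout /= 2;            /* mirrors cmd->request->timeout /= 2 */
        return *req_timeout ? EH_RESET_TIMER : EH_NOT_HANDLED;
    }
    if (flags & TASK_DONE)            /* completed while the timer fired */
        return EH_HANDLED;
    if (!(flags & TASK_AT_INITIATOR)) /* still with the LLDD: wait longer */
        return EH_RESET_TIMER;
    return EH_NOT_HANDLED;            /* mark aborted, hand off to SCSI EH */
}

int main(void)
{
    unsigned long tmo = 30;
    printf("%d\n", sas_timeout_decision(0, 0, &tmo));
    return 0;
}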
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 90272e65957a..094b47e94b29 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -27,7 +27,6 @@
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/version.h>
31 30
32#include <scsi/scsi.h> 31#include <scsi/scsi.h>
33#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index fc7ac158476c..afe1de998763 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.03.20-rc1 13 * Version : v00.00.04.01-rc1
14 * 14 *
15 * Authors: 15 * Authors:
16 * (email-id : megaraidlinux@lsi.com) 16 * (email-id : megaraidlinux@lsi.com)
@@ -71,6 +71,10 @@ static struct pci_device_id megasas_pci_table[] = {
71 /* ppc IOP */ 71 /* ppc IOP */
72 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, 72 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
73 /* ppc IOP */ 73 /* ppc IOP */
74 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
75 /* gen2*/
76 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
77 /* gen2*/
74 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 78 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
75 /* xscale IOP, vega */ 79 /* xscale IOP, vega */
76 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 80 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
@@ -198,6 +202,9 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
198 */ 202 */
199 writel(status, &regs->outbound_intr_status); 203 writel(status, &regs->outbound_intr_status);
200 204
205 /* Dummy readl to force pci flush */
206 readl(&regs->outbound_intr_status);
207
201 return 0; 208 return 0;
202} 209}
203 210
@@ -293,6 +300,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
293 */ 300 */
294 writel(status, &regs->outbound_doorbell_clear); 301 writel(status, &regs->outbound_doorbell_clear);
295 302
303 /* Dummy readl to force pci flush */
304 readl(&regs->outbound_doorbell_clear);
305
296 return 0; 306 return 0;
297} 307}
298/** 308/**
@@ -318,6 +328,99 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
318}; 328};
319 329
320/** 330/**
331* The following functions are defined for gen2 (deviceid : 0x78 0x79)
332* controllers
333*/
334
335/**
336 * megasas_enable_intr_gen2 - Enables interrupts
337 * @regs: MFI register set
338 */
339static inline void
340megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
341{
342 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
343
344 /* write ~0x00000005 (4 & 1) to the intr mask*/
345 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
346
347 /* Dummy readl to force pci flush */
348 readl(&regs->outbound_intr_mask);
349}
350
351/**
352 * megasas_disable_intr_gen2 - Disables interrupt
353 * @regs: MFI register set
354 */
355static inline void
356megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs)
357{
358 u32 mask = 0xFFFFFFFF;
359 writel(mask, &regs->outbound_intr_mask);
360 /* Dummy readl to force pci flush */
361 readl(&regs->outbound_intr_mask);
362}
363
364/**
365 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
366 * @regs: MFI register set
367 */
368static u32
369megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
370{
371 return readl(&(regs)->outbound_scratch_pad);
372}
373
374/**
 375 * megasas_clear_intr_gen2 - Check & clear interrupt
376 * @regs: MFI register set
377 */
378static int
379megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
380{
381 u32 status;
382 /*
383 * Check if it is our interrupt
384 */
385 status = readl(&regs->outbound_intr_status);
386
387 if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK))
388 return 1;
389
390 /*
391 * Clear the interrupt by writing back the same value
392 */
393 writel(status, &regs->outbound_doorbell_clear);
394
395 /* Dummy readl to force pci flush */
396 readl(&regs->outbound_intr_status);
397
398 return 0;
399}
400/**
401 * megasas_fire_cmd_gen2 - Sends command to the FW
402 * @frame_phys_addr : Physical address of cmd
403 * @frame_count : Number of frames for the command
404 * @regs : MFI register set
405 */
406static inline void
407megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count,
408 struct megasas_register_set __iomem *regs)
409{
410 writel((frame_phys_addr | (frame_count<<1))|1,
411 &(regs)->inbound_queue_port);
412}
413
414static struct megasas_instance_template megasas_instance_template_gen2 = {
415
416 .fire_cmd = megasas_fire_cmd_gen2,
417 .enable_intr = megasas_enable_intr_gen2,
418 .disable_intr = megasas_disable_intr_gen2,
419 .clear_intr = megasas_clear_intr_gen2,
420 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
421};
422
423/**
321* This is the end of set of functions & definitions 424* This is the end of set of functions & definitions
322* specific to ppc (deviceid : 0x60) controllers 425* specific to ppc (deviceid : 0x60) controllers
323*/ 426*/
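The new gen2 template above differs from the ppc one mainly in the interrupt mask bits and in how megasas_fire_cmd_gen2() packs the frame address, frame count, and a low "valid" bit into a single register write. A tiny stand-alone sketch of that packing, inferred from the writel() expression rather than from any hardware documentation, is:

#include <stdint.h>
#include <stdio.h>

/* Same expression as megasas_fire_cmd_gen2(); whether the low address bits
 * are genuinely free is an alignment assumption, not taken from a spec. */
static uint32_t pack_inbound_queue_word(uint32_t frame_phys_addr,
                                        uint32_t frame_count)
{
    return (frame_phys_addr | (frame_count << 1)) | 1;
}

int main(void)
{
    printf("0x%08x\n", pack_inbound_queue_word(0x00100000, 3));
    return 0;
}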
@@ -1064,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1064 * cmd has not been completed within the timeout period. 1167 * cmd has not been completed within the timeout period.
1065 */ 1168 */
1066static enum 1169static enum
1067scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 1170blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1068{ 1171{
1069 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; 1172 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
1070 struct megasas_instance *instance; 1173 struct megasas_instance *instance;
@@ -1072,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1072 1175
1073 if (time_after(jiffies, scmd->jiffies_at_alloc + 1176 if (time_after(jiffies, scmd->jiffies_at_alloc +
1074 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { 1177 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
1075 return EH_NOT_HANDLED; 1178 return BLK_EH_NOT_HANDLED;
1076 } 1179 }
1077 1180
1078 instance = cmd->instance; 1181 instance = cmd->instance;
@@ -1086,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1086 1189
1087 spin_unlock_irqrestore(instance->host->host_lock, flags); 1190 spin_unlock_irqrestore(instance->host->host_lock, flags);
1088 } 1191 }
1089 return EH_RESET_TIMER; 1192 return BLK_EH_RESET_TIMER;
1090} 1193}
1091 1194
1092/** 1195/**
@@ -1976,7 +2079,12 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1976 /* 2079 /*
1977 * Map the message registers 2080 * Map the message registers
1978 */ 2081 */
1979 instance->base_addr = pci_resource_start(instance->pdev, 0); 2082 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
2083 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
2084 instance->base_addr = pci_resource_start(instance->pdev, 1);
2085 } else {
2086 instance->base_addr = pci_resource_start(instance->pdev, 0);
2087 }
1980 2088
1981 if (pci_request_regions(instance->pdev, "megasas: LSI")) { 2089 if (pci_request_regions(instance->pdev, "megasas: LSI")) {
1982 printk(KERN_DEBUG "megasas: IO memory region busy!\n"); 2090 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
@@ -1998,6 +2106,10 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1998 case PCI_DEVICE_ID_LSI_SAS1078DE: 2106 case PCI_DEVICE_ID_LSI_SAS1078DE:
1999 instance->instancet = &megasas_instance_template_ppc; 2107 instance->instancet = &megasas_instance_template_ppc;
2000 break; 2108 break;
2109 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
2110 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
2111 instance->instancet = &megasas_instance_template_gen2;
2112 break;
2001 case PCI_DEVICE_ID_LSI_SAS1064R: 2113 case PCI_DEVICE_ID_LSI_SAS1064R:
2002 case PCI_DEVICE_ID_DELL_PERC5: 2114 case PCI_DEVICE_ID_DELL_PERC5:
2003 default: 2115 default:
@@ -2857,6 +2969,7 @@ static void megasas_shutdown(struct pci_dev *pdev)
2857{ 2969{
2858 struct megasas_instance *instance = pci_get_drvdata(pdev); 2970 struct megasas_instance *instance = pci_get_drvdata(pdev);
2859 megasas_flush_cache(instance); 2971 megasas_flush_cache(instance);
2972 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
2860} 2973}
2861 2974
2862/** 2975/**
@@ -3292,7 +3405,7 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
3292 return retval; 3405 return retval;
3293} 3406}
3294 3407
3295static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl, 3408static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
3296 megasas_sysfs_set_dbg_lvl); 3409 megasas_sysfs_set_dbg_lvl);
3297 3410
3298static ssize_t 3411static ssize_t
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index b0c41e671702..0d033248fdf1 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/* 18/*
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.03.20-rc1" 21#define MEGASAS_VERSION "00.00.04.01"
22#define MEGASAS_RELDATE "March 10, 2008" 22#define MEGASAS_RELDATE "July 24, 2008"
23#define MEGASAS_EXT_VERSION "Mon. March 10 11:02:31 PDT 2008" 23#define MEGASAS_EXT_VERSION "Thu July 24 11:41:51 PST 2008"
24 24
25/* 25/*
26 * Device IDs 26 * Device IDs
@@ -28,6 +28,8 @@
28#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060 28#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060
29#define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C 29#define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C
30#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413 30#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
31#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078
32#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079
31 33
32/* 34/*
33 * ===================================== 35 * =====================================
@@ -580,6 +582,8 @@ struct megasas_ctrl_info {
580#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) 582#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
581 583
582#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 584#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
585#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
586#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
583 587
584/* 588/*
585* register set for both 1068 and 1078 controllers 589* register set for both 1068 and 1078 controllers
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd2..3b7240e40819 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
4170 ** 4170 **
4171 **---------------------------------------------------- 4171 **----------------------------------------------------
4172 */ 4172 */
4173 if (np->settle_time && cmd->timeout_per_command >= HZ) { 4173 if (np->settle_time && cmd->request->timeout >= HZ) {
4174 u_long tlimit = jiffies + cmd->timeout_per_command - HZ; 4174 u_long tlimit = jiffies + cmd->request->timeout - HZ;
4175 if (time_after(np->settle_time, tlimit)) 4175 if (time_after(np->settle_time, tlimit))
4176 np->settle_time = tlimit; 4176 np->settle_time = tlimit;
4177 } 4177 }
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index edf9fdb3cb3c..22052bb7becb 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -23,7 +23,6 @@
23 * 1.2: PowerPC (big endian) support. 23 * 1.2: PowerPC (big endian) support.
24 */ 24 */
25 25
26#include <linux/version.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <linux/init.h> 27#include <linux/init.h>
29#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h
index 6715ecb3bfca..9565acf1aa72 100644
--- a/drivers/scsi/nsp32.h
+++ b/drivers/scsi/nsp32.h
@@ -16,7 +16,6 @@
16#ifndef _NSP32_H 16#ifndef _NSP32_H
17#define _NSP32_H 17#define _NSP32_H
18 18
19#include <linux/version.h>
20//#define NSP32_DEBUG 9 19//#define NSP32_DEBUG 9
21 20
22/* 21/*
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 7c19bf264873..11a61ea8d5d9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -25,7 +25,6 @@
25 25
26***********************************************************************/ 26***********************************************************************/
27 27
28#include <linux/version.h>
29#include <linux/module.h> 28#include <linux/module.h>
30#include <linux/kernel.h> 29#include <linux/kernel.h>
31#include <linux/init.h> 30#include <linux/init.h>
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd798..b6cd12b2e996 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2846 2846
2847 /* Set ISP command timeout. */ 2847 /* Set ISP command timeout. */
2848 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 2848 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2849 2849
2850 /* Set device target ID and LUN */ 2850 /* Set device target ID and LUN */
2851 pkt->lun = SCSI_LUN_32(cmd); 2851 pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3115 3115
3116 /* Set ISP command timeout. */ 3116 /* Set ISP command timeout. */
3117 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 3117 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3118 3118
3119 /* Set device target ID and LUN */ 3119 /* Set device target ID and LUN */
3120 pkt->lun = SCSI_LUN_32(cmd); 3120 pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index a319a20ed440..0ddfe7106b3b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
292 valid = 0; 292 valid = 0;
293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
294 valid = 1; 294 valid = 1;
295 else if (start == (FA_BOOT_CODE_ADDR*4) || 295 else if (start == (ha->flt_region_boot * 4) ||
296 start == (FA_RISC_CODE_ADDR*4)) 296 start == (ha->flt_region_fw * 4))
297 valid = 1; 297 valid = 1;
298 else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4)) 298 else if (IS_QLA25XX(ha) &&
299 start == (ha->flt_region_vpd_nvram * 4))
299 valid = 1; 300 valid = 1;
300 if (!valid) { 301 if (!valid) {
301 qla_printk(KERN_WARNING, ha, 302 qla_printk(KERN_WARNING, ha,
@@ -993,6 +994,17 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
993{ 994{
994 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 995 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
995 996
997 /*
998 * At this point all fcport's software-states are cleared. Perform any
999 * final cleanup of firmware resources (PCBs and XCBs).
1000 */
1001 if (fcport->loop_id != FC_NO_LOOP_ID) {
1002 fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id,
1003 fcport->d_id.b.domain, fcport->d_id.b.area,
1004 fcport->d_id.b.al_pa);
1005 fcport->loop_id = FC_NO_LOOP_ID;
1006 }
1007
996 qla2x00_abort_fcport_cmds(fcport); 1008 qla2x00_abort_fcport_cmds(fcport);
997 scsi_target_unblock(&rport->dev); 1009 scsi_target_unblock(&rport->dev);
998} 1010}
@@ -1054,6 +1066,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1054 pfc_host_stat->dumped_frames = stats->dumped_frames; 1066 pfc_host_stat->dumped_frames = stats->dumped_frames;
1055 pfc_host_stat->nos_count = stats->nos_rcvd; 1067 pfc_host_stat->nos_count = stats->nos_rcvd;
1056 } 1068 }
1069 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1070 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1057 1071
1058done_free: 1072done_free:
1059 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1073 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
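Two unit conventions are worth noting in the qla_attr.c hunks above: FLT region starts are stored as 32-bit word offsets (so the byte address is start * 4), and the new FC host statistics derive megabytes from the byte counters with a right shift by 20. A trivial sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t flt_region_fw = 0x20000;        /* word offset, illustrative */
    uint64_t input_bytes   = 3ULL << 30;     /* 3 GiB moved, illustrative */

    printf("fw byte address = 0x%x\n", flt_region_fw * 4);
    printf("fcp_input_megabytes = %llu\n",
           (unsigned long long)(input_bytes >> 20));
    return 0;
}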
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6da31ba94404..83c819216771 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,7 +25,6 @@
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/aer.h> 26#include <linux/aer.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/semaphore.h>
29 28
30#include <scsi/scsi.h> 29#include <scsi/scsi.h>
31#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
@@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
2157 2156
2158struct qla_statistics { 2157struct qla_statistics {
2159 uint32_t total_isp_aborts; 2158 uint32_t total_isp_aborts;
2159 uint64_t input_bytes;
2160 uint64_t output_bytes;
2160}; 2161};
2161 2162
2162/* 2163/*
@@ -2237,6 +2238,8 @@ typedef struct scsi_qla_host {
2237#define REGISTER_FDMI_NEEDED 26 2238#define REGISTER_FDMI_NEEDED 26
2238#define FCPORT_UPDATE_NEEDED 27 2239#define FCPORT_UPDATE_NEEDED 27
2239#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */ 2240#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2241#define UNLOADING 29
2242#define NPIV_CONFIG_NEEDED 30
2240 2243
2241 uint32_t device_flags; 2244 uint32_t device_flags;
2242#define DFLG_LOCAL_DEVICES BIT_0 2245#define DFLG_LOCAL_DEVICES BIT_0
@@ -2506,7 +2509,6 @@ typedef struct scsi_qla_host {
2506 uint64_t fce_wr, fce_rd; 2509 uint64_t fce_wr, fce_rd;
2507 struct mutex fce_mutex; 2510 struct mutex fce_mutex;
2508 2511
2509 uint32_t hw_event_start;
2510 uint32_t hw_event_ptr; 2512 uint32_t hw_event_ptr;
2511 uint32_t hw_event_pause_errors; 2513 uint32_t hw_event_pause_errors;
2512 2514
@@ -2552,6 +2554,14 @@ typedef struct scsi_qla_host {
2552 uint32_t fdt_unprotect_sec_cmd; 2554 uint32_t fdt_unprotect_sec_cmd;
2553 uint32_t fdt_protect_sec_cmd; 2555 uint32_t fdt_protect_sec_cmd;
2554 2556
2557 uint32_t flt_region_flt;
2558 uint32_t flt_region_fdt;
2559 uint32_t flt_region_boot;
2560 uint32_t flt_region_fw;
2561 uint32_t flt_region_vpd_nvram;
2562 uint32_t flt_region_hw_event;
2563 uint32_t flt_region_npiv_conf;
2564
2555 /* Needed for BEACON */ 2565 /* Needed for BEACON */
2556 uint16_t beacon_blink_led; 2566 uint16_t beacon_blink_led;
2557 uint8_t beacon_color_state; 2567 uint8_t beacon_color_state;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index cf194517400d..d1d14202575a 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -789,14 +789,23 @@ struct device_reg_24xx {
789#define FA_RISC_CODE_ADDR 0x20000 789#define FA_RISC_CODE_ADDR 0x20000
790#define FA_RISC_CODE_SEGMENTS 2 790#define FA_RISC_CODE_SEGMENTS 2
791 791
792#define FA_FLASH_DESCR_ADDR_24 0x11000
793#define FA_FLASH_LAYOUT_ADDR_24 0x11400
794#define FA_NPIV_CONF0_ADDR_24 0x16000
795#define FA_NPIV_CONF1_ADDR_24 0x17000
796
792#define FA_FW_AREA_ADDR 0x40000 797#define FA_FW_AREA_ADDR 0x40000
793#define FA_VPD_NVRAM_ADDR 0x48000 798#define FA_VPD_NVRAM_ADDR 0x48000
794#define FA_FEATURE_ADDR 0x4C000 799#define FA_FEATURE_ADDR 0x4C000
795#define FA_FLASH_DESCR_ADDR 0x50000 800#define FA_FLASH_DESCR_ADDR 0x50000
801#define FA_FLASH_LAYOUT_ADDR 0x50400
796#define FA_HW_EVENT0_ADDR 0x54000 802#define FA_HW_EVENT0_ADDR 0x54000
797#define FA_HW_EVENT1_ADDR 0x54200 803#define FA_HW_EVENT1_ADDR 0x54400
798#define FA_HW_EVENT_SIZE 0x200 804#define FA_HW_EVENT_SIZE 0x200
799#define FA_HW_EVENT_ENTRY_SIZE 4 805#define FA_HW_EVENT_ENTRY_SIZE 4
806#define FA_NPIV_CONF0_ADDR 0x5C000
807#define FA_NPIV_CONF1_ADDR 0x5D000
808
800/* 809/*
801 * Flash Error Log Event Codes. 810 * Flash Error Log Event Codes.
802 */ 811 */
@@ -806,10 +815,6 @@ struct device_reg_24xx {
806#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023 815#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
807#define HW_EVENT_FLASH_FW_ERR 0xF024 816#define HW_EVENT_FLASH_FW_ERR 0xF024
808 817
809#define FA_BOOT_LOG_ADDR 0x58000
810#define FA_FW_DUMP0_ADDR 0x60000
811#define FA_FW_DUMP1_ADDR 0x70000
812
813 uint32_t flash_data; /* Flash/NVRAM BIOS data. */ 818 uint32_t flash_data; /* Flash/NVRAM BIOS data. */
814 819
815 uint32_t ctrl_status; /* Control/Status. */ 820 uint32_t ctrl_status; /* Control/Status. */
@@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
1203 uint8_t unused2[65]; 1208 uint8_t unused2[65];
1204}; 1209};
1205 1210
1211/* Flash Layout Table ********************************************************/
1212
1213struct qla_flt_location {
1214 uint8_t sig[4];
1215 uint32_t start_lo;
1216 uint32_t start_hi;
1217 uint16_t unused;
1218 uint16_t checksum;
1219};
1220
1221struct qla_flt_header {
1222 uint16_t version;
1223 uint16_t length;
1224 uint16_t checksum;
1225 uint16_t unused;
1226};
1227
1228#define FLT_REG_FW 0x01
1229#define FLT_REG_BOOT_CODE 0x07
1230#define FLT_REG_VPD_0 0x14
1231#define FLT_REG_NVRAM_0 0x15
1232#define FLT_REG_VPD_1 0x16
1233#define FLT_REG_NVRAM_1 0x17
1234#define FLT_REG_FDT 0x1a
1235#define FLT_REG_FLT 0x1c
1236#define FLT_REG_HW_EVENT_0 0x1d
1237#define FLT_REG_HW_EVENT_1 0x1f
1238#define FLT_REG_NPIV_CONF_0 0x29
1239#define FLT_REG_NPIV_CONF_1 0x2a
1240
1241struct qla_flt_region {
1242 uint32_t code;
1243 uint32_t size;
1244 uint32_t start;
1245 uint32_t end;
1246};
1247
1248/* Flash NPIV Configuration Table ********************************************/
1249
1250struct qla_npiv_header {
1251 uint8_t sig[2];
1252 uint16_t version;
1253 uint16_t entries;
1254 uint16_t unused[4];
1255 uint16_t checksum;
1256};
1257
1258struct qla_npiv_entry {
1259 uint16_t flags;
1260 uint16_t vf_id;
1261 uint16_t qos;
1262 uint16_t unused1;
1263 uint8_t port_name[WWN_SIZE];
1264 uint8_t node_name[WWN_SIZE];
1265};
1266
1206/* 84XX Support **************************************************************/ 1267/* 84XX Support **************************************************************/
1207 1268
1208#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */ 1269#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */
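The new flash layout and NPIV structures above are fixed-size little-endian records that qla_sup.c walks directly out of a flash read buffer. The following stand-alone sketch only mirrors their sizes, assuming WWN_SIZE is 8 and using plain C types in place of the kernel's:

#include <stdint.h>
#include <stdio.h>

#define WWN_SIZE 8

struct flt_header { uint16_t version, length, checksum, unused; };
struct flt_region { uint32_t code, size, start, end; };
struct npiv_entry {
    uint16_t flags, vf_id, qos, unused1;
    uint8_t  port_name[WWN_SIZE];
    uint8_t  node_name[WWN_SIZE];
};

int main(void)
{
    printf("flt_header=%zu flt_region=%zu npiv_entry=%zu bytes\n",
           sizeof(struct flt_header), sizeof(struct flt_region),
           sizeof(struct npiv_entry));
    return 0;
}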
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b156735e9a6..753dbe6cce6e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t, 313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
314 uint16_t, uint16_t); 314 uint16_t, uint16_t);
315 315
316extern void qla2xxx_get_flash_info(scsi_qla_host_t *); 316extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); 317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
318 318
319extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
320
319/* 321/*
320 * Global Function Prototypes in qla_dbg.c source file. 322 * Global Function Prototypes in qla_dbg.c source file.
321 */ 323 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 601a6b29750c..a470f2d3270d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
83 83
84 ha->isp_ops->reset_chip(ha); 84 ha->isp_ops->reset_chip(ha);
85 85
86 rval = qla2xxx_get_flash_info(ha);
87 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no));
90 return (rval);
91 }
92
86 ha->isp_ops->get_flash_version(ha, ha->request_ring); 93 ha->isp_ops->get_flash_version(ha, ha->request_ring);
87 94
88 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
109 rval = qla2x00_setup_chip(ha); 116 rval = qla2x00_setup_chip(ha);
110 if (rval) 117 if (rval)
111 return (rval); 118 return (rval);
112 qla2xxx_get_flash_info(ha);
113 } 119 }
114 if (IS_QLA84XX(ha)) { 120 if (IS_QLA84XX(ha)) {
115 ha->cs84xx = qla84xx_get_chip(ha); 121 ha->cs84xx = qla84xx_get_chip(ha);
@@ -976,8 +982,9 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
976 &ha->fw_attributes, &ha->fw_memory_size); 982 &ha->fw_attributes, &ha->fw_memory_size);
977 qla2x00_resize_request_q(ha); 983 qla2x00_resize_request_q(ha);
978 ha->flags.npiv_supported = 0; 984 ha->flags.npiv_supported = 0;
979 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha)) && 985 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) ||
980 (ha->fw_attributes & BIT_2)) { 986 IS_QLA84XX(ha)) &&
987 (ha->fw_attributes & BIT_2)) {
981 ha->flags.npiv_supported = 1; 988 ha->flags.npiv_supported = 1;
982 if ((!ha->max_npiv_vports) || 989 if ((!ha->max_npiv_vports) ||
983 ((ha->max_npiv_vports + 1) % 990 ((ha->max_npiv_vports + 1) %
@@ -2015,7 +2022,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2015 DEBUG3(printk("%s: exiting normally\n", __func__)); 2022 DEBUG3(printk("%s: exiting normally\n", __func__));
2016 } 2023 }
2017 2024
2018 /* Restore state if a resync event occured during processing */ 2025 /* Restore state if a resync event occurred during processing */
2019 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2026 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
2020 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2027 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2021 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2028 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@@ -2560,7 +2567,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2560 rval = QLA_SUCCESS; 2567 rval = QLA_SUCCESS;
2561 2568
2562 /* Try GID_PT to get device list, else GAN. */ 2569 /* Try GID_PT to get device list, else GAN. */
2563 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC); 2570 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
2564 if (!swl) { 2571 if (!swl) {
2565 /*EMPTY*/ 2572 /*EMPTY*/
2566 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2573 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@@ -3251,6 +3258,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3251{ 3258{
3252 int rval; 3259 int rval;
3253 uint8_t status = 0; 3260 uint8_t status = 0;
3261 scsi_qla_host_t *vha;
3254 3262
3255 if (ha->flags.online) { 3263 if (ha->flags.online) {
3256 ha->flags.online = 0; 3264 ha->flags.online = 0;
@@ -3265,6 +3273,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3265 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 3273 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
3266 atomic_set(&ha->loop_state, LOOP_DOWN); 3274 atomic_set(&ha->loop_state, LOOP_DOWN);
3267 qla2x00_mark_all_devices_lost(ha, 0); 3275 qla2x00_mark_all_devices_lost(ha, 0);
3276 list_for_each_entry(vha, &ha->vp_list, vp_list)
3277 qla2x00_mark_all_devices_lost(vha, 0);
3268 } else { 3278 } else {
3269 if (!atomic_read(&ha->loop_down_timer)) 3279 if (!atomic_read(&ha->loop_down_timer))
3270 atomic_set(&ha->loop_down_timer, 3280 atomic_set(&ha->loop_down_timer,
@@ -3747,7 +3757,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3747 rval = QLA_SUCCESS; 3757 rval = QLA_SUCCESS;
3748 3758
3749 segments = FA_RISC_CODE_SEGMENTS; 3759 segments = FA_RISC_CODE_SEGMENTS;
3750 faddr = FA_RISC_CODE_ADDR; 3760 faddr = ha->flt_region_fw;
3751 dcode = (uint32_t *)ha->request_ring; 3761 dcode = (uint32_t *)ha->request_ring;
3752 *srisc_addr = 0; 3762 *srisc_addr = 0;
3753 3763
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 92fafbdbbaab..e90afad120ee 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
52 * @ha: HA context 52 * @ha: HA context
53 * @ha_locked: is function called with the hardware lock 53 * @ha_locked: is function called with the hardware lock
54 * 54 *
55 * Returns non-zero if a failure occured, else zero. 55 * Returns non-zero if a failure occurred, else zero.
56 */ 56 */
57static inline int 57static inline int
58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked) 58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d57669aa4615..85bc0a48598b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
21 * Returns the proper CF_* direction based on CDB. 21 * Returns the proper CF_* direction based on CDB.
22 */ 22 */
23static inline uint16_t 23static inline uint16_t
24qla2x00_get_cmd_direction(struct scsi_cmnd *cmd) 24qla2x00_get_cmd_direction(srb_t *sp)
25{ 25{
26 uint16_t cflags; 26 uint16_t cflags;
27 27
28 cflags = 0; 28 cflags = 0;
29 29
30 /* Set transfer direction */ 30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) 31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 32 cflags = CF_WRITE;
33 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 33 sp->fcport->ha->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
34 cflags = CF_READ; 36 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd);
39 }
35 return (cflags); 40 return (cflags);
36} 41}
37 42
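The reworked helper above now takes the srb instead of the scsi_cmnd so the per-HBA byte counters can be bumped next to the direction flag. A simplified user-space sketch of that pairing, where the flag values and field names are illustrative stand-ins, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

enum dma_dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };

struct byte_stats { uint64_t input_bytes, output_bytes; };

static uint16_t get_cmd_direction(enum dma_dir dir, uint32_t bufflen,
                                  struct byte_stats *st)
{
    uint16_t cflags = 0;

    if (dir == DIR_TO_DEVICE) {          /* write to the device */
        cflags = 0x40;                   /* stand-in for CF_WRITE */
        st->output_bytes += bufflen;
    } else if (dir == DIR_FROM_DEVICE) { /* read from the device */
        cflags = 0x20;                   /* stand-in for CF_READ */
        st->input_bytes += bufflen;
    }
    return cflags;
}

int main(void)
{
    struct byte_stats st = { 0, 0 };
    get_cmd_direction(DIR_FROM_DEVICE, 4096, &st);
    printf("in=%llu out=%llu\n",
           (unsigned long long)st.input_bytes,
           (unsigned long long)st.output_bytes);
    return 0;
}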
@@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
169 174
170 ha = sp->ha; 175 ha = sp->ha;
171 176
172 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
173 178
174 /* Three DSDs are available in the Command Type 2 IOCB */ 179 /* Three DSDs are available in the Command Type 2 IOCB */
175 avail_dsds = 3; 180 avail_dsds = 3;
@@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
228 233
229 ha = sp->ha; 234 ha = sp->ha;
230 235
231 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
232 237
233 /* Two DSDs are available in the Command Type 3 IOCB */ 238 /* Two DSDs are available in the Command Type 3 IOCB */
234 avail_dsds = 2; 239 avail_dsds = 2;
@@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
262 * qla2x00_start_scsi() - Send a SCSI command to the ISP 267 * qla2x00_start_scsi() - Send a SCSI command to the ISP
263 * @sp: command to send to the ISP 268 * @sp: command to send to the ISP
264 * 269 *
265 * Returns non-zero if a failure occured, else zero. 270 * Returns non-zero if a failure occurred, else zero.
266 */ 271 */
267int 272int
268qla2x00_start_scsi(srb_t *sp) 273qla2x00_start_scsi(srb_t *sp)
@@ -407,7 +412,7 @@ queuing_error:
407 * 412 *
408 * Can be called from both normal and interrupt context. 413 * Can be called from both normal and interrupt context.
409 * 414 *
410 * Returns non-zero if a failure occured, else zero. 415 * Returns non-zero if a failure occurred, else zero.
411 */ 416 */
412int 417int
413__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
625 ha = sp->ha; 630 ha = sp->ha;
626 631
627 /* Set transfer direction */ 632 /* Set transfer direction */
628 if (cmd->sc_data_direction == DMA_TO_DEVICE) 633 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
629 cmd_pkt->task_mgmt_flags = 634 cmd_pkt->task_mgmt_flags =
630 __constant_cpu_to_le16(TMF_WRITE_DATA); 635 __constant_cpu_to_le16(TMF_WRITE_DATA);
631 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 636 sp->fcport->ha->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
632 cmd_pkt->task_mgmt_flags = 639 cmd_pkt->task_mgmt_flags =
633 __constant_cpu_to_le16(TMF_READ_DATA); 640 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd);
643 }
634 644
635 /* One DSD is available in the Command Type 3 IOCB */ 645 /* One DSD is available in the Command Type 3 IOCB */
636 avail_dsds = 1; 646 avail_dsds = 1;
@@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
666 * qla24xx_start_scsi() - Send a SCSI command to the ISP 676 * qla24xx_start_scsi() - Send a SCSI command to the ISP
667 * @sp: command to send to the ISP 677 * @sp: command to send to the ISP
668 * 678 *
669 * Returns non-zero if a failure occured, else zero. 679 * Returns non-zero if a failure occurred, else zero.
670 */ 680 */
671int 681int
672qla24xx_start_scsi(srb_t *sp) 682qla24xx_start_scsi(srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 874d802edb7d..fc4bfa7f839c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
391 break; 391 break;
392 392
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no, 394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
395 mb[1])); 395 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]); 396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 397
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 399 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 461 ha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 462 qla_printk(KERN_INFO, ha,
463 "LIP reset occured (%x).\n", mb[1]); 463 "LIP reset occurred (%x).\n", mb[1]);
464 464
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 466 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
543 543
544 case MBA_PORT_UPDATE: /* Port database update */ 544 case MBA_PORT_UPDATE: /* Port database update */
545 /* 545 /*
546 * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET 546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process 547 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and Wait for RSCN to come in. 548 * it. Otherwise ignore it and Wait for RSCN to come in.
549 */ 549 */
@@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 590 ha->host_no, mb[1], mb[2], mb[3]));
591 591
592 rscn_entry = (mb[1] << 16) | mb[2]; 592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
594 ha->d_id.b.al_pa; 594 ha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 595 if (rscn_entry == host_pid) {
@@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
600 break; 600 break;
601 } 601 }
602 602
603 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
603 rscn_queue_index = ha->rscn_in_ptr + 1; 605 rscn_queue_index = ha->rscn_in_ptr + 1;
604 if (rscn_queue_index == MAX_RSCN_COUNT) 606 if (rscn_queue_index == MAX_RSCN_COUNT)
605 rscn_queue_index = 0; 607 rscn_queue_index = 0;
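The hunk above rebuilds the RSCN port id from the mailbox registers twice: once with only the low byte of mb[1] for comparison against the host's own id, and once with the reserved bits above bit 9 masked off before queueing. A small sketch of that masking arithmetic, with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t mb1 = 0x02ab, mb2 = 0xcdef;

    /* id compared against the host port id (domain byte only) */
    uint32_t cmp_id     = ((uint32_t)(mb1 & 0xff) << 16) | mb2;
    /* id queued for processing, reserved bits of mb[1] dropped */
    uint32_t rscn_entry = ((uint32_t)(mb1 & 0x3ff) << 16) | mb2;

    printf("cmp=0x%06x queued=0x%08x\n", cmp_id, rscn_entry);
    return 0;
}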
@@ -879,11 +881,12 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
879 sp->request_sense_ptr += sense_len; 881 sp->request_sense_ptr += sense_len;
880 sp->request_sense_length -= sense_len; 882 sp->request_sense_length -= sense_len;
881 if (sp->request_sense_length != 0) 883 if (sp->request_sense_length != 0)
882 sp->ha->status_srb = sp; 884 sp->fcport->ha->status_srb = sp;
883 885
884 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 886 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
885 "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel, 887 "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no,
886 cp->device->id, cp->device->lun, cp, cp->serial_number)); 888 cp->device->channel, cp->device->id, cp->device->lun, cp,
889 cp->serial_number));
887 if (sense_len) 890 if (sense_len)
888 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, 891 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
889 CMD_ACTUAL_SNSLEN(cp))); 892 CMD_ACTUAL_SNSLEN(cp)));
@@ -1059,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1059 resid = resid_len; 1062 resid = resid_len;
1060 /* Use F/W calculated residual length. */ 1063 /* Use F/W calculated residual length. */
1061 if (IS_FWI2_CAPABLE(ha)) { 1064 if (IS_FWI2_CAPABLE(ha)) {
1062 if (scsi_status & SS_RESIDUAL_UNDER && 1065 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1063 resid != fw_resid_len) { 1066 lscsi_status = 0;
1067 } else if (resid != fw_resid_len) {
1064 scsi_status &= ~SS_RESIDUAL_UNDER; 1068 scsi_status &= ~SS_RESIDUAL_UNDER;
1065 lscsi_status = 0; 1069 lscsi_status = 0;
1066 } 1070 }
@@ -1184,9 +1188,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1184 atomic_read(&fcport->state))); 1188 atomic_read(&fcport->state)));
1185 1189
1186 cp->result = DID_BUS_BUSY << 16; 1190 cp->result = DID_BUS_BUSY << 16;
1187 if (atomic_read(&fcport->state) == FCS_ONLINE) { 1191 if (atomic_read(&fcport->state) == FCS_ONLINE)
1188 qla2x00_mark_device_lost(ha, fcport, 1, 1); 1192 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
1189 }
1190 break; 1193 break;
1191 1194
1192 case CS_RESET: 1195 case CS_RESET:
@@ -1229,7 +1232,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1229 1232
1230 /* Check to see if logout occurred. */ 1233 /* Check to see if logout occurred. */
1231 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) 1234 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1232 qla2x00_mark_device_lost(ha, fcport, 1, 1); 1235 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
1233 break; 1236 break;
1234 1237
1235 default: 1238 default:
@@ -1834,7 +1837,6 @@ clear_risc_ints:
1834 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); 1837 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1835 } 1838 }
1836 spin_unlock_irq(&ha->hardware_lock); 1839 spin_unlock_irq(&ha->hardware_lock);
1837 ha->isp_ops->enable_intrs(ha);
1838 1840
1839fail: 1841fail:
1840 return ret; 1842 return ret;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index bc90d6b8d0a0..36bc6851e23d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
233 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 233 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
234 "isp_abort_needed.\n", __func__, ha->host_no)); 234 "isp_abort_needed.\n", __func__, ha->host_no));
235 qla_printk(KERN_WARNING, ha, 235 qla_printk(KERN_WARNING, ha,
236 "Mailbox command timeout occured. Scheduling ISP " 236 "Mailbox command timeout occurred. Scheduling ISP "
237 "abort.\n"); 237 "abort.\n");
238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
239 qla2xxx_wake_dpc(ha); 239 qla2xxx_wake_dpc(ha);
@@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
244 DEBUG2_3_11(printk("%s(%ld): timeout calling " 244 DEBUG2_3_11(printk("%s(%ld): timeout calling "
245 "abort_isp\n", __func__, ha->host_no)); 245 "abort_isp\n", __func__, ha->host_no));
246 qla_printk(KERN_WARNING, ha, 246 qla_printk(KERN_WARNING, ha,
247 "Mailbox command timeout occured. Issuing ISP " 247 "Mailbox command timeout occurred. Issuing ISP "
248 "abort.\n"); 248 "abort.\n");
249 249
250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
@@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
1995 char *pmap; 1995 char *pmap;
1996 dma_addr_t pmap_dma; 1996 dma_addr_t pmap_dma;
1997 1997
1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma); 1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
1999 if (pmap == NULL) { 1999 if (pmap == NULL) {
2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
2001 __func__, ha->host_no)); 2001 __func__, ha->host_no));
@@ -2686,7 +2686,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2686 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); 2686 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
2687 set_bit(VP_DPC_NEEDED, &ha->dpc_flags); 2687 set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
2688 2688
2689 wake_up_process(ha->dpc_thread); 2689 qla2xxx_wake_dpc(ha);
2690 } 2690 }
2691} 2691}
2692 2692
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 50baf6a1d67c..93560cd72784 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,7 +6,6 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/version.h>
10#include <linux/moduleparam.h> 9#include <linux/moduleparam.h>
11#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
12#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7c8af7ed2a5d..3433441b956a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -780,7 +780,8 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
780 sp = pha->outstanding_cmds[cnt]; 780 sp = pha->outstanding_cmds[cnt];
781 if (!sp) 781 if (!sp)
782 continue; 782 continue;
783 if (ha->vp_idx != sp->ha->vp_idx) 783
784 if (ha->vp_idx != sp->fcport->ha->vp_idx)
784 continue; 785 continue;
785 match = 0; 786 match = 0;
786 switch (type) { 787 switch (type) {
@@ -1080,9 +1081,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
1080 sp = ha->outstanding_cmds[cnt]; 1081 sp = ha->outstanding_cmds[cnt];
1081 if (sp) { 1082 if (sp) {
1082 ha->outstanding_cmds[cnt] = NULL; 1083 ha->outstanding_cmds[cnt] = NULL;
1083 sp->flags = 0;
1084 sp->cmd->result = res; 1084 sp->cmd->result = res;
1085 sp->cmd->host_scribble = (unsigned char *)NULL;
1086 qla2x00_sp_compl(ha, sp); 1085 qla2x00_sp_compl(ha, sp);
1087 } 1086 }
1088 } 1087 }
@@ -1518,6 +1517,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1518 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1517 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
1519 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1518 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
1520 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1519 set_bit(RSCN_UPDATE, &ha->dpc_flags);
1520 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
1521} 1521}
1522 1522
1523static int 1523static int
@@ -1664,8 +1664,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1664 ha->gid_list_info_size = 8; 1664 ha->gid_list_info_size = 8;
1665 ha->optrom_size = OPTROM_SIZE_25XX; 1665 ha->optrom_size = OPTROM_SIZE_25XX;
1666 ha->isp_ops = &qla25xx_isp_ops; 1666 ha->isp_ops = &qla25xx_isp_ops;
1667 ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
1668 FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR;
1669 } 1667 }
1670 host->can_queue = ha->request_q_length + 128; 1668 host->can_queue = ha->request_q_length + 128;
1671 1669
@@ -1741,6 +1739,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1741 if (ret) 1739 if (ret)
1742 goto probe_failed; 1740 goto probe_failed;
1743 1741
1742 ha->isp_ops->enable_intrs(ha);
1743
1744 scsi_scan_host(host); 1744 scsi_scan_host(host);
1745 1745
1746 qla2x00_alloc_sysfs_attr(ha); 1746 qla2x00_alloc_sysfs_attr(ha);
@@ -1776,10 +1776,15 @@ probe_out:
1776static void 1776static void
1777qla2x00_remove_one(struct pci_dev *pdev) 1777qla2x00_remove_one(struct pci_dev *pdev)
1778{ 1778{
1779 scsi_qla_host_t *ha; 1779 scsi_qla_host_t *ha, *vha, *temp;
1780 1780
1781 ha = pci_get_drvdata(pdev); 1781 ha = pci_get_drvdata(pdev);
1782 1782
1783 list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list)
1784 fc_vport_terminate(vha->fc_vport);
1785
1786 set_bit(UNLOADING, &ha->dpc_flags);
1787
1783 qla2x00_dfs_remove(ha); 1788 qla2x00_dfs_remove(ha);
1784 1789
1785 qla84xx_put_chip(ha); 1790 qla84xx_put_chip(ha);
@@ -2427,6 +2432,12 @@ qla2x00_do_dpc(void *data)
2427 ha->host_no)); 2432 ha->host_no));
2428 } 2433 }
2429 2434
2435 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
2436 atomic_read(&ha->loop_state) == LOOP_READY) {
2437 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
2438 qla2xxx_flash_npiv_conf(ha);
2439 }
2440
2430 if (!ha->interrupts_on) 2441 if (!ha->interrupts_on)
2431 ha->isp_ops->enable_intrs(ha); 2442 ha->isp_ops->enable_intrs(ha);
2432 2443
@@ -2451,8 +2462,10 @@ qla2x00_do_dpc(void *data)
2451void 2462void
2452qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2463qla2xxx_wake_dpc(scsi_qla_host_t *ha)
2453{ 2464{
2454 if (ha->dpc_thread) 2465 struct task_struct *t = ha->dpc_thread;
2455 wake_up_process(ha->dpc_thread); 2466
2467 if (!test_bit(UNLOADING, &ha->dpc_flags) && t)
2468 wake_up_process(t);
2456} 2469}
2457 2470
2458/* 2471/*
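The qla_os.c changes above set an UNLOADING flag in remove() and make qla2xxx_wake_dpc() a no-op once it is set, so the DPC kthread is not poked during teardown. A toy sketch of that guard, using plain booleans in place of the kernel's bit operations:

#include <stdbool.h>
#include <stdio.h>

struct hba_state { bool unloading; bool dpc_thread_present; };

static void wake_dpc(struct hba_state *ha)
{
    /* mirrors: if (!test_bit(UNLOADING, &ha->dpc_flags) && t) wake_up_process(t) */
    if (!ha->unloading && ha->dpc_thread_present)
        printf("wake dpc thread\n");
    else
        printf("dpc wake suppressed\n");
}

int main(void)
{
    struct hba_state ha = { .unloading = true, .dpc_thread_present = true };
    wake_dpc(&ha);
    return 0;
}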
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1bca74474935..90a13211717f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -543,23 +543,198 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
543 } 543 }
544} 544}
545 545
546void 546static int
547qla2xxx_get_flash_info(scsi_qla_host_t *ha) 547qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
548{
549 const char *loc, *locations[] = { "DEF", "PCI" };
550 uint32_t pcihdr, pcids;
551 uint32_t *dcode;
552 uint8_t *buf, *bcode, last_image;
553 uint16_t cnt, chksum, *wptr;
554 struct qla_flt_location *fltl;
555
556 /*
557 * FLT-location structure resides after the last PCI region.
558 */
559
560 /* Begin with sane defaults. */
561 loc = locations[0];
562 *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
563 FA_FLASH_LAYOUT_ADDR;
564
565 /* Begin with first PCI expansion ROM header. */
566 buf = (uint8_t *)ha->request_ring;
567 dcode = (uint32_t *)ha->request_ring;
568 pcihdr = 0;
569 last_image = 1;
570 do {
571 /* Verify PCI expansion ROM header. */
572 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
573 bcode = buf + (pcihdr % 4);
574 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
575 goto end;
576
577 /* Locate PCI data structure. */
578 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
579 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
580 bcode = buf + (pcihdr % 4);
581
582 /* Validate signature of PCI data structure. */
583 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
584 bcode[0x2] != 'I' || bcode[0x3] != 'R')
585 goto end;
586
587 last_image = bcode[0x15] & BIT_7;
588
589 /* Locate next PCI expansion ROM. */
590 pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
591 } while (!last_image);
592
593 /* Now verify FLT-location structure. */
594 fltl = (struct qla_flt_location *)ha->request_ring;
595 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
596 sizeof(struct qla_flt_location) >> 2);
597 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
598 fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
599 goto end;
600
601 wptr = (uint16_t *)ha->request_ring;
602 cnt = sizeof(struct qla_flt_location) >> 1;
603 for (chksum = 0; cnt; cnt--)
604 chksum += le16_to_cpu(*wptr++);
605 if (chksum) {
606 qla_printk(KERN_ERR, ha,
607 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
608 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
609 return QLA_FUNCTION_FAILED;
610 }
611
612 /* Good data. Use specified location. */
613 loc = locations[1];
614 *start = le16_to_cpu(fltl->start_hi) << 16 |
615 le16_to_cpu(fltl->start_lo);
616end:
617 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
618 return QLA_SUCCESS;
619}
620
621static void
622qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
623{
624 const char *loc, *locations[] = { "DEF", "FLT" };
625 uint16_t *wptr;
626 uint16_t cnt, chksum;
627 uint32_t start;
628 struct qla_flt_header *flt;
629 struct qla_flt_region *region;
630
631 ha->flt_region_flt = flt_addr;
632 wptr = (uint16_t *)ha->request_ring;
633 flt = (struct qla_flt_header *)ha->request_ring;
634 region = (struct qla_flt_region *)&flt[1];
635 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
636 flt_addr << 2, OPTROM_BURST_SIZE);
637 if (*wptr == __constant_cpu_to_le16(0xffff))
638 goto no_flash_data;
639 if (flt->version != __constant_cpu_to_le16(1)) {
640 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
641 "version=0x%x length=0x%x checksum=0x%x.\n",
642 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
643 le16_to_cpu(flt->checksum)));
644 goto no_flash_data;
645 }
646
647 cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
648 for (chksum = 0; cnt; cnt--)
649 chksum += le16_to_cpu(*wptr++);
650 if (chksum) {
651 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
652 "version=0x%x length=0x%x checksum=0x%x.\n",
653 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
654 chksum));
655 goto no_flash_data;
656 }
657
658 loc = locations[1];
659 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
660 for ( ; cnt; cnt--, region++) {
661 /* Store addresses as DWORD offsets. */
662 start = le32_to_cpu(region->start) >> 2;
663
664 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
665 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
666 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
667
668 switch (le32_to_cpu(region->code)) {
669 case FLT_REG_FW:
670 ha->flt_region_fw = start;
671 break;
672 case FLT_REG_BOOT_CODE:
673 ha->flt_region_boot = start;
674 break;
675 case FLT_REG_VPD_0:
676 ha->flt_region_vpd_nvram = start;
677 break;
678 case FLT_REG_FDT:
679 ha->flt_region_fdt = start;
680 break;
681 case FLT_REG_HW_EVENT_0:
682 if (!PCI_FUNC(ha->pdev->devfn))
683 ha->flt_region_hw_event = start;
684 break;
685 case FLT_REG_HW_EVENT_1:
686 if (PCI_FUNC(ha->pdev->devfn))
687 ha->flt_region_hw_event = start;
688 break;
689 case FLT_REG_NPIV_CONF_0:
690 if (!PCI_FUNC(ha->pdev->devfn))
691 ha->flt_region_npiv_conf = start;
692 break;
693 case FLT_REG_NPIV_CONF_1:
694 if (PCI_FUNC(ha->pdev->devfn))
695 ha->flt_region_npiv_conf = start;
696 break;
697 }
698 }
699 goto done;
700
701no_flash_data:
702 /* Use hardcoded defaults. */
703 loc = locations[0];
704 ha->flt_region_fw = FA_RISC_CODE_ADDR;
705 ha->flt_region_boot = FA_BOOT_CODE_ADDR;
706 ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
707 ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
708 FA_FLASH_DESCR_ADDR;
709 ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
710 FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
711 ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
712 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
713 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
714done:
715 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
716 "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
717 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
718 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
719 ha->flt_region_npiv_conf));
720}
721
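Both qla2xxx_find_flt_start() and qla2xxx_get_flt_info() above validate their records with the same rule: summing every little-endian 16-bit word of the record, including the stored checksum field, must yield zero modulo 2^16. A minimal sketch of that check:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* len_words counts 16-bit words; the record's own checksum field is part of
 * the sum, so a valid record sums to 0 (mod 2^16). */
static int flt_checksum_ok(const uint8_t *buf, size_t len_words)
{
    uint16_t sum = 0;

    for (size_t i = 0; i < len_words; i++)
        sum += (uint16_t)(buf[2 * i] | (buf[2 * i + 1] << 8));   /* le16 */
    return sum == 0;
}

int main(void)
{
    uint8_t rec[4] = { 0x34, 0x12, 0xcc, 0xed };  /* 0x1234 + 0xedcc == 0x10000 */
    printf("%d\n", flt_checksum_ok(rec, 2));
    return 0;
}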
722static void
723qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
548{ 724{
549#define FLASH_BLK_SIZE_32K 0x8000 725#define FLASH_BLK_SIZE_32K 0x8000
550#define FLASH_BLK_SIZE_64K 0x10000 726#define FLASH_BLK_SIZE_64K 0x10000
727 const char *loc, *locations[] = { "MID", "FDT" };
551 uint16_t cnt, chksum; 728 uint16_t cnt, chksum;
552 uint16_t *wptr; 729 uint16_t *wptr;
553 struct qla_fdt_layout *fdt; 730 struct qla_fdt_layout *fdt;
554 uint8_t man_id, flash_id; 731 uint8_t man_id, flash_id;
555 732 uint16_t mid, fid;
556 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
557 return;
558 733
559 wptr = (uint16_t *)ha->request_ring; 734 wptr = (uint16_t *)ha->request_ring;
560 fdt = (struct qla_fdt_layout *)ha->request_ring; 735 fdt = (struct qla_fdt_layout *)ha->request_ring;
561 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 736 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
562 FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE); 737 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
563 if (*wptr == __constant_cpu_to_le16(0xffff)) 738 if (*wptr == __constant_cpu_to_le16(0xffff))
564 goto no_flash_data; 739 goto no_flash_data;
565 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || 740 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -577,7 +752,10 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
577 goto no_flash_data; 752 goto no_flash_data;
578 } 753 }
579 754
580 ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f; 755 loc = locations[1];
756 mid = le16_to_cpu(fdt->man_id);
757 fid = le16_to_cpu(fdt->id);
758 ha->fdt_odd_index = mid == 0x1f;
581 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 759 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
582 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); 760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
583 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 761 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
588 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): 766 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
589 flash_conf_to_access_addr(0x0336); 767 flash_conf_to_access_addr(0x0336);
590 } 768 }
591 769 goto done;
592 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
593 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
594 le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
595 ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
596 ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
597 return;
598
599no_flash_data: 770no_flash_data:
771 loc = locations[0];
600 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); 772 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
773 mid = man_id;
774 fid = flash_id;
601 ha->fdt_wrt_disable = 0x9c; 775 ha->fdt_wrt_disable = 0x9c;
602 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); 776 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
603 switch (man_id) { 777 switch (man_id) {
@@ -625,14 +799,117 @@ no_flash_data:
625 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 799 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
626 break; 800 break;
627 } 801 }
628 802done:
629 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x " 803 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
630 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id, 804 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
631 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 805 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
632 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, 806 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
633 ha->fdt_block_size)); 807 ha->fdt_block_size));
634} 808}
635 809
810int
811qla2xxx_get_flash_info(scsi_qla_host_t *ha)
812{
813 int ret;
814 uint32_t flt_addr;
815
816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
817 return QLA_SUCCESS;
818
819 ret = qla2xxx_find_flt_start(ha, &flt_addr);
820 if (ret != QLA_SUCCESS)
821 return ret;
822
823 qla2xxx_get_flt_info(ha, flt_addr);
824 qla2xxx_get_fdt_info(ha);
825
826 return QLA_SUCCESS;
827}
828
829void
830qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
831{
832#define NPIV_CONFIG_SIZE (16*1024)
833 void *data;
834 uint16_t *wptr;
835 uint16_t cnt, chksum;
836 struct qla_npiv_header hdr;
837 struct qla_npiv_entry *entry;
838
839 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
840 return;
841
842 ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
843 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
844 if (hdr.version == __constant_cpu_to_le16(0xffff))
845 return;
846 if (hdr.version != __constant_cpu_to_le16(1)) {
847 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
848 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
849 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
850 le16_to_cpu(hdr.checksum)));
851 return;
852 }
853
854 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
855 if (!data) {
856 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
857 "allocate memory.\n"));
858 return;
859 }
860
861 ha->isp_ops->read_optrom(ha, (uint8_t *)data,
862 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
863
864 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
865 sizeof(struct qla_npiv_entry)) >> 1;
866 for (wptr = data, chksum = 0; cnt; cnt--)
867 chksum += le16_to_cpu(*wptr++);
868 if (chksum) {
869 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
870 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
871 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
872 chksum));
873 goto done;
874 }
875
876 entry = data + sizeof(struct qla_npiv_header);
877 cnt = le16_to_cpu(hdr.entries);
878 for ( ; cnt; cnt--, entry++) {
879 uint16_t flags;
880 struct fc_vport_identifiers vid;
881 struct fc_vport *vport;
882
883 flags = le16_to_cpu(entry->flags);
884 if (flags == 0xffff)
885 continue;
886 if ((flags & BIT_0) == 0)
887 continue;
888
889 memset(&vid, 0, sizeof(vid));
890 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
891 vid.vport_type = FC_PORTTYPE_NPIV;
892 vid.disable = false;
893 vid.port_name = wwn_to_u64(entry->port_name);
894 vid.node_name = wwn_to_u64(entry->node_name);
895
896 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
897 "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
898 (unsigned long long)vid.port_name,
899 (unsigned long long)vid.node_name,
900 le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
901
902 vport = fc_vport_create(ha->host, 0, &vid);
903 if (!vport)
904 qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
905 "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
906 (unsigned long long)vid.port_name,
907 (unsigned long long)vid.node_name);
908 }
909done:
910 kfree(data);
911}
912
636static void 913static void
637qla24xx_unprotect_flash(scsi_qla_host_t *ha) 914qla24xx_unprotect_flash(scsi_qla_host_t *ha)
638{ 915{
@@ -920,7 +1197,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
920 dwptr = (uint32_t *)buf; 1197 dwptr = (uint32_t *)buf;
921 for (i = 0; i < bytes >> 2; i++, naddr++) 1198 for (i = 0; i < bytes >> 2; i++, naddr++)
922 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1199 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
923 flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr))); 1200 flash_data_to_access_addr(ha->flt_region_vpd_nvram |
1201 naddr)));
924 1202
925 return buf; 1203 return buf;
926} 1204}
@@ -935,10 +1213,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
935 dbuf = vmalloc(RMW_BUFFER_SIZE); 1213 dbuf = vmalloc(RMW_BUFFER_SIZE);
936 if (!dbuf) 1214 if (!dbuf)
937 return QLA_MEMORY_ALLOC_FAILED; 1215 return QLA_MEMORY_ALLOC_FAILED;
938 ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1216 ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
939 RMW_BUFFER_SIZE); 1217 RMW_BUFFER_SIZE);
940 memcpy(dbuf + (naddr << 2), buf, bytes); 1218 memcpy(dbuf + (naddr << 2), buf, bytes);
941 ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1219 ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
942 RMW_BUFFER_SIZE); 1220 RMW_BUFFER_SIZE);
943 vfree(dbuf); 1221 vfree(dbuf);
944 1222
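The write path above keeps the driver's existing read-modify-write scheme and only changes where the region base comes from (the layout table instead of a hard-coded address). As a rough illustration of that scheme, with the flash replaced by an in-memory array and hypothetical names:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define REGION_WORDS 64			/* whole erase/program granule */

static uint32_t flash[REGION_WORDS];	/* stand-in for the real flash region */

static void region_read(uint32_t *dst)        { memcpy(dst, flash, sizeof(flash)); }
static void region_write(const uint32_t *src) { memcpy(flash, src, sizeof(flash)); }

/*
 * Update 'count' 32-bit words at word offset 'off' without losing the rest
 * of the region: read the whole region, patch the slice, write it all back.
 */
static int region_update(uint32_t off, const uint32_t *data, uint32_t count)
{
	uint32_t scratch[REGION_WORDS];

	if (off + count > REGION_WORDS)
		return -1;

	region_read(scratch);
	memcpy(&scratch[off], data, count * sizeof(uint32_t));
	region_write(scratch);
	return 0;
}

int main(void)
{
	uint32_t vpd[2] = { 0xdeadbeef, 0xcafef00d };

	region_update(8, vpd, 2);
	printf("flash[8]=0x%08x flash[9]=0x%08x\n",
	       (unsigned)flash[8], (unsigned)flash[9]);
	return 0;
}
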
@@ -2166,7 +2444,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2166 memset(dbyte, 0, 8); 2444 memset(dbyte, 0, 8);
2167 dcode = (uint16_t *)dbyte; 2445 dcode = (uint16_t *)dbyte;
2168 2446
2169 qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10, 2447 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2170 8); 2448 8);
2171 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", 2449 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
2172 __func__, ha->host_no)); 2450 __func__, ha->host_no));
@@ -2177,7 +2455,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2177 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2455 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2178 dcode[3] == 0)) { 2456 dcode[3] == 0)) {
2179 DEBUG2(printk("%s(): Unrecognized fw revision at " 2457 DEBUG2(printk("%s(): Unrecognized fw revision at "
2180 "%x.\n", __func__, FA_RISC_CODE_ADDR * 4)); 2458 "%x.\n", __func__, ha->flt_region_fw * 4));
2181 } else { 2459 } else {
2182 /* values are in big endian */ 2460 /* values are in big endian */
2183 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2461 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2212,7 +2490,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2212 dcode = mbuf; 2490 dcode = mbuf;
2213 2491
2214 /* Begin with first PCI expansion ROM header. */ 2492 /* Begin with first PCI expansion ROM header. */
2215 pcihdr = 0; 2493 pcihdr = ha->flt_region_boot;
2216 last_image = 1; 2494 last_image = 1;
2217 do { 2495 do {
2218 /* Verify PCI expansion ROM header. */ 2496 /* Verify PCI expansion ROM header. */
@@ -2282,7 +2560,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2282 memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); 2560 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2283 dcode = mbuf; 2561 dcode = mbuf;
2284 2562
2285 qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4); 2563 qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
2286 for (i = 0; i < 4; i++) 2564 for (i = 0; i < 4; i++)
2287 dcode[i] = be32_to_cpu(dcode[i]); 2565 dcode[i] = be32_to_cpu(dcode[i]);
2288 2566
@@ -2291,7 +2569,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2291 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2569 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2292 dcode[3] == 0)) { 2570 dcode[3] == 0)) {
2293 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", 2571 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
2294 __func__, FA_RISC_CODE_ADDR)); 2572 __func__, ha->flt_region_fw));
2295 } else { 2573 } else {
2296 ha->fw_revision[0] = dcode[0]; 2574 ha->fw_revision[0] = dcode[0];
2297 ha->fw_revision[1] = dcode[1]; 2575 ha->fw_revision[1] = dcode[1];
@@ -2355,7 +2633,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2355 /* Locate first empty entry. */ 2633 /* Locate first empty entry. */
2356 for (;;) { 2634 for (;;) {
2357 if (ha->hw_event_ptr >= 2635 if (ha->hw_event_ptr >=
2358 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2636 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2359 DEBUG2(qla_printk(KERN_WARNING, ha, 2637 DEBUG2(qla_printk(KERN_WARNING, ha,
2360 "HW event -- Log Full!\n")); 2638 "HW event -- Log Full!\n"));
2361 return QLA_MEMORY_ALLOC_FAILED; 2639 return QLA_MEMORY_ALLOC_FAILED;
@@ -2391,7 +2669,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2391 int rval; 2669 int rval;
2392 uint32_t marker[2], fdata[4]; 2670 uint32_t marker[2], fdata[4];
2393 2671
2394 if (ha->hw_event_start == 0) 2672 if (ha->flt_region_hw_event == 0)
2395 return QLA_FUNCTION_FAILED; 2673 return QLA_FUNCTION_FAILED;
2396 2674
2397 DEBUG2(qla_printk(KERN_WARNING, ha, 2675 DEBUG2(qla_printk(KERN_WARNING, ha,
@@ -2406,7 +2684,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2406 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER); 2684 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
2407 2685
2408 /* Locate marker. */ 2686 /* Locate marker. */
2409 ha->hw_event_ptr = ha->hw_event_start; 2687 ha->hw_event_ptr = ha->flt_region_hw_event;
2410 for (;;) { 2688 for (;;) {
2411 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, 2689 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
2412 4); 2690 4);
@@ -2415,7 +2693,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2415 break; 2693 break;
2416 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; 2694 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2417 if (ha->hw_event_ptr >= 2695 if (ha->hw_event_ptr >=
2418 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2696 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2419 DEBUG2(qla_printk(KERN_WARNING, ha, 2697 DEBUG2(qla_printk(KERN_WARNING, ha,
2420 "HW event -- Log Full!\n")); 2698 "HW event -- Log Full!\n"));
2421 return QLA_MEMORY_ALLOC_FAILED; 2699 return QLA_MEMORY_ALLOC_FAILED;
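The qla_sup.c changes above repeatedly validate option-ROM tables (the FLT header, the NPIV configuration) the same way: read the table into a scratch buffer, check a version word, then require the 16-bit little-endian word sum over header plus payload to come out to zero. A minimal standalone sketch of that check follows; the layout, field offsets, and names here are illustrative, not the driver's.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t le16_at(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/*
 * Validate an 8-byte header (version, payload length, checksum, reserved)
 * followed by its payload: the sum of all 16-bit little-endian words,
 * checksum included, must be zero.  Returns 0 on success, -1 otherwise.
 */
static int flt_validate(const uint8_t *buf, size_t len)
{
	size_t words, i;
	uint16_t sum = 0;

	if (len < 8 || le16_at(buf) != 1)	/* unsupported version */
		return -1;

	words = (8 + le16_at(buf + 2)) / 2;	/* header + payload, in words */
	if (words * 2 > len)
		return -1;

	for (i = 0; i < words; i++)
		sum += le16_at(buf + 2 * i);

	return sum == 0 ? 0 : -1;		/* zero sum <=> consistent table */
}

int main(void)
{
	/* version=1, length=4, checksum chosen so the word sum wraps to zero */
	uint8_t tbl[12] = { 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
			    0x34, 0x12, 0x78, 0x56 };
	uint16_t fix = (uint16_t)(0 - (0x0001 + 0x0004 + 0x0000 + 0x1234 + 0x5678));

	tbl[4] = fix & 0xff;
	tbl[5] = fix >> 8;
	printf("table %s\n", flt_validate(tbl, sizeof(tbl)) ? "bad" : "ok");
	return 0;
}
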
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 676c390db354..be5e299df528 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k6" 10#define QLA2XXX_VERSION "8.02.01-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc52..de8279ad7d89 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1542 DEBUG2(printk(KERN_INFO 1542 DEBUG2(printk(KERN_INFO
1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
1545 cmd, jiffies, cmd->timeout_per_command / HZ, 1545 cmd, jiffies, cmd->request->timeout / HZ,
1546 ha->dpc_flags, cmd->result, cmd->allowed)); 1546 ha->dpc_flags, cmd->result, cmd->allowed));
1547 1547
1548 /* FIXME: wait for hba to go online */ 1548 /* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
1598 DEBUG2(printk(KERN_INFO 1598 DEBUG2(printk(KERN_INFO
1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
1601 ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ, 1601 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
1602 ha->dpc_flags, cmd->result, cmd->allowed)); 1602 ha->dpc_flags, cmd->result, cmd->allowed));
1603 1603
1604 stat = qla4xxx_reset_target(ha, ddb_entry); 1604 stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 4a1cf6377f6c..69d6ad862b60 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1,6 +1,6 @@
1/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver. 1/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
2 * 2 *
3 * Copyright (C) 1996, 2006 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
4 * 4 *
5 * A lot of this driver was directly stolen from Erik H. Moe's PCI 5 * A lot of this driver was directly stolen from Erik H. Moe's PCI
6 * Qlogic ISP driver. Mucho kudos to him for this code. 6 * Qlogic ISP driver. Mucho kudos to him for this code.
@@ -25,12 +25,14 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/dma-mapping.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
28 31
29#include <asm/byteorder.h> 32#include <asm/byteorder.h>
30 33
31#include "qlogicpti.h" 34#include "qlogicpti.h"
32 35
33#include <asm/sbus.h>
34#include <asm/dma.h> 36#include <asm/dma.h>
35#include <asm/system.h> 37#include <asm/system.h>
36#include <asm/ptrace.h> 38#include <asm/ptrace.h>
@@ -157,7 +159,7 @@ static inline void set_sbus_cfg1(struct qlogicpti *qpti)
157 * is a nop and the chip ends up using the smallest burst 159 * is a nop and the chip ends up using the smallest burst
158 * size. -DaveM 160 * size. -DaveM
159 */ 161 */
160 if (sbus_can_burst64(qpti->sdev) && (bursts & DMA_BURST64)) { 162 if (sbus_can_burst64() && (bursts & DMA_BURST64)) {
161 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64); 163 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
162 } else 164 } else
163#endif 165#endif
@@ -684,19 +686,19 @@ static void __devexit qpti_chain_del(struct qlogicpti *qpti)
684 686
685static int __devinit qpti_map_regs(struct qlogicpti *qpti) 687static int __devinit qpti_map_regs(struct qlogicpti *qpti)
686{ 688{
687 struct sbus_dev *sdev = qpti->sdev; 689 struct of_device *op = qpti->op;
688 690
689 qpti->qregs = sbus_ioremap(&sdev->resource[0], 0, 691 qpti->qregs = of_ioremap(&op->resource[0], 0,
690 sdev->reg_addrs[0].reg_size, 692 resource_size(&op->resource[0]),
691 "PTI Qlogic/ISP"); 693 "PTI Qlogic/ISP");
692 if (!qpti->qregs) { 694 if (!qpti->qregs) {
693 printk("PTI: Qlogic/ISP registers are unmappable\n"); 695 printk("PTI: Qlogic/ISP registers are unmappable\n");
694 return -1; 696 return -1;
695 } 697 }
696 if (qpti->is_pti) { 698 if (qpti->is_pti) {
697 qpti->sreg = sbus_ioremap(&sdev->resource[0], (16 * 4096), 699 qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
698 sizeof(unsigned char), 700 sizeof(unsigned char),
699 "PTI Qlogic/ISP statreg"); 701 "PTI Qlogic/ISP statreg");
700 if (!qpti->sreg) { 702 if (!qpti->sreg) {
701 printk("PTI: Qlogic/ISP status register is unmappable\n"); 703 printk("PTI: Qlogic/ISP status register is unmappable\n");
702 return -1; 704 return -1;
@@ -707,9 +709,9 @@ static int __devinit qpti_map_regs(struct qlogicpti *qpti)
707 709
708static int __devinit qpti_register_irq(struct qlogicpti *qpti) 710static int __devinit qpti_register_irq(struct qlogicpti *qpti)
709{ 711{
710 struct sbus_dev *sdev = qpti->sdev; 712 struct of_device *op = qpti->op;
711 713
712 qpti->qhost->irq = qpti->irq = sdev->irqs[0]; 714 qpti->qhost->irq = qpti->irq = op->irqs[0];
713 715
714 /* We used to try various overly-clever things to 716 /* We used to try various overly-clever things to
715 * reduce the interrupt processing overhead on 717 * reduce the interrupt processing overhead on
@@ -732,17 +734,19 @@ fail:
732 734
733static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti) 735static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
734{ 736{
735 qpti->scsi_id = prom_getintdefault(qpti->prom_node, 737 struct of_device *op = qpti->op;
736 "initiator-id", 738 struct device_node *dp;
737 -1); 739
740 dp = op->node;
741
742 qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
738 if (qpti->scsi_id == -1) 743 if (qpti->scsi_id == -1)
739 qpti->scsi_id = prom_getintdefault(qpti->prom_node, 744 qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id",
740 "scsi-initiator-id", 745 -1);
741 -1);
742 if (qpti->scsi_id == -1) 746 if (qpti->scsi_id == -1)
743 qpti->scsi_id = 747 qpti->scsi_id =
744 prom_getintdefault(qpti->sdev->bus->prom_node, 748 of_getintprop_default(dp->parent,
745 "scsi-initiator-id", 7); 749 "scsi-initiator-id", 7);
746 qpti->qhost->this_id = qpti->scsi_id; 750 qpti->qhost->this_id = qpti->scsi_id;
747 qpti->qhost->max_sectors = 64; 751 qpti->qhost->max_sectors = 64;
748 752
@@ -751,12 +755,11 @@ static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
751 755
752static void qpti_get_bursts(struct qlogicpti *qpti) 756static void qpti_get_bursts(struct qlogicpti *qpti)
753{ 757{
754 struct sbus_dev *sdev = qpti->sdev; 758 struct of_device *op = qpti->op;
755 u8 bursts, bmask; 759 u8 bursts, bmask;
756 760
757 bursts = prom_getintdefault(qpti->prom_node, "burst-sizes", 0xff); 761 bursts = of_getintprop_default(op->node, "burst-sizes", 0xff);
758 bmask = prom_getintdefault(sdev->bus->prom_node, 762 bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff);
759 "burst-sizes", 0xff);
760 if (bmask != 0xff) 763 if (bmask != 0xff)
761 bursts &= bmask; 764 bursts &= bmask;
762 if (bursts == 0xff || 765 if (bursts == 0xff ||
@@ -785,25 +788,25 @@ static void qpti_get_clock(struct qlogicpti *qpti)
785 */ 788 */
786static int __devinit qpti_map_queues(struct qlogicpti *qpti) 789static int __devinit qpti_map_queues(struct qlogicpti *qpti)
787{ 790{
788 struct sbus_dev *sdev = qpti->sdev; 791 struct of_device *op = qpti->op;
789 792
790#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 793#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
791 qpti->res_cpu = sbus_alloc_consistent(sdev, 794 qpti->res_cpu = dma_alloc_coherent(&op->dev,
792 QSIZE(RES_QUEUE_LEN), 795 QSIZE(RES_QUEUE_LEN),
793 &qpti->res_dvma); 796 &qpti->res_dvma, GFP_ATOMIC);
794 if (qpti->res_cpu == NULL || 797 if (qpti->res_cpu == NULL ||
795 qpti->res_dvma == 0) { 798 qpti->res_dvma == 0) {
796 printk("QPTI: Cannot map response queue.\n"); 799 printk("QPTI: Cannot map response queue.\n");
797 return -1; 800 return -1;
798 } 801 }
799 802
800 qpti->req_cpu = sbus_alloc_consistent(sdev, 803 qpti->req_cpu = dma_alloc_coherent(&op->dev,
801 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 804 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
802 &qpti->req_dvma); 805 &qpti->req_dvma, GFP_ATOMIC);
803 if (qpti->req_cpu == NULL || 806 if (qpti->req_cpu == NULL ||
804 qpti->req_dvma == 0) { 807 qpti->req_dvma == 0) {
805 sbus_free_consistent(sdev, QSIZE(RES_QUEUE_LEN), 808 dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN),
806 qpti->res_cpu, qpti->res_dvma); 809 qpti->res_cpu, qpti->res_dvma);
807 printk("QPTI: Cannot map request queue.\n"); 810 printk("QPTI: Cannot map request queue.\n");
808 return -1; 811 return -1;
809 } 812 }
@@ -875,8 +878,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
875 int sg_count; 878 int sg_count;
876 879
877 sg = scsi_sglist(Cmnd); 880 sg = scsi_sglist(Cmnd);
878 sg_count = sbus_map_sg(qpti->sdev, sg, scsi_sg_count(Cmnd), 881 sg_count = dma_map_sg(&qpti->op->dev, sg,
879 Cmnd->sc_data_direction); 882 scsi_sg_count(Cmnd),
883 Cmnd->sc_data_direction);
880 884
881 ds = cmd->dataseg; 885 ds = cmd->dataseg;
882 cmd->segment_cnt = sg_count; 886 cmd->segment_cnt = sg_count;
@@ -914,6 +918,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
914 ds[i].d_count = sg_dma_len(s); 918 ds[i].d_count = sg_dma_len(s);
915 } 919 }
916 sg_count -= n; 920 sg_count -= n;
921 sg = s;
917 } 922 }
918 } else { 923 } else {
919 cmd->dataseg[0].d_base = 0; 924 cmd->dataseg[0].d_base = 0;
@@ -1151,9 +1156,9 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1151 Cmnd->result = DID_ERROR << 16; 1156 Cmnd->result = DID_ERROR << 16;
1152 1157
1153 if (scsi_bufflen(Cmnd)) 1158 if (scsi_bufflen(Cmnd))
1154 sbus_unmap_sg(qpti->sdev, 1159 dma_unmap_sg(&qpti->op->dev,
1155 scsi_sglist(Cmnd), scsi_sg_count(Cmnd), 1160 scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
1156 Cmnd->sc_data_direction); 1161 Cmnd->sc_data_direction);
1157 1162
1158 qpti->cmd_count[Cmnd->device->id]--; 1163 qpti->cmd_count[Cmnd->device->id]--;
1159 sbus_writew(out_ptr, qpti->qregs + MBOX5); 1164 sbus_writew(out_ptr, qpti->qregs + MBOX5);
@@ -1267,34 +1272,32 @@ static struct scsi_host_template qpti_template = {
1267 .use_clustering = ENABLE_CLUSTERING, 1272 .use_clustering = ENABLE_CLUSTERING,
1268}; 1273};
1269 1274
1270static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_device_id *match) 1275static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match)
1271{ 1276{
1272 static int nqptis;
1273 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1274 struct device_node *dp = dev->node;
1275 struct scsi_host_template *tpnt = match->data; 1277 struct scsi_host_template *tpnt = match->data;
1278 struct device_node *dp = op->node;
1276 struct Scsi_Host *host; 1279 struct Scsi_Host *host;
1277 struct qlogicpti *qpti; 1280 struct qlogicpti *qpti;
1281 static int nqptis;
1278 const char *fcode; 1282 const char *fcode;
1279 1283
1280 /* Sometimes Antares cards come up not completely 1284 /* Sometimes Antares cards come up not completely
1281 * setup, and we get a report of a zero IRQ. 1285 * setup, and we get a report of a zero IRQ.
1282 */ 1286 */
1283 if (sdev->irqs[0] == 0) 1287 if (op->irqs[0] == 0)
1284 return -ENODEV; 1288 return -ENODEV;
1285 1289
1286 host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti)); 1290 host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
1287 if (!host) 1291 if (!host)
1288 return -ENOMEM; 1292 return -ENOMEM;
1289 1293
1290 qpti = (struct qlogicpti *) host->hostdata; 1294 qpti = shost_priv(host);
1291 1295
1292 host->max_id = MAX_TARGETS; 1296 host->max_id = MAX_TARGETS;
1293 qpti->qhost = host; 1297 qpti->qhost = host;
1294 qpti->sdev = sdev; 1298 qpti->op = op;
1295 qpti->qpti_id = nqptis; 1299 qpti->qpti_id = nqptis;
1296 qpti->prom_node = sdev->prom_node; 1300 strcpy(qpti->prom_name, op->node->name);
1297 strcpy(qpti->prom_name, sdev->ofdev.node->name);
1298 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); 1301 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
1299 1302
1300 if (qpti_map_regs(qpti) < 0) 1303 if (qpti_map_regs(qpti) < 0)
@@ -1340,12 +1343,12 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi
1340 (qpti->ultra ? "Ultra" : "Fast"), 1343 (qpti->ultra ? "Ultra" : "Fast"),
1341 (qpti->differential ? "differential" : "single ended")); 1344 (qpti->differential ? "differential" : "single ended"));
1342 1345
1343 if (scsi_add_host(host, &dev->dev)) { 1346 if (scsi_add_host(host, &op->dev)) {
1344 printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id); 1347 printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
1345 goto fail_unmap_queues; 1348 goto fail_unmap_queues;
1346 } 1349 }
1347 1350
1348 dev_set_drvdata(&sdev->ofdev.dev, qpti); 1351 dev_set_drvdata(&op->dev, qpti);
1349 1352
1350 qpti_chain_add(qpti); 1353 qpti_chain_add(qpti);
1351 1354
@@ -1356,19 +1359,20 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi
1356 1359
1357fail_unmap_queues: 1360fail_unmap_queues:
1358#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1361#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1359 sbus_free_consistent(qpti->sdev, 1362 dma_free_coherent(&op->dev,
1360 QSIZE(RES_QUEUE_LEN), 1363 QSIZE(RES_QUEUE_LEN),
1361 qpti->res_cpu, qpti->res_dvma); 1364 qpti->res_cpu, qpti->res_dvma);
1362 sbus_free_consistent(qpti->sdev, 1365 dma_free_coherent(&op->dev,
1363 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1366 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1364 qpti->req_cpu, qpti->req_dvma); 1367 qpti->req_cpu, qpti->req_dvma);
1365#undef QSIZE 1368#undef QSIZE
1366 1369
1367fail_unmap_regs: 1370fail_unmap_regs:
1368 sbus_iounmap(qpti->qregs, 1371 of_iounmap(&op->resource[0], qpti->qregs,
1369 qpti->sdev->reg_addrs[0].reg_size); 1372 resource_size(&op->resource[0]));
1370 if (qpti->is_pti) 1373 if (qpti->is_pti)
1371 sbus_iounmap(qpti->sreg, sizeof(unsigned char)); 1374 of_iounmap(&op->resource[0], qpti->sreg,
1375 sizeof(unsigned char));
1372 1376
1373fail_free_irq: 1377fail_free_irq:
1374 free_irq(qpti->irq, qpti); 1378 free_irq(qpti->irq, qpti);
@@ -1379,9 +1383,9 @@ fail_unlink:
1379 return -ENODEV; 1383 return -ENODEV;
1380} 1384}
1381 1385
1382static int __devexit qpti_sbus_remove(struct of_device *dev) 1386static int __devexit qpti_sbus_remove(struct of_device *op)
1383{ 1387{
1384 struct qlogicpti *qpti = dev_get_drvdata(&dev->dev); 1388 struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
1385 1389
1386 qpti_chain_del(qpti); 1390 qpti_chain_del(qpti);
1387 1391
@@ -1394,24 +1398,25 @@ static int __devexit qpti_sbus_remove(struct of_device *dev)
1394 free_irq(qpti->irq, qpti); 1398 free_irq(qpti->irq, qpti);
1395 1399
1396#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1400#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1397 sbus_free_consistent(qpti->sdev, 1401 dma_free_coherent(&op->dev,
1398 QSIZE(RES_QUEUE_LEN), 1402 QSIZE(RES_QUEUE_LEN),
1399 qpti->res_cpu, qpti->res_dvma); 1403 qpti->res_cpu, qpti->res_dvma);
1400 sbus_free_consistent(qpti->sdev, 1404 dma_free_coherent(&op->dev,
1401 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1405 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1402 qpti->req_cpu, qpti->req_dvma); 1406 qpti->req_cpu, qpti->req_dvma);
1403#undef QSIZE 1407#undef QSIZE
1404 1408
1405 sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size); 1409 of_iounmap(&op->resource[0], qpti->qregs,
1410 resource_size(&op->resource[0]));
1406 if (qpti->is_pti) 1411 if (qpti->is_pti)
1407 sbus_iounmap(qpti->sreg, sizeof(unsigned char)); 1412 of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));
1408 1413
1409 scsi_host_put(qpti->qhost); 1414 scsi_host_put(qpti->qhost);
1410 1415
1411 return 0; 1416 return 0;
1412} 1417}
1413 1418
1414static struct of_device_id qpti_match[] = { 1419static const struct of_device_id qpti_match[] = {
1415 { 1420 {
1416 .name = "ptisp", 1421 .name = "ptisp",
1417 .data = &qpti_template, 1422 .data = &qpti_template,
@@ -1441,7 +1446,7 @@ static struct of_platform_driver qpti_sbus_driver = {
1441 1446
1442static int __init qpti_init(void) 1447static int __init qpti_init(void)
1443{ 1448{
1444 return of_register_driver(&qpti_sbus_driver, &sbus_bus_type); 1449 return of_register_driver(&qpti_sbus_driver, &of_bus_type);
1445} 1450}
1446 1451
1447static void __exit qpti_exit(void) 1452static void __exit qpti_exit(void)
@@ -1452,7 +1457,7 @@ static void __exit qpti_exit(void)
1452MODULE_DESCRIPTION("QlogicISP SBUS driver"); 1457MODULE_DESCRIPTION("QlogicISP SBUS driver");
1453MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 1458MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
1454MODULE_LICENSE("GPL"); 1459MODULE_LICENSE("GPL");
1455MODULE_VERSION("2.0"); 1460MODULE_VERSION("2.1");
1456 1461
1457module_init(qpti_init); 1462module_init(qpti_init);
1458module_exit(qpti_exit); 1463module_exit(qpti_exit);
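One functional fix in the qlogicpti.c conversion is the added "sg = s;" in load_cmd(): continuation entries each hold a fixed number of data-segment descriptors, and the cursor must advance past the descriptors already emitted before the next entry is filled. A small standalone sketch of that chunked-iteration pattern (a plain array instead of a scatterlist, hypothetical names):

#include <stdio.h>

#define PER_ENTRY 7	/* descriptors that fit in one continuation entry */

/*
 * Consume 'total' segments in groups of at most PER_ENTRY, advancing the
 * cursor after each group -- the same shape as the continuation-entry loop
 * once the cursor is carried forward between groups.
 */
static void fill_continuations(const int *segs, int total)
{
	const int *cur = segs;
	int remaining = total;

	while (remaining > 0) {
		int n = remaining > PER_ENTRY ? PER_ENTRY : remaining;
		int i;

		printf("continuation entry:");
		for (i = 0; i < n; i++)
			printf(" %d", cur[i]);	/* emit one descriptor */
		printf("\n");

		remaining -= n;
		cur += n;	/* without this, every entry repeats segs[0..n) */
	}
}

int main(void)
{
	int segs[17];
	int i;

	for (i = 0; i < 17; i++)
		segs[i] = i;
	fill_continuations(segs, 17);
	return 0;
}
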
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index ef6da2df584b..9c053bbaa877 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -342,7 +342,7 @@ struct qlogicpti {
342 u_int req_in_ptr; /* index of next request slot */ 342 u_int req_in_ptr; /* index of next request slot */
343 u_int res_out_ptr; /* index of next result slot */ 343 u_int res_out_ptr; /* index of next result slot */
344 long send_marker; /* must we send a marker? */ 344 long send_marker; /* must we send a marker? */
345 struct sbus_dev *sdev; 345 struct of_device *op;
346 unsigned long __pad; 346 unsigned long __pad;
347 347
348 int cmd_count[MAX_TARGETS]; 348 int cmd_count[MAX_TARGETS];
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503d..2ac3cb2b9081 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
291 unsigned long flags; 291 unsigned long flags;
292 292
293 cmd->device = dev; 293 cmd->device = dev;
294 init_timer(&cmd->eh_timeout);
295 INIT_LIST_HEAD(&cmd->list); 294 INIT_LIST_HEAD(&cmd->list);
296 spin_lock_irqsave(&dev->list_lock, flags); 295 spin_lock_irqsave(&dev->list_lock, flags);
297 list_add_tail(&cmd->list, &dev->cmd_list); 296 list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
652 unsigned long timeout; 651 unsigned long timeout;
653 int rtn = 0; 652 int rtn = 0;
654 653
654 /*
655 * We will use a queued command if possible, otherwise we will
656 * emulate the queuing and calling of completion function ourselves.
657 */
658 atomic_inc(&cmd->device->iorequest_cnt);
659
655 /* check if the device is still usable */ 660 /* check if the device is still usable */
656 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { 661 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
657 /* in SDEV_DEL we error all commands. DID_NO_CONNECT 662 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
658 * returns an immediate error upwards, and signals 663 * returns an immediate error upwards, and signals
659 * that the device is no longer present */ 664 * that the device is no longer present */
660 cmd->result = DID_NO_CONNECT << 16; 665 cmd->result = DID_NO_CONNECT << 16;
661 atomic_inc(&cmd->device->iorequest_cnt); 666 scsi_done(cmd);
662 __scsi_done(cmd);
663 /* return 0 (because the command has been processed) */ 667 /* return 0 (because the command has been processed) */
664 goto out; 668 goto out;
665 } 669 }
666 670
667 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */ 671 /* Check to see if the scsi lld made this device blocked. */
668 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) { 672 if (unlikely(scsi_device_blocked(cmd->device))) {
669 /* 673 /*
670 * in SDEV_BLOCK, the command is just put back on the device 674 * in blocked state, the command is just put back on
671 * queue. The suspend state has already blocked the queue so 675 * the device queue. The suspend state has already
672 * future requests should not occur until the device 676 * blocked the queue so future requests should not
673 * transitions out of the suspend state. 677 * occur until the device transitions out of the
678 * suspend state.
674 */ 679 */
680
675 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 681 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
676 682
677 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); 683 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
714 host->resetting = 0; 720 host->resetting = 0;
715 } 721 }
716 722
717 /*
718 * AK: unlikely race here: for some reason the timer could
719 * expire before the serial number is set up below.
720 */
721 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
722
723 scsi_log_send(cmd); 723 scsi_log_send(cmd);
724 724
725 /* 725 /*
726 * We will use a queued command if possible, otherwise we will
727 * emulate the queuing and calling of completion function ourselves.
728 */
729 atomic_inc(&cmd->device->iorequest_cnt);
730
731 /*
732 * Before we queue this command, check if the command 726 * Before we queue this command, check if the command
733 * length exceeds what the host adapter can handle. 727 * length exceeds what the host adapter can handle.
734 */ 728 */
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
744 } 738 }
745 739
746 spin_lock_irqsave(host->host_lock, flags); 740 spin_lock_irqsave(host->host_lock, flags);
741 /*
742 * AK: unlikely race here: for some reason the timer could
743 * expire before the serial number is set up below.
744 *
745 * TODO: kill serial or move to blk layer
746 */
747 scsi_cmd_get_serial(host, cmd); 747 scsi_cmd_get_serial(host, cmd);
748 748
749 if (unlikely(host->shost_state == SHOST_DEL)) { 749 if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +754,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
754 } 754 }
755 spin_unlock_irqrestore(host->host_lock, flags); 755 spin_unlock_irqrestore(host->host_lock, flags);
756 if (rtn) { 756 if (rtn) {
757 if (scsi_delete_timer(cmd)) { 757 scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
758 atomic_inc(&cmd->device->iodone_cnt); 758 rtn : SCSI_MLQUEUE_HOST_BUSY);
759 scsi_queue_insert(cmd,
760 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
761 rtn : SCSI_MLQUEUE_HOST_BUSY);
762 }
763 SCSI_LOG_MLQUEUE(3, 759 SCSI_LOG_MLQUEUE(3,
764 printk("queuecommand : request rejected\n")); 760 printk("queuecommand : request rejected\n"));
765 } 761 }
@@ -770,24 +766,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
770} 766}
771 767
772/** 768/**
773 * scsi_req_abort_cmd -- Request command recovery for the specified command
774 * @cmd: pointer to the SCSI command of interest
775 *
776 * This function requests that SCSI Core start recovery for the
777 * command by deleting the timer and adding the command to the eh
778 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
779 * implement their own error recovery MAY ignore the timeout event if
780 * they generated scsi_req_abort_cmd.
781 */
782void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
783{
784 if (!scsi_delete_timer(cmd))
785 return;
786 scsi_times_out(cmd);
787}
788EXPORT_SYMBOL(scsi_req_abort_cmd);
789
790/**
791 * scsi_done - Enqueue the finished SCSI command into the done queue. 769 * scsi_done - Enqueue the finished SCSI command into the done queue.
792 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 770 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
793 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 771 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +780,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
802 */ 780 */
803static void scsi_done(struct scsi_cmnd *cmd) 781static void scsi_done(struct scsi_cmnd *cmd)
804{ 782{
805 /* 783 blk_complete_request(cmd->request);
806 * We don't have to worry about this one timing out anymore.
807 * If we are unable to remove the timer, then the command
808 * has already timed out. In which case, we have no choice but to
809 * let the timeout function run, as we have no idea where in fact
810 * that function could really be. It might be on another processor,
811 * etc, etc.
812 */
813 if (!scsi_delete_timer(cmd))
814 return;
815 __scsi_done(cmd);
816}
817
818/* Private entry to scsi_done() to complete a command when the timer
819 * isn't running --- used by scsi_times_out */
820void __scsi_done(struct scsi_cmnd *cmd)
821{
822 struct request *rq = cmd->request;
823
824 /*
825 * Set the serial numbers back to zero
826 */
827 cmd->serial_number = 0;
828
829 atomic_inc(&cmd->device->iodone_cnt);
830 if (cmd->result)
831 atomic_inc(&cmd->device->ioerr_cnt);
832
833 BUG_ON(!rq);
834
835 /*
836 * The uptodate/nbytes values don't matter, as we allow partial
837 * completes and thus will check this in the softirq callback
838 */
839 rq->completion_data = cmd;
840 blk_complete_request(rq);
841} 784}
842 785
843/* Move this to a header if it becomes more generally useful */ 786/* Move this to a header if it becomes more generally useful */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 880051c89bde..fecefa05cb62 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
112} 112}
113 113
114/** 114/**
115 * scsi_add_timer - Start timeout timer for a single scsi command.
116 * @scmd: scsi command that is about to start running.
117 * @timeout: amount of time to allow this command to run.
118 * @complete: timeout function to call if timer isn't canceled.
119 *
120 * Notes:
121 * This should be turned into an inline function. Each scsi command
122 * has its own timer, and as it is added to the queue, we set up the
123 * timer. When the command completes, we cancel the timer.
124 */
125void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
126 void (*complete)(struct scsi_cmnd *))
127{
128
129 /*
130 * If the clock was already running for this command, then
131 * first delete the timer. The timer handling code gets rather
132 * confused if we don't do this.
133 */
134 if (scmd->eh_timeout.function)
135 del_timer(&scmd->eh_timeout);
136
137 scmd->eh_timeout.data = (unsigned long)scmd;
138 scmd->eh_timeout.expires = jiffies + timeout;
139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
140
141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
142 " %d, (%p)\n", __func__,
143 scmd, timeout, complete));
144
145 add_timer(&scmd->eh_timeout);
146}
147
148/**
149 * scsi_delete_timer - Delete/cancel timer for a given function.
150 * @scmd: Cmd that we are canceling timer for
151 *
152 * Notes:
153 * This should be turned into an inline function.
154 *
155 * Return value:
156 * 1 if we were able to detach the timer. 0 if we blew it, and the
157 * timer function has already started to run.
158 */
159int scsi_delete_timer(struct scsi_cmnd *scmd)
160{
161 int rtn;
162
163 rtn = del_timer(&scmd->eh_timeout);
164
165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
166 " rtn: %d\n", __func__,
167 scmd, rtn));
168
169 scmd->eh_timeout.data = (unsigned long)NULL;
170 scmd->eh_timeout.function = NULL;
171
172 return rtn;
173}
174
175/**
176 * scsi_times_out - Timeout function for normal scsi commands. 115 * scsi_times_out - Timeout function for normal scsi commands.
177 * @scmd: Cmd that is timing out. 116 * @req: request that is timing out.
178 * 117 *
179 * Notes: 118 * Notes:
180 * We do not need to lock this. There is the potential for a race 119 * We do not need to lock this. There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
182 * normal completion function determines that the timer has already 121 * normal completion function determines that the timer has already
183 * fired, then it mustn't do anything. 122 * fired, then it mustn't do anything.
184 */ 123 */
185void scsi_times_out(struct scsi_cmnd *scmd) 124enum blk_eh_timer_return scsi_times_out(struct request *req)
186{ 125{
187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 126 struct scsi_cmnd *scmd = req->special;
127 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
128 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
188 129
189 scsi_log_completion(scmd, TIMEOUT_ERROR); 130 scsi_log_completion(scmd, TIMEOUT_ERROR);
190 131
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
196 eh_timed_out = NULL; 137 eh_timed_out = NULL;
197 138
198 if (eh_timed_out) 139 if (eh_timed_out)
199 switch (eh_timed_out(scmd)) { 140 rtn = eh_timed_out(scmd);
200 case EH_HANDLED: 141 switch (rtn) {
201 __scsi_done(scmd); 142 case BLK_EH_NOT_HANDLED:
202 return;
203 case EH_RESET_TIMER:
204 scsi_add_timer(scmd, scmd->timeout_per_command,
205 scsi_times_out);
206 return;
207 case EH_NOT_HANDLED:
208 break; 143 break;
144 default:
145 return rtn;
209 } 146 }
210 147
211 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 148 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
212 scmd->result |= DID_TIME_OUT << 16; 149 scmd->result |= DID_TIME_OUT << 16;
213 __scsi_done(scmd); 150 return BLK_EH_HANDLED;
214 } 151 }
152
153 return BLK_EH_NOT_HANDLED;
215} 154}
216 155
217/** 156/**
@@ -391,7 +330,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
391 330
392 case HARDWARE_ERROR: 331 case HARDWARE_ERROR:
393 if (scmd->device->retry_hwerror) 332 if (scmd->device->retry_hwerror)
394 return NEEDS_RETRY; 333 return ADD_TO_MLQUEUE;
395 else 334 else
396 return SUCCESS; 335 return SUCCESS;
397 336
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1793 1732
1794 blk_rq_init(NULL, &req); 1733 blk_rq_init(NULL, &req);
1795 scmd->request = &req; 1734 scmd->request = &req;
1796 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1797 1735
1798 scmd->cmnd = req.cmd; 1736 scmd->cmnd = req.cmd;
1799 1737
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1804 1742
1805 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 1743 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1806 1744
1807 init_timer(&scmd->eh_timeout);
1808
1809 spin_lock_irqsave(shost->host_lock, flags); 1745 spin_lock_irqsave(shost->host_lock, flags);
1810 shost->tmf_in_progress = 1; 1746 shost->tmf_in_progress = 1;
1811 spin_unlock_irqrestore(shost->host_lock, flags); 1747 spin_unlock_irqrestore(shost->host_lock, flags);
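The scsi_error.c rework turns the timeout path into a block-layer callback: scsi_times_out() now receives the request, consults the host's optional eh_timed_out() hook first, and only escalates to the error handler when the hook declines. The sketch below shows that dispatch shape in plain C, with a local enum standing in for the block layer's return codes; all names are hypothetical.

#include <stdio.h>

enum eh_verdict { EH_NOT_HANDLED, EH_HANDLED, EH_RESET_TIMER };

struct cmd {
	const char *name;
	/* optional driver hook, may be NULL */
	enum eh_verdict (*eh_timed_out)(struct cmd *);
};

/*
 * Central timeout handler: ask the driver first; if it declines (or has no
 * hook), hand the command to error recovery.  The return value tells the
 * caller whether to complete, rearm the timer, or start recovery.
 */
static enum eh_verdict handle_timeout(struct cmd *c)
{
	enum eh_verdict v = EH_NOT_HANDLED;

	if (c->eh_timed_out)
		v = c->eh_timed_out(c);
	if (v != EH_NOT_HANDLED)
		return v;	/* driver dealt with it or wants more time */

	printf("%s: escalating to error handler\n", c->name);
	return EH_NOT_HANDLED;
}

static enum eh_verdict always_retry(struct cmd *c)
{
	(void)c;
	return EH_RESET_TIMER;	/* e.g. device is busy but still alive */
}

int main(void)
{
	struct cmd a = { "cmd-a", always_retry };
	struct cmd b = { "cmd-b", NULL };

	handle_timeout(&a);
	handle_timeout(&b);
	return 0;
}
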
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ff5d56b3ee4d..98ee55ced592 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -852,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
852void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 852void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
853{ 853{
854 int result = cmd->result; 854 int result = cmd->result;
855 int this_count = scsi_bufflen(cmd); 855 int this_count;
856 struct request_queue *q = cmd->device->request_queue; 856 struct request_queue *q = cmd->device->request_queue;
857 struct request *req = cmd->request; 857 struct request *req = cmd->request;
858 int error = 0; 858 int error = 0;
@@ -908,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
908 */ 908 */
909 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 909 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
910 return; 910 return;
911 this_count = blk_rq_bytes(req);
911 912
912 /* good_bytes = 0, or (inclusive) there were leftovers and 913 /* good_bytes = 0, or (inclusive) there were leftovers and
913 * result = 0, so scsi_end_request couldn't retry. 914 * result = 0, so scsi_end_request couldn't retry.
@@ -1180,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1180 1181
1181 cmd->transfersize = req->data_len; 1182 cmd->transfersize = req->data_len;
1182 cmd->allowed = req->retries; 1183 cmd->allowed = req->retries;
1183 cmd->timeout_per_command = req->timeout;
1184 return BLKPREP_OK; 1184 return BLKPREP_OK;
1185} 1185}
1186EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); 1186EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1250,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1250 break; 1250 break;
1251 case SDEV_QUIESCE: 1251 case SDEV_QUIESCE:
1252 case SDEV_BLOCK: 1252 case SDEV_BLOCK:
1253 case SDEV_CREATED_BLOCK:
1253 /* 1254 /*
1254 * If the devices is blocked we defer normal commands. 1255 * If the devices is blocked we defer normal commands.
1255 */ 1256 */
@@ -1415,17 +1416,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1415 spin_unlock(shost->host_lock); 1416 spin_unlock(shost->host_lock);
1416 spin_lock(sdev->request_queue->queue_lock); 1417 spin_lock(sdev->request_queue->queue_lock);
1417 1418
1418 __scsi_done(cmd); 1419 blk_complete_request(req);
1419} 1420}
1420 1421
1421static void scsi_softirq_done(struct request *rq) 1422static void scsi_softirq_done(struct request *rq)
1422{ 1423{
1423 struct scsi_cmnd *cmd = rq->completion_data; 1424 struct scsi_cmnd *cmd = rq->special;
1424 unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command; 1425 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1425 int disposition; 1426 int disposition;
1426 1427
1427 INIT_LIST_HEAD(&cmd->eh_entry); 1428 INIT_LIST_HEAD(&cmd->eh_entry);
1428 1429
1430 /*
1431 * Set the serial numbers back to zero
1432 */
1433 cmd->serial_number = 0;
1434
1435 atomic_inc(&cmd->device->iodone_cnt);
1436 if (cmd->result)
1437 atomic_inc(&cmd->device->ioerr_cnt);
1438
1429 disposition = scsi_decide_disposition(cmd); 1439 disposition = scsi_decide_disposition(cmd);
1430 if (disposition != SUCCESS && 1440 if (disposition != SUCCESS &&
1431 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1441 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1674,6 +1684,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1674 1684
1675 blk_queue_prep_rq(q, scsi_prep_fn); 1685 blk_queue_prep_rq(q, scsi_prep_fn);
1676 blk_queue_softirq_done(q, scsi_softirq_done); 1686 blk_queue_softirq_done(q, scsi_softirq_done);
1687 blk_queue_rq_timed_out(q, scsi_times_out);
1677 return q; 1688 return q;
1678} 1689}
1679 1690
@@ -2063,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2063 2074
2064 switch (state) { 2075 switch (state) {
2065 case SDEV_CREATED: 2076 case SDEV_CREATED:
2066 /* There are no legal states that come back to 2077 switch (oldstate) {
2067 * created. This is the manually initialised start 2078 case SDEV_CREATED_BLOCK:
2068 * state */ 2079 break;
2069 goto illegal; 2080 default:
2081 goto illegal;
2082 }
2083 break;
2070 2084
2071 case SDEV_RUNNING: 2085 case SDEV_RUNNING:
2072 switch (oldstate) { 2086 switch (oldstate) {
@@ -2104,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2104 2118
2105 case SDEV_BLOCK: 2119 case SDEV_BLOCK:
2106 switch (oldstate) { 2120 switch (oldstate) {
2107 case SDEV_CREATED:
2108 case SDEV_RUNNING: 2121 case SDEV_RUNNING:
2122 case SDEV_CREATED_BLOCK:
2123 break;
2124 default:
2125 goto illegal;
2126 }
2127 break;
2128
2129 case SDEV_CREATED_BLOCK:
2130 switch (oldstate) {
2131 case SDEV_CREATED:
2109 break; 2132 break;
2110 default: 2133 default:
2111 goto illegal; 2134 goto illegal;
@@ -2393,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
2393 int err = 0; 2416 int err = 0;
2394 2417
2395 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2418 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2396 if (err) 2419 if (err) {
2397 return err; 2420 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2421
2422 if (err)
2423 return err;
2424 }
2398 2425
2399 /* 2426 /*
2400 * The device has transitioned to SDEV_BLOCK. Stop the 2427 * The device has transitioned to SDEV_BLOCK. Stop the
@@ -2437,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
2437 * and goose the device queue if successful. 2464 * and goose the device queue if successful.
2438 */ 2465 */
2439 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2466 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2440 if (err) 2467 if (err) {
2441 return err; 2468 err = scsi_device_set_state(sdev, SDEV_CREATED);
2469
2470 if (err)
2471 return err;
2472 }
2442 2473
2443 spin_lock_irqsave(q->queue_lock, flags); 2474 spin_lock_irqsave(q->queue_lock, flags);
2444 blk_start_queue(q); 2475 blk_start_queue(q);
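The scsi_lib.c hunks add a second blocked state reachable directly from the created state and extend scsi_device_set_state() with the transitions that make it legal. The per-state switch used there is equivalent to a small transition table; below is a standalone sketch with made-up state names and a deliberately reduced set of transitions, not the driver's full table.

#include <stdio.h>
#include <stdbool.h>

enum dev_state { ST_CREATED, ST_RUNNING, ST_BLOCK, ST_CREATED_BLOCK, ST_DEL, ST_MAX };

/* allowed[next][old] is true when the transition old -> next is legal */
static const bool allowed[ST_MAX][ST_MAX] = {
	[ST_CREATED]       = { [ST_CREATED_BLOCK] = true },
	[ST_RUNNING]       = { [ST_CREATED] = true, [ST_BLOCK] = true },
	[ST_BLOCK]         = { [ST_RUNNING] = true, [ST_CREATED_BLOCK] = true },
	[ST_CREATED_BLOCK] = { [ST_CREATED] = true },
	[ST_DEL]           = { [ST_CREATED] = true, [ST_RUNNING] = true,
			       [ST_BLOCK] = true },
};

static int set_state(enum dev_state *cur, enum dev_state next)
{
	if (!allowed[next][*cur]) {
		fprintf(stderr, "illegal transition %d -> %d\n", *cur, next);
		return -1;
	}
	*cur = next;
	return 0;
}

int main(void)
{
	enum dev_state s = ST_CREATED;

	set_state(&s, ST_CREATED_BLOCK);	/* ok: created -> created_block */
	set_state(&s, ST_BLOCK);		/* ok: created_block -> block */
	set_state(&s, ST_CREATED);		/* illegal from block: rejected */
	return 0;
}
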
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index ae7ed9a22662..b37e133de805 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -21,6 +21,7 @@
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/delay.h>
24#include <net/sock.h> 25#include <net/sock.h>
25#include <net/netlink.h> 26#include <net/netlink.h>
26 27
@@ -30,6 +31,39 @@
30struct sock *scsi_nl_sock = NULL; 31struct sock *scsi_nl_sock = NULL;
31EXPORT_SYMBOL_GPL(scsi_nl_sock); 32EXPORT_SYMBOL_GPL(scsi_nl_sock);
32 33
34static DEFINE_SPINLOCK(scsi_nl_lock);
35static struct list_head scsi_nl_drivers;
36
37static u32 scsi_nl_state;
38#define STATE_EHANDLER_BSY 0x00000001
39
40struct scsi_nl_transport {
41 int (*msg_handler)(struct sk_buff *);
42 void (*event_handler)(struct notifier_block *, unsigned long, void *);
43 unsigned int refcnt;
44 int flags;
45};
46
47/* flags values (bit flags) */
48#define HANDLER_DELETING 0x1
49
50static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
51 { {NULL, }, };
52
53
54struct scsi_nl_drvr {
55 struct list_head next;
56 int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
57 u32 len, u32 pid);
58 void (*devt_handler)(struct notifier_block *nb,
59 unsigned long event, void *notify_ptr);
60 struct scsi_host_template *hostt;
61 u64 vendor_id;
62 unsigned int refcnt;
63 int flags;
64};
65
66
33 67
34/** 68/**
35 * scsi_nl_rcv_msg - Receive message handler. 69 * scsi_nl_rcv_msg - Receive message handler.
@@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
45{ 79{
46 struct nlmsghdr *nlh; 80 struct nlmsghdr *nlh;
47 struct scsi_nl_hdr *hdr; 81 struct scsi_nl_hdr *hdr;
48 uint32_t rlen; 82 unsigned long flags;
49 int err; 83 u32 rlen;
84 int err, tport;
50 85
51 while (skb->len >= NLMSG_SPACE(0)) { 86 while (skb->len >= NLMSG_SPACE(0)) {
52 err = 0; 87 err = 0;
@@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
65 100
66 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { 101 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
67 err = -EBADMSG; 102 err = -EBADMSG;
68 return; 103 goto next_msg;
69 } 104 }
70 105
71 hdr = NLMSG_DATA(nlh); 106 hdr = NLMSG_DATA(nlh);
@@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { 118 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
84 printk(KERN_WARNING "%s: discarding partial message\n", 119 printk(KERN_WARNING "%s: discarding partial message\n",
85 __func__); 120 __func__);
86 return; 121 goto next_msg;
87 } 122 }
88 123
89 /* 124 /*
90 * We currently don't support anyone sending us a message 125 * Deliver message to the appropriate transport
91 */ 126 */
127 spin_lock_irqsave(&scsi_nl_lock, flags);
128
129 tport = hdr->transport;
130 if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
131 !(transports[tport].flags & HANDLER_DELETING) &&
132 (transports[tport].msg_handler)) {
133 transports[tport].refcnt++;
134 spin_unlock_irqrestore(&scsi_nl_lock, flags);
135 err = transports[tport].msg_handler(skb);
136 spin_lock_irqsave(&scsi_nl_lock, flags);
137 transports[tport].refcnt--;
138 } else
139 err = -ENOENT;
140
141 spin_unlock_irqrestore(&scsi_nl_lock, flags);
92 142
93next_msg: 143next_msg:
94 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) 144 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
@@ -110,14 +160,42 @@ static int
110scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) 160scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
111{ 161{
112 struct netlink_notify *n = ptr; 162 struct netlink_notify *n = ptr;
163 struct scsi_nl_drvr *driver;
164 unsigned long flags;
165 int tport;
113 166
114 if (n->protocol != NETLINK_SCSITRANSPORT) 167 if (n->protocol != NETLINK_SCSITRANSPORT)
115 return NOTIFY_DONE; 168 return NOTIFY_DONE;
116 169
170 spin_lock_irqsave(&scsi_nl_lock, flags);
171 scsi_nl_state |= STATE_EHANDLER_BSY;
172
117 /* 173 /*
118 * Currently, we are not tracking PID's, etc. There is nothing 174 * Pass event on to any transports that may be listening
119 * to handle.
120 */ 175 */
176 for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
177 if (!(transports[tport].flags & HANDLER_DELETING) &&
178 (transports[tport].event_handler)) {
179 spin_unlock_irqrestore(&scsi_nl_lock, flags);
180 transports[tport].event_handler(this, event, ptr);
181 spin_lock_irqsave(&scsi_nl_lock, flags);
182 }
183 }
184
185 /*
186 * Pass event on to any drivers that may be listening
187 */
188 list_for_each_entry(driver, &scsi_nl_drivers, next) {
189 if (!(driver->flags & HANDLER_DELETING) &&
190 (driver->devt_handler)) {
191 spin_unlock_irqrestore(&scsi_nl_lock, flags);
192 driver->devt_handler(this, event, ptr);
193 spin_lock_irqsave(&scsi_nl_lock, flags);
194 }
195 }
196
197 scsi_nl_state &= ~STATE_EHANDLER_BSY;
198 spin_unlock_irqrestore(&scsi_nl_lock, flags);
121 199
122 return NOTIFY_DONE; 200 return NOTIFY_DONE;
123} 201}
@@ -128,7 +206,281 @@ static struct notifier_block scsi_netlink_notifier = {
128 206
129 207
130/** 208/**
131 * scsi_netlink_init - Called by SCSI subsystem to intialize the SCSI transport netlink interface 209 * GENERIC SCSI transport receive and event handlers
210 **/
211
212/**
213 * scsi_generic_msg_handler - receive message handler for GENERIC transport
214 * messages
215 *
216 * @skb: socket receive buffer
217 *
218 **/
219static int
220scsi_generic_msg_handler(struct sk_buff *skb)
221{
222 struct nlmsghdr *nlh = nlmsg_hdr(skb);
223 struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
224 struct scsi_nl_drvr *driver;
225 struct Scsi_Host *shost;
226 unsigned long flags;
227 int err = 0, match, pid;
228
229 pid = NETLINK_CREDS(skb)->pid;
230
231 switch (snlh->msgtype) {
232 case SCSI_NL_SHOST_VENDOR:
233 {
234 struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
235
236 /* Locate the driver that corresponds to the message */
237 spin_lock_irqsave(&scsi_nl_lock, flags);
238 match = 0;
239 list_for_each_entry(driver, &scsi_nl_drivers, next) {
240 if (driver->vendor_id == msg->vendor_id) {
241 match = 1;
242 break;
243 }
244 }
245
246 if ((!match) || (!driver->dmsg_handler)) {
247 spin_unlock_irqrestore(&scsi_nl_lock, flags);
248 err = -ESRCH;
249 goto rcv_exit;
250 }
251
252 if (driver->flags & HANDLER_DELETING) {
253 spin_unlock_irqrestore(&scsi_nl_lock, flags);
254 err = -ESHUTDOWN;
255 goto rcv_exit;
256 }
257
258 driver->refcnt++;
259 spin_unlock_irqrestore(&scsi_nl_lock, flags);
260
261
262 /* if successful, scsi_host_lookup takes a shost reference */
263 shost = scsi_host_lookup(msg->host_no);
264 if (!shost) {
265 err = -ENODEV;
266 goto driver_exit;
267 }
268
269 /* is this host owned by the vendor ? */
270 if (shost->hostt != driver->hostt) {
271 err = -EINVAL;
272 goto vendormsg_put;
273 }
274
275 /* pass message on to the driver */
276 err = driver->dmsg_handler(shost, (void *)&msg[1],
277 msg->vmsg_datalen, pid);
278
279vendormsg_put:
280 /* release reference by scsi_host_lookup */
281 scsi_host_put(shost);
282
283driver_exit:
284 /* release our own reference on the registration object */
285 spin_lock_irqsave(&scsi_nl_lock, flags);
286 driver->refcnt--;
287 spin_unlock_irqrestore(&scsi_nl_lock, flags);
288 break;
289 }
290
291 default:
292 err = -EBADR;
293 break;
294 }
295
296rcv_exit:
297 if (err)
298 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
299 __func__, snlh->msgtype, err);
300 return err;
301}
302
303
304/**
305 * scsi_nl_add_transport -
306 * Registers message and event handlers for a transport. Enables
307 * receipt of netlink messages and events to a transport.
308 *
309 * @tport: transport registering handlers
310 * @msg_handler: receive message handler callback
311 * @event_handler: receive event handler callback
312 **/
313int
314scsi_nl_add_transport(u8 tport,
315 int (*msg_handler)(struct sk_buff *),
316 void (*event_handler)(struct notifier_block *, unsigned long, void *))
317{
318 unsigned long flags;
319 int err = 0;
320
321 if (tport >= SCSI_NL_MAX_TRANSPORTS)
322 return -EINVAL;
323
324 spin_lock_irqsave(&scsi_nl_lock, flags);
325
326 if (scsi_nl_state & STATE_EHANDLER_BSY) {
327 spin_unlock_irqrestore(&scsi_nl_lock, flags);
328 msleep(1);
329 spin_lock_irqsave(&scsi_nl_lock, flags);
330 }
331
332 if (transports[tport].msg_handler || transports[tport].event_handler) {
333 err = -EALREADY;
334 goto register_out;
335 }
336
337 transports[tport].msg_handler = msg_handler;
338 transports[tport].event_handler = event_handler;
339 transports[tport].flags = 0;
340 transports[tport].refcnt = 0;
341
342register_out:
343 spin_unlock_irqrestore(&scsi_nl_lock, flags);
344
345 return err;
346}
347EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
348
349
350/**
351 * scsi_nl_remove_transport -
 352 * Disable transport reception of messages and events
353 *
354 * @tport: transport deregistering handlers
355 *
356 **/
357void
358scsi_nl_remove_transport(u8 tport)
359{
360 unsigned long flags;
361
362 spin_lock_irqsave(&scsi_nl_lock, flags);
363 if (scsi_nl_state & STATE_EHANDLER_BSY) {
364 spin_unlock_irqrestore(&scsi_nl_lock, flags);
365 msleep(1);
366 spin_lock_irqsave(&scsi_nl_lock, flags);
367 }
368
369 if (tport < SCSI_NL_MAX_TRANSPORTS) {
370 transports[tport].flags |= HANDLER_DELETING;
371
372 while (transports[tport].refcnt != 0) {
373 spin_unlock_irqrestore(&scsi_nl_lock, flags);
374 schedule_timeout_uninterruptible(HZ/4);
375 spin_lock_irqsave(&scsi_nl_lock, flags);
376 }
377 transports[tport].msg_handler = NULL;
378 transports[tport].event_handler = NULL;
379 transports[tport].flags = 0;
380 }
381
382 spin_unlock_irqrestore(&scsi_nl_lock, flags);
383
384 return;
385}
386EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
387
388
389/**
390 * scsi_nl_add_driver -
391 * A driver is registering its interfaces for SCSI netlink messages
392 *
393 * @vendor_id: A unique identification value for the driver.
394 * @hostt: address of the driver's host template. Used
395 * to verify an shost is bound to the driver
396 * @nlmsg_handler: receive message handler callback
397 * @nlevt_handler: receive event handler callback
398 *
399 * Returns:
400 * 0 on Success
401 * error result otherwise
402 **/
403int
404scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
405 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
406 u32 len, u32 pid),
407 void (*nlevt_handler)(struct notifier_block *nb,
408 unsigned long event, void *notify_ptr))
409{
410 struct scsi_nl_drvr *driver;
411 unsigned long flags;
412
413 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
414 if (unlikely(!driver)) {
415 printk(KERN_ERR "%s: allocation failure\n", __func__);
416 return -ENOMEM;
417 }
418
419 driver->dmsg_handler = nlmsg_handler;
420 driver->devt_handler = nlevt_handler;
421 driver->hostt = hostt;
422 driver->vendor_id = vendor_id;
423
424 spin_lock_irqsave(&scsi_nl_lock, flags);
425 if (scsi_nl_state & STATE_EHANDLER_BSY) {
426 spin_unlock_irqrestore(&scsi_nl_lock, flags);
427 msleep(1);
428 spin_lock_irqsave(&scsi_nl_lock, flags);
429 }
430 list_add_tail(&driver->next, &scsi_nl_drivers);
431 spin_unlock_irqrestore(&scsi_nl_lock, flags);
432
433 return 0;
434}
435EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
436
437
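For context, a minimal LLD-side sketch of how this registration pair is meant to be used (illustrative only, not part of the patch; the vendor id, template and handler names are made up, and the declarations are assumed to come from <scsi/scsi_netlink.h>):

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_netlink.h>

/* illustrative vendor id; a real driver would use its own registered value */
#define MYDRV_NL_VENDOR_ID	0x0000123400005678ULL

static struct scsi_host_template mydrv_template;	/* the driver's existing template */

/* called for each SCSI_NL_SHOST_VENDOR message whose vendor_id matches */
static int mydrv_recv_nlmsg(struct Scsi_Host *shost, void *payload,
			    u32 len, u32 pid)
{
	/* decode the vendor-unique payload sent by userspace process 'pid' */
	return 0;
}

static int __init mydrv_nl_init(void)
{
	return scsi_nl_add_driver(MYDRV_NL_VENDOR_ID, &mydrv_template,
				  mydrv_recv_nlmsg, NULL);
}

static void __exit mydrv_nl_exit(void)
{
	scsi_nl_remove_driver(MYDRV_NL_VENDOR_ID);
}

module_init(mydrv_nl_init);
module_exit(mydrv_nl_exit);
MODULE_LICENSE("GPL");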
438/**
439 * scsi_nl_remove_driver -
 440 * A driver is unregistering its SCSI netlink message interfaces
441 *
442 * @vendor_id: The unique identification value for the driver.
443 **/
444void
445scsi_nl_remove_driver(u64 vendor_id)
446{
447 struct scsi_nl_drvr *driver;
448 unsigned long flags;
449
450 spin_lock_irqsave(&scsi_nl_lock, flags);
451 if (scsi_nl_state & STATE_EHANDLER_BSY) {
452 spin_unlock_irqrestore(&scsi_nl_lock, flags);
453 msleep(1);
454 spin_lock_irqsave(&scsi_nl_lock, flags);
455 }
456
457 list_for_each_entry(driver, &scsi_nl_drivers, next) {
458 if (driver->vendor_id == vendor_id) {
459 driver->flags |= HANDLER_DELETING;
460 while (driver->refcnt != 0) {
461 spin_unlock_irqrestore(&scsi_nl_lock, flags);
462 schedule_timeout_uninterruptible(HZ/4);
463 spin_lock_irqsave(&scsi_nl_lock, flags);
464 }
465 list_del(&driver->next);
466 kfree(driver);
467 spin_unlock_irqrestore(&scsi_nl_lock, flags);
468 return;
469 }
470 }
471
472 spin_unlock_irqrestore(&scsi_nl_lock, flags);
473
474 printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
475 __func__, (unsigned long long)vendor_id);
476 return;
477}
478EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
479
480
481/**
 482 * scsi_netlink_init - Called by SCSI subsystem to initialize
483 * the SCSI transport netlink interface
132 * 484 *
133 **/ 485 **/
134void 486void
@@ -136,6 +488,8 @@ scsi_netlink_init(void)
136{ 488{
137 int error; 489 int error;
138 490
491 INIT_LIST_HEAD(&scsi_nl_drivers);
492
139 error = netlink_register_notifier(&scsi_netlink_notifier); 493 error = netlink_register_notifier(&scsi_netlink_notifier);
140 if (error) { 494 if (error) {
141 printk(KERN_ERR "%s: register of event handler failed - %d\n", 495 printk(KERN_ERR "%s: register of event handler failed - %d\n",
@@ -150,8 +504,15 @@ scsi_netlink_init(void)
 150 printk(KERN_ERR "%s: register of receive handler failed\n", 504
151 __func__); 505 __func__);
152 netlink_unregister_notifier(&scsi_netlink_notifier); 506 netlink_unregister_notifier(&scsi_netlink_notifier);
507 return;
153 } 508 }
154 509
510 /* Register the entry points for the generic SCSI transport */
511 error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
512 scsi_generic_msg_handler, NULL);
513 if (error)
514 printk(KERN_ERR "%s: register of GENERIC transport handler"
515 " failed - %d\n", __func__, error);
155 return; 516 return;
156} 517}
157 518
@@ -163,6 +524,8 @@ scsi_netlink_init(void)
163void 524void
164scsi_netlink_exit(void) 525scsi_netlink_exit(void)
165{ 526{
527 scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
528
166 if (scsi_nl_sock) { 529 if (scsi_nl_sock) {
167 netlink_kernel_release(scsi_nl_sock); 530 netlink_kernel_release(scsi_nl_sock);
168 netlink_unregister_notifier(&scsi_netlink_notifier); 531 netlink_unregister_notifier(&scsi_netlink_notifier);
@@ -172,3 +535,147 @@ scsi_netlink_exit(void)
172} 535}
173 536
174 537
538/*
539 * Exported Interfaces
540 */
541
542/**
543 * scsi_nl_send_transport_msg -
544 * Generic function to send a single message from a SCSI transport to
545 * a single process
546 *
547 * @pid: receiving pid
548 * @hdr: message payload
549 *
550 **/
551void
552scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
553{
554 struct sk_buff *skb;
555 struct nlmsghdr *nlh;
556 const char *fn;
557 char *datab;
558 u32 len, skblen;
559 int err;
560
561 if (!scsi_nl_sock) {
562 err = -ENOENT;
563 fn = "netlink socket";
564 goto msg_fail;
565 }
566
567 len = NLMSG_SPACE(hdr->msglen);
568 skblen = NLMSG_SPACE(len);
569
570 skb = alloc_skb(skblen, GFP_KERNEL);
571 if (!skb) {
572 err = -ENOBUFS;
573 fn = "alloc_skb";
574 goto msg_fail;
575 }
576
577 nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
578 if (!nlh) {
579 err = -ENOBUFS;
580 fn = "nlmsg_put";
581 goto msg_fail_skb;
582 }
583 datab = NLMSG_DATA(nlh);
584 memcpy(datab, hdr, hdr->msglen);
585
586 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
587 if (err < 0) {
588 fn = "nlmsg_unicast";
589 /* nlmsg_unicast already kfree_skb'd */
590 goto msg_fail;
591 }
592
593 return;
594
595msg_fail_skb:
596 kfree_skb(skb);
597msg_fail:
598 printk(KERN_WARNING
599 "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
600 "msglen %d: %s : err %d\n",
601 __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
602 fn, err);
603 return;
604}
605EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
606
607
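A hedged sketch of the userspace listener such unicasts are aimed at (not part of the patch; it assumes the definitions in <scsi/scsi_netlink.h> are visible to userspace, and leaves open how the listener's pid is communicated to the kernel side):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <scsi/scsi_netlink.h>

int main(void)
{
	struct sockaddr_nl snl;
	char buf[4096];
	ssize_t len;
	int fd;

	fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_SCSITRANSPORT);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_pid = getpid();	/* the pid the kernel will unicast to */
	if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0) {
		perror("bind");
		return 1;
	}

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (nlh->nlmsg_type == SCSI_TRANSPORT_MSG) {
			struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);

			printf("transport %u msgtype %u msglen %u\n",
			       snlh->transport, snlh->msgtype, snlh->msglen);
		}
	}
	return 0;
}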
608/**
609 * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
610 * to a specific process id.
611 *
612 * @pid: process id of the receiver
613 * @host_no: host # sending the message
614 * @vendor_id: unique identifier for the driver's vendor
615 * @data_len: amount, in bytes, of vendor unique payload data
616 * @data_buf: pointer to vendor unique data buffer
617 *
618 * Returns:
 619 * 0 on successful return
620 * otherwise, failing error code
621 *
622 * Notes:
623 * This routine assumes no locks are held on entry.
624 */
625int
626scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
627 char *data_buf, u32 data_len)
628{
629 struct sk_buff *skb;
630 struct nlmsghdr *nlh;
631 struct scsi_nl_host_vendor_msg *msg;
632 u32 len, skblen;
633 int err;
634
635 if (!scsi_nl_sock) {
636 err = -ENOENT;
637 goto send_vendor_fail;
638 }
639
640 len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
641 skblen = NLMSG_SPACE(len);
642
643 skb = alloc_skb(skblen, GFP_KERNEL);
644 if (!skb) {
645 err = -ENOBUFS;
646 goto send_vendor_fail;
647 }
648
649 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
650 skblen - sizeof(*nlh), 0);
651 if (!nlh) {
652 err = -ENOBUFS;
653 goto send_vendor_fail_skb;
654 }
655 msg = NLMSG_DATA(nlh);
656
657 INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
658 SCSI_NL_SHOST_VENDOR, len);
659 msg->vendor_id = vendor_id;
660 msg->host_no = host_no;
661 msg->vmsg_datalen = data_len; /* bytes */
662 memcpy(&msg[1], data_buf, data_len);
663
664 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
665 if (err)
 666 /* nlmsg_unicast already kfree_skb'd */
667 goto send_vendor_fail;
668
669 return 0;
670
671send_vendor_fail_skb:
672 kfree_skb(skb);
673send_vendor_fail:
674 printk(KERN_WARNING
675 "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
676 __func__, host_no, err);
677 return err;
678}
679EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
680
681
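A matching kernel-side sketch of the send path (illustrative only; mydrv_post_event and MYDRV_NL_VENDOR_ID are placeholder names tied to the registration sketch above): an LLD that has learned a listener's pid from an earlier received message pushes a vendor-unique buffer back out.

static void mydrv_post_event(struct Scsi_Host *shost, u32 pid,
			     char *data, u32 len)
{
	int err;

	err = scsi_nl_send_vendor_msg(pid, shost->host_no,
				      MYDRV_NL_VENDOR_ID, data, len);
	if (err)
		shost_printk(KERN_WARNING, shost,
			     "vendor netlink msg to pid %u dropped: %d\n",
			     pid, err);
}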
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 79f0f7511204..6cddd5dd323c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
4#include <linux/device.h> 4#include <linux/device.h>
5 5
6struct request_queue; 6struct request_queue;
7struct request;
7struct scsi_cmnd; 8struct scsi_cmnd;
8struct scsi_device; 9struct scsi_device;
9struct scsi_host_template; 10struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
27extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); 28extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
28extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 29extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
29extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 30extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
30extern void __scsi_done(struct scsi_cmnd *cmd);
31#ifdef CONFIG_SCSI_LOGGING 31#ifdef CONFIG_SCSI_LOGGING
32void scsi_log_send(struct scsi_cmnd *cmd); 32void scsi_log_send(struct scsi_cmnd *cmd);
33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); 33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
49extern void scsi_exit_devinfo(void); 49extern void scsi_exit_devinfo(void);
50 50
51/* scsi_error.c */ 51/* scsi_error.c */
52extern void scsi_add_timer(struct scsi_cmnd *, int, 52extern enum blk_eh_timer_return scsi_times_out(struct request *req);
53 void (*)(struct scsi_cmnd *));
54extern int scsi_delete_timer(struct scsi_cmnd *);
55extern void scsi_times_out(struct scsi_cmnd *cmd);
56extern int scsi_error_handler(void *host); 53extern int scsi_error_handler(void *host);
57extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 54extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
58extern void scsi_eh_wakeup(struct Scsi_Host *shost); 55extern void scsi_eh_wakeup(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c6a904a45bf9..82f7b2dd08a2 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
259 int error = -ENXIO; 259 int error = -ENXIO;
260 260
261 shost = scsi_host_lookup(host); 261 shost = scsi_host_lookup(host);
262 if (IS_ERR(shost)) 262 if (!shost)
263 return PTR_ERR(shost); 263 return error;
264 264
265 if (shost->transportt->user_scan) 265 if (shost->transportt->user_scan)
266 error = shost->transportt->user_scan(shost, channel, id, lun); 266 error = shost->transportt->user_scan(shost, channel, id, lun);
@@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
287 int error = -ENXIO; 287 int error = -ENXIO;
288 288
289 shost = scsi_host_lookup(host); 289 shost = scsi_host_lookup(host);
290 if (IS_ERR(shost)) 290 if (!shost)
291 return PTR_ERR(shost); 291 return error;
292 sdev = scsi_device_lookup(shost, channel, id, lun); 292 sdev = scsi_device_lookup(shost, channel, id, lun);
293 if (sdev) { 293 if (sdev) {
294 scsi_remove_device(sdev); 294 scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 84b4879cff11..334862e26a1b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -730,6 +730,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
730static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 730static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
731 int *bflags, int async) 731 int *bflags, int async)
732{ 732{
733 int ret;
734
733 /* 735 /*
734 * XXX do not save the inquiry, since it can change underneath us, 736 * XXX do not save the inquiry, since it can change underneath us,
735 * save just vendor/model/rev. 737 * save just vendor/model/rev.
@@ -885,7 +887,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
885 887
886 /* set the device running here so that slave configure 888 /* set the device running here so that slave configure
887 * may do I/O */ 889 * may do I/O */
888 scsi_device_set_state(sdev, SDEV_RUNNING); 890 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
891 if (ret) {
892 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
893
894 if (ret) {
895 sdev_printk(KERN_ERR, sdev,
896 "in wrong state %s to complete scan\n",
897 scsi_device_state_name(sdev->sdev_state));
898 return SCSI_SCAN_NO_RESPONSE;
899 }
900 }
889 901
890 if (*bflags & BLIST_MS_192_BYTES_FOR_3F) 902 if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
891 sdev->use_192_bytes_for_3f = 1; 903 sdev->use_192_bytes_for_3f = 1;
@@ -899,7 +911,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
899 transport_configure_device(&sdev->sdev_gendev); 911 transport_configure_device(&sdev->sdev_gendev);
900 912
901 if (sdev->host->hostt->slave_configure) { 913 if (sdev->host->hostt->slave_configure) {
902 int ret = sdev->host->hostt->slave_configure(sdev); 914 ret = sdev->host->hostt->slave_configure(sdev);
903 if (ret) { 915 if (ret) {
904 /* 916 /*
905 * if LLDD reports slave not present, don't clutter 917 * if LLDD reports slave not present, don't clutter
@@ -994,7 +1006,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
994 */ 1006 */
995 sdev = scsi_device_lookup_by_target(starget, lun); 1007 sdev = scsi_device_lookup_by_target(starget, lun);
996 if (sdev) { 1008 if (sdev) {
997 if (rescan || sdev->sdev_state != SDEV_CREATED) { 1009 if (rescan || !scsi_device_created(sdev)) {
998 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1010 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
999 "scsi scan: device exists on %s\n", 1011 "scsi scan: device exists on %s\n",
1000 sdev->sdev_gendev.bus_id)); 1012 sdev->sdev_gendev.bus_id));
@@ -1080,7 +1092,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1080 * PDT=1Fh none (no FDD connected to the requested logical unit) 1092 * PDT=1Fh none (no FDD connected to the requested logical unit)
1081 */ 1093 */
1082 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && 1094 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1083 (result[0] & 0x1f) == 0x1f) { 1095 (result[0] & 0x1f) == 0x1f &&
1096 !scsi_is_wlun(lun)) {
1084 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1097 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
1085 "scsi scan: peripheral device type" 1098 "scsi scan: peripheral device type"
1086 " of 31, no device added\n")); 1099 " of 31, no device added\n"));
@@ -1466,7 +1479,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1466 kfree(lun_data); 1479 kfree(lun_data);
1467 out: 1480 out:
1468 scsi_device_put(sdev); 1481 scsi_device_put(sdev);
1469 if (sdev->sdev_state == SDEV_CREATED) 1482 if (scsi_device_created(sdev))
1470 /* 1483 /*
1471 * the sdev we used didn't appear in the report luns scan 1484 * the sdev we used didn't appear in the report luns scan
1472 */ 1485 */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ab3c71869be5..93c28f30bbd7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -34,6 +34,7 @@ static const struct {
34 { SDEV_QUIESCE, "quiesce" }, 34 { SDEV_QUIESCE, "quiesce" },
35 { SDEV_OFFLINE, "offline" }, 35 { SDEV_OFFLINE, "offline" },
36 { SDEV_BLOCK, "blocked" }, 36 { SDEV_BLOCK, "blocked" },
37 { SDEV_CREATED_BLOCK, "created-blocked" },
37}; 38};
38 39
39const char *scsi_device_state_name(enum scsi_device_state state) 40const char *scsi_device_state_name(enum scsi_device_state state)
@@ -560,12 +561,15 @@ sdev_rd_attr (vendor, "%.8s\n");
560sdev_rd_attr (model, "%.16s\n"); 561sdev_rd_attr (model, "%.16s\n");
561sdev_rd_attr (rev, "%.4s\n"); 562sdev_rd_attr (rev, "%.4s\n");
562 563
564/*
565 * TODO: can we make these symlinks to the block layer ones?
566 */
563static ssize_t 567static ssize_t
564sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) 568sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
565{ 569{
566 struct scsi_device *sdev; 570 struct scsi_device *sdev;
567 sdev = to_scsi_device(dev); 571 sdev = to_scsi_device(dev);
568 return snprintf (buf, 20, "%d\n", sdev->timeout / HZ); 572 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
569} 573}
570 574
571static ssize_t 575static ssize_t
@@ -576,7 +580,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
576 int timeout; 580 int timeout;
577 sdev = to_scsi_device(dev); 581 sdev = to_scsi_device(dev);
578 sscanf (buf, "%d\n", &timeout); 582 sscanf (buf, "%d\n", &timeout);
579 sdev->timeout = timeout * HZ; 583 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
580 return count; 584 return count;
581} 585}
582static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); 586static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39af..48ba413f7f6a 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
362 int err; 362 int err;
363 363
364 dprintk("%lx %u\n", uaddr, len); 364 dprintk("%lx %u\n", uaddr, len);
365 err = blk_rq_map_user(q, rq, (void *)uaddr, len); 365 err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
366 if (err) { 366 if (err) {
367 /* 367 /*
368 * TODO: need to fixup sg_tablesize, max_segment_size, 368 * TODO: need to fixup sg_tablesize, max_segment_size,
@@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
460 460
461 /* TODO: replace with a O(1) alg */ 461 /* TODO: replace with a O(1) alg */
462 shost = scsi_host_lookup(host_no); 462 shost = scsi_host_lookup(host_no);
463 if (IS_ERR(shost)) { 463 if (!shost) {
464 printk(KERN_ERR "Could not find host no %d\n", host_no); 464 printk(KERN_ERR "Could not find host no %d\n", host_no);
465 return -EINVAL; 465 return -EINVAL;
466 } 466 }
@@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); 550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
551 551
552 shost = scsi_host_lookup(host_no); 552 shost = scsi_host_lookup(host_no);
553 if (IS_ERR(shost)) { 553 if (!shost) {
554 printk(KERN_ERR "Could not find host no %d\n", host_no); 554 printk(KERN_ERR "Could not find host no %d\n", host_no);
555 return err; 555 return err;
556 } 556 }
@@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); 603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
604 604
605 shost = scsi_host_lookup(host_no); 605 shost = scsi_host_lookup(host_no);
606 if (IS_ERR(shost)) { 606 if (!shost) {
607 printk(KERN_ERR "Could not find host no %d\n", host_no); 607 printk(KERN_ERR "Could not find host no %d\n", host_no);
608 return err; 608 return err;
609 } 609 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 56823fd1fb84..d5f7653bb94b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -40,31 +40,7 @@
40 40
41static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 41static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
42static void fc_vport_sched_delete(struct work_struct *work); 42static void fc_vport_sched_delete(struct work_struct *work);
43 43static int fc_vport_setup(struct Scsi_Host *shost, int channel,
44/*
45 * This is a temporary carrier for creating a vport. It will eventually
46 * be replaced by a real message definition for sgio or netlink.
47 *
48 * fc_vport_identifiers: This set of data contains all elements
49 * to uniquely identify and instantiate a FC virtual port.
50 *
51 * Notes:
52 * symbolic_name: The driver is to append the symbolic_name string data
53 * to the symbolic_node_name data that it generates by default.
54 * the resulting combination should then be registered with the switch.
55 * It is expected that things like Xen may stuff a VM title into
56 * this field.
57 */
58struct fc_vport_identifiers {
59 u64 node_name;
60 u64 port_name;
61 u32 roles;
62 bool disable;
63 enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
64 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
65};
66
67static int fc_vport_create(struct Scsi_Host *shost, int channel,
68 struct device *pdev, struct fc_vport_identifiers *ids, 44 struct device *pdev, struct fc_vport_identifiers *ids,
69 struct fc_vport **vport); 45 struct fc_vport **vport);
70 46
@@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1760 vid.disable = false; /* always enabled */ 1736 vid.disable = false; /* always enabled */
1761 1737
1762 /* we only allow support on Channel 0 !!! */ 1738 /* we only allow support on Channel 0 !!! */
1763 stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport); 1739 stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1764 return stat ? stat : count; 1740 return stat ? stat : count;
1765} 1741}
1766static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, 1742static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@@ -1950,15 +1926,15 @@ static int fc_vport_match(struct attribute_container *cont,
1950 * Notes: 1926 * Notes:
1951 * This routine assumes no locks are held on entry. 1927 * This routine assumes no locks are held on entry.
1952 */ 1928 */
1953static enum scsi_eh_timer_return 1929static enum blk_eh_timer_return
1954fc_timed_out(struct scsi_cmnd *scmd) 1930fc_timed_out(struct scsi_cmnd *scmd)
1955{ 1931{
1956 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); 1932 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
1957 1933
1958 if (rport->port_state == FC_PORTSTATE_BLOCKED) 1934 if (rport->port_state == FC_PORTSTATE_BLOCKED)
1959 return EH_RESET_TIMER; 1935 return BLK_EH_RESET_TIMER;
1960 1936
1961 return EH_NOT_HANDLED; 1937 return BLK_EH_NOT_HANDLED;
1962} 1938}
1963 1939
1964/* 1940/*
@@ -3103,7 +3079,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3103 3079
3104 3080
3105/** 3081/**
3106 * fc_vport_create - allocates and creates a FC virtual port. 3082 * fc_vport_setup - allocates and creates a FC virtual port.
3107 * @shost: scsi host the virtual port is connected to. 3083 * @shost: scsi host the virtual port is connected to.
3108 * @channel: Channel on shost port connected to. 3084 * @channel: Channel on shost port connected to.
3109 * @pdev: parent device for vport 3085 * @pdev: parent device for vport
@@ -3118,7 +3094,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3118 * This routine assumes no locks are held on entry. 3094 * This routine assumes no locks are held on entry.
3119 */ 3095 */
3120static int 3096static int
3121fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev, 3097fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3122 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) 3098 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3123{ 3099{
3124 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3100 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@@ -3231,6 +3207,28 @@ delete_vport:
3231 return error; 3207 return error;
3232} 3208}
3233 3209
3210/**
3211 * fc_vport_create - Admin App or LLDD requests creation of a vport
3212 * @shost: scsi host the virtual port is connected to.
3213 * @channel: channel on shost port connected to.
3214 * @ids: The world wide names, FC4 port roles, etc for
3215 * the virtual port.
3216 *
3217 * Notes:
3218 * This routine assumes no locks are held on entry.
3219 */
3220struct fc_vport *
3221fc_vport_create(struct Scsi_Host *shost, int channel,
3222 struct fc_vport_identifiers *ids)
3223{
3224 int stat;
3225 struct fc_vport *vport;
3226
3227 stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3228 ids, &vport);
3229 return stat ? NULL : vport;
3230}
3231EXPORT_SYMBOL(fc_vport_create);
3234 3232
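A minimal sketch of how an LLDD or management layer calls the newly exported entry point (illustrative only; mydrv_make_vport and the wwnn/wwpn parameters are placeholders, and the fc_vport_identifiers, role and port-type definitions are assumed to come from <scsi/scsi_transport_fc.h>):

#include <scsi/scsi_transport_fc.h>

static struct fc_vport *mydrv_make_vport(struct Scsi_Host *shost,
					 u64 wwnn, u64 wwpn)
{
	struct fc_vport_identifiers vid = {
		.node_name  = wwnn,
		.port_name  = wwpn,
		.roles      = FC_PORT_ROLE_FCP_INITIATOR,
		.vport_type = FC_PORTTYPE_NPIV,	/* only NPIV is accepted */
		.disable    = false,
	};

	/* NULL means the vport could not be set up */
	return fc_vport_create(shost, 0, &vid);
}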
3235/** 3233/**
3236 * fc_vport_terminate - Admin App or LLDD requests termination of a vport 3234 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 043c3921164f..0ce5f7cdfe2a 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1361,7 +1361,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
1361 return -EINVAL; 1361 return -EINVAL;
1362 1362
1363 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); 1363 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
1364 if (IS_ERR(shost)) { 1364 if (!shost) {
1365 printk(KERN_ERR "target discovery could not find host no %u\n", 1365 printk(KERN_ERR "target discovery could not find host no %u\n",
1366 ev->u.tgt_dscvr.host_no); 1366 ev->u.tgt_dscvr.host_no);
1367 return -ENODEV; 1367 return -ENODEV;
@@ -1387,7 +1387,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1387 return -ENOSYS; 1387 return -ENOSYS;
1388 1388
1389 shost = scsi_host_lookup(ev->u.set_host_param.host_no); 1389 shost = scsi_host_lookup(ev->u.set_host_param.host_no);
1390 if (IS_ERR(shost)) { 1390 if (!shost) {
1391 printk(KERN_ERR "set_host_param could not find host no %u\n", 1391 printk(KERN_ERR "set_host_param could not find host no %u\n",
1392 ev->u.set_host_param.host_no); 1392 ev->u.set_host_param.host_no);
1393 return -ENODEV; 1393 return -ENODEV;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 75a64a6cae8c..b29360ed0bdc 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -366,12 +366,14 @@ spi_transport_rd_attr(rti, "%d\n");
366spi_transport_rd_attr(pcomp_en, "%d\n"); 366spi_transport_rd_attr(pcomp_en, "%d\n");
367spi_transport_rd_attr(hold_mcs, "%d\n"); 367spi_transport_rd_attr(hold_mcs, "%d\n");
368 368
369/* we only care about the first child device so we return 1 */ 369/* we only care about the first child device that's a real SCSI device
370 * so we return 1 to terminate the iteration when we find it */
370static int child_iter(struct device *dev, void *data) 371static int child_iter(struct device *dev, void *data)
371{ 372{
372 struct scsi_device *sdev = to_scsi_device(dev); 373 if (!scsi_is_sdev_device(dev))
374 return 0;
373 375
374 spi_dv_device(sdev); 376 spi_dv_device(to_scsi_device(dev));
375 return 1; 377 return 1;
376} 378}
377 379
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d7856454..a7b53be63367 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
47#include <linux/blkpg.h> 47#include <linux/blkpg.h>
48#include <linux/delay.h> 48#include <linux/delay.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/string_helpers.h>
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51 52
52#include <scsi/scsi.h> 53#include <scsi/scsi.h>
@@ -86,6 +87,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
86MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); 87MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
87MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); 88MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
88 89
90#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
91#define SD_MINORS 16
92#else
93#define SD_MINORS 0
94#endif
95
89static int sd_revalidate_disk(struct gendisk *); 96static int sd_revalidate_disk(struct gendisk *);
90static int sd_probe(struct device *); 97static int sd_probe(struct device *);
91static int sd_remove(struct device *); 98static int sd_remove(struct device *);
@@ -159,7 +166,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
159 sd_print_sense_hdr(sdkp, &sshdr); 166 sd_print_sense_hdr(sdkp, &sshdr);
160 return -EINVAL; 167 return -EINVAL;
161 } 168 }
162 sd_revalidate_disk(sdkp->disk); 169 revalidate_disk(sdkp->disk);
163 return count; 170 return count;
164} 171}
165 172
@@ -377,7 +384,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
377 sector_t block = rq->sector; 384 sector_t block = rq->sector;
378 sector_t threshold; 385 sector_t threshold;
379 unsigned int this_count = rq->nr_sectors; 386 unsigned int this_count = rq->nr_sectors;
380 unsigned int timeout = sdp->timeout;
381 int ret; 387 int ret;
382 388
383 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 389 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -578,7 +584,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
578 SCpnt->transfersize = sdp->sector_size; 584 SCpnt->transfersize = sdp->sector_size;
579 SCpnt->underflow = this_count << 9; 585 SCpnt->underflow = this_count << 9;
580 SCpnt->allowed = SD_MAX_RETRIES; 586 SCpnt->allowed = SD_MAX_RETRIES;
581 SCpnt->timeout_per_command = timeout;
582 587
583 /* 588 /*
584 * This indicates that the command is ready from our end to be 589 * This indicates that the command is ready from our end to be
@@ -910,7 +915,7 @@ static void sd_rescan(struct device *dev)
910 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 915 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
911 916
912 if (sdkp) { 917 if (sdkp) {
913 sd_revalidate_disk(sdkp->disk); 918 revalidate_disk(sdkp->disk);
914 scsi_disk_put(sdkp); 919 scsi_disk_put(sdkp);
915 } 920 }
916} 921}
@@ -1429,27 +1434,21 @@ got_data:
1429 */ 1434 */
1430 sector_size = 512; 1435 sector_size = 512;
1431 } 1436 }
1437 blk_queue_hardsect_size(sdp->request_queue, sector_size);
1438
1432 { 1439 {
1433 /* 1440 char cap_str_2[10], cap_str_10[10];
1434 * The msdos fs needs to know the hardware sector size 1441 u64 sz = sdkp->capacity << ffz(~sector_size);
1435 * So I have created this table. See ll_rw_blk.c
1436 * Jacques Gelinas (Jacques@solucorp.qc.ca)
1437 */
1438 int hard_sector = sector_size;
1439 sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
1440 struct request_queue *queue = sdp->request_queue;
1441 sector_t mb = sz;
1442 1442
1443 blk_queue_hardsect_size(queue, hard_sector); 1443 string_get_size(sz, STRING_UNITS_2, cap_str_2,
1444 /* avoid 64-bit division on 32-bit platforms */ 1444 sizeof(cap_str_2));
1445 sector_div(sz, 625); 1445 string_get_size(sz, STRING_UNITS_10, cap_str_10,
1446 mb -= sz - 974; 1446 sizeof(cap_str_10));
1447 sector_div(mb, 1950);
1448 1447
1449 sd_printk(KERN_NOTICE, sdkp, 1448 sd_printk(KERN_NOTICE, sdkp,
1450 "%llu %d-byte hardware sectors (%llu MB)\n", 1449 "%llu %d-byte hardware sectors: (%s/%s)\n",
1451 (unsigned long long)sdkp->capacity, 1450 (unsigned long long)sdkp->capacity,
1452 hard_sector, (unsigned long long)mb); 1451 sector_size, cap_str_10, cap_str_2);
1453 } 1452 }
1454 1453
1455 /* Rescale capacity to 512-byte units */ 1454 /* Rescale capacity to 512-byte units */
@@ -1764,6 +1763,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
1764} 1763}
1765 1764
1766/** 1765/**
1766 * sd_format_disk_name - format disk name
1767 * @prefix: name prefix - ie. "sd" for SCSI disks
1768 * @index: index of the disk to format name for
1769 * @buf: output buffer
1770 * @buflen: length of the output buffer
1771 *
 1772 * SCSI disk names start at sda. The 26th device is sdz and the
 1773 * 27th is sdaa. The last two-lettered name is sdzz, which is
 1774 * followed by sdaaa.
 1775 *
 1776 * This is basically base-26 counting with one extra 'nil' entry
 1777 * at the beginning from the second digit on, and can be
 1778 * determined using a method similar to base-26 conversion, with the
1779 * index shifted -1 after each digit is computed.
1780 *
1781 * CONTEXT:
1782 * Don't care.
1783 *
1784 * RETURNS:
1785 * 0 on success, -errno on failure.
1786 */
1787static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
1788{
1789 const int base = 'z' - 'a' + 1;
1790 char *begin = buf + strlen(prefix);
1791 char *end = buf + buflen;
1792 char *p;
1793 int unit;
1794
1795 p = end - 1;
1796 *p = '\0';
1797 unit = base;
1798 do {
1799 if (p == begin)
1800 return -EINVAL;
1801 *--p = 'a' + (index % unit);
1802 index = (index / unit) - 1;
1803 } while (index >= 0);
1804
1805 memmove(begin, p, end - p);
1806 memcpy(buf, prefix, strlen(prefix));
1807
1808 return 0;
1809}
1810
1811/**
1767 * sd_probe - called during driver initialization and whenever a 1812 * sd_probe - called during driver initialization and whenever a
1768 * new scsi device is attached to the system. It is called once 1813 * new scsi device is attached to the system. It is called once
1769 * for each scsi device (not just disks) present. 1814 * for each scsi device (not just disks) present.
@@ -1801,7 +1846,7 @@ static int sd_probe(struct device *dev)
1801 if (!sdkp) 1846 if (!sdkp)
1802 goto out; 1847 goto out;
1803 1848
1804 gd = alloc_disk(16); 1849 gd = alloc_disk(SD_MINORS);
1805 if (!gd) 1850 if (!gd)
1806 goto out_free; 1851 goto out_free;
1807 1852
@@ -1815,8 +1860,8 @@ static int sd_probe(struct device *dev)
1815 if (error) 1860 if (error)
1816 goto out_put; 1861 goto out_put;
1817 1862
1818 error = -EBUSY; 1863 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
1819 if (index >= SD_MAX_DISKS) 1864 if (error)
1820 goto out_free_index; 1865 goto out_free_index;
1821 1866
1822 sdkp->device = sdp; 1867 sdkp->device = sdp;
@@ -1826,11 +1871,12 @@ static int sd_probe(struct device *dev)
1826 sdkp->openers = 0; 1871 sdkp->openers = 0;
1827 sdkp->previous_state = 1; 1872 sdkp->previous_state = 1;
1828 1873
1829 if (!sdp->timeout) { 1874 if (!sdp->request_queue->rq_timeout) {
1830 if (sdp->type != TYPE_MOD) 1875 if (sdp->type != TYPE_MOD)
1831 sdp->timeout = SD_TIMEOUT; 1876 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1832 else 1877 else
1833 sdp->timeout = SD_MOD_TIMEOUT; 1878 blk_queue_rq_timeout(sdp->request_queue,
1879 SD_MOD_TIMEOUT);
1834 } 1880 }
1835 1881
1836 device_initialize(&sdkp->dev); 1882 device_initialize(&sdkp->dev);
@@ -1843,24 +1889,12 @@ static int sd_probe(struct device *dev)
1843 1889
1844 get_device(&sdp->sdev_gendev); 1890 get_device(&sdp->sdev_gendev);
1845 1891
1846 gd->major = sd_major((index & 0xf0) >> 4); 1892 if (index < SD_MAX_DISKS) {
1847 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1893 gd->major = sd_major((index & 0xf0) >> 4);
1848 gd->minors = 16; 1894 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
1849 gd->fops = &sd_fops; 1895 gd->minors = SD_MINORS;
1850
1851 if (index < 26) {
1852 sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
1853 } else if (index < (26 + 1) * 26) {
1854 sprintf(gd->disk_name, "sd%c%c",
1855 'a' + index / 26 - 1,'a' + index % 26);
1856 } else {
1857 const unsigned int m1 = (index / 26 - 1) / 26 - 1;
1858 const unsigned int m2 = (index / 26 - 1) % 26;
1859 const unsigned int m3 = index % 26;
1860 sprintf(gd->disk_name, "sd%c%c%c",
1861 'a' + m1, 'a' + m2, 'a' + m3);
1862 } 1896 }
1863 1897 gd->fops = &sd_fops;
1864 gd->private_data = &sdkp->driver; 1898 gd->private_data = &sdkp->driver;
1865 gd->queue = sdkp->device->request_queue; 1899 gd->queue = sdkp->device->request_queue;
1866 1900
@@ -1869,7 +1903,7 @@ static int sd_probe(struct device *dev)
1869 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 1903 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
1870 1904
1871 gd->driverfs_dev = &sdp->sdev_gendev; 1905 gd->driverfs_dev = &sdp->sdev_gendev;
1872 gd->flags = GENHD_FL_DRIVERFS; 1906 gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
1873 if (sdp->removable) 1907 if (sdp->removable)
1874 gd->flags |= GENHD_FL_REMOVABLE; 1908 gd->flags |= GENHD_FL_REMOVABLE;
1875 1909
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0fe031f003e7..1bcf3c33d7ff 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -345,14 +345,14 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
345 return 0; 345 return 0;
346} 346}
347 347
348#define VPD_INQUIRY_SIZE 512 348#define VPD_INQUIRY_SIZE 36
349 349
350static void ses_match_to_enclosure(struct enclosure_device *edev, 350static void ses_match_to_enclosure(struct enclosure_device *edev,
351 struct scsi_device *sdev) 351 struct scsi_device *sdev)
352{ 352{
353 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL); 353 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
354 unsigned char *desc; 354 unsigned char *desc;
355 int len; 355 u16 vpd_len;
356 struct efd efd = { 356 struct efd efd = {
357 .addr = 0, 357 .addr = 0,
358 }; 358 };
@@ -372,9 +372,19 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
372 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES)) 372 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
373 goto free; 373 goto free;
374 374
375 len = (buf[2] << 8) + buf[3]; 375 vpd_len = (buf[2] << 8) + buf[3];
376 kfree(buf);
377 buf = kmalloc(vpd_len, GFP_KERNEL);
378 if (!buf)
379 return;
380 cmd[3] = vpd_len >> 8;
381 cmd[4] = vpd_len & 0xff;
382 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
383 vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
384 goto free;
385
376 desc = buf + 4; 386 desc = buf + 4;
377 while (desc < buf + len) { 387 while (desc < buf + vpd_len) {
378 enum scsi_protocol proto = desc[0] >> 4; 388 enum scsi_protocol proto = desc[0] >> 4;
379 u8 code_set = desc[0] & 0x0f; 389 u8 code_set = desc[0] & 0x0f;
380 u8 piv = desc[1] & 0x80; 390 u8 piv = desc[1] & 0x80;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d3b8ebb83776..ba9b9bbd4e73 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
47#include <linux/seq_file.h> 47#include <linux/seq_file.h>
48#include <linux/blkdev.h> 48#include <linux/blkdev.h>
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/scatterlist.h>
51#include <linux/blktrace_api.h> 50#include <linux/blktrace_api.h>
52#include <linux/smp_lock.h> 51#include <linux/smp_lock.h>
53 52
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
69#endif 68#endif
70 69
71#define SG_ALLOW_DIO_DEF 0 70#define SG_ALLOW_DIO_DEF 0
72#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
73 71
74#define SG_MAX_DEVS 32768 72#define SG_MAX_DEVS 32768
75 73
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
118 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ 116 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
119 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ 117 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
120 unsigned bufflen; /* Size of (aggregate) data buffer */ 118 unsigned bufflen; /* Size of (aggregate) data buffer */
121 unsigned b_malloc_len; /* actual len malloc'ed in buffer */ 119 struct page **pages;
122 struct scatterlist *buffer;/* scatter list */ 120 int page_order;
123 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ 121 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
124 unsigned char cmd_opcode; /* first byte of command */ 122 unsigned char cmd_opcode; /* first byte of command */
125} Sg_scatter_hold; 123} Sg_scatter_hold;
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
137 char orphan; /* 1 -> drop on sight, 0 -> normal */ 135 char orphan; /* 1 -> drop on sight, 0 -> normal */
138 char sg_io_owned; /* 1 -> packet belongs to SG_IO */ 136 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
139 volatile char done; /* 0->before bh, 1->before read, 2->read */ 137 volatile char done; /* 0->before bh, 1->before read, 2->read */
138 struct request *rq;
139 struct bio *bio;
140} Sg_request; 140} Sg_request;
141 141
142typedef struct sg_fd { /* holds the state of a file descriptor */ 142typedef struct sg_fd { /* holds the state of a file descriptor */
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
175 175
176static int sg_fasync(int fd, struct file *filp, int mode); 176static int sg_fasync(int fd, struct file *filp, int mode);
177/* tasklet or soft irq callback */ 177/* tasklet or soft irq callback */
178static void sg_cmd_done(void *data, char *sense, int result, int resid); 178static void sg_rq_end_io(struct request *rq, int uptodate);
179static int sg_start_req(Sg_request * srp); 179static int sg_start_req(Sg_request *srp, unsigned char *cmd);
180static void sg_finish_rem_req(Sg_request * srp); 180static void sg_finish_rem_req(Sg_request * srp);
181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); 181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, 182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
188 int read_only, Sg_request **o_srp); 188 int read_only, Sg_request **o_srp);
189static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 189static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
190 unsigned char *cmnd, int timeout, int blocking); 190 unsigned char *cmnd, int timeout, int blocking);
191static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
192 int wr_xf, int *countp, unsigned char __user **up);
193static int sg_write_xfer(Sg_request * srp);
194static int sg_read_xfer(Sg_request * srp);
195static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
196static void sg_remove_scat(Sg_scatter_hold * schp); 192static void sg_remove_scat(Sg_scatter_hold * schp);
197static void sg_build_reserve(Sg_fd * sfp, int req_size); 193static void sg_build_reserve(Sg_fd * sfp, int req_size);
198static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
199static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
200static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
201static void sg_page_free(struct page *page, int size);
202static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 196static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
203static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 197static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
204static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 198static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
206static Sg_request *sg_add_request(Sg_fd * sfp); 200static Sg_request *sg_add_request(Sg_fd * sfp);
207static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 201static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
208static int sg_res_in_use(Sg_fd * sfp); 202static int sg_res_in_use(Sg_fd * sfp);
209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
210static Sg_device *sg_get_dev(int dev); 203static Sg_device *sg_get_dev(int dev);
211#ifdef CONFIG_SCSI_PROC_FS 204#ifdef CONFIG_SCSI_PROC_FS
212static int sg_last_dev(void); 205static int sg_last_dev(void);
@@ -217,6 +210,18 @@ static int sg_last_dev(void);
217#define SZ_SG_IOVEC sizeof(sg_iovec_t) 210#define SZ_SG_IOVEC sizeof(sg_iovec_t)
218#define SZ_SG_REQ_INFO sizeof(sg_req_info_t) 211#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
219 212
213static int sg_allow_access(struct file *filp, unsigned char *cmd)
214{
215 struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
216 struct request_queue *q = sfp->parentdp->device->request_queue;
217
218 if (sfp->parentdp->device->type == TYPE_SCANNER)
219 return 0;
220
221 return blk_verify_command(&q->cmd_filter,
222 cmd, filp->f_mode & FMODE_WRITE);
223}
224
220static int 225static int
221sg_open(struct inode *inode, struct file *filp) 226sg_open(struct inode *inode, struct file *filp)
222{ 227{
@@ -517,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
517 err = -EFAULT; 522 err = -EFAULT;
518 goto err_out; 523 goto err_out;
519 } 524 }
520 err = sg_read_xfer(srp); 525err_out:
521 err_out:
522 sg_finish_rem_req(srp); 526 sg_finish_rem_req(srp);
523 return (0 == err) ? count : err; 527 return (0 == err) ? count : err;
524} 528}
@@ -600,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
600 else 604 else
601 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; 605 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
602 hp->dxfer_len = mxsize; 606 hp->dxfer_len = mxsize;
603 hp->dxferp = (char __user *)buf + cmd_size; 607 if (hp->dxfer_direction == SG_DXFER_TO_DEV)
608 hp->dxferp = (char __user *)buf + cmd_size;
609 else
610 hp->dxferp = NULL;
604 hp->sbp = NULL; 611 hp->sbp = NULL;
605 hp->timeout = old_hdr.reply_len; /* structure abuse ... */ 612 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
606 hp->flags = input_size; /* structure abuse ... */ 613 hp->flags = input_size; /* structure abuse ... */
@@ -689,7 +696,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
689 sg_remove_request(sfp, srp); 696 sg_remove_request(sfp, srp);
690 return -EFAULT; 697 return -EFAULT;
691 } 698 }
692 if (read_only && !blk_verify_command(file, cmnd)) { 699 if (read_only && sg_allow_access(file, cmnd)) {
693 sg_remove_request(sfp, srp); 700 sg_remove_request(sfp, srp);
694 return -EPERM; 701 return -EPERM;
695 } 702 }
@@ -720,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
720 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 727 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
721 (int) cmnd[0], (int) hp->cmd_len)); 728 (int) cmnd[0], (int) hp->cmd_len));
722 729
723 if ((k = sg_start_req(srp))) { 730 k = sg_start_req(srp, cmnd);
731 if (k) {
724 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); 732 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
725 sg_finish_rem_req(srp); 733 sg_finish_rem_req(srp);
726 return k; /* probably out of space --> ENOMEM */ 734 return k; /* probably out of space --> ENOMEM */
727 } 735 }
728 if ((k = sg_write_xfer(srp))) {
729 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
730 sg_finish_rem_req(srp);
731 return k;
732 }
733 if (sdp->detached) { 736 if (sdp->detached) {
734 sg_finish_rem_req(srp); 737 sg_finish_rem_req(srp);
735 return -ENODEV; 738 return -ENODEV;
@@ -751,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
751 break; 754 break;
752 } 755 }
753 hp->duration = jiffies_to_msecs(jiffies); 756 hp->duration = jiffies_to_msecs(jiffies);
 754/* Now send everything off to mid-level. The next time we hear about this 757
755 packet is when sg_cmd_done() is called (i.e. a callback). */ 758 srp->rq->timeout = timeout;
756 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer, 759 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
757 hp->dxfer_len, srp->data.k_use_sg, timeout, 760 srp->rq, 1, sg_rq_end_io);
758 SG_DEFAULT_RETRIES, srp, sg_cmd_done, 761 return 0;
759 GFP_ATOMIC)) {
760 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
761 /*
762 * most likely out of mem, but could also be a bad map
763 */
764 sg_finish_rem_req(srp);
765 return -ENOMEM;
766 } else
767 return 0;
768} 762}
769 763
770static int 764static int
@@ -793,6 +787,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
793 787
794 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 788 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
795 return -ENXIO; 789 return -ENXIO;
790
796 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n", 791 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
797 sdp->disk->disk_name, (int) cmd_in)); 792 sdp->disk->disk_name, (int) cmd_in));
798 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); 793 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
@@ -1061,7 +1056,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1061 1056
1062 if (copy_from_user(&opcode, siocp->data, 1)) 1057 if (copy_from_user(&opcode, siocp->data, 1))
1063 return -EFAULT; 1058 return -EFAULT;
1064 if (!blk_verify_command(filp, &opcode)) 1059 if (sg_allow_access(filp, &opcode))
1065 return -EPERM; 1060 return -EPERM;
1066 } 1061 }
1067 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); 1062 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
@@ -1179,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1179 Sg_fd *sfp; 1174 Sg_fd *sfp;
1180 unsigned long offset, len, sa; 1175 unsigned long offset, len, sa;
1181 Sg_scatter_hold *rsv_schp; 1176 Sg_scatter_hold *rsv_schp;
1182 struct scatterlist *sg; 1177 int k, length;
1183 int k;
1184 1178
1185 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) 1179 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1186 return VM_FAULT_SIGBUS; 1180 return VM_FAULT_SIGBUS;
@@ -1190,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1190 return VM_FAULT_SIGBUS; 1184 return VM_FAULT_SIGBUS;
1191 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", 1185 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
1192 offset, rsv_schp->k_use_sg)); 1186 offset, rsv_schp->k_use_sg));
1193 sg = rsv_schp->buffer;
1194 sa = vma->vm_start; 1187 sa = vma->vm_start;
1195 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1188 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1196 ++k, sg = sg_next(sg)) { 1189 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1197 len = vma->vm_end - sa; 1190 len = vma->vm_end - sa;
1198 len = (len < sg->length) ? len : sg->length; 1191 len = (len < length) ? len : length;
1199 if (offset < len) { 1192 if (offset < len) {
1200 struct page *page; 1193 struct page *page = nth_page(rsv_schp->pages[k],
1201 page = virt_to_page(page_address(sg_page(sg)) + offset); 1194 offset >> PAGE_SHIFT);
1202 get_page(page); /* increment page count */ 1195 get_page(page); /* increment page count */
1203 vmf->page = page; 1196 vmf->page = page;
1204 return 0; /* success */ 1197 return 0; /* success */
@@ -1220,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1220 Sg_fd *sfp; 1213 Sg_fd *sfp;
1221 unsigned long req_sz, len, sa; 1214 unsigned long req_sz, len, sa;
1222 Sg_scatter_hold *rsv_schp; 1215 Sg_scatter_hold *rsv_schp;
1223 int k; 1216 int k, length;
1224 struct scatterlist *sg;
1225 1217
1226 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) 1218 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1227 return -ENXIO; 1219 return -ENXIO;
@@ -1235,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1235 return -ENOMEM; /* cannot map more than reserved buffer */ 1227 return -ENOMEM; /* cannot map more than reserved buffer */
1236 1228
1237 sa = vma->vm_start; 1229 sa = vma->vm_start;
1238 sg = rsv_schp->buffer; 1230 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1239 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1231 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1240 ++k, sg = sg_next(sg)) {
1241 len = vma->vm_end - sa; 1232 len = vma->vm_end - sa;
1242 len = (len < sg->length) ? len : sg->length; 1233 len = (len < length) ? len : length;
1243 sa += len; 1234 sa += len;
1244 } 1235 }
1245 1236
@@ -1250,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1250 return 0; 1241 return 0;
1251} 1242}
1252 1243
1253/* This function is a "bottom half" handler that is called by the 1244/*
1254 * mid level when a command is completed (or has failed). */ 1245 * This function is a "bottom half" handler that is called by the mid
1255static void 1246 * level when a command is completed (or has failed).
1256sg_cmd_done(void *data, char *sense, int result, int resid) 1247 */
1248static void sg_rq_end_io(struct request *rq, int uptodate)
1257{ 1249{
1258 Sg_request *srp = data; 1250 struct sg_request *srp = rq->end_io_data;
1259 Sg_device *sdp = NULL; 1251 Sg_device *sdp = NULL;
1260 Sg_fd *sfp; 1252 Sg_fd *sfp;
1261 unsigned long iflags; 1253 unsigned long iflags;
1262 unsigned int ms; 1254 unsigned int ms;
1255 char *sense;
1256 int result, resid;
1263 1257
1264 if (NULL == srp) { 1258 if (NULL == srp) {
1265 printk(KERN_ERR "sg_cmd_done: NULL request\n"); 1259 printk(KERN_ERR "sg_cmd_done: NULL request\n");
@@ -1273,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1273 return; 1267 return;
1274 } 1268 }
1275 1269
1270 sense = rq->sense;
1271 result = rq->errors;
1272 resid = rq->data_len;
1276 1273
1277 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1274 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1278 sdp->disk->disk_name, srp->header.pack_id, result)); 1275 sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1283,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1283 if (0 != result) { 1280 if (0 != result) {
1284 struct scsi_sense_hdr sshdr; 1281 struct scsi_sense_hdr sshdr;
1285 1282
1286 memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1287 srp->header.status = 0xff & result; 1283 srp->header.status = 0xff & result;
1288 srp->header.masked_status = status_byte(result); 1284 srp->header.masked_status = status_byte(result);
1289 srp->header.msg_status = msg_byte(result); 1285 srp->header.msg_status = msg_byte(result);
@@ -1621,37 +1617,79 @@ exit_sg(void)
1621 idr_destroy(&sg_index_idr); 1617 idr_destroy(&sg_index_idr);
1622} 1618}
1623 1619
1624static int 1620static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1625sg_start_req(Sg_request * srp)
1626{ 1621{
1627 int res; 1622 int res;
1623 struct request *rq;
1628 Sg_fd *sfp = srp->parentfp; 1624 Sg_fd *sfp = srp->parentfp;
1629 sg_io_hdr_t *hp = &srp->header; 1625 sg_io_hdr_t *hp = &srp->header;
1630 int dxfer_len = (int) hp->dxfer_len; 1626 int dxfer_len = (int) hp->dxfer_len;
1631 int dxfer_dir = hp->dxfer_direction; 1627 int dxfer_dir = hp->dxfer_direction;
1628 unsigned int iov_count = hp->iovec_count;
1632 Sg_scatter_hold *req_schp = &srp->data; 1629 Sg_scatter_hold *req_schp = &srp->data;
1633 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1630 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1631 struct request_queue *q = sfp->parentdp->device->request_queue;
1632 struct rq_map_data *md, map_data;
1633 int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1634
1635 SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
1636 dxfer_len));
1637
1638 rq = blk_get_request(q, rw, GFP_ATOMIC);
1639 if (!rq)
1640 return -ENOMEM;
1641
1642 memcpy(rq->cmd, cmd, hp->cmd_len);
1643
1644 rq->cmd_len = hp->cmd_len;
1645 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1646
1647 srp->rq = rq;
1648 rq->end_io_data = srp;
1649 rq->sense = srp->sense_b;
1650 rq->retries = SG_DEFAULT_RETRIES;
1634 1651
1635 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1636 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1652 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1637 return 0; 1653 return 0;
1638 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && 1654
1639 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && 1655 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1640 (!sfp->parentdp->device->host->unchecked_isa_dma)) { 1656 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1641 res = sg_build_direct(srp, sfp, dxfer_len); 1657 !sfp->parentdp->device->host->unchecked_isa_dma &&
1642 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ 1658 blk_rq_aligned(q, hp->dxferp, dxfer_len))
1643 return res; 1659 md = NULL;
1644 } 1660 else
1645 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) 1661 md = &map_data;
1646 sg_link_reserve(sfp, srp, dxfer_len); 1662
1647 else { 1663 if (md) {
1648 res = sg_build_indirect(req_schp, sfp, dxfer_len); 1664 if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1649 if (res) { 1665 sg_link_reserve(sfp, srp, dxfer_len);
1650 sg_remove_scat(req_schp); 1666 else {
1651 return res; 1667 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1668 if (res)
1669 return res;
1652 } 1670 }
1671
1672 md->pages = req_schp->pages;
1673 md->page_order = req_schp->page_order;
1674 md->nr_entries = req_schp->k_use_sg;
1653 } 1675 }
1654 return 0; 1676
1677 if (iov_count)
1678 res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
1679 hp->dxfer_len, GFP_ATOMIC);
1680 else
1681 res = blk_rq_map_user(q, rq, md, hp->dxferp,
1682 hp->dxfer_len, GFP_ATOMIC);
1683
1684 if (!res) {
1685 srp->bio = rq->bio;
1686
1687 if (!md) {
1688 req_schp->dio_in_use = 1;
1689 hp->info |= SG_INFO_DIRECT_IO;
1690 }
1691 }
1692 return res;
1655} 1693}
1656 1694
1657static void 1695static void
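The hunk above converts sg_start_req() to the block layer: allocate a REQ_TYPE_BLOCK_PC request, copy in the CDB, and let blk_rq_map_user() pin or bounce the user buffer instead of building a private scatterlist. A minimal stand-alone sketch of the same pattern follows (2.6.28-era interfaces; the function and parameter names here are illustrative placeholders, not sg.c identifiers):

	#include <linux/blkdev.h>
	#include <linux/string.h>

	/*
	 * Sketch only: submit a SCSI CDB with a user-space data buffer
	 * through the block layer.  All identifiers are placeholders.
	 */
	static int demo_submit_user_cmd(struct request_queue *q, struct gendisk *disk,
					unsigned char *cdb, unsigned short cdb_len,
					void __user *ubuf, unsigned long len,
					rq_end_io_fn *done, void *cookie)
	{
		struct request *rq;
		int res;

		rq = blk_get_request(q, READ, GFP_KERNEL);	/* READ: data from device */
		if (!rq)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		memcpy(rq->cmd, cdb, cdb_len);
		rq->cmd_len = cdb_len;
		rq->end_io_data = cookie;

		/* The block layer pins (or bounces) the user pages and builds the bio. */
		res = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
		if (res) {
			blk_put_request(rq);
			return res;
		}

		blk_execute_rq_nowait(q, disk, rq, 0, done);
		return 0;
	}

The completion callback is then expected to call blk_rq_unmap_user() on the saved bio and blk_put_request(), which is what the following hunk adds to sg_finish_rem_req().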
@@ -1665,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp)
1665 sg_unlink_reserve(sfp, srp); 1703 sg_unlink_reserve(sfp, srp);
1666 else 1704 else
1667 sg_remove_scat(req_schp); 1705 sg_remove_scat(req_schp);
1706
1707 if (srp->rq) {
1708 if (srp->bio)
1709 blk_rq_unmap_user(srp->bio);
1710
1711 blk_put_request(srp->rq);
1712 }
1713
1668 sg_remove_request(sfp, srp); 1714 sg_remove_request(sfp, srp);
1669} 1715}
1670 1716
1671static int 1717static int
1672sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) 1718sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1673{ 1719{
1674 int sg_bufflen = tablesize * sizeof(struct scatterlist); 1720 int sg_bufflen = tablesize * sizeof(struct page *);
1675 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 1721 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1676 1722
1677 /* 1723 schp->pages = kzalloc(sg_bufflen, gfp_flags);
1678 * TODO: test without low_dma, we should not need it since 1724 if (!schp->pages)
1679 * the block layer will bounce the buffer for us
1680 *
1681 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1682 */
1683 if (sfp->low_dma)
1684 gfp_flags |= GFP_DMA;
1685 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1686 if (!schp->buffer)
1687 return -ENOMEM; 1725 return -ENOMEM;
1688 sg_init_table(schp->buffer, tablesize);
1689 schp->sglist_len = sg_bufflen; 1726 schp->sglist_len = sg_bufflen;
1690 return tablesize; /* number of scat_gath elements allocated */ 1727 return tablesize; /* number of scat_gath elements allocated */
1691} 1728}
1692 1729
1693#ifdef SG_ALLOW_DIO_CODE
1694/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1695 /* TODO: hopefully we can use the generic block layer code */
1696
1697/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1698 - mapping of all pages not successful
1699 (i.e., either completely successful or fails)
1700*/
1701static int
1702st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1703 unsigned long uaddr, size_t count, int rw)
1704{
1705 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1706 unsigned long start = uaddr >> PAGE_SHIFT;
1707 const int nr_pages = end - start;
1708 int res, i, j;
1709 struct page **pages;
1710
1711 /* User attempted Overflow! */
1712 if ((uaddr + count) < uaddr)
1713 return -EINVAL;
1714
1715 /* Too big */
1716 if (nr_pages > max_pages)
1717 return -ENOMEM;
1718
1719 /* Hmm? */
1720 if (count == 0)
1721 return 0;
1722
1723 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1724 return -ENOMEM;
1725
1726 /* Try to fault in all of the necessary pages */
1727 down_read(&current->mm->mmap_sem);
1728 /* rw==READ means read from drive, write into memory area */
1729 res = get_user_pages(
1730 current,
1731 current->mm,
1732 uaddr,
1733 nr_pages,
1734 rw == READ,
1735 0, /* don't force */
1736 pages,
1737 NULL);
1738 up_read(&current->mm->mmap_sem);
1739
1740 /* Errors and no page mapped should return here */
1741 if (res < nr_pages)
1742 goto out_unmap;
1743
1744 for (i=0; i < nr_pages; i++) {
1745 /* FIXME: flush superfluous for rw==READ,
1746 * probably wrong function for rw==WRITE
1747 */
1748 flush_dcache_page(pages[i]);
1749 /* ?? Is locking needed? I don't think so */
1750 /* if (TestSetPageLocked(pages[i]))
1751 goto out_unlock; */
1752 }
1753
1754 sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
1755 if (nr_pages > 1) {
1756 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1757 count -= sgl[0].length;
1758 for (i=1; i < nr_pages ; i++)
1759 sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
1760 }
1761 else {
1762 sgl[0].length = count;
1763 }
1764
1765 kfree(pages);
1766 return nr_pages;
1767
1768 out_unmap:
1769 if (res > 0) {
1770 for (j=0; j < res; j++)
1771 page_cache_release(pages[j]);
1772 res = 0;
1773 }
1774 kfree(pages);
1775 return res;
1776}
1777
1778
1779/* And unmap them... */
1780static int
1781st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1782 int dirtied)
1783{
1784 int i;
1785
1786 for (i=0; i < nr_pages; i++) {
1787 struct page *page = sg_page(&sgl[i]);
1788
1789 if (dirtied)
1790 SetPageDirty(page);
1791 /* unlock_page(page); */
1792 /* FIXME: cache flush missing for rw==READ
1793 * FIXME: call the correct reference counting function
1794 */
1795 page_cache_release(page);
1796 }
1797
1798 return 0;
1799}
1800
1801/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1802#endif
1803
1804
1805/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1806static int
1807sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1808{
1809#ifdef SG_ALLOW_DIO_CODE
1810 sg_io_hdr_t *hp = &srp->header;
1811 Sg_scatter_hold *schp = &srp->data;
1812 int sg_tablesize = sfp->parentdp->sg_tablesize;
1813 int mx_sc_elems, res;
1814 struct scsi_device *sdev = sfp->parentdp->device;
1815
1816 if (((unsigned long)hp->dxferp &
1817 queue_dma_alignment(sdev->request_queue)) != 0)
1818 return 1;
1819
1820 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1821 if (mx_sc_elems <= 0) {
1822 return 1;
1823 }
1824 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1825 (unsigned long)hp->dxferp, dxfer_len,
1826 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1827 if (res <= 0) {
1828 sg_remove_scat(schp);
1829 return 1;
1830 }
1831 schp->k_use_sg = res;
1832 schp->dio_in_use = 1;
1833 hp->info |= SG_INFO_DIRECT_IO;
1834 return 0;
1835#else
1836 return 1;
1837#endif
1838}
1839
1840static int 1730static int
1841sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) 1731sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1842{ 1732{
1843 struct scatterlist *sg; 1733 int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1844 int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1845 int sg_tablesize = sfp->parentdp->sg_tablesize; 1734 int sg_tablesize = sfp->parentdp->sg_tablesize;
1846 int blk_size = buff_size; 1735 int blk_size = buff_size, order;
1847 struct page *p = NULL; 1736 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1848 1737
1849 if (blk_size < 0) 1738 if (blk_size < 0)
1850 return -EFAULT; 1739 return -EFAULT;
@@ -1868,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1868 } else 1757 } else
1869 scatter_elem_sz_prev = num; 1758 scatter_elem_sz_prev = num;
1870 } 1759 }
1871 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1760
1872 (rem_sz > 0) && (k < mx_sc_elems); 1761 if (sfp->low_dma)
1873 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { 1762 gfp_mask |= GFP_DMA;
1874 1763
1764 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1765 gfp_mask |= __GFP_ZERO;
1766
1767 order = get_order(num);
1768retry:
1769 ret_sz = 1 << (PAGE_SHIFT + order);
1770
1771 for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1772 k++, rem_sz -= ret_sz) {
1773
1875 num = (rem_sz > scatter_elem_sz_prev) ? 1774 num = (rem_sz > scatter_elem_sz_prev) ?
1876 scatter_elem_sz_prev : rem_sz; 1775 scatter_elem_sz_prev : rem_sz;
1877 p = sg_page_malloc(num, sfp->low_dma, &ret_sz); 1776
1878 if (!p) 1777 schp->pages[k] = alloc_pages(gfp_mask, order);
1879 return -ENOMEM; 1778 if (!schp->pages[k])
1779 goto out;
1880 1780
1881 if (num == scatter_elem_sz_prev) { 1781 if (num == scatter_elem_sz_prev) {
1882 if (unlikely(ret_sz > scatter_elem_sz_prev)) { 1782 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1884,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1884 scatter_elem_sz_prev = ret_sz; 1784 scatter_elem_sz_prev = ret_sz;
1885 } 1785 }
1886 } 1786 }
1887 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
1888 1787
1889 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1788 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1890 "ret_sz=%d\n", k, num, ret_sz)); 1789 "ret_sz=%d\n", k, num, ret_sz));
1891 } /* end of for loop */ 1790 } /* end of for loop */
1892 1791
1792 schp->page_order = order;
1893 schp->k_use_sg = k; 1793 schp->k_use_sg = k;
1894 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " 1794 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1895 "rem_sz=%d\n", k, rem_sz)); 1795 "rem_sz=%d\n", k, rem_sz));
@@ -1897,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1897 schp->bufflen = blk_size; 1797 schp->bufflen = blk_size;
1898 if (rem_sz > 0) /* must have failed */ 1798 if (rem_sz > 0) /* must have failed */
1899 return -ENOMEM; 1799 return -ENOMEM;
1900
1901 return 0;
1902}
1903
1904static int
1905sg_write_xfer(Sg_request * srp)
1906{
1907 sg_io_hdr_t *hp = &srp->header;
1908 Sg_scatter_hold *schp = &srp->data;
1909 struct scatterlist *sg = schp->buffer;
1910 int num_xfer = 0;
1911 int j, k, onum, usglen, ksglen, res;
1912 int iovec_count = (int) hp->iovec_count;
1913 int dxfer_dir = hp->dxfer_direction;
1914 unsigned char *p;
1915 unsigned char __user *up;
1916 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1917
1918 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1919 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1920 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1921 if (schp->bufflen < num_xfer)
1922 num_xfer = schp->bufflen;
1923 }
1924 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1925 (new_interface
1926 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1927 return 0;
1928
1929 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1930 num_xfer, iovec_count, schp->k_use_sg));
1931 if (iovec_count) {
1932 onum = iovec_count;
1933 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1934 return -EFAULT;
1935 } else
1936 onum = 1;
1937
1938 ksglen = sg->length;
1939 p = page_address(sg_page(sg));
1940 for (j = 0, k = 0; j < onum; ++j) {
1941 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1942 if (res)
1943 return res;
1944
1945 for (; p; sg = sg_next(sg), ksglen = sg->length,
1946 p = page_address(sg_page(sg))) {
1947 if (usglen <= 0)
1948 break;
1949 if (ksglen > usglen) {
1950 if (usglen >= num_xfer) {
1951 if (__copy_from_user(p, up, num_xfer))
1952 return -EFAULT;
1953 return 0;
1954 }
1955 if (__copy_from_user(p, up, usglen))
1956 return -EFAULT;
1957 p += usglen;
1958 ksglen -= usglen;
1959 break;
1960 } else {
1961 if (ksglen >= num_xfer) {
1962 if (__copy_from_user(p, up, num_xfer))
1963 return -EFAULT;
1964 return 0;
1965 }
1966 if (__copy_from_user(p, up, ksglen))
1967 return -EFAULT;
1968 up += ksglen;
1969 usglen -= ksglen;
1970 }
1971 ++k;
1972 if (k >= schp->k_use_sg)
1973 return 0;
1974 }
1975 }
1976
1977 return 0; 1800 return 0;
1978} 1801out:
1802 for (i = 0; i < k; i++)
1803 __free_pages(schp->pages[i], order);
1979 1804
1980static int 1805 if (--order >= 0)
1981sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1806 goto retry;
1982 int wr_xf, int *countp, unsigned char __user **up)
1983{
1984 int num_xfer = (int) hp->dxfer_len;
1985 unsigned char __user *p = hp->dxferp;
1986 int count;
1987 1807
1988 if (0 == sg_num) { 1808 return -ENOMEM;
1989 if (wr_xf && ('\0' == hp->interface_id))
1990 count = (int) hp->flags; /* holds "old" input_size */
1991 else
1992 count = num_xfer;
1993 } else {
1994 sg_iovec_t iovec;
1995 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
1996 return -EFAULT;
1997 p = iovec.iov_base;
1998 count = (int) iovec.iov_len;
1999 }
2000 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2001 return -EFAULT;
2002 if (up)
2003 *up = p;
2004 if (countp)
2005 *countp = count;
2006 return 0;
2007} 1809}
2008 1810
2009static void 1811static void
2010sg_remove_scat(Sg_scatter_hold * schp) 1812sg_remove_scat(Sg_scatter_hold * schp)
2011{ 1813{
2012 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 1814 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2013 if (schp->buffer && (schp->sglist_len > 0)) { 1815 if (schp->pages && schp->sglist_len > 0) {
2014 struct scatterlist *sg = schp->buffer; 1816 if (!schp->dio_in_use) {
2015
2016 if (schp->dio_in_use) {
2017#ifdef SG_ALLOW_DIO_CODE
2018 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2019#endif
2020 } else {
2021 int k; 1817 int k;
2022 1818
2023 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); 1819 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2024 ++k, sg = sg_next(sg)) {
2025 SCSI_LOG_TIMEOUT(5, printk( 1820 SCSI_LOG_TIMEOUT(5, printk(
2026 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1821 "sg_remove_scat: k=%d, pg=0x%p\n",
2027 k, sg_page(sg), sg->length)); 1822 k, schp->pages[k]));
2028 sg_page_free(sg_page(sg), sg->length); 1823 __free_pages(schp->pages[k], schp->page_order);
2029 } 1824 }
2030 }
2031 kfree(schp->buffer);
2032 }
2033 memset(schp, 0, sizeof (*schp));
2034}
2035
2036static int
2037sg_read_xfer(Sg_request * srp)
2038{
2039 sg_io_hdr_t *hp = &srp->header;
2040 Sg_scatter_hold *schp = &srp->data;
2041 struct scatterlist *sg = schp->buffer;
2042 int num_xfer = 0;
2043 int j, k, onum, usglen, ksglen, res;
2044 int iovec_count = (int) hp->iovec_count;
2045 int dxfer_dir = hp->dxfer_direction;
2046 unsigned char *p;
2047 unsigned char __user *up;
2048 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2049
2050 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2051 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2052 num_xfer = hp->dxfer_len;
2053 if (schp->bufflen < num_xfer)
2054 num_xfer = schp->bufflen;
2055 }
2056 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2057 (new_interface
2058 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2059 return 0;
2060 1825
2061 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", 1826 kfree(schp->pages);
2062 num_xfer, iovec_count, schp->k_use_sg));
2063 if (iovec_count) {
2064 onum = iovec_count;
2065 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2066 return -EFAULT;
2067 } else
2068 onum = 1;
2069
2070 p = page_address(sg_page(sg));
2071 ksglen = sg->length;
2072 for (j = 0, k = 0; j < onum; ++j) {
2073 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2074 if (res)
2075 return res;
2076
2077 for (; p; sg = sg_next(sg), ksglen = sg->length,
2078 p = page_address(sg_page(sg))) {
2079 if (usglen <= 0)
2080 break;
2081 if (ksglen > usglen) {
2082 if (usglen >= num_xfer) {
2083 if (__copy_to_user(up, p, num_xfer))
2084 return -EFAULT;
2085 return 0;
2086 }
2087 if (__copy_to_user(up, p, usglen))
2088 return -EFAULT;
2089 p += usglen;
2090 ksglen -= usglen;
2091 break;
2092 } else {
2093 if (ksglen >= num_xfer) {
2094 if (__copy_to_user(up, p, num_xfer))
2095 return -EFAULT;
2096 return 0;
2097 }
2098 if (__copy_to_user(up, p, ksglen))
2099 return -EFAULT;
2100 up += ksglen;
2101 usglen -= ksglen;
2102 }
2103 ++k;
2104 if (k >= schp->k_use_sg)
2105 return 0;
2106 } 1827 }
2107 } 1828 }
2108 1829 memset(schp, 0, sizeof (*schp));
2109 return 0;
2110} 1830}
2111 1831
2112static int 1832static int
2113sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 1833sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2114{ 1834{
2115 Sg_scatter_hold *schp = &srp->data; 1835 Sg_scatter_hold *schp = &srp->data;
2116 struct scatterlist *sg = schp->buffer;
2117 int k, num; 1836 int k, num;
2118 1837
2119 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 1838 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -2121,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2121 if ((!outp) || (num_read_xfer <= 0)) 1840 if ((!outp) || (num_read_xfer <= 0))
2122 return 0; 1841 return 0;
2123 1842
2124 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) { 1843 num = 1 << (PAGE_SHIFT + schp->page_order);
2125 num = sg->length; 1844 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2126 if (num > num_read_xfer) { 1845 if (num > num_read_xfer) {
2127 if (__copy_to_user(outp, page_address(sg_page(sg)), 1846 if (__copy_to_user(outp, page_address(schp->pages[k]),
2128 num_read_xfer)) 1847 num_read_xfer))
2129 return -EFAULT; 1848 return -EFAULT;
2130 break; 1849 break;
2131 } else { 1850 } else {
2132 if (__copy_to_user(outp, page_address(sg_page(sg)), 1851 if (__copy_to_user(outp, page_address(schp->pages[k]),
2133 num)) 1852 num))
2134 return -EFAULT; 1853 return -EFAULT;
2135 num_read_xfer -= num; 1854 num_read_xfer -= num;
@@ -2164,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2164{ 1883{
2165 Sg_scatter_hold *req_schp = &srp->data; 1884 Sg_scatter_hold *req_schp = &srp->data;
2166 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1885 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2167 struct scatterlist *sg = rsv_schp->buffer;
2168 int k, num, rem; 1886 int k, num, rem;
2169 1887
2170 srp->res_used = 1; 1888 srp->res_used = 1;
2171 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 1889 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2172 rem = size; 1890 rem = size;
2173 1891
2174 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { 1892 num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2175 num = sg->length; 1893 for (k = 0; k < rsv_schp->k_use_sg; k++) {
2176 if (rem <= num) { 1894 if (rem <= num) {
2177 sfp->save_scat_len = num;
2178 sg->length = rem;
2179 req_schp->k_use_sg = k + 1; 1895 req_schp->k_use_sg = k + 1;
2180 req_schp->sglist_len = rsv_schp->sglist_len; 1896 req_schp->sglist_len = rsv_schp->sglist_len;
2181 req_schp->buffer = rsv_schp->buffer; 1897 req_schp->pages = rsv_schp->pages;
2182 1898
2183 req_schp->bufflen = size; 1899 req_schp->bufflen = size;
2184 req_schp->b_malloc_len = rsv_schp->b_malloc_len; 1900 req_schp->page_order = rsv_schp->page_order;
2185 break; 1901 break;
2186 } else 1902 } else
2187 rem -= num; 1903 rem -= num;
@@ -2195,22 +1911,13 @@ static void
2195sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 1911sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2196{ 1912{
2197 Sg_scatter_hold *req_schp = &srp->data; 1913 Sg_scatter_hold *req_schp = &srp->data;
2198 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2199 1914
2200 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 1915 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2201 (int) req_schp->k_use_sg)); 1916 (int) req_schp->k_use_sg));
2202 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2203 struct scatterlist *sg = rsv_schp->buffer;
2204
2205 if (sfp->save_scat_len > 0)
2206 (sg + (req_schp->k_use_sg - 1))->length =
2207 (unsigned) sfp->save_scat_len;
2208 else
2209 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2210 }
2211 req_schp->k_use_sg = 0; 1917 req_schp->k_use_sg = 0;
2212 req_schp->bufflen = 0; 1918 req_schp->bufflen = 0;
2213 req_schp->buffer = NULL; 1919 req_schp->pages = NULL;
1920 req_schp->page_order = 0;
2214 req_schp->sglist_len = 0; 1921 req_schp->sglist_len = 0;
2215 sfp->save_scat_len = 0; 1922 sfp->save_scat_len = 0;
2216 srp->res_used = 0; 1923 srp->res_used = 0;
@@ -2468,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp)
2468 return srp ? 1 : 0; 2175 return srp ? 1 : 0;
2469} 2176}
2470 2177
2471/* The size fetched (value output via retSzp) set when non-NULL return */
2472static struct page *
2473sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2474{
2475 struct page *resp = NULL;
2476 gfp_t page_mask;
2477 int order, a_size;
2478 int resSz;
2479
2480 if ((rqSz <= 0) || (NULL == retSzp))
2481 return resp;
2482
2483 if (lowDma)
2484 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2485 else
2486 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2487
2488 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2489 order++, a_size <<= 1) ;
2490 resSz = a_size; /* rounded up if necessary */
2491 resp = alloc_pages(page_mask, order);
2492 while ((!resp) && order) {
2493 --order;
2494 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2495 resp = alloc_pages(page_mask, order); /* try half */
2496 resSz = a_size;
2497 }
2498 if (resp) {
2499 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2500 memset(page_address(resp), 0, resSz);
2501 *retSzp = resSz;
2502 }
2503 return resp;
2504}
2505
2506static void
2507sg_page_free(struct page *page, int size)
2508{
2509 int order, a_size;
2510
2511 if (!page)
2512 return;
2513 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2514 order++, a_size <<= 1) ;
2515 __free_pages(page, order);
2516}
2517
2518#ifdef CONFIG_SCSI_PROC_FS 2178#ifdef CONFIG_SCSI_PROC_FS
2519static int 2179static int
2520sg_idr_max_id(int id, void *p, void *data) 2180sg_idr_max_id(int id, void *p, void *data)
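The sg_build_indirect()/sg_remove_scat() changes above replace the driver-private scatterlist with a plain array of same-order page blocks: ask for 2^order pages per element and fall back to a smaller order when the allocator refuses. In isolation that strategy looks roughly like the sketch below (an illustrative helper, not the driver's code; the cleanup loop frees exactly the blocks that were allocated):

	#include <linux/mm.h>
	#include <linux/gfp.h>

	/*
	 * Sketch only: fill 'pages' with up to 'max_blocks' blocks of 2^order
	 * pages covering 'total' bytes, retrying with a smaller order when an
	 * allocation fails.  Returns the order used (>= 0) and the block count
	 * via *nblocks, or -ENOMEM.
	 */
	static int demo_alloc_page_blocks(struct page **pages, int max_blocks,
					  int total, int elem_sz, int *nblocks)
	{
		gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;
		int order = get_order(elem_sz);
		int i, k, rem, blk_sz;

	retry:
		blk_sz = 1 << (PAGE_SHIFT + order);
		for (k = 0, rem = total; rem > 0 && k < max_blocks; k++, rem -= blk_sz) {
			pages[k] = alloc_pages(gfp, order);
			if (!pages[k])
				goto undo;
		}
		if (rem > 0) {			/* ran out of table entries */
			for (i = 0; i < k; i++)
				__free_pages(pages[i], order);
			return -ENOMEM;		/* smaller orders need even more entries */
		}
		*nblocks = k;
		return order;

	undo:
		for (i = 0; i < k; i++)
			__free_pages(pages[i], order);
		if (--order >= 0)
			goto retry;
		return -ENOMEM;
	}

With all blocks sharing one order, teardown only needs the block count and that order, which is why sg_remove_scat() above shrinks to a single __free_pages() loop.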
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def3..0f17009c99d2 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
331 331
332static int sr_prep_fn(struct request_queue *q, struct request *rq) 332static int sr_prep_fn(struct request_queue *q, struct request *rq)
333{ 333{
334 int block=0, this_count, s_size, timeout = SR_TIMEOUT; 334 int block = 0, this_count, s_size;
335 struct scsi_cd *cd; 335 struct scsi_cd *cd;
336 struct scsi_cmnd *SCpnt; 336 struct scsi_cmnd *SCpnt;
337 struct scsi_device *sdp = q->queuedata; 337 struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
461 SCpnt->transfersize = cd->device->sector_size; 461 SCpnt->transfersize = cd->device->sector_size;
462 SCpnt->underflow = this_count << 9; 462 SCpnt->underflow = this_count << 9;
463 SCpnt->allowed = MAX_RETRIES; 463 SCpnt->allowed = MAX_RETRIES;
464 SCpnt->timeout_per_command = timeout;
465 464
466 /* 465 /*
467 * This indicates that the command is ready from our end to be 466 * This indicates that the command is ready from our end to be
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
620 disk->fops = &sr_bdops; 619 disk->fops = &sr_bdops;
621 disk->flags = GENHD_FL_CD; 620 disk->flags = GENHD_FL_CD;
622 621
622 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
623
623 cd->device = sdev; 624 cd->device = sdev;
624 cd->disk = disk; 625 cd->disk = disk;
625 cd->driver = &sr_template; 626 cd->driver = &sr_template;
@@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
878 struct gendisk *disk = cd->disk; 879 struct gendisk *disk = cd->disk;
879 880
880 spin_lock(&sr_index_lock); 881 spin_lock(&sr_index_lock);
881 clear_bit(disk->first_minor, sr_index_bits); 882 clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
882 spin_unlock(&sr_index_lock); 883 spin_unlock(&sr_index_lock);
883 884
884 unregister_cdrom(&cd->cdi); 885 unregister_cdrom(&cd->cdi);
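The sr.c hunks above drop the per-command timeout_per_command assignment in favour of a queue-wide default installed once at probe time (sym_glue.c further below reads the same value back from cmd->request->timeout). In isolation the probe-time call is a one-liner; this is a sketch with a placeholder device pointer and an example 30-second value, not sr.c's code:

	#include <linux/blkdev.h>
	#include <scsi/scsi_device.h>

	/* Sketch only: give every request queued to this device a default timeout. */
	static void demo_set_default_timeout(struct scsi_device *sdev)
	{
		blk_queue_rq_timeout(sdev->request_queue, 30 * HZ);
	}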
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index f9cf70151366..3d73aad4bc82 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -1,6 +1,6 @@
1/* sun_esp.c: ESP front-end for Sparc SBUS systems. 1/* sun_esp.c: ESP front-end for Sparc SBUS systems.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
@@ -9,60 +9,70 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/dma-mapping.h>
13#include <linux/of.h>
14#include <linux/of_device.h>
12 15
13#include <asm/irq.h> 16#include <asm/irq.h>
14#include <asm/io.h> 17#include <asm/io.h>
15#include <asm/dma.h> 18#include <asm/dma.h>
16 19
17#include <asm/sbus.h>
18
19#include <scsi/scsi_host.h> 20#include <scsi/scsi_host.h>
20 21
21#include "esp_scsi.h" 22#include "esp_scsi.h"
22 23
23#define DRV_MODULE_NAME "sun_esp" 24#define DRV_MODULE_NAME "sun_esp"
24#define PFX DRV_MODULE_NAME ": " 25#define PFX DRV_MODULE_NAME ": "
25#define DRV_VERSION "1.000" 26#define DRV_VERSION "1.100"
26#define DRV_MODULE_RELDATE "April 19, 2007" 27#define DRV_MODULE_RELDATE "August 27, 2008"
27 28
28#define dma_read32(REG) \ 29#define dma_read32(REG) \
29 sbus_readl(esp->dma_regs + (REG)) 30 sbus_readl(esp->dma_regs + (REG))
30#define dma_write32(VAL, REG) \ 31#define dma_write32(VAL, REG) \
31 sbus_writel((VAL), esp->dma_regs + (REG)) 32 sbus_writel((VAL), esp->dma_regs + (REG))
32 33
33static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev) 34/* DVMA chip revisions */
34{ 35enum dvma_rev {
35 struct sbus_dev *sdev = esp->dev; 36 dvmarev0,
36 struct sbus_dma *dma; 37 dvmaesc1,
38 dvmarev1,
39 dvmarev2,
40 dvmarev3,
41 dvmarevplus,
42 dvmahme
43};
37 44
38 if (dma_sdev != NULL) { 45static int __devinit esp_sbus_setup_dma(struct esp *esp,
39 for_each_dvma(dma) { 46 struct of_device *dma_of)
40 if (dma->sdev == dma_sdev) 47{
41 break; 48 esp->dma = dma_of;
42 }
43 } else {
44 for_each_dvma(dma) {
45 if (dma->sdev == NULL)
46 break;
47 49
48 /* If bus + slot are the same and it has the 50 esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
49 * correct OBP name, it's ours. 51 resource_size(&dma_of->resource[0]),
50 */ 52 "espdma");
51 if (sdev->bus == dma->sdev->bus && 53 if (!esp->dma_regs)
52 sdev->slot == dma->sdev->slot && 54 return -ENOMEM;
53 (!strcmp(dma->sdev->prom_name, "dma") ||
54 !strcmp(dma->sdev->prom_name, "espdma")))
55 break;
56 }
57 }
58 55
59 if (dma == NULL) { 56 switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
60 printk(KERN_ERR PFX "[%s] Cannot find dma.\n", 57 case DMA_VERS0:
61 sdev->ofdev.node->full_name); 58 esp->dmarev = dvmarev0;
62 return -ENODEV; 59 break;
60 case DMA_ESCV1:
61 esp->dmarev = dvmaesc1;
62 break;
63 case DMA_VERS1:
64 esp->dmarev = dvmarev1;
65 break;
66 case DMA_VERS2:
67 esp->dmarev = dvmarev2;
68 break;
69 case DMA_VERHME:
70 esp->dmarev = dvmahme;
71 break;
72 case DMA_VERSPLUS:
73 esp->dmarev = dvmarevplus;
74 break;
63 } 75 }
64 esp->dma = dma;
65 esp->dma_regs = dma->regs;
66 76
67 return 0; 77 return 0;
68 78
@@ -70,18 +80,18 @@ static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sde
70 80
71static int __devinit esp_sbus_map_regs(struct esp *esp, int hme) 81static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
72{ 82{
73 struct sbus_dev *sdev = esp->dev; 83 struct of_device *op = esp->dev;
74 struct resource *res; 84 struct resource *res;
75 85
76 /* On HME, two reg sets exist, first is DVMA, 86 /* On HME, two reg sets exist, first is DVMA,
77 * second is ESP registers. 87 * second is ESP registers.
78 */ 88 */
79 if (hme) 89 if (hme)
80 res = &sdev->resource[1]; 90 res = &op->resource[1];
81 else 91 else
82 res = &sdev->resource[0]; 92 res = &op->resource[0];
83 93
84 esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP"); 94 esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
85 if (!esp->regs) 95 if (!esp->regs)
86 return -ENOMEM; 96 return -ENOMEM;
87 97
@@ -90,10 +100,11 @@ static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
90 100
91static int __devinit esp_sbus_map_command_block(struct esp *esp) 101static int __devinit esp_sbus_map_command_block(struct esp *esp)
92{ 102{
93 struct sbus_dev *sdev = esp->dev; 103 struct of_device *op = esp->dev;
94 104
95 esp->command_block = sbus_alloc_consistent(sdev, 16, 105 esp->command_block = dma_alloc_coherent(&op->dev, 16,
96 &esp->command_block_dma); 106 &esp->command_block_dma,
107 GFP_ATOMIC);
97 if (!esp->command_block) 108 if (!esp->command_block)
98 return -ENOMEM; 109 return -ENOMEM;
99 return 0; 110 return 0;
@@ -102,17 +113,18 @@ static int __devinit esp_sbus_map_command_block(struct esp *esp)
102static int __devinit esp_sbus_register_irq(struct esp *esp) 113static int __devinit esp_sbus_register_irq(struct esp *esp)
103{ 114{
104 struct Scsi_Host *host = esp->host; 115 struct Scsi_Host *host = esp->host;
105 struct sbus_dev *sdev = esp->dev; 116 struct of_device *op = esp->dev;
106 117
107 host->irq = sdev->irqs[0]; 118 host->irq = op->irqs[0];
108 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); 119 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
109} 120}
110 121
111static void __devinit esp_get_scsi_id(struct esp *esp) 122static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
112{ 123{
113 struct sbus_dev *sdev = esp->dev; 124 struct of_device *op = esp->dev;
114 struct device_node *dp = sdev->ofdev.node; 125 struct device_node *dp;
115 126
127 dp = op->node;
116 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); 128 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
117 if (esp->scsi_id != 0xff) 129 if (esp->scsi_id != 0xff)
118 goto done; 130 goto done;
@@ -121,13 +133,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp)
121 if (esp->scsi_id != 0xff) 133 if (esp->scsi_id != 0xff)
122 goto done; 134 goto done;
123 135
124 if (!sdev->bus) { 136 esp->scsi_id = of_getintprop_default(espdma->node,
125 /* SUN4 */
126 esp->scsi_id = 7;
127 goto done;
128 }
129
130 esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
131 "scsi-initiator-id", 7); 137 "scsi-initiator-id", 7);
132 138
133done: 139done:
@@ -137,9 +143,10 @@ done:
137 143
138static void __devinit esp_get_differential(struct esp *esp) 144static void __devinit esp_get_differential(struct esp *esp)
139{ 145{
140 struct sbus_dev *sdev = esp->dev; 146 struct of_device *op = esp->dev;
141 struct device_node *dp = sdev->ofdev.node; 147 struct device_node *dp;
142 148
149 dp = op->node;
143 if (of_find_property(dp, "differential", NULL)) 150 if (of_find_property(dp, "differential", NULL))
144 esp->flags |= ESP_FLAG_DIFFERENTIAL; 151 esp->flags |= ESP_FLAG_DIFFERENTIAL;
145 else 152 else
@@ -148,43 +155,36 @@ static void __devinit esp_get_differential(struct esp *esp)
148 155
149static void __devinit esp_get_clock_params(struct esp *esp) 156static void __devinit esp_get_clock_params(struct esp *esp)
150{ 157{
151 struct sbus_dev *sdev = esp->dev; 158 struct of_device *op = esp->dev;
152 struct device_node *dp = sdev->ofdev.node; 159 struct device_node *bus_dp, *dp;
153 struct device_node *bus_dp;
154 int fmhz; 160 int fmhz;
155 161
156 bus_dp = NULL; 162 dp = op->node;
157 if (sdev != NULL && sdev->bus != NULL) 163 bus_dp = dp->parent;
158 bus_dp = sdev->bus->ofdev.node;
159 164
160 fmhz = of_getintprop_default(dp, "clock-frequency", 0); 165 fmhz = of_getintprop_default(dp, "clock-frequency", 0);
161 if (fmhz == 0) 166 if (fmhz == 0)
162 fmhz = (!bus_dp) ? 0 : 167 fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);
163 of_getintprop_default(bus_dp, "clock-frequency", 0);
164 168
165 esp->cfreq = fmhz; 169 esp->cfreq = fmhz;
166} 170}
167 171
168static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma) 172static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
169{ 173{
170 struct sbus_dev *sdev = esp->dev; 174 struct device_node *dma_dp = dma_of->node;
171 struct device_node *dp = sdev->ofdev.node; 175 struct of_device *op = esp->dev;
172 u8 bursts; 176 struct device_node *dp;
177 u8 bursts, val;
173 178
179 dp = op->node;
174 bursts = of_getintprop_default(dp, "burst-sizes", 0xff); 180 bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
175 if (dma) { 181 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
176 struct device_node *dma_dp = dma->ofdev.node; 182 if (val != 0xff)
177 u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); 183 bursts &= val;
178 if (val != 0xff)
179 bursts &= val;
180 }
181 184
182 if (sdev->bus) { 185 val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
183 u8 val = of_getintprop_default(sdev->bus->ofdev.node, 186 if (val != 0xff)
184 "burst-sizes", 0xff); 187 bursts &= val;
185 if (val != 0xff)
186 bursts &= val;
187 }
188 188
189 if (bursts == 0xff || 189 if (bursts == 0xff ||
190 (bursts & DMA_BURST16) == 0 || 190 (bursts & DMA_BURST16) == 0 ||
@@ -194,9 +194,9 @@ static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
194 esp->bursts = bursts; 194 esp->bursts = bursts;
195} 195}
196 196
197static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma) 197static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
198{ 198{
199 esp_get_scsi_id(esp); 199 esp_get_scsi_id(esp, espdma);
200 esp_get_differential(esp); 200 esp_get_differential(esp);
201 esp_get_clock_params(esp); 201 esp_get_clock_params(esp);
202 esp_get_bursts(esp, espdma); 202 esp_get_bursts(esp, espdma);
@@ -215,25 +215,33 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
215static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf, 215static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
216 size_t sz, int dir) 216 size_t sz, int dir)
217{ 217{
218 return sbus_map_single(esp->dev, buf, sz, dir); 218 struct of_device *op = esp->dev;
219
220 return dma_map_single(&op->dev, buf, sz, dir);
219} 221}
220 222
221static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, 223static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
222 int num_sg, int dir) 224 int num_sg, int dir)
223{ 225{
224 return sbus_map_sg(esp->dev, sg, num_sg, dir); 226 struct of_device *op = esp->dev;
227
228 return dma_map_sg(&op->dev, sg, num_sg, dir);
225} 229}
226 230
227static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, 231static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
228 size_t sz, int dir) 232 size_t sz, int dir)
229{ 233{
230 sbus_unmap_single(esp->dev, addr, sz, dir); 234 struct of_device *op = esp->dev;
235
236 dma_unmap_single(&op->dev, addr, sz, dir);
231} 237}
232 238
233static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, 239static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
234 int num_sg, int dir) 240 int num_sg, int dir)
235{ 241{
236 sbus_unmap_sg(esp->dev, sg, num_sg, dir); 242 struct of_device *op = esp->dev;
243
244 dma_unmap_sg(&op->dev, sg, num_sg, dir);
237} 245}
238 246
239static int sbus_esp_irq_pending(struct esp *esp) 247static int sbus_esp_irq_pending(struct esp *esp)
@@ -247,24 +255,26 @@ static void sbus_esp_reset_dma(struct esp *esp)
247{ 255{
248 int can_do_burst16, can_do_burst32, can_do_burst64; 256 int can_do_burst16, can_do_burst32, can_do_burst64;
249 int can_do_sbus64, lim; 257 int can_do_sbus64, lim;
258 struct of_device *op;
250 u32 val; 259 u32 val;
251 260
252 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; 261 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
253 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; 262 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
254 can_do_burst64 = 0; 263 can_do_burst64 = 0;
255 can_do_sbus64 = 0; 264 can_do_sbus64 = 0;
256 if (sbus_can_dma_64bit(esp->dev)) 265 op = esp->dev;
266 if (sbus_can_dma_64bit())
257 can_do_sbus64 = 1; 267 can_do_sbus64 = 1;
258 if (sbus_can_burst64(esp->sdev)) 268 if (sbus_can_burst64())
259 can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; 269 can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
260 270
261 /* Put the DVMA into a known state. */ 271 /* Put the DVMA into a known state. */
262 if (esp->dma->revision != dvmahme) { 272 if (esp->dmarev != dvmahme) {
263 val = dma_read32(DMA_CSR); 273 val = dma_read32(DMA_CSR);
264 dma_write32(val | DMA_RST_SCSI, DMA_CSR); 274 dma_write32(val | DMA_RST_SCSI, DMA_CSR);
265 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); 275 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
266 } 276 }
267 switch (esp->dma->revision) { 277 switch (esp->dmarev) {
268 case dvmahme: 278 case dvmahme:
269 dma_write32(DMA_RESET_FAS366, DMA_CSR); 279 dma_write32(DMA_RESET_FAS366, DMA_CSR);
270 dma_write32(DMA_RST_SCSI, DMA_CSR); 280 dma_write32(DMA_RST_SCSI, DMA_CSR);
@@ -282,7 +292,7 @@ static void sbus_esp_reset_dma(struct esp *esp)
282 292
283 if (can_do_sbus64) { 293 if (can_do_sbus64) {
284 esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; 294 esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
285 sbus_set_sbus64(esp->dev, esp->bursts); 295 sbus_set_sbus64(&op->dev, esp->bursts);
286 } 296 }
287 297
288 lim = 1000; 298 lim = 1000;
@@ -346,14 +356,14 @@ static void sbus_esp_dma_drain(struct esp *esp)
346 u32 csr; 356 u32 csr;
347 int lim; 357 int lim;
348 358
349 if (esp->dma->revision == dvmahme) 359 if (esp->dmarev == dvmahme)
350 return; 360 return;
351 361
352 csr = dma_read32(DMA_CSR); 362 csr = dma_read32(DMA_CSR);
353 if (!(csr & DMA_FIFO_ISDRAIN)) 363 if (!(csr & DMA_FIFO_ISDRAIN))
354 return; 364 return;
355 365
356 if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1) 366 if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
357 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); 367 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
358 368
359 lim = 1000; 369 lim = 1000;
@@ -369,7 +379,7 @@ static void sbus_esp_dma_drain(struct esp *esp)
369 379
370static void sbus_esp_dma_invalidate(struct esp *esp) 380static void sbus_esp_dma_invalidate(struct esp *esp)
371{ 381{
372 if (esp->dma->revision == dvmahme) { 382 if (esp->dmarev == dvmahme) {
373 dma_write32(DMA_RST_SCSI, DMA_CSR); 383 dma_write32(DMA_RST_SCSI, DMA_CSR);
374 384
375 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | 385 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
@@ -440,7 +450,7 @@ static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
440 else 450 else
441 csr &= ~DMA_ST_WRITE; 451 csr &= ~DMA_ST_WRITE;
442 dma_write32(csr, DMA_CSR); 452 dma_write32(csr, DMA_CSR);
443 if (esp->dma->revision == dvmaesc1) { 453 if (esp->dmarev == dvmaesc1) {
444 u32 end = PAGE_ALIGN(addr + dma_count + 16U); 454 u32 end = PAGE_ALIGN(addr + dma_count + 16U);
445 dma_write32(end - addr, DMA_COUNT); 455 dma_write32(end - addr, DMA_COUNT);
446 } 456 }
@@ -476,10 +486,8 @@ static const struct esp_driver_ops sbus_esp_ops = {
476 .dma_error = sbus_esp_dma_error, 486 .dma_error = sbus_esp_dma_error,
477}; 487};
478 488
479static int __devinit esp_sbus_probe_one(struct device *dev, 489static int __devinit esp_sbus_probe_one(struct of_device *op,
480 struct sbus_dev *esp_dev, 490 struct of_device *espdma,
481 struct sbus_dev *espdma,
482 struct sbus_bus *sbus,
483 int hme) 491 int hme)
484{ 492{
485 struct scsi_host_template *tpnt = &scsi_esp_template; 493 struct scsi_host_template *tpnt = &scsi_esp_template;
@@ -497,13 +505,13 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
497 esp = shost_priv(host); 505 esp = shost_priv(host);
498 506
499 esp->host = host; 507 esp->host = host;
500 esp->dev = esp_dev; 508 esp->dev = op;
501 esp->ops = &sbus_esp_ops; 509 esp->ops = &sbus_esp_ops;
502 510
503 if (hme) 511 if (hme)
504 esp->flags |= ESP_FLAG_WIDE_CAPABLE; 512 esp->flags |= ESP_FLAG_WIDE_CAPABLE;
505 513
506 err = esp_sbus_find_dma(esp, espdma); 514 err = esp_sbus_setup_dma(esp, espdma);
507 if (err < 0) 515 if (err < 0)
508 goto fail_unlink; 516 goto fail_unlink;
509 517
@@ -525,15 +533,15 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
525 * come up with the reset bit set, so make sure that 533 * come up with the reset bit set, so make sure that
526 * is clear first. 534 * is clear first.
527 */ 535 */
528 if (esp->dma->revision == dvmaesc1) { 536 if (esp->dmarev == dvmaesc1) {
529 u32 val = dma_read32(DMA_CSR); 537 u32 val = dma_read32(DMA_CSR);
530 538
531 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); 539 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
532 } 540 }
533 541
534 dev_set_drvdata(&esp_dev->ofdev.dev, esp); 542 dev_set_drvdata(&op->dev, esp);
535 543
536 err = scsi_esp_register(esp, dev); 544 err = scsi_esp_register(esp, &op->dev);
537 if (err) 545 if (err)
538 goto fail_free_irq; 546 goto fail_free_irq;
539 547
@@ -542,41 +550,46 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
542fail_free_irq: 550fail_free_irq:
543 free_irq(host->irq, esp); 551 free_irq(host->irq, esp);
544fail_unmap_command_block: 552fail_unmap_command_block:
545 sbus_free_consistent(esp->dev, 16, 553 dma_free_coherent(&op->dev, 16,
546 esp->command_block, 554 esp->command_block,
547 esp->command_block_dma); 555 esp->command_block_dma);
548fail_unmap_regs: 556fail_unmap_regs:
549 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); 557 of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
550fail_unlink: 558fail_unlink:
551 scsi_host_put(host); 559 scsi_host_put(host);
552fail: 560fail:
553 return err; 561 return err;
554} 562}
555 563
556static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) 564static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match)
557{ 565{
558 struct sbus_dev *sdev = to_sbus_device(&dev->dev); 566 struct device_node *dma_node = NULL;
559 struct device_node *dp = dev->node; 567 struct device_node *dp = op->node;
560 struct sbus_dev *dma_sdev = NULL; 568 struct of_device *dma_of = NULL;
561 int hme = 0; 569 int hme = 0;
562 570
563 if (dp->parent && 571 if (dp->parent &&
564 (!strcmp(dp->parent->name, "espdma") || 572 (!strcmp(dp->parent->name, "espdma") ||
565 !strcmp(dp->parent->name, "dma"))) 573 !strcmp(dp->parent->name, "dma")))
566 dma_sdev = sdev->parent; 574 dma_node = dp->parent;
567 else if (!strcmp(dp->name, "SUNW,fas")) { 575 else if (!strcmp(dp->name, "SUNW,fas")) {
568 dma_sdev = sdev; 576 dma_node = op->node;
569 hme = 1; 577 hme = 1;
570 } 578 }
579 if (dma_node)
580 dma_of = of_find_device_by_node(dma_node);
581 if (!dma_of)
582 return -ENODEV;
571 583
572 return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev, 584 return esp_sbus_probe_one(op, dma_of, hme);
573 sdev->bus, hme);
574} 585}
575 586
576static int __devexit esp_sbus_remove(struct of_device *dev) 587static int __devexit esp_sbus_remove(struct of_device *op)
577{ 588{
578 struct esp *esp = dev_get_drvdata(&dev->dev); 589 struct esp *esp = dev_get_drvdata(&op->dev);
590 struct of_device *dma_of = esp->dma;
579 unsigned int irq = esp->host->irq; 591 unsigned int irq = esp->host->irq;
592 bool is_hme;
580 u32 val; 593 u32 val;
581 594
582 scsi_esp_unregister(esp); 595 scsi_esp_unregister(esp);
@@ -586,17 +599,25 @@ static int __devexit esp_sbus_remove(struct of_device *dev)
586 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); 599 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
587 600
588 free_irq(irq, esp); 601 free_irq(irq, esp);
589 sbus_free_consistent(esp->dev, 16, 602
590 esp->command_block, 603 is_hme = (esp->dmarev == dvmahme);
591 esp->command_block_dma); 604
592 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); 605 dma_free_coherent(&op->dev, 16,
606 esp->command_block,
607 esp->command_block_dma);
608 of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
609 SBUS_ESP_REG_SIZE);
610 of_iounmap(&dma_of->resource[0], esp->dma_regs,
611 resource_size(&dma_of->resource[0]));
593 612
594 scsi_host_put(esp->host); 613 scsi_host_put(esp->host);
595 614
615 dev_set_drvdata(&op->dev, NULL);
616
596 return 0; 617 return 0;
597} 618}
598 619
599static struct of_device_id esp_match[] = { 620static const struct of_device_id esp_match[] = {
600 { 621 {
601 .name = "SUNW,esp", 622 .name = "SUNW,esp",
602 }, 623 },
@@ -619,7 +640,7 @@ static struct of_platform_driver esp_sbus_driver = {
619 640
620static int __init sunesp_init(void) 641static int __init sunesp_init(void)
621{ 642{
622 return of_register_driver(&esp_sbus_driver, &sbus_bus_type); 643 return of_register_driver(&esp_sbus_driver, &of_bus_type);
623} 644}
624 645
625static void __exit sunesp_exit(void) 646static void __exit sunesp_exit(void)
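The sun_esp.c conversion above retires the SBUS-specific helpers (sbus_ioremap, sbus_alloc_consistent, sbus_map_single, ...) in favour of the generic OF and DMA-mapping APIs used elsewhere on sparc. Reduced to its essentials, the probe-time half of that pattern is sketched below; the struct and function names are placeholders, and error handling only unwinds the two setup steps shown:

	#include <linux/of_device.h>
	#include <linux/dma-mapping.h>
	#include <linux/ioport.h>
	#include <asm/prom.h>		/* of_ioremap/of_iounmap (sparc) */

	struct demo_state {
		void __iomem	*regs;
		void		*cmd_block;
		dma_addr_t	 cmd_block_dma;
	};

	/* Sketch only: map register resource 0 and allocate a small coherent buffer. */
	static int demo_of_probe(struct of_device *op, struct demo_state *s)
	{
		s->regs = of_ioremap(&op->resource[0], 0,
				     resource_size(&op->resource[0]), "demo");
		if (!s->regs)
			return -ENOMEM;

		s->cmd_block = dma_alloc_coherent(&op->dev, 16,
						  &s->cmd_block_dma, GFP_KERNEL);
		if (!s->cmd_block) {
			of_iounmap(&op->resource[0], s->regs,
				   resource_size(&op->resource[0]));
			return -ENOMEM;
		}
		return 0;
	}

The remove path mirrors this with dma_free_coherent() and of_iounmap(), exactly as the new esp_sbus_remove() above does.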
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d39107b7669b..f4e6cde1fd0d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
519 * Shorten our settle_time if needed for 519 * Shorten our settle_time if needed for
520 * this command not to time out. 520 * this command not to time out.
521 */ 521 */
522 if (np->s.settle_time_valid && cmd->timeout_per_command) { 522 if (np->s.settle_time_valid && cmd->request->timeout) {
523 unsigned long tlimit = jiffies + cmd->timeout_per_command; 523 unsigned long tlimit = jiffies + cmd->request->timeout;
524 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 524 tlimit -= SYM_CONF_TIMER_INTERVAL*2;
525 if (time_after(np->s.settle_time, tlimit)) { 525 if (time_after(np->s.settle_time, tlimit)) {
526 np->s.settle_time = tlimit; 526 np->s.settle_time = tlimit;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 1723d71cbf3f..69ac6e590f1d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = {
2573static int __init dc390_module_init(void) 2573static int __init dc390_module_init(void)
2574{ 2574{
2575 if (!disable_clustering) 2575 if (!disable_clustering)
2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n" 2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
2577 "\twith \"disable_clustering=1\" and report to maintainers\n"); 2577 printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
2578 2578
2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) { 2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) {
2580 tmscsim[0] = 7; 2580 tmscsim[0] = 7;