-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas |  22
-rw-r--r--  block/bsg.c                               |  12
-rw-r--r--  block/scsi_ioctl.c                        |   5
-rw-r--r--  drivers/firewire/fw-sbp2.c                |   2
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c              |   2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c              |   2
-rw-r--r--  drivers/scsi/53c700.c                     |   6
-rw-r--r--  drivers/scsi/Kconfig                      |   3
-rw-r--r--  drivers/scsi/a100u2w.c                    |   2
-rw-r--r--  drivers/scsi/aacraid/aachba.c             | 133
-rw-r--r--  drivers/scsi/aacraid/aacraid.h            |  28
-rw-r--r--  drivers/scsi/aacraid/comminit.c           |   2
-rw-r--r--  drivers/scsi/aacraid/commsup.c            |  34
-rw-r--r--  drivers/scsi/aacraid/linit.c              |  22
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c       |   6
-rw-r--r--  drivers/scsi/constants.c                  |  10
-rw-r--r--  drivers/scsi/dpt/dpti_ioctl.h             |  16
-rw-r--r--  drivers/scsi/dpt/dptsig.h                 |   8
-rw-r--r--  drivers/scsi/dpt/sys_info.h               |   4
-rw-r--r--  drivers/scsi/dpt_i2o.c                    | 640
-rw-r--r--  drivers/scsi/dpti.h                       |  15
-rw-r--r--  drivers/scsi/gdth.c                       |   2
-rw-r--r--  drivers/scsi/hptiop.c                     |   6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c          |   7
-rw-r--r--  drivers/scsi/ibmvscsi/viosrp.h            |   9
-rw-r--r--  drivers/scsi/initio.c                     |   2
-rw-r--r--  drivers/scsi/ipr.c                        |   2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c     |  17
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h     |   1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c      |  13
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h      |   6
-rw-r--r--  drivers/scsi/qla1280.c                    |   4
-rw-r--r--  drivers/scsi/scsi.c                       |  23
-rw-r--r--  drivers/scsi/scsi_error.c                 |  15
-rw-r--r--  drivers/scsi/scsi_lib.c                   |   7
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c               |   2
-rw-r--r--  drivers/scsi/u14-34f.c                    |   6
-rw-r--r--  drivers/usb/storage/cypress_atacb.c       |   2
-rw-r--r--  drivers/usb/storage/isd200.c              |   2
-rw-r--r--  include/scsi/scsi.h                       |  40
-rw-r--r--  include/scsi/scsi_cmnd.h                  |  23
-rw-r--r--  include/scsi/scsi_eh.h                    |   4
-rw-r--r--  include/scsi/scsi_host.h                  |   8
43 files changed, 884 insertions(+), 291 deletions(-)
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 91c81db0ba71..716fcc1cafb5 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,25 @@
+1 Release Date    : Mon. March 10 11:02:31 PDT 2008 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Sumant Patro
+			Bo Yang
+
+2 Current Version : 00.00.03.20-RC1
+3 Older Version   : 00.00.03.16
+
+1. Rollback the sense info implementation
+	Sense buffer ptr data type in the ioctl path is reverted back
+	to u32 * as in previous versions of driver.
+
+2. Fixed the driver frame count.
+	When Driver sent wrong frame count to firmware. As this
+	particular command is sent to drive, FW is seeing continuous
+	chip resets and so the command will timeout.
+
+3. Add the new controller(1078DE) support to the driver
+	and Increase the max_wait to 60 from 10 in the controller
+	operational status. With this max_wait increase, driver will
+	make sure the FW will finish the pending cmd for KDUMP case.
+
 1 Release Date    : Thur. Nov. 07 16:30:43 PST 2007 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Sumant Patro
diff --git a/block/bsg.c b/block/bsg.c
index fa796b605f55..f0b7cd343216 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -174,7 +174,11 @@ unlock:
 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, int has_write_perm)
 {
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+	if (hdr->request_len > BLK_MAX_CDB) {
+		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+		if (!rq->cmd)
+			return -ENOMEM;
+	}
 
 	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
 			   hdr->request_len))
@@ -211,8 +215,6 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 
 	if (hdr->guard != 'Q')
 		return -EINVAL;
-	if (hdr->request_len > BLK_MAX_CDB)
-		return -EINVAL;
 	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
 	    hdr->din_xfer_len > (q->max_sectors << 9))
 		return -EIO;
@@ -302,6 +304,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 	}
 	return rq;
 out:
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
 	blk_put_request(rq);
 	if (next_rq) {
 		blk_rq_unmap_user(next_rq->bio);
@@ -455,6 +459,8 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		ret = rq->errors;
 
 	blk_rq_unmap_user(bio);
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
 	blk_put_request(rq);
 
 	return ret;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ffa3720e6ca0..78199c08ec92 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -33,13 +33,12 @@
 #include <scsi/scsi_cmnd.h>
 
 /* Command group 3 is reserved and should never be used.  */
-const unsigned char scsi_command_size[8] =
+const unsigned char scsi_command_size_tbl[8] =
 {
 	6, 10, 10, 12,
 	16, 12, 10, 10
 };
-
-EXPORT_SYMBOL(scsi_command_size);
+EXPORT_SYMBOL(scsi_command_size_tbl);
 
 #include <scsi/sg.h>
 
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 62e3c9190983..b2458bb8e9ca 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -1487,7 +1487,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
 		goto out;
 
-	memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+	memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
 
 	orb->base.callback = complete_command_orb;
 	orb->base.request_bus =
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 37b85c67b11d..c8bad675dbd1 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -1055,7 +1055,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
 		rec->scsi_result = scsi_cmnd->result;
 		rec->scsi_cmnd = (unsigned long)scsi_cmnd;
 		rec->scsi_serial = scsi_cmnd->serial_number;
-		memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd,
+		memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
 		       min((int)scsi_cmnd->cmd_len,
 			   ZFCP_DBF_SCSI_OPCODE));
 		rec->scsi_retries = scsi_cmnd->retries;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9af2330f07a2..b2ea4ea051f5 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4014,7 +4014,7 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
 		ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
 			       scpnt->result);
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
-			      (void *) &scpnt->cmnd, scpnt->cmd_len);
+			      scpnt->cmnd, scpnt->cmd_len);
 
 		ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
 			       fcp_rsp_iu->fcp_sns_len);
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index f4c4fe90240a..f5a9addb7050 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -599,7 +599,7 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
 		(struct NCR_700_command_slot *)SCp->host_scribble;
 
 	dma_unmap_single(hostdata->dev, slot->pCmd,
-			 sizeof(SCp->cmnd), DMA_TO_DEVICE);
+			 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 	if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
 		char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
 #ifdef NCR_700_DEBUG
@@ -1004,7 +1004,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			 * here */
 			NCR_700_unmap(hostdata, SCp, slot);
 			dma_unmap_single(hostdata->dev, slot->pCmd,
-					 sizeof(SCp->cmnd),
+					 MAX_COMMAND_SIZE,
 					 DMA_TO_DEVICE);
 
 			cmnd[0] = REQUEST_SENSE;
@@ -1901,7 +1901,7 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
 	}
 	slot->resume_offset = 0;
 	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
-				    sizeof(SCp->cmnd), DMA_TO_DEVICE);
+				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 	NCR_700_start_command(SCp);
 	return 0;
 }
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 99c57b0c1d54..46d7e400c8be 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -504,10 +504,9 @@ config SCSI_AIC7XXX_OLD
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
 
-# All the I2O code and drivers do not seem to be 64bit safe.
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
-	depends on !64BIT && SCSI && PCI && VIRT_TO_BUS
+	depends on SCSI && PCI && VIRT_TO_BUS
 	help
 	  This driver supports all of Adaptec's I2O based RAID controllers as
 	  well as the DPT SmartRaid V cards. This is an Adaptec maintained
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 792b2e807bf3..ced3eebe252c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -895,7 +895,7 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru
 	} else {
 		scb->tag_msg = 0;	/* No tag support */
 	}
-	memcpy(&scb->cdb[0], &cmd->cmnd, scb->cdb_len);
+	memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
 }
 
 /**
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 460d4024c46c..aa4e77c25273 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -498,6 +498,11 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
 		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
 		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
 			fsa_dev_ptr->valid = 1;
+			/* sense_key holds the current state of the spin-up */
+			if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
+				fsa_dev_ptr->sense_data.sense_key = NOT_READY;
+			else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
+				fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
 			fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
 			fsa_dev_ptr->size
 			    = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
@@ -1509,20 +1514,35 @@ static void io_callback(void *context, struct fib * fibptr)
 	scsi_dma_unmap(scsicmd);
 
 	readreply = (struct aac_read_reply *)fib_data(fibptr);
-	if (le32_to_cpu(readreply->status) == ST_OK)
-		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-	else {
+	switch (le32_to_cpu(readreply->status)) {
+	case ST_OK:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_GOOD;
+		dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
+		break;
+	case ST_NOT_READY:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
+		  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		break;
+	default:
 #ifdef AAC_DETAILED_STATUS_INFO
 		printk(KERN_WARNING "io_callback: io failed, status = %d\n",
 		  le32_to_cpu(readreply->status));
 #endif
-		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
 		set_sense(&dev->fsa_dev[cid].sense_data,
 		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
 		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
 		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
 			     SCSI_SENSE_BUFFERSIZE));
+		break;
 	}
 	aac_fib_complete(fibptr);
 	aac_fib_free(fibptr);
@@ -1863,6 +1883,84 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
+static void aac_start_stop_callback(void *context, struct fib *fibptr)
+{
+	struct scsi_cmnd *scsicmd = context;
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	BUG_ON(fibptr == NULL);
+
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+	aac_fib_complete(fibptr);
+	aac_fib_free(fibptr);
+	scsicmd->scsi_done(scsicmd);
+}
+
+static int aac_start_stop(struct scsi_cmnd *scsicmd)
+{
+	int status;
+	struct fib *cmd_fibcontext;
+	struct aac_power_management *pmcmd;
+	struct scsi_device *sdev = scsicmd->device;
+	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+
+	if (!(aac->supplement_adapter_info.SupportedOptions2 &
+	      AAC_OPTION_POWER_MANAGEMENT)) {
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	}
+
+	if (aac->in_reset)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 *	Allocate and initialize a Fib
+	 */
+	cmd_fibcontext = aac_fib_alloc(aac);
+	if (!cmd_fibcontext)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	aac_fib_init(cmd_fibcontext);
+
+	pmcmd = fib_data(cmd_fibcontext);
+	pmcmd->command = cpu_to_le32(VM_ContainerConfig);
+	pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
+	/* Eject bit ignored, not relevant */
+	pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
+		cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
+	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
+	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
+		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	status = aac_fib_send(ContainerCommand,
+		  cmd_fibcontext,
+		  sizeof(struct aac_power_management),
+		  FsaNormal,
+		  0, 1,
+		  (fib_callback)aac_start_stop_callback,
+		  (void *)scsicmd);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS) {
+		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+		return 0;
+	}
+
+	aac_fib_complete(cmd_fibcontext);
+	aac_fib_free(cmd_fibcontext);
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
 /**
  * aac_scsi_cmd()	-	Process SCSI command
  * @scsicmd:		SCSI command block
@@ -1899,7 +1997,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	 *	If the target container doesn't exist, it may have
 	 *	been newly created
 	 */
-	if ((fsa_dev_ptr[cid].valid & 1) == 0) {
+	if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
+	    (fsa_dev_ptr[cid].sense_data.sense_key ==
+	     NOT_READY)) {
 		switch (scsicmd->cmnd[0]) {
 		case SERVICE_ACTION_IN:
 			if (!(dev->raw_io_interface) ||
@@ -2091,8 +2191,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
 		/* Do not cache partition table for arrays */
 		scsicmd->device->removable = 1;
-
-		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_GOOD;
 		scsicmd->scsi_done(scsicmd);
 
 		return 0;
@@ -2187,15 +2287,32 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	 *	These commands are all No-Ops
 	 */
 	case TEST_UNIT_READY:
+		if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				SAM_STAT_CHECK_CONDITION;
+			set_sense(&dev->fsa_dev[cid].sense_data,
+				  NOT_READY, SENCODE_BECOMING_READY,
+				  ASENCODE_BECOMING_READY, 0, 0);
+			memcpy(scsicmd->sense_buffer,
+			       &dev->fsa_dev[cid].sense_data,
+			       min_t(size_t,
+				     sizeof(dev->fsa_dev[cid].sense_data),
+				     SCSI_SENSE_BUFFERSIZE));
+			scsicmd->scsi_done(scsicmd);
+			return 0;
+		}
+		/* FALLTHRU */
 	case RESERVE:
 	case RELEASE:
 	case REZERO_UNIT:
 	case REASSIGN_BLOCKS:
 	case SEEK_10:
-	case START_STOP:
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
 		scsicmd->scsi_done(scsicmd);
 		return 0;
+
+	case START_STOP:
+		return aac_start_stop(scsicmd);
 	}
 
 	switch (scsicmd->cmnd[0])
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 113ca9c8934c..73916adb8f80 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2455
+# define AAC_DRIVER_BUILD 2456
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -34,8 +34,8 @@
 #define CONTAINER_TO_ID(cont)		(cont)
 #define CONTAINER_TO_LUN(cont)		(0)
 
-#define aac_phys_to_logical(x)  (x+1)
-#define aac_logical_to_phys(x)  (x?x-1:0)
+#define aac_phys_to_logical(x)  ((x)+1)
+#define aac_logical_to_phys(x)  ((x)?(x)-1:0)
 
 /* #define AAC_DETAILED_STATUS_INFO */
 
@@ -424,6 +424,8 @@ struct aac_init
 	 */
 	__le32	InitFlags;	/* flags for supported features */
 #define INITFLAGS_NEW_COMM_SUPPORTED	0x00000001
+#define INITFLAGS_DRIVER_USES_UTC_TIME	0x00000010
+#define INITFLAGS_DRIVER_SUPPORTS_PM	0x00000020
 	__le32	MaxIoCommands;	/* max outstanding commands */
 	__le32	MaxIoSize;	/* largest I/O command */
 	__le32	MaxFibSize;	/* largest FIB to adapter */
@@ -867,8 +869,10 @@ struct aac_supplement_adapter_info
 };
 #define AAC_FEATURE_FALCON	cpu_to_le32(0x00000010)
 #define AAC_FEATURE_JBOD	cpu_to_le32(0x08000000)
-#define AAC_OPTION_MU_RESET	cpu_to_le32(0x00000001)
-#define AAC_OPTION_IGNORE_RESET	cpu_to_le32(0x00000002)
+/* SupportedOptions2 */
+#define AAC_OPTION_MU_RESET		cpu_to_le32(0x00000001)
+#define AAC_OPTION_IGNORE_RESET		cpu_to_le32(0x00000002)
+#define AAC_OPTION_POWER_MANAGEMENT	cpu_to_le32(0x00000004)
 #define AAC_SIS_VERSION_V3	3
 #define AAC_SIS_SLOT_UNKNOWN	0xFF
 
@@ -1148,6 +1152,7 @@ struct aac_dev
 #define		ST_DQUOT	69
 #define		ST_STALE	70
 #define		ST_REMOTE	71
+#define		ST_NOT_READY	72
 #define		ST_BADHANDLE	10001
 #define		ST_NOT_SYNC	10002
 #define		ST_BAD_COOKIE	10003
@@ -1269,6 +1274,18 @@ struct aac_synchronize_reply {
 	u8		data[16];
 };
 
+#define CT_POWER_MANAGEMENT	245
+#define CT_PM_START_UNIT	2
+#define CT_PM_STOP_UNIT		3
+#define CT_PM_UNIT_IMMEDIATE	1
+struct aac_power_management {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_POWER_MANAGEMENT */
+	__le32		sub;		/* CT_PM_* */
+	__le32		cid;
+	__le32		parm;		/* CT_PM_sub_* */
+};
+
 #define CT_PAUSE_IO    65
 #define CT_RELEASE_IO  66
 struct aac_pause {
@@ -1536,6 +1553,7 @@ struct aac_mntent {
 #define FSCS_NOTCLEAN	0x0001  /* fsck is necessary before mounting */
 #define FSCS_READONLY	0x0002	/* possible result of broken mirror */
 #define FSCS_HIDDEN	0x0004	/* should be ignored - set during a clear */
+#define FSCS_NOT_READY	0x0008	/* Array spinning up to fulfil request */
 
 struct aac_query_mount {
 	__le32		command;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 294a802450be..cbac06355107 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -97,6 +97,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 		init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
 		dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
 	}
+	init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+		INITFLAGS_DRIVER_SUPPORTS_PM);
 	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
 	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
 	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ef67816a6fe5..289304aab690 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -515,7 +515,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 			}
 			udelay(5);
 		}
-	} else if (down_interruptible(&fibptr->event_wait) == 0) {
+	} else if (down_interruptible(&fibptr->event_wait)) {
 		fibptr->done = 2;
 		up(&fibptr->event_wait);
 	}
@@ -906,15 +906,22 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 	case AifEnAddJBOD:
 	case AifEnDeleteJBOD:
 		container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
-		if ((container >> 28))
+		if ((container >> 28)) {
+			container = (u32)-1;
 			break;
+		}
 		channel = (container >> 24) & 0xF;
-		if (channel >= dev->maximum_num_channels)
+		if (channel >= dev->maximum_num_channels) {
+			container = (u32)-1;
 			break;
+		}
 		id = container & 0xFFFF;
-		if (id >= dev->maximum_num_physicals)
+		if (id >= dev->maximum_num_physicals) {
+			container = (u32)-1;
 			break;
+		}
 		lun = (container >> 16) & 0xFF;
+		container = (u32)-1;
 		channel = aac_phys_to_logical(channel);
 		device_config_needed =
 			(((__le32 *)aifcmd->data)[0] ==
@@ -933,13 +940,18 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 		case EM_DRIVE_REMOVAL:
 			container = le32_to_cpu(
 				((__le32 *)aifcmd->data)[2]);
-			if ((container >> 28))
+			if ((container >> 28)) {
+				container = (u32)-1;
 				break;
+			}
 			channel = (container >> 24) & 0xF;
-			if (channel >= dev->maximum_num_channels)
+			if (channel >= dev->maximum_num_channels) {
+				container = (u32)-1;
 				break;
+			}
 			id = container & 0xFFFF;
 			lun = (container >> 16) & 0xFF;
+			container = (u32)-1;
 			if (id >= dev->maximum_num_physicals) {
 				/* legacy dev_t ? */
 				if ((0x2000 <= id) || lun || channel ||
@@ -1025,9 +1037,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 		break;
 	}
 
+	container = 0;
+retry_next:
 	if (device_config_needed == NOTHING)
-	for (container = 0; container < dev->maximum_num_containers;
-			++container) {
+	for (; container < dev->maximum_num_containers; ++container) {
 		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
 			(dev->fsa_dev[container].config_needed != NOTHING) &&
 			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
@@ -1110,6 +1123,11 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 	}
 	if (device_config_needed == ADD)
 		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
+	if (channel == CONTAINER_CHANNEL) {
+		container++;
+		device_config_needed = NOTHING;
+		goto retry_next;
+	}
 }
 
 static int _aac_reset_adapter(struct aac_dev *aac, int forced)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c109f63f8279..1f7c83607f84 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -401,6 +401,8 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
 static int aac_slave_configure(struct scsi_device *sdev)
 {
 	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+	if (aac->jbod && (sdev->type == TYPE_DISK))
+		sdev->removable = 1;
 	if ((sdev->type == TYPE_DISK) &&
 	   (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
 	   (!aac->jbod || sdev->inq_periph_qual) &&
@@ -809,6 +811,12 @@ static ssize_t aac_show_flags(struct device *cdev,
809 "SAI_READ_CAPACITY_16\n"); 811 "SAI_READ_CAPACITY_16\n");
810 if (dev->jbod) 812 if (dev->jbod)
811 len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n"); 813 len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
814 if (dev->supplement_adapter_info.SupportedOptions2 &
815 AAC_OPTION_POWER_MANAGEMENT)
816 len += snprintf(buf + len, PAGE_SIZE - len,
817 "SUPPORTED_POWER_MANAGEMENT\n");
818 if (dev->msi)
819 len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
812 return len; 820 return len;
813} 821}
814 822
@@ -1106,7 +1114,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	aac->pdev = pdev;
 	aac->name = aac_driver_template.name;
 	aac->id = shost->unique_id;
 	aac->cardtype = index;
 	INIT_LIST_HEAD(&aac->entry);
 
 	aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
@@ -1146,19 +1154,19 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 		goto out_deinit;
 
 	/*
 	 * Lets override negotiations and drop the maximum SG limit to 34
 	 */
 	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
 			(shost->sg_tablesize > 34)) {
 		shost->sg_tablesize = 34;
 		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
 	}
 
 	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
 			(shost->sg_tablesize > 17)) {
 		shost->sg_tablesize = 17;
 		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
 	}
 
 	error = pci_set_dma_max_seg_size(pdev,
 		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
@@ -1174,7 +1182,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	else
 		aac->printf_enabled = 0;
 
 	/*
 	 * max channel will be the physical channels plus 1 virtual channel
 	 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
 	 * physical channels are address by their actual physical number+1
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 90f5e0a6f2e3..2a730c470f62 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -529,10 +529,10 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
 /* The first entry, 0, is used for dynamic ids, the rest for devices
  * we know about.
  */
-static struct asd_pcidev_struct {
+static const struct asd_pcidev_struct {
 	const char * name;
 	int (*setup)(struct asd_ha_struct *asd_ha);
-} asd_pcidev_data[] = {
+} asd_pcidev_data[] __devinitconst = {
 	/* Id 0 is used for dynamic ids. */
 	{ .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
 	  .setup = asd_aic9410_setup
@@ -735,7 +735,7 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
 static int __devinit asd_pci_probe(struct pci_dev *dev,
 				   const struct pci_device_id *id)
 {
-	struct asd_pcidev_struct *asd_dev;
+	const struct asd_pcidev_struct *asd_dev;
 	unsigned asd_id = (unsigned) id->driver_data;
 	struct asd_ha_struct *asd_ha;
 	struct Scsi_Host *shost;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 403a7f2d8f9b..9785d7384199 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -28,7 +28,6 @@
 #define SERVICE_ACTION_OUT_12 0xa9
 #define SERVICE_ACTION_IN_16 0x9e
 #define SERVICE_ACTION_OUT_16 0x9f
-#define VARIABLE_LENGTH_CMD 0x7f
 
 
 
@@ -210,7 +209,7 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
 	cdb0 = cdbp[0];
 	switch(cdb0) {
 	case VARIABLE_LENGTH_CMD:
-		len = cdbp[7] + 8;
+		len = scsi_varlen_cdb_length(cdbp);
 		if (len < 10) {
 			printk("short variable length command, "
 			       "len=%d ext_len=%d", len, cdb_len);
@@ -300,7 +299,7 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
 	cdb0 = cdbp[0];
 	switch(cdb0) {
 	case VARIABLE_LENGTH_CMD:
-		len = cdbp[7] + 8;
+		len = scsi_varlen_cdb_length(cdbp);
 		if (len < 10) {
 			printk("short opcode=0x%x command, len=%d "
 			       "ext_len=%d", cdb0, len, cdb_len);
@@ -335,10 +334,7 @@ void __scsi_print_command(unsigned char *cdb)
 	int k, len;
 
 	print_opcode_name(cdb, 0);
-	if (VARIABLE_LENGTH_CMD == cdb[0])
-		len = cdb[7] + 8;
-	else
-		len = COMMAND_SIZE(cdb[0]);
+	len = scsi_command_size(cdb);
 	/* print out all bytes in cdb */
 	for (k = 0; k < len; ++k)
 		printk(" %02x", cdb[k]);
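
The scsi_varlen_cdb_length() and scsi_command_size() helpers called above are introduced by this series in include/scsi/scsi.h (that hunk falls outside this excerpt); a sketch of roughly what they are expected to look like, inferred from the callers here:

	/* Sketch only -- the real definitions live in include/scsi/scsi.h. */
	struct scsi_varlen_cdb_hdr {
		u8 opcode;		/* opcode always == VARIABLE_LENGTH_CMD */
		u8 control;
		u8 misc[5];
		u8 additional_cdb_length;	/* total cdb length - 8 */
		__be16 service_action;
		/* service specific data follows */
	};

	static inline unsigned
	scsi_varlen_cdb_length(const void *hdr)
	{
		return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
	}

	static inline unsigned
	scsi_command_size(const unsigned char *cmnd)
	{
		return *cmnd == VARIABLE_LENGTH_CMD ?
			scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
	}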
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index cc784e8f6e9d..f60236721e0d 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -89,7 +89,7 @@ typedef struct {
 	int           njobs;          /* # of jobs sent to HA         */
 	int           qdepth;         /* Controller queue depth.      */
 	int           wakebase;       /* mpx wakeup base index.       */
-	uLONG         SGsize;         /* Scatter/Gather list size.    */
+	uINT          SGsize;         /* Scatter/Gather list size.    */
 	unsigned      heads;          /* heads for drives on cntlr.   */
 	unsigned      sectors;        /* sectors for drives on cntlr. */
 	uCHAR         do_drive32;     /* Flag for Above 16 MB Ability */
@@ -97,8 +97,8 @@ typedef struct {
 	char          idPAL[4];       /* 4 Bytes Of The ID Pal        */
 	uCHAR         primary;        /* 1 For Primary, 0 For Secondary */
 	uCHAR         eataVersion;    /* EATA Version                 */
-	uLONG         cpLength;       /* EATA Command Packet Length   */
-	uLONG         spLength;       /* EATA Status Packet Length    */
+	uINT          cpLength;       /* EATA Command Packet Length   */
+	uINT          spLength;       /* EATA Status Packet Length    */
 	uCHAR         drqNum;         /* DRQ Index (0,5,6,7)          */
 	uCHAR         flag1;          /* EATA Flags 1 (Byte 9)        */
 	uCHAR         flag2;          /* EATA Flags 2 (Byte 30)       */
@@ -107,23 +107,23 @@ typedef struct {
 typedef struct {
 	uSHORT length;		// Remaining length of this
 	uSHORT drvrHBAnum;	// Relative HBA # used by the driver
-	uLONG baseAddr;		// Base I/O address
+	uINT baseAddr;		// Base I/O address
 	uSHORT blinkState;	// Blink LED state (0=Not in blink LED)
 	uCHAR pciBusNum;	// PCI Bus # (Optional)
 	uCHAR pciDeviceNum;	// PCI Device # (Optional)
 	uSHORT hbaFlags;	// Miscellaneous HBA flags
 	uSHORT Interrupt;	// Interrupt set for this device.
 #   if (defined(_DPT_ARC))
-	uLONG baseLength;
+	uINT baseLength;
 	ADAPTER_OBJECT *AdapterObject;
 	LARGE_INTEGER DmaLogicalAddress;
 	PVOID DmaVirtualAddress;
 	LARGE_INTEGER ReplyLogicalAddress;
 	PVOID ReplyVirtualAddress;
 #   else
-	uLONG reserved1;	// Reserved for future expansion
-	uLONG reserved2;	// Reserved for future expansion
-	uLONG reserved3;	// Reserved for future expansion
+	uINT reserved1;		// Reserved for future expansion
+	uINT reserved2;		// Reserved for future expansion
+	uINT reserved3;		// Reserved for future expansion
 #   endif
 } drvrHBAinfo_S;
 
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
index 94bc894d1200..72c8992fdf21 100644
--- a/drivers/scsi/dpt/dptsig.h
+++ b/drivers/scsi/dpt/dptsig.h
@@ -33,11 +33,7 @@
 /* to make sure we are talking the same size under all OS's */
 typedef unsigned char sigBYTE;
 typedef unsigned short sigWORD;
-#if (defined(_MULTI_DATAMODEL) && defined(sun) && !defined(_ILP32))
-typedef uint32_t sigLONG;
-#else
-typedef unsigned long sigLONG;
-#endif
+typedef unsigned int sigINT;
 
 /*
  * use sigWORDLittleEndian for:
@@ -300,7 +296,7 @@ typedef struct dpt_sig {
     sigBYTE    dsFiletype;       /* type of file                   */
     sigBYTE    dsFiletypeFlags;  /* flags to specify load type, etc. */
     sigBYTE    dsOEM;            /* OEM file was created for       */
-    sigLONG    dsOS;             /* which Operating systems        */
+    sigINT     dsOS;             /* which Operating systems        */
     sigWORD    dsCapabilities;   /* RAID levels, etc.              */
     sigWORD    dsDeviceSupp;     /* Types of SCSI devices supported */
     sigWORD    dsAdapterSupp;    /* DPT adapter families supported */
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
index d23b70c8c768..a90c4cb8ea8b 100644
--- a/drivers/scsi/dpt/sys_info.h
+++ b/drivers/scsi/dpt/sys_info.h
@@ -145,8 +145,8 @@
   uCHAR		smartROMRevision;
   uSHORT	flags;			/* See bit definitions above */
   uSHORT	conventionalMemSize;	/* in KB */
-  uLONG		extendedMemSize;	/* in KB */
-  uLONG		osType;			/* Same as DPTSIG's definition */
+  uINT		extendedMemSize;	/* in KB */
+  uINT		osType;			/* Same as DPTSIG's definition */
   uCHAR		osMajorVersion;
   uCHAR		osMinorVersion;		/* The OS version */
   uCHAR		osRevision;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index ac92ac143b46..0fb5bf4c43ac 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -29,11 +29,6 @@
 /*#define DEBUG 1 */
 /*#define UARTDELAY 1 */
 
-/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
-   high pages. Keep the macro around because of the broken unmerged ia64 tree */
-
-#define ADDR32 (0)
-
 #include <linux/module.h>
 
 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
@@ -108,27 +103,28 @@ static dpt_sig_S DPTI_sig = {
 
 static DEFINE_MUTEX(adpt_configuration_lock);
 
-static struct i2o_sys_tbl *sys_tbl = NULL;
-static int sys_tbl_ind = 0;
-static int sys_tbl_len = 0;
+static struct i2o_sys_tbl *sys_tbl;
+static dma_addr_t sys_tbl_pa;
+static int sys_tbl_ind;
+static int sys_tbl_len;
 
 static adpt_hba* hba_chain = NULL;
 static int hba_count = 0;
 
+static struct class *adpt_sysfs_class;
+
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
 static const struct file_operations adpt_fops = {
 	.ioctl		= adpt_ioctl,
 	.open		= adpt_open,
-	.release	= adpt_close
-};
-
-#ifdef REBOOT_NOTIFIER
-static struct notifier_block adpt_reboot_notifier =
-{
-	 adpt_reboot_event,
-	 NULL,
-	 0
-};
+	.release	= adpt_close,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= compat_adpt_ioctl,
 #endif
+};
 
 /* Structures and definitions for synchronous message posting.
  * See adpt_i2o_post_wait() for description
@@ -151,6 +147,21 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
  *============================================================================
  */
 
+static inline int dpt_dma64(adpt_hba *pHba)
+{
+	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
+}
+
+static inline u32 dma_high(dma_addr_t addr)
+{
+	return upper_32_bits(addr);
+}
+
+static inline u32 dma_low(dma_addr_t addr)
+{
+	return (u32)addr;
+}
+
 static u8 adpt_read_blink_led(adpt_hba* host)
 {
 	if (host->FwDebugBLEDflag_P) {
@@ -178,8 +189,6 @@ static int adpt_detect(struct scsi_host_template* sht)
 	struct pci_dev *pDev = NULL;
 	adpt_hba* pHba;
 
-	adpt_init();
-
 	PINFO("Detecting Adaptec I2O RAID controllers...\n");
 
 	/* search for all Adatpec I2O RAID cards */
@@ -247,13 +256,29 @@ rebuild_sys_tab:
 		adpt_inquiry(pHba);
 	}
 
+	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
+	if (IS_ERR(adpt_sysfs_class)) {
+		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
+		adpt_sysfs_class = NULL;
+	}
+
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
-		if( adpt_scsi_register(pHba,sht) < 0){
+		if (adpt_scsi_host_alloc(pHba, sht) < 0){
 			adpt_i2o_delete_hba(pHba);
 			continue;
 		}
 		pHba->initialized = TRUE;
 		pHba->state &= ~DPTI_STATE_RESET;
+		if (adpt_sysfs_class) {
+			struct device *dev = device_create(adpt_sysfs_class,
+				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit),
+				"dpti%d", pHba->unit);
+			if (IS_ERR(dev)) {
+				printk(KERN_WARNING"dpti%d: unable to "
+					"create device in dpt_i2o class\n",
+					pHba->unit);
+			}
+		}
 	}
 
 	// Register our control device node
@@ -282,7 +307,7 @@ static int adpt_release(struct Scsi_Host *host)
 
 static void adpt_inquiry(adpt_hba* pHba)
 {
-	u32 msg[14];
+	u32 msg[17];
 	u32 *mptr;
 	u32 *lenptr;
 	int direction;
@@ -290,11 +315,12 @@ static void adpt_inquiry(adpt_hba* pHba)
 	u32 len;
 	u32 reqlen;
 	u8* buf;
+	dma_addr_t addr;
 	u8  scb[16];
 	s32 rcode;
 
 	memset(msg, 0, sizeof(msg));
-	buf = kmalloc(80,GFP_KERNEL|ADDR32);
+	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
 	if(!buf){
 		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
 		return;
@@ -305,7 +331,10 @@ static void adpt_inquiry(adpt_hba* pHba)
 	direction = 0x00000000;
 	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
 
-	reqlen = 14;		// SINGLE SGE
+	if (dpt_dma64(pHba))
+		reqlen = 17;		// SINGLE SGE, 64 bit
+	else
+		reqlen = 14;		// SINGLE SGE, 32 bit
 	/* Stick the headers on */
 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
@@ -338,8 +367,16 @@ static void adpt_inquiry(adpt_hba* pHba)
 
 	/* Now fill in the SGList and command */
 	*lenptr = len;
-	*mptr++ = 0xD0000000|direction|len;
-	*mptr++ = virt_to_bus(buf);
+	if (dpt_dma64(pHba)) {
+		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+		*mptr++ = 1 << PAGE_SHIFT;
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = dma_low(addr);
+		*mptr++ = dma_high(addr);
+	} else {
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = addr;
+	}
 
 	// Send it on it's way
 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
@@ -347,7 +384,7 @@ static void adpt_inquiry(adpt_hba* pHba)
 		sprintf(pHba->detail, "Adaptec I2O RAID");
 		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
 		if (rcode != -ETIME && rcode != -EINTR)
-			kfree(buf);
+			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 	} else {
 		memset(pHba->detail, 0, sizeof(pHba->detail));
 		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
@@ -356,7 +393,7 @@ static void adpt_inquiry(adpt_hba* pHba)
 		memcpy(&(pHba->detail[40]), " FW: ", 4);
 		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
 		pHba->detail[48] = '\0';	/* precautionary */
-		kfree(buf);
+		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 	}
 	adpt_i2o_status_get(pHba);
 	return ;
@@ -632,6 +669,91 @@ stop_output:
 	return len;
 }
 
+/*
+ * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
+ */
+static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
+{
+	return (u32)cmd->serial_number;
+}
+
+/*
+ * Go from a u32 'context' to a struct scsi_cmnd * .
+ * This could probably be made more efficient.
+ */
+static struct scsi_cmnd *
+	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
+{
+	struct scsi_cmnd * cmd;
+	struct scsi_device * d;
+
+	if (context == 0)
+		return NULL;
+
+	spin_unlock(pHba->host->host_lock);
+	shost_for_each_device(d, pHba->host) {
+		unsigned long flags;
+		spin_lock_irqsave(&d->list_lock, flags);
+		list_for_each_entry(cmd, &d->cmd_list, list) {
+			if (((u32)cmd->serial_number == context)) {
+				spin_unlock_irqrestore(&d->list_lock, flags);
+				scsi_device_put(d);
+				spin_lock(pHba->host->host_lock);
+				return cmd;
+			}
+		}
+		spin_unlock_irqrestore(&d->list_lock, flags);
+	}
+	spin_lock(pHba->host->host_lock);
+
+	return NULL;
+}
+
+/*
+ * Turn a pointer to ioctl reply data into an u32 'context'
+ */
+static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
+{
+#if BITS_PER_LONG == 32
+	return (u32)(unsigned long)reply;
+#else
+	ulong flags = 0;
+	u32 nr, i;
+
+	spin_lock_irqsave(pHba->host->host_lock, flags);
+	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
+	for (i = 0; i < nr; i++) {
+		if (pHba->ioctl_reply_context[i] == NULL) {
+			pHba->ioctl_reply_context[i] = reply;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(pHba->host->host_lock, flags);
+	if (i >= nr) {
+		kfree (reply);
+		printk(KERN_WARNING"%s: Too many outstanding "
+				"ioctl commands\n", pHba->name);
+		return (u32)-1;
+	}
+
+	return i;
+#endif
+}
+
+/*
+ * Go from an u32 'context' to a pointer to ioctl reply data.
+ */
+static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
+{
+#if BITS_PER_LONG == 32
+	return (void *)(unsigned long)context;
+#else
+	void *p = pHba->ioctl_reply_context[context];
+	pHba->ioctl_reply_context[context] = NULL;
+
+	return p;
+#endif
+}
 
 /*===========================================================================
  *  Error Handling routines
@@ -660,7 +782,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
 	msg[2] = 0;
 	msg[3]= 0;
-	msg[4] = (u32)cmd;
+	msg[4] = adpt_cmd_to_context(cmd);
 	if (pHba->host)
 		spin_lock_irq(pHba->host->host_lock);
 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -861,27 +983,6 @@ static void adpt_i2o_sys_shutdown(void)
861 printk(KERN_INFO "Adaptec I2O controllers down.\n"); 983 printk(KERN_INFO "Adaptec I2O controllers down.\n");
862} 984}
863 985
864/*
865 * reboot/shutdown notification.
866 *
867 * - Quiesce each IOP in the system
868 *
869 */
870
871#ifdef REBOOT_NOTIFIER
872static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
873{
874
875 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
876 return NOTIFY_DONE;
877
878 adpt_i2o_sys_shutdown();
879
880 return NOTIFY_DONE;
881}
882#endif
883
884
885static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) 986static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
886{ 987{
887 988
@@ -893,6 +994,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	u32 hba_map1_area_size = 0;
 	void __iomem *base_addr_virt = NULL;
 	void __iomem *msg_addr_virt = NULL;
+	int dma64 = 0;
 
 	int raptorFlag = FALSE;
 
@@ -906,9 +1008,21 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	}
 
 	pci_set_master(pDev);
-	if (pci_set_dma_mask(pDev, DMA_32BIT_MASK))
+
+	/*
+	 *	See if we should enable dma64 mode.
+	 */
+	if (sizeof(dma_addr_t) > 4 &&
+	    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
+		if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
+			dma64 = 1;
+	}
+	if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
 		return -EINVAL;
 
+	/* adapter only supports message blocks below 4GB */
+	pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);
+
 	base_addr0_phys = pci_resource_start(pDev,0);
 	hba_map0_area_size = pci_resource_len(pDev,0);
 
@@ -929,6 +1043,25 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 		raptorFlag = TRUE;
 	}
 
+#if BITS_PER_LONG == 64
+	/*
+	 *	The original Adaptec 64 bit driver has this comment here:
+	 *	"x86_64 machines need more optimal mappings"
+	 *
+	 *	I assume some HBAs report ridiculously large mappings
+	 *	and we need to limit them on platforms with IOMMUs.
+	 */
+	if (raptorFlag == TRUE) {
+		if (hba_map0_area_size > 128)
+			hba_map0_area_size = 128;
+		if (hba_map1_area_size > 524288)
+			hba_map1_area_size = 524288;
+	} else {
+		if (hba_map0_area_size > 524288)
+			hba_map0_area_size = 524288;
+	}
+#endif
+
 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
 	if (!base_addr_virt) {
 		pci_release_regions(pDev);
@@ -991,16 +1124,22 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	pHba->state = DPTI_STATE_RESET;
 	pHba->pDev = pDev;
 	pHba->devices = NULL;
+	pHba->dma64 = dma64;
 
 	// Initializing the spinlocks
 	spin_lock_init(&pHba->state_lock);
 	spin_lock_init(&adpt_post_wait_lock);
 
 	if(raptorFlag == 0){
-		printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
-			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
+		printk(KERN_INFO "Adaptec I2O RAID controller"
+				 " %d at %p size=%x irq=%d%s\n",
+			hba_count-1, base_addr_virt,
+			hba_map0_area_size, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 	} else {
-		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
+		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
+			hba_count-1, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
 		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
 	}
@@ -1053,10 +1192,26 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1053 if(pHba->msg_addr_virt != pHba->base_addr_virt){ 1192 if(pHba->msg_addr_virt != pHba->base_addr_virt){
1054 iounmap(pHba->msg_addr_virt); 1193 iounmap(pHba->msg_addr_virt);
1055 } 1194 }
1056 kfree(pHba->hrt); 1195 if(pHba->FwDebugBuffer_P)
1057 kfree(pHba->lct); 1196 iounmap(pHba->FwDebugBuffer_P);
1058 kfree(pHba->status_block); 1197 if(pHba->hrt) {
1059 kfree(pHba->reply_pool); 1198 dma_free_coherent(&pHba->pDev->dev,
1199 pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1200 pHba->hrt, pHba->hrt_pa);
1201 }
1202 if(pHba->lct) {
1203 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1204 pHba->lct, pHba->lct_pa);
1205 }
1206 if(pHba->status_block) {
1207 dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1208 pHba->status_block, pHba->status_block_pa);
1209 }
1210 if(pHba->reply_pool) {
1211 dma_free_coherent(&pHba->pDev->dev,
1212 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1213 pHba->reply_pool, pHba->reply_pool_pa);
1214 }
1060 1215
1061 for(d = pHba->devices; d ; d = next){ 1216 for(d = pHba->devices; d ; d = next){
1062 next = d->next; 1217 next = d->next;
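
The teardown above is the flip side of moving these buffers to dma_alloc_coherent(): unlike kfree(), dma_free_coherent() needs the owning device, the allocation size, and the DMA handle returned at allocation time, which is why adpt_hba grows the hrt_pa/lct_pa/status_block_pa/reply_pool_pa fields in the dpti.h hunks further down. A small illustrative pairing, with hypothetical names, of how such a buffer can be tracked and released:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: keep the DMA handle and length next to the
 * virtual pointer so the buffer can be freed later. */
struct example_coherent_buf {
	void		*va;
	dma_addr_t	pa;
	size_t		len;
};

static int example_buf_alloc(struct device *dev,
			     struct example_coherent_buf *b, size_t len)
{
	b->va = dma_alloc_coherent(dev, len, &b->pa, GFP_KERNEL);
	if (!b->va)
		return -ENOMEM;
	b->len = len;
	return 0;
}

static void example_buf_free(struct device *dev,
			     struct example_coherent_buf *b)
{
	if (b->va)
		dma_free_coherent(dev, b->len, b->va, b->pa);
	b->va = NULL;
}
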
@@ -1075,23 +1230,19 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1075 pci_dev_put(pHba->pDev); 1230 pci_dev_put(pHba->pDev);
1076 kfree(pHba); 1231 kfree(pHba);
1077 1232
1233 if (adpt_sysfs_class)
1234 device_destroy(adpt_sysfs_class,
1235 MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1236
1078 if(hba_count <= 0){ 1237 if(hba_count <= 0){
1079 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); 1238 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1239 if (adpt_sysfs_class) {
1240 class_destroy(adpt_sysfs_class);
1241 adpt_sysfs_class = NULL;
1242 }
1080 } 1243 }
1081} 1244}
1082 1245
1083
1084static int adpt_init(void)
1085{
1086 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
1087#ifdef REBOOT_NOTIFIER
1088 register_reboot_notifier(&adpt_reboot_notifier);
1089#endif
1090
1091 return 0;
1092}
1093
1094
1095static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun) 1246static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1096{ 1247{
1097 struct adpt_device* d; 1248 struct adpt_device* d;
@@ -1283,6 +1434,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1283{ 1434{
1284 u32 msg[8]; 1435 u32 msg[8];
1285 u8* status; 1436 u8* status;
1437 dma_addr_t addr;
1286 u32 m = EMPTY_QUEUE ; 1438 u32 m = EMPTY_QUEUE ;
1287 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ); 1439 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1288 1440
@@ -1305,12 +1457,13 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1305 schedule_timeout_uninterruptible(1); 1457 schedule_timeout_uninterruptible(1);
1306 } while (m == EMPTY_QUEUE); 1458 } while (m == EMPTY_QUEUE);
1307 1459
1308 status = kzalloc(4, GFP_KERNEL|ADDR32); 1460 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1309 if(status == NULL) { 1461 if(status == NULL) {
1310 adpt_send_nop(pHba, m); 1462 adpt_send_nop(pHba, m);
1311 printk(KERN_ERR"IOP reset failed - no free memory.\n"); 1463 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1312 return -ENOMEM; 1464 return -ENOMEM;
1313 } 1465 }
1466 memset(status,0,4);
1314 1467
1315 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; 1468 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1316 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; 1469 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
@@ -1318,8 +1471,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1318 msg[3]=0; 1471 msg[3]=0;
1319 msg[4]=0; 1472 msg[4]=0;
1320 msg[5]=0; 1473 msg[5]=0;
1321 msg[6]=virt_to_bus(status); 1474 msg[6]=dma_low(addr);
1322 msg[7]=0; 1475 msg[7]=dma_high(addr);
1323 1476
1324 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg)); 1477 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1325 wmb(); 1478 wmb();
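
The reset message above now carries the coherent buffer's DMA handle split into two 32-bit words via dma_low() and dma_high(). Those helpers are not visible in the hunks shown here (they are presumably added to dpti.h by this patch); a definition consistent with how they are used, low word in msg[6] and high word in msg[7] with the high word collapsing to zero when dma_addr_t is only 32 bits wide, would be something like:

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Assumed helpers, not quoted from the patch. */
static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return (u32)((u64)addr >> 32);
}
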
@@ -1329,7 +1482,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1329 while(*status == 0){ 1482 while(*status == 0){
1330 if(time_after(jiffies,timeout)){ 1483 if(time_after(jiffies,timeout)){
1331 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name); 1484 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1332 kfree(status); 1485 /* We lose 4 bytes of "status" here, but we cannot
 1486 free them because the controller may wake up and
 1487 corrupt those bytes at any time */
 1488 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
1333 return -ETIMEDOUT; 1489 return -ETIMEDOUT;
1334 } 1490 }
1335 rmb(); 1491 rmb();
@@ -1348,6 +1504,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1348 } 1504 }
1349 if(time_after(jiffies,timeout)){ 1505 if(time_after(jiffies,timeout)){
1350 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name); 1506 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
 1507 /* We lose 4 bytes of "status" here, but we
 1508 cannot free them because the controller may
 1509 wake up and corrupt those bytes at any time */
 1510 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
1351 return -ETIMEDOUT; 1511 return -ETIMEDOUT;
1352 } 1512 }
1353 schedule_timeout_uninterruptible(1); 1513 schedule_timeout_uninterruptible(1);
@@ -1364,7 +1524,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1364 PDEBUG("%s: Reset completed.\n", pHba->name); 1524 PDEBUG("%s: Reset completed.\n", pHba->name);
1365 } 1525 }
1366 1526
1367 kfree(status); 1527 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1368#ifdef UARTDELAY 1528#ifdef UARTDELAY
1369 // This delay is to allow someone attached to the card through the debug UART to 1529 // This delay is to allow someone attached to the card through the debug UART to
1370 // set up the dump levels that they want before the rest of the initialization sequence 1530 // set up the dump levels that they want before the rest of the initialization sequence
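
The reset path above also shows the new life cycle of the 4-byte status word: it is allocated coherently, its DMA address is handed to the adapter, the CPU polls it until the firmware writes a non-zero value, and on timeout the buffer is deliberately leaked because the adapter may still DMA into it later. A compact sketch of that polling pattern, with illustrative names and the same barrier and sleep calls the driver uses:

#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Sketch: wait for the device to write a non-zero status byte.
 * On timeout the buffer is intentionally NOT freed, because the
 * device could wake up and write into it afterwards. */
static int example_wait_for_status(struct device *dev, volatile u8 *status,
				   dma_addr_t status_pa, unsigned long secs)
{
	unsigned long timeout = jiffies + secs * HZ;

	while (*status == 0) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* leak the 4 bytes on purpose */
		rmb();				/* re-read device-written memory */
		schedule_timeout_uninterruptible(1);
	}

	dma_free_coherent(dev, 4, (void *)status, status_pa);
	return 0;
}
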
@@ -1636,6 +1796,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1636 u32 i = 0; 1796 u32 i = 0;
1637 u32 rcode = 0; 1797 u32 rcode = 0;
1638 void *p = NULL; 1798 void *p = NULL;
1799 dma_addr_t addr;
1639 ulong flags = 0; 1800 ulong flags = 0;
1640 1801
1641 memset(&msg, 0, MAX_MESSAGE_SIZE*4); 1802 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
@@ -1668,10 +1829,13 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1668 } 1829 }
1669 sg_offset = (msg[0]>>4)&0xf; 1830 sg_offset = (msg[0]>>4)&0xf;
1670 msg[2] = 0x40000000; // IOCTL context 1831 msg[2] = 0x40000000; // IOCTL context
1671 msg[3] = (u32)reply; 1832 msg[3] = adpt_ioctl_to_context(pHba, reply);
1833 if (msg[3] == (u32)-1)
1834 return -EBUSY;
1835
1672 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize); 1836 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1673 if(sg_offset) { 1837 if(sg_offset) {
1674 // TODO 64bit fix 1838 // TODO add 64 bit API
1675 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset); 1839 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1676 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); 1840 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1677 if (sg_count > pHba->sg_tablesize){ 1841 if (sg_count > pHba->sg_tablesize){
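
Storing a raw kernel pointer in msg[3] no longer works once pointers are 64 bits wide, so the ioctl path above converts the reply buffer into a 32-bit context with adpt_ioctl_to_context(), with (u32)-1 meaning no context was available and the caller returning -EBUSY. The helper itself is not shown in these hunks; given the four-entry ioctl_reply_context[] array added to adpt_hba in the dpti.h hunks below, one plausible shape (illustrative names, locking omitted) is:

/* Plausible sketch of the context helpers; the real ones are defined
 * elsewhere in the patch and may differ.  A real version would also
 * need locking around the slot table. */
static u32 example_ioctl_to_context(adpt_hba *pHba, u32 *reply)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			return i;	/* small index fits in a 32-bit context */
		}
	}
	return (u32)-1;			/* no free slot: caller returns -EBUSY */
}

static void *example_ioctl_from_context(adpt_hba *pHba, u32 context)
{
	u32 *p = NULL;

	if (context < 4) {
		p = pHba->ioctl_reply_context[context];
		pHba->ioctl_reply_context[context] = NULL;
	}
	return p;
}
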
@@ -1690,7 +1854,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1690 } 1854 }
1691 sg_size = sg[i].flag_count & 0xffffff; 1855 sg_size = sg[i].flag_count & 0xffffff;
1692 /* Allocate memory for the transfer */ 1856 /* Allocate memory for the transfer */
1693 p = kmalloc(sg_size, GFP_KERNEL|ADDR32); 1857 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1694 if(!p) { 1858 if(!p) {
1695 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 1859 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1696 pHba->name,sg_size,i,sg_count); 1860 pHba->name,sg_size,i,sg_count);
@@ -1700,15 +1864,15 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1700 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 1864 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1701 /* Copy in the user's SG buffer if necessary */ 1865 /* Copy in the user's SG buffer if necessary */
1702 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { 1866 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1703 // TODO 64bit fix 1867 // sg_simple_element API is 32 bit
1704 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) { 1868 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1705 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i); 1869 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1706 rcode = -EFAULT; 1870 rcode = -EFAULT;
1707 goto cleanup; 1871 goto cleanup;
1708 } 1872 }
1709 } 1873 }
1710 //TODO 64bit fix 1874 /* sg_simple_element API is 32 bit, but addr < 4GB */
1711 sg[i].addr_bus = (u32)virt_to_bus(p); 1875 sg[i].addr_bus = addr;
1712 } 1876 }
1713 } 1877 }
1714 1878
@@ -1736,7 +1900,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1736 if(sg_offset) { 1900 if(sg_offset) {
1737 /* Copy back the Scatter Gather buffers back to user space */ 1901 /* Copy back the Scatter Gather buffers back to user space */
1738 u32 j; 1902 u32 j;
1739 // TODO 64bit fix 1903 // TODO add 64 bit API
1740 struct sg_simple_element* sg; 1904 struct sg_simple_element* sg;
1741 int sg_size; 1905 int sg_size;
1742 1906
@@ -1756,14 +1920,14 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1756 } 1920 }
1757 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); 1921 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1758 1922
1759 // TODO 64bit fix 1923 // TODO add 64 bit API
1760 sg = (struct sg_simple_element*)(msg + sg_offset); 1924 sg = (struct sg_simple_element*)(msg + sg_offset);
1761 for (j = 0; j < sg_count; j++) { 1925 for (j = 0; j < sg_count; j++) {
1762 /* Copy out the SG list to user's buffer if necessary */ 1926 /* Copy out the SG list to user's buffer if necessary */
1763 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { 1927 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1764 sg_size = sg[j].flag_count & 0xffffff; 1928 sg_size = sg[j].flag_count & 0xffffff;
1765 // TODO 64bit fix 1929 // sg_simple_element API is 32 bit
1766 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) { 1930 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1767 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus); 1931 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1768 rcode = -EFAULT; 1932 rcode = -EFAULT;
1769 goto cleanup; 1933 goto cleanup;
@@ -1787,12 +1951,17 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1787 1951
1788 1952
1789cleanup: 1953cleanup:
1790 if (rcode != -ETIME && rcode != -EINTR) 1954 if (rcode != -ETIME && rcode != -EINTR) {
1955 struct sg_simple_element *sg =
1956 (struct sg_simple_element*) (msg +sg_offset);
1791 kfree (reply); 1957 kfree (reply);
1792 while(sg_index) { 1958 while(sg_index) {
1793 if(sg_list[--sg_index]) { 1959 if(sg_list[--sg_index]) {
1794 if (rcode != -ETIME && rcode != -EINTR) 1960 dma_free_coherent(&pHba->pDev->dev,
1795 kfree(sg_list[sg_index]); 1961 sg[sg_index].flag_count & 0xffffff,
1962 sg_list[sg_index],
1963 sg[sg_index].addr_bus);
1964 }
1796 } 1965 }
1797 } 1966 }
1798 return rcode; 1967 return rcode;
@@ -1978,6 +2147,38 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1978 return error; 2147 return error;
1979} 2148}
1980 2149
2150#ifdef CONFIG_COMPAT
2151static long compat_adpt_ioctl(struct file *file,
2152 unsigned int cmd, unsigned long arg)
2153{
2154 struct inode *inode;
2155 long ret;
2156
2157 inode = file->f_dentry->d_inode;
2158
2159 lock_kernel();
2160
2161 switch(cmd) {
2162 case DPT_SIGNATURE:
2163 case I2OUSRCMD:
2164 case DPT_CTRLINFO:
2165 case DPT_SYSINFO:
2166 case DPT_BLINKLED:
2167 case I2ORESETCMD:
2168 case I2ORESCANCMD:
2169 case (DPT_TARGET_BUSY & 0xFFFF):
2170 case DPT_TARGET_BUSY:
2171 ret = adpt_ioctl(inode, file, cmd, arg);
2172 break;
2173 default:
2174 ret = -ENOIOCTLCMD;
2175 }
2176
2177 unlock_kernel();
2178
2179 return ret;
2180}
2181#endif
1981 2182
1982static irqreturn_t adpt_isr(int irq, void *dev_id) 2183static irqreturn_t adpt_isr(int irq, void *dev_id)
1983{ 2184{
@@ -2009,7 +2210,16 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
2009 goto out; 2210 goto out;
2010 } 2211 }
2011 } 2212 }
2012 reply = bus_to_virt(m); 2213 if (pHba->reply_pool_pa <= m &&
2214 m < pHba->reply_pool_pa +
2215 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2216 reply = (u8 *)pHba->reply_pool +
2217 (m - pHba->reply_pool_pa);
2218 } else {
2219 /* Ick, we should *never* be here */
2220 printk(KERN_ERR "dpti: reply frame not from pool\n");
2221 reply = (u8 *)bus_to_virt(m);
2222 }
2013 2223
2014 if (readl(reply) & MSG_FAIL) { 2224 if (readl(reply) & MSG_FAIL) {
2015 u32 old_m = readl(reply+28); 2225 u32 old_m = readl(reply+28);
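
Since the reply frames now live in one dma_alloc_coherent() pool, the interrupt handler above can no longer feed the address popped from the outbound FIFO to bus_to_virt(); instead it checks that the address falls inside the pool and converts it by offset. The same translation as a stand-alone sketch, with illustrative names:

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Sketch: translate a reply frame's DMA address back to its virtual
 * address by offsetting into the coherent reply pool. */
static void *example_reply_to_virt(void *pool_va, dma_addr_t pool_pa,
				   size_t pool_len, dma_addr_t frame_pa)
{
	if (frame_pa < pool_pa || frame_pa >= pool_pa + pool_len)
		return NULL;		/* not one of our reply frames */
	return (u8 *)pool_va + (frame_pa - pool_pa);
}
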
@@ -2029,7 +2239,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
2029 } 2239 }
2030 context = readl(reply+8); 2240 context = readl(reply+8);
2031 if(context & 0x40000000){ // IOCTL 2241 if(context & 0x40000000){ // IOCTL
2032 void *p = (void *)readl(reply+12); 2242 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2033 if( p != NULL) { 2243 if( p != NULL) {
2034 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4); 2244 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2035 } 2245 }
@@ -2043,15 +2253,17 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
2043 status = I2O_POST_WAIT_OK; 2253 status = I2O_POST_WAIT_OK;
2044 } 2254 }
2045 if(!(context & 0x40000000)) { 2255 if(!(context & 0x40000000)) {
2046 cmd = (struct scsi_cmnd*) readl(reply+12); 2256 cmd = adpt_cmd_from_context(pHba,
2257 readl(reply+12));
2047 if(cmd != NULL) { 2258 if(cmd != NULL) {
2048 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); 2259 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2049 } 2260 }
2050 } 2261 }
2051 adpt_i2o_post_wait_complete(context, status); 2262 adpt_i2o_post_wait_complete(context, status);
2052 } else { // SCSI message 2263 } else { // SCSI message
2053 cmd = (struct scsi_cmnd*) readl(reply+12); 2264 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2054 if(cmd != NULL){ 2265 if(cmd != NULL){
2266 scsi_dma_unmap(cmd);
2055 if(cmd->serial_number != 0) { // If not timedout 2267 if(cmd->serial_number != 0) { // If not timedout
2056 adpt_i2o_to_scsi(reply, cmd); 2268 adpt_i2o_to_scsi(reply, cmd);
2057 } 2269 }
@@ -2072,6 +2284,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2072 int i; 2284 int i;
2073 u32 msg[MAX_MESSAGE_SIZE]; 2285 u32 msg[MAX_MESSAGE_SIZE];
2074 u32* mptr; 2286 u32* mptr;
2287 u32* lptr;
2075 u32 *lenptr; 2288 u32 *lenptr;
2076 int direction; 2289 int direction;
2077 int scsidir; 2290 int scsidir;
@@ -2079,6 +2292,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2079 u32 len; 2292 u32 len;
2080 u32 reqlen; 2293 u32 reqlen;
2081 s32 rcode; 2294 s32 rcode;
2295 dma_addr_t addr;
2082 2296
2083 memset(msg, 0 , sizeof(msg)); 2297 memset(msg, 0 , sizeof(msg));
2084 len = scsi_bufflen(cmd); 2298 len = scsi_bufflen(cmd);
@@ -2118,7 +2332,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2118 // I2O_CMD_SCSI_EXEC 2332 // I2O_CMD_SCSI_EXEC
2119 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); 2333 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2120 msg[2] = 0; 2334 msg[2] = 0;
2121 msg[3] = (u32)cmd; /* We want the SCSI control block back */ 2335 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2122 // Our cards use the transaction context as the tag for queueing 2336 // Our cards use the transaction context as the tag for queueing
2123 // Adaptec/DPT Private stuff 2337 // Adaptec/DPT Private stuff
2124 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); 2338 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2136,7 +2350,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2136 memcpy(mptr, cmd->cmnd, cmd->cmd_len); 2350 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2137 mptr+=4; 2351 mptr+=4;
2138 lenptr=mptr++; /* Remember me - fill in when we know */ 2352 lenptr=mptr++; /* Remember me - fill in when we know */
2139 reqlen = 14; // SINGLE SGE 2353 if (dpt_dma64(pHba)) {
2354 reqlen = 16; // SINGLE SGE
2355 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2356 *mptr++ = 1 << PAGE_SHIFT;
2357 } else {
2358 reqlen = 14; // SINGLE SGE
2359 }
2140 /* Now fill in the SGList and command */ 2360 /* Now fill in the SGList and command */
2141 2361
2142 nseg = scsi_dma_map(cmd); 2362 nseg = scsi_dma_map(cmd);
@@ -2146,12 +2366,16 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2146 2366
2147 len = 0; 2367 len = 0;
2148 scsi_for_each_sg(cmd, sg, nseg, i) { 2368 scsi_for_each_sg(cmd, sg, nseg, i) {
2369 lptr = mptr;
2149 *mptr++ = direction|0x10000000|sg_dma_len(sg); 2370 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2150 len+=sg_dma_len(sg); 2371 len+=sg_dma_len(sg);
2151 *mptr++ = sg_dma_address(sg); 2372 addr = sg_dma_address(sg);
2373 *mptr++ = dma_low(addr);
2374 if (dpt_dma64(pHba))
2375 *mptr++ = dma_high(addr);
2152 /* Make this an end of list */ 2376 /* Make this an end of list */
2153 if (i == nseg - 1) 2377 if (i == nseg - 1)
2154 mptr[-2] = direction|0xD0000000|sg_dma_len(sg); 2378 *lptr = direction|0xD0000000|sg_dma_len(sg);
2155 } 2379 }
2156 reqlen = mptr - msg; 2380 reqlen = mptr - msg;
2157 *lenptr = len; 2381 *lenptr = len;
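
In the 64-bit case the message grows by the two extra words inserted above (the first carries what the patch's own comment calls the "Enable 64 bit" SGL attribute, the second appears to pass the page size as 1 << PAGE_SHIFT), and each scatter/gather entry then takes its flag word plus a low and a high address word; the flag word of the final entry is rewritten through lptr to set the end-of-buffer/end-of-list bits. A condensed sketch of that loop, using the dma_low()/dma_high() helpers assumed earlier and illustrative names:

#include <scsi/scsi_cmnd.h>
#include <linux/scatterlist.h>

/* Sketch of the 64-bit SG-list loop above; "direction" holds the I2O
 * direction bits already computed by the caller, and nseg is the
 * scsi_dma_map() result. */
static u32 *example_build_sg64(struct scsi_cmnd *cmd, int nseg,
			       u32 *mptr, u32 direction, u32 *total)
{
	struct scatterlist *sg;
	int i;

	*total = 0;
	scsi_for_each_sg(cmd, sg, nseg, i) {
		u32 *flagptr = mptr;	/* remember this element's flag word */

		*mptr++ = direction | 0x10000000 | sg_dma_len(sg);
		*mptr++ = dma_low(sg_dma_address(sg));
		*mptr++ = dma_high(sg_dma_address(sg));
		*total += sg_dma_len(sg);
		/* the last element gets the end-of-buffer/end-of-list bits */
		if (i == nseg - 1)
			*flagptr = direction | 0xD0000000 | sg_dma_len(sg);
	}
	return mptr;
}
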
@@ -2177,13 +2401,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2177} 2401}
2178 2402
2179 2403
2180static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht) 2404static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2181{ 2405{
2182 struct Scsi_Host *host = NULL; 2406 struct Scsi_Host *host;
2183 2407
2184 host = scsi_register(sht, sizeof(adpt_hba*)); 2408 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2185 if (host == NULL) { 2409 if (host == NULL) {
2186 printk ("%s: scsi_register returned NULL\n",pHba->name); 2410 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2187 return -1; 2411 return -1;
2188 } 2412 }
2189 host->hostdata[0] = (unsigned long)pHba; 2413 host->hostdata[0] = (unsigned long)pHba;
@@ -2200,7 +2424,7 @@ static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2200 host->max_lun = 256; 2424 host->max_lun = 256;
2201 host->max_channel = pHba->top_scsi_channel + 1; 2425 host->max_channel = pHba->top_scsi_channel + 1;
2202 host->cmd_per_lun = 1; 2426 host->cmd_per_lun = 1;
2203 host->unique_id = (uint) pHba; 2427 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2204 host->sg_tablesize = pHba->sg_tablesize; 2428 host->sg_tablesize = pHba->sg_tablesize;
2205 host->can_queue = pHba->post_fifo_size; 2429 host->can_queue = pHba->post_fifo_size;
2206 2430
@@ -2640,11 +2864,10 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2640static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba) 2864static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2641{ 2865{
2642 u8 *status; 2866 u8 *status;
2867 dma_addr_t addr;
2643 u32 __iomem *msg = NULL; 2868 u32 __iomem *msg = NULL;
2644 int i; 2869 int i;
2645 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ; 2870 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2646 u32* ptr;
2647 u32 outbound_frame; // This had to be a 32 bit address
2648 u32 m; 2871 u32 m;
2649 2872
2650 do { 2873 do {
@@ -2663,13 +2886,14 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2663 2886
2664 msg=(u32 __iomem *)(pHba->msg_addr_virt+m); 2887 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2665 2888
2666 status = kzalloc(4, GFP_KERNEL|ADDR32); 2889 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2667 if (!status) { 2890 if (!status) {
2668 adpt_send_nop(pHba, m); 2891 adpt_send_nop(pHba, m);
2669 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n", 2892 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2670 pHba->name); 2893 pHba->name);
2671 return -ENOMEM; 2894 return -ENOMEM;
2672 } 2895 }
2896 memset(status, 0, 4);
2673 2897
2674 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]); 2898 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2675 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]); 2899 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
@@ -2678,7 +2902,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2678 writel(4096, &msg[4]); /* Host page frame size */ 2902 writel(4096, &msg[4]); /* Host page frame size */
2679 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */ 2903 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2680 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */ 2904 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2681 writel(virt_to_bus(status), &msg[7]); 2905 writel((u32)addr, &msg[7]);
2682 2906
2683 writel(m, pHba->post_port); 2907 writel(m, pHba->post_port);
2684 wmb(); 2908 wmb();
@@ -2693,6 +2917,10 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2693 rmb(); 2917 rmb();
2694 if(time_after(jiffies,timeout)){ 2918 if(time_after(jiffies,timeout)){
2695 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name); 2919 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
 2920 /* We lose 4 bytes of "status" here, but we
 2921 cannot free them because the controller may
 2922 wake up and corrupt those bytes at any time */
2923 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2696 return -ETIMEDOUT; 2924 return -ETIMEDOUT;
2697 } 2925 }
2698 schedule_timeout_uninterruptible(1); 2926 schedule_timeout_uninterruptible(1);
@@ -2701,25 +2929,30 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2701 // If the command was successful, fill the fifo with our reply 2929 // If the command was successful, fill the fifo with our reply
2702 // message packets 2930 // message packets
2703 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) { 2931 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2704 kfree(status); 2932 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2705 return -2; 2933 return -2;
2706 } 2934 }
2707 kfree(status); 2935 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2708 2936
2709 kfree(pHba->reply_pool); 2937 if(pHba->reply_pool != NULL) {
2938 dma_free_coherent(&pHba->pDev->dev,
2939 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2940 pHba->reply_pool, pHba->reply_pool_pa);
2941 }
2710 2942
2711 pHba->reply_pool = kzalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32); 2943 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2944 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2945 &pHba->reply_pool_pa, GFP_KERNEL);
2712 if (!pHba->reply_pool) { 2946 if (!pHba->reply_pool) {
2713 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name); 2947 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2714 return -ENOMEM; 2948 return -ENOMEM;
2715 } 2949 }
2950 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2716 2951
2717 ptr = pHba->reply_pool;
2718 for(i = 0; i < pHba->reply_fifo_size; i++) { 2952 for(i = 0; i < pHba->reply_fifo_size; i++) {
2719 outbound_frame = (u32)virt_to_bus(ptr); 2953 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2720 writel(outbound_frame, pHba->reply_port); 2954 pHba->reply_port);
2721 wmb(); 2955 wmb();
2722 ptr += REPLY_FRAME_SIZE;
2723 } 2956 }
2724 adpt_i2o_status_get(pHba); 2957 adpt_i2o_status_get(pHba);
2725 return 0; 2958 return 0;
@@ -2743,11 +2976,11 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2743 u32 m; 2976 u32 m;
2744 u32 __iomem *msg; 2977 u32 __iomem *msg;
2745 u8 *status_block=NULL; 2978 u8 *status_block=NULL;
2746 ulong status_block_bus;
2747 2979
2748 if(pHba->status_block == NULL) { 2980 if(pHba->status_block == NULL) {
2749 pHba->status_block = (i2o_status_block*) 2981 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2750 kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32); 2982 sizeof(i2o_status_block),
2983 &pHba->status_block_pa, GFP_KERNEL);
2751 if(pHba->status_block == NULL) { 2984 if(pHba->status_block == NULL) {
2752 printk(KERN_ERR 2985 printk(KERN_ERR
2753 "dpti%d: Get Status Block failed; Out of memory. \n", 2986 "dpti%d: Get Status Block failed; Out of memory. \n",
@@ -2757,7 +2990,6 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2757 } 2990 }
2758 memset(pHba->status_block, 0, sizeof(i2o_status_block)); 2991 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2759 status_block = (u8*)(pHba->status_block); 2992 status_block = (u8*)(pHba->status_block);
2760 status_block_bus = virt_to_bus(pHba->status_block);
2761 timeout = jiffies+TMOUT_GETSTATUS*HZ; 2993 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2762 do { 2994 do {
2763 rmb(); 2995 rmb();
@@ -2782,8 +3014,8 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2782 writel(0, &msg[3]); 3014 writel(0, &msg[3]);
2783 writel(0, &msg[4]); 3015 writel(0, &msg[4]);
2784 writel(0, &msg[5]); 3016 writel(0, &msg[5]);
2785 writel(((u32)status_block_bus)&0xffffffff, &msg[6]); 3017 writel( dma_low(pHba->status_block_pa), &msg[6]);
2786 writel(0, &msg[7]); 3018 writel( dma_high(pHba->status_block_pa), &msg[7]);
2787 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes 3019 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2788 3020
2789 //post message 3021 //post message
@@ -2812,7 +3044,17 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2812 } 3044 }
2813 3045
2814 // Calculate the Scatter Gather list size 3046 // Calculate the Scatter Gather list size
2815 pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element); 3047 if (dpt_dma64(pHba)) {
3048 pHba->sg_tablesize
3049 = ((pHba->status_block->inbound_frame_size * 4
3050 - 14 * sizeof(u32))
3051 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3052 } else {
3053 pHba->sg_tablesize
3054 = ((pHba->status_block->inbound_frame_size * 4
3055 - 12 * sizeof(u32))
3056 / sizeof(struct sg_simple_element));
3057 }
2816 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) { 3058 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2817 pHba->sg_tablesize = SG_LIST_ELEMENTS; 3059 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2818 } 3060 }
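
To put numbers on the new sg_tablesize calculation: with the two-word (flag_count plus addr_bus, 8-byte) sg_simple_element from dpti.h and an inbound_frame_size of, say, 128 dwords (512 bytes, a value chosen only for illustration), the 32-bit case budgets 512 - 12*4 = 464 bytes for the list and allows 464 / 8 = 58 elements, while the 64-bit case reserves 14 dwords of header (the extra two for the SGL attribute element) and pays one more dword per element for the high address half, so 512 - 14*4 = 456 bytes gives 456 / (8 + 4) = 38 elements. Either result is still clamped to SG_LIST_ELEMENTS just below.
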
@@ -2863,7 +3105,9 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2863 } 3105 }
2864 do { 3106 do {
2865 if (pHba->lct == NULL) { 3107 if (pHba->lct == NULL) {
2866 pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32); 3108 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3109 pHba->lct_size, &pHba->lct_pa,
3110 GFP_KERNEL);
2867 if(pHba->lct == NULL) { 3111 if(pHba->lct == NULL) {
2868 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n", 3112 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2869 pHba->name); 3113 pHba->name);
@@ -2879,7 +3123,7 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2879 msg[4] = 0xFFFFFFFF; /* All devices */ 3123 msg[4] = 0xFFFFFFFF; /* All devices */
2880 msg[5] = 0x00000000; /* Report now */ 3124 msg[5] = 0x00000000; /* Report now */
2881 msg[6] = 0xD0000000|pHba->lct_size; 3125 msg[6] = 0xD0000000|pHba->lct_size;
2882 msg[7] = virt_to_bus(pHba->lct); 3126 msg[7] = (u32)pHba->lct_pa;
2883 3127
2884 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) { 3128 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2885 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n", 3129 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
@@ -2890,7 +3134,8 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2890 3134
2891 if ((pHba->lct->table_size << 2) > pHba->lct_size) { 3135 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2892 pHba->lct_size = pHba->lct->table_size << 2; 3136 pHba->lct_size = pHba->lct->table_size << 2;
2893 kfree(pHba->lct); 3137 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3138 pHba->lct, pHba->lct_pa);
2894 pHba->lct = NULL; 3139 pHba->lct = NULL;
2895 } 3140 }
2896 } while (pHba->lct == NULL); 3141 } while (pHba->lct == NULL);
@@ -2901,13 +3146,19 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2901 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO; 3146 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2902 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) { 3147 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2903 pHba->FwDebugBufferSize = buf[1]; 3148 pHba->FwDebugBufferSize = buf[1];
2904 pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0]; 3149 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
2905 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET; 3150 pHba->FwDebugBufferSize);
2906 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET; 3151 if (pHba->FwDebugBuffer_P) {
2907 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1; 3152 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
2908 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET; 3153 FW_DEBUG_FLAGS_OFFSET;
2909 pHba->FwDebugBuffer_P += buf[2]; 3154 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
2910 pHba->FwDebugFlags = 0; 3155 FW_DEBUG_BLED_OFFSET;
3156 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3157 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3158 FW_DEBUG_STR_LENGTH_OFFSET;
3159 pHba->FwDebugBuffer_P += buf[2];
3160 pHba->FwDebugFlags = 0;
3161 }
2911 } 3162 }
2912 3163
2913 return 0; 3164 return 0;
@@ -2915,25 +3166,30 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2915 3166
2916static int adpt_i2o_build_sys_table(void) 3167static int adpt_i2o_build_sys_table(void)
2917{ 3168{
2918 adpt_hba* pHba = NULL; 3169 adpt_hba* pHba = hba_chain;
2919 int count = 0; 3170 int count = 0;
2920 3171
3172 if (sys_tbl)
3173 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3174 sys_tbl, sys_tbl_pa);
3175
2921 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs 3176 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2922 (hba_count) * sizeof(struct i2o_sys_tbl_entry); 3177 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2923 3178
2924 kfree(sys_tbl); 3179 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
2925 3180 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
2926 sys_tbl = kzalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2927 if (!sys_tbl) { 3181 if (!sys_tbl) {
2928 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n"); 3182 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2929 return -ENOMEM; 3183 return -ENOMEM;
2930 } 3184 }
3185 memset(sys_tbl, 0, sys_tbl_len);
2931 3186
2932 sys_tbl->num_entries = hba_count; 3187 sys_tbl->num_entries = hba_count;
2933 sys_tbl->version = I2OVERSION; 3188 sys_tbl->version = I2OVERSION;
2934 sys_tbl->change_ind = sys_tbl_ind++; 3189 sys_tbl->change_ind = sys_tbl_ind++;
2935 3190
2936 for(pHba = hba_chain; pHba; pHba = pHba->next) { 3191 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3192 u64 addr;
2937 // Get updated Status Block so we have the latest information 3193 // Get updated Status Block so we have the latest information
2938 if (adpt_i2o_status_get(pHba)) { 3194 if (adpt_i2o_status_get(pHba)) {
2939 sys_tbl->num_entries--; 3195 sys_tbl->num_entries--;
@@ -2949,8 +3205,9 @@ static int adpt_i2o_build_sys_table(void)
2949 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size; 3205 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2950 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ?? 3206 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2951 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities; 3207 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
2952 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port); 3208 addr = pHba->base_addr_phys + 0x40;
2953 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32); 3209 sys_tbl->iops[count].inbound_low = dma_low(addr);
3210 sys_tbl->iops[count].inbound_high = dma_high(addr);
2954 3211
2955 count++; 3212 count++;
2956 } 3213 }
@@ -3086,7 +3343,8 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3086 3343
3087 do { 3344 do {
3088 if (pHba->hrt == NULL) { 3345 if (pHba->hrt == NULL) {
3089 pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32); 3346 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3347 size, &pHba->hrt_pa, GFP_KERNEL);
3090 if (pHba->hrt == NULL) { 3348 if (pHba->hrt == NULL) {
3091 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name); 3349 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3092 return -ENOMEM; 3350 return -ENOMEM;
@@ -3098,7 +3356,7 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3098 msg[2]= 0; 3356 msg[2]= 0;
3099 msg[3]= 0; 3357 msg[3]= 0;
3100 msg[4]= (0xD0000000 | size); /* Simple transaction */ 3358 msg[4]= (0xD0000000 | size); /* Simple transaction */
3101 msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */ 3359 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3102 3360
3103 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) { 3361 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3104 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret); 3362 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
@@ -3106,8 +3364,10 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3106 } 3364 }
3107 3365
3108 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) { 3366 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3109 size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2; 3367 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3110 kfree(pHba->hrt); 3368 dma_free_coherent(&pHba->pDev->dev, size,
3369 pHba->hrt, pHba->hrt_pa);
3370 size = newsize;
3111 pHba->hrt = NULL; 3371 pHba->hrt = NULL;
3112 } 3372 }
3113 } while(pHba->hrt == NULL); 3373 } while(pHba->hrt == NULL);
@@ -3121,33 +3381,54 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3121 int group, int field, void *buf, int buflen) 3381 int group, int field, void *buf, int buflen)
3122{ 3382{
3123 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; 3383 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3124 u8 *resblk; 3384 u8 *opblk_va;
3385 dma_addr_t opblk_pa;
3386 u8 *resblk_va;
3387 dma_addr_t resblk_pa;
3125 3388
3126 int size; 3389 int size;
3127 3390
3128 /* 8 bytes for header */ 3391 /* 8 bytes for header */
3129 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32); 3392 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3130 if (resblk == NULL) { 3393 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3394 if (resblk_va == NULL) {
3131 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name); 3395 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3132 return -ENOMEM; 3396 return -ENOMEM;
3133 } 3397 }
3134 3398
3399 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3400 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3401 if (opblk_va == NULL) {
3402 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3403 resblk_va, resblk_pa);
 3404 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3405 pHba->name);
3406 return -ENOMEM;
3407 }
3135 if (field == -1) /* whole group */ 3408 if (field == -1) /* whole group */
3136 opblk[4] = -1; 3409 opblk[4] = -1;
3137 3410
3411 memcpy(opblk_va, opblk, sizeof(opblk));
3138 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 3412 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3139 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen)); 3413 opblk_va, opblk_pa, sizeof(opblk),
3414 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3415 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3140 if (size == -ETIME) { 3416 if (size == -ETIME) {
3417 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3418 resblk_va, resblk_pa);
3141 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name); 3419 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3142 return -ETIME; 3420 return -ETIME;
3143 } else if (size == -EINTR) { 3421 } else if (size == -EINTR) {
3422 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3423 resblk_va, resblk_pa);
3144 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name); 3424 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3145 return -EINTR; 3425 return -EINTR;
3146 } 3426 }
3147 3427
3148 memcpy(buf, resblk+8, buflen); /* cut off header */ 3428 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3149 3429
3150 kfree(resblk); 3430 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3431 resblk_va, resblk_pa);
3151 if (size < 0) 3432 if (size < 0)
3152 return size; 3433 return size;
3153 3434
@@ -3164,10 +3445,11 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3164 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 3445 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3165 */ 3446 */
3166static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 3447static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3167 void *opblk, int oplen, void *resblk, int reslen) 3448 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3449 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3168{ 3450{
3169 u32 msg[9]; 3451 u32 msg[9];
3170 u32 *res = (u32 *)resblk; 3452 u32 *res = (u32 *)resblk_va;
3171 int wait_status; 3453 int wait_status;
3172 3454
3173 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5; 3455 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
@@ -3176,12 +3458,12 @@ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3176 msg[3] = 0; 3458 msg[3] = 0;
3177 msg[4] = 0; 3459 msg[4] = 0;
3178 msg[5] = 0x54000000 | oplen; /* OperationBlock */ 3460 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3179 msg[6] = virt_to_bus(opblk); 3461 msg[6] = (u32)opblk_pa;
3180 msg[7] = 0xD0000000 | reslen; /* ResultBlock */ 3462 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3181 msg[8] = virt_to_bus(resblk); 3463 msg[8] = (u32)resblk_pa;
3182 3464
3183 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) { 3465 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3184 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk); 3466 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3185 return wait_status; /* -DetailedStatus */ 3467 return wait_status; /* -DetailedStatus */
3186 } 3468 }
3187 3469
@@ -3284,7 +3566,7 @@ static int adpt_i2o_systab_send(adpt_hba* pHba)
3284 * Private i/o space declaration 3566 * Private i/o space declaration
3285 */ 3567 */
3286 msg[6] = 0x54000000 | sys_tbl_len; 3568 msg[6] = 0x54000000 | sys_tbl_len;
3287 msg[7] = virt_to_phys(sys_tbl); 3569 msg[7] = (u32)sys_tbl_pa;
3288 msg[8] = 0x54000000 | 0; 3570 msg[8] = 0x54000000 | 0;
3289 msg[9] = 0; 3571 msg[9] = 0;
3290 msg[10] = 0xD4000000 | 0; 3572 msg[10] = 0xD4000000 | 0;
@@ -3323,11 +3605,10 @@ static void adpt_delay(int millisec) 3605
3323#endif 3605#endif
3324 3606
3325static struct scsi_host_template driver_template = { 3607static struct scsi_host_template driver_template = {
3608 .module = THIS_MODULE,
3326 .name = "dpt_i2o", 3609 .name = "dpt_i2o",
3327 .proc_name = "dpt_i2o", 3610 .proc_name = "dpt_i2o",
3328 .proc_info = adpt_proc_info, 3611 .proc_info = adpt_proc_info,
3329 .detect = adpt_detect,
3330 .release = adpt_release,
3331 .info = adpt_info, 3612 .info = adpt_info,
3332 .queuecommand = adpt_queue, 3613 .queuecommand = adpt_queue,
3333 .eh_abort_handler = adpt_abort, 3614 .eh_abort_handler = adpt_abort,
@@ -3341,5 +3622,48 @@ static struct scsi_host_template driver_template = {
3341 .cmd_per_lun = 1, 3622 .cmd_per_lun = 1,
3342 .use_clustering = ENABLE_CLUSTERING, 3623 .use_clustering = ENABLE_CLUSTERING,
3343}; 3624};
3344#include "scsi_module.c" 3625
3626static int __init adpt_init(void)
3627{
3628 int error;
3629 adpt_hba *pHba, *next;
3630
3631 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3632
3633 error = adpt_detect(&driver_template);
3634 if (error < 0)
3635 return error;
3636 if (hba_chain == NULL)
3637 return -ENODEV;
3638
3639 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3640 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3641 if (error)
3642 goto fail;
3643 scsi_scan_host(pHba->host);
3644 }
3645 return 0;
3646fail:
3647 for (pHba = hba_chain; pHba; pHba = next) {
3648 next = pHba->next;
3649 scsi_remove_host(pHba->host);
3650 }
3651 return error;
3652}
3653
3654static void __exit adpt_exit(void)
3655{
3656 adpt_hba *pHba, *next;
3657
3658 for (pHba = hba_chain; pHba; pHba = pHba->next)
3659 scsi_remove_host(pHba->host);
3660 for (pHba = hba_chain; pHba; pHba = next) {
3661 next = pHba->next;
3662 adpt_release(pHba->host);
3663 }
3664}
3665
3666module_init(adpt_init);
3667module_exit(adpt_exit);
3668
3345MODULE_LICENSE("GPL"); 3669MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index fd79068c5869..924cd5a51676 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -84,7 +84,6 @@ static int adpt_device_reset(struct scsi_cmnd* cmd);
84#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID 84#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
85#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511) 85#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
86 86
87//#define REBOOT_NOTIFIER 1
88/* Debugging macro from Linux Device Drivers - Rubini */ 87/* Debugging macro from Linux Device Drivers - Rubini */
89#undef PDEBUG 88#undef PDEBUG
90#ifdef DEBUG 89#ifdef DEBUG
@@ -229,14 +228,19 @@ typedef struct _adpt_hba {
229 u32 post_fifo_size; 228 u32 post_fifo_size;
230 u32 reply_fifo_size; 229 u32 reply_fifo_size;
231 u32* reply_pool; 230 u32* reply_pool;
231 dma_addr_t reply_pool_pa;
232 u32 sg_tablesize; // Scatter/Gather List Size. 232 u32 sg_tablesize; // Scatter/Gather List Size.
233 u8 top_scsi_channel; 233 u8 top_scsi_channel;
234 u8 top_scsi_id; 234 u8 top_scsi_id;
235 u8 top_scsi_lun; 235 u8 top_scsi_lun;
236 u8 dma64;
236 237
237 i2o_status_block* status_block; 238 i2o_status_block* status_block;
239 dma_addr_t status_block_pa;
238 i2o_hrt* hrt; 240 i2o_hrt* hrt;
241 dma_addr_t hrt_pa;
239 i2o_lct* lct; 242 i2o_lct* lct;
243 dma_addr_t lct_pa;
240 uint lct_size; 244 uint lct_size;
241 struct i2o_device* devices; 245 struct i2o_device* devices;
242 struct adpt_channel channel[MAX_CHANNEL]; 246 struct adpt_channel channel[MAX_CHANNEL];
@@ -249,6 +253,7 @@ typedef struct _adpt_hba {
249 void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED 253 void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
250 void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED 254 void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
251 u32 FwDebugFlags; 255 u32 FwDebugFlags;
256 u32 *ioctl_reply_context[4];
252} adpt_hba; 257} adpt_hba;
253 258
254struct sg_simple_element { 259struct sg_simple_element {
@@ -264,9 +269,6 @@ static void adpt_i2o_sys_shutdown(void);
264static int adpt_init(void); 269static int adpt_init(void);
265static int adpt_i2o_build_sys_table(void); 270static int adpt_i2o_build_sys_table(void);
266static irqreturn_t adpt_isr(int irq, void *dev_id); 271static irqreturn_t adpt_isr(int irq, void *dev_id);
267#ifdef REBOOT_NOTIFIER
268static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p);
269#endif
270 272
271static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d); 273static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
272static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 274static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
@@ -275,7 +277,8 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
275static const char *adpt_i2o_get_class_name(int class); 277static const char *adpt_i2o_get_class_name(int class);
276#endif 278#endif
277static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 279static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
278 void *opblk, int oplen, void *resblk, int reslen); 280 void *opblk, dma_addr_t opblk_pa, int oplen,
281 void *resblk, dma_addr_t resblk_pa, int reslen);
279static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout); 282static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
280static int adpt_i2o_lct_get(adpt_hba* pHba); 283static int adpt_i2o_lct_get(adpt_hba* pHba);
281static int adpt_i2o_parse_lct(adpt_hba* pHba); 284static int adpt_i2o_parse_lct(adpt_hba* pHba);
@@ -289,7 +292,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
289static s32 adpt_i2o_hrt_get(adpt_hba* pHba); 292static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
290static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice); 293static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
291static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd); 294static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
292static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht); 295static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
293static s32 adpt_hba_reset(adpt_hba* pHba); 296static s32 adpt_hba_reset(adpt_hba* pHba);
294static s32 adpt_i2o_reset_hba(adpt_hba* pHba); 297static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
295static s32 adpt_rescan(adpt_hba* pHba); 298static s32 adpt_rescan(adpt_hba* pHba);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c6d6e7c6559a..8e2e964af668 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -465,7 +465,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
465 scp->request = (struct request *)&wait; 465 scp->request = (struct request *)&wait;
466 scp->timeout_per_command = timeout*HZ; 466 scp->timeout_per_command = timeout*HZ;
467 scp->cmd_len = 12; 467 scp->cmd_len = 12;
468 memcpy(scp->cmnd, cmnd, 12); 468 scp->cmnd = cmnd;
469 cmndinfo.priority = IOCTL_PRI; 469 cmndinfo.priority = IOCTL_PRI;
470 cmndinfo.internal_cmd_str = gdtcmd; 470 cmndinfo.internal_cmd_str = gdtcmd;
471 cmndinfo.internal_command = 1; 471 cmndinfo.internal_command = 1;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 5b7be1e9841c..aaa48e0c8ed0 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -763,9 +763,9 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
763 scp, 763 scp,
764 host->host_no, scp->device->channel, 764 host->host_no, scp->device->channel,
765 scp->device->id, scp->device->lun, 765 scp->device->id, scp->device->lun,
766 *((u32 *)&scp->cmnd), 766 ((u32 *)scp->cmnd)[0],
767 *((u32 *)&scp->cmnd + 1), 767 ((u32 *)scp->cmnd)[1],
768 *((u32 *)&scp->cmnd + 2), 768 ((u32 *)scp->cmnd)[2],
769 _req->index, _req->req_virt); 769 _req->index, _req->req_virt);
770 770
771 scp->result = 0; 771 scp->result = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 4a922c57125e..ccfd8aca3765 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -686,7 +686,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
686 } 686 }
687 687
688 if (cmnd) { 688 if (cmnd) {
689 cmnd->result = rsp->status; 689 cmnd->result |= rsp->status;
690 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) 690 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
691 memcpy(cmnd->sense_buffer, 691 memcpy(cmnd->sense_buffer,
692 rsp->data, 692 rsp->data,
@@ -730,6 +730,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
730 u16 lun = lun_from_dev(cmnd->device); 730 u16 lun = lun_from_dev(cmnd->device);
731 u8 out_fmt, in_fmt; 731 u8 out_fmt, in_fmt;
732 732
733 cmnd->result = (DID_OK << 16);
733 evt_struct = get_event_struct(&hostdata->pool); 734 evt_struct = get_event_struct(&hostdata->pool);
734 if (!evt_struct) 735 if (!evt_struct)
735 return SCSI_MLQUEUE_HOST_BUSY; 736 return SCSI_MLQUEUE_HOST_BUSY;
@@ -738,7 +739,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
738 srp_cmd = &evt_struct->iu.srp.cmd; 739 srp_cmd = &evt_struct->iu.srp.cmd;
739 memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); 740 memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
740 srp_cmd->opcode = SRP_CMD; 741 srp_cmd->opcode = SRP_CMD;
741 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd)); 742 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
742 srp_cmd->lun = ((u64) lun) << 48; 743 srp_cmd->lun = ((u64) lun) << 48;
743 744
744 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { 745 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
@@ -1347,6 +1348,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1347 1348
1348 del_timer(&evt_struct->timer); 1349 del_timer(&evt_struct->timer);
1349 1350
1351 if (crq->status != VIOSRP_OK && evt_struct->cmnd)
1352 evt_struct->cmnd->result = DID_ERROR << 16;
1350 if (evt_struct->done) 1353 if (evt_struct->done)
1351 evt_struct->done(evt_struct); 1354 evt_struct->done(evt_struct);
1352 else 1355 else
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 90f1a61283ad..4c4aadb3e405 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -59,6 +59,15 @@ enum viosrp_crq_formats {
59 VIOSRP_INLINE_FORMAT = 0x07 59 VIOSRP_INLINE_FORMAT = 0x07
60}; 60};
61 61
62enum viosrp_crq_status {
63 VIOSRP_OK = 0x0,
64 VIOSRP_NONRECOVERABLE_ERR = 0x1,
65 VIOSRP_VIOLATES_MAX_XFER = 0x2,
66 VIOSRP_PARTNER_PANIC = 0x3,
67 VIOSRP_DEVICE_BUSY = 0x8,
68 VIOSRP_ADAPTER_FAIL = 0x10
69};
70
62struct viosrp_crq { 71struct viosrp_crq {
63 u8 valid; /* used by RPA */ 72 u8 valid; /* used by RPA */
64 u8 format; /* SCSI vs out-of-band */ 73 u8 format; /* SCSI vs out-of-band */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index dbae3fdb8506..e3f739776bad 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2590,7 +2590,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
2590 cblk->hastat = 0; 2590 cblk->hastat = 0;
2591 cblk->tastat = 0; 2591 cblk->tastat = 0;
2592 /* Command the command */ 2592 /* Command the command */
2593 memcpy(&cblk->cdb[0], &cmnd->cmnd, cmnd->cmd_len); 2593 memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len);
2594 2594
2595 /* Set up tags */ 2595 /* Set up tags */
2596 if (cmnd->device->tagged_supported) { /* Tag Support */ 2596 if (cmnd->device->tagged_supported) { /* Tag Support */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index de5ae6a65029..999e91ea7451 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2791,7 +2791,7 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
2791 2791
2792static struct device_attribute ipr_ioa_state_attr = { 2792static struct device_attribute ipr_ioa_state_attr = {
2793 .attr = { 2793 .attr = {
2794 .name = "state", 2794 .name = "online_state",
2795 .mode = S_IRUGO | S_IWUSR, 2795 .mode = S_IRUGO | S_IWUSR,
2796 }, 2796 },
2797 .show = ipr_show_adapter_state, 2797 .show = ipr_show_adapter_state,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 820f91fb63ba..70a0f11f48b2 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3168,6 +3168,23 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
3168 uint8_t raw_mbox[sizeof(mbox_t)]; 3168 uint8_t raw_mbox[sizeof(mbox_t)];
3169 int rval; 3169 int rval;
3170 3170
3171 /*
3172 * Newer firmware on Dell CERC expect a different
3173 * random deletion handling, so disable it.
3174 */
3175 if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
3176 adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
3177 adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
3178 adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
3179 (adapter->fw_version[0] > '6' ||
3180 (adapter->fw_version[0] == '6' &&
3181 adapter->fw_version[2] > '6') ||
3182 (adapter->fw_version[0] == '6'
3183 && adapter->fw_version[2] == '6'
3184 && adapter->fw_version[3] > '1'))) {
3185 con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
3186 return 0;
3187 }
3171 3188
3172 mbox = (mbox_t *)raw_mbox; 3189 mbox = (mbox_t *)raw_mbox;
3173 3190
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 626459d1e902..c1d86d961a92 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -88,6 +88,7 @@
88#define PCI_SUBSYS_ID_PERC3_QC 0x0471 88#define PCI_SUBSYS_ID_PERC3_QC 0x0471
89#define PCI_SUBSYS_ID_PERC3_DC 0x0493 89#define PCI_SUBSYS_ID_PERC3_DC 0x0493
90#define PCI_SUBSYS_ID_PERC3_SC 0x0475 90#define PCI_SUBSYS_ID_PERC3_SC 0x0475
91#define PCI_SUBSYS_ID_CERC_ATA100_4CH 0x0511
91 92
92 93
93#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel 94#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index b937e9cddb23..7d84c8bbcf3f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.03.16-rc1 13 * Version : v00.00.03.20-rc1
14 * 14 *
15 * Authors: 15 * Authors:
16 * (email-id : megaraidlinux@lsi.com) 16 * (email-id : megaraidlinux@lsi.com)
@@ -2650,12 +2650,13 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
2650 return; 2650 return;
2651} 2651}
2652 2652
2653#ifdef CONFIG_PM
2653/** 2654/**
2654 * megasas_suspend - driver suspend entry point 2655 * megasas_suspend - driver suspend entry point
2655 * @pdev: PCI device structure 2656 * @pdev: PCI device structure
2656 * @state: PCI power state to suspend routine 2657 * @state: PCI power state to suspend routine
2657 */ 2658 */
2658static int __devinit 2659static int
2659megasas_suspend(struct pci_dev *pdev, pm_message_t state) 2660megasas_suspend(struct pci_dev *pdev, pm_message_t state)
2660{ 2661{
2661 struct Scsi_Host *host; 2662 struct Scsi_Host *host;
@@ -2687,7 +2688,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
2687 * megasas_resume- driver resume entry point 2688 * megasas_resume- driver resume entry point
2688 * @pdev: PCI device structure 2689 * @pdev: PCI device structure
2689 */ 2690 */
2690static int __devinit 2691static int
2691megasas_resume(struct pci_dev *pdev) 2692megasas_resume(struct pci_dev *pdev)
2692{ 2693{
2693 int rval; 2694 int rval;
@@ -2782,12 +2783,16 @@ fail_ready_state:
 
         return -ENODEV;
 }
+#else
+#define megasas_suspend NULL
+#define megasas_resume  NULL
+#endif
 
 /**
  * megasas_detach_one - PCI hot"un"plug entry point
  * @pdev: PCI device structure
  */
-static void megasas_detach_one(struct pci_dev *pdev)
+static void __devexit megasas_detach_one(struct pci_dev *pdev)
 {
         int i;
         struct Scsi_Host *host;
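A minimal sketch of the CONFIG_PM idiom introduced above: when power management is configured out, the suspend/resume handlers disappear entirely and the pci_driver hooks fall back to the NULL defines. The example_* names and the bare-bones pci_driver are illustrative, not the driver's actual registration code.

#include <linux/pci.h>

#ifdef CONFIG_PM
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        /* quiesce the adapter, save state, power the device down */
        return 0;
}

static int example_resume(struct pci_dev *pdev)
{
        /* power up, restore state, re-initialize the firmware */
        return 0;
}
#else
/* Without CONFIG_PM the handlers are compiled out entirely. */
#define example_suspend NULL
#define example_resume  NULL
#endif

static struct pci_driver example_pci_driver = {
        .name           = "example",
        .suspend        = example_suspend,
        .resume         = example_resume,
        /* .probe, .remove and .id_table omitted in this sketch */
};
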
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3a997eb457bf..b0c41e671702 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION         "00.00.03.16-rc1"
-#define MEGASAS_RELDATE         "Nov. 07, 2007"
-#define MEGASAS_EXT_VERSION     "Thu. Nov. 07 10:09:32 PDT 2007"
+#define MEGASAS_VERSION         "00.00.03.20-rc1"
+#define MEGASAS_RELDATE         "March 10, 2008"
+#define MEGASAS_EXT_VERSION     "Mon. March 10 11:02:31 PDT 2008"
 
 /*
  * Device IDs
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 09ab3eac1c1a..fa060932d2b4 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2858,7 +2858,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 
         /* Load SCSI command packet. */
         pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
-        memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd));
+        memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
         /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
 
         /* Set transfer direction. */
@@ -3127,7 +3127,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 
         /* Load SCSI command packet. */
         pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
-        memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd));
+        memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
 
         /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
         /* Set transfer direction. */
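The dropped '&' above is not cosmetic: CMD_CDBP() now expands to a pointer field rather than an in-struct array, so taking its address would hand memcpy the location of the pointer itself. A standalone illustration with simplified structures (not the qla1280 types):

#include <stdio.h>
#include <string.h>

struct old_cmd { unsigned char cmnd[16]; };   /* cmnd used to be an array */
struct new_cmd { unsigned char *cmnd; };      /* now it is a pointer */

int main(void)
{
        unsigned char cdb[16] = { 0x2a };       /* WRITE(10) */
        unsigned char dst[16];
        struct old_cmd o;
        struct new_cmd n = { cdb };

        memcpy(o.cmnd, cdb, sizeof(cdb));
        memcpy(dst, &o.cmnd, sizeof(dst));      /* array: '&' still yields the data */
        printf("old layout, with '&':    0x%02x\n", dst[0]);

        memcpy(dst, n.cmnd, sizeof(dst));       /* pointer: must not take '&' */
        printf("new layout, without '&': 0x%02x\n", dst[0]);
        /* memcpy(dst, &n.cmnd, 16) would copy the pointer's own bytes instead */
        return 0;
}
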
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 12d69d7c8577..110e776d1a07 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -79,15 +79,6 @@ static void scsi_done(struct scsi_cmnd *cmd);
 #define MIN_RESET_PERIOD (15*HZ)
 
 /*
- * Macro to determine the size of SCSI command. This macro takes vendor
- * unique commands into account. SCSI commands in groups 6 and 7 are
- * vendor unique and we will depend upon the command length being
- * supplied correctly in cmd_len.
- */
-#define CDB_SIZE(cmd)   (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
-                                COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
-
-/*
  * Note - the initial logging level can be set here to log events at boot time.
  * After the system is up, you may enable logging via the /proc interface.
  */
@@ -469,6 +460,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
                 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
                 if (!cmd) {
                         scsi_put_host_cmd_pool(gfp_mask);
+                        shost->cmd_pool = NULL;
                         return -ENOMEM;
                 }
                 list_add(&cmd->list, &shost->free_list);
@@ -481,6 +473,13 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
  */
 void scsi_destroy_command_freelist(struct Scsi_Host *shost)
 {
+        /*
+         * If cmd_pool is NULL the free list was not initialized, so
+         * do not attempt to release resources.
+         */
+        if (!shost->cmd_pool)
+                return;
+
         while (!list_empty(&shost->free_list)) {
                 struct scsi_cmnd *cmd;
 
@@ -701,9 +700,11 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
          * Before we queue this command, check if the command
          * length exceeds what the host adapter can handle.
          */
-        if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
+        if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
                 SCSI_LOG_MLQUEUE(3,
-                        printk("queuecommand : command too long.\n"));
+                        printk("queuecommand : command too long. "
+                               "cdb_size=%d host->max_cmd_len=%d\n",
+                               cmd->cmd_len, cmd->device->host->max_cmd_len));
                 cmd->result = (DID_ABORT << 16);
 
                 scsi_done(cmd);
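The two freelist hunks above follow one pattern: record a failed setup by clearing shost->cmd_pool so the teardown path can tell "never initialized" from "initialized". A userspace sketch of the same idiom, with invented names and malloc standing in for the command-pool calls:

#include <stdlib.h>

struct host {
        void *cmd_pool;         /* NULL means the freelist was never set up */
};

static int setup_freelist(struct host *h)
{
        void *cmd;

        h->cmd_pool = malloc(64);               /* grab the backing pool */
        if (!h->cmd_pool)
                return -1;

        cmd = malloc(32);                       /* pre-allocate one command */
        if (!cmd) {
                free(h->cmd_pool);              /* give the pool back... */
                h->cmd_pool = NULL;             /* ...and record that we did */
                return -1;
        }
        free(cmd);      /* the real code keeps it on shost->free_list */
        return 0;
}

static void destroy_freelist(struct host *h)
{
        if (!h->cmd_pool)                       /* setup never completed */
                return;
        free(h->cmd_pool);
        h->cmd_pool = NULL;
}

int main(void)
{
        struct host h = { 0 };

        destroy_freelist(&h);                   /* safe even before setup */
        if (setup_freelist(&h) == 0)
                destroy_freelist(&h);
        return 0;
}
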
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1eaba6cd80f4..eaf5a8add1ba 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -626,7 +626,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
  * @scmd:       SCSI command structure to hijack
  * @ses:        structure to save restore information
  * @cmnd:       CDB to send. Can be NULL if no new cmnd is needed
- * @cmnd_size:  size in bytes of @cmnd
+ * @cmnd_size:  size in bytes of @cmnd (must be <= BLK_MAX_CDB)
  * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
  *
  * This function is used to save a scsi command information before re-execution
@@ -648,12 +648,14 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
          * command.
          */
         ses->cmd_len = scmd->cmd_len;
-        memcpy(ses->cmnd, scmd->cmnd, sizeof(scmd->cmnd));
+        ses->cmnd = scmd->cmnd;
         ses->data_direction = scmd->sc_data_direction;
         ses->sdb = scmd->sdb;
         ses->next_rq = scmd->request->next_rq;
         ses->result = scmd->result;
 
+        scmd->cmnd = ses->eh_cmnd;
+        memset(scmd->cmnd, 0, BLK_MAX_CDB);
         memset(&scmd->sdb, 0, sizeof(scmd->sdb));
         scmd->request->next_rq = NULL;
 
@@ -665,14 +667,13 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
                 scmd->sdb.table.sgl = &ses->sense_sgl;
                 scmd->sc_data_direction = DMA_FROM_DEVICE;
                 scmd->sdb.table.nents = 1;
-                memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
                 scmd->cmnd[0] = REQUEST_SENSE;
                 scmd->cmnd[4] = scmd->sdb.length;
                 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
         } else {
                 scmd->sc_data_direction = DMA_NONE;
                 if (cmnd) {
-                        memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
+                        BUG_ON(cmnd_size > BLK_MAX_CDB);
                         memcpy(scmd->cmnd, cmnd, cmnd_size);
                         scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
                 }
@@ -705,7 +706,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
          * Restore original data
          */
         scmd->cmd_len = ses->cmd_len;
-        memcpy(scmd->cmnd, ses->cmnd, sizeof(scmd->cmnd));
+        scmd->cmnd = ses->cmnd;
         scmd->sc_data_direction = ses->data_direction;
         scmd->sdb = ses->sdb;
         scmd->request->next_rq = ses->next_rq;
@@ -1775,8 +1776,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
         scmd->request = &req;
         memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
 
-        memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
+        scmd->cmnd = req.cmd;
 
         scmd->scsi_done = scsi_reset_provider_done_command;
         memset(&scmd->sdb, 0, sizeof(scmd->sdb));
 
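Taken together, the scsi_error.c hunks switch the error handler from copying CDB bytes to swapping the cmnd pointer against a private eh_cmnd buffer. A compact userspace model of that save/hijack/restore cycle (field names trimmed down, BLK_MAX_CDB value assumed for the sketch):

#include <string.h>
#include <assert.h>

#define BLK_MAX_CDB 16  /* assumed value for the sketch */

struct cmd {
        unsigned char *cmnd;
        unsigned short cmd_len;
};

struct eh_save {
        unsigned char *cmnd;            /* saved pointer, not a byte copy */
        unsigned short cmd_len;
        unsigned char eh_cmnd[BLK_MAX_CDB];
};

static void eh_prep(struct cmd *c, struct eh_save *s,
                    const unsigned char *new_cdb, int new_len)
{
        s->cmnd = c->cmnd;                      /* remember the original CDB */
        s->cmd_len = c->cmd_len;

        c->cmnd = s->eh_cmnd;                   /* hijack with a private buffer */
        memset(c->cmnd, 0, BLK_MAX_CDB);
        assert(new_len <= BLK_MAX_CDB);         /* mirrors the BUG_ON() above */
        memcpy(c->cmnd, new_cdb, new_len);
        c->cmd_len = new_len;
}

static void eh_restore(struct cmd *c, const struct eh_save *s)
{
        c->cmnd = s->cmnd;                      /* original CDB was never touched */
        c->cmd_len = s->cmd_len;
}

int main(void)
{
        unsigned char orig[6] = { 0x12, 0, 0, 0, 36, 0 };       /* INQUIRY */
        unsigned char tur[6]  = { 0x00, 0, 0, 0, 0, 0 };        /* TEST UNIT READY */
        struct cmd c = { orig, 6 };
        struct eh_save s;

        eh_prep(&c, &s, tur, 6);
        eh_restore(&c, &s);
        assert(c.cmnd == orig && c.cmnd[0] == 0x12);
        return 0;
}
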
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d545ad1cf47a..a82d2fe80fb5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -445,7 +445,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
         scsi_set_resid(cmd, 0);
         memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
         if (cmd->cmd_len == 0)
-                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
+                cmd->cmd_len = scsi_command_size(cmd->cmnd);
 }
 
 void scsi_device_unbusy(struct scsi_device *sdev)
@@ -1094,6 +1094,8 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
         cmd->tag = req->tag;
         cmd->request = req;
 
+        cmd->cmnd = req->cmd;
+
         return cmd;
 }
 
@@ -1131,8 +1133,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
                 req->buffer = NULL;
         }
 
-        BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
-        memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
         cmd->cmd_len = req->cmd_len;
         if (!req->data_len)
                 cmd->sc_data_direction = DMA_NONE;
@@ -1169,6 +1169,7 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
         if (unlikely(!cmd))
                 return BLKPREP_DEFER;
 
+        memset(cmd->cmnd, 0, BLK_MAX_CDB);
         return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
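The scsi_lib.c changes all serve one point: the command now borrows the request's cmd[] buffer instead of owning a copy, so BLOCK_PC requests need no memcpy while filesystem requests must zero the shared buffer before a CDB is built in it. A toy model with simplified types (not the kernel structures):

#include <string.h>

#define BLK_MAX_CDB 16          /* assumed for the sketch */

struct request   { unsigned char cmd[BLK_MAX_CDB]; unsigned short cmd_len; };
struct scsi_cmnd { unsigned char *cmnd; unsigned short cmd_len; };

static void get_cmd_from_req(struct scsi_cmnd *cmd, struct request *rq)
{
        cmd->cmnd = rq->cmd;            /* alias the request buffer, no memcpy */
}

static void setup_blk_pc_cmnd(struct scsi_cmnd *cmd, struct request *rq)
{
        get_cmd_from_req(cmd, rq);
        cmd->cmd_len = rq->cmd_len;     /* CDB already filled in by the submitter */
}

static void setup_fs_cmnd(struct scsi_cmnd *cmd, struct request *rq)
{
        get_cmd_from_req(cmd, rq);
        memset(cmd->cmnd, 0, BLK_MAX_CDB);      /* ULD will build the CDB here */
}

int main(void)
{
        struct request rq = { { 0x2a }, 10 };   /* WRITE(10) from a BLOCK_PC user */
        struct scsi_cmnd cmd;

        setup_blk_pc_cmnd(&cmd, &rq);           /* cmd.cmnd and rq.cmd now alias */
        setup_fs_cmnd(&cmd, &rq);               /* zeroes the shared buffer */
        return cmd.cmnd == rq.cmd && cmd.cmnd[0] == 0 ? 0 : 1;
}
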
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index ee8496aa0336..257e097c39af 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -107,6 +107,8 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
         cmd->jiffies_at_alloc = jiffies;
         cmd->request = rq;
 
+        cmd->cmnd = rq->cmd;
+
         rq->special = cmd;
         rq->cmd_type = REQ_TYPE_SPECIAL;
         rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 640333b1e75c..329eb8780e74 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -744,7 +744,8 @@ static int wait_on_busy(unsigned long iobase, unsigned int loop) {
 static int board_inquiry(unsigned int j) {
    struct mscp *cpp;
    dma_addr_t id_dma_addr;
-   unsigned int time, limit = 0;
+   unsigned int limit = 0;
+   unsigned long time;
 
    id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id,
                   sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL);
@@ -1392,7 +1393,8 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
 }
 
 static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
-   unsigned int i, j, time, k, c, limit = 0;
+   unsigned int i, j, k, c, limit = 0;
+   unsigned long time;
    int arg_done = FALSE;
    struct scsi_cmnd *SCpnt;
 
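Both u14-34f hunks give 'time' the same width as jiffies. A small userspace demonstration of why a 32-bit holder for a jiffies-derived deadline misbehaves once the counter exceeds 32 bits (assumes a 64-bit unsigned long; HZ value is illustrative):

#include <stdio.h>

#define HZ 1000

int main(void)
{
        unsigned long jiffies = 0x100000000UL;  /* pretend uptime above 32 bits */
        unsigned int  bad_deadline  = jiffies + 10 * HZ;        /* silently truncated */
        unsigned long good_deadline = jiffies + 10 * HZ;

        printf("truncated deadline looks expired: %s\n",
               jiffies > bad_deadline ? "yes (bogus timeout)" : "no");
        printf("full-width deadline expired:      %s\n",
               jiffies > good_deadline ? "yes" : "no");
        return 0;
}
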
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index d88824b3511c..898e67d30e56 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -46,7 +46,7 @@ void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
         }
 
         memcpy(save_cmnd, srb->cmnd, sizeof(save_cmnd));
-        memset(srb->cmnd, 0, sizeof(srb->cmnd));
+        memset(srb->cmnd, 0, MAX_COMMAND_SIZE);
 
         /* check if we support the command */
         if (save_cmnd[1] >> 5) /* MULTIPLE_COUNT */
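The sizeof() change above guards against a subtle regression: with cmnd now a pointer, sizeof(srb->cmnd) is the pointer size, not the CDB size, so the memset must name MAX_COMMAND_SIZE explicitly. Standalone demonstration with simplified structs:

#include <stdio.h>

#define MAX_COMMAND_SIZE 16

struct old_cmd { unsigned char cmnd[MAX_COMMAND_SIZE]; };
struct new_cmd { unsigned char *cmnd; };

int main(void)
{
        struct old_cmd o;
        struct new_cmd n;

        printf("sizeof(array field):   %zu\n", sizeof(o.cmnd));  /* 16 */
        printf("sizeof(pointer field): %zu\n", sizeof(n.cmnd));  /* 4 or 8 */
        printf("bytes that must be cleared: %d\n", MAX_COMMAND_SIZE);
        return 0;
}
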
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 971d13dd5e65..3addcd8f827b 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -292,6 +292,7 @@ struct isd200_info {
 
         /* maximum number of LUNs supported */
         unsigned char MaxLUNs;
+        unsigned char cmnd[BLK_MAX_CDB];
         struct scsi_cmnd srb;
         struct scatterlist sg;
 };
@@ -450,6 +451,7 @@ static int isd200_action( struct us_data *us, int action,
 
         memset(&ata, 0, sizeof(ata));
         memset(&srb_dev, 0, sizeof(srb_dev));
+        srb->cmnd = info->cmnd;
         srb->device = &srb_dev;
         ++srb->serial_number;
 
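isd200 fabricates its own struct scsi_cmnd rather than getting one from the midlayer, so it must now also supply backing storage for the CDB. The pattern, reduced to a userspace sketch with simplified types (not the driver's real structures):

#include <string.h>

#define BLK_MAX_CDB 16                  /* assumed value for the sketch */

struct scsi_cmnd { unsigned char *cmnd; unsigned short cmd_len; };

struct private_info {
        unsigned char cmnd[BLK_MAX_CDB];        /* CDB storage the driver owns */
        struct scsi_cmnd srb;                   /* fabricated command */
};

static void init_fabricated_cmd(struct private_info *info)
{
        info->srb.cmnd = info->cmnd;            /* point the command at our buffer */
        memset(info->srb.cmnd, 0, BLK_MAX_CDB);
        info->srb.cmnd[0] = 0x12;               /* e.g. INQUIRY */
        info->srb.cmd_len = 6;
}

int main(void)
{
        struct private_info info;

        init_fabricated_cmd(&info);
        return info.srb.cmnd[0] == 0x12 ? 0 : 1;
}
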
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 1f74bcd603fe..32742c4563de 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -30,13 +30,6 @@
 #endif
 
 /*
- * SCSI command lengths
- */
-
-extern const unsigned char scsi_command_size[8];
-#define COMMAND_SIZE(opcode) scsi_command_size[((opcode) >> 5) & 7]
-
-/*
  * Special value for scanning to specify scanning or rescanning of all
  * possible channels, (target) ids, or luns on a given shost.
  */
@@ -109,6 +102,7 @@ extern const unsigned char scsi_command_size[8];
 #define MODE_SENSE_10         0x5a
 #define PERSISTENT_RESERVE_IN 0x5e
 #define PERSISTENT_RESERVE_OUT 0x5f
+#define VARIABLE_LENGTH_CMD   0x7f
 #define REPORT_LUNS           0xa0
 #define MAINTENANCE_IN        0xa3
 #define MOVE_MEDIUM           0xa5
@@ -136,6 +130,38 @@ extern const unsigned char scsi_command_size[8];
 #define ATA_12                0xa1      /* 12-byte pass-thru */
 
 /*
+ * SCSI command lengths
+ */
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+
+/* defined in T10 SCSI Primary Commands-2 (SPC2) */
+struct scsi_varlen_cdb_hdr {
+        u8 opcode;        /* opcode always == VARIABLE_LENGTH_CMD */
+        u8 control;
+        u8 misc[5];
+        u8 additional_cdb_length;         /* total cdb length - 8 */
+        __be16 service_action;
+        /* service specific data follows */
+};
+
+static inline unsigned
+scsi_varlen_cdb_length(const void *hdr)
+{
+        return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
+}
+
+extern const unsigned char scsi_command_size_tbl[8];
+#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
+
+static inline unsigned
+scsi_command_size(const unsigned char *cmnd)
+{
+        return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+                scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
+}
+
+/*
  * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
  * T10/1561-D Revision 4 Draft dated 7th November 2002.
  */
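For reference, here is how the new helpers behave for a fixed-length and a variable-length CDB. The snippet inlines the definitions so it compiles on its own; the per-group sizes in the table are stated here as an assumption rather than quoted from constants.c.

#include <stdio.h>
#include <stdint.h>

#define VARIABLE_LENGTH_CMD 0x7f

struct scsi_varlen_cdb_hdr {
        uint8_t  opcode;                 /* always VARIABLE_LENGTH_CMD */
        uint8_t  control;
        uint8_t  misc[5];
        uint8_t  additional_cdb_length;  /* total cdb length - 8 */
        uint16_t service_action;         /* big-endian on the wire */
};

static const unsigned char scsi_command_size_tbl[8] = {
        6, 10, 10, 12, 16, 12, 10, 10    /* assumed per-group fixed sizes */
};
#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]

static unsigned scsi_command_size(const unsigned char *cmnd)
{
        const struct scsi_varlen_cdb_hdr *h =
                (const struct scsi_varlen_cdb_hdr *)(const void *)cmnd;

        return cmnd[0] == VARIABLE_LENGTH_CMD ?
                h->additional_cdb_length + 8 : COMMAND_SIZE(cmnd[0]);
}

int main(void)
{
        unsigned char read10[10] = { 0x28 };    /* READ(10): group 1 -> 10 bytes */
        unsigned char varlen[32] = { VARIABLE_LENGTH_CMD };

        varlen[7] = 32 - 8;                     /* additional_cdb_length */
        printf("READ(10) CDB length: %u\n", scsi_command_size(read10));
        printf("varlen   CDB length: %u\n", scsi_command_size(varlen));
        return 0;
}
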
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 8d20e60a94b7..3e46dfae8194 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -7,10 +7,28 @@
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/scatterlist.h>
+#include <linux/blkdev.h>
 
 struct Scsi_Host;
 struct scsi_device;
 
+/*
+ * MAX_COMMAND_SIZE is:
+ * The longest fixed-length SCSI CDB as per the SCSI standard.
+ * fixed-length means: commands that their size can be determined
+ * by their opcode and the CDB does not carry a length specifier, (unlike
+ * the VARIABLE_LENGTH_CMD(0x7f) command). This is actually not exactly
+ * true and the SCSI standard also defines extended commands and
+ * vendor specific commands that can be bigger than 16 bytes. The kernel
+ * will support these using the same infrastructure used for VARLEN CDB's.
+ * So in effect MAX_COMMAND_SIZE means the maximum size command scsi-ml
+ * supports without specifying a cmd_len by ULD's
+ */
+#define MAX_COMMAND_SIZE 16
+#if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
+# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
+#endif
+
 struct scsi_data_buffer {
         struct sg_table table;
         unsigned length;
@@ -60,12 +78,11 @@ struct scsi_cmnd {
         int allowed;
         int timeout_per_command;
 
-        unsigned char cmd_len;
+        unsigned short cmd_len;
         enum dma_data_direction sc_data_direction;
 
         /* These elements define the operation we are about to perform */
-#define MAX_COMMAND_SIZE        16
-        unsigned char cmnd[MAX_COMMAND_SIZE];
+        unsigned char *cmnd;
 
         struct timer_list eh_timeout;   /* Used to time out the command. */
 
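The #if guard added above ties the midlayer's fixed-CDB assumption to the block layer's buffer size at compile time. A self-contained illustration (BLK_MAX_CDB hard-coded here only so the fragment stands alone; in the header it comes from <linux/blkdev.h>):

#define BLK_MAX_CDB 16          /* normally provided by <linux/blkdev.h> */
#define MAX_COMMAND_SIZE 16

#if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
#endif

int main(void)
{
        /* Nothing happens at run time; the check fires during preprocessing. */
        return 0;
}
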
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index d3a133b4a072..2a9add21267d 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -75,11 +75,11 @@ struct scsi_eh_save {
         int result;
         enum dma_data_direction data_direction;
         unsigned char cmd_len;
-        unsigned char cmnd[MAX_COMMAND_SIZE];
+        unsigned char *cmnd;
         struct scsi_data_buffer sdb;
         struct request *next_rq;
-
         /* new command support */
+        unsigned char eh_cmnd[BLK_MAX_CDB];
         struct scatterlist sense_sgl;
 };
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index d967d6dc7a28..1834fdfe82a7 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -573,13 +573,11 @@ struct Scsi_Host {
         /*
          * The maximum length of SCSI commands that this host can accept.
          * Probably 12 for most host adapters, but could be 16 for others.
+         * or 260 if the driver supports variable length cdbs.
          * For drivers that don't set this field, a value of 12 is
-         * assumed. I am leaving this as a number rather than a bit
-         * because you never know what subsequent SCSI standards might do
-         * (i.e. could there be a 20 byte or a 24-byte command a few years
-         * down the road?).
+         * assumed.
          */
-        unsigned char max_cmd_len;
+        unsigned short max_cmd_len;
 
         int this_id;
         int can_queue;
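The widening of max_cmd_len follows directly from SCSI_MAX_VARLEN_CDB_SIZE: 260 cannot be represented in an unsigned char. A two-assignment demonstration (the compiler may warn about the deliberate overflow, which is the point):

#include <stdio.h>

#define SCSI_MAX_VARLEN_CDB_SIZE 260

int main(void)
{
        unsigned char  old_width = SCSI_MAX_VARLEN_CDB_SIZE;    /* wraps to 4 */
        unsigned short new_width = SCSI_MAX_VARLEN_CDB_SIZE;

        printf("unsigned char  stores %u\n", (unsigned)old_width);
        printf("unsigned short stores %u\n", (unsigned)new_width);
        return 0;
}
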