Diffstat (limited to 'drivers/scsi/ips.c')
-rw-r--r--  drivers/scsi/ips.c  152
1 file changed, 134 insertions(+), 18 deletions(-)
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 4cdd891781b1..749c95bb7df7 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -219,15 +219,12 @@ module_param(ips, charp, 0);
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
 #include <linux/blk.h>
 #include "sd.h"
-#define IPS_SG_ADDRESS(sg) ((sg)->address)
 #define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
 #define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
 #ifndef __devexit_p
 #define __devexit_p(x) x
 #endif
 #else
-#define IPS_SG_ADDRESS(sg) (page_address((sg)->page) ? \
-                                page_address((sg)->page)+(sg)->offset : NULL)
 #define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
 #define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
 #endif
@@ -358,6 +355,9 @@ static int ips_init_phase2(int index);
 static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
 static int ips_register_scsi(int index);
 
+static int ips_poll_for_flush_complete(ips_ha_t * ha);
+static void ips_flush_and_reset(ips_ha_t *ha);
+
 /*
  * global variables
  */
@@ -1125,8 +1125,8 @@ ips_queue(Scsi_Cmnd * SC, void (*done) (Scsi_Cmnd *))
                   SC->device->channel, SC->device->id, SC->device->lun);
 
         /* Check for command to initiator IDs */
-        if ((SC->device->channel > 0)
-            && (SC->device->id == ha->ha_id[SC->device->channel])) {
+        if ((scmd_channel(SC) > 0)
+            && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
                 SC->result = DID_NO_CONNECT << 16;
                 done(SC);
 
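This hunk (and the ips_next() hunk further down) replaces open-coded SC->device->channel / SC->device->id reads with the SCSI mid-layer's scmd_channel() and scmd_id() accessors. A minimal sketch of the same initiator-ID test, assuming the driver's existing Scsi_Cmnd and ips_ha_t types and includes; the helper name ips_cmd_targets_initiator is hypothetical and not part of the patch:

/* Illustrative sketch only -- not part of the patch. */
static int ips_cmd_targets_initiator(ips_ha_t *ha, Scsi_Cmnd *SC)
{
        /* scmd_channel()/scmd_id() return the channel and target id of a
         * command, replacing direct SC->device->channel / SC->device->id
         * dereferences in driver code. */
        return (scmd_channel(SC) > 0) &&
               (scmd_id(SC) == ha->ha_id[scmd_channel(SC)]);
}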
@@ -1605,6 +1605,8 @@ ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
 static int
 ips_is_passthru(Scsi_Cmnd * SC)
 {
+        unsigned long flags;
+
         METHOD_TRACE("ips_is_passthru", 1);
 
         if (!SC)
@@ -1622,10 +1624,20 @@ ips_is_passthru(Scsi_Cmnd * SC)
                         return 1;
                 else if (SC->use_sg) {
                         struct scatterlist *sg = SC->request_buffer;
-                        char *buffer = IPS_SG_ADDRESS(sg);
+                        char *buffer;
+
+                        /* kmap_atomic() ensures addressability of the user buffer.*/
+                        /* local_irq_save() protects the KM_IRQ0 address slot. */
+                        local_irq_save(flags);
+                        buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
                         if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
-                            buffer[2] == 'P' && buffer[3] == 'P')
+                            buffer[2] == 'P' && buffer[3] == 'P') {
+                                kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+                                local_irq_restore(flags);
                                 return 1;
+                        }
+                        kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+                        local_irq_restore(flags);
                 }
         }
         return 0;
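The kmap_atomic()/local_irq_save() sequence introduced here (and reused in the two buffer-copy hunks below) is the highmem-safe replacement for the removed IPS_SG_ADDRESS() macro. A minimal sketch of the pattern, assuming the pre-2.6.24 scatterlist layout (sg->page, sg->offset), the old two-argument kmap_atomic() with the KM_IRQ0 slot, and the headers ips.c already pulls in; the helper ips_sg_peek is hypothetical, not part of the patch:

#include <linux/highmem.h>

/* Copy the first 'len' bytes of one scatterlist element into 'dst'.
 * Hypothetical helper, shown only to illustrate the mapping pattern. */
static void ips_sg_peek(struct scatterlist *sg, void *dst, size_t len)
{
        unsigned long flags;
        char *buffer;

        /* KM_IRQ0 is a per-CPU kmap slot that interrupt handlers may also
         * use, so local interrupts stay disabled while the mapping is held. */
        local_irq_save(flags);
        buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
        memcpy(dst, buffer, len);
        /* kunmap_atomic() expects the address kmap_atomic() returned,
         * so the offset is subtracted back off before unmapping. */
        kunmap_atomic(buffer - sg->offset, KM_IRQ0);
        local_irq_restore(flags);
}

This is also why every kunmap_atomic() call in the patch passes buffer - sg->offset rather than buffer itself.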
@@ -2830,10 +2842,10 @@ ips_next(ips_ha_t * ha, int intr)
 
         p = ha->scb_waitlist.head;
         while ((p) && (scb = ips_getscb(ha))) {
-                if ((p->device->channel > 0)
+                if ((scmd_channel(p) > 0)
                     && (ha->
-                        dcdb_active[p->device->channel -
-                                    1] & (1 << p->device->id))) {
+                        dcdb_active[scmd_channel(p) -
+                                    1] & (1 << scmd_id(p)))) {
                         ips_freescb(ha, scb);
                         p = (Scsi_Cmnd *) p->host_scribble;
                         continue;
@@ -3656,14 +3668,21 @@ ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, unsigned
         int i;
         unsigned int min_cnt, xfer_cnt;
         char *cdata = (char *) data;
+        unsigned char *buffer;
+        unsigned long flags;
         struct scatterlist *sg = scmd->request_buffer;
         for (i = 0, xfer_cnt = 0;
              (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-                if (!IPS_SG_ADDRESS(&sg[i]))
-                        return;
                 min_cnt = min(count - xfer_cnt, sg[i].length);
-                memcpy(IPS_SG_ADDRESS(&sg[i]), &cdata[xfer_cnt],
-                       min_cnt);
+
+                /* kmap_atomic() ensures addressability of the data buffer.*/
+                /* local_irq_save() protects the KM_IRQ0 address slot. */
+                local_irq_save(flags);
+                buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+                memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+                kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+                local_irq_restore(flags);
+
                 xfer_cnt += min_cnt;
         }
 
@@ -3688,14 +3707,21 @@ ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned
         int i;
         unsigned int min_cnt, xfer_cnt;
         char *cdata = (char *) data;
+        unsigned char *buffer;
+        unsigned long flags;
         struct scatterlist *sg = scmd->request_buffer;
         for (i = 0, xfer_cnt = 0;
              (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-                if (!IPS_SG_ADDRESS(&sg[i]))
-                        return;
                 min_cnt = min(count - xfer_cnt, sg[i].length);
-                memcpy(&cdata[xfer_cnt], IPS_SG_ADDRESS(&sg[i]),
-                       min_cnt);
+
+                /* kmap_atomic() ensures addressability of the data buffer.*/
+                /* local_irq_save() protects the KM_IRQ0 address slot. */
+                local_irq_save(flags);
+                buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+                memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+                kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+                local_irq_restore(flags);
+
                 xfer_cnt += min_cnt;
         }
 
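After this change ips_scmd_buf_write() and ips_scmd_buf_read() repeat the same map/copy/unmap sequence and differ only in the memcpy() direction. A sketch of how the two loops could share one helper, under the same pre-2.6.24 scatterlist assumptions; the name ips_sg_copy and its to_sg flag are hypothetical, not something the patch adds:

/* Illustrative sketch only -- not part of the patch.
 * Copies between a flat buffer 'cdata' and the scatterlist 'sg';
 * to_sg != 0 copies into the scatterlist, otherwise out of it. */
static void ips_sg_copy(struct scatterlist *sg, int nents, char *cdata,
                        unsigned int count, int to_sg)
{
        unsigned int min_cnt, xfer_cnt = 0;
        unsigned long flags;
        unsigned char *buffer;
        int i;

        for (i = 0; (i < nents) && (xfer_cnt < count); i++) {
                min_cnt = min(count - xfer_cnt, sg[i].length);

                /* Map the element, copy, then unmap the original address. */
                local_irq_save(flags);
                buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
                if (to_sg)
                        memcpy(buffer, &cdata[xfer_cnt], min_cnt);
                else
                        memcpy(&cdata[xfer_cnt], buffer, min_cnt);
                kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
                local_irq_restore(flags);

                xfer_cnt += min_cnt;
        }
}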
@@ -4807,6 +4833,9 @@ ips_isinit_morpheus(ips_ha_t * ha)
         uint32_t bits;
 
         METHOD_TRACE("ips_is_init_morpheus", 1);
+
+        if (ips_isintr_morpheus(ha))
+                ips_flush_and_reset(ha);
 
         post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
         bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
@@ -4821,6 +4850,93 @@ ips_isinit_morpheus(ips_ha_t * ha)
 
 /****************************************************************************/
 /* */
+/* Routine Name: ips_flush_and_reset */
+/* */
+/* Routine Description: */
+/* */
+/* Perform cleanup ( FLUSH and RESET ) when the adapter is in an unknown */
+/* state ( was trying to INIT and an interrupt was already pending ) ... */
+/* */
+/****************************************************************************/
+static void
+ips_flush_and_reset(ips_ha_t *ha)
+{
+        ips_scb_t *scb;
+        int ret;
+        int time;
+        int done;
+        dma_addr_t command_dma;
+
+        /* Create a usable SCB */
+        scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+        if (scb) {
+                memset(scb, 0, sizeof(ips_scb_t));
+                ips_init_scb(ha, scb);
+                scb->scb_busaddr = command_dma;
+
+                scb->timeout = ips_cmd_timeout;
+                scb->cdb[0] = IPS_CMD_FLUSH;
+
+                scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
+                scb->cmd.flush_cache.command_id = IPS_MAX_CMDS;   /* Use an ID that would otherwise not exist */
+                scb->cmd.flush_cache.state = IPS_NORM_STATE;
+                scb->cmd.flush_cache.reserved = 0;
+                scb->cmd.flush_cache.reserved2 = 0;
+                scb->cmd.flush_cache.reserved3 = 0;
+                scb->cmd.flush_cache.reserved4 = 0;
+
+                ret = ips_send_cmd(ha, scb);   /* Send the Flush Command */
+
+                if (ret == IPS_SUCCESS) {
+                        time = 60 * IPS_ONE_SEC;   /* Max Wait time is 60 seconds */
+                        done = 0;
+
+                        while ((time > 0) && (!done)) {
+                                done = ips_poll_for_flush_complete(ha);
+                                /* This may look evil, but it's only done during extremely rare start-up conditions! */
+                                udelay(1000);
+                                time--;
+                        }
+                }
+        }
+
+        /* Now RESET and INIT the adapter */
+        (*ha->func.reset) (ha);
+
+        pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+        return;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_poll_for_flush_complete */
+/* */
+/* Routine Description: */
+/* */
+/* Poll for the Flush Command issued by ips_flush_and_reset() to complete */
+/* All other responses are just taken off the queue and ignored */
+/* */
+/****************************************************************************/
+static int
+ips_poll_for_flush_complete(ips_ha_t * ha)
+{
+        IPS_STATUS cstatus;
+
+        while (TRUE) {
+                cstatus.value = (*ha->func.statupd) (ha);
+
+                if (cstatus.value == 0xffffffff)   /* If No Interrupt to process */
+                        break;
+
+                /* Success is when we see the Flush Command ID */
+                if (cstatus.fields.command_id == IPS_MAX_CMDS)
+                        return 1;
+        }
+
+        return 0;
+}
+
+/****************************************************************************/
+/* */
 /* Routine Name: ips_enable_int_copperhead */
 /* */
 /* Routine Description: */