 drivers/scsi/Kconfig          |   3
 drivers/scsi/dpt/dpti_ioctl.h |  16
 drivers/scsi/dpt/dptsig.h     |   8
 drivers/scsi/dpt/sys_info.h   |   4
 drivers/scsi/dpt_i2o.c        | 280
 drivers/scsi/dpti.h           |   2
 6 files changed, 260 insertions(+), 53 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 99c57b0c1d54..46d7e400c8be 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -504,10 +504,9 @@ config SCSI_AIC7XXX_OLD
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
 
-# All the I2O code and drivers do not seem to be 64bit safe.
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
-	depends on !64BIT && SCSI && PCI && VIRT_TO_BUS
+	depends on SCSI && PCI && VIRT_TO_BUS
 	help
 	  This driver supports all of Adaptec's I2O based RAID controllers as
 	  well as the DPT SmartRaid V cards. This is an Adaptec maintained
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index cc784e8f6e9d..f60236721e0d 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -89,7 +89,7 @@ typedef struct {
   int        njobs;           /* # of jobs sent to HA */
   int        qdepth;          /* Controller queue depth. */
   int        wakebase;        /* mpx wakeup base index. */
-  uLONG      SGsize;          /* Scatter/Gather list size. */
+  uINT       SGsize;          /* Scatter/Gather list size. */
   unsigned   heads;           /* heads for drives on cntlr. */
   unsigned   sectors;         /* sectors for drives on cntlr. */
   uCHAR      do_drive32;      /* Flag for Above 16 MB Ability */
@@ -97,8 +97,8 @@ typedef struct {
   char       idPAL[4];        /* 4 Bytes Of The ID Pal */
   uCHAR      primary;         /* 1 For Primary, 0 For Secondary */
   uCHAR      eataVersion;     /* EATA Version */
-  uLONG      cpLength;        /* EATA Command Packet Length */
-  uLONG      spLength;        /* EATA Status Packet Length */
+  uINT       cpLength;        /* EATA Command Packet Length */
+  uINT       spLength;        /* EATA Status Packet Length */
   uCHAR      drqNum;          /* DRQ Index (0,5,6,7) */
   uCHAR      flag1;           /* EATA Flags 1 (Byte 9) */
   uCHAR      flag2;           /* EATA Flags 2 (Byte 30) */
@@ -107,23 +107,23 @@ typedef struct {
 typedef struct {
   uSHORT length;              // Remaining length of this
   uSHORT drvrHBAnum;          // Relative HBA # used by the driver
-  uLONG baseAddr;             // Base I/O address
+  uINT baseAddr;              // Base I/O address
   uSHORT blinkState;          // Blink LED state (0=Not in blink LED)
   uCHAR pciBusNum;            // PCI Bus # (Optional)
   uCHAR pciDeviceNum;         // PCI Device # (Optional)
   uSHORT hbaFlags;            // Miscellaneous HBA flags
   uSHORT Interrupt;           // Interrupt set for this device.
 #  if (defined(_DPT_ARC))
-  uLONG baseLength;
+  uINT baseLength;
   ADAPTER_OBJECT *AdapterObject;
   LARGE_INTEGER DmaLogicalAddress;
   PVOID DmaVirtualAddress;
   LARGE_INTEGER ReplyLogicalAddress;
   PVOID ReplyVirtualAddress;
 #  else
-  uLONG reserved1;            // Reserved for future expansion
-  uLONG reserved2;            // Reserved for future expansion
-  uLONG reserved3;            // Reserved for future expansion
+  uINT reserved1;             // Reserved for future expansion
+  uINT reserved2;             // Reserved for future expansion
+  uINT reserved3;             // Reserved for future expansion
 #  endif
 } drvrHBAinfo_S;
 
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
index 94bc894d1200..72c8992fdf21 100644
--- a/drivers/scsi/dpt/dptsig.h
+++ b/drivers/scsi/dpt/dptsig.h
@@ -33,11 +33,7 @@
 /* to make sure we are talking the same size under all OS's */
 typedef unsigned char sigBYTE;
 typedef unsigned short sigWORD;
-#if (defined(_MULTI_DATAMODEL) && defined(sun) && !defined(_ILP32))
-typedef uint32_t sigLONG;
-#else
-typedef unsigned long sigLONG;
-#endif
+typedef unsigned int sigINT;
 
 /*
  * use sigWORDLittleEndian for:
@@ -300,7 +296,7 @@ typedef struct dpt_sig {
     sigBYTE    dsFiletype;       /* type of file */
     sigBYTE    dsFiletypeFlags;  /* flags to specify load type, etc. */
     sigBYTE    dsOEM;            /* OEM file was created for */
-    sigLONG    dsOS;             /* which Operating systems */
+    sigINT     dsOS;             /* which Operating systems */
     sigWORD    dsCapabilities;   /* RAID levels, etc. */
     sigWORD    dsDeviceSupp;     /* Types of SCSI devices supported */
     sigWORD    dsAdapterSupp;    /* DPT adapter families supported */
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
index d23b70c8c768..a90c4cb8ea8b 100644
--- a/drivers/scsi/dpt/sys_info.h
+++ b/drivers/scsi/dpt/sys_info.h
@@ -145,8 +145,8 @@
   uCHAR      smartROMRevision;
   uSHORT     flags;                  /* See bit definitions above */
   uSHORT     conventionalMemSize;    /* in KB */
-  uLONG      extendedMemSize;        /* in KB */
-  uLONG      osType;                 /* Same as DPTSIG's definition */
+  uINT       extendedMemSize;        /* in KB */
+  uINT       osType;                 /* Same as DPTSIG's definition */
   uCHAR      osMajorVersion;
   uCHAR      osMinorVersion;         /* The OS version */
   uCHAR      osRevision;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 7b1a084ec94e..dc6b2d4a9aa1 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -111,10 +111,17 @@ static int sys_tbl_len;
 static adpt_hba* hba_chain = NULL;
 static int hba_count = 0;
 
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
 static const struct file_operations adpt_fops = {
 	.ioctl		= adpt_ioctl,
 	.open		= adpt_open,
-	.release	= adpt_close
+	.release	= adpt_close,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= compat_adpt_ioctl,
+#endif
 };
 
 /* Structures and definitions for synchronous message posting.
@@ -138,6 +145,11 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
  *============================================================================
  */
 
+static inline int dpt_dma64(adpt_hba *pHba)
+{
+	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
+}
+
 static inline u32 dma_high(dma_addr_t addr)
 {
 	return upper_32_bits(addr);
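
The dpt_dma64()/dma_high()/dma_low() helpers introduced here carry most of the conversion: dpt_dma64() is true only when dma_addr_t is wide enough to hold a 64-bit bus address and this particular controller was actually given a 64-bit DMA mask, so on 32-bit builds the 64-bit paths can be discarded at compile time, while dma_low()/dma_high() split a bus address into the two 32-bit words the I2O message format expects. A standalone sketch of that split (plain C, not driver code; the printf is only there to make the word order visible):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t dma_addr_t;	/* assume a platform with 64-bit bus addresses */

	static uint32_t dma_low(dma_addr_t addr)  { return (uint32_t)addr; }
	static uint32_t dma_high(dma_addr_t addr) { return (uint32_t)(addr >> 32); }

	int main(void)
	{
		dma_addr_t addr = 0x000000012345abcdULL;

		/* the message frame stores the low dword first, then the high dword */
		printf("low=%08x high=%08x\n", dma_low(addr), dma_high(addr));
		return 0;
	}
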
@@ -277,7 +289,7 @@ static int adpt_release(struct Scsi_Host *host)
 
 static void adpt_inquiry(adpt_hba* pHba)
 {
-	u32 msg[14];
+	u32 msg[17];
 	u32 *mptr;
 	u32 *lenptr;
 	int direction;
@@ -301,7 +313,10 @@ static void adpt_inquiry(adpt_hba* pHba)
 	direction = 0x00000000;
 	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
 
-	reqlen = 14;		// SINGLE SGE
+	if (dpt_dma64(pHba))
+		reqlen = 17;		// SINGLE SGE, 64 bit
+	else
+		reqlen = 14;		// SINGLE SGE, 32 bit
 	/* Stick the headers on */
 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
@@ -334,8 +349,16 @@ static void adpt_inquiry(adpt_hba* pHba)
 
 	/* Now fill in the SGList and command */
 	*lenptr = len;
-	*mptr++ = 0xD0000000|direction|len;
-	*mptr++ = addr;
+	if (dpt_dma64(pHba)) {
+		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+		*mptr++ = 1 << PAGE_SHIFT;
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = dma_low(addr);
+		*mptr++ = dma_high(addr);
+	} else {
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = addr;
+	}
 
 	// Send it on it's way
 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
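
In 64-bit mode the single-buffer SGL grows from two dwords to five: a two-dword element that the driver's own comment labels "Enable 64 bit" (0x7C in the top byte, followed by the page size), then the usual flag/length dword, then the bus address split into low and high dwords — which is why reqlen becomes 17 instead of 14. A hypothetical helper showing the same layout outside the driver (fill_single_sge() and its parameters are illustrative names, not part of the driver):

	#include <stdint.h>

	typedef uint64_t dma_addr_t;

	/* Append the single SGE for a buffer and return the dwords consumed. */
	static int fill_single_sge(uint32_t *mptr, int dma64, uint32_t direction,
				   uint32_t len, dma_addr_t addr, unsigned page_shift)
	{
		uint32_t *start = mptr;

		if (dma64) {
			*mptr++ = (0x7C << 24) + (2 << 16) + 0x02; /* "Enable 64 bit" element */
			*mptr++ = 1 << page_shift;                 /* page size */
			*mptr++ = 0xD0000000 | direction | len;    /* last element, end of buffer */
			*mptr++ = (uint32_t)addr;                  /* low dword */
			*mptr++ = (uint32_t)(addr >> 32);          /* high dword */
		} else {
			*mptr++ = 0xD0000000 | direction | len;
			*mptr++ = (uint32_t)addr;                  /* 32-bit bus address */
		}
		return mptr - start;                               /* 5 dwords vs 2 */
	}
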
@@ -628,6 +651,92 @@ stop_output:
 	return len;
 }
 
+/*
+ *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
+ */
+static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
+{
+	return (u32)cmd->serial_number;
+}
+
+/*
+ *	Go from a u32 'context' to a struct scsi_cmnd * .
+ *	This could probably be made more efficient.
+ */
+static struct scsi_cmnd *
+	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
+{
+	struct scsi_cmnd * cmd;
+	struct scsi_device * d;
+
+	if (context == 0)
+		return NULL;
+
+	spin_unlock(pHba->host->host_lock);
+	shost_for_each_device(d, pHba->host) {
+		unsigned long flags;
+		spin_lock_irqsave(&d->list_lock, flags);
+		list_for_each_entry(cmd, &d->cmd_list, list) {
+			if (((u32)cmd->serial_number == context)) {
+				spin_unlock_irqrestore(&d->list_lock, flags);
+				scsi_device_put(d);
+				spin_lock(pHba->host->host_lock);
+				return cmd;
+			}
+		}
+		spin_unlock_irqrestore(&d->list_lock, flags);
+	}
+	spin_lock(pHba->host->host_lock);
+
+	return NULL;
+}
+
+/*
+ *	Turn a pointer to ioctl reply data into an u32 'context'
+ */
+static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
+{
+#if BITS_PER_LONG == 32
+	return (u32)(unsigned long)reply;
+#else
+	ulong flags = 0;
+	u32 nr, i;
+
+	spin_lock_irqsave(pHba->host->host_lock, flags);
+	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
+	for (i = 0; i < nr; i++) {
+		if (pHba->ioctl_reply_context[i] == NULL) {
+			pHba->ioctl_reply_context[i] = reply;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+	if (i >= nr) {
+		kfree (reply);
+		printk(KERN_WARNING"%s: Too many outstanding "
+				"ioctl commands\n", pHba->name);
+		return (u32)-1;
+	}
+
+	return i;
+#endif
+}
+
+/*
+ *	Go from an u32 'context' to a pointer to ioctl reply data.
+ */
+static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
+{
+#if BITS_PER_LONG == 32
+	return (void *)(unsigned long)context;
+#else
+	void *p = pHba->ioctl_reply_context[context];
+	pHba->ioctl_reply_context[context] = NULL;
+
+	return p;
+#endif
+}
+
 /*===========================================================================
  *  Error Handling routines
  *===========================================================================
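
The helper functions added in this hunk exist because an I2O transaction context is a single 32-bit field in the reply frame, which can no longer carry a raw kernel pointer on 64-bit machines. SCSI commands are therefore identified by cmd->serial_number and looked up again when the reply arrives, and ioctl reply buffers go through a small per-HBA pointer table whose index is used as the context. The driver walks that table under host_lock; the sketch below leaves locking out and uses hypothetical names, purely to show the pattern:

	#include <stddef.h>
	#include <stdint.h>

	#define NCTX 4				/* mirrors ioctl_reply_context[4] in dpti.h */

	static void *ctx_table[NCTX];

	/* Store a pointer and hand back a small index that fits in 32 bits. */
	static uint32_t ptr_to_context(void *p)
	{
		uint32_t i;

		for (i = 0; i < NCTX; i++) {
			if (ctx_table[i] == NULL) {
				ctx_table[i] = p;
				return i;
			}
		}
		return (uint32_t)-1;		/* table full: caller backs off with -EBUSY */
	}

	/* Resolve the index and free the slot when the reply comes back. */
	static void *context_to_ptr(uint32_t ctx)
	{
		void *p = ctx_table[ctx];

		ctx_table[ctx] = NULL;
		return p;
	}
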
@@ -655,7 +764,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
 	msg[2] = 0;
 	msg[3]= 0;
-	msg[4] = (u32)cmd;
+	msg[4] = adpt_cmd_to_context(cmd);
 	if (pHba->host)
 		spin_lock_irq(pHba->host->host_lock);
 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -867,6 +976,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	u32 hba_map1_area_size = 0;
 	void __iomem *base_addr_virt = NULL;
 	void __iomem *msg_addr_virt = NULL;
+	int dma64 = 0;
 
 	int raptorFlag = FALSE;
 
@@ -880,7 +990,16 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	}
 
 	pci_set_master(pDev);
-	if (pci_set_dma_mask(pDev, DMA_32BIT_MASK))
+
+	/*
+	 *	See if we should enable dma64 mode.
+	 */
+	if (sizeof(dma_addr_t) > 4 &&
+	    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
+		if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
+			dma64 = 1;
+	}
+	if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
 		return -EINVAL;
 
 	/* adapter only supports message blocks below 4GB */
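
The probe order in this hunk matters: the driver first asks for a 64-bit DMA mask, but only commits to the wider (and longer) 64-bit SG format when dma_get_required_mask() says the machine actually has memory the controller could not reach with 32-bit addresses; otherwise it drops back to the 32-bit mask. A compressed sketch of that decision with stand-in callbacks (try_mask() and required_mask() are hypothetical, standing in for pci_set_dma_mask() and dma_get_required_mask()):

	#include <stdint.h>

	typedef uint64_t dma_addr_t;	/* in the kernel this width is config-dependent */

	static int want_dma64(int (*try_mask)(uint64_t), uint64_t (*required_mask)(void))
	{
		/* mirrors the driver: dma_addr_t must be wide enough and the 64-bit
		 * mask must be accepted before the required mask is even consulted */
		if (sizeof(dma_addr_t) > 4 && try_mask(~0ULL) == 0) {
			if (required_mask() > 0xffffffffULL)
				return 1;	/* memory above 4GB: use 64-bit SGLs */
		}
		return 0;			/* small machine: keep 32-bit addressing */
	}
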
@@ -906,6 +1025,25 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 		raptorFlag = TRUE;
 	}
 
+#if BITS_PER_LONG == 64
+	/*
+	 *	The original Adaptec 64 bit driver has this comment here:
+	 *	"x86_64 machines need more optimal mappings"
+	 *
+	 *	I assume some HBAs report ridiculously large mappings
+	 *	and we need to limit them on platforms with IOMMUs.
+	 */
+	if (raptorFlag == TRUE) {
+		if (hba_map0_area_size > 128)
+			hba_map0_area_size = 128;
+		if (hba_map1_area_size > 524288)
+			hba_map1_area_size = 524288;
+	} else {
+		if (hba_map0_area_size > 524288)
+			hba_map0_area_size = 524288;
+	}
+#endif
+
 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
 	if (!base_addr_virt) {
 		pci_release_regions(pDev);
@@ -968,16 +1106,22 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	pHba->state = DPTI_STATE_RESET;
 	pHba->pDev = pDev;
 	pHba->devices = NULL;
+	pHba->dma64 = dma64;
 
 	// Initializing the spinlocks
 	spin_lock_init(&pHba->state_lock);
 	spin_lock_init(&adpt_post_wait_lock);
 
 	if(raptorFlag == 0){
-		printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
-			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
+		printk(KERN_INFO "Adaptec I2O RAID controller"
+				 " %d at %p size=%x irq=%d%s\n",
+			hba_count-1, base_addr_virt,
+			hba_map0_area_size, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 	} else {
-		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
+		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
+			hba_count-1, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 		printk(KERN_INFO"   BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
 		printk(KERN_INFO"   BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
 	}
@@ -1030,6 +1174,8 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
 		iounmap(pHba->msg_addr_virt);
 	}
+	if(pHba->FwDebugBuffer_P)
+		iounmap(pHba->FwDebugBuffer_P);
 	if(pHba->hrt) {
 		dma_free_coherent(&pHba->pDev->dev,
 			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
@@ -1657,10 +1803,13 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	}
 	sg_offset = (msg[0]>>4)&0xf;
 	msg[2] = 0x40000000; // IOCTL context
-	msg[3] = (u32)reply;
+	msg[3] = adpt_ioctl_to_context(pHba, reply);
+	if (msg[3] == (u32)-1)
+		return -EBUSY;
+
 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
 	if(sg_offset) {
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
 		if (sg_count > pHba->sg_tablesize){
@@ -1689,15 +1838,15 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
 			/* Copy in the user's SG buffer if necessary */
 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
-				// TODO 64bit fix
-				if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
+				// sg_simple_element API is 32 bit
+				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
 					rcode = -EFAULT;
 					goto cleanup;
 				}
 			}
-			//TODO 64bit fix
-			sg[i].addr_bus = (u32)virt_to_bus(p);
+			/* sg_simple_element API is 32 bit, but addr < 4GB */
+			sg[i].addr_bus = addr;
 		}
 	}
 
@@ -1725,7 +1874,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	if(sg_offset) {
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		struct sg_simple_element* sg;
 		int sg_size;
 
@@ -1745,14 +1894,14 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 		}
 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
 
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		sg = (struct sg_simple_element*)(msg + sg_offset);
 		for (j = 0; j < sg_count; j++) {
 			/* Copy out the SG list to user's buffer if necessary */
 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
 				sg_size = sg[j].flag_count & 0xffffff;
-				// TODO 64bit fix
-				if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
+				// sg_simple_element API is 32 bit
+				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
 					rcode = -EFAULT;
 					goto cleanup;
@@ -1972,6 +2121,38 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
 	return error;
 }
 
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct inode *inode;
+	long ret;
+
+	inode = file->f_dentry->d_inode;
+
+	lock_kernel();
+
+	switch(cmd) {
+		case DPT_SIGNATURE:
+		case I2OUSRCMD:
+		case DPT_CTRLINFO:
+		case DPT_SYSINFO:
+		case DPT_BLINKLED:
+		case I2ORESETCMD:
+		case I2ORESCANCMD:
+		case (DPT_TARGET_BUSY & 0xFFFF):
+		case DPT_TARGET_BUSY:
+			ret = adpt_ioctl(inode, file, cmd, arg);
+			break;
+		default:
+			ret = -ENOIOCTLCMD;
+	}
+
+	unlock_kernel();
+
+	return ret;
+}
+#endif
 
 static irqreturn_t adpt_isr(int irq, void *dev_id)
 {
@@ -2032,7 +2213,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
 			}
 			context = readl(reply+8);
 			if(context & 0x40000000){ // IOCTL
-				void *p = (void *)readl(reply+12);
+				void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
 				if( p != NULL) {
 					memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
 				}
@@ -2046,14 +2227,15 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
 				status = I2O_POST_WAIT_OK;
 			}
 			if(!(context & 0x40000000)) {
-				cmd = (struct scsi_cmnd*) readl(reply+12);
+				cmd = adpt_cmd_from_context(pHba,
+							readl(reply+12));
 				if(cmd != NULL) {
 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
 				}
 			}
 			adpt_i2o_post_wait_complete(context, status);
 		} else { // SCSI message
-			cmd = (struct scsi_cmnd*) readl(reply+12);
+			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
 			if(cmd != NULL){
 				scsi_dma_unmap(cmd);
 				if(cmd->serial_number != 0) { // If not timedout
@@ -2076,6 +2258,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	int i;
 	u32 msg[MAX_MESSAGE_SIZE];
 	u32* mptr;
+	u32* lptr;
 	u32 *lenptr;
 	int direction;
 	int scsidir;
@@ -2083,6 +2266,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	u32 len;
 	u32 reqlen;
 	s32 rcode;
+	dma_addr_t addr;
 
 	memset(msg, 0 , sizeof(msg));
2088 len = scsi_bufflen(cmd); 2272 len = scsi_bufflen(cmd);
@@ -2122,7 +2306,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2122 // I2O_CMD_SCSI_EXEC 2306 // I2O_CMD_SCSI_EXEC
2123 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); 2307 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2124 msg[2] = 0; 2308 msg[2] = 0;
2125 msg[3] = (u32)cmd; /* We want the SCSI control block back */ 2309 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2126 // Our cards use the transaction context as the tag for queueing 2310 // Our cards use the transaction context as the tag for queueing
2127 // Adaptec/DPT Private stuff 2311 // Adaptec/DPT Private stuff
2128 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); 2312 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2140,7 +2324,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
 	mptr+=4;
 	lenptr=mptr++;		/* Remember me - fill in when we know */
-	reqlen = 14;		// SINGLE SGE
+	if (dpt_dma64(pHba)) {
+		reqlen = 16;		// SINGLE SGE
+		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+		*mptr++ = 1 << PAGE_SHIFT;
+	} else {
+		reqlen = 14;		// SINGLE SGE
+	}
 	/* Now fill in the SGList and command */
 
 	nseg = scsi_dma_map(cmd);
@@ -2150,12 +2340,16 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 
 	len = 0;
 	scsi_for_each_sg(cmd, sg, nseg, i) {
+		lptr = mptr;
 		*mptr++ = direction|0x10000000|sg_dma_len(sg);
 		len+=sg_dma_len(sg);
-		*mptr++ = sg_dma_address(sg);
+		addr = sg_dma_address(sg);
+		*mptr++ = dma_low(addr);
+		if (dpt_dma64(pHba))
+			*mptr++ = dma_high(addr);
 		/* Make this an end of list */
 		if (i == nseg - 1)
-			mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
+			*lptr = direction|0xD0000000|sg_dma_len(sg);
 	}
 	reqlen = mptr - msg;
 	*lenptr = len;
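
The new lptr bookkeeping in this hunk replaces the old mptr[-2] trick: each scatter/gather element is now either two or three dwords depending on dpt_dma64(), so a fixed negative offset no longer lands on the flag/length dword of the last element. A standalone version of the loop, with seg_addr[]/seg_len[] as hypothetical stand-ins for sg_dma_address()/sg_dma_len():

	#include <stdint.h>

	typedef uint64_t dma_addr_t;

	/* Build the SG list and return the number of dwords written. */
	static int build_sg(uint32_t *mptr, int dma64, uint32_t direction,
			    const dma_addr_t *seg_addr, const uint32_t *seg_len,
			    int nseg)
	{
		uint32_t *start = mptr, *lptr;
		int i;

		for (i = 0; i < nseg; i++) {
			lptr = mptr;	/* flag/length dword of this element */
			*mptr++ = direction | 0x10000000 | seg_len[i];
			*mptr++ = (uint32_t)seg_addr[i];
			if (dma64)
				*mptr++ = (uint32_t)(seg_addr[i] >> 32);
			if (i == nseg - 1)	/* mark the final element */
				*lptr = direction | 0xD0000000 | seg_len[i];
		}
		return mptr - start;
	}
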
@@ -2824,7 +3018,17 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 	}
 
 	// Calculate the Scatter Gather list size
-	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
+	if (dpt_dma64(pHba)) {
+		pHba->sg_tablesize
+		  = ((pHba->status_block->inbound_frame_size * 4
+		      - 14 * sizeof(u32))
+		     / (sizeof(struct sg_simple_element) + sizeof(u32)));
+	} else {
+		pHba->sg_tablesize
+		  = ((pHba->status_block->inbound_frame_size * 4
+		      - 12 * sizeof(u32))
+		     / sizeof(struct sg_simple_element));
+	}
 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
 	}
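
The sg_tablesize arithmetic in this hunk works in bytes: inbound_frame_size is reported in dwords, the fixed message header in front of the SG list takes 12 dwords (14 once the two-dword "Enable 64 bit" element is counted in, as I read it), and each element costs sizeof(struct sg_simple_element) == 8 bytes plus an extra 4-byte high-address dword in 64-bit mode. A worked example, assuming a hypothetical 32-dword (128-byte) inbound frame:

	#include <stdio.h>

	int main(void)
	{
		unsigned frame_bytes = 32 * 4;	/* assumed inbound_frame_size of 32 dwords */
		unsigned sge32 = 8;		/* flag_count + addr_bus */
		unsigned sge64 = 8 + 4;		/* plus the high address dword */

		printf("32-bit SGEs per frame: %u\n", (frame_bytes - 12 * 4) / sge32);	/* 10 */
		printf("64-bit SGEs per frame: %u\n", (frame_bytes - 14 * 4) / sge64);	/* 6 */
		return 0;
	}
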
@@ -2916,13 +3120,19 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
 		pHba->FwDebugBufferSize = buf[1];
-		pHba->FwDebugBuffer_P    = pHba->base_addr_virt + buf[0];
-		pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
-		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
-		pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
-		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
-		pHba->FwDebugBuffer_P += buf[2];
-		pHba->FwDebugFlags = 0;
+		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
+						pHba->FwDebugBufferSize);
+		if (pHba->FwDebugBuffer_P) {
+			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
+							FW_DEBUG_FLAGS_OFFSET;
+			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
+							FW_DEBUG_BLED_OFFSET;
+			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
+			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
+							FW_DEBUG_STR_LENGTH_OFFSET;
+			pHba->FwDebugBuffer_P += buf[2];
+			pHba->FwDebugFlags = 0;
+		}
 	}
 
 	return 0;
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 5181b92c9ddb..924cd5a51676 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -233,6 +233,7 @@ typedef struct _adpt_hba {
 	u8			top_scsi_channel;
 	u8			top_scsi_id;
 	u8			top_scsi_lun;
+	u8			dma64;
 
 	i2o_status_block*	status_block;
 	dma_addr_t		status_block_pa;
@@ -252,6 +253,7 @@ typedef struct _adpt_hba {
 	void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
 	void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
 	u32	FwDebugFlags;
+	u32	*ioctl_reply_context[4];
 } adpt_hba;
 
 struct sg_simple_element {