author	Miquel van Smoorenburg <miquels@cistron.nl>	2008-05-01 19:07:27 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-05-02 14:17:01 -0400
commit	62ac5aedc51485d672a5d91c262a001acecbe447 (patch)
tree	6ef44abca20c87312defd3486f5c5db82de36128 /drivers/scsi/dpt_i2o.c
parent	67af2b060e027c84b8e48d77e00b2369d997c0d4 (diff)
[SCSI] dpt_i2o: 64 bit support
This is the code to actually support 64 bit platforms. 64 bit DMA is
enabled on both x86_32 PAE and 64 bit platforms. This code is based in
part on the unofficial adaptec 64-bit dpt_i2o driver update that I got
from Mark Salyzyn at Adaptec.

Signed-off-by: Miquel van Smoorenburg <miquels@cistron.nl>
Acked-by: Mark Salyzyn <Mark_Salyzyn@adaptec.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
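In outline, the probe-time logic this patch adds is: attempt a 64-bit DMA mask, keep it only when the platform reports that memory above 4 GB actually needs addressing, and otherwise fall back to the 32-bit mask. Below is a minimal sketch of that flow using the 2008-era calls the patch itself relies on (`pci_set_dma_mask`, `dma_get_required_mask`, `DMA_64BIT_MASK`); the helper name `dpt_probe_dma64` is illustrative, not a function in the driver:

```c
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only -- dpt_probe_dma64() does not exist in the
 * driver; the body mirrors the hunk added to adpt_install_hba().
 * Returns 1 if 64-bit SG elements should be built, 0 for 32-bit DMA,
 * or -EINVAL if not even a 32-bit mask can be set.
 */
static int dpt_probe_dma64(struct pci_dev *pDev)
{
	/* dma_addr_t must be wide enough to carry a 64-bit bus address */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0 &&
	    dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
		return 1;	/* 64-bit DMA is available and worthwhile */

	/* fall back: the adapter must at least reach the low 4 GB */
	if (pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
		return -EINVAL;
	return 0;
}
```

The `dma_get_required_mask()` check avoids paying for three-dword SG elements on machines whose RAM all sits below 4 GB, where a 32-bit mask is sufficient anyway.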
Diffstat (limited to 'drivers/scsi/dpt_i2o.c')
-rw-r--r--	drivers/scsi/dpt_i2o.c	280
1 file changed, 245 insertions(+), 35 deletions(-)
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 7b1a084ec94e..dc6b2d4a9aa1 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -111,10 +111,17 @@ static int sys_tbl_len;
 static adpt_hba* hba_chain = NULL;
 static int hba_count = 0;
 
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
 static const struct file_operations adpt_fops = {
 	.ioctl = adpt_ioctl,
 	.open = adpt_open,
-	.release = adpt_close
+	.release = adpt_close,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_adpt_ioctl,
+#endif
 };
 
 /* Structures and definitions for synchronous message posting.
@@ -138,6 +145,11 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
  *============================================================================
  */
 
+static inline int dpt_dma64(adpt_hba *pHba)
+{
+	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
+}
+
 static inline u32 dma_high(dma_addr_t addr)
 {
 	return upper_32_bits(addr);
@@ -277,7 +289,7 @@ static int adpt_release(struct Scsi_Host *host)
 
 static void adpt_inquiry(adpt_hba* pHba)
 {
-	u32 msg[14];
+	u32 msg[17];
 	u32 *mptr;
 	u32 *lenptr;
 	int direction;
@@ -301,7 +313,10 @@ static void adpt_inquiry(adpt_hba* pHba)
 	direction = 0x00000000;
 	scsidir =0x40000000;	// DATA IN  (iop<--dev)
 
-	reqlen = 14;		// SINGLE SGE
+	if (dpt_dma64(pHba))
+		reqlen = 17;	// SINGLE SGE, 64 bit
+	else
+		reqlen = 14;	// SINGLE SGE, 32 bit
 	/* Stick the headers on */
 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
@@ -334,8 +349,16 @@ static void adpt_inquiry(adpt_hba* pHba)
 
 	/* Now fill in the SGList and command */
 	*lenptr = len;
-	*mptr++ = 0xD0000000|direction|len;
-	*mptr++ = addr;
+	if (dpt_dma64(pHba)) {
+		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+		*mptr++ = 1 << PAGE_SHIFT;
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = dma_low(addr);
+		*mptr++ = dma_high(addr);
+	} else {
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = addr;
+	}
 
 	// Send it on it's way
 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
@@ -628,6 +651,92 @@ stop_output:
 	return len;
 }
 
+/*
+ * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
+ */
+static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
+{
+	return (u32)cmd->serial_number;
+}
+
+/*
+ * Go from a u32 'context' to a struct scsi_cmnd * .
+ * This could probably be made more efficient.
+ */
+static struct scsi_cmnd *
+	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
+{
+	struct scsi_cmnd * cmd;
+	struct scsi_device * d;
+
+	if (context == 0)
+		return NULL;
+
+	spin_unlock(pHba->host->host_lock);
+	shost_for_each_device(d, pHba->host) {
+		unsigned long flags;
+		spin_lock_irqsave(&d->list_lock, flags);
+		list_for_each_entry(cmd, &d->cmd_list, list) {
+			if (((u32)cmd->serial_number == context)) {
+				spin_unlock_irqrestore(&d->list_lock, flags);
+				scsi_device_put(d);
+				spin_lock(pHba->host->host_lock);
+				return cmd;
+			}
+		}
+		spin_unlock_irqrestore(&d->list_lock, flags);
+	}
+	spin_lock(pHba->host->host_lock);
+
+	return NULL;
+}
+
+/*
+ * Turn a pointer to ioctl reply data into an u32 'context'
+ */
+static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
+{
+#if BITS_PER_LONG == 32
+	return (u32)(unsigned long)reply;
+#else
+	ulong flags = 0;
+	u32 nr, i;
+
+	spin_lock_irqsave(pHba->host->host_lock, flags);
+	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
+	for (i = 0; i < nr; i++) {
+		if (pHba->ioctl_reply_context[i] == NULL) {
+			pHba->ioctl_reply_context[i] = reply;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(pHba->host->host_lock, flags);
+	if (i >= nr) {
+		kfree (reply);
+		printk(KERN_WARNING"%s: Too many outstanding "
+				"ioctl commands\n", pHba->name);
+		return (u32)-1;
+	}
+
+	return i;
+#endif
+}
+
+/*
+ * Go from an u32 'context' to a pointer to ioctl reply data.
+ */
+static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
+{
+#if BITS_PER_LONG == 32
+	return (void *)(unsigned long)context;
+#else
+	void *p = pHba->ioctl_reply_context[context];
+	pHba->ioctl_reply_context[context] = NULL;
+
+	return p;
+#endif
+}
+
 /*===========================================================================
  *  Error Handling routines
  *===========================================================================
@@ -655,7 +764,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
 	msg[2] = 0;
 	msg[3]= 0;
-	msg[4] = (u32)cmd;
+	msg[4] = adpt_cmd_to_context(cmd);
 	if (pHba->host)
 		spin_lock_irq(pHba->host->host_lock);
 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -867,6 +976,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	u32 hba_map1_area_size = 0;
 	void __iomem *base_addr_virt = NULL;
 	void __iomem *msg_addr_virt = NULL;
+	int dma64 = 0;
 
 	int raptorFlag = FALSE;
 
@@ -880,7 +990,16 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	}
 
 	pci_set_master(pDev);
-	if (pci_set_dma_mask(pDev, DMA_32BIT_MASK))
+
+	/*
+	 *	See if we should enable dma64 mode.
+	 */
+	if (sizeof(dma_addr_t) > 4 &&
+	    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
+		if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
+			dma64 = 1;
+	}
+	if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
 		return -EINVAL;
 
 	/* adapter only supports message blocks below 4GB */
@@ -906,6 +1025,25 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 		raptorFlag = TRUE;
 	}
 
+#if BITS_PER_LONG == 64
+	/*
+	 *	The original Adaptec 64 bit driver has this comment here:
+	 *	"x86_64 machines need more optimal mappings"
+	 *
+	 *	I assume some HBAs report ridiculously large mappings
+	 *	and we need to limit them on platforms with IOMMUs.
+	 */
+	if (raptorFlag == TRUE) {
+		if (hba_map0_area_size > 128)
+			hba_map0_area_size = 128;
+		if (hba_map1_area_size > 524288)
+			hba_map1_area_size = 524288;
+	} else {
+		if (hba_map0_area_size > 524288)
+			hba_map0_area_size = 524288;
+	}
+#endif
+
 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
 	if (!base_addr_virt) {
 		pci_release_regions(pDev);
@@ -968,16 +1106,22 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	pHba->state = DPTI_STATE_RESET;
 	pHba->pDev = pDev;
 	pHba->devices = NULL;
+	pHba->dma64 = dma64;
 
 	// Initializing the spinlocks
 	spin_lock_init(&pHba->state_lock);
 	spin_lock_init(&adpt_post_wait_lock);
 
 	if(raptorFlag == 0){
-		printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
-			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
+		printk(KERN_INFO "Adaptec I2O RAID controller"
+				 " %d at %p size=%x irq=%d%s\n",
+			hba_count-1, base_addr_virt,
+			hba_map0_area_size, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 	} else {
-		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
+		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
+			hba_count-1, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 		printk(KERN_INFO"   BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
 		printk(KERN_INFO"   BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
 	}
@@ -1030,6 +1174,8 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
 		iounmap(pHba->msg_addr_virt);
 	}
+	if(pHba->FwDebugBuffer_P)
+		iounmap(pHba->FwDebugBuffer_P);
 	if(pHba->hrt) {
 		dma_free_coherent(&pHba->pDev->dev,
 			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
@@ -1657,10 +1803,13 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	}
 	sg_offset = (msg[0]>>4)&0xf;
 	msg[2] = 0x40000000; // IOCTL context
-	msg[3] = (u32)reply;
+	msg[3] = adpt_ioctl_to_context(pHba, reply);
+	if (msg[3] == (u32)-1)
+		return -EBUSY;
+
 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
 	if(sg_offset) {
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
 		if (sg_count > pHba->sg_tablesize){
@@ -1689,15 +1838,15 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
 			/* Copy in the user's SG buffer if necessary */
 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
-				// TODO 64bit fix
-				if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
+				// sg_simple_element API is 32 bit
+				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
 					rcode = -EFAULT;
 					goto cleanup;
 				}
 			}
-			//TODO 64bit fix
-			sg[i].addr_bus = (u32)virt_to_bus(p);
+			/* sg_simple_element API is 32 bit, but addr < 4GB */
+			sg[i].addr_bus = addr;
 		}
 	}
 
@@ -1725,7 +1874,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	if(sg_offset) {
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		struct sg_simple_element* sg;
 		int sg_size;
 
@@ -1745,14 +1894,14 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 		}
 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
 
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		sg = (struct sg_simple_element*)(msg + sg_offset);
 		for (j = 0; j < sg_count; j++) {
 			/* Copy out the SG list to user's buffer if necessary */
 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
 				sg_size = sg[j].flag_count & 0xffffff;
-				// TODO 64bit fix
-				if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
+				// sg_simple_element API is 32 bit
+				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
 					rcode = -EFAULT;
 					goto cleanup;
@@ -1972,6 +2121,38 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
 	return error;
 }
 
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct inode *inode;
+	long ret;
+
+	inode = file->f_dentry->d_inode;
+
+	lock_kernel();
+
+	switch(cmd) {
+	case DPT_SIGNATURE:
+	case I2OUSRCMD:
+	case DPT_CTRLINFO:
+	case DPT_SYSINFO:
+	case DPT_BLINKLED:
+	case I2ORESETCMD:
+	case I2ORESCANCMD:
+	case (DPT_TARGET_BUSY & 0xFFFF):
+	case DPT_TARGET_BUSY:
+		ret = adpt_ioctl(inode, file, cmd, arg);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+	}
+
+	unlock_kernel();
+
+	return ret;
+}
+#endif
 
 static irqreturn_t adpt_isr(int irq, void *dev_id)
 {
@@ -2032,7 +2213,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
 		}
 		context = readl(reply+8);
 		if(context & 0x40000000){ // IOCTL
-			void *p = (void *)readl(reply+12);
+			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
 			if( p != NULL) {
 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
 			}
@@ -2046,14 +2227,15 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
 				status = I2O_POST_WAIT_OK;
 			}
 			if(!(context & 0x40000000)) {
-				cmd = (struct scsi_cmnd*) readl(reply+12);
+				cmd = adpt_cmd_from_context(pHba,
+							readl(reply+12));
 				if(cmd != NULL) {
 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
 				}
 			}
 			adpt_i2o_post_wait_complete(context, status);
 		} else { // SCSI message
-			cmd = (struct scsi_cmnd*) readl(reply+12);
+			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
 			if(cmd != NULL){
 				scsi_dma_unmap(cmd);
 				if(cmd->serial_number != 0) { // If not timedout
@@ -2076,6 +2258,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	int i;
 	u32 msg[MAX_MESSAGE_SIZE];
 	u32* mptr;
+	u32* lptr;
 	u32 *lenptr;
 	int direction;
 	int scsidir;
@@ -2083,6 +2266,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	u32 len;
 	u32 reqlen;
 	s32 rcode;
+	dma_addr_t addr;
 
 	memset(msg, 0 , sizeof(msg));
 	len = scsi_bufflen(cmd);
@@ -2122,7 +2306,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	// I2O_CMD_SCSI_EXEC
 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
 	msg[2] = 0;
-	msg[3] = (u32)cmd;	/* We want the SCSI control block back */
+	msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
 	// Our cards use the transaction context as the tag for queueing
 	// Adaptec/DPT Private stuff
 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2140,7 +2324,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
 	mptr+=4;
 	lenptr=mptr++;		/* Remember me - fill in when we know */
-	reqlen = 14;		// SINGLE SGE
+	if (dpt_dma64(pHba)) {
+		reqlen = 16;		// SINGLE SGE
+		*mptr++ = (0x7C<<24)+(2<<16)+0x02;	/* Enable 64 bit */
+		*mptr++ = 1 << PAGE_SHIFT;
+	} else {
+		reqlen = 14;		// SINGLE SGE
+	}
 	/* Now fill in the SGList and command */
 
 	nseg = scsi_dma_map(cmd);
@@ -2150,12 +2340,16 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 
 		len = 0;
 		scsi_for_each_sg(cmd, sg, nseg, i) {
+			lptr = mptr;
 			*mptr++ = direction|0x10000000|sg_dma_len(sg);
 			len+=sg_dma_len(sg);
-			*mptr++ = sg_dma_address(sg);
+			addr = sg_dma_address(sg);
+			*mptr++ = dma_low(addr);
+			if (dpt_dma64(pHba))
+				*mptr++ = dma_high(addr);
 			/* Make this an end of list */
 			if (i == nseg - 1)
-				mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
+				*lptr = direction|0xD0000000|sg_dma_len(sg);
 		}
 		reqlen = mptr - msg;
 		*lenptr = len;
@@ -2824,7 +3018,17 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 	}
 
 	// Calculate the Scatter Gather list size
-	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
+	if (dpt_dma64(pHba)) {
+		pHba->sg_tablesize
+		  = ((pHba->status_block->inbound_frame_size * 4
+		  - 14 * sizeof(u32))
+		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
+	} else {
+		pHba->sg_tablesize
+		  = ((pHba->status_block->inbound_frame_size * 4
+		  - 12 * sizeof(u32))
+		  / sizeof(struct sg_simple_element));
+	}
 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
 	}
@@ -2916,13 +3120,19 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
 		pHba->FwDebugBufferSize = buf[1];
-		pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
-		pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
-		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
-		pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
-		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
-		pHba->FwDebugBuffer_P += buf[2];
-		pHba->FwDebugFlags = 0;
+		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
+						pHba->FwDebugBufferSize);
+		if (pHba->FwDebugBuffer_P) {
+			pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
+							FW_DEBUG_FLAGS_OFFSET;
+			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
+							FW_DEBUG_BLED_OFFSET;
+			pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
+			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
+						FW_DEBUG_STR_LENGTH_OFFSET;
+			pHba->FwDebugBuffer_P += buf[2];
+			pHba->FwDebugFlags = 0;
+		}
 	}
 
 	return 0;
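
Taken together, the message-building hunks above change the SG element format in one consistent way: when `dpt_dma64()` is true the driver first emits a two-dword attribute element (`(0x7C<<24)+(2<<16)+0x02` plus the page size) that switches the frame to 64-bit addressing, and each data element then carries its bus address as a low/high dword pair. A consolidated sketch of that layout follows; `build_single_sge()` is illustrative, not a function in the driver, while `dpt_dma64()`, `dma_low()` and `dma_high()` are the driver's own helpers from the hunks above:

```c
/*
 * Illustrative consolidation of the SGE layout used by this patch;
 * build_single_sge() itself does not exist in the driver.
 */
static u32 *build_single_sge(adpt_hba *pHba, u32 *mptr,
			     dma_addr_t addr, u32 len, u32 direction)
{
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;  /* enable 64-bit SGEs */
		*mptr++ = 1 << PAGE_SHIFT;          /* page size dword */
		*mptr++ = 0xD0000000|direction|len; /* last element + count */
		*mptr++ = dma_low(addr);            /* bus address, bits 0-31 */
		*mptr++ = dma_high(addr);           /* bus address, bits 32-63 */
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = (u32)addr;                /* 32-bit bus address */
	}
	return mptr;
}
```

This is also why `sg_tablesize` shrinks in the 64-bit case in `adpt_i2o_status_get()`: each scatter-gather element costs three dwords instead of two, so fewer of them fit in a fixed-size inbound frame.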