Diffstat (limited to 'drivers/scsi/dpt_i2o.c')
 -rw-r--r--  drivers/scsi/dpt_i2o.c | 640
 1 file changed, 482 insertions(+), 158 deletions(-)
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index ac92ac143b46..0fb5bf4c43ac 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -29,11 +29,6 @@
 /*#define DEBUG 1 */
 /*#define UARTDELAY 1 */
 
-/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
-   high pages. Keep the macro around because of the broken unmerged ia64 tree */
-
-#define ADDR32 (0)
-
 #include <linux/module.h>
 
 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
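The removal of ADDR32 above sets the theme for the whole patch: the macro was a GFP flag forced to zero (kept only for an out-of-tree ia64 port) that quietly relied on kmalloc() returning memory below 4GB. Every buffer the adapter DMAs into is converted to the coherent DMA API instead, which returns a CPU pointer and a dma_addr_t pair and honours the device's consistent mask. A minimal sketch of the pattern, with hypothetical locals (pdev, va, pa):

	void *va;
	dma_addr_t pa;

	/* returns a CPU pointer plus the bus address the device must use */
	va = dma_alloc_coherent(&pdev->dev, 80, &pa, GFP_KERNEL);
	if (!va)
		return -ENOMEM;
	/* ... hand 'pa' to the adapter, access the data through 'va' ... */
	dma_free_coherent(&pdev->dev, 80, va, pa);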
@@ -108,27 +103,28 @@ static dpt_sig_S DPTI_sig = {
 
 static DEFINE_MUTEX(adpt_configuration_lock);
 
-static struct i2o_sys_tbl *sys_tbl = NULL;
-static int sys_tbl_ind = 0;
-static int sys_tbl_len = 0;
+static struct i2o_sys_tbl *sys_tbl;
+static dma_addr_t sys_tbl_pa;
+static int sys_tbl_ind;
+static int sys_tbl_len;
 
 static adpt_hba* hba_chain = NULL;
 static int hba_count = 0;
 
+static struct class *adpt_sysfs_class;
+
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
 static const struct file_operations adpt_fops = {
 	.ioctl		= adpt_ioctl,
 	.open		= adpt_open,
-	.release	= adpt_close
-};
-
-#ifdef REBOOT_NOTIFIER
-static struct notifier_block adpt_reboot_notifier =
-{
-	 adpt_reboot_event,
-	 NULL,
-	 0
-};
+	.release	= adpt_close,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= compat_adpt_ioctl,
 #endif
+};
 
 /* Structures and definitions for synchronous message posting.
  * See adpt_i2o_post_wait() for description
@@ -151,6 +147,21 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
 *============================================================================
 */
 
+static inline int dpt_dma64(adpt_hba *pHba)
+{
+	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
+}
+
+static inline u32 dma_high(dma_addr_t addr)
+{
+	return upper_32_bits(addr);
+}
+
+static inline u32 dma_low(dma_addr_t addr)
+{
+	return (u32)addr;
+}
+
 static u8 adpt_read_blink_led(adpt_hba* host)
 {
 	if (host->FwDebugBLEDflag_P) {
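The three helpers added here carry the rest of the patch: dpt_dma64() is true only when dma_addr_t is wide enough and the HBA actually negotiated a 64-bit mask, and dma_low()/dma_high() split an address into the two 32-bit words that I2O message frames expect. A sketch of how they compose, as a hypothetical helper rather than anything in the patch:

	/* pack a DMA address into one or two 32-bit message words */
	static u32 *pack_dma_addr(u32 *mptr, dma_addr_t addr, int dma64)
	{
		*mptr++ = dma_low(addr);	/* low half always goes out */
		if (dma64)
			*mptr++ = dma_high(addr); /* high half only in 64-bit SGLs */
		return mptr;
	}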
@@ -178,8 +189,6 @@ static int adpt_detect(struct scsi_host_template* sht)
 	struct pci_dev *pDev = NULL;
 	adpt_hba* pHba;
 
-	adpt_init();
-
 	PINFO("Detecting Adaptec I2O RAID controllers...\n");
 
 	/* search for all Adatpec I2O RAID cards */
@@ -247,13 +256,29 @@ rebuild_sys_tab:
 		adpt_inquiry(pHba);
 	}
 
+	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
+	if (IS_ERR(adpt_sysfs_class)) {
+		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
+		adpt_sysfs_class = NULL;
+	}
+
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
-		if( adpt_scsi_register(pHba,sht) < 0){
+		if (adpt_scsi_host_alloc(pHba, sht) < 0){
 			adpt_i2o_delete_hba(pHba);
 			continue;
 		}
 		pHba->initialized = TRUE;
 		pHba->state &= ~DPTI_STATE_RESET;
+		if (adpt_sysfs_class) {
+			struct device *dev = device_create(adpt_sysfs_class,
+				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit),
+				"dpti%d", pHba->unit);
+			if (IS_ERR(dev)) {
+				printk(KERN_WARNING"dpti%d: unable to "
+					"create device in dpt_i2o class\n",
+					pHba->unit);
+			}
+		}
 	}
 
 	// Register our control device node
@@ -282,7 +307,7 @@ static int adpt_release(struct Scsi_Host *host)
 
 static void adpt_inquiry(adpt_hba* pHba)
 {
-	u32 msg[14];
+	u32 msg[17];
 	u32 *mptr;
 	u32 *lenptr;
 	int direction;
@@ -290,11 +315,12 @@ static void adpt_inquiry(adpt_hba* pHba)
 	u32 len;
 	u32 reqlen;
 	u8* buf;
+	dma_addr_t addr;
 	u8 scb[16];
 	s32 rcode;
 
 	memset(msg, 0, sizeof(msg));
-	buf = kmalloc(80,GFP_KERNEL|ADDR32);
+	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
 	if(!buf){
 		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
 		return;
@@ -305,7 +331,10 @@
 	direction = 0x00000000;
 	scsidir =0x40000000;	// DATA IN  (iop<--dev)
 
-	reqlen = 14;		// SINGLE SGE
+	if (dpt_dma64(pHba))
+		reqlen = 17;	// SINGLE SGE, 64 bit
+	else
+		reqlen = 14;	// SINGLE SGE, 32 bit
 	/* Stick the headers on */
 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
@@ -338,8 +367,16 @@
 
 	/* Now fill in the SGList and command */
 	*lenptr = len;
-	*mptr++ = 0xD0000000|direction|len;
-	*mptr++ = virt_to_bus(buf);
+	if (dpt_dma64(pHba)) {
+		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+		*mptr++ = 1 << PAGE_SHIFT;
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = dma_low(addr);
+		*mptr++ = dma_high(addr);
+	} else {
+		*mptr++ = 0xD0000000|direction|len;
+		*mptr++ = addr;
+	}
 
 	// Send it on it's way
 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
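In 64-bit mode the single data SGE is preceded by a two-word SG attribute element: (0x7C<<24) marks the element type, the low bits announce 64-bit addressing, and the second word carries the page frame size. The data element itself then takes three words (flags+length, low address, high address) instead of two, which is why the inquiry request grows from 14 to 17 words. A condensed sketch of the two shapes, assuming the dma_low()/dma_high() helpers from earlier in the patch:

	static u32 *build_single_sge(u32 *mptr, u32 direction, u32 len,
				     dma_addr_t addr, int dma64)
	{
		if (dma64) {
			*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* SG attributes: 64 bit */
			*mptr++ = 1 << PAGE_SHIFT;	   /* page frame size */
			*mptr++ = 0xD0000000|direction|len; /* last SGE, end of buffer */
			*mptr++ = dma_low(addr);
			*mptr++ = dma_high(addr);
		} else {
			*mptr++ = 0xD0000000|direction|len;
			*mptr++ = (u32)addr;
		}
		return mptr;
	}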
@@ -347,7 +384,7 @@
 		sprintf(pHba->detail, "Adaptec I2O RAID");
 		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
 		if (rcode != -ETIME && rcode != -EINTR)
-			kfree(buf);
+			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 	} else {
 		memset(pHba->detail, 0, sizeof(pHba->detail));
 		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
@@ -356,7 +393,7 @@
 		memcpy(&(pHba->detail[40]), " FW: ", 4);
 		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
 		pHba->detail[48] = '\0';	/* precautionary */
-		kfree(buf);
+		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
 	}
 	adpt_i2o_status_get(pHba);
 	return ;
@@ -632,6 +669,91 @@ stop_output:
 	return len;
 }
 
+/*
+ * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
+ */
+static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
+{
+	return (u32)cmd->serial_number;
+}
+
+/*
+ * Go from a u32 'context' to a struct scsi_cmnd * .
+ * This could probably be made more efficient.
+ */
+static struct scsi_cmnd *
+	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
+{
+	struct scsi_cmnd * cmd;
+	struct scsi_device * d;
+
+	if (context == 0)
+		return NULL;
+
+	spin_unlock(pHba->host->host_lock);
+	shost_for_each_device(d, pHba->host) {
+		unsigned long flags;
+		spin_lock_irqsave(&d->list_lock, flags);
+		list_for_each_entry(cmd, &d->cmd_list, list) {
+			if (((u32)cmd->serial_number == context)) {
+				spin_unlock_irqrestore(&d->list_lock, flags);
+				scsi_device_put(d);
+				spin_lock(pHba->host->host_lock);
+				return cmd;
+			}
+		}
+		spin_unlock_irqrestore(&d->list_lock, flags);
+	}
+	spin_lock(pHba->host->host_lock);
+
+	return NULL;
+}
+
+/*
+ * Turn a pointer to ioctl reply data into an u32 'context'
+ */
+static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
+{
+#if BITS_PER_LONG == 32
+	return (u32)(unsigned long)reply;
+#else
+	ulong flags = 0;
+	u32 nr, i;
+
+	spin_lock_irqsave(pHba->host->host_lock, flags);
+	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
+	for (i = 0; i < nr; i++) {
+		if (pHba->ioctl_reply_context[i] == NULL) {
+			pHba->ioctl_reply_context[i] = reply;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(pHba->host->host_lock, flags);
+	if (i >= nr) {
+		kfree (reply);
+		printk(KERN_WARNING"%s: Too many outstanding "
+				"ioctl commands\n", pHba->name);
+		return (u32)-1;
+	}
+
+	return i;
+#endif
+}
+
+/*
+ * Go from an u32 'context' to a pointer to ioctl reply data.
+ */
+static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
+{
+#if BITS_PER_LONG == 32
+	return (void *)(unsigned long)context;
+#else
+	void *p = pHba->ioctl_reply_context[context];
+	pHba->ioctl_reply_context[context] = NULL;
+
+	return p;
+#endif
+}
 
 /*===========================================================================
  * Error Handling routines
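These four functions exist because an I2O transaction context is a single 32-bit word in the message frame, so a kernel pointer no longer fits once the driver runs on a 64-bit machine. SCSI commands are mapped through cmd->serial_number and found again by walking the host's device and command lists; ioctl replies go through a small per-HBA slot array (the ioctl_reply_context[] field is added to struct adpt_hba, presumably in dpti.h, which is not part of this file's diff). The slot-table idea in isolation, as a standalone illustration with invented names:

	#define NR_CTX 32			/* illustrative size */
	static void *ctx_slots[NR_CTX];
	static DEFINE_SPINLOCK(ctx_lock);

	static u32 ptr_to_ctx(void *p)		/* slot index, or (u32)-1 if full */
	{
		unsigned long flags;
		u32 i;

		spin_lock_irqsave(&ctx_lock, flags);
		for (i = 0; i < NR_CTX; i++)
			if (ctx_slots[i] == NULL) {
				ctx_slots[i] = p;
				break;
			}
		spin_unlock_irqrestore(&ctx_lock, flags);
		return i < NR_CTX ? i : (u32)-1;
	}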
@@ -660,7 +782,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
 	msg[2] = 0;
 	msg[3]= 0;
-	msg[4] = (u32)cmd;
+	msg[4] = adpt_cmd_to_context(cmd);
 	if (pHba->host)
 		spin_lock_irq(pHba->host->host_lock);
 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -861,27 +983,6 @@ static void adpt_i2o_sys_shutdown(void)
 	printk(KERN_INFO "Adaptec I2O controllers down.\n");
 }
 
-/*
- * reboot/shutdown notification.
- *
- * - Quiesce each IOP in the system
- *
- */
-
-#ifdef REBOOT_NOTIFIER
-static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
-{
-
-	if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
-		return NOTIFY_DONE;
-
-	adpt_i2o_sys_shutdown();
-
-	return NOTIFY_DONE;
-}
-#endif
-
-
 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
 {
 
@@ -893,6 +994,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	u32 hba_map1_area_size = 0;
 	void __iomem *base_addr_virt = NULL;
 	void __iomem *msg_addr_virt = NULL;
+	int dma64 = 0;
 
 	int raptorFlag = FALSE;
 
@@ -906,9 +1008,21 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 	}
 
 	pci_set_master(pDev);
-	if (pci_set_dma_mask(pDev, DMA_32BIT_MASK))
+
+	/*
+	 *	See if we should enable dma64 mode.
+	 */
+	if (sizeof(dma_addr_t) > 4 &&
+	    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
+		if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
+			dma64 = 1;
+	}
+	if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
 		return -EINVAL;
 
+	/* adapter only supports message blocks below 4GB */
+	pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);
+
 	base_addr0_phys = pci_resource_start(pDev,0);
 	hba_map0_area_size = pci_resource_len(pDev,0);
 
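The negotiation above keeps 64-bit DMA only when dma_get_required_mask() reports memory the device could not otherwise reach, since 64-bit SG elements make every request larger for no benefit on small machines; coherent allocations (message frames, status blocks, the reply pool) are pinned below 4GB regardless, because the adapter only accepts 32-bit addresses for those. Condensed into one expression, using this kernel's 2.6.26-era constants (DMA_64BIT_MASK/DMA_32BIT_MASK were later replaced by DMA_BIT_MASK(64)/DMA_BIT_MASK(32)):

	int dma64 = sizeof(dma_addr_t) > 4 &&
		    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0 &&
		    dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK;
	if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
		return -EINVAL;
	/* message blocks must stay below 4GB either way */
	pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);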
@@ -929,6 +1043,25 @@
 		raptorFlag = TRUE;
 	}
 
+#if BITS_PER_LONG == 64
+	/*
+	 *	The original Adaptec 64 bit driver has this comment here:
+	 *	"x86_64 machines need more optimal mappings"
+	 *
+	 *	I assume some HBAs report ridiculously large mappings
+	 *	and we need to limit them on platforms with IOMMUs.
+	 */
+	if (raptorFlag == TRUE) {
+		if (hba_map0_area_size > 128)
+			hba_map0_area_size = 128;
+		if (hba_map1_area_size > 524288)
+			hba_map1_area_size = 524288;
+	} else {
+		if (hba_map0_area_size > 524288)
+			hba_map0_area_size = 524288;
+	}
+#endif
+
 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
 	if (!base_addr_virt) {
 		pci_release_regions(pDev);
@@ -991,16 +1124,22 @@
 	pHba->state = DPTI_STATE_RESET;
 	pHba->pDev = pDev;
 	pHba->devices = NULL;
+	pHba->dma64 = dma64;
 
 	// Initializing the spinlocks
 	spin_lock_init(&pHba->state_lock);
 	spin_lock_init(&adpt_post_wait_lock);
 
 	if(raptorFlag == 0){
-		printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
-			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
+		printk(KERN_INFO "Adaptec I2O RAID controller"
+				 " %d at %p size=%x irq=%d%s\n",
+			hba_count-1, base_addr_virt,
+			hba_map0_area_size, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 	} else {
-		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
+		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
+			hba_count-1, pDev->irq,
+			dma64 ? " (64-bit DMA)" : "");
 		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
 		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
 	}
@@ -1053,10 +1192,26 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
 		iounmap(pHba->msg_addr_virt);
 	}
-	kfree(pHba->hrt);
-	kfree(pHba->lct);
-	kfree(pHba->status_block);
-	kfree(pHba->reply_pool);
+	if(pHba->FwDebugBuffer_P)
+		iounmap(pHba->FwDebugBuffer_P);
+	if(pHba->hrt) {
+		dma_free_coherent(&pHba->pDev->dev,
+			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
+			pHba->hrt, pHba->hrt_pa);
+	}
+	if(pHba->lct) {
+		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+			pHba->lct, pHba->lct_pa);
+	}
+	if(pHba->status_block) {
+		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
+			pHba->status_block, pHba->status_block_pa);
+	}
+	if(pHba->reply_pool) {
+		dma_free_coherent(&pHba->pDev->dev,
+			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+			pHba->reply_pool, pHba->reply_pool_pa);
+	}
 
 	for(d = pHba->devices; d ; d = next){
 		next = d->next;
@@ -1075,23 +1230,19 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
 	pci_dev_put(pHba->pDev);
 	kfree(pHba);
 
+	if (adpt_sysfs_class)
+		device_destroy(adpt_sysfs_class,
+				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
+
 	if(hba_count <= 0){
 		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
+		if (adpt_sysfs_class) {
+			class_destroy(adpt_sysfs_class);
+			adpt_sysfs_class = NULL;
+		}
 	}
 }
 
-
-static int adpt_init(void)
-{
-	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-#ifdef REBOOT_NOTIFIER
-	register_reboot_notifier(&adpt_reboot_notifier);
-#endif
-
-	return 0;
-}
-
-
 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
 {
 	struct adpt_device* d;
@@ -1283,6 +1434,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 {
 	u32 msg[8];
 	u8* status;
+	dma_addr_t addr;
 	u32 m = EMPTY_QUEUE ;
 	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
 
@@ -1305,12 +1457,13 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 		schedule_timeout_uninterruptible(1);
 	} while (m == EMPTY_QUEUE);
 
-	status = kzalloc(4, GFP_KERNEL|ADDR32);
+	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
 	if(status == NULL) {
 		adpt_send_nop(pHba, m);
 		printk(KERN_ERR"IOP reset failed - no free memory.\n");
 		return -ENOMEM;
 	}
+	memset(status,0,4);
 
 	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
 	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
@@ -1318,8 +1471,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 	msg[3]=0;
 	msg[4]=0;
 	msg[5]=0;
-	msg[6]=virt_to_bus(status);
-	msg[7]=0;
+	msg[6]=dma_low(addr);
+	msg[7]=dma_high(addr);
 
 	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
 	wmb();
@@ -1329,7 +1482,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 	while(*status == 0){
 		if(time_after(jiffies,timeout)){
 			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
-			kfree(status);
+			/* We lose 4 bytes of "status" here, but we cannot
+			   free these because controller may awake and corrupt
+			   those bytes at any time */
+			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
 			return -ETIMEDOUT;
 		}
 		rmb();
@@ -1348,6 +1504,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 		}
 		if(time_after(jiffies,timeout)){
 			printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
+			/* We lose 4 bytes of "status" here, but we
+			   cannot free these because controller may
+			   awake and corrupt those bytes at any time */
+			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
 			return -ETIMEDOUT;
 		}
 		schedule_timeout_uninterruptible(1);
@@ -1364,7 +1524,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 		PDEBUG("%s: Reset completed.\n", pHba->name);
 	}
 
-	kfree(status);
+	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
 #ifdef UARTDELAY
 	// This delay is to allow someone attached to the card through the debug UART to
 	// set up the dump levels that they want before the rest of the initialization sequence
@@ -1636,6 +1796,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	u32 i = 0;
 	u32 rcode = 0;
 	void *p = NULL;
+	dma_addr_t addr;
 	ulong flags = 0;
 
 	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
@@ -1668,10 +1829,13 @@
 	}
 	sg_offset = (msg[0]>>4)&0xf;
 	msg[2] = 0x40000000;	// IOCTL context
-	msg[3] = (u32)reply;
+	msg[3] = adpt_ioctl_to_context(pHba, reply);
+	if (msg[3] == (u32)-1)
+		return -EBUSY;
+
 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
 	if(sg_offset) {
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
 		if (sg_count > pHba->sg_tablesize){
@@ -1690,7 +1854,7 @@
 			}
 			sg_size = sg[i].flag_count & 0xffffff;
 			/* Allocate memory for the transfer */
-			p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
+			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
 			if(!p) {
 				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 						pHba->name,sg_size,i,sg_count);
@@ -1700,15 +1864,15 @@
 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
 			/* Copy in the user's SG buffer if necessary */
 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
-				// TODO 64bit fix
-				if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
+				// sg_simple_element API is 32 bit
+				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
 					rcode = -EFAULT;
 					goto cleanup;
 				}
 			}
-			//TODO 64bit fix
-			sg[i].addr_bus = (u32)virt_to_bus(p);
+			/* sg_simple_element API is 32 bit, but addr < 4GB */
+			sg[i].addr_bus = addr;
 		}
 	}
@@ -1736,7 +1900,7 @@
 	if(sg_offset) {
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		struct sg_simple_element* sg;
 		int sg_size;
 
@@ -1756,14 +1920,14 @@
 		}
 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
 
-		// TODO 64bit fix
+		// TODO add 64 bit API
 		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
 		for (j = 0; j < sg_count; j++) {
 			/* Copy out the SG list to user's buffer if necessary */
 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
 				sg_size = sg[j].flag_count & 0xffffff;
-				// TODO 64bit fix
-				if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
+				// sg_simple_element API is 32 bit
+				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
 					rcode = -EFAULT;
 					goto cleanup;
@@ -1787,12 +1951,17 @@
 
 
 cleanup:
-	if (rcode != -ETIME && rcode != -EINTR)
+	if (rcode != -ETIME && rcode != -EINTR) {
+		struct sg_simple_element *sg =
+				(struct sg_simple_element*) (msg +sg_offset);
 		kfree (reply);
-	while(sg_index) {
-		if(sg_list[--sg_index]) {
-			if (rcode != -ETIME && rcode != -EINTR)
-				kfree(sg_list[sg_index]);
+		while(sg_index) {
+			if(sg_list[--sg_index]) {
+				dma_free_coherent(&pHba->pDev->dev,
+					sg[sg_index].flag_count & 0xffffff,
+					sg_list[sg_index],
+					sg[sg_index].addr_bus);
+			}
 		}
 	}
 	return rcode;
@@ -1978,6 +2147,38 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
 	return error;
 }
 
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	struct inode *inode;
+	long ret;
+
+	inode = file->f_dentry->d_inode;
+
+	lock_kernel();
+
+	switch(cmd) {
+		case DPT_SIGNATURE:
+		case I2OUSRCMD:
+		case DPT_CTRLINFO:
+		case DPT_SYSINFO:
+		case DPT_BLINKLED:
+		case I2ORESETCMD:
+		case I2ORESCANCMD:
+		case (DPT_TARGET_BUSY & 0xFFFF):
+		case DPT_TARGET_BUSY:
+			ret = adpt_ioctl(inode, file, cmd, arg);
+			break;
+		default:
+			ret =  -ENOIOCTLCMD;
+	}
+
+	unlock_kernel();
+
+	return ret;
+}
+#endif
 
 static irqreturn_t adpt_isr(int irq, void *dev_id)
 {
@@ -2009,7 +2210,16 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
 				goto out;
 			}
 		}
-		reply = bus_to_virt(m);
+		if (pHba->reply_pool_pa <= m &&
+		    m < pHba->reply_pool_pa +
+			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
+			reply = (u8 *)pHba->reply_pool +
+						(m - pHba->reply_pool_pa);
+		} else {
+			/* Ick, we should *never* be here */
+			printk(KERN_ERR "dpti: reply frame not from pool\n");
+			reply = (u8 *)bus_to_virt(m);
+		}
 
 		if (readl(reply) & MSG_FAIL) {
 			u32 old_m = readl(reply+28);
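The outbound FIFO hands back the physical address of a reply frame. With the reply pool now coming from dma_alloc_coherent() there is no bus_to_virt() mapping for it, so the driver locates the frame by its offset into the pool; the else branch should be unreachable and exists only as a fallback. The translation, reduced to a hypothetical helper:

	/* map a reply-frame PA from the outbound FIFO back into the pool */
	static void *reply_pa_to_va(void *pool_va, dma_addr_t pool_pa,
				    u32 pool_len, u32 pa)
	{
		if (pa < pool_pa || pa >= pool_pa + pool_len)
			return NULL;		/* not from our pool */
		return (u8 *)pool_va + (pa - pool_pa);
	}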
@@ -2029,7 +2239,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
 		}
 		context = readl(reply+8);
 		if(context & 0x40000000){ // IOCTL
-			void *p = (void *)readl(reply+12);
+			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
 			if( p != NULL) {
 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
 			}
@@ -2043,15 +2253,17 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
 			status = I2O_POST_WAIT_OK;
 		}
 		if(!(context & 0x40000000)) {
-			cmd = (struct scsi_cmnd*) readl(reply+12);
+			cmd = adpt_cmd_from_context(pHba,
+						readl(reply+12));
 			if(cmd != NULL) {
 				printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
 			}
 		}
 		adpt_i2o_post_wait_complete(context, status);
 	} else { // SCSI message
-		cmd = (struct scsi_cmnd*) readl(reply+12);
+		cmd = adpt_cmd_from_context (pHba, readl(reply+12));
 		if(cmd != NULL){
+			scsi_dma_unmap(cmd);
 			if(cmd->serial_number != 0) { // If not timedout
 				adpt_i2o_to_scsi(reply, cmd);
 			}
@@ -2072,6 +2284,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	int i;
 	u32 msg[MAX_MESSAGE_SIZE];
 	u32* mptr;
+	u32* lptr;
 	u32 *lenptr;
 	int direction;
 	int scsidir;
@@ -2079,6 +2292,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	u32 len;
 	u32 reqlen;
 	s32 rcode;
+	dma_addr_t addr;
 
 	memset(msg, 0 , sizeof(msg));
 	len = scsi_bufflen(cmd);
@@ -2118,7 +2332,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	// I2O_CMD_SCSI_EXEC
 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
 	msg[2] = 0;
-	msg[3] = (u32)cmd;	/* We want the SCSI control block back */
+	msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
 	// Our cards use the transaction context as the tag for queueing
 	// Adaptec/DPT Private stuff
 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2136,7 +2350,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
 	mptr+=4;
 	lenptr=mptr++;		/* Remember me - fill in when we know */
-	reqlen = 14;		// SINGLE SGE
+	if (dpt_dma64(pHba)) {
+		reqlen = 16;		// SINGLE SGE
+		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+		*mptr++ = 1 << PAGE_SHIFT;
+	} else {
+		reqlen = 14;		// SINGLE SGE
+	}
 	/* Now fill in the SGList and command */
 
 	nseg = scsi_dma_map(cmd);
@@ -2146,12 +2366,16 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
 
 	len = 0;
 	scsi_for_each_sg(cmd, sg, nseg, i) {
+		lptr = mptr;
 		*mptr++ = direction|0x10000000|sg_dma_len(sg);
 		len+=sg_dma_len(sg);
-		*mptr++ = sg_dma_address(sg);
+		addr = sg_dma_address(sg);
+		*mptr++ = dma_low(addr);
+		if (dpt_dma64(pHba))
+			*mptr++ = dma_high(addr);
 		/* Make this an end of list */
 		if (i == nseg - 1)
-			mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
+			*lptr = direction|0xD0000000|sg_dma_len(sg);
 	}
 	reqlen = mptr - msg;
 	*lenptr = len;
@@ -2177,13 +2401,13 @@
 }
 
 
-static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
+static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
 {
-	struct Scsi_Host *host = NULL;
+	struct Scsi_Host *host;
 
-	host = scsi_register(sht, sizeof(adpt_hba*));
+	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
 	if (host == NULL) {
-		printk ("%s: scsi_register returned NULL\n",pHba->name);
+		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
 		return -1;
 	}
 	host->hostdata[0] = (unsigned long)pHba;
@@ -2200,7 +2424,7 @@ static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
 	host->max_lun = 256;
 	host->max_channel = pHba->top_scsi_channel + 1;
 	host->cmd_per_lun = 1;
-	host->unique_id = (uint) pHba;
+	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
 	host->sg_tablesize = pHba->sg_tablesize;
 	host->can_queue = pHba->post_fifo_size;
 
@@ -2640,11 +2864,10 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
 {
 	u8 *status;
+	dma_addr_t addr;
 	u32 __iomem *msg = NULL;
 	int i;
 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
-	u32* ptr;
-	u32 outbound_frame;  // This had to be a 32 bit address
 	u32 m;
 
 	do {
@@ -2663,13 +2886,14 @@
 
 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
 
-	status = kzalloc(4, GFP_KERNEL|ADDR32);
+	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
 	if (!status) {
 		adpt_send_nop(pHba, m);
 		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
 			pHba->name);
 		return -ENOMEM;
 	}
+	memset(status, 0, 4);
 
 	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
 	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
@@ -2678,7 +2902,7 @@
 	writel(4096, &msg[4]);		/* Host page frame size */
 	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
 	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
-	writel(virt_to_bus(status), &msg[7]);
+	writel((u32)addr, &msg[7]);
 
 	writel(m, pHba->post_port);
 	wmb();
@@ -2693,6 +2917,10 @@
 		rmb();
 		if(time_after(jiffies,timeout)){
 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
+			/* We lose 4 bytes of "status" here, but we
+			   cannot free these because controller may
+			   awake and corrupt those bytes at any time */
+			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
 			return -ETIMEDOUT;
 		}
 		schedule_timeout_uninterruptible(1);
@@ -2701,25 +2929,30 @@
 	// If the command was successful, fill the fifo with our reply
 	// message packets
 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
-		kfree(status);
+		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
 		return -2;
 	}
-	kfree(status);
+	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
 
-	kfree(pHba->reply_pool);
+	if(pHba->reply_pool != NULL) {
+		dma_free_coherent(&pHba->pDev->dev,
+			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+			pHba->reply_pool, pHba->reply_pool_pa);
+	}
 
-	pHba->reply_pool = kzalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
+	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
+				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+				&pHba->reply_pool_pa, GFP_KERNEL);
 	if (!pHba->reply_pool) {
 		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
 		return -ENOMEM;
 	}
+	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
 
-	ptr = pHba->reply_pool;
 	for(i = 0; i < pHba->reply_fifo_size; i++) {
-		outbound_frame = (u32)virt_to_bus(ptr);
-		writel(outbound_frame, pHba->reply_port);
+		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
+			pHba->reply_port);
 		wmb();
-		ptr +=  REPLY_FRAME_SIZE;
 	}
 	adpt_i2o_status_get(pHba);
 	return 0;
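The reply pool becomes one coherent allocation instead of a kzalloc'd area walked with virt_to_bus(); priming the FIFO here, and decoding a frame in the interrupt handler earlier, are both plain offset arithmetic against the (VA, PA) pair. For frame i (assumed local names):

	u8        *frame_va = (u8 *)pool_va + i * REPLY_FRAME_SIZE * 4;
	dma_addr_t frame_pa = pool_pa       + i * REPLY_FRAME_SIZE * 4;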
@@ -2743,11 +2976,11 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 	u32 m;
 	u32 __iomem *msg;
 	u8 *status_block=NULL;
-	ulong status_block_bus;
 
 	if(pHba->status_block == NULL) {
-		pHba->status_block = (i2o_status_block*)
-			kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
+		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
+					sizeof(i2o_status_block),
+					&pHba->status_block_pa, GFP_KERNEL);
 		if(pHba->status_block == NULL) {
 			printk(KERN_ERR
 			"dpti%d: Get Status Block failed; Out of memory. \n",
@@ -2757,7 +2990,6 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 	}
 	memset(pHba->status_block, 0, sizeof(i2o_status_block));
 	status_block = (u8*)(pHba->status_block);
-	status_block_bus = virt_to_bus(pHba->status_block);
 	timeout = jiffies+TMOUT_GETSTATUS*HZ;
 	do {
 		rmb();
@@ -2782,8 +3014,8 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 	writel(0, &msg[3]);
 	writel(0, &msg[4]);
 	writel(0, &msg[5]);
-	writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
-	writel(0, &msg[7]);
+	writel( dma_low(pHba->status_block_pa), &msg[6]);
+	writel( dma_high(pHba->status_block_pa), &msg[7]);
 	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
 
 	//post message
@@ -2812,7 +3044,17 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 	}
 
 	// Calculate the Scatter Gather list size
-	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
+	if (dpt_dma64(pHba)) {
+		pHba->sg_tablesize
+		  = ((pHba->status_block->inbound_frame_size * 4
+		    - 14 * sizeof(u32))
+		   / (sizeof(struct sg_simple_element) + sizeof(u32)));
+	} else {
+		pHba->sg_tablesize
+		  = ((pHba->status_block->inbound_frame_size * 4
+		    - 12 * sizeof(u32))
+		   / sizeof(struct sg_simple_element));
+	}
 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
 	}
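The sg_tablesize arithmetic follows directly from the message layout: an inbound frame of inbound_frame_size 32-bit words holds the request header plus the SG list. The 32-bit path reserves 12 words of header and each sg_simple_element is 8 bytes (flag_count plus addr_bus, both u32); the 64-bit path reserves 14 words (two more for the SG attribute element) and each entry grows by one u32 for the high address word. Worked example for a hypothetical 128-word inbound frame:

	/* 32-bit: (128*4 - 12*4) / 8       = 58 SG elements per request */
	/* 64-bit: (128*4 - 14*4) / (8 + 4) = 38 SG elements per request */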
@@ -2863,7 +3105,9 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 	}
 	do {
 		if (pHba->lct == NULL) {
-			pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
+			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
+					pHba->lct_size, &pHba->lct_pa,
+					GFP_KERNEL);
 			if(pHba->lct == NULL) {
 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
 					pHba->name);
@@ -2879,7 +3123,7 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 		msg[4] = 0xFFFFFFFF;	/* All devices */
 		msg[5] = 0x00000000;	/* Report now */
 		msg[6] = 0xD0000000|pHba->lct_size;
-		msg[7] = virt_to_bus(pHba->lct);
+		msg[7] = (u32)pHba->lct_pa;
 
 		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
@@ -2890,7 +3134,8 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 
 		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
 			pHba->lct_size = pHba->lct->table_size << 2;
-			kfree(pHba->lct);
+			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+					pHba->lct, pHba->lct_pa);
 			pHba->lct = NULL;
 		}
 	} while (pHba->lct == NULL);
@@ -2901,13 +3146,19 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
 		pHba->FwDebugBufferSize = buf[1];
-		pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
-		pHba->FwDebugFlags_P  = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
-		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
-		pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
-		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
-		pHba->FwDebugBuffer_P += buf[2];
-		pHba->FwDebugFlags = 0;
+		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
+						pHba->FwDebugBufferSize);
+		if (pHba->FwDebugBuffer_P) {
+			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
+							FW_DEBUG_FLAGS_OFFSET;
+			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
+							FW_DEBUG_BLED_OFFSET;
+			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
+			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
+						FW_DEBUG_STR_LENGTH_OFFSET;
+			pHba->FwDebugBuffer_P += buf[2];
+			pHba->FwDebugFlags = 0;
+		}
 	}
 
 	return 0;
@@ -2915,25 +3166,30 @@
 
 static int adpt_i2o_build_sys_table(void)
 {
-	adpt_hba* pHba = NULL;
+	adpt_hba* pHba = hba_chain;
 	int count = 0;
 
+	if (sys_tbl)
+		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
+					sys_tbl, sys_tbl_pa);
+
 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
 
-	kfree(sys_tbl);
-
-	sys_tbl = kzalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
+	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
+				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
 	if (!sys_tbl) {
 		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
 		return -ENOMEM;
 	}
+	memset(sys_tbl, 0, sys_tbl_len);
 
 	sys_tbl->num_entries = hba_count;
 	sys_tbl->version = I2OVERSION;
 	sys_tbl->change_ind = sys_tbl_ind++;
 
 	for(pHba = hba_chain; pHba; pHba = pHba->next) {
+		u64 addr;
 		// Get updated Status Block so we have the latest information
 		if (adpt_i2o_status_get(pHba)) {
 			sys_tbl->num_entries--;
@@ -2949,8 +3205,9 @@ static int adpt_i2o_build_sys_table(void)
 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
-		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
-		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
+		addr = pHba->base_addr_phys + 0x40;
+		sys_tbl->iops[count].inbound_low = dma_low(addr);
+		sys_tbl->iops[count].inbound_high = dma_high(addr);
 
 		count++;
 	}
@@ -3086,7 +3343,8 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
 
 	do {
 		if (pHba->hrt == NULL) {
-			pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
+			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
+					size, &pHba->hrt_pa, GFP_KERNEL);
 			if (pHba->hrt == NULL) {
 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
 				return -ENOMEM;
@@ -3098,7 +3356,7 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
 		msg[2]= 0;
 		msg[3]= 0;
 		msg[4]= (0xD0000000 | size);    /* Simple transaction */
-		msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */
+		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
 
 		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
 			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
@@ -3106,8 +3364,10 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
 		}
 
 		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
-			size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
-			kfree(pHba->hrt);
+			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
+			dma_free_coherent(&pHba->pDev->dev, size,
+				pHba->hrt, pHba->hrt_pa);
+			size = newsize;
 			pHba->hrt = NULL;
 		}
 	} while(pHba->hrt == NULL);
@@ -3121,33 +3381,54 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
 			int group, int field, void *buf, int buflen)
 {
 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
-	u8 *resblk;
+	u8 *opblk_va;
+	dma_addr_t opblk_pa;
+	u8 *resblk_va;
+	dma_addr_t resblk_pa;
 
 	int size;
 
 	/* 8 bytes for header */
-	resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
-	if (resblk == NULL) {
+	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
+	if (resblk_va == NULL) {
 		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
 		return -ENOMEM;
 	}
 
+	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+			sizeof(opblk), &opblk_pa, GFP_KERNEL);
+	if (opblk_va == NULL) {
+		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+			resblk_va, resblk_pa);
+		printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+			pHba->name);
+		return -ENOMEM;
+	}
 	if (field == -1)  		/* whole group */
 			opblk[4] = -1;
 
+	memcpy(opblk_va, opblk, sizeof(opblk));
 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
-		opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
+		opblk_va, opblk_pa, sizeof(opblk),
+		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
+	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
 	if (size == -ETIME) {
+		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+			resblk_va, resblk_pa);
 		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
 		return -ETIME;
 	} else if (size == -EINTR) {
+		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+			resblk_va, resblk_pa);
 		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
 		return -EINTR;
 	}
 
-	memcpy(buf, resblk+8, buflen);	/* cut off header */
+	memcpy(buf, resblk_va+8, buflen);	/* cut off header */
 
-	kfree(resblk);
+	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+			resblk_va, resblk_pa);
 	if (size < 0)
 		return size;
 
@@ -3164,10 +3445,11 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
-			void *opblk, int oplen, void *resblk, int reslen)
+		void *opblk_va, dma_addr_t opblk_pa, int oplen,
+		void *resblk_va, dma_addr_t resblk_pa, int reslen)
 {
 	u32 msg[9];
-	u32 *res = (u32 *)resblk;
+	u32 *res = (u32 *)resblk_va;
 	int wait_status;
 
 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
@@ -3176,12 +3458,12 @@ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
 	msg[3] = 0;
 	msg[4] = 0;
 	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
-	msg[6] = virt_to_bus(opblk);
+	msg[6] = (u32)opblk_pa;
 	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
-	msg[8] = virt_to_bus(resblk);
+	msg[8] = (u32)resblk_pa;
 
 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
-		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
+		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
 		return wait_status; 	/* -DetailedStatus */
 	}
 
@@ -3284,7 +3566,7 @@ static int adpt_i2o_systab_send(adpt_hba* pHba)
 	 * Private i/o space declaration
 	 */
 	msg[6] = 0x54000000 | sys_tbl_len;
-	msg[7] = virt_to_phys(sys_tbl);
+	msg[7] = (u32)sys_tbl_pa;
 	msg[8] = 0x54000000 | 0;
 	msg[9] = 0;
 	msg[10] = 0xD4000000 | 0;
@@ -3323,11 +3605,10 @@ static static void adpt_delay(int millisec)
 #endif
 
 static struct scsi_host_template driver_template = {
+	.module			= THIS_MODULE,
 	.name			= "dpt_i2o",
 	.proc_name		= "dpt_i2o",
 	.proc_info		= adpt_proc_info,
-	.detect			= adpt_detect,
-	.release		= adpt_release,
 	.info			= adpt_info,
 	.queuecommand		= adpt_queue,
 	.eh_abort_handler	= adpt_abort,
@@ -3341,5 +3622,48 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
 };
-#include "scsi_module.c"
+
+static int __init adpt_init(void)
+{
+	int		error;
+	adpt_hba	*pHba, *next;
+
+	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
+
+	error = adpt_detect(&driver_template);
+	if (error < 0)
+		return error;
+	if (hba_chain == NULL)
+		return -ENODEV;
+
+	for (pHba = hba_chain; pHba; pHba = pHba->next) {
+		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
+		if (error)
+			goto fail;
+		scsi_scan_host(pHba->host);
+	}
+	return 0;
+fail:
+	for (pHba = hba_chain; pHba; pHba = next) {
+		next = pHba->next;
+		scsi_remove_host(pHba->host);
+	}
+	return error;
+}
+
+static void __exit adpt_exit(void)
+{
+	adpt_hba	*pHba, *next;
+
+	for (pHba = hba_chain; pHba; pHba = pHba->next)
+		scsi_remove_host(pHba->host);
+	for (pHba = hba_chain; pHba; pHba = next) {
+		next = pHba->next;
+		adpt_release(pHba->host);
+	}
+}
+
+module_init(adpt_init);
+module_exit(adpt_exit);
+
 MODULE_LICENSE("GPL");