author     Miquel van Smoorenburg <miquels@cistron.nl>                    2008-05-01 19:06:39 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>        2008-05-02 14:15:26 -0400
commit     67af2b060e027c84b8e48d77e00b2369d997c0d4 (patch)
tree       0d56ebdd995415c574bd574c9434da1e8931e104 /drivers
parent     c864cb145dc2218cfad9fe53d323b54b48dbab6c (diff)
[SCSI] dpt_i2o: move from virt_to_bus/bus_to_virt to dma_alloc_coherent
Remove the virt_to_bus/bus_to_virt code from dpt_i2o and use
dma_alloc_coherent() / dma_free_coherent() instead.

This is in preparation for 64-bit support: dma_alloc_coherent() can
allocate memory in the lower 32 bits of physical memory, which is needed
because the HBA only supports message blocks below 4GB.

This code is based in part on the unofficial Adaptec 64-bit dpt_i2o
driver update that I got from Mark Salyzyn at Adaptec.

Signed-off-by: Miquel van Smoorenburg <miquels@cistron.nl>
Acked-by: Mark Salyzyn <Mark_Salyzyn@adaptec.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
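For reference, the coherent-allocation pattern the driver moves to looks roughly like the sketch below. This is a minimal illustration only; the helper names and the pci_dev parameter are assumptions, not the driver's exact code.

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* dma_alloc_coherent() returns both a CPU virtual address and a bus
     * address usable by the HBA.  Because the consistent DMA mask is set
     * to 32 bits in probe, the bus address is guaranteed to lie below
     * 4GB, which is all the controller can address. */
    static void *example_get_buffer(struct pci_dev *pdev, size_t len,
                                    dma_addr_t *bus_addr)
    {
            return dma_alloc_coherent(&pdev->dev, len, bus_addr, GFP_KERNEL);
    }

    static void example_put_buffer(struct pci_dev *pdev, size_t len,
                                   void *cpu_addr, dma_addr_t bus_addr)
    {
            dma_free_coherent(&pdev->dev, len, cpu_addr, bus_addr);
    }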
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/dpt_i2o.c  236
-rw-r--r--  drivers/scsi/dpti.h       7
2 files changed, 169 insertions(+), 74 deletions(-)
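One consequence of dropping bus_to_virt() is visible in the interrupt handler hunk further down: the reply frame's bus address is translated back to a virtual address as an offset into the coherent reply pool. A simplified sketch (same field names as the patch; surrounding function assumed):

    /* m is the reply-frame bus address read from the outbound FIFO;
     * reply_pool / reply_pool_pa are the pool's CPU and bus addresses. */
    if (pHba->reply_pool_pa <= m &&
        m < pHba->reply_pool_pa + pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)
            reply = (u8 *)pHba->reply_pool + (m - pHba->reply_pool_pa);
    else
            reply = (u8 *)bus_to_virt(m);   /* should never happen */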
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 30c741a12a62..7b1a084ec94e 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -29,11 +29,6 @@
 /*#define DEBUG 1 */
 /*#define UARTDELAY 1 */
 
-/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
-   high pages. Keep the macro around because of the broken unmerged ia64 tree */
-
-#define ADDR32 (0)
-
 #include <linux/module.h>
 
 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
@@ -108,9 +103,10 @@ static dpt_sig_S DPTI_sig = {
 
 static DEFINE_MUTEX(adpt_configuration_lock);
 
-static struct i2o_sys_tbl *sys_tbl = NULL;
-static int sys_tbl_ind = 0;
-static int sys_tbl_len = 0;
+static struct i2o_sys_tbl *sys_tbl;
+static dma_addr_t sys_tbl_pa;
+static int sys_tbl_ind;
+static int sys_tbl_len;
 
 static adpt_hba* hba_chain = NULL;
 static int hba_count = 0;
@@ -142,6 +138,16 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
  *============================================================================
  */
 
+static inline u32 dma_high(dma_addr_t addr)
+{
+        return upper_32_bits(addr);
+}
+
+static inline u32 dma_low(dma_addr_t addr)
+{
+        return (u32)addr;
+}
+
 static u8 adpt_read_blink_led(adpt_hba* host)
 {
         if(host->FwDebugBLEDflag_P != 0) {
@@ -279,11 +285,12 @@ static void adpt_inquiry(adpt_hba* pHba)
         u32 len;
         u32 reqlen;
         u8* buf;
+        dma_addr_t addr;
         u8 scb[16];
         s32 rcode;
 
         memset(msg, 0, sizeof(msg));
-        buf = kmalloc(80,GFP_KERNEL|ADDR32);
+        buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
         if(!buf){
                 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
                 return;
@@ -328,7 +335,7 @@ static void adpt_inquiry(adpt_hba* pHba)
         /* Now fill in the SGList and command */
         *lenptr = len;
         *mptr++ = 0xD0000000|direction|len;
-        *mptr++ = virt_to_bus(buf);
+        *mptr++ = addr;
 
         // Send it on it's way
         rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
@@ -336,7 +343,7 @@ static void adpt_inquiry(adpt_hba* pHba)
                 sprintf(pHba->detail, "Adaptec I2O RAID");
                 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
                 if (rcode != -ETIME && rcode != -EINTR)
-                        kfree(buf);
+                        dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
         } else {
                 memset(pHba->detail, 0, sizeof(pHba->detail));
                 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
@@ -345,7 +352,7 @@ static void adpt_inquiry(adpt_hba* pHba)
                 memcpy(&(pHba->detail[40]), " FW: ", 4);
                 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
                 pHba->detail[48] = '\0';        /* precautionary */
-                kfree(buf);
+                dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
         }
         adpt_i2o_status_get(pHba);
         return ;
@@ -621,7 +628,6 @@ stop_output:
         return len;
 }
 
-
 /*===========================================================================
  * Error Handling routines
  *===========================================================================
@@ -877,6 +883,9 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
         if (pci_set_dma_mask(pDev, DMA_32BIT_MASK))
                 return -EINVAL;
 
+        /* adapter only supports message blocks below 4GB */
+        pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);
+
         base_addr0_phys = pci_resource_start(pDev,0);
         hba_map0_area_size = pci_resource_len(pDev,0);
 
@@ -1021,10 +1030,24 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
         if(pHba->msg_addr_virt != pHba->base_addr_virt){
                 iounmap(pHba->msg_addr_virt);
         }
-        kfree(pHba->hrt);
-        kfree(pHba->lct);
-        kfree(pHba->status_block);
-        kfree(pHba->reply_pool);
+        if(pHba->hrt) {
+                dma_free_coherent(&pHba->pDev->dev,
+                        pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
+                        pHba->hrt, pHba->hrt_pa);
+        }
+        if(pHba->lct) {
+                dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+                        pHba->lct, pHba->lct_pa);
+        }
+        if(pHba->status_block) {
+                dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
+                        pHba->status_block, pHba->status_block_pa);
+        }
+        if(pHba->reply_pool) {
+                dma_free_coherent(&pHba->pDev->dev,
+                        pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+                        pHba->reply_pool, pHba->reply_pool_pa);
+        }
 
         for(d = pHba->devices; d ; d = next){
                 next = d->next;
@@ -1239,6 +1262,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 {
         u32 msg[8];
         u8* status;
+        dma_addr_t addr;
         u32 m = EMPTY_QUEUE ;
         ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
 
@@ -1261,12 +1285,13 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
                 schedule_timeout_uninterruptible(1);
         } while (m == EMPTY_QUEUE);
 
-        status = kzalloc(4, GFP_KERNEL|ADDR32);
+        status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
         if(status == NULL) {
                 adpt_send_nop(pHba, m);
                 printk(KERN_ERR"IOP reset failed - no free memory.\n");
                 return -ENOMEM;
         }
+        memset(status,0,4);
 
         msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
         msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
@@ -1274,8 +1299,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
         msg[3]=0;
         msg[4]=0;
         msg[5]=0;
-        msg[6]=virt_to_bus(status);
-        msg[7]=0;
+        msg[6]=dma_low(addr);
+        msg[7]=dma_high(addr);
 
         memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
         wmb();
@@ -1285,7 +1310,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
         while(*status == 0){
                 if(time_after(jiffies,timeout)){
                         printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
-                        kfree(status);
+                        /* We lose 4 bytes of "status" here, but we cannot
+                           free these because controller may awake and corrupt
+                           those bytes at any time */
+                        /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
                         return -ETIMEDOUT;
                 }
                 rmb();
@@ -1304,6 +1332,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
                 }
                 if(time_after(jiffies,timeout)){
                         printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
+                        /* We lose 4 bytes of "status" here, but we
+                           cannot free these because controller may
+                           awake and corrupt those bytes at any time */
+                        /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
                         return -ETIMEDOUT;
                 }
                 schedule_timeout_uninterruptible(1);
@@ -1320,7 +1352,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
                 PDEBUG("%s: Reset completed.\n", pHba->name);
         }
 
-        kfree(status);
+        dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
 #ifdef UARTDELAY
         // This delay is to allow someone attached to the card through the debug UART to
         // set up the dump levels that they want before the rest of the initialization sequence
@@ -1592,6 +1624,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
         u32 i = 0;
         u32 rcode = 0;
         void *p = NULL;
+        dma_addr_t addr;
         ulong flags = 0;
 
         memset(&msg, 0, MAX_MESSAGE_SIZE*4);
@@ -1646,7 +1679,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
                         }
                         sg_size = sg[i].flag_count & 0xffffff;
                         /* Allocate memory for the transfer */
-                        p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
+                        p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
                         if(!p) {
                                 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
                                         pHba->name,sg_size,i,sg_count);
@@ -1743,12 +1776,17 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 
 
 cleanup:
-        if (rcode != -ETIME && rcode != -EINTR)
+        if (rcode != -ETIME && rcode != -EINTR) {
+                struct sg_simple_element *sg =
+                                (struct sg_simple_element*) (msg +sg_offset);
                 kfree (reply);
                 while(sg_index) {
                         if(sg_list[--sg_index]) {
-                                if (rcode != -ETIME && rcode != -EINTR)
-                                        kfree(sg_list[sg_index]);
+                                dma_free_coherent(&pHba->pDev->dev,
+                                        sg[sg_index].flag_count & 0xffffff,
+                                        sg_list[sg_index],
+                                        sg[sg_index].addr_bus);
+                        }
                 }
         }
         return rcode;
@@ -1965,7 +2003,16 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
                         goto out;
                 }
         }
-        reply = bus_to_virt(m);
+        if (pHba->reply_pool_pa <= m &&
+            m < pHba->reply_pool_pa +
+                (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
+                reply = (u8 *)pHba->reply_pool +
+                        (m - pHba->reply_pool_pa);
+        } else {
+                /* Ick, we should *never* be here */
+                printk(KERN_ERR "dpti: reply frame not from pool\n");
+                reply = (u8 *)bus_to_virt(m);
+        }
 
         if (readl(reply) & MSG_FAIL) {
                 u32 old_m = readl(reply+28);
@@ -2008,6 +2055,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
                 } else { // SCSI message
                         cmd = (struct scsi_cmnd*) readl(reply+12);
                         if(cmd != NULL){
+                                scsi_dma_unmap(cmd);
                                 if(cmd->serial_number != 0) { // If not timedout
                                         adpt_i2o_to_scsi(reply, cmd);
                                 }
@@ -2156,7 +2204,7 @@ static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
         host->max_lun = 256;
         host->max_channel = pHba->top_scsi_channel + 1;
         host->cmd_per_lun = 1;
-        host->unique_id = (uint) pHba;
+        host->unique_id = (u32)sys_tbl_pa + pHba->unit;
         host->sg_tablesize = pHba->sg_tablesize;
         host->can_queue = pHba->post_fifo_size;
 
@@ -2596,11 +2644,10 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
 {
         u8 *status;
+        dma_addr_t addr;
         u32 __iomem *msg = NULL;
         int i;
         ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
-        u32* ptr;
-        u32 outbound_frame;  // This had to be a 32 bit address
         u32 m;
 
         do {
@@ -2619,13 +2666,14 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
 
         msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
 
-        status = kzalloc(4, GFP_KERNEL|ADDR32);
+        status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
         if (!status) {
                 adpt_send_nop(pHba, m);
                 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
                         pHba->name);
                 return -ENOMEM;
         }
+        memset(status, 0, 4);
 
         writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
         writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
@@ -2634,7 +2682,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
         writel(4096, &msg[4]);                /* Host page frame size */
         writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);        /* Outbound msg frame size and Initcode */
         writel(0xD0000004, &msg[6]);                /* Simple SG LE, EOB */
-        writel(virt_to_bus(status), &msg[7]);
+        writel((u32)addr, &msg[7]);
 
         writel(m, pHba->post_port);
         wmb();
@@ -2649,6 +2697,10 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
                 rmb();
                 if(time_after(jiffies,timeout)){
                         printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
+                        /* We lose 4 bytes of "status" here, but we
+                           cannot free these because controller may
+                           awake and corrupt those bytes at any time */
+                        /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
                         return -ETIMEDOUT;
                 }
                 schedule_timeout_uninterruptible(1);
@@ -2657,25 +2709,30 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
         // If the command was successful, fill the fifo with our reply
         // message packets
         if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
-                kfree(status);
+                dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
                 return -2;
         }
-        kfree(status);
+        dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
 
-        kfree(pHba->reply_pool);
+        if(pHba->reply_pool != NULL) {
+                dma_free_coherent(&pHba->pDev->dev,
+                        pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+                        pHba->reply_pool, pHba->reply_pool_pa);
+        }
 
-        pHba->reply_pool = kzalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
+        pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
+                                pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+                                &pHba->reply_pool_pa, GFP_KERNEL);
         if (!pHba->reply_pool) {
                 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
                 return -ENOMEM;
         }
+        memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
 
-        ptr = pHba->reply_pool;
         for(i = 0; i < pHba->reply_fifo_size; i++) {
-                outbound_frame = (u32)virt_to_bus(ptr);
-                writel(outbound_frame, pHba->reply_port);
+                writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
+                        pHba->reply_port);
                 wmb();
-                ptr +=  REPLY_FRAME_SIZE;
         }
         adpt_i2o_status_get(pHba);
         return 0;
@@ -2699,11 +2756,11 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
         u32 m;
         u32 __iomem *msg;
         u8 *status_block=NULL;
-        ulong status_block_bus;
 
         if(pHba->status_block == NULL) {
-                pHba->status_block = (i2o_status_block*)
-                        kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
+                pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
+                                        sizeof(i2o_status_block),
+                                        &pHba->status_block_pa, GFP_KERNEL);
                 if(pHba->status_block == NULL) {
                         printk(KERN_ERR
                         "dpti%d: Get Status Block failed; Out of memory. \n",
@@ -2713,7 +2770,6 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
         }
         memset(pHba->status_block, 0, sizeof(i2o_status_block));
         status_block = (u8*)(pHba->status_block);
-        status_block_bus = virt_to_bus(pHba->status_block);
         timeout = jiffies+TMOUT_GETSTATUS*HZ;
         do {
                 rmb();
@@ -2738,8 +2794,8 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
         writel(0, &msg[3]);
         writel(0, &msg[4]);
         writel(0, &msg[5]);
-        writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
-        writel(0, &msg[7]);
+        writel( dma_low(pHba->status_block_pa), &msg[6]);
+        writel( dma_high(pHba->status_block_pa), &msg[7]);
         writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
 
         //post message
@@ -2819,7 +2875,9 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
         }
         do {
                 if (pHba->lct == NULL) {
-                        pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
+                        pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
+                                        pHba->lct_size, &pHba->lct_pa,
+                                        GFP_KERNEL);
                         if(pHba->lct == NULL) {
                                 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
                                         pHba->name);
@@ -2835,7 +2893,7 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
                 msg[4] = 0xFFFFFFFF;        /* All devices */
                 msg[5] = 0x00000000;        /* Report now */
                 msg[6] = 0xD0000000|pHba->lct_size;
-                msg[7] = virt_to_bus(pHba->lct);
+                msg[7] = (u32)pHba->lct_pa;
 
                 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
                         printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
@@ -2846,7 +2904,8 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 
                 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
                         pHba->lct_size = pHba->lct->table_size << 2;
-                        kfree(pHba->lct);
+                        dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+                                        pHba->lct, pHba->lct_pa);
                         pHba->lct = NULL;
                 }
         } while (pHba->lct == NULL);
@@ -2871,25 +2930,30 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
 
 static int adpt_i2o_build_sys_table(void)
 {
-        adpt_hba* pHba = NULL;
+        adpt_hba* pHba = hba_chain;
         int count = 0;
 
+        if (sys_tbl)
+                dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
+                                        sys_tbl, sys_tbl_pa);
+
         sys_tbl_len = sizeof(struct i2o_sys_tbl) +        // Header + IOPs
                                 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
 
-        kfree(sys_tbl);
-
-        sys_tbl = kzalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
+        sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
+                                sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
         if (!sys_tbl) {
                 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
                 return -ENOMEM;
         }
+        memset(sys_tbl, 0, sys_tbl_len);
 
         sys_tbl->num_entries = hba_count;
         sys_tbl->version = I2OVERSION;
         sys_tbl->change_ind = sys_tbl_ind++;
 
         for(pHba = hba_chain; pHba; pHba = pHba->next) {
+                u64 addr;
                 // Get updated Status Block so we have the latest information
                 if (adpt_i2o_status_get(pHba)) {
                         sys_tbl->num_entries--;
@@ -2905,8 +2969,9 @@ static int adpt_i2o_build_sys_table(void)
                 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
                 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
                 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
-                sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
-                sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
+                addr = pHba->base_addr_phys + 0x40;
+                sys_tbl->iops[count].inbound_low = dma_low(addr);
+                sys_tbl->iops[count].inbound_high = dma_high(addr);
 
                 count++;
         }
@@ -3042,7 +3107,8 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
 
         do {
                 if (pHba->hrt == NULL) {
-                        pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
+                        pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
+                                        size, &pHba->hrt_pa, GFP_KERNEL);
                         if (pHba->hrt == NULL) {
                                 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
                                 return -ENOMEM;
@@ -3054,7 +3120,7 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
                 msg[2]= 0;
                 msg[3]= 0;
                 msg[4]= (0xD0000000 | size);    /* Simple transaction */
-                msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */
+                msg[5]= (u32)pHba->hrt_pa;        /* Dump it here */
 
                 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
                         printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
@@ -3062,8 +3128,10 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
                 }
 
                 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
-                        size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
-                        kfree(pHba->hrt);
+                        int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
+                        dma_free_coherent(&pHba->pDev->dev, size,
+                                pHba->hrt, pHba->hrt_pa);
+                        size = newsize;
                         pHba->hrt = NULL;
                 }
         } while(pHba->hrt == NULL);
@@ -3077,33 +3145,54 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
                         int group, int field, void *buf, int buflen)
 {
         u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
-        u8 *resblk;
+        u8 *opblk_va;
+        dma_addr_t opblk_pa;
+        u8 *resblk_va;
+        dma_addr_t resblk_pa;
 
         int size;
 
         /* 8 bytes for header */
-        resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
-        if (resblk == NULL) {
+        resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+                        sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
+        if (resblk_va == NULL) {
                 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
                 return -ENOMEM;
         }
 
+        opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+                        sizeof(opblk), &opblk_pa, GFP_KERNEL);
+        if (opblk_va == NULL) {
+                dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+                        resblk_va, resblk_pa);
+                printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+                        pHba->name);
+                return -ENOMEM;
+        }
         if (field == -1)                /* whole group */
                         opblk[4] = -1;
 
+        memcpy(opblk_va, opblk, sizeof(opblk));
         size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
-                opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
+                opblk_va, opblk_pa, sizeof(opblk),
+                resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
+        dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
         if (size == -ETIME) {
+                dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+                        resblk_va, resblk_pa);
                 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
                 return -ETIME;
         } else if (size == -EINTR) {
+                dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+                        resblk_va, resblk_pa);
                 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
                 return -EINTR;
         }
 
-        memcpy(buf, resblk+8, buflen);        /* cut off header */
+        memcpy(buf, resblk_va+8, buflen);        /* cut off header */
 
-        kfree(resblk);
+        dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+                        resblk_va, resblk_pa);
         if (size < 0)
                 return size;
 
@@ -3120,10 +3209,11 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
  *         ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
  */
 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
-        void *opblk, int oplen, void *resblk, int reslen)
+        void *opblk_va, dma_addr_t opblk_pa, int oplen,
+        void *resblk_va, dma_addr_t resblk_pa, int reslen)
 {
         u32 msg[9];
-        u32 *res = (u32 *)resblk;
+        u32 *res = (u32 *)resblk_va;
         int wait_status;
 
         msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
@@ -3132,12 +3222,12 @@ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
         msg[3] = 0;
         msg[4] = 0;
         msg[5] = 0x54000000 | oplen;        /* OperationBlock */
-        msg[6] = virt_to_bus(opblk);
+        msg[6] = (u32)opblk_pa;
         msg[7] = 0xD0000000 | reslen;        /* ResultBlock */
-        msg[8] = virt_to_bus(resblk);
+        msg[8] = (u32)resblk_pa;
 
         if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
-                printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
+                printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
                 return wait_status;         /* -DetailedStatus */
         }
 
@@ -3240,7 +3330,7 @@ static int adpt_i2o_systab_send(adpt_hba* pHba)
          *            Private i/o space declaration
          */
         msg[6] = 0x54000000 | sys_tbl_len;
-        msg[7] = virt_to_phys(sys_tbl);
+        msg[7] = (u32)sys_tbl_pa;
         msg[8] = 0x54000000 | 0;
         msg[9] = 0;
         msg[10] = 0xD4000000 | 0;
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index acc692915b4c..5181b92c9ddb 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -228,14 +228,18 @@ typedef struct _adpt_hba {
         u32 post_fifo_size;
         u32 reply_fifo_size;
         u32* reply_pool;
+        dma_addr_t reply_pool_pa;
         u32 sg_tablesize;    // Scatter/Gather List Size.
         u8 top_scsi_channel;
         u8 top_scsi_id;
         u8 top_scsi_lun;
 
         i2o_status_block* status_block;
+        dma_addr_t status_block_pa;
         i2o_hrt* hrt;
+        dma_addr_t hrt_pa;
         i2o_lct* lct;
+        dma_addr_t lct_pa;
         uint lct_size;
         struct i2o_device* devices;
         struct adpt_channel channel[MAX_CHANNEL];
@@ -271,7 +275,8 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
 static const char *adpt_i2o_get_class_name(int class);
 #endif
 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
-                void *opblk, int oplen, void *resblk, int reslen);
+                void *opblk, dma_addr_t opblk_pa, int oplen,
+                void *resblk, dma_addr_t resblk_pa, int reslen);
 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
 static int adpt_i2o_lct_get(adpt_hba* pHba);
 static int adpt_i2o_parse_lct(adpt_hba* pHba);