author     Ralf Baechle <ralf@linux-mips.org>        2006-12-06 23:38:56 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 11:39:41 -0500
commit     d3fa72e4556ec1f04e46a0d561d9e785ecaa173d (patch)
tree       9c9b51dbecc27e977135b4e4793ea3dc99e8ba66
parent     f67637ee4b5d90d41160d755b9a8cca18c394586 (diff)
[PATCH] Pass struct dev pointer to dma_cache_sync()
dma_cache_sync() is ill-designed in that it does not take a struct device
pointer argument, which makes proper support for systems consisting of a mix
of coherent and non-coherent DMA devices hard. Change dma_cache_sync() to
take a struct device pointer as its first argument and fix all callers to
pass it.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
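In caller terms the conversion is mechanical: every call site gains, as a new
first argument, the struct device that the non-coherent memory belongs to. A
minimal before/after sketch (the foo_* names are hypothetical, used only to
illustrate the pattern applied throughout this patch):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical driver state, for illustration only. */
	struct foo_priv {
		struct device *dev;	/* device the DMA memory is mapped for */
		void *desc;		/* non-coherent descriptor memory */
		size_t desc_len;
	};

	static void foo_flush_desc(struct foo_priv *p)
	{
		/* old API: dma_cache_sync(p->desc, p->desc_len, DMA_TO_DEVICE); */
		dma_cache_sync(p->dev, p->desc, p->desc_len, DMA_TO_DEVICE);
	}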
-rw-r--r--  Documentation/DMA-API.txt           2
-rw-r--r--  arch/avr32/mm/dma-coherent.c        2
-rw-r--r--  arch/mips/mm/dma-coherent.c         2
-rw-r--r--  arch/mips/mm/dma-ip27.c             2
-rw-r--r--  arch/mips/mm/dma-ip32.c             3
-rw-r--r--  arch/mips/mm/dma-noncoherent.c      3
-rw-r--r--  drivers/net/lasi_82596.c           94
-rw-r--r--  drivers/scsi/53c700.c              80
-rw-r--r--  drivers/scsi/53c700.h              16
-rw-r--r--  drivers/serial/mpsc.c              22
-rw-r--r--  include/asm-alpha/dma-mapping.h     2
-rw-r--r--  include/asm-avr32/dma-mapping.h     3
-rw-r--r--  include/asm-cris/dma-mapping.h      2
-rw-r--r--  include/asm-frv/dma-mapping.h       2
-rw-r--r--  include/asm-generic/dma-mapping.h   2
-rw-r--r--  include/asm-i386/dma-mapping.h      2
-rw-r--r--  include/asm-ia64/dma-mapping.h      3
-rw-r--r--  include/asm-m68k/dma-mapping.h      2
-rw-r--r--  include/asm-mips/dma-mapping.h      2
-rw-r--r--  include/asm-parisc/dma-mapping.h    2
-rw-r--r--  include/asm-powerpc/dma-mapping.h   2
-rw-r--r--  include/asm-sh/dma-mapping.h        2
-rw-r--r--  include/asm-sh64/dma-mapping.h      2
-rw-r--r--  include/asm-sparc64/dma-mapping.h   2
-rw-r--r--  include/asm-um/dma-mapping.h        2
-rw-r--r--  include/asm-x86_64/dma-mapping.h    3
-rw-r--r--  include/asm-xtensa/dma-mapping.h    2

27 files changed, 137 insertions(+), 126 deletions(-)
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 3dc1f9125caf..805db4b2cba6 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -459,7 +459,7 @@ anything like this. You must also be extra careful about accessing
 memory you intend to sync partially.
 
 void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 
 Do a partial sync of memory that was allocated by
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 44ab8a7bdae2..b68d669f823d 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -11,7 +11,7 @@
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
 	/*
 	 * No need to sync an uncached area
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 18bc83e577c1..5697c6e250a3 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -197,7 +197,7 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index 8e9a5a8f5d65..f088344db465 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -204,7 +204,7 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index 08720a42100f..b42b6f7456e6 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -370,7 +370,8 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 4a3efc633373..8cecef0957c3 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -306,7 +306,8 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index f4d815bca643..ea392f2a5aa2 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -119,14 +119,14 @@
 #define DEB(x,y)	if (i596_debug & (x)) { y; }
 
 
-#define  CHECK_WBACK(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
+#define  CHECK_WBACK(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
-#define  CHECK_INV(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+#define  CHECK_INV(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)
 
-#define  CHECK_WBACK_INV(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+#define  CHECK_WBACK_INV(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
 
 
 #define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr.*/
@@ -449,10 +449,10 @@ static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
 
 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-	CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+	CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
 	while (--delcnt && lp->iscp.stat) {
 		udelay(10);
-		CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+		CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
 	}
 	if (!delcnt) {
 		printk("%s: %s, iscp.stat %04x, didn't clear\n",
@@ -466,10 +466,10 @@ static inline int wait_istat(struct net_device *dev, struct i596_private *lp, in
 
 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-	CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 	while (--delcnt && lp->scb.command) {
 		udelay(10);
-		CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 	}
 	if (!delcnt) {
 		printk("%s: %s, status %4.4x, cmd %4.4x.\n",
@@ -522,7 +522,7 @@ static void i596_display_data(struct net_device *dev)
 			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
 		rbd = rbd->v_next;
 	} while (rbd != lp->rbd_head);
-	CHECK_INV(lp, sizeof(struct i596_private));
+	CHECK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -592,7 +592,7 @@ static inline void init_rx_bufs(struct net_device *dev)
 	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
 	rfd->cmd = CMD_EOL|CMD_FLEX;
 
-	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 static inline void remove_rx_bufs(struct net_device *dev)
@@ -629,7 +629,7 @@ static void rebuild_rx_bufs(struct net_device *dev)
 	lp->rbd_head = lp->rbds;
 	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
 
-	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -663,8 +663,8 @@ static int init_i596_mem(struct net_device *dev)
 
 	DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
 
-	CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
-	CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
+	CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
+	CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
 
 	MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
 
@@ -678,25 +678,25 @@ static int init_i596_mem(struct net_device *dev)
 	rebuild_rx_bufs(dev);
 
 	lp->scb.command = 0;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
 	enable_irq(dev->irq);	/* enable IRQs from LAN */
 
 	DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
 	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
 	lp->cf_cmd.cmd.command = CmdConfigure;
-	CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
+	CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
 	i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
 	DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
 	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
 	lp->sa_cmd.cmd.command = CmdSASetup;
-	CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
+	CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
 	i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
 	DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
 	lp->tdr_cmd.cmd.command = CmdTDR;
-	CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
+	CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
 	i596_add_cmd(dev, &lp->tdr_cmd.cmd);
 
 	spin_lock_irqsave (&lp->lock, flags);
@@ -708,7 +708,7 @@ static int init_i596_mem(struct net_device *dev)
 	DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
 	lp->scb.command = RX_START;
 	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
 	CA(dev);
 
@@ -740,13 +740,13 @@ static inline int i596_rx(struct net_device *dev)
 
 	rfd = lp->rfd_head;		/* Ref next frame to check */
 
-	CHECK_INV(rfd, sizeof(struct i596_rfd));
+	CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
 	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
 		if (rfd->rbd == I596_NULL)
 			rbd = NULL;
 		else if (rfd->rbd == lp->rbd_head->b_addr) {
 			rbd = lp->rbd_head;
-			CHECK_INV(rbd, sizeof(struct i596_rbd));
+			CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
 		}
 		else {
 			printk("%s: rbd chain broken!\n", dev->name);
@@ -790,7 +790,7 @@ static inline int i596_rx(struct net_device *dev)
 				dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				rbd->v_data = newskb->data;
 				rbd->b_data = WSWAPchar(dma_addr);
-				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+				CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
 			}
 			else
 				skb = dev_alloc_skb(pkt_len + 2);
@@ -842,7 +842,7 @@ memory_squeeze:
 		if (rbd != NULL && (rbd->count & 0x4000)) {
 			rbd->count = 0;
 			lp->rbd_head = rbd->v_next;
-			CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+			CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
 		}
 
 		/* Tidy the frame descriptor, marking it as end of list */
@@ -860,10 +860,10 @@ memory_squeeze:
 
 		lp->scb.rfd = rfd->b_next;
 		lp->rfd_head = rfd->v_next;
-		CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
-		CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
+		CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
+		CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
 		rfd = lp->rfd_head;
-		CHECK_INV(rfd, sizeof(struct i596_rfd));
+		CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
 	}
 
 	DEB(DEB_RXFRAME, printk("frames %d\n", frames));
@@ -902,12 +902,12 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
 		ptr->v_next = NULL;
 		ptr->b_next = I596_NULL;
 		}
-		CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
+		CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
 	}
 
 	wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
 	lp->scb.cmd = I596_NULL;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 }
 
 
@@ -925,7 +925,7 @@ static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
 
 	/* FIXME: this command might cause an lpmc */
 	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 	CA(dev);
 
 	/* wait for shutdown */
@@ -951,20 +951,20 @@ static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
 	cmd->command |= (CMD_EOL | CMD_INTR);
 	cmd->v_next = NULL;
 	cmd->b_next = I596_NULL;
-	CHECK_WBACK(cmd, sizeof(struct i596_cmd));
+	CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
 
 	spin_lock_irqsave (&lp->lock, flags);
 
 	if (lp->cmd_head != NULL) {
 		lp->cmd_tail->v_next = cmd;
 		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-		CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
+		CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
 	} else {
 		lp->cmd_head = cmd;
 		wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
 		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
 		lp->scb.command = CUC_START;
-		CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 		CA(dev);
 	}
 	lp->cmd_tail = cmd;
@@ -998,12 +998,12 @@ static int i596_test(struct net_device *dev)
 	data = virt_to_dma(lp,tint);
 
 	tint[1] = -1;
-	CHECK_WBACK(tint,PAGE_SIZE);
+	CHECK_WBACK(lp, tint, PAGE_SIZE);
 
 	MPU_PORT(dev, 1, data);
 
 	for(data = 1000000; data; data--) {
-		CHECK_INV(tint,PAGE_SIZE);
+		CHECK_INV(lp, tint, PAGE_SIZE);
 		if(tint[1] != -1)
 			break;
 
@@ -1061,7 +1061,7 @@ static void i596_tx_timeout (struct net_device *dev)
 		/* Issue a channel attention signal */
 		DEB(DEB_ERRORS, printk("Kicking board.\n"));
 		lp->scb.command = CUC_START | RX_START;
-		CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 		CA (dev);
 		lp->last_restart = lp->stats.tx_packets;
 	}
@@ -1118,8 +1118,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tbd->data = WSWAPchar(tx_cmd->dma_addr);
 
 		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-		CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
-		CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
+		CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
+		CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
 		i596_add_cmd(dev, &tx_cmd->cmd);
 
 		lp->stats.tx_packets++;
@@ -1228,7 +1228,7 @@ static int __devinit i82596_probe(struct net_device *dev,
 	lp->dma_addr = dma_addr;
 	lp->dev = gen_dev;
 
-	CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
 
 	i = register_netdev(dev);
 	if (i) {
@@ -1295,7 +1295,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 			DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
 
 		while (lp->cmd_head != NULL) {
-			CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
+			CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
 			if (!(lp->cmd_head->status & STAT_C))
 				break;
 
@@ -1358,7 +1358,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 			}
 			ptr->v_next = NULL;
 			ptr->b_next = I596_NULL;
-			CHECK_WBACK(ptr, sizeof(struct i596_cmd));
+			CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
 			lp->last_cmd = jiffies;
 		}
 
@@ -1372,13 +1372,13 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 
 			ptr->command &= 0x1fff;
 			ptr = ptr->v_next;
-			CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
+			CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
 		}
 
 		if ((lp->cmd_head != NULL))
 			ack_cmd |= CUC_START;
 		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
-		CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
+		CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
 	}
 	if ((status & 0x1000) || (status & 0x4000)) {
 		if ((status & 0x4000))
@@ -1397,7 +1397,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 	}
 	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
 	lp->scb.command = ack_cmd;
-	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
 	/* DANGER: I suspect that some kind of interrupt
 	 acknowledgement aside from acking the 82596 might be needed
@@ -1426,7 +1426,7 @@ static int i596_close(struct net_device *dev)
 
 	wait_cmd(dev, lp, 100, "close1 timed out");
 	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
 	CA(dev);
 
@@ -1486,7 +1486,7 @@ static void set_multicast_list(struct net_device *dev)
 			       dev->name);
 		else {
 			lp->cf_cmd.cmd.command = CmdConfigure;
-			CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
+			CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
 			i596_add_cmd(dev, &lp->cf_cmd.cmd);
 		}
 	}
@@ -1514,7 +1514,7 @@ static void set_multicast_list(struct net_device *dev)
 			DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
 					dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
 		}
-		CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
+		CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
 		i596_add_cmd(dev, &cmd->cmd);
 	}
 }
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index acee062cd6f6..68103e508db7 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -362,11 +362,11 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	for (j = 0; j < PATCHES; j++)
 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 	/* now patch up fixed addresses. */
-	script_patch_32(script, MessageLocation,
+	script_patch_32(hostdata->dev, script, MessageLocation,
 			pScript + MSGOUT_OFFSET);
-	script_patch_32(script, StatusAddress,
+	script_patch_32(hostdata->dev, script, StatusAddress,
 			pScript + STATUS_OFFSET);
-	script_patch_32(script, ReceiveMsgAddress,
+	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
 			pScript + MSGIN_OFFSET);
 
 	hostdata->script = script;
@@ -821,8 +821,9 @@ process_extended_message(struct Scsi_Host *host,
 			shost_printk(KERN_WARNING, host,
 				"Unexpected SDTR msg\n");
 			hostdata->msgout[0] = A_REJECT_MSG;
-			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-			script_patch_16(hostdata->script, MessageCount, 1);
+			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+			script_patch_16(hostdata->dev, hostdata->script,
+					MessageCount, 1);
 			/* SendMsgOut returns, so set up the return
 			 * address */
 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -833,8 +834,9 @@ process_extended_message(struct Scsi_Host *host,
 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 		       host->host_no, pun, lun);
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+				1);
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 
 		break;
@@ -847,8 +849,9 @@ process_extended_message(struct Scsi_Host *host,
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+				1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -929,8 +932,9 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+				1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -939,7 +943,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
 	}
 	NCR_700_writel(temp, host, TEMP_REG);
 	/* set us up to receive another message */
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 	return resume_offset;
 }
 
@@ -1019,9 +1023,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 		slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
 		slot->SG[1].pAddr = 0;
 		slot->resume_offset = hostdata->pScript;
-		dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-		dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+		dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
 
 		/* queue the command for reissue */
 		slot->state = NCR_700_SLOT_QUEUED;
 		slot->flags = NCR_700_FLAG_AUTOSENSE;
@@ -1136,11 +1140,12 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			hostdata->cmd = slot->cmnd;
 
 			/* re-patch for this command */
-			script_patch_32_abs(hostdata->script, CommandAddress,
-					    slot->pCmd);
-			script_patch_16(hostdata->script,
-					CommandCount, slot->cmnd->cmd_len);
-			script_patch_32_abs(hostdata->script, SGScriptStartAddress,
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+					    CommandAddress, slot->pCmd);
+			script_patch_16(hostdata->dev, hostdata->script,
+					CommandCount, slot->cmnd->cmd_len);
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+					    SGScriptStartAddress,
 					    to32bit(&slot->pSG[0].ins));
 
 			/* Note: setting SXFER only works if we're
@@ -1150,13 +1155,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			 * should therefore always clear ACK */
 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
 				       host, SXFER_REG);
-			dma_cache_sync(hostdata->msgin,
+			dma_cache_sync(hostdata->dev, hostdata->msgin,
 				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
-			dma_cache_sync(hostdata->msgout,
+			dma_cache_sync(hostdata->dev, hostdata->msgout,
 				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
 			/* I'm just being paranoid here, the command should
 			 * already have been flushed from the cache */
-			dma_cache_sync(slot->cmnd->cmnd,
+			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
 				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
 
 
@@ -1220,7 +1225,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			hostdata->reselection_id = reselection_id;
 			/* just in case we have a stale simple tag message, clear it */
 			hostdata->msgin[1] = 0;
-			dma_cache_sync(hostdata->msgin,
+			dma_cache_sync(hostdata->dev, hostdata->msgin,
 				       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
 			if(hostdata->tag_negotiated & (1<<reselection_id)) {
 				resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
@@ -1336,7 +1341,7 @@ process_selection(struct Scsi_Host *host, __u32 dsp)
 	hostdata->cmd = NULL;
 	/* clear any stale simple tag message */
 	hostdata->msgin[1] = 0;
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_BIDIRECTIONAL);
 
 	if(id == 0xff) {
@@ -1433,29 +1438,30 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 	}
 
-	script_patch_16(hostdata->script, MessageCount, count);
+	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
 
 
-	script_patch_ID(hostdata->script,
+	script_patch_ID(hostdata->dev, hostdata->script,
 			Device_ID, 1<<scmd_id(SCp));
 
-	script_patch_32_abs(hostdata->script, CommandAddress,
+	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
 			    slot->pCmd);
-	script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
+	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+			SCp->cmd_len);
 	/* finally plumb the beginning of the SG list into the script
 	 * */
-	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
-			    to32bit(&slot->pSG[0].ins));
+	script_patch_32_abs(hostdata->dev, hostdata->script,
+			    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 	NCR_700_clear_fifo(SCp->device->host);
 
 	if(slot->resume_offset == 0)
 		slot->resume_offset = hostdata->pScript;
 	/* now perform all the writebacks and invalidates */
-	dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_FROM_DEVICE);
-	dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
 
 	/* set the synchronous period/offset */
 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1631,7 +1637,7 @@ NCR_700_intr(int irq, void *dev_id)
 				slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
 				slot->SG[i].pAddr = 0;
 			}
-			dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+			dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 			/* and pretend we disconnected after
 			 * the command phase */
 			resume_offset = hostdata->pScript + Ent_MsgInDuringData;
@@ -1897,9 +1903,9 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
 		}
 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
 		slot->SG[i].pAddr = 0;
-		dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 		DEBUG((" SETTING %08lx to %x\n",
 		       (&slot->pSG[i].ins),
 		       slot->SG[i].ins));
 	}
 	slot->resume_offset = 0;
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index f5c3caf344a7..f38822db4210 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -415,31 +415,31 @@ struct NCR_700_Host_Parameters {
 #define NCR_710_MIN_XFERP	0
 #define NCR_700_MIN_PERIOD	25 /* for SDTR message, 100ns */
 
-#define script_patch_32(script, symbol, value) \
+#define script_patch_32(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		__u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
-#define script_patch_32_abs(script, symbol, value) \
+#define script_patch_32_abs(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		(script)[A_##symbol##_used[i]] = bS_to_host(value); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
 /* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(script, symbol, value) \
+#define script_patch_ID(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -447,13 +447,13 @@ struct NCR_700_Host_Parameters {
 		val &= 0xff00ffff; \
 		val |= ((value) & 0xff) << 16; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
 }
 
-#define script_patch_16(script, symbol, value) \
+#define script_patch_16(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -461,7 +461,7 @@ struct NCR_700_Host_Parameters {
 		val &= 0xffff0000; \
 		val |= ((value) & 0xffff); \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching short field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 8eea69f29989..29823bd60fb0 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -555,7 +555,7 @@ mpsc_sdma_start_tx(struct mpsc_port_info *pi)
 	if (!mpsc_sdma_tx_active(pi)) {
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)txre,
@@ -931,7 +931,7 @@ mpsc_init_rings(struct mpsc_port_info *pi)
 	}
 	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */
 
-	dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
+	dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
 		DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1005,7 +1005,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
 
 	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 
-	dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		invalidate_dcache_range((ulong)rxre,
@@ -1029,7 +1029,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
 		}
 
 		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
-		dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)bp,
@@ -1098,7 +1098,7 @@ next_frame:
 			SDMA_DESC_CMDSTAT_F |
 			SDMA_DESC_CMDSTAT_L);
 		wmb();
-		dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)rxre,
@@ -1109,7 +1109,7 @@ next_frame:
 	pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
 	rxre = (struct mpsc_rx_desc *)(pi->rxr +
 		(pi->rxr_posn * MPSC_RXRE_SIZE));
-	dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		invalidate_dcache_range((ulong)rxre,
@@ -1143,7 +1143,7 @@ mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
 			SDMA_DESC_CMDSTAT_EI
 			: 0));
 	wmb();
-	dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
+	dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		flush_dcache_range((ulong)txre,
@@ -1192,7 +1192,7 @@ mpsc_copy_tx_data(struct mpsc_port_info *pi)
 	else	/* All tx data copied into ring bufs */
 		return;
 
-	dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+	dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		flush_dcache_range((ulong)bp,
@@ -1217,7 +1217,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
 
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)txre,
@@ -1235,7 +1235,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
 
 			txre = (struct mpsc_tx_desc *)(pi->txr +
 				(pi->txr_tail * MPSC_TXRE_SIZE));
-			dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
+			dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE,
 				DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1652,7 +1652,7 @@ mpsc_console_write(struct console *co, const char *s, uint count)
 		count--;
 	}
 
-	dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+	dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		flush_dcache_range((ulong)bp,
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index b274bf6317c7..57e09f5e3424 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -60,7 +60,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 #define dma_sync_single_range(dev, addr, off, size, dir)  do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)	   do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)	   do { } while (0)
-#define dma_cache_sync(va, size, dir)			   do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)		   do { } while (0)
 
 #define dma_get_cache_alignment()			   L1_CACHE_BYTES
 
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 44630be2ee22..0580b5d62bba 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -8,7 +8,8 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	int direction);
 
 /*
  * Return whether the given device DMA address mask can be supported
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index af704fdd3d0d..662cea70152d 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -159,7 +159,7 @@ dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 }
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index 7b97fc785f72..bcb2df68496e 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -175,7 +175,7 @@ int dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
 	flush_write_buffers();
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index b9be3fc344c7..783ab9944d70 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -295,7 +295,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 7da64c9f1738..183eebeebbdc 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -159,7 +159,7 @@ dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	flush_write_buffers();
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 4b075bc032ec..ebd5887f4b1a 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -50,7 +50,8 @@ dma_set_mask (struct device *dev, u64 mask)
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
 {
 	/*
 	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index efc89c12f837..00259ed6fc95 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -41,7 +41,7 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 	dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	/* we use coherent allocation, so not much to do here. */
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index e17f70d7b702..236d1a467cc7 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -65,7 +65,7 @@ dma_get_cache_alignment(void)
 
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index c40d48afdc52..66f0b408c669 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -197,7 +197,7 @@ dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	if(hppa_dma_ops->dma_sync_single_for_cpu)
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 3cf635b53b88..7c7de87bd8ae 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -378,7 +378,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 56cd4b977232..37ab0c131a4d 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -53,7 +53,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	consistent_sync(vaddr, size, (int)dir);
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index 68e27a8fca31..5efe906c59f7 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -35,7 +35,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	dma_cache_wback_inv((unsigned long)vaddr, size);
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 5fe0072f3f82..2f858a2df94a 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -210,7 +210,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index defb5b8307de..f0ee4fb55911 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -112,7 +112,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG();
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index c8cc4887fba6..be9ec6890723 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -185,7 +185,8 @@ static inline int dma_get_cache_alignment(void)
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
 {
 	flush_write_buffers();
 }
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index 827d1dfd9e1d..82b03b3a2ee6 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -173,7 +173,7 @@ dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	consistent_sync(vaddr, size, direction);