author:    Arnd Bergmann <arnd@arndb.de>    2009-08-09 22:53:10 -0400
committer: Ingo Molnar <mingo@elte.hu>      2009-08-10 03:34:57 -0400
commit:    a8ad568dd8ca122aa8048ea067d3599820d1c1b4
tree:      3d8e4674a254f24906c05e0f7be880cf495b39ed /include/asm-generic
parent:    b683d42693c4e92b838117f5c6f7b90bfa1525c9
dma-ops: Remove flush_write_buffers() in dma-mapping-common.h
This moves flush_write_buffers() from asm-generic/dma-mapping-common.h
to arch/x86/kernel/pci-nommu.c, so that IA64 and SPARC no longer have
to define a no-op flush_write_buffers().
dma-mapping-common.h is used by x86 and IA64 (and soon SPARC), but
only x86 built with CONFIG_X86_OOSTORE or CONFIG_X86_PPRO_FENCE
actually uses flush_write_buffers(). Those options are only usable
with kernel/pci-nommu.c (that is, not with other x86 IOMMU
implementations such as SWIOTLB, VT-d, etc.), so the
flush_write_buffers() calls can safely move from
asm-generic/dma-mapping-common.h into arch/x86/kernel/pci-nommu.c.
A sketch of the x86 side follows.
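The flush does not disappear; the companion x86 change adds it back
inside the nommu dma_map_ops. A minimal sketch of what the
arch/x86/kernel/pci-nommu.c side looks like; the nommu_sync_* hook
names are illustrative, but the signatures match the ops->sync_*
callbacks invoked by the generic wrappers in the diff below:

/*
 * Sketch, not the verbatim companion patch: flush_write_buffers()
 * moves into the x86 nommu dma_map_ops sync hooks, so architectures
 * without write buffers need no stub.  Hook names are illustrative.
 */
static void nommu_sync_single_for_device(struct device *dev,
					 dma_addr_t addr, size_t size,
					 enum dma_data_direction dir)
{
	flush_write_buffers();
}

static void nommu_sync_sg_for_device(struct device *dev,
				     struct scatterlist *sg, int nelems,
				     enum dma_data_direction dir)
{
	flush_write_buffers();
}

static struct dma_map_ops nommu_dma_ops = {
	/* ...alloc/map/unmap hooks unchanged... */
	.sync_single_for_device	= nommu_sync_single_for_device,
	.sync_sg_for_device	= nommu_sync_sg_for_device,
};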
Further discussion: http://lkml.org/lkml/2009/6/28/104
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: davem@davemloft.net
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
LKML-Reference: <1249872797-1314-2-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/dma-mapping-common.h | 6 ------
1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 5406a601185c..e694263445f7 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -103,7 +103,6 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 	if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-	flush_write_buffers();
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
@@ -116,7 +115,6 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
-	flush_write_buffers();
 }
 
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -132,7 +130,6 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
 		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
 
-		flush_write_buffers();
 	} else
 		dma_sync_single_for_cpu(dev, addr, size, dir);
 }
@@ -150,7 +147,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 		ops->sync_single_range_for_device(dev, addr, offset, size, dir);
 		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
 
-		flush_write_buffers();
 	} else
 		dma_sync_single_for_device(dev, addr, size, dir);
 }
@@ -165,7 +161,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-	flush_write_buffers();
 }
 
 static inline void
@@ -179,7 +174,6 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 
-	flush_write_buffers();
 }
 
 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
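From a driver's point of view nothing changes: the generic wrappers
above still dispatch to ops->sync_*, which on x86/nommu now performs
the flush. A hypothetical caller, where dev, buf and len are
placeholders:

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver snippet: dma_sync_single_for_device() below no
 * longer calls flush_write_buffers() itself; on x86/nommu the flush
 * now happens inside ops->sync_single_for_device().
 */
static void example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return;

	/* CPU fills buf, then hands it to the device: */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	/* ...start the device, wait for completion... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}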