author    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2009-01-05 09:36:15 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-01-06 08:06:51 -0500
commit    cdc28d59a31e3fd711982bd07600f3e5b449b9f7 (patch)
tree      7ddb4092f76bf03931e00919b77bed2a40f36216 /arch/ia64/sn/pci
parent    fad6a029c4afa499dddd8e9ff70264bb977ea7bf (diff)
make sn DMA mapping functions static
Now we don't need to export sn DMA mapping functions.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
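The pattern behind the patch: once every implementation function is reachable only through the sn_dma_ops function-pointer table (see the end of the diff), the symbols themselves no longer have to be visible outside pci_dma.c, so the functions can be made static and their EXPORT_SYMBOL() lines dropped. The following is a minimal, self-contained userspace sketch of that pattern, not kernel code; the names demo_dma_ops, demo_alloc_coherent and demo_free_coherent are invented for illustration only.

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative ops table, loosely modelled on a dma_mapping_ops-style struct. */
    struct demo_dma_ops {
            void *(*alloc_coherent)(size_t size);
            void  (*free_coherent)(void *cpu_addr);
    };

    /* static: the function symbol is not visible (or exportable) outside this file. */
    static void *demo_alloc_coherent(size_t size)
    {
            printf("alloc %zu bytes\n", size);
            return NULL;    /* placeholder; a real provider would return memory */
    }

    static void demo_free_coherent(void *cpu_addr)
    {
            printf("free %p\n", cpu_addr);
    }

    /* The ops table is the only handle callers need. */
    static struct demo_dma_ops demo_ops = {
            .alloc_coherent = demo_alloc_coherent,
            .free_coherent  = demo_free_coherent,
    };

    int main(void)
    {
            void *buf = demo_ops.alloc_coherent(4096);
            demo_ops.free_coherent(buf);
            return 0;
    }

Callers dispatch through demo_ops rather than naming the functions directly, which is exactly why the exports become unnecessary.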
Diffstat (limited to 'arch/ia64/sn/pci')
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c | 64
1 file changed, 26 insertions(+), 38 deletions(-)
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 174a74e63882..efdd69490009 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -32,7 +32,7 @@
  * this function. Of course, SN only supports devices that have 32 or more
  * address bits when using the PMU.
  */
-int sn_dma_supported(struct device *dev, u64 mask)
+static int sn_dma_supported(struct device *dev, u64 mask)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -40,7 +40,6 @@ int sn_dma_supported(struct device *dev, u64 mask)
 		return 0;
 	return 1;
 }
-EXPORT_SYMBOL(sn_dma_supported);
 
 /**
  * sn_dma_set_mask - set the DMA mask
@@ -76,8 +75,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
  * queue for a SCSI controller). See Documentation/DMA-API.txt for
  * more information.
  */
-void *sn_dma_alloc_coherent(struct device *dev, size_t size,
-			    dma_addr_t * dma_handle, gfp_t flags)
+static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+				   dma_addr_t * dma_handle, gfp_t flags)
 {
 	void *cpuaddr;
 	unsigned long phys_addr;
@@ -125,7 +124,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 
 	return cpuaddr;
 }
-EXPORT_SYMBOL(sn_dma_alloc_coherent);
 
 /**
  * sn_pci_free_coherent - free memory associated with coherent DMAable region
@@ -137,8 +135,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
  * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
  * any associated IOMMU mappings.
  */
-void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-			  dma_addr_t dma_handle)
+static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+				 dma_addr_t dma_handle)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -148,7 +146,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 	provider->dma_unmap(pdev, dma_handle, 0);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
-EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
  * sn_dma_map_single_attrs - map a single page for DMA
@@ -174,9 +171,9 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
-				   size_t size, int direction,
-				   struct dma_attrs *attrs)
+static dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
+					  size_t size, int direction,
+					  struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
@@ -202,7 +199,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
 	}
 	return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single_attrs);
 
 /**
  * sn_dma_unmap_single_attrs - unamp a DMA mapped page
@@ -216,9 +212,9 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs);
  * by @dma_handle into the coherence domain. On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
-void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
-			       size_t size, int direction,
-			       struct dma_attrs *attrs)
+static void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+				      size_t size, int direction,
+				      struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -227,7 +223,6 @@ void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
 
 	provider->dma_unmap(pdev, dma_addr, direction);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
  * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
@@ -239,9 +234,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
  *
 * Unmap a set of streaming mode DMA translations.
 */
-void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
-			   int nhwentries, int direction,
-			   struct dma_attrs *attrs)
+static void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				  int nhwentries, int direction,
+				  struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -256,7 +251,6 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
 		sg->dma_length = 0;
 	}
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
  * sn_dma_map_sg_attrs - map a scatterlist for DMA
@@ -273,8 +267,8 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
  *
 * Maps each entry of @sg for DMA.
 */
-int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
-			int nhwentries, int direction, struct dma_attrs *attrs)
+static int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			       int nhwentries, int direction, struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
@@ -321,41 +315,35 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 
 	return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
-void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				size_t size, int direction)
+static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+				       size_t size, int direction)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
 
-void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-				   size_t size, int direction)
+static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+					  size_t size, int direction)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_single_for_device);
 
-void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-			    int nelems, int direction)
+static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				   int nelems, int direction)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
 
-void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-			       int nelems, int direction)
+static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				      int nelems, int direction)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
-int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
-EXPORT_SYMBOL(sn_dma_mapping_error);
 
 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
@@ -467,7 +455,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	return ret;
 }
 
-struct dma_mapping_ops sn_dma_ops = {
+static struct dma_mapping_ops sn_dma_ops = {
 	.alloc_coherent = sn_dma_alloc_coherent,
 	.free_coherent = sn_dma_free_coherent,
 	.map_single_attrs = sn_dma_map_single_attrs,