Diffstat (limited to 'arch/ppc64/kernel/bpa_iommu.c')
 arch/ppc64/kernel/bpa_iommu.c | 44 ++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index da1b4b7a3269..74f999b4ac9e 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -1,5 +1,5 @@
 /*
- * IOMMU implementation for Broadband Processor Architecture
+ * IOMMU implementation for Cell Broadband Processor Architecture
  * We just establish a linear mapping at boot by setting all the
  * IOPT cache entries in the CPU.
  * The mapping functions should be identical to pci_direct_iommu,
@@ -41,7 +41,7 @@
 #include <asm/system.h>
 #include <asm/ppc-pci.h>
 
-#include "bpa_iommu.h"
+#include "iommu.h"
 
 static inline unsigned long
 get_iopt_entry(unsigned long real_address, unsigned long ioid,
@@ -276,7 +276,7 @@ static void iommu_dev_setup_null(struct pci_dev *d) { }
  * for each DMA window used by any device. For now, we
  * happen to know that there is only one DMA window in use,
  * starting at iopt_phys_offset. */
-static void bpa_map_iommu(void)
+static void cell_map_iommu(void)
 {
 	unsigned long address;
 	void __iomem *base;
@@ -309,7 +309,7 @@ static void bpa_map_iommu(void)
 }
 
 
-static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
+static void *cell_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flag)
 {
 	void *ret;
@@ -317,65 +317,65 @@ static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
 	ret = (void *)__get_free_pages(flag, get_order(size));
 	if (ret != NULL) {
 		memset(ret, 0, size);
-		*dma_handle = virt_to_abs(ret) | BPA_DMA_VALID;
+		*dma_handle = virt_to_abs(ret) | CELL_DMA_VALID;
 	}
 	return ret;
 }
 
-static void bpa_free_coherent(struct device *hwdev, size_t size,
+static void cell_free_coherent(struct device *hwdev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
-static dma_addr_t bpa_map_single(struct device *hwdev, void *ptr,
+static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
 		size_t size, enum dma_data_direction direction)
 {
-	return virt_to_abs(ptr) | BPA_DMA_VALID;
+	return virt_to_abs(ptr) | CELL_DMA_VALID;
 }
 
-static void bpa_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
+static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction direction)
 {
 }
 
-static int bpa_map_sg(struct device *hwdev, struct scatterlist *sg,
+static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
 		int nents, enum dma_data_direction direction)
 {
 	int i;
 
 	for (i = 0; i < nents; i++, sg++) {
 		sg->dma_address = (page_to_phys(sg->page) + sg->offset)
-					| BPA_DMA_VALID;
+					| CELL_DMA_VALID;
 		sg->dma_length = sg->length;
 	}
 
 	return nents;
 }
 
-static void bpa_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 		int nents, enum dma_data_direction direction)
 {
 }
 
-static int bpa_dma_supported(struct device *dev, u64 mask)
+static int cell_dma_supported(struct device *dev, u64 mask)
 {
 	return mask < 0x100000000ull;
 }
 
-void bpa_init_iommu(void)
+void cell_init_iommu(void)
 {
-	bpa_map_iommu();
+	cell_map_iommu();
 
 	/* Direct I/O, IOMMU off */
 	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
 	ppc_md.iommu_bus_setup = iommu_bus_setup_null;
 
-	pci_dma_ops.alloc_coherent = bpa_alloc_coherent;
-	pci_dma_ops.free_coherent = bpa_free_coherent;
-	pci_dma_ops.map_single = bpa_map_single;
-	pci_dma_ops.unmap_single = bpa_unmap_single;
-	pci_dma_ops.map_sg = bpa_map_sg;
-	pci_dma_ops.unmap_sg = bpa_unmap_sg;
-	pci_dma_ops.dma_supported = bpa_dma_supported;
+	pci_dma_ops.alloc_coherent = cell_alloc_coherent;
+	pci_dma_ops.free_coherent = cell_free_coherent;
+	pci_dma_ops.map_single = cell_map_single;
+	pci_dma_ops.unmap_single = cell_unmap_single;
+	pci_dma_ops.map_sg = cell_map_sg;
+	pci_dma_ops.unmap_sg = cell_unmap_sg;
+	pci_dma_ops.dma_supported = cell_dma_supported;
 }
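
For context on the ops this patch renames: they implement a direct (1:1) DMA mapping, since the IOPT entries set up at boot already cover all of memory linearly. The bus address handed to a device is just the physical address with a "valid" bit (CELL_DMA_VALID, defined in the renamed iommu.h header, not shown in this diff) OR'ed in, and unmap is a no-op because there is no per-mapping state to tear down. A minimal standalone sketch of that encoding follows; the bit value and the example_* names are illustrative stand-ins, not taken from the kernel sources:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Illustrative stand-in for CELL_DMA_VALID; the real value is defined
 * in the kernel header and is not visible in this diff. */
#define EXAMPLE_DMA_VALID (1ull << 32)

typedef uint64_t dma_addr_t;

/* Mirror of cell_map_single(): bus address = physical address | valid bit. */
static dma_addr_t example_map_single(uint64_t phys_addr)
{
	return phys_addr | EXAMPLE_DMA_VALID;
}

/* Mirror of cell_unmap_single(): nothing to tear down in a linear mapping. */
static void example_unmap_single(dma_addr_t handle)
{
	(void)handle; /* no-op, as in the patch */
}

int main(void)
{
	uint64_t phys = 0x0fe01000ull;
	dma_addr_t handle = example_map_single(phys);

	/* The device-visible address keeps the low bits of the physical
	 * address; masking off the valid bit recovers it exactly. */
	assert((handle & ~EXAMPLE_DMA_VALID) == phys);
	printf("phys=0x%llx -> dma=0x%llx\n",
	       (unsigned long long)phys, (unsigned long long)handle);

	example_unmap_single(handle);
	return 0;
}

The same encoding explains cell_map_sg(): it walks the scatterlist and stamps each entry's page-derived physical address with the valid bit, so no per-entry IOMMU programming is needed at map time.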