path: root/arch/powerpc/kernel/dma_64.c
author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2006-11-11 01:25:02 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-12-04 04:38:40 -0500
commit	12d04eef927bf61328af2c7cbe756c96f98ac3bf (patch)
tree	18865369100e9059c7e883dec93ea67f7b52a287 /arch/powerpc/kernel/dma_64.c
parent	7c719871ff4d5f15b71f0138d08b758281b58631 (diff)
[POWERPC] Refactor 64 bits DMA operations
This patch completely refactors DMA operations for 64 bits powerpc. 32 bits is untouched for now.

We use the new dev_archdata structure to add the dma operations pointer and associated data to struct device. While at it, we also add the OF node pointer and numa node. In the future, we might want to look into merging that with pci_dn as well.

The old vio, pci-iommu and pci-direct DMA ops are gone. They are now replaced by a set of generic iommu and direct DMA ops (non PCI specific) that can be used by bus types. The toplevel implementation is now inline.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
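(Illustrative sketch, not part of this commit: the dev_archdata layout and the inline toplevel wrappers live in headers outside this file's diff, so the exact field order and the wrapper shown here are assumptions based on the changelog; only dma_data and numa_node are visible in the diff below.)

struct dev_archdata {
	struct device_node	*of_node;	/* OF node pointer */
	struct dma_mapping_ops	*dma_ops;	/* per-device DMA ops */
	void			*dma_data;	/* e.g. an iommu_table */
	int			numa_node;	/* NUMA node, if relevant */
};

/* A toplevel wrapper can then dispatch inline through the ops pointer
 * instead of switching on the bus type as the old dma_64.c did:
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = dev->archdata.dma_ops;

	BUG_ON(!ops);
	return ops->map_single(dev, ptr, size, dir);
}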
Diffstat (limited to 'arch/powerpc/kernel/dma_64.c')
-rw-r--r--	arch/powerpc/kernel/dma_64.c	240
1 file changed, 137 insertions(+), 103 deletions(-)
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 6c168f6ea142..4e6551199782 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -1,151 +1,185 @@
 /*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses and busses using the iommu infrastructure
  */
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-/* Include the busses we support */
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/ibmebus.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
 
-static struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return &pci_dma_ops;
-#endif
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return &vio_dma_ops;
-#endif
-#ifdef CONFIG_IBMEBUS
-	if (dev->bus == &ibmebus_bus_type)
-		return &ibmebus_dma_ops;
-#endif
-	return NULL;
-}
+/*
+ * Generic iommu implementation
+ */
 
-int dma_supported(struct device *dev, u64 mask)
+static inline unsigned long device_to_mask(struct device *dev)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	if (dev->dma_mask && *dev->dma_mask)
+		return *dev->dma_mask;
+	/* Assume devices without mask can take 32 bit addresses */
+	return 0xfffffffful;
+}
 
-	BUG_ON(!dma_ops);
 
-	return dma_ops->dma_supported(dev, mask);
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
+				      dma_addr_t *dma_handle, gfp_t flag)
+{
+	return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
+				    device_to_mask(dev), flag,
+				    dev->archdata.numa_node);
 }
-EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static void dma_iommu_free_coherent(struct device *dev, size_t size,
+				    void *vaddr, dma_addr_t dma_handle)
 {
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return -EIO;
-#endif /* CONFIG_IBMVIO */
-#ifdef CONFIG_IBMEBUS
-	if (dev->bus == &ibmebus_bus_type)
-		return -EIO;
-#endif
-	BUG();
-	return 0;
+	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
 }
-EXPORT_SYMBOL(dma_set_mask);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag)
+/* Creates TCEs for a user provided buffer. The user buffer must be
+ * contiguous real kernel storage (not vmalloc). The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer. The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
+				       size_t size,
+				       enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+				device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle)
+
+static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
+				   size_t size,
+				   enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
+}
 
-	BUG_ON(!dma_ops);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+			    int nelems, enum dma_data_direction direction)
+{
+	return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+			    device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-		enum dma_data_direction direction)
+static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+			       int nelems, enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
+	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
 }
-EXPORT_SYMBOL(dma_map_single);
 
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction)
+/* We support DMA to/from any memory page via the iommu */
+static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct iommu_table *tbl = dev->archdata.dma_data;
 
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
+	if (!tbl || tbl->it_offset > mask) {
+		printk(KERN_INFO
+		       "Warning: IOMMU offset too big for device mask\n");
+		if (tbl)
+			printk(KERN_INFO
+			       "mask: 0x%08lx, table offset: 0x%08lx\n",
+			       mask, tbl->it_offset);
+		else
+			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+			       mask);
+		return 0;
+	} else
+		return 1;
 }
-EXPORT_SYMBOL(dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+struct dma_mapping_ops dma_iommu_ops = {
+	.alloc_coherent	= dma_iommu_alloc_coherent,
+	.free_coherent	= dma_iommu_free_coherent,
+	.map_single	= dma_iommu_map_single,
+	.unmap_single	= dma_iommu_unmap_single,
+	.map_sg		= dma_iommu_map_sg,
+	.unmap_sg	= dma_iommu_unmap_sg,
+	.dma_supported	= dma_iommu_dma_supported,
+};
+EXPORT_SYMBOL(dma_iommu_ops);
 
-	BUG_ON(!dma_ops);
+/*
+ * Generic direct DMA implementation
+ */
 
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-			direction);
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *ret;
+
+	/* TODO: Maybe use the numa node here too ? */
+	ret = (void *)__get_free_pages(flag, get_order(size));
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_abs(ret);
+	}
+	return ret;
 }
-EXPORT_SYMBOL(dma_map_page);
 
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-		enum dma_data_direction direction)
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	free_pages((unsigned long)vaddr, get_order(size));
+}
 
-	BUG_ON(!dma_ops);
+static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
+					size_t size,
+					enum dma_data_direction direction)
+{
+	return virt_to_abs(ptr);
+}
 
-	dma_ops->unmap_single(dev, dma_address, size, direction);
+static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
 }
-EXPORT_SYMBOL(dma_unmap_page);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction)
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	int i;
 
-	BUG_ON(!dma_ops);
+	for (i = 0; i < nents; i++, sg++) {
+		sg->dma_address = page_to_phys(sg->page) + sg->offset;
+		sg->dma_length = sg->length;
+	}
 
-	return dma_ops->map_sg(dev, sg, nents, direction);
+	return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-		enum dma_data_direction direction)
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+}
+
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+	/* Could be improved to check for memory though it better be
+	 * done via some global so platforms can set the limit in case
+	 * they have limited DMA windows
+	 */
+	return mask >= DMA_32BIT_MASK;
 }
-EXPORT_SYMBOL(dma_unmap_sg);
+
+struct dma_mapping_ops dma_direct_ops = {
+	.alloc_coherent	= dma_direct_alloc_coherent,
+	.free_coherent	= dma_direct_free_coherent,
+	.map_single	= dma_direct_map_single,
+	.unmap_single	= dma_direct_unmap_single,
+	.map_sg		= dma_direct_map_sg,
+	.unmap_sg	= dma_direct_unmap_sg,
+	.dma_supported	= dma_direct_dma_supported,
+};
+EXPORT_SYMBOL(dma_direct_ops);