Diffstat (limited to 'arch/powerpc/kernel/dma_64.c')
-rw-r--r--   arch/powerpc/kernel/dma_64.c   249
1 files changed, 146 insertions, 103 deletions
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 6c168f6ea142..7b0e754383cf 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -1,151 +1,194 @@
 /*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses and busses using the iommu infrastructure
  */

 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-/* Include the busses we support */
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/ibmebus.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>

-static struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-#ifdef CONFIG_PCI
-        if (dev->bus == &pci_bus_type)
-                return &pci_dma_ops;
-#endif
-#ifdef CONFIG_IBMVIO
-        if (dev->bus == &vio_bus_type)
-                return &vio_dma_ops;
-#endif
-#ifdef CONFIG_IBMEBUS
-        if (dev->bus == &ibmebus_bus_type)
-                return &ibmebus_dma_ops;
-#endif
-        return NULL;
-}
+/*
+ * Generic iommu implementation
+ */

-int dma_supported(struct device *dev, u64 mask)
+static inline unsigned long device_to_mask(struct device *dev)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        if (dev->dma_mask && *dev->dma_mask)
+                return *dev->dma_mask;
+        /* Assume devices without mask can take 32 bit addresses */
+        return 0xfffffffful;
+}

-        BUG_ON(!dma_ops);

-        return dma_ops->dma_supported(dev, mask);
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag)
+{
+        return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
+                                    device_to_mask(dev), flag,
+                                    dev->archdata.numa_node);
 }
-EXPORT_SYMBOL(dma_supported);

-int dma_set_mask(struct device *dev, u64 dma_mask)
+static void dma_iommu_free_coherent(struct device *dev, size_t size,
+                                    void *vaddr, dma_addr_t dma_handle)
 {
-#ifdef CONFIG_PCI
-        if (dev->bus == &pci_bus_type)
-                return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-#ifdef CONFIG_IBMVIO
-        if (dev->bus == &vio_bus_type)
-                return -EIO;
-#endif /* CONFIG_IBMVIO */
-#ifdef CONFIG_IBMEBUS
-        if (dev->bus == &ibmebus_bus_type)
-                return -EIO;
-#endif
-        BUG();
-        return 0;
+        iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
 }
-EXPORT_SYMBOL(dma_set_mask);

-void *dma_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t flag)
+/* Creates TCEs for a user provided buffer. The user buffer must be
+ * contiguous real kernel storage (not vmalloc). The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer. The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
+                                       size_t size,
+                                       enum dma_data_direction direction)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+        return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+                                device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);

-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-                dma_addr_t dma_handle)
+
+static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
+                                   size_t size,
+                                   enum dma_data_direction direction)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
+}

-        BUG_ON(!dma_ops);

-        dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+                            int nelems, enum dma_data_direction direction)
+{
+        return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+                            device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_free_coherent);

-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-                enum dma_data_direction direction)
+static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+                               int nelems, enum dma_data_direction direction)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        return dma_ops->map_single(dev, cpu_addr, size, direction);
+        iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
 }
-EXPORT_SYMBOL(dma_map_single);

-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction)
+/* We support DMA to/from any memory page via the iommu */
+static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct iommu_table *tbl = dev->archdata.dma_data;

-        BUG_ON(!dma_ops);
-
-        dma_ops->unmap_single(dev, dma_addr, size, direction);
+        if (!tbl || tbl->it_offset > mask) {
+                printk(KERN_INFO
+                       "Warning: IOMMU offset too big for device mask\n");
+                if (tbl)
+                        printk(KERN_INFO
+                               "mask: 0x%08lx, table offset: 0x%08lx\n",
+                               mask, tbl->it_offset);
+                else
+                        printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+                               mask);
+                return 0;
+        } else
+                return 1;
 }
-EXPORT_SYMBOL(dma_unmap_single);

-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                unsigned long offset, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+struct dma_mapping_ops dma_iommu_ops = {
+        .alloc_coherent = dma_iommu_alloc_coherent,
+        .free_coherent = dma_iommu_free_coherent,
+        .map_single = dma_iommu_map_single,
+        .unmap_single = dma_iommu_unmap_single,
+        .map_sg = dma_iommu_map_sg,
+        .unmap_sg = dma_iommu_unmap_sg,
+        .dma_supported = dma_iommu_dma_supported,
+};
+EXPORT_SYMBOL(dma_iommu_ops);

-        BUG_ON(!dma_ops);
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a global offset that can be applied if
+ * the address at which memory is visible to devices is not 0.
+ */
+unsigned long dma_direct_offset;

-        return dma_ops->map_single(dev, page_address(page) + offset, size,
-                        direction);
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+                                       dma_addr_t *dma_handle, gfp_t flag)
+{
+        struct page *page;
+        void *ret;
+        int node = dev->archdata.numa_node;
+
+        /* TODO: Maybe use the numa node here too ? */
+        page = alloc_pages_node(node, flag, get_order(size));
+        if (page == NULL)
+                return NULL;
+        ret = page_address(page);
+        memset(ret, 0, size);
+        *dma_handle = virt_to_abs(ret) | dma_direct_offset;
+
+        return ret;
 }
-EXPORT_SYMBOL(dma_map_page);

-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-                enum dma_data_direction direction)
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+                                     void *vaddr, dma_addr_t dma_handle)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        free_pages((unsigned long)vaddr, get_order(size));
+}

-        BUG_ON(!dma_ops);
+static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
+                                        size_t size,
+                                        enum dma_data_direction direction)
+{
+        return virt_to_abs(ptr) | dma_direct_offset;
+}

-        dma_ops->unmap_single(dev, dma_address, size, direction);
+static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
+                                    size_t size,
+                                    enum dma_data_direction direction)
+{
 }
-EXPORT_SYMBOL(dma_unmap_page);

-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-                enum dma_data_direction direction)
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+                             int nents, enum dma_data_direction direction)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        int i;

-        BUG_ON(!dma_ops);
+        for (i = 0; i < nents; i++, sg++) {
+                sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
+                        dma_direct_offset;
+                sg->dma_length = sg->length;
+        }

-        return dma_ops->map_sg(dev, sg, nents, direction);
+        return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);

-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-                enum dma_data_direction direction)
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+                                int nents, enum dma_data_direction direction)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
+}

-        dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+        /* Could be improved to check for memory though it better be
+         * done via some global so platforms can set the limit in case
+         * they have limited DMA windows
+         */
+        return mask >= DMA_32BIT_MASK;
 }
-EXPORT_SYMBOL(dma_unmap_sg);
+
+struct dma_mapping_ops dma_direct_ops = {
+        .alloc_coherent = dma_direct_alloc_coherent,
+        .free_coherent = dma_direct_free_coherent,
+        .map_single = dma_direct_map_single,
+        .unmap_single = dma_direct_unmap_single,
+        .map_sg = dma_direct_map_sg,
+        .unmap_sg = dma_direct_unmap_sg,
+        .dma_supported = dma_direct_dma_supported,
+};
+EXPORT_SYMBOL(dma_direct_ops);
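The two exported ops structures above are only half of the mechanism: each device still has to be pointed at a backend, and the iommu callbacks expect the device's iommu table in dev->archdata.dma_data. The sketch below shows how platform setup code might wire that up. It is illustrative only: the dma_ops field of dev_archdata and the example_* helpers are assumptions for illustration, not part of this patch.

#include <linux/device.h>
#include <asm/iommu.h>

extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;
extern unsigned long dma_direct_offset;

/* Hypothetical helper: attach one device to the iommu backend above. */
static void example_use_iommu_dma(struct device *dev,
                                  struct iommu_table *tbl, int node)
{
        dev->archdata.dma_data = tbl;           /* consumed by the dma_iommu_* callbacks */
        dev->archdata.numa_node = node;         /* used for node-local allocations */
        dev->archdata.dma_ops = &dma_iommu_ops; /* assumed per-device ops hook */
}

/* Hypothetical helper: a directly mapped platform whose RAM is visible to
 * devices at a fixed bus offset would pick the direct ops instead. */
static void example_use_direct_dma(struct device *dev, unsigned long bus_offset)
{
        dma_direct_offset = bus_offset;         /* global offset OR-ed into every dma_addr_t */
        dev->archdata.dma_ops = &dma_direct_ops;
}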