path: root/arch/x86/kernel/pci-dma.c
author		Ingo Molnar <mingo@elte.hu>	2008-08-11 06:57:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 06:57:01 -0400
commit		6de9c70882ecdee63a652d493bf2353963bd4c22 (patch)
tree		9d219e705492331c97f5f7dccce3b0b1a29251bf /arch/x86/kernel/pci-dma.c
parent		d406d21d90dce2e66c7eb4a44605aac947fe55fb (diff)
parent		796aadeb1b2db9b5d463946766c5bbfd7717158c (diff)

Merge branch 'linus' into x86/cleanups
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--	arch/x86/kernel/pci-dma.c	157
1 file changed, 26 insertions(+), 131 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 88ddd04cfa98..f704cb51ff82 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
 
 static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
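
The const qualifier is dropped so the global table has the same non-const type as the per-device pointer that get_dma_ops(), used throughout the new side of this diff, hands back. For orientation, a sketch of the ops table this file dispatches through; only the hooks exercised by this patch are shown, and the exact member list in asm-x86/dma-mapping.h of this era may differ:

/* Sketch of struct dma_mapping_ops, reduced to the hooks this patch
 * dispatches through; illustrative, not the full in-tree definition. */
struct dma_mapping_ops {
	int        (*dma_supported)(struct device *dev, u64 mask);
	void *     (*alloc_coherent)(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t gfp);
	dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t paddr,
				 size_t size, int direction);
	void       (*unmap_single)(struct device *dev, dma_addr_t addr,
				   size_t size, int direction);
	/* ...map/unmap, sync and scatter-gather hooks elided... */
};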
@@ -123,6 +123,14 @@ void __init pci_iommu_alloc(void)
 
 	pci_swiotlb_init();
 }
+
+unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+{
+	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
+
+	return size >> PAGE_SHIFT;
+}
+EXPORT_SYMBOL(iommu_num_pages);
 #endif
 
 /*
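
The new iommu_num_pages() converts a possibly unaligned (addr, len) range into a page count: the offset of addr inside its page is added to the length before rounding up, so a range that straddles a page boundary is charged for every page it touches. A minimal userspace rendering of the same arithmetic, assuming 4 KiB pages, with one worked case:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
/* same definition as the kernel's roundup() for these operand types */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

static unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
{
	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

	return size >> PAGE_SHIFT;
}

int main(void)
{
	/* 0x1ff0 is 16 bytes short of a page boundary, so a 32-byte
	 * buffer straddles two pages despite being far smaller than
	 * one page: (0xff0 + 32) rounds up to 0x2000, i.e. 2 pages. */
	printf("%lu\n", iommu_num_pages(0x1ff0, 32));	/* prints 2 */
	return 0;
}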
@@ -192,126 +200,10 @@ static __init int iommu_setup(char *p)
 }
 early_param("iommu", iommu_setup);
 
-#ifdef CONFIG_X86_32
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-				dma_addr_t device_addr, size_t size, int flags)
-{
-	void __iomem *mem_base = NULL;
-	int pages = size >> PAGE_SHIFT;
-	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
-	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-		goto out;
-	if (!size)
-		goto out;
-	if (dev->dma_mem)
-		goto out;
-
-	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
-	mem_base = ioremap(bus_addr, size);
-	if (!mem_base)
-		goto out;
-
-	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-	if (!dev->dma_mem)
-		goto out;
-	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dev->dma_mem->bitmap)
-		goto free1_out;
-
-	dev->dma_mem->virt_base = mem_base;
-	dev->dma_mem->device_base = device_addr;
-	dev->dma_mem->size = pages;
-	dev->dma_mem->flags = flags;
-
-	if (flags & DMA_MEMORY_MAP)
-		return DMA_MEMORY_MAP;
-
-	return DMA_MEMORY_IO;
-
- free1_out:
-	kfree(dev->dma_mem);
- out:
-	if (mem_base)
-		iounmap(mem_base);
-	return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-
-	if (!mem)
-		return;
-	dev->dma_mem = NULL;
-	iounmap(mem->virt_base);
-	kfree(mem->bitmap);
-	kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-					dma_addr_t device_addr, size_t size)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-	int pos, err;
-	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
-
-	pages >>= PAGE_SHIFT;
-
-	if (!mem)
-		return ERR_PTR(-EINVAL);
-
-	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-	if (err != 0)
-		return ERR_PTR(err);
-	return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
-{
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-	int order = get_order(size);
-
-	if (mem) {
-		int page = bitmap_find_free_region(mem->bitmap, mem->size,
-						     order);
-		if (page >= 0) {
-			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-			*ret = mem->virt_base + (page << PAGE_SHIFT);
-			memset(*ret, 0, size);
-		}
-		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-			*ret = NULL;
-	}
-	return (mem != NULL);
-}
-
-static int dma_release_coherent(struct device *dev, int order, void *vaddr)
-{
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
-	if (mem && vaddr >= mem->virt_base && vaddr <
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-		bitmap_release_region(mem->bitmap, page, order);
-		return 1;
-	}
-	return 0;
-}
-#else
-#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
-#define dma_release_coherent(dev, order, vaddr) (0)
-#endif /* CONFIG_X86_32 */
-
 int dma_supported(struct device *dev, u64 mask)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
 		dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +211,8 @@ int dma_supported(struct device *dev, u64 mask)
 	}
 #endif
 
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
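
Every converted call site now fetches its table through a local ops variable from get_dma_ops() instead of dereferencing the global directly, which is what allows a device to carry its own DMA operations. A sketch of that helper as the x86 headers of this era define it; the archdata fallback is the 64-bit path, and the details should be treated as illustrative rather than a verbatim copy:

/* Sketch of get_dma_ops(): prefer ops installed on the device itself,
 * fall back to the global dma_ops set up at boot otherwise. */
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	return dev->archdata.dma_ops;
#endif
}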
@@ -367,6 +259,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	void *memory = NULL;
 	struct page *page;
 	unsigned long dma_mask = 0;
@@ -376,7 +269,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 
-	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
 		return memory;
 
 	if (!dev) {
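
The rename from dma_alloc_from_coherent_mem() to dma_alloc_from_coherent() pairs with the large deletion above: the X86_32-only pool code removed from this file appears to have been replaced by a generic implementation pulled in through the 'linus' side of this merge, leaving the driver-facing API unchanged. A hedged sketch of how a driver carves out such a per-device pool; the MYDEV_SRAM_* constants are hypothetical:

/* Sketch: declaring a per-device coherent pool, which is the memory
 * dma_alloc_from_coherent() hands out before this function falls back
 * to the page allocator. MYDEV_SRAM_* are hypothetical addresses. */
static int mydev_setup_sram(struct device *dev)
{
	int rc;

	rc = dma_declare_coherent_memory(dev, MYDEV_SRAM_BUS_ADDR,
					 MYDEV_SRAM_DEV_ADDR,
					 MYDEV_SRAM_SIZE, DMA_MEMORY_MAP);
	if (!(rc & DMA_MEMORY_MAP))
		return -ENOMEM;
	return 0;
}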
@@ -435,8 +328,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		/* Let low level make its own zone decisions */
 		gfp &= ~(GFP_DMA32|GFP_DMA);
 
-		if (dma_ops->alloc_coherent)
-			return dma_ops->alloc_coherent(dev, size,
+		if (ops->alloc_coherent)
+			return ops->alloc_coherent(dev, size,
 						   dma_handle, gfp);
 		return NULL;
 	}
@@ -448,14 +341,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		}
 	}
 
-	if (dma_ops->alloc_coherent) {
+	if (ops->alloc_coherent) {
 		free_pages((unsigned long)memory, get_order(size));
 		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+		return ops->alloc_coherent(dev, size, dma_handle, gfp);
 	}
 
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+	if (ops->map_simple) {
+		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
 					      size,
 					      PCI_DMA_BIDIRECTIONAL);
 		if (*dma_handle != bad_dma_address)
@@ -477,12 +370,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t bus)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
-	if (dma_release_coherent(dev, order, vaddr))
+	if (dma_release_from_coherent(dev, order, vaddr))
 		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, bus, size, 0);
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
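
End to end, the rewrite leaves the consumer contract of dma_alloc_coherent() and dma_free_coherent() intact; only the dispatch underneath changed. A minimal consumer-side sketch, where the device pointer and the 4 KiB size are illustrative:

/* Sketch: the driver-facing contract this file implements, unchanged
 * by the patch. 'dev' would be a real bus-mastering device; the 4 KiB
 * ring size is arbitrary. */
static int mydev_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ...program ring_dma into the device, touch 'ring' from the CPU... */

	dma_free_coherent(dev, 4096, ring, ring_dma);
	return 0;
}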