diff options
author | Glauber Costa <gcosta@redhat.com> | 2008-04-08 12:20:58 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-19 13:19:58 -0400 |
commit | d09d815c1b1d437a3ea89ecd92c91179266d1243 (patch) | |
tree | 860aac1786676a01909d8ad1325bd3ffbe029039 /arch/x86 | |
parent | 8e8edc6401205da3000cc3dfa76f3fd28a21d73c (diff) |
x86: isolate coherent mapping functions
i386 implements the declare coherent memory API, and x86_64 does not.
This is reflected in pieces of dma_alloc_coherent and dma_free_coherent.
Those pieces are isolated in separate functions, that are declared
as empty macros in x86_64. This way we can make the code the same.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/pci-dma_32.c | 51 | ||||
-rw-r--r-- | arch/x86/kernel/pci-dma_64.c | 11 |
2 files changed, 45 insertions, 17 deletions
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c index 818d95efc3cb..78c7640252a4 100644 --- a/arch/x86/kernel/pci-dma_32.c +++ b/arch/x86/kernel/pci-dma_32.c | |||
@@ -18,27 +18,50 @@ | |||
18 | dma_addr_t bad_dma_address __read_mostly = 0x0; | 18 | dma_addr_t bad_dma_address __read_mostly = 0x0; |
19 | EXPORT_SYMBOL(bad_dma_address); | 19 | EXPORT_SYMBOL(bad_dma_address); |
20 | 20 | ||
21 | void *dma_alloc_coherent(struct device *dev, size_t size, | 21 | static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size, |
22 | dma_addr_t *dma_handle, gfp_t gfp) | 22 | dma_addr_t *dma_handle, void **ret) |
23 | { | 23 | { |
24 | void *ret; | ||
25 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | 24 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; |
26 | int order = get_order(size); | 25 | int order = get_order(size); |
27 | /* ignore region specifiers */ | ||
28 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | ||
29 | 26 | ||
30 | if (mem) { | 27 | if (mem) { |
31 | int page = bitmap_find_free_region(mem->bitmap, mem->size, | 28 | int page = bitmap_find_free_region(mem->bitmap, mem->size, |
32 | order); | 29 | order); |
33 | if (page >= 0) { | 30 | if (page >= 0) { |
34 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); | 31 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); |
35 | ret = mem->virt_base + (page << PAGE_SHIFT); | 32 | *ret = mem->virt_base + (page << PAGE_SHIFT); |
36 | memset(ret, 0, size); | 33 | memset(*ret, 0, size); |
37 | return ret; | ||
38 | } | 34 | } |
39 | if (mem->flags & DMA_MEMORY_EXCLUSIVE) | 35 | if (mem->flags & DMA_MEMORY_EXCLUSIVE) |
40 | return NULL; | 36 | *ret = NULL; |
37 | } | ||
38 | return (mem != NULL); | ||
39 | } | ||
40 | |||
41 | static int dma_release_coherent(struct device *dev, int order, void *vaddr) | ||
42 | { | ||
43 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
44 | |||
45 | if (mem && vaddr >= mem->virt_base && vaddr < | ||
46 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | ||
47 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
48 | |||
49 | bitmap_release_region(mem->bitmap, page, order); | ||
50 | return 1; | ||
41 | } | 51 | } |
52 | return 0; | ||
53 | } | ||
54 | |||
55 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
56 | dma_addr_t *dma_handle, gfp_t gfp) | ||
57 | { | ||
58 | void *ret = NULL; | ||
59 | int order = get_order(size); | ||
60 | /* ignore region specifiers */ | ||
61 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | ||
62 | |||
63 | if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret)) | ||
64 | return ret; | ||
42 | 65 | ||
43 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | 66 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) |
44 | gfp |= GFP_DMA; | 67 | gfp |= GFP_DMA; |
@@ -56,15 +79,11 @@ EXPORT_SYMBOL(dma_alloc_coherent); | |||
56 | void dma_free_coherent(struct device *dev, size_t size, | 79 | void dma_free_coherent(struct device *dev, size_t size, |
57 | void *vaddr, dma_addr_t dma_handle) | 80 | void *vaddr, dma_addr_t dma_handle) |
58 | { | 81 | { |
59 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
60 | int order = get_order(size); | 82 | int order = get_order(size); |
61 | 83 | ||
62 | WARN_ON(irqs_disabled()); /* for portability */ | 84 | WARN_ON(irqs_disabled()); /* for portability */ |
63 | if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { | 85 | if (dma_release_coherent(dev, order, vaddr)) |
64 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | 86 | return; |
65 | 87 | free_pages((unsigned long)vaddr, order); | |
66 | bitmap_release_region(mem->bitmap, page, order); | ||
67 | } else | ||
68 | free_pages((unsigned long)vaddr, order); | ||
69 | } | 88 | } |
70 | EXPORT_SYMBOL(dma_free_coherent); | 89 | EXPORT_SYMBOL(dma_free_coherent); |
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c index e7d45cf82251..6eacd58e451b 100644 --- a/arch/x86/kernel/pci-dma_64.c +++ b/arch/x86/kernel/pci-dma_64.c | |||
@@ -39,6 +39,8 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order) | |||
39 | return page ? page_address(page) : NULL; | 39 | return page ? page_address(page) : NULL; |
40 | } | 40 | } |
41 | 41 | ||
42 | #define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0) | ||
43 | #define dma_release_coherent(dev, order, vaddr) (0) | ||
42 | /* | 44 | /* |
43 | * Allocate memory for a coherent mapping. | 45 | * Allocate memory for a coherent mapping. |
44 | */ | 46 | */ |
@@ -50,6 +52,10 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
50 | unsigned long dma_mask = 0; | 52 | unsigned long dma_mask = 0; |
51 | u64 bus; | 53 | u64 bus; |
52 | 54 | ||
55 | |||
56 | if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) | ||
57 | return memory; | ||
58 | |||
53 | if (!dev) | 59 | if (!dev) |
54 | dev = &fallback_dev; | 60 | dev = &fallback_dev; |
55 | dma_mask = dev->coherent_dma_mask; | 61 | dma_mask = dev->coherent_dma_mask; |
@@ -141,9 +147,12 @@ EXPORT_SYMBOL(dma_alloc_coherent); | |||
141 | void dma_free_coherent(struct device *dev, size_t size, | 147 | void dma_free_coherent(struct device *dev, size_t size, |
142 | void *vaddr, dma_addr_t bus) | 148 | void *vaddr, dma_addr_t bus) |
143 | { | 149 | { |
150 | int order = get_order(size); | ||
144 | WARN_ON(irqs_disabled()); /* for portability */ | 151 | WARN_ON(irqs_disabled()); /* for portability */ |
152 | if (dma_release_coherent(dev, order, vaddr)) | ||
153 | return; | ||
145 | if (dma_ops->unmap_single) | 154 | if (dma_ops->unmap_single) |
146 | dma_ops->unmap_single(dev, bus, size, 0); | 155 | dma_ops->unmap_single(dev, bus, size, 0); |
147 | free_pages((unsigned long)vaddr, get_order(size)); | 156 | free_pages((unsigned long)vaddr, order); |
148 | } | 157 | } |
149 | EXPORT_SYMBOL(dma_free_coherent); | 158 | EXPORT_SYMBOL(dma_free_coherent); |