author     Paul Mundt <lethal@linux-sh.org>   2009-10-25 20:50:51 -0400
committer  Paul Mundt <lethal@linux-sh.org>   2009-10-25 20:50:51 -0400
commit     f32154c9b580f11017b01bf093514c900c09364e
tree       3ec811bc69fd2e562bd9000c323fc3ae1584ce68
parent     73c926bee0e4b7739bbb992a0a3df561178dd522
sh: Add dma-mapping support for dma_alloc/free_coherent() overrides.
This moves the current dma_alloc/free_coherent() calls to a generic
variant and plugs them in for the nommu default. Other variants can
override the defaults in the dma mapping ops directly.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--   arch/sh/include/asm/dma-mapping.h   48
-rw-r--r--   arch/sh/kernel/dma-nommu.c            2
-rw-r--r--   arch/sh/mm/consistent.c              22
3 files changed, 47 insertions(+), 25 deletions(-)
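The commit message's "other variants can override the defaults" amounts to filling in the new .alloc_coherent/.free_coherent hooks of struct dma_map_ops. A minimal sketch of what such an override could look like follows; it is not part of this commit, and the myplat_* names are hypothetical:

#include <linux/dma-mapping.h>

/* Hypothetical platform override (illustration only, not in this commit).
 * A variant supplies its own coherent allocator by filling in
 * .alloc_coherent/.free_coherent; if a hook is left NULL, the inline
 * dma_alloc_coherent() wrapper in asm/dma-mapping.h returns NULL.
 */
static void *myplat_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t gfp)
{
        /* fall back to the generic helper from arch/sh/mm/consistent.c */
        return dma_generic_alloc_coherent(dev, size, dma_handle, gfp);
}

static void myplat_free_coherent(struct device *dev, size_t size,
                                 void *vaddr, dma_addr_t dma_handle)
{
        dma_generic_free_coherent(dev, size, vaddr, dma_handle);
}

struct dma_map_ops myplat_dma_ops = {
        .alloc_coherent = myplat_alloc_coherent,
        .free_coherent  = myplat_free_coherent,
        /* .map_page, .map_sg, .sync_* hooks as in nommu_dma_ops */
};

A variant that needs no special handling can simply point the hooks straight at dma_generic_alloc_coherent()/dma_generic_free_coherent(), exactly as the nommu default does in the second hunk below.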
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index b9a8f18f35a2..653076018df0 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -9,6 +9,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
+#include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
@@ -33,12 +36,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir);
 
@@ -65,7 +62,42 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
 
-#include <asm-generic/dma-coherent.h>
-#include <asm-generic/dma-mapping-common.h>
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t gfp)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+	if (!ops->alloc_coherent)
+		return NULL;
+
+	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+	return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	WARN_ON(irqs_disabled());	/* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
+
+	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, dma_handle);
+}
+
+/* arch/sh/mm/consistent.c */
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_addr, gfp_t flag);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_handle);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
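Driver code is unaffected by the header change: a hypothetical caller (dev being the driver's struct device pointer) keeps using the same API, which now dispatches through get_dma_ops(dev) and the per-variant hooks. A minimal usage sketch, not taken from this commit:

/* Illustration only: allocate and release a coherent buffer through the
 * new inline wrappers; the actual work is done by the .alloc_coherent/
 * .free_coherent hooks of the device's dma_map_ops.
 */
void *buf;
dma_addr_t handle;

buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
if (buf) {
        /* hand 'handle' to the device, use 'buf' from the CPU ... */
        dma_free_coherent(dev, PAGE_SIZE, buf, handle);
}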
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index e88fcebf860c..b336fcf40f12 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -61,6 +61,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
 }
 
 struct dma_map_ops nommu_dma_ops = {
+	.alloc_coherent		= dma_generic_alloc_coherent,
+	.free_coherent		= dma_generic_free_coherent,
 	.map_page		= nommu_map_page,
 	.map_sg			= nommu_map_sg,
 	.sync_single_for_device	= nommu_sync_single,
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 1165161e472c..ef20bbabefa0 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -33,15 +33,12 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t gfp)
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+				 dma_addr_t *dma_handle, gfp_t gfp)
 {
 	void *ret, *ret_nocache;
 	int order = get_order(size);
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
-
 	ret = (void *)__get_free_pages(gfp, order);
 	if (!ret)
 		return NULL;
@@ -63,30 +60,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
 	*dma_handle = virt_to_phys(ret);
 
-	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
-
 	return ret_nocache;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
+void dma_generic_free_coherent(struct device *dev, size_t size,
+			       void *vaddr, dma_addr_t dma_handle)
 {
 	int order = get_order(size);
 	unsigned long pfn = dma_handle >> PAGE_SHIFT;
 	int k;
 
-	WARN_ON(irqs_disabled());	/* for portability */
-
-	if (dma_release_from_coherent(dev, order, vaddr))
-		return;
-
-	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 	for (k = 0; k < (1 << order); k++)
 		__free_pages(pfn_to_page(pfn + k), 0);
+
 	iounmap(vaddr);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)