author		Mikael Starvik <mikael.starvik@axis.com>	2005-07-27 14:44:40 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 19:26:00 -0400
commit		59c61138a556cf89692e0d5bd2c9de5df54b824f
tree		d068341a2b6384a5b8be6b86b85a2b1073f43a19 /include/asm-cris/dma-mapping.h
parent		4f18cfbf0990bfc2e8e7706eeb9e5bef898ae923
[PATCH] CRIS update: pci
Patches to make it possible to add PCI support.
Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-cris/dma-mapping.h')
-rw-r--r--	include/asm-cris/dma-mapping.h	170
1 file changed, 112 insertions(+), 58 deletions(-)
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index 0d770f60127a..0b5c3fdaefe1 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -1,125 +1,179 @@
+/* DMA mapping. Nothing tricky here, just virt_to_phys */
+
 #ifndef _ASM_CRIS_DMA_MAPPING_H
 #define _ASM_CRIS_DMA_MAPPING_H
 
-#include "scatterlist.h"
+#include <linux/mm.h>
+#include <linux/kernel.h>
 
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return 0;
-}
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 1;
-}
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+#ifdef CONFIG_PCI
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, int flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+#else
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   int flag)
 {
 	BUG();
 	return NULL;
 }
 
 static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		    dma_addr_t dma_handle)
 {
 	BUG();
 }
-
+#endif
 static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	BUG_ON(direction == DMA_NONE);
+	return virt_to_phys(ptr);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	printk("Map sg\n");
+	return nents;
 }
 
 static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
 }
 
 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG();
-	return 1;
-}
 
 static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	     enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
 static inline void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
 {
-	BUG();
 }
 
 static inline void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
-	    enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction)
 {
-	BUG();
 }
 
-/* Now for the API extensions over the pci_ one */
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+}
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+}
 
-static inline int
-dma_get_cache_alignment(void)
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
 {
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
 }
 
 static inline void
-dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
-		      unsigned long offset, size_t size,
-		      enum dma_data_direction direction)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
 {
-	BUG();
 }
 
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if(mask < 0x00ffffff)
+		return 0;
+
+	return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+	if(!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+	return (1 << L1_CACHE_SHIFT_MAX);
+}
+
+#define dma_is_consistent(d)	(1)
+
 static inline void
 dma_cache_sync(void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
 }
 
-#endif
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
 
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
+#endif
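With these definitions in place, a CRIS driver can use the generic DMA API directly. The sketch below is illustrative only and not part of the patch: the function name, device pointer, and buffer are invented for the example. It simply exercises the mask, map, and unmap paths defined in the header above, where a streaming mapping reduces to virt_to_phys() because these hooks do no cache maintenance or bounce buffering on CRIS.

/* Hypothetical usage sketch, not from the patch: 'my_cris_dma_example',
 * 'my_dev', and 'buf' are invented names for illustration.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int my_cris_dma_example(struct device *my_dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dma_supported() above rejects masks below 0x00ffffff, so this
	 * is the tightest mask dma_set_mask() will accept. */
	if (dma_set_mask(my_dev, 0x00ffffff))
		return -EIO;

	/* On CRIS this is just virt_to_phys(buf); no IOMMU is involved. */
	handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... program the device with 'handle' and run the transfer ... */

	dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

Note that dma_alloc_coherent() only becomes a real out-of-line allocator under CONFIG_PCI in this header; the non-PCI stub still BUG()s, so coherent allocations remain PCI-only on CRIS after this patch.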