Diffstat (limited to 'drivers/ieee1394/dma.c')
 -rw-r--r--   drivers/ieee1394/dma.c   73
 1 file changed, 45 insertions, 28 deletions
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index b79ddb43e746..9fb2769d9abc 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -23,7 +23,8 @@ void dma_prog_region_init(struct dma_prog_region *prog)
 	prog->bus_addr = 0;
 }
 
-int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
+int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
+			  struct pci_dev *dev)
 {
 	/* round up to page size */
 	n_bytes = PAGE_ALIGN(n_bytes);
@@ -32,7 +33,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
 
 	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
 	if (!prog->kvirt) {
-		printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
+		printk(KERN_ERR
+		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
 		dma_prog_region_free(prog);
 		return -ENOMEM;
 	}
@@ -45,7 +47,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
 void dma_prog_region_free(struct dma_prog_region *prog)
 {
 	if (prog->kvirt) {
-		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
+		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
+				    prog->kvirt, prog->bus_addr);
 	}
 
 	prog->kvirt = NULL;
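The hunks above reflow the dma_prog_region helpers: a contiguous coherent buffer obtained with pci_alloc_consistent(), visible to the CPU as prog->kvirt and to the controller as prog->bus_addr. For context, a minimal caller sketch; it assumes only the declarations from drivers/ieee1394/dma.h, and the example_* names are hypothetical, not part of this patch.

	#include <linux/pci.h>
	#include "dma.h"

	/* Hypothetical caller: allocate a DMA program region, then release it.
	 * Mirrors the init/alloc/free calls shown in the hunks above. */
	static int example_setup_prog(struct pci_dev *pdev,
				      struct dma_prog_region *prog)
	{
		int ret;

		dma_prog_region_init(prog);
		ret = dma_prog_region_alloc(prog, 4 * PAGE_SIZE, pdev);
		if (ret)
			return ret;	/* -ENOMEM; the region already freed itself */

		/* prog->kvirt is the CPU mapping, prog->bus_addr is what the
		 * controller gets programmed with. */
		return 0;
	}

	static void example_teardown_prog(struct dma_prog_region *prog)
	{
		dma_prog_region_free(prog);	/* no-op if nothing is allocated */
	}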
@@ -65,7 +68,8 @@ void dma_region_init(struct dma_region *dma)
 	dma->sglist = NULL;
 }
 
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
+int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
+		     struct pci_dev *dev, int direction)
 {
 	unsigned int i;
 
@@ -95,14 +99,16 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
 
 	/* fill scatter/gather list with pages */
 	for (i = 0; i < dma->n_pages; i++) {
-		unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT);
+		unsigned long va =
+		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
 
 		dma->sglist[i].page = vmalloc_to_page((void *)va);
 		dma->sglist[i].length = PAGE_SIZE;
 	}
 
 	/* map sglist to the IOMMU */
-	dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
+	dma->n_dma_pages =
+	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
 
 	if (dma->n_dma_pages == 0) {
 		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
@@ -114,7 +120,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
 
 	return 0;
 
-err:
+      err:
 	dma_region_free(dma);
 	return -ENOMEM;
 }
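The hunks above cover dma_region_alloc() end to end: each page of the vmalloc-backed buffer is entered into a scatterlist, pci_map_sg() maps the list through the IOMMU, and the err: label unwinds via dma_region_free() (reflowed in the next hunk). A hedged caller sketch, again with a hypothetical example_* name and the standard PCI_DMA_* direction constants:

	/* Hypothetical caller: streaming DMA region for device-to-memory
	 * transfers, set up and torn down around use. */
	static int example_use_region(struct pci_dev *pdev, struct dma_region *dma,
				      unsigned long n_bytes)
	{
		int ret;

		dma_region_init(dma);
		ret = dma_region_alloc(dma, n_bytes, pdev, PCI_DMA_FROMDEVICE);
		if (ret)
			return ret;	/* -ENOMEM; the err: path already cleaned up */

		/* ... program the device with bus addresses obtained from
		 * dma_region_offset_to_bus(dma, offset) ... */

		dma_region_free(dma);	/* pci_unmap_sg() etc., see the next hunk */
		return 0;
	}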
@@ -122,7 +128,8 @@ err:
 void dma_region_free(struct dma_region *dma)
 {
 	if (dma->n_dma_pages) {
-		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
+		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
+			     dma->direction);
 		dma->n_dma_pages = 0;
 		dma->dev = NULL;
 	}
@@ -137,7 +144,8 @@ void dma_region_free(struct dma_region *dma)
 
 /* find the scatterlist index and remaining offset corresponding to a
    given offset from the beginning of the buffer */
-static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
+static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
+				  unsigned long *rem)
 {
 	int i;
 	unsigned long off = offset;
@@ -156,15 +164,18 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
 	return i;
 }
 
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
+dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
+				    unsigned long offset)
 {
 	unsigned long rem = 0;
 
-	struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
+	struct scatterlist *sg =
+	    &dma->sglist[dma_region_find(dma, offset, &rem)];
 	return sg_dma_address(sg) + rem;
 }
 
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
+			     unsigned long len)
 {
 	int first, last;
 	unsigned long rem;
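dma_region_offset_to_bus() in the hunk above leans on dma_region_find(), which per its comment returns the scatterlist index plus the remaining offset within that entry; the bus address is then sg_dma_address(sg) + rem. A hedged illustration of that translation with hypothetical names (not the driver's own implementation, whose body lies outside this diff):

	/* Illustration only: walk the DMA-mapped scatterlist until the offset
	 * falls inside an entry, then add the remainder to that entry's bus
	 * address.  Mirrors dma_region_find() + dma_region_offset_to_bus(). */
	static dma_addr_t example_offset_to_bus(struct scatterlist *sglist,
						unsigned int n_dma_pages,
						unsigned long offset)
	{
		unsigned int i;

		for (i = 0; i < n_dma_pages; i++) {
			if (offset < sg_dma_len(&sglist[i]))
				return sg_dma_address(&sglist[i]) + offset;
			offset -= sg_dma_len(&sglist[i]);
		}
		return 0;	/* offset lies beyond the mapped region */
	}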
@@ -175,10 +186,12 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsig
 	first = dma_region_find(dma, offset, &rem);
 	last = dma_region_find(dma, offset + len - 1, &rem);
 
-	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
+				dma->direction);
 }
 
-void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
+				unsigned long len)
 {
 	int first, last;
 	unsigned long rem;
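The two sync helpers above convert a byte range into a span of scatterlist entries (first..last) and pass it to pci_dma_sync_sg_for_cpu()/..._for_device(). A hedged usage sketch of the bracketing they are meant for; the function name is hypothetical, dma->kvirt is the CPU mapping shown elsewhere in this file:

	/* Hypothetical usage: bracket CPU reads of a streaming DMA buffer
	 * that the device writes into. */
	static void example_copy_out(struct dma_region *dma, unsigned long off,
				     void *dst, unsigned long len)
	{
		/* make the device's writes to [off, off + len) visible to the CPU */
		dma_region_sync_for_cpu(dma, off, len);

		memcpy(dst, (char *)dma->kvirt + off, len);

		/* give ownership of the range back to the device */
		dma_region_sync_for_device(dma, off, len);
	}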
@@ -189,44 +202,47 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, un
 	first = dma_region_find(dma, offset, &rem);
 	last = dma_region_find(dma, offset + len - 1, &rem);
 
-	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
+				   last - first + 1, dma->direction);
 }
 
 #ifdef CONFIG_MMU
 
 /* nopage() handler for mmap access */
 
-static struct page*
-dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
+static struct page *dma_region_pagefault(struct vm_area_struct *area,
+					 unsigned long address, int *type)
 {
 	unsigned long offset;
 	unsigned long kernel_virt_addr;
 	struct page *ret = NOPAGE_SIGBUS;
 
-	struct dma_region *dma = (struct dma_region*) area->vm_private_data;
+	struct dma_region *dma = (struct dma_region *)area->vm_private_data;
 
 	if (!dma->kvirt)
 		goto out;
 
-	if ( (address < (unsigned long) area->vm_start) ||
-	     (address > (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) )
+	if ((address < (unsigned long)area->vm_start) ||
+	    (address >
+	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
 		goto out;
 
 	if (type)
 		*type = VM_FAULT_MINOR;
 	offset = address - area->vm_start;
-	kernel_virt_addr = (unsigned long) dma->kvirt + offset;
-	ret = vmalloc_to_page((void*) kernel_virt_addr);
+	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
+	ret = vmalloc_to_page((void *)kernel_virt_addr);
 	get_page(ret);
-out:
+      out:
 	return ret;
 }
 
 static struct vm_operations_struct dma_region_vm_ops = {
 	.nopage = dma_region_pagefault,
 };
 
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+		    struct vm_area_struct *vma)
 {
 	unsigned long size;
 
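The nopage handler reflowed above recovers the struct dma_region from area->vm_private_data and resolves faulting addresses through vmalloc_to_page(), so dma_region_mmap(), whose body is not part of this hunk, has to install dma_region_vm_ops and stash the region pointer on the VMA. A sketch of that wiring, assumed rather than quoted from the file:

	/* Sketch only; the real dma_region_mmap() body is outside this diff.
	 * The nopage handler above needs vm_private_data and vm_ops set. */
	static int example_region_mmap(struct dma_region *dma, struct file *file,
				       struct vm_area_struct *vma)
	{
		if (!dma->kvirt)
			return -EINVAL;	/* nothing allocated to back the mapping */

		/* refuse mappings larger than the region itself */
		if (vma->vm_end - vma->vm_start > (dma->n_pages << PAGE_SHIFT))
			return -EINVAL;

		vma->vm_ops = &dma_region_vm_ops;
		vma->vm_private_data = dma;
		vma->vm_file = file;

		return 0;
	}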
@@ -250,11 +266,12 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_st
 	return 0;
 }
 
 #else /* CONFIG_MMU */
 
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+		    struct vm_area_struct *vma)
 {
 	return -EINVAL;
 }
 
 #endif /* CONFIG_MMU */