Diffstat (limited to 'drivers/gpu/drm/drm_vm.c')
 -rw-r--r--   drivers/gpu/drm/drm_vm.c   | 673
 1 file changed, 673 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
new file mode 100644
index 000000000000..c234c6f24a8d
--- /dev/null
+++ b/drivers/gpu/drm/drm_vm.c
@@ -0,0 +1,673 @@
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
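
/*
 * Usage sketch (illustrative only, mirroring the real call site in
 * drm_mmap_locked() below): a register or framebuffer map is remapped
 * with the protection bits this helper computes:
 *
 *	vma->vm_page_prot = drm_io_prot(map->type, vma);
 *	if (io_remap_pfn_range(vma, vma->vm_start,
 *			       (map->offset + offset) >> PAGE_SHIFT,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot))
 *		return -EAGAIN;
 */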
68 | |||
69 | static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) | ||
70 | { | ||
71 | pgprot_t tmp = vm_get_page_prot(vma->vm_flags); | ||
72 | |||
73 | #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) | ||
74 | tmp |= _PAGE_NO_CACHE; | ||
75 | #endif | ||
76 | return tmp; | ||
77 | } | ||
78 | |||
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		unsigned long offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		/*
		 * If the loop ran to completion, agpmem points at the list
		 * head, not at a valid entry, so test for that rather than
		 * for NULL (which list_for_each_entry() never produces).
		 */
		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
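
/*
 * Worked example (illustrative figures only): with map->offset =
 * 0xe0000000 and a fault at vma->vm_start + 0x5000, baddr is
 * 0xe0005000.  If an agpmem entry is bound at 0xe0000000 and covers
 * 16 pages, the page index is (0xe0005000 - 0xe0000000) >> PAGE_SHIFT
 * = 5, and entry 5 of agpmem->memory->memory supplies the physical
 * page that satisfies the fault.
 */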
166 | |||
167 | /** | ||
168 | * \c nopage method for shared virtual memory. | ||
169 | * | ||
170 | * \param vma virtual memory area. | ||
171 | * \param address access address. | ||
172 | * \return pointer to the page structure. | ||
173 | * | ||
174 | * Get the mapping, find the real physical page to map, get the page, and | ||
175 | * return it. | ||
176 | */ | ||
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
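
/*
 * Example (illustrative): for a fault at vma->vm_start + 0x3000, the
 * handler computes map->handle + 0x3000 inside the vmalloc'ed SHM area
 * and vmalloc_to_page() returns the struct page backing that fourth
 * 4 KB page of the mapping.
 */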
198 | |||
199 | /** | ||
200 | * \c close method for shared virtual memory. | ||
201 | * | ||
202 | * \param vma virtual memory area. | ||
203 | * | ||
204 | * Deletes map information if we are the last | ||
205 | * person to close a mapping and it's not in the global maplist. | ||
206 | */ | ||
207 | static void drm_vm_shm_close(struct vm_area_struct *vma) | ||
208 | { | ||
209 | struct drm_file *priv = vma->vm_file->private_data; | ||
210 | struct drm_device *dev = priv->minor->dev; | ||
211 | struct drm_vma_entry *pt, *temp; | ||
212 | struct drm_map *map; | ||
213 | struct drm_map_list *r_list; | ||
214 | int found_maps = 0; | ||
215 | |||
216 | DRM_DEBUG("0x%08lx,0x%08lx\n", | ||
217 | vma->vm_start, vma->vm_end - vma->vm_start); | ||
218 | atomic_dec(&dev->vma_count); | ||
219 | |||
220 | map = vma->vm_private_data; | ||
221 | |||
222 | mutex_lock(&dev->struct_mutex); | ||
223 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { | ||
224 | if (pt->vma->vm_private_data == map) | ||
225 | found_maps++; | ||
226 | if (pt->vma == vma) { | ||
227 | list_del(&pt->head); | ||
228 | drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); | ||
229 | } | ||
230 | } | ||
231 | |||
232 | /* We were the only map that was found */ | ||
233 | if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { | ||
234 | /* Check to see if we are in the maplist, if we are not, then | ||
235 | * we delete this mappings information. | ||
236 | */ | ||
237 | found_maps = 0; | ||
238 | list_for_each_entry(r_list, &dev->maplist, head) { | ||
239 | if (r_list->map == map) | ||
240 | found_maps++; | ||
241 | } | ||
242 | |||
243 | if (!found_maps) { | ||
244 | drm_dma_handle_t dmah; | ||
245 | |||
246 | switch (map->type) { | ||
247 | case _DRM_REGISTERS: | ||
248 | case _DRM_FRAME_BUFFER: | ||
249 | if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { | ||
250 | int retcode; | ||
251 | retcode = mtrr_del(map->mtrr, | ||
252 | map->offset, | ||
253 | map->size); | ||
254 | DRM_DEBUG("mtrr_del = %d\n", retcode); | ||
255 | } | ||
256 | iounmap(map->handle); | ||
257 | break; | ||
258 | case _DRM_SHM: | ||
259 | vfree(map->handle); | ||
260 | break; | ||
261 | case _DRM_AGP: | ||
262 | case _DRM_SCATTER_GATHER: | ||
263 | break; | ||
264 | case _DRM_CONSISTENT: | ||
265 | dmah.vaddr = map->handle; | ||
266 | dmah.busaddr = map->offset; | ||
267 | dmah.size = map->size; | ||
268 | __drm_pci_free(dev, &dmah); | ||
269 | break; | ||
270 | } | ||
271 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | ||
272 | } | ||
273 | } | ||
274 | mutex_unlock(&dev->struct_mutex); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * \c fault method for DMA virtual memory. | ||
279 | * | ||
280 | * \param vma virtual memory area. | ||
281 | * \param address access address. | ||
282 | * \return pointer to the page structure. | ||
283 | * | ||
284 | * Determine the page number from the page offset and get it from drm_device_dma::pagelist. | ||
285 | */ | ||
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
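
/*
 * Example (illustrative, 4 KB pages): a fault at vma->vm_start + 0x2345
 * gives offset 0x2345 and page_nr 2, so the page is derived from the
 * kernel virtual address stored in dma->pagelist[2].
 */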
310 | |||
311 | /** | ||
312 | * \c fault method for scatter-gather virtual memory. | ||
313 | * | ||
314 | * \param vma virtual memory area. | ||
315 | * \param address access address. | ||
316 | * \return pointer to the page structure. | ||
317 | * | ||
318 | * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. | ||
319 | */ | ||
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
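
/*
 * Example (illustrative): if the map begins two pages into the
 * scatter-gather area (map_offset = 2 * PAGE_SIZE) and the fault is at
 * vma->vm_start + PAGE_SIZE, then page_offset = 1 + 2 = 3 and
 * entry->pagelist[3] supplies the page.
 */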
345 | |||
346 | static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
347 | { | ||
348 | return drm_do_vm_fault(vma, vmf); | ||
349 | } | ||
350 | |||
351 | static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
352 | { | ||
353 | return drm_do_vm_shm_fault(vma, vmf); | ||
354 | } | ||
355 | |||
356 | static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
357 | { | ||
358 | return drm_do_vm_dma_fault(vma, vmf); | ||
359 | } | ||
360 | |||
361 | static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
362 | { | ||
363 | return drm_do_vm_sg_fault(vma, vmf); | ||
364 | } | ||
365 | |||
366 | /** AGP virtual memory operations */ | ||
367 | static struct vm_operations_struct drm_vm_ops = { | ||
368 | .fault = drm_vm_fault, | ||
369 | .open = drm_vm_open, | ||
370 | .close = drm_vm_close, | ||
371 | }; | ||
372 | |||
373 | /** Shared virtual memory operations */ | ||
374 | static struct vm_operations_struct drm_vm_shm_ops = { | ||
375 | .fault = drm_vm_shm_fault, | ||
376 | .open = drm_vm_open, | ||
377 | .close = drm_vm_shm_close, | ||
378 | }; | ||
379 | |||
380 | /** DMA virtual memory operations */ | ||
381 | static struct vm_operations_struct drm_vm_dma_ops = { | ||
382 | .fault = drm_vm_dma_fault, | ||
383 | .open = drm_vm_open, | ||
384 | .close = drm_vm_close, | ||
385 | }; | ||
386 | |||
387 | /** Scatter-gather virtual memory operations */ | ||
388 | static struct vm_operations_struct drm_vm_sg_ops = { | ||
389 | .fault = drm_vm_sg_fault, | ||
390 | .open = drm_vm_open, | ||
391 | .close = drm_vm_close, | ||
392 | }; | ||
393 | |||
394 | /** | ||
395 | * \c open method for shared virtual memory. | ||
396 | * | ||
397 | * \param vma virtual memory area. | ||
398 | * | ||
399 | * Create a new drm_vma_entry structure as the \p vma private data entry and | ||
400 | * add it to drm_device::vmalist. | ||
401 | */ | ||
402 | static void drm_vm_open_locked(struct vm_area_struct *vma) | ||
403 | { | ||
404 | struct drm_file *priv = vma->vm_file->private_data; | ||
405 | struct drm_device *dev = priv->minor->dev; | ||
406 | struct drm_vma_entry *vma_entry; | ||
407 | |||
408 | DRM_DEBUG("0x%08lx,0x%08lx\n", | ||
409 | vma->vm_start, vma->vm_end - vma->vm_start); | ||
410 | atomic_inc(&dev->vma_count); | ||
411 | |||
412 | vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); | ||
413 | if (vma_entry) { | ||
414 | vma_entry->vma = vma; | ||
415 | vma_entry->pid = current->pid; | ||
416 | list_add(&vma_entry->head, &dev->vmalist); | ||
417 | } | ||
418 | } | ||
419 | |||
420 | static void drm_vm_open(struct vm_area_struct *vma) | ||
421 | { | ||
422 | struct drm_file *priv = vma->vm_file->private_data; | ||
423 | struct drm_device *dev = priv->minor->dev; | ||
424 | |||
425 | mutex_lock(&dev->struct_mutex); | ||
426 | drm_vm_open_locked(vma); | ||
427 | mutex_unlock(&dev->struct_mutex); | ||
428 | } | ||
429 | |||
430 | /** | ||
431 | * \c close method for all virtual memory types. | ||
432 | * | ||
433 | * \param vma virtual memory area. | ||
434 | * | ||
435 | * Search the \p vma private data entry in drm_device::vmalist, unlink it, and | ||
436 | * free it. | ||
437 | */ | ||
438 | static void drm_vm_close(struct vm_area_struct *vma) | ||
439 | { | ||
440 | struct drm_file *priv = vma->vm_file->private_data; | ||
441 | struct drm_device *dev = priv->minor->dev; | ||
442 | struct drm_vma_entry *pt, *temp; | ||
443 | |||
444 | DRM_DEBUG("0x%08lx,0x%08lx\n", | ||
445 | vma->vm_start, vma->vm_end - vma->vm_start); | ||
446 | atomic_dec(&dev->vma_count); | ||
447 | |||
448 | mutex_lock(&dev->struct_mutex); | ||
449 | list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { | ||
450 | if (pt->vma == vma) { | ||
451 | list_del(&pt->head); | ||
452 | drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); | ||
453 | break; | ||
454 | } | ||
455 | } | ||
456 | mutex_unlock(&dev->struct_mutex); | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * mmap DMA memory. | ||
461 | * | ||
462 | * \param file_priv DRM file private. | ||
463 | * \param vma virtual memory area. | ||
464 | * \return zero on success or a negative number on failure. | ||
465 | * | ||
466 | * Sets the virtual memory area operations structure to vm_dma_ops, the file | ||
467 | * pointer, and calls vm_open(). | ||
468 | */ | ||
469 | static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) | ||
470 | { | ||
471 | struct drm_file *priv = filp->private_data; | ||
472 | struct drm_device *dev; | ||
473 | struct drm_device_dma *dma; | ||
474 | unsigned long length = vma->vm_end - vma->vm_start; | ||
475 | |||
476 | dev = priv->minor->dev; | ||
477 | dma = dev->dma; | ||
478 | DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", | ||
479 | vma->vm_start, vma->vm_end, vma->vm_pgoff); | ||
480 | |||
481 | /* Length must match exact page count */ | ||
482 | if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { | ||
483 | return -EINVAL; | ||
484 | } | ||
485 | |||
486 | if (!capable(CAP_SYS_ADMIN) && | ||
487 | (dma->flags & _DRM_DMA_USE_PCI_RO)) { | ||
488 | vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); | ||
489 | #if defined(__i386__) || defined(__x86_64__) | ||
490 | pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; | ||
491 | #else | ||
492 | /* Ye gads this is ugly. With more thought | ||
493 | we could move this up higher and use | ||
494 | `protection_map' instead. */ | ||
495 | vma->vm_page_prot = | ||
496 | __pgprot(pte_val | ||
497 | (pte_wrprotect | ||
498 | (__pte(pgprot_val(vma->vm_page_prot))))); | ||
499 | #endif | ||
500 | } | ||
501 | |||
502 | vma->vm_ops = &drm_vm_dma_ops; | ||
503 | |||
504 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ | ||
505 | vma->vm_flags |= VM_DONTEXPAND; | ||
506 | |||
507 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | ||
508 | drm_vm_open_locked(vma); | ||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | unsigned long drm_core_get_map_ofs(struct drm_map * map) | ||
513 | { | ||
514 | return map->offset; | ||
515 | } | ||
516 | |||
517 | EXPORT_SYMBOL(drm_core_get_map_ofs); | ||
518 | |||
519 | unsigned long drm_core_get_reg_ofs(struct drm_device *dev) | ||
520 | { | ||
521 | #ifdef __alpha__ | ||
522 | return dev->hose->dense_mem_base - dev->hose->mem_space->start; | ||
523 | #else | ||
524 | return 0; | ||
525 | #endif | ||
526 | } | ||
527 | |||
528 | EXPORT_SYMBOL(drm_core_get_reg_ofs); | ||
529 | |||
530 | /** | ||
531 | * mmap DMA memory. | ||
532 | * | ||
533 | * \param file_priv DRM file private. | ||
534 | * \param vma virtual memory area. | ||
535 | * \return zero on success or a negative number on failure. | ||
536 | * | ||
537 | * If the virtual memory area has no offset associated with it then it's a DMA | ||
538 | * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, | ||
539 | * checks that the restricted flag is not set, sets the virtual memory operations | ||
540 | * according to the mapping type and remaps the pages. Finally sets the file | ||
541 | * pointer and calls vm_open(). | ||
542 | */ | ||
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault.
		 * Set the page protection before remapping, so the
		 * PTEs installed by remap_pfn_range() actually use it. */
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap. Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
660 | |||
661 | int drm_mmap(struct file *filp, struct vm_area_struct *vma) | ||
662 | { | ||
663 | struct drm_file *priv = filp->private_data; | ||
664 | struct drm_device *dev = priv->minor->dev; | ||
665 | int ret; | ||
666 | |||
667 | mutex_lock(&dev->struct_mutex); | ||
668 | ret = drm_mmap_locked(filp, vma); | ||
669 | mutex_unlock(&dev->struct_mutex); | ||
670 | |||
671 | return ret; | ||
672 | } | ||
673 | EXPORT_SYMBOL(drm_mmap); | ||
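
/*
 * User-space usage sketch (illustrative, not part of this file): a
 * legacy DRM client reaches drm_mmap() by passing the map offset it
 * obtained from the kernel (e.g. via the DRM_IOCTL_GET_MAP ioctl) as
 * the mmap() offset on the device node:
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map_offset);
 *
 * map_size and map_offset are hypothetical values describing one of
 * the device's maps; the offset selects the map through dev->map_hash
 * in drm_mmap_locked() above.
 */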