author     Paul Mackerras <paulus@samba.org>  2007-05-07 23:37:51 -0400
committer  Paul Mackerras <paulus@samba.org>  2007-05-07 23:37:51 -0400
commit     02bbc0f09c90cefdb2837605c96a66c5ce4ba2e1 (patch)
tree       04ef573cd4de095c500c9fc3477f4278c0b36300 /drivers/char/drm/drm_vm.c
parent     7487a2245b8841c77ba9db406cf99a483b9334e9 (diff)
parent     5b94f675f57e4ff16c8fda09088d7480a84dcd91 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/char/drm/drm_vm.c')
-rw-r--r--  drivers/char/drm/drm_vm.c  102
1 file changed, 63 insertions, 39 deletions
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 54a632848955..35540cfb43dd 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -41,6 +41,30 @@
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);

+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+                pgprot_val(tmp) |= _PAGE_PCD;
+                pgprot_val(tmp) &= ~_PAGE_PWT;
+        }
+#elif defined(__powerpc__)
+        pgprot_val(tmp) |= _PAGE_NO_CACHE;
+        if (map_type == _DRM_REGISTERS)
+                pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+                            vma->vm_start))
+                tmp = pgprot_writecombine(tmp);
+        else
+                tmp = pgprot_noncached(tmp);
+#endif
+        return tmp;
+}
+
 /**
  * \c nopage method for AGP virtual memory.
  *
@@ -151,8 +175,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,

         offset = address - vma->vm_start;
         i = (unsigned long)map->handle + offset;
-        page = (map->type == _DRM_CONSISTENT) ?
-            virt_to_page((void *)i) : vmalloc_to_page((void *)i);
+        page = vmalloc_to_page((void *)i);
         if (!page)
                 return NOPAGE_SIGBUS;
         get_page(page);
@@ -389,7 +412,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
 {
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
@@ -401,15 +424,23 @@ static void drm_vm_open(struct vm_area_struct *vma)

         vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
         if (vma_entry) {
-                mutex_lock(&dev->struct_mutex);
                 vma_entry->vma = vma;
                 vma_entry->next = dev->vmalist;
                 vma_entry->pid = current->pid;
                 dev->vmalist = vma_entry;
-                mutex_unlock(&dev->struct_mutex);
         }
 }

+static void drm_vm_open(struct vm_area_struct *vma)
+{
+        drm_file_t *priv = vma->vm_file->private_data;
+        drm_device_t *dev = priv->head->dev;
+
+        mutex_lock(&dev->struct_mutex);
+        drm_vm_open_locked(vma);
+        mutex_unlock(&dev->struct_mutex);
+}
+
 /**
  * \c close method for all virtual memory types.
  *
@@ -460,7 +491,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
         drm_device_dma_t *dma;
         unsigned long length = vma->vm_end - vma->vm_start;

-        lock_kernel();
         dev = priv->head->dev;
         dma = dev->dma;
         DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -468,10 +498,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)

         /* Length must match exact page count */
         if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
-                unlock_kernel();
                 return -EINVAL;
         }
-        unlock_kernel();

         if (!capable(CAP_SYS_ADMIN) &&
             (dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -494,7 +522,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
         vma->vm_flags |= VM_RESERVED;        /* Don't swap */

         vma->vm_file = filp;        /* Needed for drm_vm_open() */
-        drm_vm_open(vma);
+        drm_vm_open_locked(vma);
         return 0;
 }

@@ -529,7 +557,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * according to the mapping type and remaps the pages. Finally sets the file
  * pointer and calls vm_open().
  */
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
         drm_file_t *priv = filp->private_data;
         drm_device_t *dev = priv->head->dev;
@@ -565,7 +593,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
                 return -EPERM;

         /* Check for valid size. */
-        if (map->size != vma->vm_end - vma->vm_start)
+        if (map->size < vma->vm_end - vma->vm_start)
                 return -EINVAL;

         if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
@@ -600,37 +628,16 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
                 /* fall through to _DRM_FRAME_BUFFER... */
         case _DRM_FRAME_BUFFER:
         case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
-                if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
-                        pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-                        pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
-                }
-#elif defined(__powerpc__)
-                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-                if (map->type == _DRM_REGISTERS)
-                        pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
-                vma->vm_flags |= VM_IO;        /* not in core dump */
-#if defined(__ia64__)
-                if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
-                        vma->vm_page_prot =
-                            pgprot_writecombine(vma->vm_page_prot);
-                else
-                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
                 offset = dev->driver->get_reg_ofs(dev);
+                vma->vm_flags |= VM_IO;        /* not in core dump */
+                vma->vm_page_prot = drm_io_prot(map->type, vma);
 #ifdef __sparc__
                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
                 if (io_remap_pfn_range(vma, vma->vm_start,
                                        (map->offset + offset) >> PAGE_SHIFT,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot))
-#else
-                if (io_remap_pfn_range(vma, vma->vm_start,
-                                       (map->offset + offset) >> PAGE_SHIFT,
-                                       vma->vm_end - vma->vm_start,
-                                       vma->vm_page_prot))
-#endif
                         return -EAGAIN;
                 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
                           " offset = 0x%lx\n",
@@ -638,10 +645,15 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
                           vma->vm_start, vma->vm_end, map->offset + offset);
                 vma->vm_ops = &drm_vm_ops;
                 break;
-        case _DRM_SHM:
         case _DRM_CONSISTENT:
-                /* Consistent memory is really like shared memory. It's only
-                 * allocate in a different way */
+                /* Consistent memory is really like shared memory. But
+                 * it's allocated in a different way, so avoid nopage */
+                if (remap_pfn_range(vma, vma->vm_start,
+                                    page_to_pfn(virt_to_page(map->handle)),
+                                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+                        return -EAGAIN;
+        /* fall through to _DRM_SHM */
+        case _DRM_SHM:
                 vma->vm_ops = &drm_vm_shm_ops;
                 vma->vm_private_data = (void *)map;
                 /* Don't let this area swap. Change when
@@ -659,8 +671,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
         vma->vm_flags |= VM_RESERVED;        /* Don't swap */

         vma->vm_file = filp;        /* Needed for drm_vm_open() */
-        drm_vm_open(vma);
+        drm_vm_open_locked(vma);
         return 0;
 }

+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+        int ret;
+
+        mutex_lock(&dev->struct_mutex);
+        ret = drm_mmap_locked(filp, vma);
+        mutex_unlock(&dev->struct_mutex);
+
+        return ret;
+}
 EXPORT_SYMBOL(drm_mmap);
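
The structural change running through this diff is the split of drm_vm_open() and drm_mmap() into thin wrappers that take dev->struct_mutex and *_locked() bodies that assume the caller already holds it, which is what lets drm_mmap_dma() drop its lock_kernel()/unlock_kernel() calls and register VMAs via drm_vm_open_locked() without re-taking the mutex. Below is a minimal, self-contained sketch of that locked-helper idiom; the names and the pthread mutex are illustrative stand-ins, not the DRM code itself.

/*
 * Sketch of the "locked helper + lock-taking wrapper" idiom used in this
 * diff (drm_vm_open()/drm_vm_open_locked(), drm_mmap()/drm_mmap_locked()).
 * All names are hypothetical; a pthread mutex stands in for
 * dev->struct_mutex.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static int vma_count;   /* stands in for the dev->vmalist bookkeeping */

/* Caller must already hold dev_mutex (compare drm_vm_open_locked()). */
static void track_vma_locked(void)
{
        vma_count++;
}

/* Entry point that takes the lock itself (compare drm_vm_open()). */
static void track_vma(void)
{
        pthread_mutex_lock(&dev_mutex);
        track_vma_locked();
        pthread_mutex_unlock(&dev_mutex);
}

/*
 * A path that already holds the lock calls the _locked variant directly,
 * the way drm_mmap_locked() and drm_mmap_dma() now call
 * drm_vm_open_locked(), so the mutex is never taken twice.
 */
static int map_something(void)
{
        pthread_mutex_lock(&dev_mutex);
        /* ... set up the mapping under the lock ... */
        track_vma_locked();
        pthread_mutex_unlock(&dev_mutex);
        return 0;
}

int main(void)
{
        track_vma();
        map_something();
        printf("tracked VMAs: %d\n", vma_count);
        return 0;
}

The same shape appears in the diff above: only the outer drm_mmap() and drm_vm_open() wrappers acquire and release dev->struct_mutex, while every internal call site that already runs under the mutex uses the _locked variant.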