author     Dave Airlie <airlied@starflyer.(none)>   2006-02-02 03:37:46 -0500
committer  Dave Airlie <airlied@linux.ie>           2006-02-02 03:37:46 -0500
commit     30e2fb188194908e48d3f27a53ccea6740eb1e98
tree       eef1e9495aa6db6cddc67cf7f20369a3acdd2291
parent     ce60fe02fbe737cbce09e2ba5a2ef1efd20eff73
sem2mutex: drivers/char/drm/
From: Arjan van de Ven <arjan@infradead.org>
Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
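---
For reference, the mechanical substitution applied by the conversion scripts is the
usual semaphore-to-mutex pattern: "struct semaphore struct_sem" becomes
"struct mutex struct_mutex", sema_init(&sem, 1) becomes mutex_init(&mutex), and
down()/up() become mutex_lock()/mutex_unlock(). A minimal illustrative sketch follows;
the helper function and its body are made up for this note and are not part of the
patch, only the field and API names come from the diff below:

    #include <linux/mutex.h>

    /* Hypothetical helper showing the post-conversion locking pattern. */
    static void example_ctx_sarea_clear(drm_device_t *dev, int ctx_handle)
    {
            mutex_lock(&dev->struct_mutex);     /* was: down(&dev->struct_sem); */
            dev->context_sareas[ctx_handle] = NULL;
            mutex_unlock(&dev->struct_mutex);   /* was: up(&dev->struct_sem);   */
    }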
 drivers/char/drm/drmP.h        |  5
 drivers/char/drm/drm_auth.c    | 20
 drivers/char/drm/drm_bufs.c    | 80
 drivers/char/drm/drm_context.c | 52
 drivers/char/drm/drm_drv.c     |  4
 drivers/char/drm/drm_fops.c    | 12
 drivers/char/drm/drm_ioctl.c   | 18
 drivers/char/drm/drm_irq.c     | 16
 drivers/char/drm/drm_proc.c    | 28
 drivers/char/drm/drm_stub.c    |  4
 drivers/char/drm/drm_vm.c      | 12
 11 files changed, 126 insertions(+), 125 deletions(-)
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 54b561e69486..71b8b32b075f 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -57,6 +57,7 @@
 #include <linux/smp_lock.h> /* For (un)lock_kernel */
 #include <linux/mm.h>
 #include <linux/cdev.h>
+#include <linux/mutex.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h> /* For pte_wrprotect */
 #endif
@@ -623,7 +624,7 @@ typedef struct drm_device {
 /** \name Locks */
 /*@{ */
 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
-struct semaphore struct_sem; /**< For others */
+struct mutex struct_mutex; /**< For others */
 /*@} */

 /** \name Usage Counters */
@@ -658,7 +659,7 @@ typedef struct drm_device {
 /*@{ */
 drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
 int ctx_count; /**< Number of context handles */
-struct semaphore ctxlist_sem; /**< For ctxlist */
+struct mutex ctxlist_mutex; /**< For ctxlist */

 drm_map_t **context_sareas; /**< per-context SAREA's */
 int max_context;
diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
index a47b502bc7cc..2a37586a7ee8 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -56,7 +56,7 @@ static int drm_hash_magic(drm_magic_t magic)
 * \param magic magic number.
 *
 * Searches in drm_device::magiclist within all files with the same hash key
-* the one with matching magic number, while holding the drm_device::struct_sem
+* the one with matching magic number, while holding the drm_device::struct_mutex
 * lock.
 */
 static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
@@ -65,14 +65,14 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
 drm_magic_entry_t *pt;
 int hash = drm_hash_magic(magic);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
 if (pt->magic == magic) {
 retval = pt->priv;
 break;
 }
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return retval;
 }

@@ -85,7 +85,7 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
 *
 * Creates a drm_magic_entry structure and appends to the linked list
 * associated the magic number hash key in drm_device::magiclist, while holding
-* the drm_device::struct_sem lock.
+* the drm_device::struct_mutex lock.
 */
 static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
 drm_magic_t magic)
@@ -104,7 +104,7 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
 entry->priv = priv;
 entry->next = NULL;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 if (dev->magiclist[hash].tail) {
 dev->magiclist[hash].tail->next = entry;
 dev->magiclist[hash].tail = entry;
@@ -112,7 +112,7 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
 dev->magiclist[hash].head = entry;
 dev->magiclist[hash].tail = entry;
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 return 0;
 }
@@ -124,7 +124,7 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
 * \param magic magic number.
 *
 * Searches and unlinks the entry in drm_device::magiclist with the magic
-* number hash key, while holding the drm_device::struct_sem lock.
+* number hash key, while holding the drm_device::struct_mutex lock.
 */
 static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
 {
@@ -135,7 +135,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
 DRM_DEBUG("%d\n", magic);
 hash = drm_hash_magic(magic);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
 if (pt->magic == magic) {
 if (dev->magiclist[hash].head == pt) {
@@ -147,11 +147,11 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
 if (prev) {
 prev->next = pt->next;
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return 0;
 }
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);

diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 1db12dcb6802..e2637b4d51de 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -255,14 +255,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 memset(list, 0, sizeof(*list));
 list->map = map;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 list_add(&list->head, &dev->maplist->head);
 /* Assign a 32-bit handle */
-/* We do it here so that dev->struct_sem protects the increment */
+/* We do it here so that dev->struct_mutex protects the increment */
 list->user_token = HandleID(map->type == _DRM_SHM
 ? (unsigned long)map->handle
 : map->offset, dev);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 *maplist = list;
 return 0;
@@ -392,9 +392,9 @@ int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
 {
 int ret;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 ret = drm_rmmap_locked(dev, map);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 return ret;
 }
@@ -423,7 +423,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 return -EFAULT;
 }

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 list_for_each(list, &dev->maplist->head) {
 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

@@ -439,7 +439,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
 * find anything.
 */
 if (list == (&dev->maplist->head)) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EINVAL;
 }

@@ -448,13 +448,13 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,

 /* Register and framebuffer maps are permanent */
 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return 0;
 }

 ret = drm_rmmap_locked(dev, map);

-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 return ret;
 }
@@ -566,16 +566,16 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 atomic_inc(&dev->buf_alloc);
 spin_unlock(&dev->count_lock);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 entry = &dma->bufs[order];
 if (entry->buf_count) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM; /* May only call once for each order */
 }

 if (count < 0 || count > 4096) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -EINVAL;
 }
@@ -583,7 +583,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 DRM_MEM_BUFS);
 if (!entry->buflist) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -616,7 +616,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 /* Set count correctly so we free the proper amount. */
 entry->buf_count = count;
 drm_cleanup_buf_error(dev, entry);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -638,7 +638,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 if (!temp_buflist) {
 /* Free the entry because it isn't valid */
 drm_cleanup_buf_error(dev, entry);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -656,7 +656,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 request->count = entry->buf_count;
 request->size = size;
@@ -722,16 +722,16 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 atomic_inc(&dev->buf_alloc);
 spin_unlock(&dev->count_lock);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 entry = &dma->bufs[order];
 if (entry->buf_count) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM; /* May only call once for each order */
 }

 if (count < 0 || count > 4096) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -EINVAL;
 }
@@ -739,7 +739,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 DRM_MEM_BUFS);
 if (!entry->buflist) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -750,7 +750,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 if (!entry->seglist) {
 drm_free(entry->buflist,
 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -766,7 +766,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
 drm_free(entry->seglist,
 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -790,7 +790,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 drm_free(temp_pagelist,
 (dma->page_count + (count << page_order))
 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -831,7 +831,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 (count << page_order))
 * sizeof(*dma->pagelist),
 DRM_MEM_PAGES);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -853,7 +853,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 drm_free(temp_pagelist,
 (dma->page_count + (count << page_order))
 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -878,7 +878,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 dma->page_count += entry->seg_count << page_order;
 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 request->count = entry->buf_count;
 request->size = size;
@@ -948,16 +948,16 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 atomic_inc(&dev->buf_alloc);
 spin_unlock(&dev->count_lock);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 entry = &dma->bufs[order];
 if (entry->buf_count) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM; /* May only call once for each order */
 }

 if (count < 0 || count > 4096) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -EINVAL;
 }
@@ -965,7 +965,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 DRM_MEM_BUFS);
 if (!entry->buflist) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -999,7 +999,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 /* Set count correctly so we free the proper amount. */
 entry->buf_count = count;
 drm_cleanup_buf_error(dev, entry);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -1022,7 +1022,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 if (!temp_buflist) {
 /* Free the entry because it isn't valid */
 drm_cleanup_buf_error(dev, entry);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -1040,7 +1040,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 request->count = entry->buf_count;
 request->size = size;
@@ -1110,16 +1110,16 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 atomic_inc(&dev->buf_alloc);
 spin_unlock(&dev->count_lock);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 entry = &dma->bufs[order];
 if (entry->buf_count) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM; /* May only call once for each order */
 }

 if (count < 0 || count > 4096) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -EINVAL;
 }
@@ -1127,7 +1127,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 DRM_MEM_BUFS);
 if (!entry->buflist) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -1160,7 +1160,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 /* Set count correctly so we free the proper amount. */
 entry->buf_count = count;
 drm_cleanup_buf_error(dev, entry);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -1182,7 +1182,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 if (!temp_buflist) {
 /* Free the entry because it isn't valid */
 drm_cleanup_buf_error(dev, entry);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 atomic_dec(&dev->buf_alloc);
 return -ENOMEM;
 }
@@ -1200,7 +1200,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 request->count = entry->buf_count;
 request->size = size;
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index f84254526949..83094c73da67 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -53,7 +53,7 @@
 * \param ctx_handle context handle.
 *
 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
-* in drm_device::context_sareas, while holding the drm_device::struct_sem
+* in drm_device::context_sareas, while holding the drm_device::struct_mutex
 * lock.
 */
 void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
@@ -64,10 +64,10 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
 goto failed;

 if (ctx_handle < DRM_MAX_CTXBITMAP) {
-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 clear_bit(ctx_handle, dev->ctx_bitmap);
 dev->context_sareas[ctx_handle] = NULL;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return;
 }
 failed:
@@ -83,7 +83,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
 *
 * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
 * drm_device::context_sareas to accommodate the new entry while holding the
-* drm_device::struct_sem lock.
+* drm_device::struct_mutex lock.
 */
 static int drm_ctxbitmap_next(drm_device_t * dev)
 {
@@ -92,7 +92,7 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
 if (!dev->ctx_bitmap)
 return -1;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
 if (bit < DRM_MAX_CTXBITMAP) {
 set_bit(bit, dev->ctx_bitmap);
@@ -113,7 +113,7 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
 DRM_MEM_MAPS);
 if (!ctx_sareas) {
 clear_bit(bit, dev->ctx_bitmap);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -1;
 }
 dev->context_sareas = ctx_sareas;
@@ -126,16 +126,16 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
 DRM_MEM_MAPS);
 if (!dev->context_sareas) {
 clear_bit(bit, dev->ctx_bitmap);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -1;
 }
 dev->context_sareas[bit] = NULL;
 }
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return bit;
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -1;
 }

@@ -145,24 +145,24 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
 * \param dev DRM device.
 *
 * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
-* the drm_device::struct_sem lock.
+* the drm_device::struct_mutex lock.
 */
 int drm_ctxbitmap_init(drm_device_t * dev)
 {
 int i;
 int temp;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
 DRM_MEM_CTXBITMAP);
 if (dev->ctx_bitmap == NULL) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -ENOMEM;
 }
 memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
 dev->context_sareas = NULL;
 dev->max_context = -1;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
 temp = drm_ctxbitmap_next(dev);
@@ -178,17 +178,17 @@ int drm_ctxbitmap_init(drm_device_t * dev)
 * \param dev DRM device.
 *
 * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
-* the drm_device::struct_sem lock.
+* the drm_device::struct_mutex lock.
 */
 void drm_ctxbitmap_cleanup(drm_device_t * dev)
 {
-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 if (dev->context_sareas)
 drm_free(dev->context_sareas,
 sizeof(*dev->context_sareas) *
 dev->max_context, DRM_MEM_MAPS);
 drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 }

 /*@}*/
@@ -222,15 +222,15 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
 if (copy_from_user(&request, argp, sizeof(request)))
 return -EFAULT;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 if (dev->max_context < 0
 || request.ctx_id >= (unsigned)dev->max_context) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EINVAL;
 }

 map = dev->context_sareas[request.ctx_id];
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 request.handle = NULL;
 list_for_each_entry(_entry, &dev->maplist->head, head) {
@@ -274,7 +274,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
 (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
 return -EFAULT;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 list_for_each(list, &dev->maplist->head) {
 r_list = list_entry(list, drm_map_list_t, head);
 if (r_list->map
@@ -282,7 +282,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
 goto found;
 }
 bad:
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EINVAL;

 found:
@@ -294,7 +294,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
 if (request.ctx_id >= (unsigned)dev->max_context)
 goto bad;
 dev->context_sareas[request.ctx_id] = map;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return 0;
 }

@@ -448,10 +448,10 @@ int drm_addctx(struct inode *inode, struct file *filp,
 ctx_entry->handle = ctx.handle;
 ctx_entry->tag = priv;

-down(&dev->ctxlist_sem);
+mutex_lock(&dev->ctxlist_mutex);
 list_add(&ctx_entry->head, &dev->ctxlist->head);
 ++dev->ctx_count;
-up(&dev->ctxlist_sem);
+mutex_unlock(&dev->ctxlist_mutex);

 if (copy_to_user(argp, &ctx, sizeof(ctx)))
 return -EFAULT;
@@ -574,7 +574,7 @@ int drm_rmctx(struct inode *inode, struct file *filp,
 drm_ctxbitmap_free(dev, ctx.handle);
 }

-down(&dev->ctxlist_sem);
+mutex_lock(&dev->ctxlist_mutex);
 if (!list_empty(&dev->ctxlist->head)) {
 drm_ctx_list_t *pos, *n;

@@ -586,7 +586,7 @@ int drm_rmctx(struct inode *inode, struct file *filp,
 }
 }
 }
-up(&dev->ctxlist_sem);
+mutex_unlock(&dev->ctxlist_mutex);

 return 0;
 }
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index c4fa5a29582b..dc6bbe8a18dc 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -151,7 +151,7 @@ int drm_lastclose(drm_device_t * dev)
 if (dev->irq_enabled)
 drm_irq_uninstall(dev);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 del_timer(&dev->timer);

 /* Clear pid list */
@@ -231,7 +231,7 @@ int drm_lastclose(drm_device_t * dev)
 dev->lock.filp = NULL;
 wake_up_interruptible(&dev->lock.lock_queue);
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 DRM_DEBUG("lastclose completed\n");
 return 0;
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 403f44a1bf01..641f7633878c 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -262,7 +262,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 goto out_free;
 }

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 if (!dev->file_last) {
 priv->next = NULL;
 priv->prev = NULL;
@@ -276,7 +276,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 dev->file_last->next = priv;
 dev->file_last = priv;
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 #ifdef __alpha__
 /*
@@ -413,7 +413,7 @@ int drm_release(struct inode *inode, struct file *filp)

 drm_fasync(-1, filp, 0);

-down(&dev->ctxlist_sem);
+mutex_lock(&dev->ctxlist_mutex);
 if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
 drm_ctx_list_t *pos, *n;

@@ -432,9 +432,9 @@ int drm_release(struct inode *inode, struct file *filp)
 }
 }
 }
-up(&dev->ctxlist_sem);
+mutex_unlock(&dev->ctxlist_mutex);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 if (priv->remove_auth_on_close == 1) {
 drm_file_t *temp = dev->file_first;
 while (temp) {
@@ -452,7 +452,7 @@ int drm_release(struct inode *inode, struct file *filp)
 } else {
 dev->file_last = priv->prev;
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 if (dev->driver->postclose)
 dev->driver->postclose(dev, priv);
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index bcd4e604d3ec..555f323b8a32 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -194,9 +194,9 @@ int drm_getmap(struct inode *inode, struct file *filp,
 return -EFAULT;
 idx = map.offset;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 if (idx < 0) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EINVAL;
 }

@@ -209,7 +209,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
 i++;
 }
 if (!r_list || !r_list->map) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EINVAL;
 }

@@ -219,7 +219,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
 map.flags = r_list->map->flags;
 map.handle = (void *)(unsigned long)r_list->user_token;
 map.mtrr = r_list->map->mtrr;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 if (copy_to_user(argp, &map, sizeof(map)))
 return -EFAULT;
@@ -253,11 +253,11 @@ int drm_getclient(struct inode *inode, struct file *filp,
 if (copy_from_user(&client, argp, sizeof(client)))
 return -EFAULT;
 idx = client.idx;
-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;

 if (!pt) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EINVAL;
 }
 client.auth = pt->authenticated;
@@ -265,7 +265,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
 client.uid = pt->uid;
 client.magic = pt->magic;
 client.iocs = pt->ioctl_count;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 if (copy_to_user(argp, &client, sizeof(client)))
 return -EFAULT;
@@ -292,7 +292,7 @@ int drm_getstats(struct inode *inode, struct file *filp,

 memset(&stats, 0, sizeof(stats));

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);

 for (i = 0; i < dev->counters; i++) {
 if (dev->types[i] == _DRM_STAT_LOCK)
@@ -305,7 +305,7 @@ int drm_getstats(struct inode *inode, struct file *filp,

 stats.count = dev->counters;

-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
 return -EFAULT;
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index b0d4b236e837..611a1173091d 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -98,20 +98,20 @@ static int drm_irq_install(drm_device_t * dev)
 if (dev->irq == 0)
 return -EINVAL;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);

 /* Driver must have been initialized */
 if (!dev->dev_private) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EINVAL;
 }

 if (dev->irq_enabled) {
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return -EBUSY;
 }
 dev->irq_enabled = 1;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);

@@ -135,9 +135,9 @@ static int drm_irq_install(drm_device_t * dev)
 ret = request_irq(dev->irq, dev->driver->irq_handler,
 sh_flags, dev->devname, dev);
 if (ret < 0) {
-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 dev->irq_enabled = 0;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return ret;
 }

@@ -161,10 +161,10 @@ int drm_irq_uninstall(drm_device_t * dev)
 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 return -EINVAL;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 irq_enabled = dev->irq_enabled;
 dev->irq_enabled = 0;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);

 if (!irq_enabled)
 return -EINVAL;
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 6f943e3309ef..362a270af0f1 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -258,7 +258,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
 }

 /**
-* Simply calls _vm_info() while holding the drm_device::struct_sem lock.
+* Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
 */
 static int drm_vm_info(char *buf, char **start, off_t offset, int request,
 int *eof, void *data)
@@ -266,9 +266,9 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
 drm_device_t *dev = (drm_device_t *) data;
 int ret;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 ret = drm__vm_info(buf, start, offset, request, eof, data);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return ret;
 }

@@ -331,7 +331,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
 }

 /**
-* Simply calls _queues_info() while holding the drm_device::struct_sem lock.
+* Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
 */
 static int drm_queues_info(char *buf, char **start, off_t offset, int request,
 int *eof, void *data)
@@ -339,9 +339,9 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
 drm_device_t *dev = (drm_device_t *) data;
 int ret;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 ret = drm__queues_info(buf, start, offset, request, eof, data);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return ret;
 }

@@ -403,7 +403,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
 }

 /**
-* Simply calls _bufs_info() while holding the drm_device::struct_sem lock.
+* Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
 */
 static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
 int *eof, void *data)
@@ -411,9 +411,9 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
 drm_device_t *dev = (drm_device_t *) data;
 int ret;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 ret = drm__bufs_info(buf, start, offset, request, eof, data);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return ret;
 }

@@ -459,7 +459,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
 }

 /**
-* Simply calls _clients_info() while holding the drm_device::struct_sem lock.
+* Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
 */
 static int drm_clients_info(char *buf, char **start, off_t offset,
 int request, int *eof, void *data)
@@ -467,9 +467,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
 drm_device_t *dev = (drm_device_t *) data;
 int ret;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 ret = drm__clients_info(buf, start, offset, request, eof, data);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return ret;
 }

@@ -540,9 +540,9 @@ static int drm_vma_info(char *buf, char **start, off_t offset, int request,
 drm_device_t *dev = (drm_device_t *) data;
 int ret;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 ret = drm__vma_info(buf, start, offset, request, eof, data);
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 return ret;
 }
 #endif
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 42d766359caa..7a9263ff3007 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -61,8 +61,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,

 spin_lock_init(&dev->count_lock);
 init_timer(&dev->timer);
-sema_init(&dev->struct_sem, 1);
-sema_init(&dev->ctxlist_sem, 1);
+mutex_init(&dev->struct_mutex);
+mutex_init(&dev->ctxlist_mutex);

 dev->pdev = pdev;

diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 3f73aa774c80..0291cd62c69f 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -188,7 +188,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)

 map = vma->vm_private_data;

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
 next = pt->next;
 if (pt->vma->vm_private_data == map)
@@ -248,7 +248,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 }
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 }

 /**
@@ -404,12 +404,12 @@ static void drm_vm_open(struct vm_area_struct *vma)

 vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 if (vma_entry) {
-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 vma_entry->vma = vma;
 vma_entry->next = dev->vmalist;
 vma_entry->pid = current->pid;
 dev->vmalist = vma_entry;
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 }
 }

@@ -431,7 +431,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
 vma->vm_start, vma->vm_end - vma->vm_start);
 atomic_dec(&dev->vma_count);

-down(&dev->struct_sem);
+mutex_lock(&dev->struct_mutex);
 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
 if (pt->vma == vma) {
 if (prev) {
@@ -443,7 +443,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
 break;
 }
 }
-up(&dev->struct_sem);
+mutex_unlock(&dev->struct_mutex);
 }

 /**