author      Daniel Vetter <daniel.vetter@ffwll.ch>      2013-12-16 05:21:06 -0500
committer   Daniel Vetter <daniel.vetter@ffwll.ch>      2014-04-23 04:32:43 -0400
commit      2177a2182f3f375f64d9938dd884895c3872c380 (patch)
tree        d34e7ccc73ba72cac17ad1449400e2a4ea8cc23e /drivers/gpu/drm/drm_bufs.c
parent      fc8fd40eb29a936cc689d0008863d39a67741c67 (diff)
drm: rename dev->count_lock to dev->buf_lock
Since really that's all it protects - legacy horror stories in drm_bufs.c.
Since I don't want to waste any more time on this I didn't bother to
actually look at what it protects in there, but it's at least contained
now.

v2: Move the spurious hunk to the right patch (Thierry).

Cc: Thierry Reding <thierry.reding@gmail.com>
Reviewed-by: Thierry Reding <treding@nvidia.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
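The locking pattern the renamed buf_lock serializes is visible in the hunks
below: the legacy drm_addbufs_*() paths refuse to allocate once a client has
started using the buffers. As a minimal stand-alone sketch of that allocator
side (struct dev_bufs and begin_buf_alloc() are illustrative names, not
kernel API; the real struct drm_device carries far more state):

    #include <linux/atomic.h>
    #include <linux/spinlock.h>

    struct dev_bufs {
            spinlock_t buf_lock;    /* renamed from count_lock by this patch */
            int buf_use;            /* nonzero once buffers are handed to userspace */
            atomic_t buf_alloc;     /* allocations currently in flight */
    };

    /* Mirrors the drm_addbufs_agp/_pci/_sg hunks below: bail out if the
     * buffers are already in use, otherwise advertise an in-flight
     * allocation before dropping the lock. */
    static int begin_buf_alloc(struct dev_bufs *dev)
    {
            spin_lock(&dev->buf_lock);
            if (dev->buf_use) {
                    spin_unlock(&dev->buf_lock);
                    return -EBUSY;
            }
            atomic_inc(&dev->buf_alloc);
            spin_unlock(&dev->buf_lock);
            return 0;
    }

In drm_bufs.c the matching atomic_dec(&dev->buf_alloc) runs when each
allocation path finishes, so the counter tracks allocations still in flight.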
Diffstat (limited to 'drivers/gpu/drm/drm_bufs.c')
-rw-r--r--   drivers/gpu/drm/drm_bufs.c | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index edec31fe3fed..ef7f0199a0f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		DRM_DEBUG("zone invalid\n");
 		return -EINVAL;
 	}
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
  * \param arg pointer to a drm_buf_info structure.
  * \return zero on success or a negative number on failure.
  *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * Increments drm_device::buf_use while holding the drm_device::buf_lock
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
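The consumer side described by the comment above is the mirror image:
drm_infobufs() and drm_mapbufs() in the hunks below refuse to proceed while
an allocation is in flight, then latch buf_use so subsequent drm_addbufs_*()
calls fail with -EBUSY. A matching sketch, reusing the illustrative
struct dev_bufs from the sketch above (begin_buf_use() is likewise a
hypothetical name):

    static int begin_buf_use(struct dev_bufs *dev)
    {
            spin_lock(&dev->buf_lock);
            if (atomic_read(&dev->buf_alloc)) {
                    spin_unlock(&dev->buf_lock);
                    return -EBUSY;
            }
            dev->buf_use++;         /* can't allocate more after this call */
            spin_unlock(&dev->buf_lock);
            return 0;
    }

Together the two checks under buf_lock ensure buffer allocation and first
use can never interleave, which is all the renamed lock has to protect.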
@@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	++dev->buf_use;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 		if (dma->bufs[i].buf_count)
@@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	dev->buf_use++;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	if (request->count >= dma->buf_count) {
 		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))