about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/drm_bufs.c
diff options
context:
space:
mode:
author    Dave Airlie <airlied@redhat.com>    2014-04-30 19:32:21 -0400
committer Dave Airlie <airlied@redhat.com>    2014-04-30 19:32:21 -0400
commit  444c9a08bf787e8236e132fab7eceeb2f065aa4c (patch)
tree    52a117f1531f8fcc7f775cacd309d92e64c27a10 /drivers/gpu/drm/drm_bufs.c
parent  7e9ab4081e646fc317d0a87929a352f0e5082190 (diff)
parent  3c8413951cbd8a2d855740823fc547c97b845f6f (diff)
Merge branch 'drm-init-cleanup' of git://people.freedesktop.org/~danvet/drm into drm-next
Next pull request, this time more of the drm de-midlayering work. The big
thing is that this patch series here removes everything from drm_bus except
the set_busid callback. Thierry has a few more patches on top of this to
make that one optional too. With that we can ditch all the non-pci drm_bus
implementations, which Thierry has already done for the fake tegra host1x
drm_bus.

Reviewed by Thierry, Laurent and David and now also survived some testing
on my intel boxes to make sure the irq fumble is fixed correctly ;-)
The last minute rebase was just to add the r-b tags from Thierry for the
2 patches I've redone.

* 'drm-init-cleanup' of git://people.freedesktop.org/~danvet/drm:
  drm/<drivers>: don't set driver->dev_priv_size to 0
  drm: Remove dev->kdriver
  drm: remove drm_bus->get_name
  drm: rip out dev->devname
  drm: inline drm_pci_set_unique
  drm: remove bus->get_irq implementations
  drm: pass the irq explicitly to drm_irq_install
  drm/irq: Look up the pci irq directly in the drm_control ioctl
  drm/irq: track the irq installed in drm_irq_install in dev->irq
  drm: rename dev->count_lock to dev->buf_lock
  drm: Rip out totally bogus vga_switcheroo->can_switch locking
  drm: kill drm_bus->bus_type
  drm: remove drm_dev_to_irq from drivers
  drm/irq: remove cargo-culted locking from irq_install/uninstall
  drm/irq: drm_control is a legacy ioctl, so pci devices only
  drm/pci: fold in irq_by_busid support
  drm/irq: simplify irq checks in drm_wait_vblank
Diffstat (limited to 'drivers/gpu/drm/drm_bufs.c')
-rw-r--r--  drivers/gpu/drm/drm_bufs.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index edec31fe3fed..ef7f0199a0f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
656 DRM_DEBUG("zone invalid\n"); 656 DRM_DEBUG("zone invalid\n");
657 return -EINVAL; 657 return -EINVAL;
658 } 658 }
659 spin_lock(&dev->count_lock); 659 spin_lock(&dev->buf_lock);
660 if (dev->buf_use) { 660 if (dev->buf_use) {
661 spin_unlock(&dev->count_lock); 661 spin_unlock(&dev->buf_lock);
662 return -EBUSY; 662 return -EBUSY;
663 } 663 }
664 atomic_inc(&dev->buf_alloc); 664 atomic_inc(&dev->buf_alloc);
665 spin_unlock(&dev->count_lock); 665 spin_unlock(&dev->buf_lock);
666 666
667 mutex_lock(&dev->struct_mutex); 667 mutex_lock(&dev->struct_mutex);
668 entry = &dma->bufs[order]; 668 entry = &dma->bufs[order];
@@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
805 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 805 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
806 total = PAGE_SIZE << page_order; 806 total = PAGE_SIZE << page_order;
807 807
808 spin_lock(&dev->count_lock); 808 spin_lock(&dev->buf_lock);
809 if (dev->buf_use) { 809 if (dev->buf_use) {
810 spin_unlock(&dev->count_lock); 810 spin_unlock(&dev->buf_lock);
811 return -EBUSY; 811 return -EBUSY;
812 } 812 }
813 atomic_inc(&dev->buf_alloc); 813 atomic_inc(&dev->buf_alloc);
814 spin_unlock(&dev->count_lock); 814 spin_unlock(&dev->buf_lock);
815 815
816 mutex_lock(&dev->struct_mutex); 816 mutex_lock(&dev->struct_mutex);
817 entry = &dma->bufs[order]; 817 entry = &dma->bufs[order];
@@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1015 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1015 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1016 return -EINVAL; 1016 return -EINVAL;
1017 1017
1018 spin_lock(&dev->count_lock); 1018 spin_lock(&dev->buf_lock);
1019 if (dev->buf_use) { 1019 if (dev->buf_use) {
1020 spin_unlock(&dev->count_lock); 1020 spin_unlock(&dev->buf_lock);
1021 return -EBUSY; 1021 return -EBUSY;
1022 } 1022 }
1023 atomic_inc(&dev->buf_alloc); 1023 atomic_inc(&dev->buf_alloc);
1024 spin_unlock(&dev->count_lock); 1024 spin_unlock(&dev->buf_lock);
1025 1025
1026 mutex_lock(&dev->struct_mutex); 1026 mutex_lock(&dev->struct_mutex);
1027 entry = &dma->bufs[order]; 1027 entry = &dma->bufs[order];
@@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
1175 * \param arg pointer to a drm_buf_info structure. 1175 * \param arg pointer to a drm_buf_info structure.
1176 * \return zero on success or a negative number on failure. 1176 * \return zero on success or a negative number on failure.
1177 * 1177 *
1178 * Increments drm_device::buf_use while holding the drm_device::count_lock 1178 * Increments drm_device::buf_use while holding the drm_device::buf_lock
1179 * lock, preventing of allocating more buffers after this call. Information 1179 * lock, preventing of allocating more buffers after this call. Information
1180 * about each requested buffer is then copied into user space. 1180 * about each requested buffer is then copied into user space.
1181 */ 1181 */
@@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
1196 if (!dma) 1196 if (!dma)
1197 return -EINVAL; 1197 return -EINVAL;
1198 1198
1199 spin_lock(&dev->count_lock); 1199 spin_lock(&dev->buf_lock);
1200 if (atomic_read(&dev->buf_alloc)) { 1200 if (atomic_read(&dev->buf_alloc)) {
1201 spin_unlock(&dev->count_lock); 1201 spin_unlock(&dev->buf_lock);
1202 return -EBUSY; 1202 return -EBUSY;
1203 } 1203 }
1204 ++dev->buf_use; /* Can't allocate more after this call */ 1204 ++dev->buf_use; /* Can't allocate more after this call */
1205 spin_unlock(&dev->count_lock); 1205 spin_unlock(&dev->buf_lock);
1206 1206
1207 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1207 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1208 if (dma->bufs[i].buf_count) 1208 if (dma->bufs[i].buf_count)
@@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
1381 if (!dma) 1381 if (!dma)
1382 return -EINVAL; 1382 return -EINVAL;
1383 1383
1384 spin_lock(&dev->count_lock); 1384 spin_lock(&dev->buf_lock);
1385 if (atomic_read(&dev->buf_alloc)) { 1385 if (atomic_read(&dev->buf_alloc)) {
1386 spin_unlock(&dev->count_lock); 1386 spin_unlock(&dev->buf_lock);
1387 return -EBUSY; 1387 return -EBUSY;
1388 } 1388 }
1389 dev->buf_use++; /* Can't allocate more after this call */ 1389 dev->buf_use++; /* Can't allocate more after this call */
1390 spin_unlock(&dev->count_lock); 1390 spin_unlock(&dev->buf_lock);
1391 1391
1392 if (request->count >= dma->buf_count) { 1392 if (request->count >= dma->buf_count) {
1393 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) 1393 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))