author    Dave Airlie <airlied@redhat.com>  2016-07-14 21:01:37 -0400
committer Dave Airlie <airlied@redhat.com>  2016-07-14 21:01:37 -0400
commit    6c181c82106e12dced317e93a7a396cbb8c64f75 (patch)
tree      95f81eaf5fa9e5539411f5d9690b31c7c0c20044
parent    1640142b3d900cd7e5bc593d130a84f9187d9819 (diff)
parent    01d3434a565ada5ca084c68ec1e087ada5a7b157 (diff)
Merge tag 'topic/drm-misc-2016-07-14' of git://anongit.freedesktop.org/drm-intel into drm-next
I recovered the dri-devel backlog from my vacation, more misc stuff:

- of_node_put fixes from Peter Chen (not all yet)
- more patches from Gustavo to use kms-native drm_crtc_vblank_* funcs
- docs sphinxification from Lukas Wunner
- bunch of fixes all over from Dan Carpenter
- more follow-up work from Chris' register/unregister rework in various places
- vgem dma-buf export (for writing testcases)
- small things all over from tons of different people

* tag 'topic/drm-misc-2016-07-14' of git://anongit.freedesktop.org/drm-intel: (52 commits)
  drm: Don't overwrite user ioctl arg unless requested
  dma-buf/sync_file: improve Kconfig description for Sync Files
  MAINTAINERS: add entry for the Sync File Framework
  drm: Resurrect atomic rmfb code
  drm/vgem: Use PAGE_KERNEL in place of x86-specific PAGE_KERNEL_IO
  qxl: silence uninitialized variable warning
  qxl: check for kmap failures
  vga_switcheroo: Sphinxify docs
  drm: Restore double clflush on the last partial cacheline
  gpu: drm: rockchip_drm_drv: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_vtg: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_hqvdp: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_vdo: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_compositor: add missing of_node_put after calling of_parse_phandle
  drm/tilcdc: use drm_crtc_handle_vblank()
  drm/rcar-du: use drm_crtc_handle_vblank()
  drm/nouveau: use drm_crtc_handle_vblank()
  drm/atmel: use drm_crtc_handle_vblank()
  drm/armada: use drm_crtc_handle_vblank()
  drm: make drm_vblank_count_and_time() static
  ...
-rw-r--r--  Documentation/gpu/drm-internals.rst | 4
-rw-r--r--  Documentation/gpu/vga-switcheroo.rst | 8
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  drivers/dma-buf/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 12
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 66
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 78
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 1
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 3
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 52
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 27
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 16
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 2
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 5
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 5
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 253
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 3
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 28
-rw-r--r--  include/drm/drmP.h | 2
-rw-r--r--  include/drm/drm_crtc.h | 3
-rw-r--r--  include/drm/drm_mipi_dsi.h | 2
49 files changed, 407 insertions, 283 deletions
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 4f7176576feb..490d655cda20 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -280,8 +280,8 @@ private data in the open method should free it here.
 The lastclose method should restore CRTC and plane properties to default
 value, so that a subsequent open of the device will not inherit state
 from the previous user. It can also be used to execute delayed power
-switching state changes, e.g. in conjunction with the vga_switcheroo
-infrastructure (see ?). Beyond that KMS drivers should not do any
+switching state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
+infrastructure. Beyond that KMS drivers should not do any
 further cleanup. Only legacy UMS drivers might need to clean up device
 state so that the vga console or an independent fbdev driver could take
 over.
diff --git a/Documentation/gpu/vga-switcheroo.rst b/Documentation/gpu/vga-switcheroo.rst
index 327d930a2229..cbbdb994f1dd 100644
--- a/Documentation/gpu/vga-switcheroo.rst
+++ b/Documentation/gpu/vga-switcheroo.rst
@@ -1,3 +1,5 @@
+.. _vga_switcheroo:
+
 ==============
 VGA Switcheroo
 ==============
@@ -94,9 +96,3 @@ Public functions
 
 .. kernel-doc:: include/linux/apple-gmux.h
    :internal:
-
-.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/vga/vga_switcheroo.c
-
-.. WARNING: DOCPROC directive not supported: !Cinclude/linux/vga_switcheroo.h
-
-.. WARNING: DOCPROC directive not supported: !Cdrivers/platform/x86/apple-gmux.c
diff --git a/MAINTAINERS b/MAINTAINERS
index 0c22fe584283..5351c4be3aa2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3779,6 +3779,17 @@ F: include/linux/*fence.h
 F:	Documentation/dma-buf-sharing.txt
 T:	git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 
+SYNC FILE FRAMEWORK
+M:	Sumit Semwal <sumit.semwal@linaro.org>
+R:	Gustavo Padovan <gustavo@padovan.org>
+S:	Maintained
+L:	linux-media@vger.kernel.org
+L:	dri-devel@lists.freedesktop.org
+F:	drivers/dma-buf/sync_file.c
+F:	include/linux/sync_file.h
+F:	Documentation/sync_file.txt
+T:	git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vinod.koul@intel.com>
 L:	dmaengine@vger.kernel.org
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9824bc4addf8..25bcfa0b474f 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -1,11 +1,20 @@
 menu "DMABUF options"
 
 config SYNC_FILE
-	bool "sync_file support for fences"
+	bool "Explicit Synchronization Framework"
 	default n
 	select ANON_INODES
 	select DMA_SHARED_BUFFER
 	---help---
-	  This option enables the fence framework synchronization to export
-	  sync_files to userspace that can represent one or more fences.
+	  The Sync File Framework adds explicit syncronization via
+	  userspace. It enables send/receive 'struct fence' objects to/from
+	  userspace via Sync File fds for synchronization between drivers via
+	  userspace components. It has been ported from Android.
+
+	  The first and main user for this is graphics in which a fence is
+	  associated with a buffer. When a job is submitted to the GPU a fence
+	  is attached to the buffer and is transferred via userspace, using Sync
+	  Files fds, to the DRM driver for example. More details at
+	  Documentation/sync_file.txt.
+
 endmenu
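The help text above describes the flow only in prose. As a rough, hedged sketch of the kernel side (not part of this series; it assumes the 4.7-era sync_file_create() helper and the surrounding driver code is invented for illustration), a driver would export an already-initialised fence to userspace as a Sync File fd roughly like this:

#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fence.h>
#include <linux/sync_file.h>

/* Illustrative only: wrap a fence in a sync_file and hand back an fd
 * that another process or driver can wait on. */
static int example_export_fence_as_fd(struct fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	/* sync_file_create() takes its own reference on the fence. */
	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;	/* returned to userspace via the driver's own ioctl */
}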
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9c9f28c1ce84..614fb026436d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1712,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_bo_evict_vram(adev);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
+	drm_crtc_force_disable_all(adev->ddev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
 	kfree(adev->ip_block_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b464aaa1da3e..a8efbb54423f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev->rmmio == NULL)
 		goto done_free;
 
-	pm_runtime_get_sync(dev->dev);
+	if (amdgpu_device_is_px(dev)) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
 
 	amdgpu_amdkfd_device_fini(adev);
 
@@ -135,9 +138,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	}
 
 out:
-	if (r)
+	if (r) {
+		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+		if (adev->rmmio && amdgpu_device_is_px(dev))
+			pm_runtime_put_noidle(dev->dev);
 		amdgpu_driver_unload_kms(dev);
-
+	}
 
 	return r;
 }
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 34405e4a5d36..2f58e9e2a59c 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
 		DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
 
 	if (stat & VSYNC_IRQ)
-		drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_handle_vblank(&dcrtc->crtc);
 
 	spin_lock(&dcrtc->irq_lock);
 	ovl_plane = dcrtc->plane;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 613f6c99b76a..a978381ef95b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -383,7 +383,7 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 
 void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
 {
-	drm_handle_vblank(c->dev, 0);
+	drm_crtc_handle_vblank(c);
 	atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }
 
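Several conversions in this pull (armada and atmel-hlcdc above, nouveau, rcar-du and tilcdc further down) follow the same shape: an interrupt handler that used to pass a pipe number to drm_handle_vblank() now passes the CRTC itself. A hedged, generic sketch of the pattern, with an invented driver structure and register layout:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Invented driver structure, for illustration only. */
struct foo_crtc {
	struct drm_crtc crtc;		/* embedded KMS CRTC */
	void __iomem *regs;
};

#define FOO_STAT		0x00	/* hypothetical status register */
#define FOO_STAT_VBLANK		BIT(0)

static irqreturn_t foo_crtc_irq(int irq, void *arg)
{
	struct foo_crtc *fcrtc = arg;
	u32 stat = readl(fcrtc->regs + FOO_STAT);

	if (stat & FOO_STAT_VBLANK)
		/* old style: drm_handle_vblank(fcrtc->crtc.dev, pipe); */
		drm_crtc_handle_vblank(&fcrtc->crtc);

	return IRQ_HANDLED;
}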
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3cee084e9d28..9359be4a0ca9 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1589,6 +1589,72 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
 
+int drm_atomic_remove_fb(struct drm_framebuffer *fb)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_device *dev = fb->dev;
+	struct drm_atomic_state *state;
+	struct drm_plane *plane;
+	int ret = 0;
+	unsigned plane_mask;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	drm_modeset_acquire_init(&ctx, 0);
+	state->acquire_ctx = &ctx;
+
+retry:
+	plane_mask = 0;
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (ret)
+		goto unlock;
+
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		if (plane->state->fb != fb)
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto unlock;
+		}
+
+		drm_atomic_set_fb_for_plane(plane_state, NULL);
+		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+		if (ret)
+			goto unlock;
+
+		plane_mask |= BIT(drm_plane_index(plane));
+
+		plane->old_fb = plane->fb;
+		plane->fb = NULL;
+	}
+
+	if (plane_mask)
+		ret = drm_atomic_commit(state);
+
+unlock:
+	if (plane_mask)
+		drm_atomic_clean_old_fb(dev, plane_mask, ret);
+
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	if (ret || !plane_mask)
+		drm_atomic_state_free(state);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+}
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 059f7c39c582..a7916e5f8864 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 		mb();
 		for (; addr < end; addr += size)
 			clflushopt(addr);
+		clflushopt(end - 1); /* force serialisation */
 		mb();
 		return;
 	}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fd93e9c79d28..9d3f80efc9cc 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -396,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj)
 }
 EXPORT_SYMBOL(drm_mode_object_reference);
 
+/**
+ * drm_crtc_force_disable - Forcibly turn off a CRTC
+ * @crtc: CRTC to turn off
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable(struct drm_crtc *crtc)
+{
+	struct drm_mode_set set = {
+		.crtc = crtc,
+	};
+
+	return drm_mode_set_config_internal(&set);
+}
+EXPORT_SYMBOL(drm_crtc_force_disable);
+
+/**
+ * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
+ * @dev: DRM device whose CRTCs to turn off
+ *
+ * Drivers may want to call this on unload to ensure that all displays are
+ * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable_all(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+	drm_modeset_lock_all(dev);
+	drm_for_each_crtc(crtc, dev)
+		if (crtc->enabled) {
+			ret = drm_crtc_force_disable(crtc);
+			if (ret)
+				goto out;
+		}
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_crtc_force_disable_all);
+
 static void drm_framebuffer_free(struct kref *kref)
 {
 	struct drm_framebuffer *fb =
@@ -544,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 	struct drm_device *dev;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
-	struct drm_mode_set set;
-	int ret;
 
 	if (!fb)
 		return;
@@ -570,16 +613,17 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 	 * in this manner.
 	 */
 	if (drm_framebuffer_read_refcount(fb) > 1) {
+		if (dev->mode_config.funcs->atomic_commit) {
+			drm_atomic_remove_fb(fb);
+			goto out;
+		}
+
 		drm_modeset_lock_all(dev);
 		/* remove from any CRTC */
 		drm_for_each_crtc(crtc, dev) {
 			if (crtc->primary->fb == fb) {
 				/* should turn off the crtc */
-				memset(&set, 0, sizeof(struct drm_mode_set));
-				set.crtc = crtc;
-				set.fb = NULL;
-				ret = drm_mode_set_config_internal(&set);
-				if (ret)
+				if (drm_crtc_force_disable(crtc))
 					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
 			}
 		}
@@ -591,6 +635,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 		drm_modeset_unlock_all(dev);
 	}
 
+out:
 	drm_framebuffer_unreference(fb);
 }
 EXPORT_SYMBOL(drm_framebuffer_remove);
@@ -1068,23 +1113,7 @@ void drm_connector_unregister(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
-/**
- * drm_connector_register_all - register all connectors
- * @dev: drm device
- *
- * This function registers all connectors in sysfs and other places so that
- * userspace can start to access them. drm_connector_register_all() is called
- * automatically from drm_dev_register() to complete the device registration,
- * if they don't call drm_connector_register() on each connector individually.
- *
- * When a device is unplugged and should be removed from userspace access,
- * call drm_connector_unregister_all(), which is the inverse of this
- * function.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register_all(struct drm_device *dev)
+static int drm_connector_register_all(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	int ret;
@@ -1106,7 +1135,6 @@ err:
 	drm_connector_unregister_all(dev);
 	return ret;
 }
-EXPORT_SYMBOL(drm_connector_register_all);
 
 /**
  * drm_connector_unregister_all - unregister connector userspace interfaces
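The kernel-doc for drm_crtc_force_disable_all() above already names the intended call site; the amdgpu, nouveau and radeon hunks later in this pull use it exactly that way. For reference, a minimal hedged sketch of the unload-time usage (foo_driver_unload() and its ordering are invented for illustration):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static int foo_driver_unload(struct drm_device *dev)
{
	/* Stop hotplug polling first, then make sure every CRTC is switched
	 * off while the modeset infrastructure is still alive. */
	drm_kms_helper_poll_fini(dev);
	drm_crtc_force_disable_all(dev);
	drm_mode_config_cleanup(dev);

	return 0;
}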
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 47a500b90fd7..b248e2238a05 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -125,6 +125,7 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
 			    struct drm_property *property, uint64_t *val);
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv);
+int drm_atomic_remove_fb(struct drm_framebuffer *fb);
 
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 3334baacf43d..734f86a345f6 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void)
 
 	drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
 	if (IS_ERR(drm_dp_aux_dev_class)) {
-		res = PTR_ERR(drm_dp_aux_dev_class);
-		goto out;
+		return PTR_ERR(drm_dp_aux_dev_class);
 	}
 	drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index aead9ffcbe29..be27ed36f56e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -362,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
 	/* for a USB device */
-	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
-	drm_minor_unregister(dev, DRM_MINOR_RENDER);
-	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+	drm_dev_unregister(dev);
 
 	mutex_lock(&drm_global_mutex);
 
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 1f84ff5f1bf8..33af4a5ddca1 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -648,7 +648,7 @@ long drm_ioctl(struct file *filp,
 	int retcode = -EINVAL;
 	char stack_kdata[128];
 	char *kdata = NULL;
-	unsigned int usize, asize, drv_size;
+	unsigned int in_size, out_size, drv_size, ksize;
 	bool is_driver_ioctl;
 
 	dev = file_priv->minor->dev;
@@ -671,9 +671,12 @@ long drm_ioctl(struct file *filp,
 	}
 
 	drv_size = _IOC_SIZE(ioctl->cmd);
-	usize = _IOC_SIZE(cmd);
-	asize = max(usize, drv_size);
-	cmd = ioctl->cmd;
+	out_size = in_size = _IOC_SIZE(cmd);
+	if ((cmd & ioctl->cmd & IOC_IN) == 0)
+		in_size = 0;
+	if ((cmd & ioctl->cmd & IOC_OUT) == 0)
+		out_size = 0;
+	ksize = max(max(in_size, out_size), drv_size);
 
 	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
 		  task_pid_nr(current),
@@ -693,30 +696,24 @@ long drm_ioctl(struct file *filp,
 	if (unlikely(retcode))
 		goto err_i1;
 
-	if (cmd & (IOC_IN | IOC_OUT)) {
-		if (asize <= sizeof(stack_kdata)) {
-			kdata = stack_kdata;
-		} else {
-			kdata = kmalloc(asize, GFP_KERNEL);
-			if (!kdata) {
-				retcode = -ENOMEM;
-				goto err_i1;
-			}
+	if (ksize <= sizeof(stack_kdata)) {
+		kdata = stack_kdata;
+	} else {
+		kdata = kmalloc(ksize, GFP_KERNEL);
+		if (!kdata) {
+			retcode = -ENOMEM;
+			goto err_i1;
 		}
-		if (asize > usize)
-			memset(kdata + usize, 0, asize - usize);
 	}
 
-	if (cmd & IOC_IN) {
-		if (copy_from_user(kdata, (void __user *)arg,
-				   usize) != 0) {
-			retcode = -EFAULT;
-			goto err_i1;
-		}
-	} else if (cmd & IOC_OUT) {
-		memset(kdata, 0, usize);
+	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+		retcode = -EFAULT;
+		goto err_i1;
 	}
 
+	if (ksize > in_size)
+		memset(kdata + in_size, 0, ksize - in_size);
+
 	/* Enforce sane locking for kms driver ioctls. Core ioctls are
 	 * too messy still. */
 	if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
@@ -728,11 +725,8 @@ long drm_ioctl(struct file *filp,
 		mutex_unlock(&drm_global_mutex);
 	}
 
-	if (cmd & IOC_OUT) {
-		if (copy_to_user((void __user *)arg, kdata,
-				 usize) != 0)
-			retcode = -EFAULT;
-	}
+	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+		retcode = -EFAULT;
 
       err_i1:
 	if (!ioctl)
@@ -759,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl);
  * shouldn't be used by any drivers.
  *
  * Returns:
- * True if the @nr corresponds to a DRM core ioctl numer, false otherwise.
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
  */
 bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
 {
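The rewritten drm_ioctl() above keys the copy-in/copy-out sizes off the direction bits encoded in the ioctl number instead of a blanket IOC_IN|IOC_OUT check, which is what keeps the kernel from overwriting user arguments it was never asked to write. A small hedged sketch of what in_size/out_size end up being for a typical read/write ioctl (the struct and ioctl number are invented; IOC_IN/IOC_OUT are the kernel's direction masks from asm-generic/ioctl.h, and the real code additionally masks with the driver's own ioctl->cmd):

#include <linux/ioctl.h>
#include <linux/types.h>

struct foo_args {
	__u32 handle;	/* in */
	__u64 size;	/* out */
};

#define FOO_IOCTL_QUERY	_IOWR('d', 0x42, struct foo_args)

/* Roughly what the new drm_ioctl() computes for a given cmd: */
static void foo_copy_sizes(unsigned int cmd,
			   unsigned int *in_size, unsigned int *out_size)
{
	*out_size = *in_size = _IOC_SIZE(cmd);
	if ((cmd & IOC_IN) == 0)	/* no _IOC_WRITE direction bit */
		*in_size = 0;
	if ((cmd & IOC_OUT) == 0)	/* no _IOC_READ direction bit */
		*out_size = 0;
	/* For an _IOWR number both stay at sizeof(struct foo_args); for a
	 * read-only _IOR number in_size drops to 0, so nothing is copied in
	 * and nothing of the user's buffer is clobbered unless requested. */
}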
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8ca3d2bf2bda..35c86acede38 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -532,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev)
 
 	/*
 	 * Wake up any waiters so they don't hang. This is just to paper over
-	 * isssues for UMS drivers which aren't in full control of their
+	 * issues for UMS drivers which aren't in full control of their
 	 * vblank/irq handling. KMS drivers must ensure that vblanks are all
 	 * disabled when uninstalling the irq handler.
 	 */
@@ -594,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data,
 		return 0;
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
-	/* UMS was only ever support on pci devices. */
+	/* UMS was only ever supported on pci devices. */
 	if (WARN_ON(!dev->pdev))
 		return -EINVAL;
 
@@ -945,8 +945,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
  *
  * This is the legacy version of drm_crtc_vblank_count_and_time().
  */
-u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
-			      struct timeval *vblanktime)
+static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+				     struct timeval *vblanktime)
 {
 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 vblank_count;
@@ -963,7 +963,6 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 
 	return vblank_count;
 }
-EXPORT_SYMBOL(drm_vblank_count_and_time);
 
 /**
  * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -975,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time);
  * vblank events since the system was booted, including lost events due to
  * modesetting activity. Returns corresponding system timestamp of the time
  * of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the native KMS version of drm_vblank_count_and_time().
  */
 u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
 				   struct timeval *vblanktime)
@@ -1588,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 
 	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
-	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
-	    (seq - vblwait->request.sequence) <= (1 << 23)) {
-		vblwait->request.sequence = seq + 1;
-		vblwait->reply.sequence = vblwait->request.sequence;
-	}
-
 	DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
 		  vblwait->request.sequence, seq, pipe);
 
@@ -1690,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		goto done;
 	}
 
+	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+		vblwait->request.sequence = seq + 1;
+	}
+
 	if (flags & _DRM_VBLANK_EVENT) {
 		/* must hold on to the vblank ref until the event fires
 		 * drm_vblank_put will be called asynchronously
@@ -1697,11 +1693,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
 	}
 
-	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
-	    (seq - vblwait->request.sequence) <= (1<<23)) {
-		vblwait->request.sequence = seq + 1;
-	}
-
 	DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
 		  vblwait->request.sequence, pipe);
 	vblank->last_wait = vblwait->request.sequence;
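With drm_vblank_count_and_time() now static, drivers that need a timestamped vblank count are expected to go through the CRTC-based variant that stays exported. A minimal hedged sketch (the caller is invented; only the drm_crtc_vblank_count_and_time() call reflects the real API):

#include <drm/drmP.h>

/* Illustrative only: sample the cooked vblank counter together with the
 * timestamp of the vblank it corresponds to, e.g. for presentation
 * feedback or flip completion events. */
static void foo_sample_vblank(struct drm_crtc *crtc)
{
	struct timeval vbltime;
	u32 count;

	count = drm_crtc_vblank_count_and_time(crtc, &vbltime);
	DRM_DEBUG("vblank %u at %ld.%06ld\n", count,
		  (long)vbltime.tv_sec, (long)vbltime.tv_usec);
}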
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 87a8cb73366f..fc0ebd273ef8 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -44,7 +44,7 @@
 #  include <asm/agp.h>
 #else
 #  ifdef __powerpc__
-#    define PAGE_AGP	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+#    define PAGE_AGP	pgprot_noncached_wc(PAGE_KERNEL)
 #  else
 #    define PAGE_AGP	PAGE_KERNEL
 #  endif
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 49311fc61d5d..af0d471ee246 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -999,17 +999,17 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
 EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
 
 /**
- * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect
- * output signal on the TE signal line when display module reaches line N
- * defined by STS[n:0].
+ * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
+ * the Tearing Effect output signal of the display module
  * @dsi: DSI peripheral device
- * @param: STS[10:0]
+ * @scanline: scanline to use as trigger
+ *
  * Return: 0 on success or a negative error code on failure
  */
-int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
 {
-	u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8,
-			  param & 0xff };
+	u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
+			  scanline & 0xff };
 	ssize_t err;
 
 	err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
@@ -1018,7 +1018,7 @@ int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
 
 	return 0;
 }
-EXPORT_SYMBOL(mipi_dsi_set_tear_scanline);
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
 
 /**
  * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
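The rename brings the helper in line with the other mipi_dsi_dcs_* functions. A hedged sketch of how a panel driver might pair it with mipi_dsi_dcs_set_tear_on() while preparing the panel (the panel function itself is invented; the two DCS helpers and the tear mode constant are real):

#include <drm/drm_mipi_dsi.h>

/* Illustrative only: enable the TE output and have it trigger on line 0. */
static int foo_panel_enable_te(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_tear_scanline(dsi, 0);
}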
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 4f0f3b36d537..bf70431073f6 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -41,7 +41,7 @@
 static inline void *drm_vmalloc_dma(unsigned long size)
 {
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+	return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
 #else
 	return vmalloc_32(size);
 #endif
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 43ff44a2b8e7..caa4e4ca616d 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
-	tmp |= _PAGE_NO_CACHE;
+	tmp = pgprot_noncached_wc(tmp);
 #endif
 	return tmp;
 }
@@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		 * pages and mappings in fault()
 		 */
 #if defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
 		vma->vm_ops = &drm_vm_ops;
 		break;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index d814b3048ee5..1c7e14cf2781 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,10 +2,6 @@ config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
 	depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
-	select DRM_KMS_FB_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
 	select VIDEOMODE_HELPERS
 	help
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 0594c45f7164..e9e8ae2ec06b 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
 
 		/* Disable the crtc to ensure a full modeset is
 		 * performed whenever it's turned on again. */
-		if (crtc) {
-			struct drm_mode_set modeset = {
-				.crtc = crtc,
-			};
-
-			drm_mode_set_config_internal(&modeset);
-		}
+		if (crtc)
+			drm_crtc_force_disable(crtc);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index cf8f38d39e10..1c366f8cb2d0 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
 	phy_set_drvdata(phy, mipi_tx);
 
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (IS_ERR(phy)) {
+	if (IS_ERR(phy_provider)) {
 		ret = PTR_ERR(phy_provider);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index aea81a547e85..34c0f2f67548 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev)
 	struct nv04_display *disp = nv04_display(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_encoder *encoder;
-	struct drm_crtc *crtc;
 	struct nouveau_crtc *nv_crtc;
 
-	/* Turn every CRTC off. */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct drm_mode_set modeset = {
-			.crtc = crtc,
-		};
-
-		drm_mode_set_config_internal(&modeset);
-	}
-
 	/* Restore state */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
 		encoder->enc_restore(&encoder->base.base);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index a665b78b2af5..434d1e29f279 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
 
 		/* Disable the crtc to ensure a full modeset is
 		 * performed whenever it's turned on again. */
-		if (crtc) {
-			struct drm_mode_set modeset = {
-				.crtc = crtc,
-			};
-
-			drm_mode_set_config_internal(&modeset);
-		}
+		if (crtc)
+			drm_crtc_force_disable(crtc);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 844bd9951456..afbf557b23d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
 {
 	struct nouveau_crtc *nv_crtc =
 		container_of(notify, typeof(*nv_crtc), vblank);
-	drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+	drm_crtc_handle_vblank(&nv_crtc->base);
 	return NVIF_NOTIFY_KEEP;
 }
 
@@ -556,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev)
 	nouveau_display_vblank_fini(dev);
 
 	drm_kms_helper_poll_fini(dev);
+	drm_crtc_force_disable_all(dev);
 	drm_mode_config_cleanup(dev);
 
 	if (disp->dtor)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6dd396f56884..66c1280c0f1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -505,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	pm_runtime_get_sync(dev->dev);
+	if (nouveau_runtime_pm != 0) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
+
 	nouveau_fbcon_fini(dev);
 	nouveau_accel_fini(drm);
 	nouveau_hwmon_fini(dev);
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index b5d4b41361bd..04270f5d110c 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
 {
 	if (!qxl_check_idle(qdev->release_ring)) {
-		queue_work(qdev->gc_queue, &qdev->gc_work);
+		schedule_work(&qdev->gc_work);
 		if (flush)
 			flush_work(&qdev->gc_work);
 		return true;
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 56e1d633875e..ffe885395145 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev,
  * the qxl_clip_rects. This is *not* the same as the memory allocated
  * on the device, it is offset to qxl_clip_rects.chunk.data */
 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
-					      struct qxl_drawable *drawable,
 					      unsigned num_clips,
 					      struct qxl_bo *clips_bo)
 {
@@ -136,6 +135,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
 	 * correctly globaly, since that would require
 	 * tracking all of our palettes. */
 	ret = qxl_bo_kmap(palette_bo, (void **)&pal);
+	if (ret)
+		return ret;
 	pal->num_ents = 2;
 	pal->unique = unique++;
 	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -349,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 	if (ret)
 		goto out_release_backoff;
 
-	rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+	rects = drawable_set_clipping(qdev, num_clips, clips_bo);
 	if (!rects)
 		goto out_release_backoff;
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3ad6604b34ce..8e633caa4078 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -321,7 +321,6 @@ struct qxl_device {
 	struct qxl_bo *current_release_bo[3];
 	int current_release_bo_offset[3];
 
-	struct workqueue_struct *gc_queue;
 	struct work_struct gc_work;
 
 	struct drm_property *hotplug_mode_update_property;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 2319800b7add..e642242728c0 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev,
 		 (unsigned long)qdev->surfaceram_size);
 
 
-	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
 	INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
 	return 0;
@@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
 		qxl_bo_unref(&qdev->current_release_bo[0]);
 	if (qdev->current_release_bo[1])
 		qxl_bo_unref(&qdev->current_release_bo[1]);
-	flush_workqueue(qdev->gc_queue);
-	destroy_workqueue(qdev->gc_queue);
-	qdev->gc_queue = NULL;
-
+	flush_work(&qdev->gc_work);
 	qxl_ring_free(qdev->command_ring);
 	qxl_ring_free(qdev->cursor_ring);
 	qxl_ring_free(qdev->release_ring);
@@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
 	struct qxl_device *qdev;
 	int r;
 
-	/* require kms */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
 	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
 	if (qdev == NULL)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 21c44b2293bc..a00dd2f74527 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
+#include <linux/pm_runtime.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
@@ -1526,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	return 0;
 
 failed:
+	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
+	if (radeon_is_px(ddev))
+		pm_runtime_put_noidle(ddev->dev);
 	if (runtime)
 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3965d1916b9c..5f1cd695c965 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1711,6 +1711,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
 		radeon_afmt_fini(rdev);
 		drm_kms_helper_poll_fini(rdev->ddev);
 		radeon_hpd_fini(rdev);
+		drm_crtc_force_disable_all(rdev->ddev);
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 414953c46a38..835563c1f0ed 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev)
 	if (rdev->rmmio == NULL)
 		goto done_free;
 
-	pm_runtime_get_sync(dev->dev);
+	if (radeon_is_px(dev)) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
 
 	radeon_kfd_device_fini(rdev);
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 0d8bdda736f9..e39fcef2e033 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
 	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
 
 	if (status & DSSR_FRM) {
-		drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+		drm_crtc_handle_vblank(&rcrtc->crtc);
 		rcar_du_crtc_finish_page_flip(rcrtc);
 		ret = IRQ_HANDLED;
 	}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index d665fb04d264..f0bd1ee8b128 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -433,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
 			is_support_iommu = false;
 		}
 
+		of_node_put(iommu);
 		component_match_add(dev, &match, compare_of, port->parent);
 		of_node_put(port);
 	}
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 794148ff0e57..bd74732ea09b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -267,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
 	if (vtg_np)
 		compo->vtg_main = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
 	if (vtg_np)
 		compo->vtg_aux = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	platform_set_drvdata(pdev, compo);
 
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index ec3108074350..00881eb4536e 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -580,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
 	dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
 	if (!dvo->panel_node)
 		DRM_ERROR("No panel associated to the dvo output\n");
+	of_node_put(dvo->panel_node);
 
 	platform_set_drvdata(pdev, dvo);
 
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 33d2f42550cc..b03232247966 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1363,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
 	if (vtg_np)
 		hqvdp->vtg = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	platform_set_drvdata(pdev, hqvdp);
 
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 957ce712ea44..0bdc385eec17 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev)
 	np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
 	if (np) {
 		vtg->slave = of_vtg_find(np);
+		of_node_put(np);
 
 		if (!vtg->slave)
 			return -EPROBE_DEFER;
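The rockchip and sti fixes above all plug the same class of leak: of_parse_phandle() returns a node with an elevated refcount that the caller must drop. A generic hedged sketch of the pattern (the property name and caller are invented; the OF calls are the standard API):

#include <linux/of.h>

/* Illustrative only: look up a phandle, use it, and always drop the
 * reference taken by of_parse_phandle(), including on error paths. */
static int foo_probe_companion(struct device_node *np)
{
	struct device_node *companion;
	int ret = 0;

	companion = of_parse_phandle(np, "foo,companion", 0);
	if (!companion)
		return -ENODEV;

	if (!of_device_is_available(companion))
		ret = -EPROBE_DEFER;

	of_node_put(companion);	/* balance of_parse_phandle() */
	return ret;
}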
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 79027b1c64d3..107c8bd04f6d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
 
 	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
-	drm_handle_vblank(dev, 0);
+	drm_crtc_handle_vblank(crtc);
 
 	if (!skip_event) {
 		struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c20408940cd0..17d34e0edbdd 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
 	struct drm_device *dev = usb_get_intfdata(interface);
 
 	drm_kms_helper_poll_disable(dev);
-	drm_connector_unregister_all(dev);
 	udl_fbdev_unplug(dev);
 	udl_drop_usb(dev);
 	drm_unplug_dev(dev);
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 35ea5d02a827..29c2aab3c1a7 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -42,81 +42,38 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
-	drm_gem_put_pages(&obj->base, obj->pages, false, false);
-	obj->pages = NULL;
-}
-
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
-	drm_gem_free_mmap_offset(obj);
-
-	if (vgem_obj->use_dma_buf && obj->dma_buf) {
-		dma_buf_put(obj->dma_buf);
-		obj->dma_buf = NULL;
-	}
-
 	drm_gem_object_release(obj);
-
-	if (vgem_obj->pages)
-		vgem_gem_put_pages(vgem_obj);
-
-	vgem_obj->pages = NULL;
-
 	kfree(vgem_obj);
 }
 
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
-	struct page **pages;
-
-	if (obj->pages || obj->use_dma_buf)
-		return 0;
-
-	pages = drm_gem_get_pages(&obj->base);
-	if (IS_ERR(pages)) {
-		return PTR_ERR(pages);
-	}
-
-	obj->pages = pages;
-
-	return 0;
-}
-
 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
-	loff_t num_pages;
-	pgoff_t page_offset;
-	int ret;
-
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-		PAGE_SHIFT;
+	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	struct page *page;
 
-	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
-	if (page_offset > num_pages)
-		return VM_FAULT_SIGBUS;
-
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
-			     obj->pages[page_offset]);
-	switch (ret) {
-	case 0:
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	case -EBUSY:
-		return VM_FAULT_RETRY;
-	case -EFAULT:
-	case -EINVAL:
-		return VM_FAULT_SIGBUS;
-	default:
-		WARN_ON(1);
-		return VM_FAULT_SIGBUS;
+	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
+	if (!IS_ERR(page)) {
+		vmf->page = page;
+		return 0;
+	} else switch (PTR_ERR(page)) {
+		case -ENOSPC:
+		case -ENOMEM:
+			return VM_FAULT_OOM;
+		case -EBUSY:
+			return VM_FAULT_RETRY;
+		case -EFAULT:
+		case -EINVAL:
+			return VM_FAULT_SIGBUS;
+		default:
+			WARN_ON_ONCE(PTR_ERR(page));
+			return VM_FAULT_SIGBUS;
 	}
 }
 
@@ -134,57 +91,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 					      unsigned long size)
 {
 	struct drm_vgem_gem_object *obj;
-	struct drm_gem_object *gem_object;
-	int err;
-
-	size = roundup(size, PAGE_SIZE);
+	int ret;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
-	gem_object = &obj->base;
-
-	err = drm_gem_object_init(dev, gem_object, size);
-	if (err)
-		goto out;
-
-	err = vgem_gem_get_pages(obj);
-	if (err)
-		goto out;
-
-	err = drm_gem_handle_create(file, gem_object, handle);
-	if (err)
-		goto handle_out;
+	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+	if (ret)
+		goto err_free;
 
-	drm_gem_object_unreference_unlocked(gem_object);
+	ret = drm_gem_handle_create(file, &obj->base, handle);
+	drm_gem_object_unreference_unlocked(&obj->base);
+	if (ret)
+		goto err;
 
-	return gem_object;
+	return &obj->base;
 
-handle_out:
-	drm_gem_object_release(gem_object);
-out:
+err_free:
 	kfree(obj);
-	return ERR_PTR(err);
+err:
+	return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 				struct drm_mode_create_dumb *args)
 {
 	struct drm_gem_object *gem_object;
-	uint64_t size;
-	uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+	u64 pitch, size;
 
+	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 	size = args->height * pitch;
 	if (size == 0)
 		return -EINVAL;
 
 	gem_object = vgem_gem_create(dev, file, &args->handle, size);
-
-	if (IS_ERR(gem_object)) {
-		DRM_DEBUG_DRIVER("object creation failed\n");
+	if (IS_ERR(gem_object))
 		return PTR_ERR(gem_object);
-	}
 
 	args->size = gem_object->size;
 	args->pitch = pitch;
@@ -194,26 +137,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	return 0;
 }
 
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
-		      uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+			     uint32_t handle, uint64_t *offset)
 {
-	int ret = 0;
 	struct drm_gem_object *obj;
+	int ret;
 
 	obj = drm_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
+	if (!obj->filp) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
 	ret = drm_gem_create_mmap_offset(obj);
 	if (ret)
 		goto unref;
 
-	BUG_ON(!obj->filp);
-
-	obj->filp->private_data = obj;
-
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
-
 unref:
 	drm_gem_object_unreference_unlocked(obj);
 
@@ -223,24 +166,127 @@ unref:
 static struct drm_ioctl_desc vgem_ioctls[] = {
 };
 
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long flags = vma->vm_flags;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
+	 * are ordinary and not special.
+	 */
+	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+	return 0;
+}
+
 static const struct file_operations vgem_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
-	.mmap = drm_gem_mmap,
+	.mmap = vgem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.unlocked_ioctl = drm_ioctl,
 	.release = drm_release,
 };
 
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+
+	/* Flush the object from the CPU cache so that importers can rely
+	 * on coherent indirect access via the exported dma-address.
+	 */
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	drm_clflush_pages(pages, n_pages);
+	drm_gem_put_pages(obj, pages, true, false);
+
+	return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct sg_table *st;
+	struct page **pages;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return ERR_CAST(pages);
+
+	st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+	drm_gem_put_pages(obj, pages, false, false);
+
+	return st;
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+	void *addr;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return NULL;
+
+	addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	drm_gem_put_pages(obj, pages, false, false);
+
+	return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->filp)
+		return -ENODEV;
+
+	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	if (ret)
+		return ret;
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->filp);
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	return 0;
+}
+
 static struct drm_driver vgem_driver = {
-	.driver_features = DRIVER_GEM,
+	.driver_features = DRIVER_GEM | DRIVER_PRIME,
 	.gem_free_object_unlocked = vgem_gem_free_object,
 	.gem_vm_ops = &vgem_gem_vm_ops,
 	.ioctls = vgem_ioctls,
 	.fops = &vgem_driver_fops,
+
 	.dumb_create = vgem_gem_dumb_create,
 	.dumb_map_offset = vgem_gem_dumb_map,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_pin = vgem_prime_pin,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
+	.gem_prime_vmap = vgem_prime_vmap,
+	.gem_prime_vunmap = vgem_prime_vunmap,
+	.gem_prime_mmap = vgem_prime_mmap,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
@@ -248,7 +294,7 @@ static struct drm_driver vgem_driver = {
 	.minor = DRIVER_MINOR,
 };
 
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
 
 static int __init vgem_init(void)
 {
@@ -261,7 +307,6 @@ static int __init vgem_init(void)
 	}
 
 	ret = drm_dev_register(vgem_device, 0);
-
 	if (ret)
 		goto out_unref;
 
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index e9f92f7ee275..988cbaae7588 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -35,12 +35,6 @@
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
-	struct page **pages;
-	bool use_dma_buf;
 };
 
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
-
 #endif
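
For context on how this new export path gets exercised: the sketch below is a hedged illustration (not part of the patch) of the minimal userspace flow that vgem dma-buf export enables for test cases. It creates a dumb buffer on an already-open vgem node and exports the handle as a dma-buf fd, which routes through vgem_prime_pin() and vgem_prime_get_sg_table() above. It assumes the libdrm headers; the buffer dimensions are arbitrary.

/* Hedged illustration, not part of the patch: export a vgem buffer as a dma-buf. */
#include <string.h>
#include <xf86drm.h>	/* drmIoctl(), drmPrimeHandleToFD(); pulls in drm.h/drm_mode.h */

static int vgem_export_dmabuf(int vgem_fd, int *dmabuf_fd)
{
	struct drm_mode_create_dumb create;
	int ret;

	memset(&create, 0, sizeof(create));
	create.width = 1024;	/* arbitrary test dimensions */
	create.height = 768;
	create.bpp = 32;

	ret = drmIoctl(vgem_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	if (ret)
		return ret;

	/* Exporting the GEM handle lands in the vgem PRIME hooks added above. */
	return drmPrimeHandleToFD(vgem_fd, create.handle, DRM_CLOEXEC, dmabuf_fd);
}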
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 60646644bef3..5d5c9515618d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1041,8 +1041,7 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 	struct vmw_master *vmaster;
 
-	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
-	    !(flags & DRM_AUTH))
+	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
 		return NULL;
 
 	ret = mutex_lock_interruptible(&dev->master_mutex);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 2df216b39cc5..5f962bfcb43c 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -52,9 +52,9 @@
  *
  * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
  * * muxless: Dual GPUs but only one of them is connected to outputs.
  *   The other one is merely used to offload rendering, its results
  *   are copied over PCIe into the framebuffer. On Linux this is
  *   supported with DRI PRIME.
  *
  * Hybrid graphics started to appear in the late Naughties and were initially
  * all muxed. Newer laptops moved to a muxless architecture for cost reasons.
@@ -560,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
  * * OFF: Power off the device not in use.
  * * ON: Power on the device not in use.
  * * IGD: Switch to the integrated graphics device.
  *   Power on the integrated GPU if necessary, power off the discrete GPU.
  *   Prerequisite is that no user space processes (e.g. Xorg, alsactl)
  *   have opened device files of the GPUs or the audio client. If the
  *   switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
  *   and /dev/snd/controlC1 to identify processes blocking the switch.
  * * DIS: Switch to the discrete graphics device.
  * * DIGD: Delayed switch to the integrated graphics device.
  *   This will perform the switch once the last user space process has
  *   closed the device files of the GPUs and the audio client.
  * * DDIS: Delayed switch to the discrete graphics device.
  * * MIGD: Mux-only switch to the integrated graphics device.
  *   Does not remap console or change the power state of either gpu.
  *   If the integrated GPU is currently off, the screen will turn black.
  *   If it is on, the screen will show whatever happens to be in VRAM.
  *   Either way, the user has to blindly enter the command to switch back.
  * * MDIS: Mux-only switch to the discrete graphics device.
  *
  * For GPUs whose power state is controlled by the driver's runtime pm,
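
As a reminder of how the commands documented above are issued: they are plain strings written to vga_switcheroo's debugfs control file. The snippet below is a hedged sketch (not part of this patch), assuming debugfs is mounted at the usual /sys/kernel/debug.

/* Hedged sketch, not part of the patch: request a delayed switch to the
 * integrated GPU ("DIGD") via vga_switcheroo's debugfs control file.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "DIGD";
	int fd = open("/sys/kernel/debug/vgaswitcheroo/switch", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}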
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index cf918e3e6afb..c2fe2cffb809 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -942,8 +942,6 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data,
 				  struct drm_file *filp);
 extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
 extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
-				     struct timeval *vblanktime);
 extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
 					  struct timeval *vblanktime);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index f5469d3a46dd..9e6ab4a0c274 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -2588,7 +2588,6 @@ static inline unsigned drm_connector_index(struct drm_connector *connector)
 }
 
 /* helpers to {un}register all connectors from sysfs for device */
-extern int drm_connector_register_all(struct drm_device *dev);
 extern void drm_connector_unregister_all(struct drm_device *dev);
 
 extern __printf(5, 6)
@@ -2654,6 +2653,8 @@ extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
 extern void drm_plane_force_disable(struct drm_plane *plane);
 extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
 				   int *hdisplay, int *vdisplay);
+extern int drm_crtc_force_disable(struct drm_crtc *crtc);
+extern int drm_crtc_force_disable_all(struct drm_device *dev);
 
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 72f5b15e0738..47ac92584d76 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -265,7 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
 				    u16 end);
 int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
 				  u16 end);
-int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
 int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
 int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
 			     enum mipi_dsi_dcs_tear_mode mode);
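
For reference, the renamed helper sits alongside the existing DCS tear helpers declared above. A hedged sketch (not part of this patch) of how a panel driver might program tearing-effect reporting; the helper name panel_setup_te and the scanline value are illustrative only and panel specific.

/* Hedged sketch, not part of the patch: enable the TE signal and program the
 * tear scanline from a panel driver.
 */
#include <drm/drm_mipi_dsi.h>

static int panel_setup_te(struct mipi_dsi_device *dsi, u16 scanline)
{
	int ret;

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_tear_scanline(dsi, scanline);
}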