about summary refs log tree commit diff stats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2017-08-16 17:33:41 -0400
committerDave Airlie <airlied@redhat.com>2017-08-16 17:33:41 -0400
commit3154b133711f70bb50f513773947a8a647d24310 (patch)
treebe1284614bc52cec292ebc150d9983ca08af34cc /drivers/gpu
parentefa479352fc780b305fa186cafb5f416fdf2b2cb (diff)
parentd956e1293b9b43f3a9a508162cdbaa96cf02e6e0 (diff)
Merge tag 'drm-misc-next-2017-08-16' of git://anongit.freedesktop.org/git/drm-misc into drm-next
UAPI Changes: - vc4: Allow userspace to dictate rendering order in submit_cl ioctl (Eric) Cross-subsystem Changes: - vboxvideo: One of Cihangir's patches applies to vboxvideo which is maintained in staging Core Changes: - atomic_legacy_backoff is officially killed (Daniel) - Extract drm_device.h (Daniel) - Unregister drm device on unplug (Daniel) - Rename deprecated drm_*_(un)?reference functions to drm_*_{get|put} (Cihangir) Driver Changes: - vc4: Error/destroy path cleanups, log level demotion, edid leak (Eric) - various: Make various drm_*_funcs structs const (Bhumika) - tinydrm: add support for LEGO MINDSTORMS EV3 LCD (David) - various: Second half of .dumb_{map_offset|destroy} defaults set (Noralf) Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: Eric Anholt <eric@anholt.net> Cc: Bhumika Goyal <bhumirks@gmail.com> Cc: Cihangir Akturk <cakturk@gmail.com> Cc: David Lechner <david@lechnology.com> Cc: Noralf Trønnes <noralf@tronnes.org> * tag 'drm-misc-next-2017-08-16' of git://anongit.freedesktop.org/git/drm-misc: (50 commits) drm/gem-cma-helper: Remove drm_gem_cma_dumb_map_offset() drm/virtio: Use the drm_driver.dumb_destroy default drm/bochs: Use the drm_driver.dumb_destroy default drm/mgag200: Use the drm_driver.dumb_destroy default drm/exynos: Use .dumb_map_offset and .dumb_destroy defaults drm/msm: Use the drm_driver.dumb_destroy default drm/ast: Use the drm_driver.dumb_destroy default drm/qxl: Use the drm_driver.dumb_destroy default drm/udl: Use the drm_driver.dumb_destroy default drm/cirrus: Use the drm_driver.dumb_destroy default drm/tegra: Use .dumb_map_offset and .dumb_destroy defaults drm/gma500: Use .dumb_map_offset and .dumb_destroy defaults drm/mxsfb: Use .dumb_map_offset and .dumb_destroy defaults drm/meson: Use .dumb_map_offset and .dumb_destroy defaults drm/kirin: Use .dumb_map_offset and .dumb_destroy defaults drm/vc4: Continue the switch to drm_*_put() helpers drm/vc4: Fix leak of HDMI EDID dma-buf: fix reservation_object_wait_timeout_rcu to wait 
correctly v2 dma-buf: add reservation_object_copy_fences (v2) drm/tinydrm: add support for LEGO MINDSTORMS EV3 LCD ...
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c1
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c10
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c10
-rw-r--r--drivers/gpu/drm/drm_drv.c40
-rw-r--r--drivers/gpu/drm/drm_file.c2
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c37
-rw-r--r--drivers/gpu/drm/drm_ioctl.c4
-rw-r--r--drivers/gpu/drm/drm_plane.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c30
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h5
-rw-r--r--drivers/gpu/drm/gma500/gem.c30
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c8
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c10
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c2
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/gem.c21
-rw-r--r--drivers/gpu/drm/tegra/gem.h2
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig10
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile1
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c42
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c2
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c28
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c428
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c31
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c61
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c78
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c72
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c4
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c7
75 files changed, 721 insertions, 405 deletions
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index e3c13aa202b8..289eda54e5aa 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -31,7 +31,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
31 drm_fbdev_cma_hotplug_event(arcpgu->fbdev); 31 drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
32} 32}
33 33
34static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { 34static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
35 .fb_create = drm_fb_cma_create, 35 .fb_create = drm_fb_cma_create,
36 .output_poll_changed = arcpgu_fb_output_poll_changed, 36 .output_poll_changed = arcpgu_fb_output_poll_changed,
37 .atomic_check = drm_atomic_helper_check, 37 .atomic_check = drm_atomic_helper_check,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 3022b39c00f3..69dab82a3771 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,6 @@ static struct drm_driver driver = {
209 .gem_free_object_unlocked = ast_gem_free_object, 209 .gem_free_object_unlocked = ast_gem_free_object,
210 .dumb_create = ast_dumb_create, 210 .dumb_create = ast_dumb_create,
211 .dumb_map_offset = ast_dumb_mmap_offset, 211 .dumb_map_offset = ast_dumb_mmap_offset,
212 .dumb_destroy = drm_gem_dumb_destroy,
213 212
214}; 213};
215 214
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 9052ebeae8d0..0cd827e11fa2 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -266,7 +266,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
266 drm_fb_helper_unregister_fbi(&afbdev->helper); 266 drm_fb_helper_unregister_fbi(&afbdev->helper);
267 267
268 if (afb->obj) { 268 if (afb->obj) {
269 drm_gem_object_unreference_unlocked(afb->obj); 269 drm_gem_object_put_unlocked(afb->obj);
270 afb->obj = NULL; 270 afb->obj = NULL;
271 } 271 }
272 drm_fb_helper_fini(&afbdev->helper); 272 drm_fb_helper_fini(&afbdev->helper);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9a44cdec3bca..dac355812adc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -387,7 +387,7 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
387{ 387{
388 struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); 388 struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
389 389
390 drm_gem_object_unreference_unlocked(ast_fb->obj); 390 drm_gem_object_put_unlocked(ast_fb->obj);
391 drm_framebuffer_cleanup(fb); 391 drm_framebuffer_cleanup(fb);
392 kfree(ast_fb); 392 kfree(ast_fb);
393} 393}
@@ -429,13 +429,13 @@ ast_user_framebuffer_create(struct drm_device *dev,
429 429
430 ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL); 430 ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
431 if (!ast_fb) { 431 if (!ast_fb) {
432 drm_gem_object_unreference_unlocked(obj); 432 drm_gem_object_put_unlocked(obj);
433 return ERR_PTR(-ENOMEM); 433 return ERR_PTR(-ENOMEM);
434 } 434 }
435 435
436 ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj); 436 ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
437 if (ret) { 437 if (ret) {
438 drm_gem_object_unreference_unlocked(obj); 438 drm_gem_object_put_unlocked(obj);
439 kfree(ast_fb); 439 kfree(ast_fb);
440 return ERR_PTR(ret); 440 return ERR_PTR(ret);
441 } 441 }
@@ -628,7 +628,7 @@ int ast_dumb_create(struct drm_file *file,
628 return ret; 628 return ret;
629 629
630 ret = drm_gem_handle_create(file, gobj, &handle); 630 ret = drm_gem_handle_create(file, gobj, &handle);
631 drm_gem_object_unreference_unlocked(gobj); 631 drm_gem_object_put_unlocked(gobj);
632 if (ret) 632 if (ret)
633 return ret; 633 return ret;
634 634
@@ -676,7 +676,7 @@ ast_dumb_mmap_offset(struct drm_file *file,
676 bo = gem_to_ast_bo(obj); 676 bo = gem_to_ast_bo(obj);
677 *offset = ast_bo_mmap_offset(bo); 677 *offset = ast_bo_mmap_offset(bo);
678 678
679 drm_gem_object_unreference_unlocked(obj); 679 drm_gem_object_put_unlocked(obj);
680 680
681 return 0; 681 return 0;
682 682
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 43245229f437..6f3849ec0c1d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -950,7 +950,7 @@ static void ast_cursor_fini(struct drm_device *dev)
950{ 950{
951 struct ast_private *ast = dev->dev_private; 951 struct ast_private *ast = dev->dev_private;
952 ttm_bo_kunmap(&ast->cache_kmap); 952 ttm_bo_kunmap(&ast->cache_kmap);
953 drm_gem_object_unreference_unlocked(ast->cursor_cache); 953 drm_gem_object_put_unlocked(ast->cursor_cache);
954} 954}
955 955
956int ast_mode_init(struct drm_device *dev) 956int ast_mode_init(struct drm_device *dev)
@@ -1215,10 +1215,10 @@ static int ast_cursor_set(struct drm_crtc *crtc,
1215 1215
1216 ast_show_cursor(crtc); 1216 ast_show_cursor(crtc);
1217 1217
1218 drm_gem_object_unreference_unlocked(obj); 1218 drm_gem_object_put_unlocked(obj);
1219 return 0; 1219 return 0;
1220fail: 1220fail:
1221 drm_gem_object_unreference_unlocked(obj); 1221 drm_gem_object_put_unlocked(obj);
1222 return ret; 1222 return ret;
1223} 1223}
1224 1224
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index a1d28845da5f..7b20318483e4 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -93,7 +93,6 @@ static struct drm_driver bochs_driver = {
93 .gem_free_object_unlocked = bochs_gem_free_object, 93 .gem_free_object_unlocked = bochs_gem_free_object,
94 .dumb_create = bochs_dumb_create, 94 .dumb_create = bochs_dumb_create,
95 .dumb_map_offset = bochs_dumb_mmap_offset, 95 .dumb_map_offset = bochs_dumb_mmap_offset,
96 .dumb_destroy = drm_gem_dumb_destroy,
97}; 96};
98 97
99/* ---------------------------------------------------------------------- */ 98/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 682c090fa3ed..b2431aee7887 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -785,7 +785,7 @@ adv7511_connector_detect(struct drm_connector *connector, bool force)
785 return adv7511_detect(adv, connector); 785 return adv7511_detect(adv, connector);
786} 786}
787 787
788static struct drm_connector_funcs adv7511_connector_funcs = { 788static const struct drm_connector_funcs adv7511_connector_funcs = {
789 .fill_modes = drm_helper_probe_single_connector_modes, 789 .fill_modes = drm_helper_probe_single_connector_modes,
790 .detect = adv7511_connector_detect, 790 .detect = adv7511_connector_detect,
791 .destroy = drm_connector_cleanup, 791 .destroy = drm_connector_cleanup,
@@ -856,7 +856,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
856 return ret; 856 return ret;
857} 857}
858 858
859static struct drm_bridge_funcs adv7511_bridge_funcs = { 859static const struct drm_bridge_funcs adv7511_bridge_funcs = {
860 .enable = adv7511_bridge_enable, 860 .enable = adv7511_bridge_enable,
861 .disable = adv7511_bridge_disable, 861 .disable = adv7511_bridge_disable,
862 .mode_set = adv7511_bridge_mode_set, 862 .mode_set = adv7511_bridge_mode_set,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 8f2d1379c880..cf3f0caf9c63 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -517,7 +517,7 @@ static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
517 return bytes_to_frames(runtime, dw->buf_offset); 517 return bytes_to_frames(runtime, dw->buf_offset);
518} 518}
519 519
520static struct snd_pcm_ops snd_dw_hdmi_ops = { 520static const struct snd_pcm_ops snd_dw_hdmi_ops = {
521 .open = dw_hdmi_open, 521 .open = dw_hdmi_open,
522 .close = dw_hdmi_close, 522 .close = dw_hdmi_close,
523 .ioctl = snd_pcm_lib_ioctl, 523 .ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 36f5ccbd1794..63c7a01b7053 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -811,7 +811,7 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
811 return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge); 811 return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
812} 812}
813 813
814static struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { 814static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
815 .mode_set = dw_mipi_dsi_bridge_mode_set, 815 .mode_set = dw_mipi_dsi_bridge_mode_set,
816 .enable = dw_mipi_dsi_bridge_enable, 816 .enable = dw_mipi_dsi_bridge_enable,
817 .post_disable = dw_mipi_dsi_bridge_post_disable, 817 .post_disable = dw_mipi_dsi_bridge_post_disable,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 910c300f5c37..69c4e352dd78 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,6 @@ static struct drm_driver driver = {
142 .gem_free_object_unlocked = cirrus_gem_free_object, 142 .gem_free_object_unlocked = cirrus_gem_free_object,
143 .dumb_create = cirrus_dumb_create, 143 .dumb_create = cirrus_dumb_create,
144 .dumb_map_offset = cirrus_dumb_mmap_offset, 144 .dumb_map_offset = cirrus_dumb_mmap_offset,
145 .dumb_destroy = drm_gem_dumb_destroy,
146}; 145};
147 146
148static const struct dev_pm_ops cirrus_pm_ops = { 147static const struct dev_pm_ops cirrus_pm_ops = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 0f6815f35ad2..32fbfba2c623 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -251,7 +251,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
251 drm_fb_helper_unregister_fbi(&gfbdev->helper); 251 drm_fb_helper_unregister_fbi(&gfbdev->helper);
252 252
253 if (gfb->obj) { 253 if (gfb->obj) {
254 drm_gem_object_unreference_unlocked(gfb->obj); 254 drm_gem_object_put_unlocked(gfb->obj);
255 gfb->obj = NULL; 255 gfb->obj = NULL;
256 } 256 }
257 257
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e7fc95f63dca..b5f528543956 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -18,7 +18,7 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
18{ 18{
19 struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb); 19 struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
20 20
21 drm_gem_object_unreference_unlocked(cirrus_fb->obj); 21 drm_gem_object_put_unlocked(cirrus_fb->obj);
22 drm_framebuffer_cleanup(fb); 22 drm_framebuffer_cleanup(fb);
23 kfree(fb); 23 kfree(fb);
24} 24}
@@ -67,13 +67,13 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
67 67
68 cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL); 68 cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
69 if (!cirrus_fb) { 69 if (!cirrus_fb) {
70 drm_gem_object_unreference_unlocked(obj); 70 drm_gem_object_put_unlocked(obj);
71 return ERR_PTR(-ENOMEM); 71 return ERR_PTR(-ENOMEM);
72 } 72 }
73 73
74 ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj); 74 ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
75 if (ret) { 75 if (ret) {
76 drm_gem_object_unreference_unlocked(obj); 76 drm_gem_object_put_unlocked(obj);
77 kfree(cirrus_fb); 77 kfree(cirrus_fb);
78 return ERR_PTR(ret); 78 return ERR_PTR(ret);
79 } 79 }
@@ -261,7 +261,7 @@ int cirrus_dumb_create(struct drm_file *file,
261 return ret; 261 return ret;
262 262
263 ret = drm_gem_handle_create(file, gobj, &handle); 263 ret = drm_gem_handle_create(file, gobj, &handle);
264 drm_gem_object_unreference_unlocked(gobj); 264 drm_gem_object_put_unlocked(gobj);
265 if (ret) 265 if (ret)
266 return ret; 266 return ret;
267 267
@@ -310,7 +310,7 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
310 bo = gem_to_cirrus_bo(obj); 310 bo = gem_to_cirrus_bo(obj);
311 *offset = cirrus_bo_mmap_offset(bo); 311 *offset = cirrus_bo_mmap_offset(bo);
312 312
313 drm_gem_object_unreference_unlocked(obj); 313 drm_gem_object_put_unlocked(obj);
314 314
315 return 0; 315 return 0;
316} 316}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 2ed2d919beae..be38ac7050d4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -291,7 +291,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
291 291
292 if (!minor) { 292 if (!minor) {
293 return ERR_PTR(-ENODEV); 293 return ERR_PTR(-ENODEV);
294 } else if (drm_device_is_unplugged(minor->dev)) { 294 } else if (drm_dev_is_unplugged(minor->dev)) {
295 drm_dev_unref(minor->dev); 295 drm_dev_unref(minor->dev);
296 return ERR_PTR(-ENODEV); 296 return ERR_PTR(-ENODEV);
297 } 297 }
@@ -364,26 +364,32 @@ void drm_put_dev(struct drm_device *dev)
364} 364}
365EXPORT_SYMBOL(drm_put_dev); 365EXPORT_SYMBOL(drm_put_dev);
366 366
367void drm_unplug_dev(struct drm_device *dev) 367static void drm_device_set_unplugged(struct drm_device *dev)
368{ 368{
369 /* for a USB device */ 369 smp_wmb();
370 if (drm_core_check_feature(dev, DRIVER_MODESET)) 370 atomic_set(&dev->unplugged, 1);
371 drm_modeset_unregister_all(dev); 371}
372 372
373 drm_minor_unregister(dev, DRM_MINOR_PRIMARY); 373/**
374 drm_minor_unregister(dev, DRM_MINOR_RENDER); 374 * drm_dev_unplug - unplug a DRM device
375 drm_minor_unregister(dev, DRM_MINOR_CONTROL); 375 * @dev: DRM device
376 *
377 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
378 * userspace operations. Entry-points can use drm_dev_is_unplugged(). This
379 * essentially unregisters the device like drm_dev_unregister(), but can be
380 * called while there are still open users of @dev.
381 */
382void drm_dev_unplug(struct drm_device *dev)
383{
384 drm_dev_unregister(dev);
376 385
377 mutex_lock(&drm_global_mutex); 386 mutex_lock(&drm_global_mutex);
378
379 drm_device_set_unplugged(dev); 387 drm_device_set_unplugged(dev);
380 388 if (dev->open_count == 0)
381 if (dev->open_count == 0) { 389 drm_dev_unref(dev);
382 drm_put_dev(dev);
383 }
384 mutex_unlock(&drm_global_mutex); 390 mutex_unlock(&drm_global_mutex);
385} 391}
386EXPORT_SYMBOL(drm_unplug_dev); 392EXPORT_SYMBOL(drm_dev_unplug);
387 393
388/* 394/*
389 * DRM internal mount 395 * DRM internal mount
@@ -835,6 +841,9 @@ EXPORT_SYMBOL(drm_dev_register);
835 * drm_dev_register() but does not deallocate the device. The caller must call 841 * drm_dev_register() but does not deallocate the device. The caller must call
836 * drm_dev_unref() to drop their final reference. 842 * drm_dev_unref() to drop their final reference.
837 * 843 *
844 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
845 * which can be called while there are still open users of @dev.
846 *
838 * This should be called first in the device teardown code to make sure 847 * This should be called first in the device teardown code to make sure
839 * userspace can't access the device instance any more. 848 * userspace can't access the device instance any more.
840 */ 849 */
@@ -842,7 +851,8 @@ void drm_dev_unregister(struct drm_device *dev)
842{ 851{
843 struct drm_map_list *r_list, *list_temp; 852 struct drm_map_list *r_list, *list_temp;
844 853
845 drm_lastclose(dev); 854 if (drm_core_check_feature(dev, DRIVER_LEGACY))
855 drm_lastclose(dev);
846 856
847 dev->registered = false; 857 dev->registered = false;
848 858
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 59b75a974357..b3c6e997ccdb 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -436,7 +436,7 @@ int drm_release(struct inode *inode, struct file *filp)
436 436
437 if (!--dev->open_count) { 437 if (!--dev->open_count) {
438 drm_lastclose(dev); 438 drm_lastclose(dev);
439 if (drm_device_is_unplugged(dev)) 439 if (drm_dev_is_unplugged(dev))
440 drm_put_dev(dev); 440 drm_put_dev(dev);
441 } 441 }
442 mutex_unlock(&drm_global_mutex); 442 mutex_unlock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a8d396bed6a4..ad4e9cfe48a2 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1001,7 +1001,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1001 struct drm_vma_offset_node *node; 1001 struct drm_vma_offset_node *node;
1002 int ret; 1002 int ret;
1003 1003
1004 if (drm_device_is_unplugged(dev)) 1004 if (drm_dev_is_unplugged(dev))
1005 return -ENODEV; 1005 return -ENODEV;
1006 1006
1007 drm_vma_offset_lock_lookup(dev->vma_offset_manager); 1007 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 275ab872b34f..373e33f22be4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -264,41 +264,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
264} 264}
265EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); 265EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
266 266
267/**
268 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
269 * object
270 * @file_priv: DRM file-private structure containing the GEM object
271 * @drm: DRM device
272 * @handle: GEM object handle
273 * @offset: return location for the fake mmap offset
274 *
275 * This function look up an object by its handle and returns the fake mmap
276 * offset associated with it. Drivers using the CMA helpers should set this
277 * as their &drm_driver.dumb_map_offset callback.
278 *
279 * Returns:
280 * 0 on success or a negative error code on failure.
281 */
282int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
283 struct drm_device *drm, u32 handle,
284 u64 *offset)
285{
286 struct drm_gem_object *gem_obj;
287
288 gem_obj = drm_gem_object_lookup(file_priv, handle);
289 if (!gem_obj) {
290 dev_err(drm->dev, "failed to lookup GEM object\n");
291 return -EINVAL;
292 }
293
294 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
295
296 drm_gem_object_put_unlocked(gem_obj);
297
298 return 0;
299}
300EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
301
302const struct vm_operations_struct drm_gem_cma_vm_ops = { 267const struct vm_operations_struct drm_gem_cma_vm_ops = {
303 .open = drm_gem_vm_open, 268 .open = drm_gem_vm_open,
304 .close = drm_gem_vm_close, 269 .close = drm_gem_vm_close,
@@ -390,7 +355,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
390 struct drm_device *dev = priv->minor->dev; 355 struct drm_device *dev = priv->minor->dev;
391 struct drm_vma_offset_node *node; 356 struct drm_vma_offset_node *node;
392 357
393 if (drm_device_is_unplugged(dev)) 358 if (drm_dev_is_unplugged(dev))
394 return -ENODEV; 359 return -ENODEV;
395 360
396 drm_vma_offset_lock_lookup(dev->vma_offset_manager); 361 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8bfeb32f8a10..d920b2118a39 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -716,7 +716,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
716 struct drm_device *dev = file_priv->minor->dev; 716 struct drm_device *dev = file_priv->minor->dev;
717 int retcode; 717 int retcode;
718 718
719 if (drm_device_is_unplugged(dev)) 719 if (drm_dev_is_unplugged(dev))
720 return -ENODEV; 720 return -ENODEV;
721 721
722 retcode = drm_ioctl_permit(flags, file_priv); 722 retcode = drm_ioctl_permit(flags, file_priv);
@@ -765,7 +765,7 @@ long drm_ioctl(struct file *filp,
765 765
766 dev = file_priv->minor->dev; 766 dev = file_priv->minor->dev;
767 767
768 if (drm_device_is_unplugged(dev)) 768 if (drm_dev_is_unplugged(dev))
769 return -ENODEV; 769 return -ENODEV;
770 770
771 is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END; 771 is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 5c14beee52ff..85ab1eec73e5 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -126,7 +126,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
126 plane->format_types[j], 126 plane->format_types[j],
127 plane->modifiers[i])) { 127 plane->modifiers[i])) {
128 128
129 mod->formats |= 1 << j; 129 mod->formats |= 1ULL << j;
130 } 130 }
131 } 131 }
132 132
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1170b3209a12..13a59ed2afbc 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -631,7 +631,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
631 struct drm_device *dev = priv->minor->dev; 631 struct drm_device *dev = priv->minor->dev;
632 int ret; 632 int ret;
633 633
634 if (drm_device_is_unplugged(dev)) 634 if (drm_dev_is_unplugged(dev))
635 return -ENODEV; 635 return -ENODEV;
636 636
637 mutex_lock(&dev->struct_mutex); 637 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index facc8419f0cd..b1f7299600f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -145,8 +145,6 @@ static struct drm_driver exynos_drm_driver = {
145 .gem_free_object_unlocked = exynos_drm_gem_free_object, 145 .gem_free_object_unlocked = exynos_drm_gem_free_object,
146 .gem_vm_ops = &exynos_drm_gem_vm_ops, 146 .gem_vm_ops = &exynos_drm_gem_vm_ops,
147 .dumb_create = exynos_drm_gem_dumb_create, 147 .dumb_create = exynos_drm_gem_dumb_create,
148 .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
149 .dumb_destroy = drm_gem_dumb_destroy,
150 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 148 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
151 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 149 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
152 .gem_prime_export = drm_gem_prime_export, 150 .gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index c23479be4850..077de014d610 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -286,8 +286,8 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
286{ 286{
287 struct drm_exynos_gem_map *args = data; 287 struct drm_exynos_gem_map *args = data;
288 288
289 return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle, 289 return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
290 &args->offset); 290 &args->offset);
291} 291}
292 292
293dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 293dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
@@ -422,32 +422,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
422 return 0; 422 return 0;
423} 423}
424 424
425int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
426 struct drm_device *dev, uint32_t handle,
427 uint64_t *offset)
428{
429 struct drm_gem_object *obj;
430 int ret = 0;
431
432 /*
433 * get offset of memory allocated for drm framebuffer.
434 * - this callback would be called by user application
435 * with DRM_IOCTL_MODE_MAP_DUMB command.
436 */
437
438 obj = drm_gem_object_lookup(file_priv, handle);
439 if (!obj) {
440 DRM_ERROR("failed to lookup gem object.\n");
441 return -EINVAL;
442 }
443
444 *offset = drm_vma_node_offset_addr(&obj->vma_node);
445 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
446
447 drm_gem_object_unreference_unlocked(obj);
448 return ret;
449}
450
451int exynos_drm_gem_fault(struct vm_fault *vmf) 425int exynos_drm_gem_fault(struct vm_fault *vmf)
452{ 426{
453 struct vm_area_struct *vma = vmf->vma; 427 struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 85457255fcd1..e86d1a9518c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -110,11 +110,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
110 struct drm_device *dev, 110 struct drm_device *dev,
111 struct drm_mode_create_dumb *args); 111 struct drm_mode_create_dumb *args);
112 112
113/* map memory region for drm framebuffer to user space. */
114int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
115 struct drm_device *dev, uint32_t handle,
116 uint64_t *offset);
117
118/* page fault handler and mmap fault address(virtual) to physical memory. */ 113/* page fault handler and mmap fault address(virtual) to physical memory. */
119int exynos_drm_gem_fault(struct vm_fault *vmf); 114int exynos_drm_gem_fault(struct vm_fault *vmf);
120 115
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 7da061aab729..131239759a75 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -48,36 +48,6 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
48} 48}
49 49
50/** 50/**
51 * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
52 * @file: our drm client file
53 * @dev: drm device
54 * @handle: GEM handle to the object (from dumb_create)
55 *
56 * Do the necessary setup to allow the mapping of the frame buffer
57 * into user memory. We don't have to do much here at the moment.
58 */
59int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
60 uint32_t handle, uint64_t *offset)
61{
62 int ret = 0;
63 struct drm_gem_object *obj;
64
65 /* GEM does all our handle to object mapping */
66 obj = drm_gem_object_lookup(file, handle);
67 if (obj == NULL)
68 return -ENOENT;
69
70 /* Make it mmapable */
71 ret = drm_gem_create_mmap_offset(obj);
72 if (ret)
73 goto out;
74 *offset = drm_vma_node_offset_addr(&obj->vma_node);
75out:
76 drm_gem_object_unreference_unlocked(obj);
77 return ret;
78}
79
80/**
81 * psb_gem_create - create a mappable object 51 * psb_gem_create - create a mappable object
82 * @file: the DRM file of the client 52 * @file: the DRM file of the client
83 * @dev: our device 53 * @dev: our device
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 747c06b227c5..37a3be71acd9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -494,8 +494,6 @@ static struct drm_driver driver = {
494 .gem_vm_ops = &psb_gem_vm_ops, 494 .gem_vm_ops = &psb_gem_vm_ops,
495 495
496 .dumb_create = psb_gem_dumb_create, 496 .dumb_create = psb_gem_dumb_create,
497 .dumb_map_offset = psb_gem_dumb_map_gtt,
498 .dumb_destroy = drm_gem_dumb_destroy,
499 .ioctls = psb_ioctls, 497 .ioctls = psb_ioctls,
500 .fops = &psb_gem_fops, 498 .fops = &psb_gem_fops,
501 .name = DRIVER_NAME, 499 .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 83667087d6e5..821497dbd3fc 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -750,8 +750,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
750 struct drm_file *file); 750 struct drm_file *file);
751extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 751extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
752 struct drm_mode_create_dumb *args); 752 struct drm_mode_create_dumb *args);
753extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
754 uint32_t handle, uint64_t *offset);
755extern int psb_gem_fault(struct vm_fault *vmf); 753extern int psb_gem_fault(struct vm_fault *vmf);
756 754
757/* psb_device.c */ 755/* psb_device.c */
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index 9740eed9231a..b92595c477ef 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -157,7 +157,7 @@ out_unpin_bo:
157out_unreserve_ttm_bo: 157out_unreserve_ttm_bo:
158 ttm_bo_unreserve(&bo->bo); 158 ttm_bo_unreserve(&bo->bo);
159out_unref_gem: 159out_unref_gem:
160 drm_gem_object_unreference_unlocked(gobj); 160 drm_gem_object_put_unlocked(gobj);
161 161
162 return ret; 162 return ret;
163} 163}
@@ -172,7 +172,7 @@ static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
172 drm_fb_helper_fini(fbh); 172 drm_fb_helper_fini(fbh);
173 173
174 if (gfb) 174 if (gfb)
175 drm_framebuffer_unreference(&gfb->fb); 175 drm_framebuffer_put(&gfb->fb);
176} 176}
177 177
178static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = { 178static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index ac457c779caa..3518167a7dc4 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -444,7 +444,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
444 } 444 }
445 445
446 ret = drm_gem_handle_create(file, gobj, &handle); 446 ret = drm_gem_handle_create(file, gobj, &handle);
447 drm_gem_object_unreference_unlocked(gobj); 447 drm_gem_object_put_unlocked(gobj);
448 if (ret) { 448 if (ret) {
449 DRM_ERROR("failed to unreference GEM object: %d\n", ret); 449 DRM_ERROR("failed to unreference GEM object: %d\n", ret);
450 return ret; 450 return ret;
@@ -479,7 +479,7 @@ int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
479 bo = gem_to_hibmc_bo(obj); 479 bo = gem_to_hibmc_bo(obj);
480 *offset = hibmc_bo_mmap_offset(bo); 480 *offset = hibmc_bo_mmap_offset(bo);
481 481
482 drm_gem_object_unreference_unlocked(obj); 482 drm_gem_object_put_unlocked(obj);
483 return 0; 483 return 0;
484} 484}
485 485
@@ -487,7 +487,7 @@ static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
487{ 487{
488 struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb); 488 struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
489 489
490 drm_gem_object_unreference_unlocked(hibmc_fb->obj); 490 drm_gem_object_put_unlocked(hibmc_fb->obj);
491 drm_framebuffer_cleanup(fb); 491 drm_framebuffer_cleanup(fb);
492 kfree(hibmc_fb); 492 kfree(hibmc_fb);
493} 493}
@@ -543,7 +543,7 @@ hibmc_user_framebuffer_create(struct drm_device *dev,
543 543
544 hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj); 544 hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
545 if (IS_ERR(hibmc_fb)) { 545 if (IS_ERR(hibmc_fb)) {
546 drm_gem_object_unreference_unlocked(obj); 546 drm_gem_object_put_unlocked(obj);
547 return ERR_PTR((long)hibmc_fb); 547 return ERR_PTR((long)hibmc_fb);
548 } 548 }
549 return &hibmc_fb->fb; 549 return &hibmc_fb->fb;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 79fcce76f2ad..e27352ca26c4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -157,8 +157,6 @@ static struct drm_driver kirin_drm_driver = {
157 .gem_free_object_unlocked = drm_gem_cma_free_object, 157 .gem_free_object_unlocked = drm_gem_cma_free_object,
158 .gem_vm_ops = &drm_gem_cma_vm_ops, 158 .gem_vm_ops = &drm_gem_cma_vm_ops,
159 .dumb_create = kirin_gem_cma_dumb_create, 159 .dumb_create = kirin_gem_cma_dumb_create,
160 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
161 .dumb_destroy = drm_gem_dumb_destroy,
162 160
163 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 161 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
164 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 162 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index d4246c9dceae..0d8d506695f9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -58,7 +58,7 @@ static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
58 58
59 drm_framebuffer_cleanup(fb); 59 drm_framebuffer_cleanup(fb);
60 60
61 drm_gem_object_unreference_unlocked(mtk_fb->gem_obj); 61 drm_gem_object_put_unlocked(mtk_fb->gem_obj);
62 62
63 kfree(mtk_fb); 63 kfree(mtk_fb);
64} 64}
@@ -160,6 +160,6 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
160 return &mtk_fb->base; 160 return &mtk_fb->base;
161 161
162unreference: 162unreference:
163 drm_gem_object_unreference_unlocked(gem); 163 drm_gem_object_put_unlocked(gem);
164 return ERR_PTR(ret); 164 return ERR_PTR(ret);
165} 165}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 8ec963fff8b1..f595ac816b55 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -122,7 +122,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
122 goto err_handle_create; 122 goto err_handle_create;
123 123
124 /* drop reference from allocate - handle holds it now. */ 124 /* drop reference from allocate - handle holds it now. */
125 drm_gem_object_unreference_unlocked(&mtk_gem->base); 125 drm_gem_object_put_unlocked(&mtk_gem->base);
126 126
127 return 0; 127 return 0;
128 128
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 5375e6dccdd7..7742c7d81ed8 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -116,8 +116,6 @@ static struct drm_driver meson_driver = {
116 116
117 /* GEM Ops */ 117 /* GEM Ops */
118 .dumb_create = drm_gem_cma_dumb_create, 118 .dumb_create = drm_gem_cma_dumb_create,
119 .dumb_destroy = drm_gem_dumb_destroy,
120 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
121 .gem_free_object_unlocked = drm_gem_cma_free_object, 119 .gem_free_object_unlocked = drm_gem_cma_free_object,
122 .gem_vm_ops = &drm_gem_cma_vm_ops, 120 .gem_vm_ops = &drm_gem_cma_vm_ops,
123 121
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 2ac3fcbfea7b..968e20379d54 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -248,7 +248,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
248out_unreserve1: 248out_unreserve1:
249 mgag200_bo_unreserve(pixels_2); 249 mgag200_bo_unreserve(pixels_2);
250out_unref: 250out_unref:
251 drm_gem_object_unreference_unlocked(obj); 251 drm_gem_object_put_unlocked(obj);
252 252
253 return ret; 253 return ret;
254} 254}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 4189160af726..74cdde2ee474 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -102,7 +102,6 @@ static struct drm_driver driver = {
102 .gem_free_object_unlocked = mgag200_gem_free_object, 102 .gem_free_object_unlocked = mgag200_gem_free_object,
103 .dumb_create = mgag200_dumb_create, 103 .dumb_create = mgag200_dumb_create,
104 .dumb_map_offset = mgag200_dumb_mmap_offset, 104 .dumb_map_offset = mgag200_dumb_mmap_offset,
105 .dumb_destroy = drm_gem_dumb_destroy,
106}; 105};
107 106
108static struct pci_driver mgag200_pci_driver = { 107static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 9d914ca69996..30726c9fe28c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -232,7 +232,7 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
232err_alloc_fbi: 232err_alloc_fbi:
233 vfree(sysram); 233 vfree(sysram);
234err_sysram: 234err_sysram:
235 drm_gem_object_unreference_unlocked(gobj); 235 drm_gem_object_put_unlocked(gobj);
236 236
237 return ret; 237 return ret;
238} 238}
@@ -245,7 +245,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
245 drm_fb_helper_unregister_fbi(&mfbdev->helper); 245 drm_fb_helper_unregister_fbi(&mfbdev->helper);
246 246
247 if (mfb->obj) { 247 if (mfb->obj) {
248 drm_gem_object_unreference_unlocked(mfb->obj); 248 drm_gem_object_put_unlocked(mfb->obj);
249 mfb->obj = NULL; 249 mfb->obj = NULL;
250 } 250 }
251 drm_fb_helper_fini(&mfbdev->helper); 251 drm_fb_helper_fini(&mfbdev->helper);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index dce8a3eb5a10..780f983b0294 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -18,7 +18,7 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
18{ 18{
19 struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb); 19 struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
20 20
21 drm_gem_object_unreference_unlocked(mga_fb->obj); 21 drm_gem_object_put_unlocked(mga_fb->obj);
22 drm_framebuffer_cleanup(fb); 22 drm_framebuffer_cleanup(fb);
23 kfree(fb); 23 kfree(fb);
24} 24}
@@ -59,13 +59,13 @@ mgag200_user_framebuffer_create(struct drm_device *dev,
59 59
60 mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL); 60 mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
61 if (!mga_fb) { 61 if (!mga_fb) {
62 drm_gem_object_unreference_unlocked(obj); 62 drm_gem_object_put_unlocked(obj);
63 return ERR_PTR(-ENOMEM); 63 return ERR_PTR(-ENOMEM);
64 } 64 }
65 65
66 ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj); 66 ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
67 if (ret) { 67 if (ret) {
68 drm_gem_object_unreference_unlocked(obj); 68 drm_gem_object_put_unlocked(obj);
69 kfree(mga_fb); 69 kfree(mga_fb);
70 return ERR_PTR(ret); 70 return ERR_PTR(ret);
71 } 71 }
@@ -317,7 +317,7 @@ int mgag200_dumb_create(struct drm_file *file,
317 return ret; 317 return ret;
318 318
319 ret = drm_gem_handle_create(file, gobj, &handle); 319 ret = drm_gem_handle_create(file, gobj, &handle);
320 drm_gem_object_unreference_unlocked(gobj); 320 drm_gem_object_put_unlocked(gobj);
321 if (ret) 321 if (ret)
322 return ret; 322 return ret;
323 323
@@ -366,6 +366,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
366 bo = gem_to_mga_bo(obj); 366 bo = gem_to_mga_bo(obj);
367 *offset = mgag200_bo_mmap_offset(bo); 367 *offset = mgag200_bo_mmap_offset(bo);
368 368
369 drm_gem_object_unreference_unlocked(obj); 369 drm_gem_object_put_unlocked(obj);
370 return 0; 370 return 0;
371} 371}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f49f6ac5585c..b0129e7b29e3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -832,7 +832,6 @@ static struct drm_driver msm_driver = {
832 .gem_vm_ops = &vm_ops, 832 .gem_vm_ops = &vm_ops,
833 .dumb_create = msm_gem_dumb_create, 833 .dumb_create = msm_gem_dumb_create,
834 .dumb_map_offset = msm_gem_dumb_map_offset, 834 .dumb_map_offset = msm_gem_dumb_map_offset,
835 .dumb_destroy = drm_gem_dumb_destroy,
836 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 835 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
837 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 836 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
838 .gem_prime_export = drm_gem_prime_export, 837 .gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 93c38eb6d187..7fbad9cb656e 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -337,8 +337,6 @@ static struct drm_driver mxsfb_driver = {
337 .gem_free_object_unlocked = drm_gem_cma_free_object, 337 .gem_free_object_unlocked = drm_gem_cma_free_object,
338 .gem_vm_ops = &drm_gem_cma_vm_ops, 338 .gem_vm_ops = &drm_gem_cma_vm_ops,
339 .dumb_create = drm_gem_cma_dumb_create, 339 .dumb_create = drm_gem_cma_dumb_create,
340 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
341 .dumb_destroy = drm_gem_dumb_destroy,
342 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 340 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
343 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 341 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
344 .gem_prime_export = drm_gem_prime_export, 342 .gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 29653fe5285c..0ea3ca823034 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -72,7 +72,7 @@
72 72
73#define DRIVER_DESC "DRM module for PL111" 73#define DRIVER_DESC "DRM module for PL111"
74 74
75static struct drm_mode_config_funcs mode_config_funcs = { 75static const struct drm_mode_config_funcs mode_config_funcs = {
76 .fb_create = drm_fb_cma_create, 76 .fb_create = drm_fb_cma_create,
77 .atomic_check = drm_atomic_helper_check, 77 .atomic_check = drm_atomic_helper_check,
78 .atomic_commit = drm_atomic_helper_commit, 78 .atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 403e135895bf..2445e75cf7ea 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -263,7 +263,6 @@ static struct drm_driver qxl_driver = {
263 263
264 .dumb_create = qxl_mode_dumb_create, 264 .dumb_create = qxl_mode_dumb_create,
265 .dumb_map_offset = qxl_mode_dumb_mmap, 265 .dumb_map_offset = qxl_mode_dumb_mmap,
266 .dumb_destroy = drm_gem_dumb_destroy,
267#if defined(CONFIG_DEBUG_FS) 266#if defined(CONFIG_DEBUG_FS)
268 .debugfs_init = qxl_debugfs_init, 267 .debugfs_init = qxl_debugfs_init,
269#endif 268#endif
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index bd87768dd549..7a251a54e792 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -592,7 +592,7 @@ static void inno_hdmi_connector_destroy(struct drm_connector *connector)
592 drm_connector_cleanup(connector); 592 drm_connector_cleanup(connector);
593} 593}
594 594
595static struct drm_connector_funcs inno_hdmi_connector_funcs = { 595static const struct drm_connector_funcs inno_hdmi_connector_funcs = {
596 .fill_modes = inno_hdmi_probe_single_connector_modes, 596 .fill_modes = inno_hdmi_probe_single_connector_modes,
597 .detect = inno_hdmi_connector_detect, 597 .detect = inno_hdmi_connector_detect,
598 .destroy = inno_hdmi_connector_destroy, 598 .destroy = inno_hdmi_connector_destroy,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 8a0f75612d4b..70773041785b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -48,7 +48,7 @@ static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
48 int i; 48 int i;
49 49
50 for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) 50 for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
51 drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]); 51 drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
52 52
53 drm_framebuffer_cleanup(fb); 53 drm_framebuffer_cleanup(fb);
54 kfree(rockchip_fb); 54 kfree(rockchip_fb);
@@ -144,7 +144,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
144 width * drm_format_plane_cpp(mode_cmd->pixel_format, i); 144 width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
145 145
146 if (obj->size < min_size) { 146 if (obj->size < min_size) {
147 drm_gem_object_unreference_unlocked(obj); 147 drm_gem_object_put_unlocked(obj);
148 ret = -EINVAL; 148 ret = -EINVAL;
149 goto err_gem_object_unreference; 149 goto err_gem_object_unreference;
150 } 150 }
@@ -161,7 +161,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
161 161
162err_gem_object_unreference: 162err_gem_object_unreference:
163 for (i--; i >= 0; i--) 163 for (i--; i >= 0; i--)
164 drm_gem_object_unreference_unlocked(objs[i]); 164 drm_gem_object_put_unlocked(objs[i]);
165 return ERR_PTR(ret); 165 return ERR_PTR(ret);
166} 166}
167 167
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index ce946b9c57a9..724579ebf947 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -173,7 +173,7 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev)
173 drm_fb_helper_unregister_fbi(helper); 173 drm_fb_helper_unregister_fbi(helper);
174 174
175 if (helper->fb) 175 if (helper->fb)
176 drm_framebuffer_unreference(helper->fb); 176 drm_framebuffer_put(helper->fb);
177 177
178 drm_fb_helper_fini(helper); 178 drm_fb_helper_fini(helper);
179} 179}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index f74333efe4bb..1869c8bb76c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -383,7 +383,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv,
383 goto err_handle_create; 383 goto err_handle_create;
384 384
385 /* drop reference from allocate - handle holds it now. */ 385 /* drop reference from allocate - handle holds it now. */
386 drm_gem_object_unreference_unlocked(obj); 386 drm_gem_object_put_unlocked(obj);
387 387
388 return rk_obj; 388 return rk_obj;
389 389
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 948719dddc36..bf9ed0e63973 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1026,7 +1026,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
1026 if (old_plane_state->fb == new_plane_state->fb) 1026 if (old_plane_state->fb == new_plane_state->fb)
1027 continue; 1027 continue;
1028 1028
1029 drm_framebuffer_reference(old_plane_state->fb); 1029 drm_framebuffer_get(old_plane_state->fb);
1030 drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb); 1030 drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
1031 set_bit(VOP_PENDING_FB_UNREF, &vop->pending); 1031 set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
1032 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 1032 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
@@ -1150,7 +1150,7 @@ static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
1150 struct drm_framebuffer *fb = val; 1150 struct drm_framebuffer *fb = val;
1151 1151
1152 drm_crtc_vblank_put(&vop->crtc); 1152 drm_crtc_vblank_put(&vop->crtc);
1153 drm_framebuffer_unreference(fb); 1153 drm_framebuffer_put(fb);
1154} 1154}
1155 1155
1156static void vop_handle_vblank(struct vop *vop) 1156static void vop_handle_vblank(struct vop *vop)
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 550bb262943f..42a238bbb899 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -119,7 +119,7 @@ sun4i_rgb_connector_destroy(struct drm_connector *connector)
119 drm_connector_cleanup(connector); 119 drm_connector_cleanup(connector);
120} 120}
121 121
122static struct drm_connector_funcs sun4i_rgb_con_funcs = { 122static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
123 .fill_modes = drm_helper_probe_single_connector_modes, 123 .fill_modes = drm_helper_probe_single_connector_modes,
124 .destroy = sun4i_rgb_connector_destroy, 124 .destroy = sun4i_rgb_connector_destroy,
125 .reset = drm_atomic_helper_connector_reset, 125 .reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 7b45ac9383ea..4edf15e299ab 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -545,7 +545,7 @@ sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
545 drm_connector_cleanup(connector); 545 drm_connector_cleanup(connector);
546} 546}
547 547
548static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = { 548static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
549 .fill_modes = drm_helper_probe_single_connector_modes, 549 .fill_modes = drm_helper_probe_single_connector_modes,
550 .destroy = sun4i_tv_comp_connector_destroy, 550 .destroy = sun4i_tv_comp_connector_destroy,
551 .reset = drm_atomic_helper_connector_reset, 551 .reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3ba659a5940d..224ce1dbb1cb 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1077,8 +1077,6 @@ static struct drm_driver tegra_drm_driver = {
1077 .gem_prime_import = tegra_gem_prime_import, 1077 .gem_prime_import = tegra_gem_prime_import,
1078 1078
1079 .dumb_create = tegra_bo_dumb_create, 1079 .dumb_create = tegra_bo_dumb_create,
1080 .dumb_map_offset = tegra_bo_dumb_map_offset,
1081 .dumb_destroy = drm_gem_dumb_destroy,
1082 1080
1083 .ioctls = tegra_drm_ioctls, 1081 .ioctls = tegra_drm_ioctls,
1084 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), 1082 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 7a39a355678a..c6079affe642 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -423,27 +423,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
423 return 0; 423 return 0;
424} 424}
425 425
426int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
427 u32 handle, u64 *offset)
428{
429 struct drm_gem_object *gem;
430 struct tegra_bo *bo;
431
432 gem = drm_gem_object_lookup(file, handle);
433 if (!gem) {
434 dev_err(drm->dev, "failed to lookup GEM object\n");
435 return -EINVAL;
436 }
437
438 bo = to_tegra_bo(gem);
439
440 *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
441
442 drm_gem_object_unreference_unlocked(gem);
443
444 return 0;
445}
446
447static int tegra_bo_fault(struct vm_fault *vmf) 426static int tegra_bo_fault(struct vm_fault *vmf)
448{ 427{
449 struct vm_area_struct *vma = vmf->vma; 428 struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 8b32a6fd586d..8eb9fd24ef0e 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -67,8 +67,6 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
67void tegra_bo_free_object(struct drm_gem_object *gem); 67void tegra_bo_free_object(struct drm_gem_object *gem);
68int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 68int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
69 struct drm_mode_create_dumb *args); 69 struct drm_mode_create_dumb *args);
70int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
71 u32 handle, u64 *offset);
72 70
73int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); 71int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
74 72
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index f17c3caceab2..2e790e7dced5 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -32,3 +32,13 @@ config TINYDRM_REPAPER
32 2.71" TFT EPD Panel (E2271CS021) 32 2.71" TFT EPD Panel (E2271CS021)
33 33
34 If M is selected the module will be called repaper. 34 If M is selected the module will be called repaper.
35
36config TINYDRM_ST7586
37 tristate "DRM support for Sitronix ST7586 display panels"
38 depends on DRM_TINYDRM && SPI
39 select TINYDRM_MIPI_DBI
40 help
41 DRM driver for the following Sitronix ST7586 panels:
42 * LEGO MINDSTORMS EV3
43
44 If M is selected the module will be called st7586.
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 95bb4d4fa785..0c184bd1bb59 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
6# Displays 6# Displays
7obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o 7obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
8obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o 8obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
9obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index 75808bb84c9a..bd6cce093a85 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -185,7 +185,9 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
185/** 185/**
186 * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale 186 * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
187 * @dst: 8-bit grayscale destination buffer 187 * @dst: 8-bit grayscale destination buffer
188 * @vaddr: XRGB8888 source buffer
188 * @fb: DRM framebuffer 189 * @fb: DRM framebuffer
190 * @clip: Clip rectangle area to copy
189 * 191 *
190 * Drm doesn't have native monochrome or grayscale support. 192 * Drm doesn't have native monochrome or grayscale support.
191 * Such drivers can announce the commonly supported XR24 format to userspace 193 * Such drivers can announce the commonly supported XR24 format to userspace
@@ -195,41 +197,31 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
195 * where 1 means foreground color and 0 background color. 197 * where 1 means foreground color and 0 background color.
196 * 198 *
197 * ITU BT.601 is used for the RGB -> luma (brightness) conversion. 199 * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
198 *
199 * Returns:
200 * Zero on success, negative error code on failure.
201 */ 200 */
202int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb) 201void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
202 struct drm_clip_rect *clip)
203{ 203{
204 struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 204 unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
205 struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; 205 unsigned int x, y;
206 unsigned int x, y, pitch = fb->pitches[0];
207 int ret = 0;
208 void *buf; 206 void *buf;
209 u32 *src; 207 u32 *src;
210 208
211 if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888)) 209 if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
212 return -EINVAL; 210 return;
213 /* 211 /*
214 * The cma memory is write-combined so reads are uncached. 212 * The cma memory is write-combined so reads are uncached.
215 * Speed up by fetching one line at a time. 213 * Speed up by fetching one line at a time.
216 */ 214 */
217 buf = kmalloc(pitch, GFP_KERNEL); 215 buf = kmalloc(len, GFP_KERNEL);
218 if (!buf) 216 if (!buf)
219 return -ENOMEM; 217 return;
220
221 if (import_attach) {
222 ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
223 DMA_FROM_DEVICE);
224 if (ret)
225 goto err_free;
226 }
227 218
228 for (y = 0; y < fb->height; y++) { 219 for (y = clip->y1; y < clip->y2; y++) {
229 src = cma_obj->vaddr + (y * pitch); 220 src = vaddr + (y * fb->pitches[0]);
230 memcpy(buf, src, pitch); 221 src += clip->x1;
222 memcpy(buf, src, len);
231 src = buf; 223 src = buf;
232 for (x = 0; x < fb->width; x++) { 224 for (x = clip->x1; x < clip->x2; x++) {
233 u8 r = (*src & 0x00ff0000) >> 16; 225 u8 r = (*src & 0x00ff0000) >> 16;
234 u8 g = (*src & 0x0000ff00) >> 8; 226 u8 g = (*src & 0x0000ff00) >> 8;
235 u8 b = *src & 0x000000ff; 227 u8 b = *src & 0x000000ff;
@@ -240,13 +232,7 @@ int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb)
240 } 232 }
241 } 233 }
242 234
243 if (import_attach)
244 ret = dma_buf_end_cpu_access(import_attach->dmabuf,
245 DMA_FROM_DEVICE);
246err_free:
247 kfree(buf); 235 kfree(buf);
248
249 return ret;
250} 236}
251EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8); 237EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8);
252 238
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index f224b54a30f6..177e9d861001 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -56,7 +56,7 @@ static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
56static enum drm_connector_status 56static enum drm_connector_status
57tinydrm_connector_detect(struct drm_connector *connector, bool force) 57tinydrm_connector_detect(struct drm_connector *connector, bool force)
58{ 58{
59 if (drm_device_is_unplugged(connector->dev)) 59 if (drm_dev_is_unplugged(connector->dev))
60 return connector_status_disconnected; 60 return connector_status_disconnected;
61 61
62 return connector->status; 62 return connector->status;
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 3343d3f15a90..30dc97b3ff21 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/dma-buf.h>
21#include <linux/gpio/consumer.h> 22#include <linux/gpio/consumer.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/of_device.h> 24#include <linux/of_device.h>
@@ -525,11 +526,20 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
525 struct drm_clip_rect *clips, 526 struct drm_clip_rect *clips,
526 unsigned int num_clips) 527 unsigned int num_clips)
527{ 528{
529 struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
530 struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
528 struct tinydrm_device *tdev = fb->dev->dev_private; 531 struct tinydrm_device *tdev = fb->dev->dev_private;
529 struct repaper_epd *epd = epd_from_tinydrm(tdev); 532 struct repaper_epd *epd = epd_from_tinydrm(tdev);
533 struct drm_clip_rect clip;
530 u8 *buf = NULL; 534 u8 *buf = NULL;
531 int ret = 0; 535 int ret = 0;
532 536
537 /* repaper can't do partial updates */
538 clip.x1 = 0;
539 clip.x2 = fb->width;
540 clip.y1 = 0;
541 clip.y2 = fb->height;
542
533 mutex_lock(&tdev->dirty_lock); 543 mutex_lock(&tdev->dirty_lock);
534 544
535 if (!epd->enabled) 545 if (!epd->enabled)
@@ -550,9 +560,21 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
550 goto out_unlock; 560 goto out_unlock;
551 } 561 }
552 562
553 ret = tinydrm_xrgb8888_to_gray8(buf, fb); 563 if (import_attach) {
554 if (ret) 564 ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
555 goto out_unlock; 565 DMA_FROM_DEVICE);
566 if (ret)
567 goto out_unlock;
568 }
569
570 tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
571
572 if (import_attach) {
573 ret = dma_buf_end_cpu_access(import_attach->dmabuf,
574 DMA_FROM_DEVICE);
575 if (ret)
576 goto out_unlock;
577 }
556 578
557 repaper_gray8_to_mono_reversed(buf, fb->width, fb->height); 579 repaper_gray8_to_mono_reversed(buf, fb->width, fb->height);
558 580
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
new file mode 100644
index 000000000000..1b39d3fb17f7
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -0,0 +1,428 @@
1/*
2 * DRM driver for Sitronix ST7586 panels
3 *
4 * Copyright 2017 David Lechner <david@lechnology.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/delay.h>
13#include <linux/dma-buf.h>
14#include <linux/gpio/consumer.h>
15#include <linux/module.h>
16#include <linux/property.h>
17#include <linux/spi/spi.h>
18#include <video/mipi_display.h>
19
20#include <drm/tinydrm/mipi-dbi.h>
21#include <drm/tinydrm/tinydrm-helpers.h>
22
23/* controller-specific commands */
24#define ST7586_DISP_MODE_GRAY 0x38
25#define ST7586_DISP_MODE_MONO 0x39
26#define ST7586_ENABLE_DDRAM 0x3a
27#define ST7586_SET_DISP_DUTY 0xb0
28#define ST7586_SET_PART_DISP 0xb4
29#define ST7586_SET_NLINE_INV 0xb5
30#define ST7586_SET_VOP 0xc0
31#define ST7586_SET_BIAS_SYSTEM 0xc3
32#define ST7586_SET_BOOST_LEVEL 0xc4
33#define ST7586_SET_VOP_OFFSET 0xc7
34#define ST7586_ENABLE_ANALOG 0xd0
35#define ST7586_AUTO_READ_CTRL 0xd7
36#define ST7586_OTP_RW_CTRL 0xe0
37#define ST7586_OTP_CTRL_OUT 0xe1
38#define ST7586_OTP_READ 0xe3
39
40#define ST7586_DISP_CTRL_MX BIT(6)
41#define ST7586_DISP_CTRL_MY BIT(7)
42
43/*
44 * The ST7586 controller has an unusual pixel format where 2bpp grayscale is
45 * packed 3 pixels per byte with the first two pixels using 3 bits and the 3rd
46 * pixel using only 2 bits.
47 *
48 * | D7 | D6 | D5 || | || 2bpp |
49 * | (D4) | (D3) | (D2) || D1 | D0 || GRAY |
50 * +------+------+------++------+------++------+
51 * | 1 | 1 | 1 || 1 | 1 || 0 0 | black
52 * | 1 | 0 | 0 || 1 | 0 || 0 1 | dark gray
53 * | 0 | 1 | 0 || 0 | 1 || 1 0 | light gray
54 * | 0 | 0 | 0 || 0 | 0 || 1 1 | white
55 */
56
57static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 };
58
59static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
60 struct drm_framebuffer *fb,
61 struct drm_clip_rect *clip)
62{
63 size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
64 unsigned int x, y;
65 u8 *src, *buf, val;
66
67 buf = kmalloc(len, GFP_KERNEL);
68 if (!buf)
69 return;
70
71 tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip);
72 src = buf;
73
74 for (y = clip->y1; y < clip->y2; y++) {
75 for (x = clip->x1; x < clip->x2; x += 3) {
76 val = st7586_lookup[*src++ >> 6] << 5;
77 val |= st7586_lookup[*src++ >> 6] << 2;
78 val |= st7586_lookup[*src++ >> 6] >> 1;
79 *dst++ = val;
80 }
81 }
82
83 kfree(buf);
84}
85
86static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
87 struct drm_clip_rect *clip)
88{
89 struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
90 struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
91 void *src = cma_obj->vaddr;
92 int ret = 0;
93
94 if (import_attach) {
95 ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
96 DMA_FROM_DEVICE);
97 if (ret)
98 return ret;
99 }
100
101 st7586_xrgb8888_to_gray332(dst, src, fb, clip);
102
103 if (import_attach)
104 ret = dma_buf_end_cpu_access(import_attach->dmabuf,
105 DMA_FROM_DEVICE);
106
107 return ret;
108}
109
110static int st7586_fb_dirty(struct drm_framebuffer *fb,
111 struct drm_file *file_priv, unsigned int flags,
112 unsigned int color, struct drm_clip_rect *clips,
113 unsigned int num_clips)
114{
115 struct tinydrm_device *tdev = fb->dev->dev_private;
116 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
117 struct drm_clip_rect clip;
118 int start, end;
119 int ret = 0;
120
121 mutex_lock(&tdev->dirty_lock);
122
123 if (!mipi->enabled)
124 goto out_unlock;
125
126 /* fbdev can flush even when we're not interested */
127 if (tdev->pipe.plane.fb != fb)
128 goto out_unlock;
129
130 tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width,
131 fb->height);
132
133 /* 3 pixels per byte, so grow clip to nearest multiple of 3 */
134 clip.x1 = rounddown(clip.x1, 3);
135 clip.x2 = roundup(clip.x2, 3);
136
137 DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
138 clip.x1, clip.x2, clip.y1, clip.y2);
139
140 ret = st7586_buf_copy(mipi->tx_buf, fb, &clip);
141 if (ret)
142 goto out_unlock;
143
144 /* Pixels are packed 3 per byte */
145 start = clip.x1 / 3;
146 end = clip.x2 / 3;
147
148 mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
149 (start >> 8) & 0xFF, start & 0xFF,
150 (end >> 8) & 0xFF, (end - 1) & 0xFF);
151 mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
152 (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
153 (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
154
155 ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
156 (u8 *)mipi->tx_buf,
157 (end - start) * (clip.y2 - clip.y1));
158
159out_unlock:
160 mutex_unlock(&tdev->dirty_lock);
161
162 if (ret)
163 dev_err_once(fb->dev->dev, "Failed to update display %d\n",
164 ret);
165
166 return ret;
167}
168
/*
 * Framebuffer operations: destroy/create_handle come from the CMA
 * helpers, .dirty flushes damaged regions to the panel over SPI.
 */
static const struct drm_framebuffer_funcs st7586_fb_funcs = {
	.destroy = drm_fb_cma_destroy,
	.create_handle = drm_fb_cma_create_handle,
	.dirty = st7586_fb_dirty,
};
174
175void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
176 struct drm_crtc_state *crtc_state)
177{
178 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
179 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
180 struct drm_framebuffer *fb = pipe->plane.fb;
181 struct device *dev = tdev->drm->dev;
182 int ret;
183 u8 addr_mode;
184
185 DRM_DEBUG_KMS("\n");
186
187 mipi_dbi_hw_reset(mipi);
188 ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
189 if (ret) {
190 dev_err(dev, "Error sending command %d\n", ret);
191 return;
192 }
193
194 mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
195
196 msleep(10);
197
198 mipi_dbi_command(mipi, ST7586_OTP_READ);
199
200 msleep(20);
201
202 mipi_dbi_command(mipi, ST7586_OTP_CTRL_OUT);
203 mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
204 mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
205
206 msleep(50);
207
208 mipi_dbi_command(mipi, ST7586_SET_VOP_OFFSET, 0x00);
209 mipi_dbi_command(mipi, ST7586_SET_VOP, 0xe3, 0x00);
210 mipi_dbi_command(mipi, ST7586_SET_BIAS_SYSTEM, 0x02);
211 mipi_dbi_command(mipi, ST7586_SET_BOOST_LEVEL, 0x04);
212 mipi_dbi_command(mipi, ST7586_ENABLE_ANALOG, 0x1d);
213 mipi_dbi_command(mipi, ST7586_SET_NLINE_INV, 0x00);
214 mipi_dbi_command(mipi, ST7586_DISP_MODE_GRAY);
215 mipi_dbi_command(mipi, ST7586_ENABLE_DDRAM, 0x02);
216
217 switch (mipi->rotation) {
218 default:
219 addr_mode = 0x00;
220 break;
221 case 90:
222 addr_mode = ST7586_DISP_CTRL_MY;
223 break;
224 case 180:
225 addr_mode = ST7586_DISP_CTRL_MX | ST7586_DISP_CTRL_MY;
226 break;
227 case 270:
228 addr_mode = ST7586_DISP_CTRL_MX;
229 break;
230 }
231 mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
232
233 mipi_dbi_command(mipi, ST7586_SET_DISP_DUTY, 0x7f);
234 mipi_dbi_command(mipi, ST7586_SET_PART_DISP, 0xa0);
235 mipi_dbi_command(mipi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
236 mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
237
238 msleep(100);
239
240 mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
241
242 mipi->enabled = true;
243
244 if (fb)
245 fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
246}
247
248static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
249{
250 struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
251 struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
252
253 DRM_DEBUG_KMS("\n");
254
255 if (!mipi->enabled)
256 return;
257
258 mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
259 mipi->enabled = false;
260}
261
/* Userspace renders XRGB8888; it is converted to packed 2-bit gray on flush. */
static const u32 st7586_formats[] = {
	DRM_FORMAT_XRGB8888,
};
265
266static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
267 const struct drm_simple_display_pipe_funcs *pipe_funcs,
268 struct drm_driver *driver, const struct drm_display_mode *mode,
269 unsigned int rotation)
270{
271 size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay;
272 struct tinydrm_device *tdev = &mipi->tinydrm;
273 int ret;
274
275 mutex_init(&mipi->cmdlock);
276
277 mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
278 if (!mipi->tx_buf)
279 return -ENOMEM;
280
281 ret = devm_tinydrm_init(dev, tdev, &st7586_fb_funcs, driver);
282 if (ret)
283 return ret;
284
285 ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
286 DRM_MODE_CONNECTOR_VIRTUAL,
287 st7586_formats,
288 ARRAY_SIZE(st7586_formats),
289 mode, rotation);
290 if (ret)
291 return ret;
292
293 tdev->drm->mode_config.preferred_depth = 32;
294 mipi->rotation = rotation;
295
296 drm_mode_config_reset(tdev->drm);
297
298 DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
299 tdev->drm->mode_config.preferred_depth, rotation);
300
301 return 0;
302}
303
/* Display-pipe callbacks; update/prepare_fb use the tinydrm defaults. */
static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
	.enable = st7586_pipe_enable,
	.disable = st7586_pipe_disable,
	.update = tinydrm_display_pipe_update,
	.prepare_fb = tinydrm_display_pipe_prepare_fb,
};
310
/*
 * Fixed panel mode: 178x128 pixels. The 37/27 arguments are presumably
 * the physical size in mm -- TODO confirm against TINYDRM_MODE().
 */
static const struct drm_display_mode st7586_mode = {
	TINYDRM_MODE(178, 128, 37, 27),
};
314
/* File operations come from the GEM CMA helpers. */
DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);

/* DRM driver description; GEM object handling via TINYDRM_GEM_DRIVER_OPS. */
static struct drm_driver st7586_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	.fops = &st7586_fops,
	TINYDRM_GEM_DRIVER_OPS,
	.lastclose = tinydrm_lastclose,
	.debugfs_init = mipi_dbi_debugfs_init,
	.name = "st7586",
	.desc = "Sitronix ST7586",
	.date = "20170801",
	.major = 1,
	.minor = 0,
};
330
/* Device-tree match table: the LEGO MINDSTORMS EV3 LCD uses this controller. */
static const struct of_device_id st7586_of_match[] = {
	{ .compatible = "lego,ev3-lcd" },
	{},
};
MODULE_DEVICE_TABLE(of, st7586_of_match);

/* SPI id table for non-DT instantiation. */
static const struct spi_device_id st7586_id[] = {
	{ "ev3-lcd", 0 },
	{ },
};
MODULE_DEVICE_TABLE(spi, st7586_id);
342
343static int st7586_probe(struct spi_device *spi)
344{
345 struct device *dev = &spi->dev;
346 struct tinydrm_device *tdev;
347 struct mipi_dbi *mipi;
348 struct gpio_desc *a0;
349 u32 rotation = 0;
350 int ret;
351
352 mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
353 if (!mipi)
354 return -ENOMEM;
355
356 mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
357 if (IS_ERR(mipi->reset)) {
358 dev_err(dev, "Failed to get gpio 'reset'\n");
359 return PTR_ERR(mipi->reset);
360 }
361
362 a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
363 if (IS_ERR(a0)) {
364 dev_err(dev, "Failed to get gpio 'a0'\n");
365 return PTR_ERR(a0);
366 }
367
368 device_property_read_u32(dev, "rotation", &rotation);
369
370 ret = mipi_dbi_spi_init(spi, mipi, a0);
371 if (ret)
372 return ret;
373
374 /* Cannot read from this controller via SPI */
375 mipi->read_commands = NULL;
376
377 /*
378 * we are using 8-bit data, so we are not actually swapping anything,
379 * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the
380 * right thing and not use 16-bit transfers (which results in swapped
381 * bytes on little-endian systems and causes out of order data to be
382 * sent to the display).
383 */
384 mipi->swap_bytes = true;
385
386 ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver,
387 &st7586_mode, rotation);
388 if (ret)
389 return ret;
390
391 tdev = &mipi->tinydrm;
392
393 ret = devm_tinydrm_register(tdev);
394 if (ret)
395 return ret;
396
397 spi_set_drvdata(spi, mipi);
398
399 DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
400 tdev->drm->driver->name, dev_name(dev),
401 spi->max_speed_hz / 1000000,
402 tdev->drm->primary->index);
403
404 return 0;
405}
406
407static void st7586_shutdown(struct spi_device *spi)
408{
409 struct mipi_dbi *mipi = spi_get_drvdata(spi);
410
411 tinydrm_shutdown(&mipi->tinydrm);
412}
413
414static struct spi_driver st7586_spi_driver = {
415 .driver = {
416 .name = "st7586",
417 .owner = THIS_MODULE,
418 .of_match_table = st7586_of_match,
419 },
420 .id_table = st7586_id,
421 .probe = st7586_probe,
422 .shutdown = st7586_shutdown,
423};
424module_spi_driver(st7586_spi_driver);
425
426MODULE_DESCRIPTION("Sitronix ST7586 DRM driver");
427MODULE_AUTHOR("David Lechner <david@lechnology.com>");
428MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index d2f57c52f7db..9f9a49748d17 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -96,7 +96,7 @@ static int udl_mode_valid(struct drm_connector *connector,
96static enum drm_connector_status 96static enum drm_connector_status
97udl_detect(struct drm_connector *connector, bool force) 97udl_detect(struct drm_connector *connector, bool force)
98{ 98{
99 if (drm_device_is_unplugged(connector->dev)) 99 if (drm_dev_is_unplugged(connector->dev))
100 return connector_status_disconnected; 100 return connector_status_disconnected;
101 return connector_status_connected; 101 return connector_status_connected;
102} 102}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 0f02e1acf0ba..bfacb294d5c4 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -54,7 +54,6 @@ static struct drm_driver driver = {
54 54
55 .dumb_create = udl_dumb_create, 55 .dumb_create = udl_dumb_create,
56 .dumb_map_offset = udl_gem_mmap, 56 .dumb_map_offset = udl_gem_mmap,
57 .dumb_destroy = drm_gem_dumb_destroy,
58 .fops = &udl_driver_fops, 57 .fops = &udl_driver_fops,
59 58
60 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 59 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -102,7 +101,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
102 drm_kms_helper_poll_disable(dev); 101 drm_kms_helper_poll_disable(dev);
103 udl_fbdev_unplug(dev); 102 udl_fbdev_unplug(dev);
104 udl_drop_usb(dev); 103 udl_drop_usb(dev);
105 drm_unplug_dev(dev); 104 drm_dev_unplug(dev);
106} 105}
107 106
108/* 107/*
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a5c54dc60def..b7ca90db4e80 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -198,7 +198,7 @@ static int udl_fb_open(struct fb_info *info, int user)
198 struct udl_device *udl = dev->dev_private; 198 struct udl_device *udl = dev->dev_private;
199 199
200 /* If the USB device is gone, we don't accept new opens */ 200 /* If the USB device is gone, we don't accept new opens */
201 if (drm_device_is_unplugged(udl->ddev)) 201 if (drm_dev_is_unplugged(udl->ddev))
202 return -ENODEV; 202 return -ENODEV;
203 203
204 ufbdev->fb_count++; 204 ufbdev->fb_count++;
@@ -309,7 +309,7 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
309 struct udl_framebuffer *ufb = to_udl_fb(fb); 309 struct udl_framebuffer *ufb = to_udl_fb(fb);
310 310
311 if (ufb->obj) 311 if (ufb->obj)
312 drm_gem_object_unreference_unlocked(&ufb->obj->base); 312 drm_gem_object_put_unlocked(&ufb->obj->base);
313 313
314 drm_framebuffer_cleanup(fb); 314 drm_framebuffer_cleanup(fb);
315 kfree(ufb); 315 kfree(ufb);
@@ -403,7 +403,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
403 403
404 return ret; 404 return ret;
405out_gfree: 405out_gfree:
406 drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); 406 drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
407out: 407out:
408 return ret; 408 return ret;
409} 409}
@@ -419,7 +419,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
419 drm_fb_helper_fini(&ufbdev->helper); 419 drm_fb_helper_fini(&ufbdev->helper);
420 drm_framebuffer_unregister_private(&ufbdev->ufb.base); 420 drm_framebuffer_unregister_private(&ufbdev->ufb.base);
421 drm_framebuffer_cleanup(&ufbdev->ufb.base); 421 drm_framebuffer_cleanup(&ufbdev->ufb.base);
422 drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); 422 drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
423} 423}
424 424
425int udl_fbdev_init(struct drm_device *dev) 425int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index db9ceceba30e..dee6bd9a3dd1 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
52 return ret; 52 return ret;
53 } 53 }
54 54
55 drm_gem_object_unreference_unlocked(&obj->base); 55 drm_gem_object_put_unlocked(&obj->base);
56 *handle_p = handle; 56 *handle_p = handle;
57 return 0; 57 return 0;
58} 58}
@@ -234,7 +234,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
234 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); 234 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
235 235
236out: 236out:
237 drm_gem_object_unreference(&gobj->base); 237 drm_gem_object_put(&gobj->base);
238unlock: 238unlock:
239 mutex_unlock(&dev->struct_mutex); 239 mutex_unlock(&dev->struct_mutex);
240 return ret; 240 return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index b24dd8685590..3afdbf4bc10b 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -366,7 +366,7 @@ int vc4_dumb_create(struct drm_file *file_priv,
366 return PTR_ERR(bo); 366 return PTR_ERR(bo);
367 367
368 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 368 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
369 drm_gem_object_unreference_unlocked(&bo->base.base); 369 drm_gem_object_put_unlocked(&bo->base.base);
370 370
371 return ret; 371 return ret;
372} 372}
@@ -482,7 +482,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
482 struct vc4_bo *bo = to_vc4_bo(obj); 482 struct vc4_bo *bo = to_vc4_bo(obj);
483 483
484 if (bo->validated_shader) { 484 if (bo->validated_shader) {
485 DRM_ERROR("Attempting to export shader BO\n"); 485 DRM_DEBUG("Attempting to export shader BO\n");
486 return ERR_PTR(-EINVAL); 486 return ERR_PTR(-EINVAL);
487 } 487 }
488 488
@@ -503,7 +503,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
503 bo = to_vc4_bo(gem_obj); 503 bo = to_vc4_bo(gem_obj);
504 504
505 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { 505 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
506 DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); 506 DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
507 return -EINVAL; 507 return -EINVAL;
508 } 508 }
509 509
@@ -528,7 +528,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
528 struct vc4_bo *bo = to_vc4_bo(obj); 528 struct vc4_bo *bo = to_vc4_bo(obj);
529 529
530 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { 530 if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
531 DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); 531 DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
532 return -EINVAL; 532 return -EINVAL;
533 } 533 }
534 534
@@ -540,7 +540,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj)
540 struct vc4_bo *bo = to_vc4_bo(obj); 540 struct vc4_bo *bo = to_vc4_bo(obj);
541 541
542 if (bo->validated_shader) { 542 if (bo->validated_shader) {
543 DRM_ERROR("mmaping of shader BOs not allowed.\n"); 543 DRM_DEBUG("mmaping of shader BOs not allowed.\n");
544 return ERR_PTR(-EINVAL); 544 return ERR_PTR(-EINVAL);
545 } 545 }
546 546
@@ -581,7 +581,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
581 return PTR_ERR(bo); 581 return PTR_ERR(bo);
582 582
583 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 583 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
584 drm_gem_object_unreference_unlocked(&bo->base.base); 584 drm_gem_object_put_unlocked(&bo->base.base);
585 585
586 return ret; 586 return ret;
587} 587}
@@ -594,14 +594,14 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
594 594
595 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 595 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
596 if (!gem_obj) { 596 if (!gem_obj) {
597 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 597 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
598 return -EINVAL; 598 return -EINVAL;
599 } 599 }
600 600
601 /* The mmap offset was set up at BO allocation time. */ 601 /* The mmap offset was set up at BO allocation time. */
602 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); 602 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
603 603
604 drm_gem_object_unreference_unlocked(gem_obj); 604 drm_gem_object_put_unlocked(gem_obj);
605 return 0; 605 return 0;
606} 606}
607 607
@@ -657,7 +657,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
657 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 657 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
658 658
659 fail: 659 fail:
660 drm_gem_object_unreference_unlocked(&bo->base.base); 660 drm_gem_object_put_unlocked(&bo->base.base);
661 661
662 return ret; 662 return ret;
663} 663}
@@ -698,13 +698,13 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
698 698
699 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 699 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
700 if (!gem_obj) { 700 if (!gem_obj) {
701 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 701 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
702 return -ENOENT; 702 return -ENOENT;
703 } 703 }
704 bo = to_vc4_bo(gem_obj); 704 bo = to_vc4_bo(gem_obj);
705 bo->t_format = t_format; 705 bo->t_format = t_format;
706 706
707 drm_gem_object_unreference_unlocked(gem_obj); 707 drm_gem_object_put_unlocked(gem_obj);
708 708
709 return 0; 709 return 0;
710} 710}
@@ -729,7 +729,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
729 729
730 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 730 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
731 if (!gem_obj) { 731 if (!gem_obj) {
732 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 732 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
733 return -ENOENT; 733 return -ENOENT;
734 } 734 }
735 bo = to_vc4_bo(gem_obj); 735 bo = to_vc4_bo(gem_obj);
@@ -739,7 +739,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
739 else 739 else
740 args->modifier = DRM_FORMAT_MOD_NONE; 740 args->modifier = DRM_FORMAT_MOD_NONE;
741 741
742 drm_gem_object_unreference_unlocked(gem_obj); 742 drm_gem_object_put_unlocked(gem_obj);
743 743
744 return 0; 744 return 0;
745} 745}
@@ -830,7 +830,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
830 ret = -ENOMEM; 830 ret = -ENOMEM;
831 mutex_unlock(&vc4->bo_lock); 831 mutex_unlock(&vc4->bo_lock);
832 832
833 drm_gem_object_unreference_unlocked(gem_obj); 833 drm_gem_object_put_unlocked(gem_obj);
834 834
835 return ret; 835 return ret;
836} 836}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 664a55b45af0..ce1e3b9e14c9 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -763,7 +763,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
763 } 763 }
764 764
765 drm_crtc_vblank_put(crtc); 765 drm_crtc_vblank_put(crtc);
766 drm_framebuffer_unreference(flip_state->fb); 766 drm_framebuffer_put(flip_state->fb);
767 kfree(flip_state); 767 kfree(flip_state);
768 768
769 up(&vc4->async_modeset); 769 up(&vc4->async_modeset);
@@ -792,7 +792,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
792 if (!flip_state) 792 if (!flip_state)
793 return -ENOMEM; 793 return -ENOMEM;
794 794
795 drm_framebuffer_reference(fb); 795 drm_framebuffer_get(fb);
796 flip_state->fb = fb; 796 flip_state->fb = fb;
797 flip_state->crtc = crtc; 797 flip_state->crtc = crtc;
798 flip_state->event = event; 798 flip_state->event = event;
@@ -800,7 +800,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
800 /* Make sure all other async modesetes have landed. */ 800 /* Make sure all other async modesetes have landed. */
801 ret = down_interruptible(&vc4->async_modeset); 801 ret = down_interruptible(&vc4->async_modeset);
802 if (ret) { 802 if (ret) {
803 drm_framebuffer_unreference(fb); 803 drm_framebuffer_put(fb);
804 kfree(flip_state); 804 kfree(flip_state);
805 return ret; 805 return ret;
806 } 806 }
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index e8f0e1790d5e..1c96edcb302b 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -99,6 +99,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
99 case DRM_VC4_PARAM_SUPPORTS_BRANCHES: 99 case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
100 case DRM_VC4_PARAM_SUPPORTS_ETC1: 100 case DRM_VC4_PARAM_SUPPORTS_ETC1:
101 case DRM_VC4_PARAM_SUPPORTS_THREADED_FS: 101 case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
102 case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
102 args->value = true; 103 args->value = true;
103 break; 104 break;
104 default: 105 default:
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 629d372633e6..d1e0dc908048 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1636,14 +1636,10 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
1636 1636
1637 pm_runtime_disable(dev); 1637 pm_runtime_disable(dev);
1638 1638
1639 drm_bridge_remove(dsi->bridge);
1640 vc4_dsi_encoder_destroy(dsi->encoder); 1639 vc4_dsi_encoder_destroy(dsi->encoder);
1641 1640
1642 mipi_dsi_host_unregister(&dsi->dsi_host); 1641 mipi_dsi_host_unregister(&dsi->dsi_host);
1643 1642
1644 clk_disable_unprepare(dsi->pll_phy_clock);
1645 clk_disable_unprepare(dsi->escape_clock);
1646
1647 if (dsi->port == 1) 1643 if (dsi->port == 1)
1648 vc4->dsi1 = NULL; 1644 vc4->dsi1 = NULL;
1649} 1645}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 209fccd0d3b4..d0c6bfb68c4e 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -55,7 +55,7 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
55 unsigned int i; 55 unsigned int i;
56 56
57 for (i = 0; i < state->user_state.bo_count; i++) 57 for (i = 0; i < state->user_state.bo_count; i++)
58 drm_gem_object_unreference_unlocked(state->bo[i]); 58 drm_gem_object_put_unlocked(state->bo[i]);
59 59
60 kfree(state); 60 kfree(state);
61} 61}
@@ -188,12 +188,12 @@ vc4_save_hang_state(struct drm_device *dev)
188 continue; 188 continue;
189 189
190 for (j = 0; j < exec[i]->bo_count; j++) { 190 for (j = 0; j < exec[i]->bo_count; j++) {
191 drm_gem_object_reference(&exec[i]->bo[j]->base); 191 drm_gem_object_get(&exec[i]->bo[j]->base);
192 kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; 192 kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
193 } 193 }
194 194
195 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { 195 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
196 drm_gem_object_reference(&bo->base.base); 196 drm_gem_object_get(&bo->base.base);
197 kernel_state->bo[j + prev_idx] = &bo->base.base; 197 kernel_state->bo[j + prev_idx] = &bo->base.base;
198 j++; 198 j++;
199 } 199 }
@@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
659 /* See comment on bo_index for why we have to check 659 /* See comment on bo_index for why we have to check
660 * this. 660 * this.
661 */ 661 */
662 DRM_ERROR("Rendering requires BOs to validate\n"); 662 DRM_DEBUG("Rendering requires BOs to validate\n");
663 return -EINVAL; 663 return -EINVAL;
664 } 664 }
665 665
@@ -690,13 +690,13 @@ vc4_cl_lookup_bos(struct drm_device *dev,
690 struct drm_gem_object *bo = idr_find(&file_priv->object_idr, 690 struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
691 handles[i]); 691 handles[i]);
692 if (!bo) { 692 if (!bo) {
693 DRM_ERROR("Failed to look up GEM BO %d: %d\n", 693 DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
694 i, handles[i]); 694 i, handles[i]);
695 ret = -EINVAL; 695 ret = -EINVAL;
696 spin_unlock(&file_priv->table_lock); 696 spin_unlock(&file_priv->table_lock);
697 goto fail; 697 goto fail;
698 } 698 }
699 drm_gem_object_reference(bo); 699 drm_gem_object_get(bo);
700 exec->bo[i] = (struct drm_gem_cma_object *)bo; 700 exec->bo[i] = (struct drm_gem_cma_object *)bo;
701 } 701 }
702 spin_unlock(&file_priv->table_lock); 702 spin_unlock(&file_priv->table_lock);
@@ -728,7 +728,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
728 args->shader_rec_count >= (UINT_MAX / 728 args->shader_rec_count >= (UINT_MAX /
729 sizeof(struct vc4_shader_state)) || 729 sizeof(struct vc4_shader_state)) ||
730 temp_size < exec_size) { 730 temp_size < exec_size) {
731 DRM_ERROR("overflow in exec arguments\n"); 731 DRM_DEBUG("overflow in exec arguments\n");
732 ret = -EINVAL; 732 ret = -EINVAL;
733 goto fail; 733 goto fail;
734 } 734 }
@@ -834,7 +834,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
834 834
835 if (exec->bo) { 835 if (exec->bo) {
836 for (i = 0; i < exec->bo_count; i++) 836 for (i = 0; i < exec->bo_count; i++)
837 drm_gem_object_unreference_unlocked(&exec->bo[i]->base); 837 drm_gem_object_put_unlocked(&exec->bo[i]->base);
838 kvfree(exec->bo); 838 kvfree(exec->bo);
839 } 839 }
840 840
@@ -842,7 +842,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
842 struct vc4_bo *bo = list_first_entry(&exec->unref_list, 842 struct vc4_bo *bo = list_first_entry(&exec->unref_list,
843 struct vc4_bo, unref_head); 843 struct vc4_bo, unref_head);
844 list_del(&bo->unref_head); 844 list_del(&bo->unref_head);
845 drm_gem_object_unreference_unlocked(&bo->base.base); 845 drm_gem_object_put_unlocked(&bo->base.base);
846 } 846 }
847 847
848 /* Free up the allocation of any bin slots we used. */ 848 /* Free up the allocation of any bin slots we used. */
@@ -973,7 +973,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
973 973
974 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 974 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
975 if (!gem_obj) { 975 if (!gem_obj) {
976 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 976 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
977 return -EINVAL; 977 return -EINVAL;
978 } 978 }
979 bo = to_vc4_bo(gem_obj); 979 bo = to_vc4_bo(gem_obj);
@@ -981,7 +981,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
981 ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, 981 ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
982 &args->timeout_ns); 982 &args->timeout_ns);
983 983
984 drm_gem_object_unreference_unlocked(gem_obj); 984 drm_gem_object_put_unlocked(gem_obj);
985 return ret; 985 return ret;
986} 986}
987 987
@@ -1007,8 +1007,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1007 struct ww_acquire_ctx acquire_ctx; 1007 struct ww_acquire_ctx acquire_ctx;
1008 int ret = 0; 1008 int ret = 0;
1009 1009
1010 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { 1010 if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
1011 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); 1011 VC4_SUBMIT_CL_FIXED_RCL_ORDER |
1012 VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
1013 VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
1014 DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
1012 return -EINVAL; 1015 return -EINVAL;
1013 } 1016 }
1014 1017
@@ -1117,6 +1120,4 @@ vc4_gem_destroy(struct drm_device *dev)
1117 1120
1118 if (vc4->hang_state) 1121 if (vc4->hang_state)
1119 vc4_free_hang_state(dev, vc4->hang_state); 1122 vc4_free_hang_state(dev, vc4->hang_state);
1120
1121 vc4_bo_cache_destroy(dev);
1122} 1123}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ff09b8e2f9ee..937da8dd65b8 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -288,6 +288,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
288 drm_mode_connector_update_edid_property(connector, edid); 288 drm_mode_connector_update_edid_property(connector, edid);
289 ret = drm_add_edid_modes(connector, edid); 289 ret = drm_add_edid_modes(connector, edid);
290 drm_edid_to_eld(connector, edid); 290 drm_edid_to_eld(connector, edid);
291 kfree(edid);
291 292
292 return ret; 293 return ret;
293} 294}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index aeec6e8703d2..dfe7554268f0 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -169,7 +169,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
169 gem_obj = drm_gem_object_lookup(file_priv, 169 gem_obj = drm_gem_object_lookup(file_priv,
170 mode_cmd->handles[0]); 170 mode_cmd->handles[0]);
171 if (!gem_obj) { 171 if (!gem_obj) {
172 DRM_ERROR("Failed to look up GEM BO %d\n", 172 DRM_DEBUG("Failed to look up GEM BO %d\n",
173 mode_cmd->handles[0]); 173 mode_cmd->handles[0]);
174 return ERR_PTR(-ENOENT); 174 return ERR_PTR(-ENOENT);
175 } 175 }
@@ -184,7 +184,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
184 mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE; 184 mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
185 } 185 }
186 186
187 drm_gem_object_unreference_unlocked(gem_obj); 187 drm_gem_object_put_unlocked(gem_obj);
188 188
189 mode_cmd = &mode_cmd_local; 189 mode_cmd = &mode_cmd_local;
190 } 190 }
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 4a8051532f00..273984f71ae2 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -261,8 +261,17 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
261 uint8_t max_y_tile = args->max_y_tile; 261 uint8_t max_y_tile = args->max_y_tile;
262 uint8_t xtiles = max_x_tile - min_x_tile + 1; 262 uint8_t xtiles = max_x_tile - min_x_tile + 1;
263 uint8_t ytiles = max_y_tile - min_y_tile + 1; 263 uint8_t ytiles = max_y_tile - min_y_tile + 1;
264 uint8_t x, y; 264 uint8_t xi, yi;
265 uint32_t size, loop_body_size; 265 uint32_t size, loop_body_size;
266 bool positive_x = true;
267 bool positive_y = true;
268
269 if (args->flags & VC4_SUBMIT_CL_FIXED_RCL_ORDER) {
270 if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X))
271 positive_x = false;
272 if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y))
273 positive_y = false;
274 }
266 275
267 size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE; 276 size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
268 loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE; 277 loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
@@ -354,10 +363,12 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
354 rcl_u16(setup, args->height); 363 rcl_u16(setup, args->height);
355 rcl_u16(setup, args->color_write.bits); 364 rcl_u16(setup, args->color_write.bits);
356 365
357 for (y = min_y_tile; y <= max_y_tile; y++) { 366 for (yi = 0; yi < ytiles; yi++) {
358 for (x = min_x_tile; x <= max_x_tile; x++) { 367 int y = positive_y ? min_y_tile + yi : max_y_tile - yi;
359 bool first = (x == min_x_tile && y == min_y_tile); 368 for (xi = 0; xi < xtiles; xi++) {
360 bool last = (x == max_x_tile && y == max_y_tile); 369 int x = positive_x ? min_x_tile + xi : max_x_tile - xi;
370 bool first = (xi == 0 && yi == 0);
371 bool last = (xi == xtiles - 1 && yi == ytiles - 1);
361 372
362 emit_tile(exec, setup, x, y, first, last); 373 emit_tile(exec, setup, x, y, first, last);
363 } 374 }
@@ -378,14 +389,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
378 u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32); 389 u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
379 390
380 if (surf->offset > obj->base.size) { 391 if (surf->offset > obj->base.size) {
381 DRM_ERROR("surface offset %d > BO size %zd\n", 392 DRM_DEBUG("surface offset %d > BO size %zd\n",
382 surf->offset, obj->base.size); 393 surf->offset, obj->base.size);
383 return -EINVAL; 394 return -EINVAL;
384 } 395 }
385 396
386 if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE < 397 if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
387 render_tiles_stride * args->max_y_tile + args->max_x_tile) { 398 render_tiles_stride * args->max_y_tile + args->max_x_tile) {
388 DRM_ERROR("MSAA tile %d, %d out of bounds " 399 DRM_DEBUG("MSAA tile %d, %d out of bounds "
389 "(bo size %zd, offset %d).\n", 400 "(bo size %zd, offset %d).\n",
390 args->max_x_tile, args->max_y_tile, 401 args->max_x_tile, args->max_y_tile,
391 obj->base.size, 402 obj->base.size,
@@ -401,7 +412,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
401 struct drm_vc4_submit_rcl_surface *surf) 412 struct drm_vc4_submit_rcl_surface *surf)
402{ 413{
403 if (surf->flags != 0 || surf->bits != 0) { 414 if (surf->flags != 0 || surf->bits != 0) {
404 DRM_ERROR("MSAA surface had nonzero flags/bits\n"); 415 DRM_DEBUG("MSAA surface had nonzero flags/bits\n");
405 return -EINVAL; 416 return -EINVAL;
406 } 417 }
407 418
@@ -415,7 +426,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
415 exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; 426 exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
416 427
417 if (surf->offset & 0xf) { 428 if (surf->offset & 0xf) {
418 DRM_ERROR("MSAA write must be 16b aligned.\n"); 429 DRM_DEBUG("MSAA write must be 16b aligned.\n");
419 return -EINVAL; 430 return -EINVAL;
420 } 431 }
421 432
@@ -437,7 +448,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
437 int ret; 448 int ret;
438 449
439 if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { 450 if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
440 DRM_ERROR("Extra flags set\n"); 451 DRM_DEBUG("Extra flags set\n");
441 return -EINVAL; 452 return -EINVAL;
442 } 453 }
443 454
@@ -453,12 +464,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
453 464
454 if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { 465 if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
455 if (surf == &exec->args->zs_write) { 466 if (surf == &exec->args->zs_write) {
456 DRM_ERROR("general zs write may not be a full-res.\n"); 467 DRM_DEBUG("general zs write may not be a full-res.\n");
457 return -EINVAL; 468 return -EINVAL;
458 } 469 }
459 470
460 if (surf->bits != 0) { 471 if (surf->bits != 0) {
461 DRM_ERROR("load/store general bits set with " 472 DRM_DEBUG("load/store general bits set with "
462 "full res load/store.\n"); 473 "full res load/store.\n");
463 return -EINVAL; 474 return -EINVAL;
464 } 475 }
@@ -473,19 +484,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
473 if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK | 484 if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
474 VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK | 485 VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
475 VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) { 486 VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
476 DRM_ERROR("Unknown bits in load/store: 0x%04x\n", 487 DRM_DEBUG("Unknown bits in load/store: 0x%04x\n",
477 surf->bits); 488 surf->bits);
478 return -EINVAL; 489 return -EINVAL;
479 } 490 }
480 491
481 if (tiling > VC4_TILING_FORMAT_LT) { 492 if (tiling > VC4_TILING_FORMAT_LT) {
482 DRM_ERROR("Bad tiling format\n"); 493 DRM_DEBUG("Bad tiling format\n");
483 return -EINVAL; 494 return -EINVAL;
484 } 495 }
485 496
486 if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) { 497 if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
487 if (format != 0) { 498 if (format != 0) {
488 DRM_ERROR("No color format should be set for ZS\n"); 499 DRM_DEBUG("No color format should be set for ZS\n");
489 return -EINVAL; 500 return -EINVAL;
490 } 501 }
491 cpp = 4; 502 cpp = 4;
@@ -499,16 +510,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
499 cpp = 4; 510 cpp = 4;
500 break; 511 break;
501 default: 512 default:
502 DRM_ERROR("Bad tile buffer format\n"); 513 DRM_DEBUG("Bad tile buffer format\n");
503 return -EINVAL; 514 return -EINVAL;
504 } 515 }
505 } else { 516 } else {
506 DRM_ERROR("Bad load/store buffer %d.\n", buffer); 517 DRM_DEBUG("Bad load/store buffer %d.\n", buffer);
507 return -EINVAL; 518 return -EINVAL;
508 } 519 }
509 520
510 if (surf->offset & 0xf) { 521 if (surf->offset & 0xf) {
511 DRM_ERROR("load/store buffer must be 16b aligned.\n"); 522 DRM_DEBUG("load/store buffer must be 16b aligned.\n");
512 return -EINVAL; 523 return -EINVAL;
513 } 524 }
514 525
@@ -533,7 +544,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
533 int cpp; 544 int cpp;
534 545
535 if (surf->flags != 0) { 546 if (surf->flags != 0) {
536 DRM_ERROR("No flags supported on render config.\n"); 547 DRM_DEBUG("No flags supported on render config.\n");
537 return -EINVAL; 548 return -EINVAL;
538 } 549 }
539 550
@@ -541,7 +552,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
541 VC4_RENDER_CONFIG_FORMAT_MASK | 552 VC4_RENDER_CONFIG_FORMAT_MASK |
542 VC4_RENDER_CONFIG_MS_MODE_4X | 553 VC4_RENDER_CONFIG_MS_MODE_4X |
543 VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) { 554 VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
544 DRM_ERROR("Unknown bits in render config: 0x%04x\n", 555 DRM_DEBUG("Unknown bits in render config: 0x%04x\n",
545 surf->bits); 556 surf->bits);
546 return -EINVAL; 557 return -EINVAL;
547 } 558 }
@@ -556,7 +567,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
556 exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj; 567 exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
557 568
558 if (tiling > VC4_TILING_FORMAT_LT) { 569 if (tiling > VC4_TILING_FORMAT_LT) {
559 DRM_ERROR("Bad tiling format\n"); 570 DRM_DEBUG("Bad tiling format\n");
560 return -EINVAL; 571 return -EINVAL;
561 } 572 }
562 573
@@ -569,7 +580,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
569 cpp = 4; 580 cpp = 4;
570 break; 581 break;
571 default: 582 default:
572 DRM_ERROR("Bad tile buffer format\n"); 583 DRM_DEBUG("Bad tile buffer format\n");
573 return -EINVAL; 584 return -EINVAL;
574 } 585 }
575 586
@@ -590,7 +601,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
590 601
591 if (args->min_x_tile > args->max_x_tile || 602 if (args->min_x_tile > args->max_x_tile ||
592 args->min_y_tile > args->max_y_tile) { 603 args->min_y_tile > args->max_y_tile) {
593 DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n", 604 DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
594 args->min_x_tile, args->min_y_tile, 605 args->min_x_tile, args->min_y_tile,
595 args->max_x_tile, args->max_y_tile); 606 args->max_x_tile, args->max_y_tile);
596 return -EINVAL; 607 return -EINVAL;
@@ -599,7 +610,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
599 if (has_bin && 610 if (has_bin &&
600 (args->max_x_tile > exec->bin_tiles_x || 611 (args->max_x_tile > exec->bin_tiles_x ||
601 args->max_y_tile > exec->bin_tiles_y)) { 612 args->max_y_tile > exec->bin_tiles_y)) {
602 DRM_ERROR("Render tiles (%d,%d) outside of bin config " 613 DRM_DEBUG("Render tiles (%d,%d) outside of bin config "
603 "(%d,%d)\n", 614 "(%d,%d)\n",
604 args->max_x_tile, args->max_y_tile, 615 args->max_x_tile, args->max_y_tile,
605 exec->bin_tiles_x, exec->bin_tiles_y); 616 exec->bin_tiles_x, exec->bin_tiles_y);
@@ -642,7 +653,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
642 */ 653 */
643 if (!setup.color_write && !setup.zs_write && 654 if (!setup.color_write && !setup.zs_write &&
644 !setup.msaa_color_write && !setup.msaa_zs_write) { 655 !setup.msaa_color_write && !setup.msaa_zs_write) {
645 DRM_ERROR("RCL requires color or Z/S write\n"); 656 DRM_DEBUG("RCL requires color or Z/S write\n");
646 return -EINVAL; 657 return -EINVAL;
647 } 658 }
648 659
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 814b512c6b9a..2db485abb186 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
109 struct vc4_bo *bo; 109 struct vc4_bo *bo;
110 110
111 if (hindex >= exec->bo_count) { 111 if (hindex >= exec->bo_count) {
112 DRM_ERROR("BO index %d greater than BO count %d\n", 112 DRM_DEBUG("BO index %d greater than BO count %d\n",
113 hindex, exec->bo_count); 113 hindex, exec->bo_count);
114 return NULL; 114 return NULL;
115 } 115 }
@@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
117 bo = to_vc4_bo(&obj->base); 117 bo = to_vc4_bo(&obj->base);
118 118
119 if (bo->validated_shader) { 119 if (bo->validated_shader) {
120 DRM_ERROR("Trying to use shader BO as something other than " 120 DRM_DEBUG("Trying to use shader BO as something other than "
121 "a shader\n"); 121 "a shader\n");
122 return NULL; 122 return NULL;
123 } 123 }
@@ -172,7 +172,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
172 * our math. 172 * our math.
173 */ 173 */
174 if (width > 4096 || height > 4096) { 174 if (width > 4096 || height > 4096) {
175 DRM_ERROR("Surface dimensions (%d,%d) too large", 175 DRM_DEBUG("Surface dimensions (%d,%d) too large",
176 width, height); 176 width, height);
177 return false; 177 return false;
178 } 178 }
@@ -191,7 +191,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
191 aligned_height = round_up(height, utile_h); 191 aligned_height = round_up(height, utile_h);
192 break; 192 break;
193 default: 193 default:
194 DRM_ERROR("buffer tiling %d unsupported\n", tiling_format); 194 DRM_DEBUG("buffer tiling %d unsupported\n", tiling_format);
195 return false; 195 return false;
196 } 196 }
197 197
@@ -200,7 +200,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
200 200
201 if (size + offset < size || 201 if (size + offset < size ||
202 size + offset > fbo->base.size) { 202 size + offset > fbo->base.size) {
203 DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n", 203 DRM_DEBUG("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
204 width, height, 204 width, height,
205 aligned_width, aligned_height, 205 aligned_width, aligned_height,
206 size, offset, fbo->base.size); 206 size, offset, fbo->base.size);
@@ -214,7 +214,7 @@ static int
214validate_flush(VALIDATE_ARGS) 214validate_flush(VALIDATE_ARGS)
215{ 215{
216 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) { 216 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
217 DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n"); 217 DRM_DEBUG("Bin CL must end with VC4_PACKET_FLUSH\n");
218 return -EINVAL; 218 return -EINVAL;
219 } 219 }
220 exec->found_flush = true; 220 exec->found_flush = true;
@@ -226,13 +226,13 @@ static int
226validate_start_tile_binning(VALIDATE_ARGS) 226validate_start_tile_binning(VALIDATE_ARGS)
227{ 227{
228 if (exec->found_start_tile_binning_packet) { 228 if (exec->found_start_tile_binning_packet) {
229 DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n"); 229 DRM_DEBUG("Duplicate VC4_PACKET_START_TILE_BINNING\n");
230 return -EINVAL; 230 return -EINVAL;
231 } 231 }
232 exec->found_start_tile_binning_packet = true; 232 exec->found_start_tile_binning_packet = true;
233 233
234 if (!exec->found_tile_binning_mode_config_packet) { 234 if (!exec->found_tile_binning_mode_config_packet) {
235 DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); 235 DRM_DEBUG("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
236 return -EINVAL; 236 return -EINVAL;
237 } 237 }
238 238
@@ -243,7 +243,7 @@ static int
243validate_increment_semaphore(VALIDATE_ARGS) 243validate_increment_semaphore(VALIDATE_ARGS)
244{ 244{
245 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) { 245 if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
246 DRM_ERROR("Bin CL must end with " 246 DRM_DEBUG("Bin CL must end with "
247 "VC4_PACKET_INCREMENT_SEMAPHORE\n"); 247 "VC4_PACKET_INCREMENT_SEMAPHORE\n");
248 return -EINVAL; 248 return -EINVAL;
249 } 249 }
@@ -264,7 +264,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
264 264
265 /* Check overflow condition */ 265 /* Check overflow condition */
266 if (exec->shader_state_count == 0) { 266 if (exec->shader_state_count == 0) {
267 DRM_ERROR("shader state must precede primitives\n"); 267 DRM_DEBUG("shader state must precede primitives\n");
268 return -EINVAL; 268 return -EINVAL;
269 } 269 }
270 shader_state = &exec->shader_state[exec->shader_state_count - 1]; 270 shader_state = &exec->shader_state[exec->shader_state_count - 1];
@@ -281,7 +281,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
281 281
282 if (offset > ib->base.size || 282 if (offset > ib->base.size ||
283 (ib->base.size - offset) / index_size < length) { 283 (ib->base.size - offset) / index_size < length) {
284 DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n", 284 DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
285 offset, length, index_size, ib->base.size); 285 offset, length, index_size, ib->base.size);
286 return -EINVAL; 286 return -EINVAL;
287 } 287 }
@@ -301,13 +301,13 @@ validate_gl_array_primitive(VALIDATE_ARGS)
301 301
302 /* Check overflow condition */ 302 /* Check overflow condition */
303 if (exec->shader_state_count == 0) { 303 if (exec->shader_state_count == 0) {
304 DRM_ERROR("shader state must precede primitives\n"); 304 DRM_DEBUG("shader state must precede primitives\n");
305 return -EINVAL; 305 return -EINVAL;
306 } 306 }
307 shader_state = &exec->shader_state[exec->shader_state_count - 1]; 307 shader_state = &exec->shader_state[exec->shader_state_count - 1];
308 308
309 if (length + base_index < length) { 309 if (length + base_index < length) {
310 DRM_ERROR("primitive vertex count overflow\n"); 310 DRM_DEBUG("primitive vertex count overflow\n");
311 return -EINVAL; 311 return -EINVAL;
312 } 312 }
313 max_index = length + base_index - 1; 313 max_index = length + base_index - 1;
@@ -324,7 +324,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
324 uint32_t i = exec->shader_state_count++; 324 uint32_t i = exec->shader_state_count++;
325 325
326 if (i >= exec->shader_state_size) { 326 if (i >= exec->shader_state_size) {
327 DRM_ERROR("More requests for shader states than declared\n"); 327 DRM_DEBUG("More requests for shader states than declared\n");
328 return -EINVAL; 328 return -EINVAL;
329 } 329 }
330 330
@@ -332,7 +332,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
332 exec->shader_state[i].max_index = 0; 332 exec->shader_state[i].max_index = 0;
333 333
334 if (exec->shader_state[i].addr & ~0xf) { 334 if (exec->shader_state[i].addr & ~0xf) {
335 DRM_ERROR("high bits set in GL shader rec reference\n"); 335 DRM_DEBUG("high bits set in GL shader rec reference\n");
336 return -EINVAL; 336 return -EINVAL;
337 } 337 }
338 338
@@ -356,7 +356,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
356 int bin_slot; 356 int bin_slot;
357 357
358 if (exec->found_tile_binning_mode_config_packet) { 358 if (exec->found_tile_binning_mode_config_packet) {
359 DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); 359 DRM_DEBUG("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
360 return -EINVAL; 360 return -EINVAL;
361 } 361 }
362 exec->found_tile_binning_mode_config_packet = true; 362 exec->found_tile_binning_mode_config_packet = true;
@@ -368,14 +368,14 @@ validate_tile_binning_config(VALIDATE_ARGS)
368 368
369 if (exec->bin_tiles_x == 0 || 369 if (exec->bin_tiles_x == 0 ||
370 exec->bin_tiles_y == 0) { 370 exec->bin_tiles_y == 0) {
371 DRM_ERROR("Tile binning config of %dx%d too small\n", 371 DRM_DEBUG("Tile binning config of %dx%d too small\n",
372 exec->bin_tiles_x, exec->bin_tiles_y); 372 exec->bin_tiles_x, exec->bin_tiles_y);
373 return -EINVAL; 373 return -EINVAL;
374 } 374 }
375 375
376 if (flags & (VC4_BIN_CONFIG_DB_NON_MS | 376 if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
377 VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) { 377 VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
378 DRM_ERROR("unsupported binning config flags 0x%02x\n", flags); 378 DRM_DEBUG("unsupported binning config flags 0x%02x\n", flags);
379 return -EINVAL; 379 return -EINVAL;
380 } 380 }
381 381
@@ -493,20 +493,20 @@ vc4_validate_bin_cl(struct drm_device *dev,
493 const struct cmd_info *info; 493 const struct cmd_info *info;
494 494
495 if (cmd >= ARRAY_SIZE(cmd_info)) { 495 if (cmd >= ARRAY_SIZE(cmd_info)) {
496 DRM_ERROR("0x%08x: packet %d out of bounds\n", 496 DRM_DEBUG("0x%08x: packet %d out of bounds\n",
497 src_offset, cmd); 497 src_offset, cmd);
498 return -EINVAL; 498 return -EINVAL;
499 } 499 }
500 500
501 info = &cmd_info[cmd]; 501 info = &cmd_info[cmd];
502 if (!info->name) { 502 if (!info->name) {
503 DRM_ERROR("0x%08x: packet %d invalid\n", 503 DRM_DEBUG("0x%08x: packet %d invalid\n",
504 src_offset, cmd); 504 src_offset, cmd);
505 return -EINVAL; 505 return -EINVAL;
506 } 506 }
507 507
508 if (src_offset + info->len > len) { 508 if (src_offset + info->len > len) {
509 DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x " 509 DRM_DEBUG("0x%08x: packet %d (%s) length 0x%08x "
510 "exceeds bounds (0x%08x)\n", 510 "exceeds bounds (0x%08x)\n",
511 src_offset, cmd, info->name, info->len, 511 src_offset, cmd, info->name, info->len,
512 src_offset + len); 512 src_offset + len);
@@ -519,7 +519,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
519 if (info->func && info->func(exec, 519 if (info->func && info->func(exec,
520 dst_pkt + 1, 520 dst_pkt + 1,
521 src_pkt + 1)) { 521 src_pkt + 1)) {
522 DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n", 522 DRM_DEBUG("0x%08x: packet %d (%s) failed to validate\n",
523 src_offset, cmd, info->name); 523 src_offset, cmd, info->name);
524 return -EINVAL; 524 return -EINVAL;
525 } 525 }
@@ -537,7 +537,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
537 exec->ct0ea = exec->ct0ca + dst_offset; 537 exec->ct0ea = exec->ct0ca + dst_offset;
538 538
539 if (!exec->found_start_tile_binning_packet) { 539 if (!exec->found_start_tile_binning_packet) {
540 DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n"); 540 DRM_DEBUG("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
541 return -EINVAL; 541 return -EINVAL;
542 } 542 }
543 543
@@ -549,7 +549,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
549 * semaphore increment. 549 * semaphore increment.
550 */ 550 */
551 if (!exec->found_increment_semaphore_packet || !exec->found_flush) { 551 if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
552 DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + " 552 DRM_DEBUG("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
553 "VC4_PACKET_FLUSH\n"); 553 "VC4_PACKET_FLUSH\n");
554 return -EINVAL; 554 return -EINVAL;
555 } 555 }
@@ -588,11 +588,11 @@ reloc_tex(struct vc4_exec_info *exec,
588 uint32_t remaining_size = tex->base.size - p0; 588 uint32_t remaining_size = tex->base.size - p0;
589 589
590 if (p0 > tex->base.size - 4) { 590 if (p0 > tex->base.size - 4) {
591 DRM_ERROR("UBO offset greater than UBO size\n"); 591 DRM_DEBUG("UBO offset greater than UBO size\n");
592 goto fail; 592 goto fail;
593 } 593 }
594 if (p1 > remaining_size - 4) { 594 if (p1 > remaining_size - 4) {
595 DRM_ERROR("UBO clamp would allow reads " 595 DRM_DEBUG("UBO clamp would allow reads "
596 "outside of UBO\n"); 596 "outside of UBO\n");
597 goto fail; 597 goto fail;
598 } 598 }
@@ -612,14 +612,14 @@ reloc_tex(struct vc4_exec_info *exec,
612 if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) == 612 if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
613 VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) { 613 VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
614 if (cube_map_stride) { 614 if (cube_map_stride) {
615 DRM_ERROR("Cube map stride set twice\n"); 615 DRM_DEBUG("Cube map stride set twice\n");
616 goto fail; 616 goto fail;
617 } 617 }
618 618
619 cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK; 619 cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
620 } 620 }
621 if (!cube_map_stride) { 621 if (!cube_map_stride) {
622 DRM_ERROR("Cube map stride not set\n"); 622 DRM_DEBUG("Cube map stride not set\n");
623 goto fail; 623 goto fail;
624 } 624 }
625 } 625 }
@@ -660,7 +660,7 @@ reloc_tex(struct vc4_exec_info *exec,
660 case VC4_TEXTURE_TYPE_RGBA64: 660 case VC4_TEXTURE_TYPE_RGBA64:
661 case VC4_TEXTURE_TYPE_YUV422R: 661 case VC4_TEXTURE_TYPE_YUV422R:
662 default: 662 default:
663 DRM_ERROR("Texture format %d unsupported\n", type); 663 DRM_DEBUG("Texture format %d unsupported\n", type);
664 goto fail; 664 goto fail;
665 } 665 }
666 utile_w = utile_width(cpp); 666 utile_w = utile_width(cpp);
@@ -713,7 +713,7 @@ reloc_tex(struct vc4_exec_info *exec,
713 level_size = aligned_width * cpp * aligned_height; 713 level_size = aligned_width * cpp * aligned_height;
714 714
715 if (offset < level_size) { 715 if (offset < level_size) {
716 DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db " 716 DRM_DEBUG("Level %d (%dx%d -> %dx%d) size %db "
717 "overflowed buffer bounds (offset %d)\n", 717 "overflowed buffer bounds (offset %d)\n",
718 i, level_width, level_height, 718 i, level_width, level_height,
719 aligned_width, aligned_height, 719 aligned_width, aligned_height,
@@ -764,7 +764,7 @@ validate_gl_shader_rec(struct drm_device *dev,
764 764
765 nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes; 765 nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
766 if (nr_relocs * 4 > exec->shader_rec_size) { 766 if (nr_relocs * 4 > exec->shader_rec_size) {
767 DRM_ERROR("overflowed shader recs reading %d handles " 767 DRM_DEBUG("overflowed shader recs reading %d handles "
768 "from %d bytes left\n", 768 "from %d bytes left\n",
769 nr_relocs, exec->shader_rec_size); 769 nr_relocs, exec->shader_rec_size);
770 return -EINVAL; 770 return -EINVAL;
@@ -774,7 +774,7 @@ validate_gl_shader_rec(struct drm_device *dev,
774 exec->shader_rec_size -= nr_relocs * 4; 774 exec->shader_rec_size -= nr_relocs * 4;
775 775
776 if (packet_size > exec->shader_rec_size) { 776 if (packet_size > exec->shader_rec_size) {
777 DRM_ERROR("overflowed shader recs copying %db packet " 777 DRM_DEBUG("overflowed shader recs copying %db packet "
778 "from %d bytes left\n", 778 "from %d bytes left\n",
779 packet_size, exec->shader_rec_size); 779 packet_size, exec->shader_rec_size);
780 return -EINVAL; 780 return -EINVAL;
@@ -794,7 +794,7 @@ validate_gl_shader_rec(struct drm_device *dev,
794 794
795 for (i = 0; i < shader_reloc_count; i++) { 795 for (i = 0; i < shader_reloc_count; i++) {
796 if (src_handles[i] > exec->bo_count) { 796 if (src_handles[i] > exec->bo_count) {
797 DRM_ERROR("Shader handle %d too big\n", src_handles[i]); 797 DRM_DEBUG("Shader handle %d too big\n", src_handles[i]);
798 return -EINVAL; 798 return -EINVAL;
799 } 799 }
800 800
@@ -810,13 +810,13 @@ validate_gl_shader_rec(struct drm_device *dev,
810 810
811 if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) != 811 if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
812 to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) { 812 to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
813 DRM_ERROR("Thread mode of CL and FS do not match\n"); 813 DRM_DEBUG("Thread mode of CL and FS do not match\n");
814 return -EINVAL; 814 return -EINVAL;
815 } 815 }
816 816
817 if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded || 817 if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
818 to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) { 818 to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
819 DRM_ERROR("cs and vs cannot be threaded\n"); 819 DRM_DEBUG("cs and vs cannot be threaded\n");
820 return -EINVAL; 820 return -EINVAL;
821 } 821 }
822 822
@@ -831,7 +831,7 @@ validate_gl_shader_rec(struct drm_device *dev,
831 *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset; 831 *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
832 832
833 if (src_offset != 0) { 833 if (src_offset != 0) {
834 DRM_ERROR("Shaders must be at offset 0 of " 834 DRM_DEBUG("Shaders must be at offset 0 of "
835 "the BO.\n"); 835 "the BO.\n");
836 return -EINVAL; 836 return -EINVAL;
837 } 837 }
@@ -842,7 +842,7 @@ validate_gl_shader_rec(struct drm_device *dev,
842 842
843 if (validated_shader->uniforms_src_size > 843 if (validated_shader->uniforms_src_size >
844 exec->uniforms_size) { 844 exec->uniforms_size) {
845 DRM_ERROR("Uniforms src buffer overflow\n"); 845 DRM_DEBUG("Uniforms src buffer overflow\n");
846 return -EINVAL; 846 return -EINVAL;
847 } 847 }
848 848
@@ -900,7 +900,7 @@ validate_gl_shader_rec(struct drm_device *dev,
900 900
901 if (vbo->base.size < offset || 901 if (vbo->base.size < offset ||
902 vbo->base.size - offset < attr_size) { 902 vbo->base.size - offset < attr_size) {
903 DRM_ERROR("BO offset overflow (%d + %d > %zu)\n", 903 DRM_DEBUG("BO offset overflow (%d + %d > %zu)\n",
904 offset, attr_size, vbo->base.size); 904 offset, attr_size, vbo->base.size);
905 return -EINVAL; 905 return -EINVAL;
906 } 906 }
@@ -909,7 +909,7 @@ validate_gl_shader_rec(struct drm_device *dev,
909 max_index = ((vbo->base.size - offset - attr_size) / 909 max_index = ((vbo->base.size - offset - attr_size) /
910 stride); 910 stride);
911 if (state->max_index > max_index) { 911 if (state->max_index > max_index) {
912 DRM_ERROR("primitives use index %d out of " 912 DRM_DEBUG("primitives use index %d out of "
913 "supplied %d\n", 913 "supplied %d\n",
914 state->max_index, max_index); 914 state->max_index, max_index);
915 return -EINVAL; 915 return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 0b2df5c6efb4..d3f15bf60900 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
200 uint32_t clamp_reg, clamp_offset; 200 uint32_t clamp_reg, clamp_offset;
201 201
202 if (sig == QPU_SIG_SMALL_IMM) { 202 if (sig == QPU_SIG_SMALL_IMM) {
203 DRM_ERROR("direct TMU read used small immediate\n"); 203 DRM_DEBUG("direct TMU read used small immediate\n");
204 return false; 204 return false;
205 } 205 }
206 206
@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
209 */ 209 */
210 if (is_mul || 210 if (is_mul ||
211 QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { 211 QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
212 DRM_ERROR("direct TMU load wasn't an add\n"); 212 DRM_DEBUG("direct TMU load wasn't an add\n");
213 return false; 213 return false;
214 } 214 }
215 215
@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
220 */ 220 */
221 clamp_reg = raddr_add_a_to_live_reg_index(inst); 221 clamp_reg = raddr_add_a_to_live_reg_index(inst);
222 if (clamp_reg == ~0) { 222 if (clamp_reg == ~0) {
223 DRM_ERROR("direct TMU load wasn't clamped\n"); 223 DRM_DEBUG("direct TMU load wasn't clamped\n");
224 return false; 224 return false;
225 } 225 }
226 226
227 clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg]; 227 clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
228 if (clamp_offset == ~0) { 228 if (clamp_offset == ~0) {
229 DRM_ERROR("direct TMU load wasn't clamped\n"); 229 DRM_DEBUG("direct TMU load wasn't clamped\n");
230 return false; 230 return false;
231 } 231 }
232 232
@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
238 238
239 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && 239 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
240 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { 240 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
241 DRM_ERROR("direct TMU load didn't add to a uniform\n"); 241 DRM_DEBUG("direct TMU load didn't add to a uniform\n");
242 return false; 242 return false;
243 } 243 }
244 244
@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
246 } else { 246 } else {
247 if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM && 247 if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
248 raddr_b == QPU_R_UNIF)) { 248 raddr_b == QPU_R_UNIF)) {
249 DRM_ERROR("uniform read in the same instruction as " 249 DRM_DEBUG("uniform read in the same instruction as "
250 "texture setup.\n"); 250 "texture setup.\n");
251 return false; 251 return false;
252 } 252 }
253 } 253 }
254 254
255 if (validation_state->tmu_write_count[tmu] >= 4) { 255 if (validation_state->tmu_write_count[tmu] >= 4) {
256 DRM_ERROR("TMU%d got too many parameters before dispatch\n", 256 DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
257 tmu); 257 tmu);
258 return false; 258 return false;
259 } 259 }
@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
265 */ 265 */
266 if (!is_direct) { 266 if (!is_direct) {
267 if (validation_state->needs_uniform_address_update) { 267 if (validation_state->needs_uniform_address_update) {
268 DRM_ERROR("Texturing with undefined uniform address\n"); 268 DRM_DEBUG("Texturing with undefined uniform address\n");
269 return false; 269 return false;
270 } 270 }
271 271
@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
336 case QPU_SIG_LOAD_TMU1: 336 case QPU_SIG_LOAD_TMU1:
337 break; 337 break;
338 default: 338 default:
339 DRM_ERROR("uniforms address change must be " 339 DRM_DEBUG("uniforms address change must be "
340 "normal math\n"); 340 "normal math\n");
341 return false; 341 return false;
342 } 342 }
343 343
344 if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { 344 if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
345 DRM_ERROR("Uniform address reset must be an ADD.\n"); 345 DRM_DEBUG("Uniform address reset must be an ADD.\n");
346 return false; 346 return false;
347 } 347 }
348 348
349 if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) { 349 if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
350 DRM_ERROR("Uniform address reset must be unconditional.\n"); 350 DRM_DEBUG("Uniform address reset must be unconditional.\n");
351 return false; 351 return false;
352 } 352 }
353 353
354 if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP && 354 if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
355 !(inst & QPU_PM)) { 355 !(inst & QPU_PM)) {
356 DRM_ERROR("No packing allowed on uniforms reset\n"); 356 DRM_DEBUG("No packing allowed on uniforms reset\n");
357 return false; 357 return false;
358 } 358 }
359 359
360 if (add_lri == -1) { 360 if (add_lri == -1) {
361 DRM_ERROR("First argument of uniform address write must be " 361 DRM_DEBUG("First argument of uniform address write must be "
362 "an immediate value.\n"); 362 "an immediate value.\n");
363 return false; 363 return false;
364 } 364 }
365 365
366 if (validation_state->live_immediates[add_lri] != expected_offset) { 366 if (validation_state->live_immediates[add_lri] != expected_offset) {
367 DRM_ERROR("Resetting uniforms with offset %db instead of %db\n", 367 DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
368 validation_state->live_immediates[add_lri], 368 validation_state->live_immediates[add_lri],
369 expected_offset); 369 expected_offset);
370 return false; 370 return false;
@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
372 372
373 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && 373 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
374 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { 374 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
375 DRM_ERROR("Second argument of uniform address write must be " 375 DRM_DEBUG("Second argument of uniform address write must be "
376 "a uniform.\n"); 376 "a uniform.\n");
377 return false; 377 return false;
378 } 378 }
@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
417 switch (waddr) { 417 switch (waddr) {
418 case QPU_W_UNIFORMS_ADDRESS: 418 case QPU_W_UNIFORMS_ADDRESS:
419 if (is_b) { 419 if (is_b) {
420 DRM_ERROR("relative uniforms address change " 420 DRM_DEBUG("relative uniforms address change "
421 "unsupported\n"); 421 "unsupported\n");
422 return false; 422 return false;
423 } 423 }
@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
452 /* XXX: I haven't thought about these, so don't support them 452 /* XXX: I haven't thought about these, so don't support them
453 * for now. 453 * for now.
454 */ 454 */
455 DRM_ERROR("Unsupported waddr %d\n", waddr); 455 DRM_DEBUG("Unsupported waddr %d\n", waddr);
456 return false; 456 return false;
457 457
458 case QPU_W_VPM_ADDR: 458 case QPU_W_VPM_ADDR:
459 DRM_ERROR("General VPM DMA unsupported\n"); 459 DRM_DEBUG("General VPM DMA unsupported\n");
460 return false; 460 return false;
461 461
462 case QPU_W_VPM: 462 case QPU_W_VPM:
@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
559 bool ok; 559 bool ok;
560 560
561 if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) { 561 if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
562 DRM_ERROR("ADD and MUL both set up textures\n"); 562 DRM_DEBUG("ADD and MUL both set up textures\n");
563 return false; 563 return false;
564 } 564 }
565 565
@@ -588,7 +588,7 @@ check_branch(uint64_t inst,
588 * there's no need for it. 588 * there's no need for it.
589 */ 589 */
590 if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) { 590 if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
591 DRM_ERROR("branch instruction at %d wrote a register.\n", 591 DRM_DEBUG("branch instruction at %d wrote a register.\n",
592 validation_state->ip); 592 validation_state->ip);
593 return false; 593 return false;
594 } 594 }
@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
614 validated_shader->uniforms_size += 4; 614 validated_shader->uniforms_size += 4;
615 615
616 if (validation_state->needs_uniform_address_update) { 616 if (validation_state->needs_uniform_address_update) {
617 DRM_ERROR("Uniform read with undefined uniform " 617 DRM_DEBUG("Uniform read with undefined uniform "
618 "address\n"); 618 "address\n");
619 return false; 619 return false;
620 } 620 }
@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
660 continue; 660 continue;
661 661
662 if (ip - last_branch < 4) { 662 if (ip - last_branch < 4) {
663 DRM_ERROR("Branch at %d during delay slots\n", ip); 663 DRM_DEBUG("Branch at %d during delay slots\n", ip);
664 return false; 664 return false;
665 } 665 }
666 last_branch = ip; 666 last_branch = ip;
667 667
668 if (inst & QPU_BRANCH_REG) { 668 if (inst & QPU_BRANCH_REG) {
669 DRM_ERROR("branching from register relative " 669 DRM_DEBUG("branching from register relative "
670 "not supported\n"); 670 "not supported\n");
671 return false; 671 return false;
672 } 672 }
673 673
674 if (!(inst & QPU_BRANCH_REL)) { 674 if (!(inst & QPU_BRANCH_REL)) {
675 DRM_ERROR("relative branching required\n"); 675 DRM_DEBUG("relative branching required\n");
676 return false; 676 return false;
677 } 677 }
678 678
@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
682 * end of the shader object. 682 * end of the shader object.
683 */ 683 */
684 if (branch_imm % sizeof(inst) != 0) { 684 if (branch_imm % sizeof(inst) != 0) {
685 DRM_ERROR("branch target not aligned\n"); 685 DRM_DEBUG("branch target not aligned\n");
686 return false; 686 return false;
687 } 687 }
688 688
689 branch_target_ip = after_delay_ip + (branch_imm >> 3); 689 branch_target_ip = after_delay_ip + (branch_imm >> 3);
690 if (branch_target_ip >= validation_state->max_ip) { 690 if (branch_target_ip >= validation_state->max_ip) {
691 DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n", 691 DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
692 ip, branch_target_ip, 692 ip, branch_target_ip,
693 validation_state->max_ip); 693 validation_state->max_ip);
694 return false; 694 return false;
@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
699 * the shader. 699 * the shader.
700 */ 700 */
701 if (after_delay_ip >= validation_state->max_ip) { 701 if (after_delay_ip >= validation_state->max_ip) {
702 DRM_ERROR("Branch at %d continues past shader end " 702 DRM_DEBUG("Branch at %d continues past shader end "
703 "(%d/%d)\n", 703 "(%d/%d)\n",
704 ip, after_delay_ip, validation_state->max_ip); 704 ip, after_delay_ip, validation_state->max_ip);
705 return false; 705 return false;
@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
709 } 709 }
710 710
711 if (max_branch_target > validation_state->max_ip - 3) { 711 if (max_branch_target > validation_state->max_ip - 3) {
712 DRM_ERROR("Branch landed after QPU_SIG_PROG_END"); 712 DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
713 return false; 713 return false;
714 } 714 }
715 715
@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
750 return true; 750 return true;
751 751
752 if (texturing_in_progress(validation_state)) { 752 if (texturing_in_progress(validation_state)) {
753 DRM_ERROR("Branch target landed during TMU setup\n"); 753 DRM_DEBUG("Branch target landed during TMU setup\n");
754 return false; 754 return false;
755 } 755 }
756 756
@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
837 case QPU_SIG_LAST_THREAD_SWITCH: 837 case QPU_SIG_LAST_THREAD_SWITCH:
838 if (!check_instruction_writes(validated_shader, 838 if (!check_instruction_writes(validated_shader,
839 &validation_state)) { 839 &validation_state)) {
840 DRM_ERROR("Bad write at ip %d\n", ip); 840 DRM_DEBUG("Bad write at ip %d\n", ip);
841 goto fail; 841 goto fail;
842 } 842 }
843 843
@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
855 validated_shader->is_threaded = true; 855 validated_shader->is_threaded = true;
856 856
857 if (ip < last_thread_switch_ip + 3) { 857 if (ip < last_thread_switch_ip + 3) {
858 DRM_ERROR("Thread switch too soon after " 858 DRM_DEBUG("Thread switch too soon after "
859 "last switch at ip %d\n", ip); 859 "last switch at ip %d\n", ip);
860 goto fail; 860 goto fail;
861 } 861 }
@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
867 case QPU_SIG_LOAD_IMM: 867 case QPU_SIG_LOAD_IMM:
868 if (!check_instruction_writes(validated_shader, 868 if (!check_instruction_writes(validated_shader,
869 &validation_state)) { 869 &validation_state)) {
870 DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); 870 DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
871 goto fail; 871 goto fail;
872 } 872 }
873 break; 873 break;
@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
878 goto fail; 878 goto fail;
879 879
880 if (ip < last_thread_switch_ip + 3) { 880 if (ip < last_thread_switch_ip + 3) {
881 DRM_ERROR("Branch in thread switch at ip %d", 881 DRM_DEBUG("Branch in thread switch at ip %d",
882 ip); 882 ip);
883 goto fail; 883 goto fail;
884 } 884 }
885 885
886 break; 886 break;
887 default: 887 default:
888 DRM_ERROR("Unsupported QPU signal %d at " 888 DRM_DEBUG("Unsupported QPU signal %d at "
889 "instruction %d\n", sig, ip); 889 "instruction %d\n", sig, ip);
890 goto fail; 890 goto fail;
891 } 891 }
@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
898 } 898 }
899 899
900 if (ip == validation_state.max_ip) { 900 if (ip == validation_state.max_ip) {
901 DRM_ERROR("shader failed to terminate before " 901 DRM_DEBUG("shader failed to terminate before "
902 "shader BO end at %zd\n", 902 "shader BO end at %zd\n",
903 shader_obj->base.size); 903 shader_obj->base.size);
904 goto fail; 904 goto fail;
@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
907 /* Might corrupt other thread */ 907 /* Might corrupt other thread */
908 if (validated_shader->is_threaded && 908 if (validated_shader->is_threaded &&
909 validation_state.all_registers_used) { 909 validation_state.all_registers_used) {
910 DRM_ERROR("Shader uses threading, but uses the upper " 910 DRM_DEBUG("Shader uses threading, but uses the upper "
911 "half of the registers, too\n"); 911 "half of the registers, too\n");
912 goto fail; 912 goto fail;
913 } 913 }
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 12289673f457..2524ff116f00 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -190,7 +190,7 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
190 return ERR_CAST(obj); 190 return ERR_CAST(obj);
191 191
192 ret = drm_gem_handle_create(file, &obj->base, handle); 192 ret = drm_gem_handle_create(file, &obj->base, handle);
193 drm_gem_object_unreference_unlocked(&obj->base); 193 drm_gem_object_put_unlocked(&obj->base);
194 if (ret) 194 if (ret)
195 goto err; 195 goto err;
196 196
@@ -245,7 +245,7 @@ static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
245 245
246 *offset = drm_vma_node_offset_addr(&obj->vma_node); 246 *offset = drm_vma_node_offset_addr(&obj->vma_node);
247unref: 247unref:
248 drm_gem_object_unreference_unlocked(obj); 248 drm_gem_object_put_unlocked(obj);
249 249
250 return ret; 250 return ret;
251} 251}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 3109c8308eb5..8fd52f211e9d 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -213,7 +213,7 @@ err_fence:
213 dma_fence_put(fence); 213 dma_fence_put(fence);
214 } 214 }
215err: 215err:
216 drm_gem_object_unreference_unlocked(obj); 216 drm_gem_object_put_unlocked(obj);
217 return ret; 217 return ret;
218} 218}
219 219
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 63d35c7e416c..49a3d8d5a249 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -122,7 +122,6 @@ static struct drm_driver driver = {
122 122
123 .dumb_create = virtio_gpu_mode_dumb_create, 123 .dumb_create = virtio_gpu_mode_dumb_create,
124 .dumb_map_offset = virtio_gpu_mode_dumb_mmap, 124 .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
125 .dumb_destroy = virtio_gpu_mode_dumb_destroy,
126 125
127#if defined(CONFIG_DEBUG_FS) 126#if defined(CONFIG_DEBUG_FS)
128 .debugfs_init = virtio_gpu_debugfs_init, 127 .debugfs_init = virtio_gpu_debugfs_init,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3a66abb8fd50..da2fb585fea4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -236,9 +236,6 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
236int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, 236int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
237 struct drm_device *dev, 237 struct drm_device *dev,
238 struct drm_mode_create_dumb *args); 238 struct drm_mode_create_dumb *args);
239int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
240 struct drm_device *dev,
241 uint32_t handle);
242int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, 239int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
243 struct drm_device *dev, 240 struct drm_device *dev,
244 uint32_t handle, uint64_t *offset_p); 241 uint32_t handle, uint64_t *offset_p);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 046e28b69d99..15d18fd0c64b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -308,7 +308,7 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
308 308
309 return 0; 309 return 0;
310} 310}
311static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = { 311static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
312 .fb_probe = virtio_gpufb_create, 312 .fb_probe = virtio_gpufb_create,
313}; 313};
314 314
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index cc025d8fbe19..72ad7b103448 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -118,13 +118,6 @@ fail:
118 return ret; 118 return ret;
119} 119}
120 120
121int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
122 struct drm_device *dev,
123 uint32_t handle)
124{
125 return drm_gem_handle_delete(file_priv, handle);
126}
127
128int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, 121int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
129 struct drm_device *dev, 122 struct drm_device *dev,
130 uint32_t handle, uint64_t *offset_p) 123 uint32_t handle, uint64_t *offset_p)