author     Dave Airlie <airlied@redhat.com>   2018-09-13 19:43:06 -0400
committer  Dave Airlie <airlied@redhat.com>   2018-09-13 19:43:16 -0400
commit     2dc7bad71cd310dc94d1c9907909324dd2b0618f (patch)
tree       a087555dd4b1588eeac02c1af5d8dde7b5bb15fa
parent     b1c1566822ab489a945dfdafee651aa29de160c7 (diff)
parent     169cc4c7a14e988985c8833ddec2f3e897de2c28 (diff)
Merge tag 'drm-misc-next-2018-09-13' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 4.20:

UAPI Changes:
- Add host endian variants for the most common formats (Gerd)
- Fail ADDFB2 for big-endian drivers that don't advertise BE quirk (Gerd)
- clear smem_start in fbdev for drm drivers to avoid leaking fb addr (Daniel)

Cross-subsystem Changes:

Core Changes:
- fix drm_mode_addfb() on big endian machines (Gerd)
- add timeline point to syncobj find+replace (Chunming)
- more drmP.h removal effort (Daniel)
- split uapi portions of drm_atomic.c into drm_atomic_uapi.c (Daniel)

Driver Changes:
- bochs: Convert open-coded portions to use helpers (Peter)
- vkms: Add cursor support (Haneen)
- udmabuf: Lots of fixups (mostly cosmetic afaict) (Gerd)
- qxl: Convert to use fbdev helper (Peter)

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Chunming Zhou <david1.zhou@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Peter Wu <peter@lekensteyn.nl>
Cc: Haneen Mohammed <hamohammed.sa@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20180913130254.GA156437@art_vandelay
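Of the core changes above, the syncobj work is the one that changes function signatures at call sites: drm_syncobj_find_fence() and drm_syncobj_replace_fence() now take a timeline point, and existing binary-syncobj users simply pass 0 (see the amdgpu hunk further down). A minimal sketch of an adapted caller follows; it is kernel-style C that only builds inside a driver, and the example_* helper names are invented for illustration.

/* Sketch only: shows the extra "point" argument added in this series.
 * The example_* helpers are hypothetical, not part of any driver.
 */
static int example_sync_in(struct drm_file *file_priv, u32 handle,
			   struct dma_fence **fence)
{
	/* binary syncobjs pass point 0; the found fence is returned with a reference */
	return drm_syncobj_find_fence(file_priv, handle, 0, fence);
}

static void example_sync_out(struct drm_syncobj *syncobj,
			     struct dma_fence *fence)
{
	/* again point 0 for plain (non-timeline) syncobjs */
	drm_syncobj_replace_fence(syncobj, 0, fence);
}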
-rw-r--r--  Documentation/gpu/drivers.rst | 1
-rw-r--r--  Documentation/gpu/drm-kms.rst | 12
-rw-r--r--  Documentation/gpu/todo.rst | 71
-rw-r--r--  Documentation/gpu/vkms.rst | 24
-rw-r--r--  drivers/dma-buf/Kconfig | 1
-rw-r--r--  drivers/dma-buf/udmabuf.c | 93
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h | 21
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 46
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 79
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 7
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 74
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 1510
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 1393
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 22
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 4
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 6
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 19
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 37
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 49
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 5
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 2
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 13
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 101
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 32
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 197
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 90
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 27
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 3
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crc.c | 154
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 13
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h | 18
-rw-r--r--  drivers/gpu/drm/vkms/vkms_output.c | 19
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c | 43
-rw-r--r--  drivers/staging/vboxvideo/vbox_fb.c | 3
-rw-r--r--  drivers/tty/vt/vt.c | 12
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 4
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 8
-rw-r--r--  include/drm/drmP.h | 3
-rw-r--r--  include/drm/drm_atomic.h | 23
-rw-r--r--  include/drm/drm_atomic_helper.h | 1
-rw-r--r--  include/drm/drm_atomic_uapi.h | 58
-rw-r--r--  include/drm/drm_color_mgmt.h | 1
-rw-r--r--  include/drm/drm_connector.h | 1
-rw-r--r--  include/drm/drm_drv.h | 1
-rw-r--r--  include/drm/drm_encoder.h | 1
-rw-r--r--  include/drm/drm_fb_helper.h | 22
-rw-r--r--  include/drm/drm_fourcc.h | 22
-rw-r--r--  include/drm/drm_mode_config.h | 15
-rw-r--r--  include/drm/drm_plane.h | 3
-rw-r--r--  include/drm/drm_property.h | 2
-rw-r--r--  include/drm/drm_syncobj.h | 4
-rw-r--r--  include/drm/drm_util.h | 32
-rw-r--r--  include/linux/console_struct.h | 1
-rw-r--r--  include/linux/fb.h | 9
-rw-r--r--  include/linux/vt_kern.h | 7
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 3
-rw-r--r--  include/uapi/drm/drm_mode.h | 3
85 files changed, 2327 insertions, 2261 deletions
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 65be325bf282..7d2d3875ff1a 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -13,6 +13,7 @@ GPU Driver Documentation
13 tve200 13 tve200
14 v3d 14 v3d
15 vc4 15 vc4
16 vkms
16 bridge/dw-hdmi 17 bridge/dw-hdmi
17 xen-front 18 xen-front
18 19
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index f8f5bf11a6ca..4b1501b4835b 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -287,8 +287,14 @@ Atomic Mode Setting Function Reference
287.. kernel-doc:: drivers/gpu/drm/drm_atomic.c 287.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
288 :export: 288 :export:
289 289
290.. kernel-doc:: drivers/gpu/drm/drm_atomic.c 290Atomic Mode Setting IOCTL and UAPI Functions
291 :internal: 291--------------------------------------------
292
293.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
294 :doc: overview
295
296.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
297 :export:
292 298
293CRTC Abstraction 299CRTC Abstraction
294================ 300================
@@ -566,7 +572,7 @@ Tile Group Property
566Explicit Fencing Properties 572Explicit Fencing Properties
567--------------------------- 573---------------------------
568 574
569.. kernel-doc:: drivers/gpu/drm/drm_atomic.c 575.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
570 :doc: explicit fencing properties 576 :doc: explicit fencing properties
571 577
572Existing KMS Properties 578Existing KMS Properties
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index a7c150d6b63f..77c2b3c25565 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -127,7 +127,8 @@ interfaces to fix these issues:
127 the acquire context explicitly on stack and then also pass it down into 127 the acquire context explicitly on stack and then also pass it down into
128 drivers explicitly so that the legacy-on-atomic functions can use them. 128 drivers explicitly so that the legacy-on-atomic functions can use them.
129 129
130 Except for some driver code this is done. 130 Except for some driver code this is done. This task should be finished by
131 adding WARN_ON(!drm_drv_uses_atomic_modeset) in drm_modeset_lock_all().
131 132
132* A bunch of the vtable hooks are now in the wrong place: DRM has a split 133* A bunch of the vtable hooks are now in the wrong place: DRM has a split
133 between core vfunc tables (named ``drm_foo_funcs``), which are used to 134 between core vfunc tables (named ``drm_foo_funcs``), which are used to
@@ -137,13 +138,6 @@ interfaces to fix these issues:
137 ``_helper_funcs`` since they are not part of the core ABI. There's a 138 ``_helper_funcs`` since they are not part of the core ABI. There's a
138 ``FIXME`` comment in the kerneldoc for each such case in ``drm_crtc.h``. 139 ``FIXME`` comment in the kerneldoc for each such case in ``drm_crtc.h``.
139 140
140* There's a new helper ``drm_atomic_helper_best_encoder()`` which could be
141 used by all atomic drivers which don't select the encoder for a given
142 connector at runtime. That's almost all of them, and would allow us to get
143 rid of a lot of ``best_encoder`` boilerplate in drivers.
144
145 This was almost done, but new drivers added a few more cases again.
146
147Contact: Daniel Vetter 141Contact: Daniel Vetter
148 142
149Get rid of dev->struct_mutex from GEM drivers 143Get rid of dev->struct_mutex from GEM drivers
@@ -164,9 +158,8 @@ private lock. The tricky part is the BO free functions, since those can't
164reliably take that lock any more. Instead state needs to be protected with 158reliably take that lock any more. Instead state needs to be protected with
165suitable subordinate locks or some cleanup work pushed to a worker thread. For 159suitable subordinate locks or some cleanup work pushed to a worker thread. For
166performance-critical drivers it might also be better to go with a more 160performance-critical drivers it might also be better to go with a more
167fine-grained per-buffer object and per-context lockings scheme. Currently the 161fine-grained per-buffer object and per-context lockings scheme. Currently only the
168following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and 162``msm`` driver still use ``struct_mutex``.
169``udl``.
170 163
171Contact: Daniel Vetter, respective driver maintainers 164Contact: Daniel Vetter, respective driver maintainers
172 165
@@ -190,7 +183,8 @@ Convert drivers to use simple modeset suspend/resume
190 183
191Most drivers (except i915 and nouveau) that use 184Most drivers (except i915 and nouveau) that use
192drm_atomic_helper_suspend/resume() can probably be converted to use 185drm_atomic_helper_suspend/resume() can probably be converted to use
193drm_mode_config_helper_suspend/resume(). 186drm_mode_config_helper_suspend/resume(). Also there's still open-coded version
187of the atomic suspend/resume code in older atomic modeset drivers.
194 188
195Contact: Maintainer of the driver you plan to convert 189Contact: Maintainer of the driver you plan to convert
196 190
@@ -246,20 +240,10 @@ Core refactorings
246Clean up the DRM header mess 240Clean up the DRM header mess
247---------------------------- 241----------------------------
248 242
249Currently the DRM subsystem has only one global header, ``drmP.h``. This is 243The DRM subsystem originally had only one huge global header, ``drmP.h``. This
250used both for functions exported to helper libraries and drivers and functions 244is now split up, but many source files still include it. The remaining part of
251only used internally in the ``drm.ko`` module. The goal would be to move all 245the cleanup work here is to replace any ``#include <drm/drmP.h>`` by only the
252header declarations not needed outside of ``drm.ko`` into 246headers needed (and fixing up any missing pre-declarations in the headers).
253``drivers/gpu/drm/drm_*_internal.h`` header files. ``EXPORT_SYMBOL`` also
254needs to be dropped for these functions.
255
256This would nicely tie in with the below task to create kerneldoc after the API
257is cleaned up. Or with the "hide legacy cruft better" task.
258
259Note that this is well in progress, but ``drmP.h`` is still huge. The updated
260plan is to switch to per-file driver API headers, which will also structure
261the kerneldoc better. This should also allow more fine-grained ``#include``
262directives.
263 247
264In the end no .c file should need to include ``drmP.h`` anymore. 248In the end no .c file should need to include ``drmP.h`` anymore.
265 249
@@ -278,26 +262,6 @@ See https://dri.freedesktop.org/docs/drm/ for what's there already.
278 262
279Contact: Daniel Vetter 263Contact: Daniel Vetter
280 264
281Hide legacy cruft better
282------------------------
283
284Way back DRM supported only drivers which shadow-attached to PCI devices with
285userspace or fbdev drivers setting up outputs. Modern DRM drivers take charge
286of the entire device, you can spot them with the DRIVER_MODESET flag.
287
288Unfortunately there's still large piles of legacy code around which needs to
289be hidden so that driver writers don't accidentally end up using it. And to
290prevent security issues in those legacy IOCTLs from being exploited on modern
291drivers. This has multiple possible subtasks:
292
293* Extract support code for legacy features into a ``drm-legacy.ko`` kernel
294 module and compile it only when one of the legacy drivers is enabled.
295
296This is mostly done, the only thing left is to split up ``drm_irq.c`` into
297legacy cruft and the parts needed by modern KMS drivers.
298
299Contact: Daniel Vetter
300
301Make panic handling work 265Make panic handling work
302------------------------ 266------------------------
303 267
@@ -396,17 +360,12 @@ converting things over. For modeset tests we also first need a bit of
396infrastructure to use dumb buffers for untiled buffers, to be able to run all 360infrastructure to use dumb buffers for untiled buffers, to be able to run all
397the non-i915 specific modeset tests. 361the non-i915 specific modeset tests.
398 362
399Contact: Daniel Vetter 363Extend virtual test driver (VKMS)
400 364---------------------------------
401Create a virtual KMS driver for testing (vkms)
402----------------------------------------------
403
404With all the latest helpers it should be fairly simple to create a virtual KMS
405driver useful for testing, or for running X or similar on headless machines
406(to be able to still use the GPU). This would be similar to vgem, but aimed at
407the modeset side.
408 365
409Once the basics are there there's tons of possibilities to extend it. 366See the documentation of :ref:`VKMS <vkms>` for more details. This is an ideal
367internship task, since it only requires a virtual machine and can be sized to
368fit the available time.
410 369
411Contact: Daniel Vetter 370Contact: Daniel Vetter
412 371
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
new file mode 100644
index 000000000000..0a6ea6216e41
--- /dev/null
+++ b/Documentation/gpu/vkms.rst
@@ -0,0 +1,24 @@
1.. _vkms:
2
3==========================================
4 drm/vkms Virtual Kernel Modesetting
5==========================================
6
7.. kernel-doc:: drivers/gpu/drm/vkms/vkms_drv.c
8 :doc: vkms (Virtual Kernel Modesetting)
9
10TODO
11====
12
13CRC API
14-------
15
16- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
17
18- Use the alpha value to blend vaddr_src with vaddr_dst instead of
19 overwriting it in ``blend()``.
20
21- Add igt test to check cleared alpha value for XRGB plane format.
22
23- Add igt test to check extreme alpha values i.e. fully opaque and fully
24 transparent (intermediate values are affected by hw-specific rounding modes).
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 338129eb126f..2e5a0faa2cb1 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -34,6 +34,7 @@ config UDMABUF
34 bool "userspace dmabuf misc driver" 34 bool "userspace dmabuf misc driver"
35 default n 35 default n
36 depends on DMA_SHARED_BUFFER 36 depends on DMA_SHARED_BUFFER
37 depends on MEMFD_CREATE || COMPILE_TEST
37 help 38 help
38 A driver to let userspace turn memfd regions into dma-bufs. 39 A driver to let userspace turn memfd regions into dma-bufs.
39 Qemu can use this to create host dmabufs for guest framebuffers. 40 Qemu can use this to create host dmabufs for guest framebuffers.
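Before the udmabuf diff below, a quick note on how the driver is meant to be used from userspace: a client creates a memfd, seals it against shrinking, and asks the driver to wrap the region into a dma-buf. The sketch here assumes the uapi from <linux/udmabuf.h> (UDMABUF_CREATE, struct udmabuf_create) and the usual /dev/udmabuf misc device node; the "guest-fb" name and the 16-page size are arbitrary, and error handling is mostly omitted. The seal choices mirror the SEALS_WANTED/SEALS_DENIED checks in the driver code that follows.

/* Hedged userspace sketch: turn a memfd region into a dma-buf via udmabuf.
 * Assumes the uapi from <linux/udmabuf.h>; needs glibc's memfd_create().
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
	const __u64 size = 16 * 4096;	/* offset and size must be page aligned */
	struct udmabuf_create create;
	int memfd, devfd, buffd;

	memfd = memfd_create("guest-fb", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	/* the driver wants F_SEAL_SHRINK set and rejects F_SEAL_WRITE */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	devfd = open("/dev/udmabuf", O_RDWR);
	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.offset = 0;
	create.size   = size;

	buffd = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buffd < 0)
		perror("UDMABUF_CREATE");
	else
		printf("dma-buf fd: %d\n", buffd);
	return 0;
}

On success the returned fd is an ordinary dma-buf and can be handed to any importer; this is the path QEMU uses to expose guest framebuffers to the host.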
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 2e8502250afe..9edabce0b8ab 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -1,20 +1,22 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/init.h> 2#include <linux/cred.h>
3#include <linux/module.h>
4#include <linux/device.h> 3#include <linux/device.h>
5#include <linux/kernel.h>
6#include <linux/slab.h>
7#include <linux/miscdevice.h>
8#include <linux/dma-buf.h> 4#include <linux/dma-buf.h>
9#include <linux/highmem.h> 5#include <linux/highmem.h>
10#include <linux/cred.h> 6#include <linux/init.h>
11#include <linux/shmem_fs.h> 7#include <linux/kernel.h>
12#include <linux/memfd.h> 8#include <linux/memfd.h>
9#include <linux/miscdevice.h>
10#include <linux/module.h>
11#include <linux/shmem_fs.h>
12#include <linux/slab.h>
13#include <linux/udmabuf.h>
13 14
14#include <uapi/linux/udmabuf.h> 15static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */
16static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
15 17
16struct udmabuf { 18struct udmabuf {
17 u32 pagecount; 19 pgoff_t pagecount;
18 struct page **pages; 20 struct page **pages;
19}; 21};
20 22
@@ -23,9 +25,6 @@ static int udmabuf_vm_fault(struct vm_fault *vmf)
23 struct vm_area_struct *vma = vmf->vma; 25 struct vm_area_struct *vma = vmf->vma;
24 struct udmabuf *ubuf = vma->vm_private_data; 26 struct udmabuf *ubuf = vma->vm_private_data;
25 27
26 if (WARN_ON(vmf->pgoff >= ubuf->pagecount))
27 return VM_FAULT_SIGBUS;
28
29 vmf->page = ubuf->pages[vmf->pgoff]; 28 vmf->page = ubuf->pages[vmf->pgoff];
30 get_page(vmf->page); 29 get_page(vmf->page);
31 return 0; 30 return 0;
@@ -52,25 +51,24 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
52{ 51{
53 struct udmabuf *ubuf = at->dmabuf->priv; 52 struct udmabuf *ubuf = at->dmabuf->priv;
54 struct sg_table *sg; 53 struct sg_table *sg;
54 int ret;
55 55
56 sg = kzalloc(sizeof(*sg), GFP_KERNEL); 56 sg = kzalloc(sizeof(*sg), GFP_KERNEL);
57 if (!sg) 57 if (!sg)
58 goto err1; 58 return ERR_PTR(-ENOMEM);
59 if (sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount, 59 ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
60 0, ubuf->pagecount << PAGE_SHIFT, 60 0, ubuf->pagecount << PAGE_SHIFT,
61 GFP_KERNEL) < 0) 61 GFP_KERNEL);
62 goto err2; 62 if (ret < 0)
63 goto err;
63 if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) 64 if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction))
64 goto err3; 65 goto err;
65
66 return sg; 66 return sg;
67 67
68err3: 68err:
69 sg_free_table(sg); 69 sg_free_table(sg);
70err2:
71 kfree(sg); 70 kfree(sg);
72err1: 71 return ERR_PTR(ret);
73 return ERR_PTR(-ENOMEM);
74} 72}
75 73
76static void unmap_udmabuf(struct dma_buf_attachment *at, 74static void unmap_udmabuf(struct dma_buf_attachment *at,
@@ -106,7 +104,7 @@ static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
106 kunmap(vaddr); 104 kunmap(vaddr);
107} 105}
108 106
109static struct dma_buf_ops udmabuf_ops = { 107static const struct dma_buf_ops udmabuf_ops = {
110 .map_dma_buf = map_udmabuf, 108 .map_dma_buf = map_udmabuf,
111 .unmap_dma_buf = unmap_udmabuf, 109 .unmap_dma_buf = unmap_udmabuf,
112 .release = release_udmabuf, 110 .release = release_udmabuf,
@@ -118,48 +116,54 @@ static struct dma_buf_ops udmabuf_ops = {
118#define SEALS_WANTED (F_SEAL_SHRINK) 116#define SEALS_WANTED (F_SEAL_SHRINK)
119#define SEALS_DENIED (F_SEAL_WRITE) 117#define SEALS_DENIED (F_SEAL_WRITE)
120 118
121static long udmabuf_create(struct udmabuf_create_list *head, 119static long udmabuf_create(const struct udmabuf_create_list *head,
122 struct udmabuf_create_item *list) 120 const struct udmabuf_create_item *list)
123{ 121{
124 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 122 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
125 struct file *memfd = NULL; 123 struct file *memfd = NULL;
126 struct udmabuf *ubuf; 124 struct udmabuf *ubuf;
127 struct dma_buf *buf; 125 struct dma_buf *buf;
128 pgoff_t pgoff, pgcnt, pgidx, pgbuf; 126 pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
129 struct page *page; 127 struct page *page;
130 int seals, ret = -EINVAL; 128 int seals, ret = -EINVAL;
131 u32 i, flags; 129 u32 i, flags;
132 130
133 ubuf = kzalloc(sizeof(struct udmabuf), GFP_KERNEL); 131 ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
134 if (!ubuf) 132 if (!ubuf)
135 return -ENOMEM; 133 return -ENOMEM;
136 134
135 pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
137 for (i = 0; i < head->count; i++) { 136 for (i = 0; i < head->count; i++) {
138 if (!IS_ALIGNED(list[i].offset, PAGE_SIZE)) 137 if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
139 goto err_free_ubuf; 138 goto err;
140 if (!IS_ALIGNED(list[i].size, PAGE_SIZE)) 139 if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
141 goto err_free_ubuf; 140 goto err;
142 ubuf->pagecount += list[i].size >> PAGE_SHIFT; 141 ubuf->pagecount += list[i].size >> PAGE_SHIFT;
142 if (ubuf->pagecount > pglimit)
143 goto err;
143 } 144 }
144 ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(struct page *), 145 ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
145 GFP_KERNEL); 146 GFP_KERNEL);
146 if (!ubuf->pages) { 147 if (!ubuf->pages) {
147 ret = -ENOMEM; 148 ret = -ENOMEM;
148 goto err_free_ubuf; 149 goto err;
149 } 150 }
150 151
151 pgbuf = 0; 152 pgbuf = 0;
152 for (i = 0; i < head->count; i++) { 153 for (i = 0; i < head->count; i++) {
154 ret = -EBADFD;
153 memfd = fget(list[i].memfd); 155 memfd = fget(list[i].memfd);
154 if (!memfd) 156 if (!memfd)
155 goto err_put_pages; 157 goto err;
156 if (!shmem_mapping(file_inode(memfd)->i_mapping)) 158 if (!shmem_mapping(file_inode(memfd)->i_mapping))
157 goto err_put_pages; 159 goto err;
158 seals = memfd_fcntl(memfd, F_GET_SEALS, 0); 160 seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
159 if (seals == -EINVAL || 161 if (seals == -EINVAL)
160 (seals & SEALS_WANTED) != SEALS_WANTED || 162 goto err;
163 ret = -EINVAL;
164 if ((seals & SEALS_WANTED) != SEALS_WANTED ||
161 (seals & SEALS_DENIED) != 0) 165 (seals & SEALS_DENIED) != 0)
162 goto err_put_pages; 166 goto err;
163 pgoff = list[i].offset >> PAGE_SHIFT; 167 pgoff = list[i].offset >> PAGE_SHIFT;
164 pgcnt = list[i].size >> PAGE_SHIFT; 168 pgcnt = list[i].size >> PAGE_SHIFT;
165 for (pgidx = 0; pgidx < pgcnt; pgidx++) { 169 for (pgidx = 0; pgidx < pgcnt; pgidx++) {
@@ -167,13 +171,13 @@ static long udmabuf_create(struct udmabuf_create_list *head,
167 file_inode(memfd)->i_mapping, pgoff + pgidx); 171 file_inode(memfd)->i_mapping, pgoff + pgidx);
168 if (IS_ERR(page)) { 172 if (IS_ERR(page)) {
169 ret = PTR_ERR(page); 173 ret = PTR_ERR(page);
170 goto err_put_pages; 174 goto err;
171 } 175 }
172 ubuf->pages[pgbuf++] = page; 176 ubuf->pages[pgbuf++] = page;
173 } 177 }
174 fput(memfd); 178 fput(memfd);
179 memfd = NULL;
175 } 180 }
176 memfd = NULL;
177 181
178 exp_info.ops = &udmabuf_ops; 182 exp_info.ops = &udmabuf_ops;
179 exp_info.size = ubuf->pagecount << PAGE_SHIFT; 183 exp_info.size = ubuf->pagecount << PAGE_SHIFT;
@@ -182,7 +186,7 @@ static long udmabuf_create(struct udmabuf_create_list *head,
182 buf = dma_buf_export(&exp_info); 186 buf = dma_buf_export(&exp_info);
183 if (IS_ERR(buf)) { 187 if (IS_ERR(buf)) {
184 ret = PTR_ERR(buf); 188 ret = PTR_ERR(buf);
185 goto err_put_pages; 189 goto err;
186 } 190 }
187 191
188 flags = 0; 192 flags = 0;
@@ -190,10 +194,9 @@ static long udmabuf_create(struct udmabuf_create_list *head,
190 flags |= O_CLOEXEC; 194 flags |= O_CLOEXEC;
191 return dma_buf_fd(buf, flags); 195 return dma_buf_fd(buf, flags);
192 196
193err_put_pages: 197err:
194 while (pgbuf > 0) 198 while (pgbuf > 0)
195 put_page(ubuf->pages[--pgbuf]); 199 put_page(ubuf->pages[--pgbuf]);
196err_free_ubuf:
197 if (memfd) 200 if (memfd)
198 fput(memfd); 201 fput(memfd);
199 kfree(ubuf->pages); 202 kfree(ubuf->pages);
@@ -208,7 +211,7 @@ static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
208 struct udmabuf_create_item list; 211 struct udmabuf_create_item list;
209 212
210 if (copy_from_user(&create, (void __user *)arg, 213 if (copy_from_user(&create, (void __user *)arg,
211 sizeof(struct udmabuf_create))) 214 sizeof(create)))
212 return -EFAULT; 215 return -EFAULT;
213 216
214 head.flags = create.flags; 217 head.flags = create.flags;
@@ -229,7 +232,7 @@ static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
229 232
230 if (copy_from_user(&head, (void __user *)arg, sizeof(head))) 233 if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
231 return -EFAULT; 234 return -EFAULT;
232 if (head.count > 1024) 235 if (head.count > list_limit)
233 return -EINVAL; 236 return -EINVAL;
234 lsize = sizeof(struct udmabuf_create_item) * head.count; 237 lsize = sizeof(struct udmabuf_create_item) * head.count;
235 list = memdup_user((void __user *)(arg + sizeof(head)), lsize); 238 list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
@@ -254,7 +257,7 @@ static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
254 ret = udmabuf_ioctl_create_list(filp, arg); 257 ret = udmabuf_ioctl_create_list(filp, arg);
255 break; 258 break;
256 default: 259 default:
257 ret = -EINVAL; 260 ret = -ENOTTY;
258 break; 261 break;
259 } 262 }
260 return ret; 263 return ret;
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a6771cef85e2..bc6a16a3c36e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
18 drm_encoder.o drm_mode_object.o drm_property.o \ 18 drm_encoder.o drm_mode_object.o drm_property.o \
19 drm_plane.o drm_color_mgmt.o drm_print.o \ 19 drm_plane.o drm_color_mgmt.o drm_print.o \
20 drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \ 20 drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
21 drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o 21 drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
22 drm_atomic_uapi.o
22 23
23drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o 24drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
24drm-$(CONFIG_DRM_VM) += drm_vm.o 25drm-$(CONFIG_DRM_VM) += drm_vm.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b6e9df11115d..8f05e28607e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1098,7 +1098,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
1098{ 1098{
1099 int r; 1099 int r;
1100 struct dma_fence *fence; 1100 struct dma_fence *fence;
1101 r = drm_syncobj_find_fence(p->filp, handle, &fence); 1101 r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
1102 if (r) 1102 if (r)
1103 return r; 1103 return r;
1104 1104
@@ -1187,7 +1187,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1187 int i; 1187 int i;
1188 1188
1189 for (i = 0; i < p->num_post_dep_syncobjs; ++i) 1189 for (i = 0; i < p->num_post_dep_syncobjs; ++i)
1190 drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); 1190 drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
1191} 1191}
1192 1192
1193static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, 1193static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index eb7dfb65ef47..8d770641fcc4 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <drm/drmP.h> 9#include <drm/drmP.h>
10#include <drm/drm_atomic.h> 10#include <drm/drm_atomic.h>
11#include <drm/drm_atomic_uapi.h>
11#include <drm/drm_atomic_helper.h> 12#include <drm/drm_atomic_helper.h>
12#include <drm/drm_plane_helper.h> 13#include <drm/drm_plane_helper.h>
13#include <drm/armada_drm.h> 14#include <drm/armada_drm.h>
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 375bf92cd04f..b4f6bb521900 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -51,11 +51,6 @@ enum bochs_types {
51 BOCHS_UNKNOWN, 51 BOCHS_UNKNOWN,
52}; 52};
53 53
54struct bochs_framebuffer {
55 struct drm_framebuffer base;
56 struct drm_gem_object *obj;
57};
58
59struct bochs_device { 54struct bochs_device {
60 /* hw */ 55 /* hw */
61 void __iomem *mmio; 56 void __iomem *mmio;
@@ -88,15 +83,11 @@ struct bochs_device {
88 83
89 /* fbdev */ 84 /* fbdev */
90 struct { 85 struct {
91 struct bochs_framebuffer gfb; 86 struct drm_framebuffer *fb;
92 struct drm_fb_helper helper; 87 struct drm_fb_helper helper;
93 int size;
94 bool initialized;
95 } fb; 88 } fb;
96}; 89};
97 90
98#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
99
100struct bochs_bo { 91struct bochs_bo {
101 struct ttm_buffer_object bo; 92 struct ttm_buffer_object bo;
102 struct ttm_placement placement; 93 struct ttm_placement placement;
@@ -126,7 +117,7 @@ static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
126/* ---------------------------------------------------------------------- */ 117/* ---------------------------------------------------------------------- */
127 118
128/* bochs_hw.c */ 119/* bochs_hw.c */
129int bochs_hw_init(struct drm_device *dev, uint32_t flags); 120int bochs_hw_init(struct drm_device *dev);
130void bochs_hw_fini(struct drm_device *dev); 121void bochs_hw_fini(struct drm_device *dev);
131 122
132void bochs_hw_setmode(struct bochs_device *bochs, 123void bochs_hw_setmode(struct bochs_device *bochs,
@@ -148,15 +139,9 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
148int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, 139int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
149 uint32_t handle, uint64_t *offset); 140 uint32_t handle, uint64_t *offset);
150 141
151int bochs_framebuffer_init(struct drm_device *dev,
152 struct bochs_framebuffer *gfb,
153 const struct drm_mode_fb_cmd2 *mode_cmd,
154 struct drm_gem_object *obj);
155int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr); 142int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
156int bochs_bo_unpin(struct bochs_bo *bo); 143int bochs_bo_unpin(struct bochs_bo *bo);
157 144
158extern const struct drm_mode_config_funcs bochs_mode_funcs;
159
160/* bochs_kms.c */ 145/* bochs_kms.c */
161int bochs_kms_init(struct bochs_device *bochs); 146int bochs_kms_init(struct bochs_device *bochs);
162void bochs_kms_fini(struct bochs_device *bochs); 147void bochs_kms_fini(struct bochs_device *bochs);
@@ -164,3 +149,5 @@ void bochs_kms_fini(struct bochs_device *bochs);
164/* bochs_fbdev.c */ 149/* bochs_fbdev.c */
165int bochs_fbdev_init(struct bochs_device *bochs); 150int bochs_fbdev_init(struct bochs_device *bochs);
166void bochs_fbdev_fini(struct bochs_device *bochs); 151void bochs_fbdev_fini(struct bochs_device *bochs);
152
153extern const struct drm_mode_config_funcs bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index c61b40c72b62..f3dd66ae990a 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -35,7 +35,7 @@ static void bochs_unload(struct drm_device *dev)
35 dev->dev_private = NULL; 35 dev->dev_private = NULL;
36} 36}
37 37
38static int bochs_load(struct drm_device *dev, unsigned long flags) 38static int bochs_load(struct drm_device *dev)
39{ 39{
40 struct bochs_device *bochs; 40 struct bochs_device *bochs;
41 int ret; 41 int ret;
@@ -46,7 +46,7 @@ static int bochs_load(struct drm_device *dev, unsigned long flags)
46 dev->dev_private = bochs; 46 dev->dev_private = bochs;
47 bochs->dev = dev; 47 bochs->dev = dev;
48 48
49 ret = bochs_hw_init(dev, flags); 49 ret = bochs_hw_init(dev);
50 if (ret) 50 if (ret)
51 goto err; 51 goto err;
52 52
@@ -82,8 +82,6 @@ static const struct file_operations bochs_fops = {
82 82
83static struct drm_driver bochs_driver = { 83static struct drm_driver bochs_driver = {
84 .driver_features = DRIVER_GEM | DRIVER_MODESET, 84 .driver_features = DRIVER_GEM | DRIVER_MODESET,
85 .load = bochs_load,
86 .unload = bochs_unload,
87 .fops = &bochs_fops, 85 .fops = &bochs_fops,
88 .name = "bochs-drm", 86 .name = "bochs-drm",
89 .desc = "bochs dispi vga interface (qemu stdvga)", 87 .desc = "bochs dispi vga interface (qemu stdvga)",
@@ -107,11 +105,7 @@ static int bochs_pm_suspend(struct device *dev)
107 105
108 drm_kms_helper_poll_disable(drm_dev); 106 drm_kms_helper_poll_disable(drm_dev);
109 107
110 if (bochs->fb.initialized) { 108 drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 1);
111 console_lock();
112 drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
113 console_unlock();
114 }
115 109
116 return 0; 110 return 0;
117} 111}
@@ -124,11 +118,7 @@ static int bochs_pm_resume(struct device *dev)
124 118
125 drm_helper_resume_force_mode(drm_dev); 119 drm_helper_resume_force_mode(drm_dev);
126 120
127 if (bochs->fb.initialized) { 121 drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 0);
128 console_lock();
129 drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
130 console_unlock();
131 }
132 122
133 drm_kms_helper_poll_enable(drm_dev); 123 drm_kms_helper_poll_enable(drm_dev);
134 return 0; 124 return 0;
@@ -146,6 +136,7 @@ static const struct dev_pm_ops bochs_pm_ops = {
146static int bochs_pci_probe(struct pci_dev *pdev, 136static int bochs_pci_probe(struct pci_dev *pdev,
147 const struct pci_device_id *ent) 137 const struct pci_device_id *ent)
148{ 138{
139 struct drm_device *dev;
149 unsigned long fbsize; 140 unsigned long fbsize;
150 int ret; 141 int ret;
151 142
@@ -159,14 +150,37 @@ static int bochs_pci_probe(struct pci_dev *pdev,
159 if (ret) 150 if (ret)
160 return ret; 151 return ret;
161 152
162 return drm_get_pci_dev(pdev, ent, &bochs_driver); 153 dev = drm_dev_alloc(&bochs_driver, &pdev->dev);
154 if (IS_ERR(dev))
155 return PTR_ERR(dev);
156
157 dev->pdev = pdev;
158 pci_set_drvdata(pdev, dev);
159
160 ret = bochs_load(dev);
161 if (ret)
162 goto err_free_dev;
163
164 ret = drm_dev_register(dev, 0);
165 if (ret)
166 goto err_unload;
167
168 return ret;
169
170err_unload:
171 bochs_unload(dev);
172err_free_dev:
173 drm_dev_put(dev);
174 return ret;
163} 175}
164 176
165static void bochs_pci_remove(struct pci_dev *pdev) 177static void bochs_pci_remove(struct pci_dev *pdev)
166{ 178{
167 struct drm_device *dev = pci_get_drvdata(pdev); 179 struct drm_device *dev = pci_get_drvdata(pdev);
168 180
169 drm_put_dev(dev); 181 drm_dev_unregister(dev);
182 bochs_unload(dev);
183 drm_dev_put(dev);
170} 184}
171 185
172static const struct pci_device_id bochs_pci_tbl[] = { 186static const struct pci_device_id bochs_pci_tbl[] = {
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 14eb8d0d5a00..8f4d6c052f7b 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include "bochs.h" 8#include "bochs.h"
9#include <drm/drm_gem_framebuffer_helper.h>
9 10
10/* ---------------------------------------------------------------------- */ 11/* ---------------------------------------------------------------------- */
11 12
@@ -13,9 +14,7 @@ static int bochsfb_mmap(struct fb_info *info,
13 struct vm_area_struct *vma) 14 struct vm_area_struct *vma)
14{ 15{
15 struct drm_fb_helper *fb_helper = info->par; 16 struct drm_fb_helper *fb_helper = info->par;
16 struct bochs_device *bochs = 17 struct bochs_bo *bo = gem_to_bochs_bo(fb_helper->fb->obj[0]);
17 container_of(fb_helper, struct bochs_device, fb.helper);
18 struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj);
19 18
20 return ttm_fbdev_mmap(vma, &bo->bo); 19 return ttm_fbdev_mmap(vma, &bo->bo);
21} 20}
@@ -101,19 +100,20 @@ static int bochsfb_create(struct drm_fb_helper *helper,
101 100
102 /* init fb device */ 101 /* init fb device */
103 info = drm_fb_helper_alloc_fbi(helper); 102 info = drm_fb_helper_alloc_fbi(helper);
104 if (IS_ERR(info)) 103 if (IS_ERR(info)) {
104 DRM_ERROR("Failed to allocate fbi: %ld\n", PTR_ERR(info));
105 return PTR_ERR(info); 105 return PTR_ERR(info);
106 }
106 107
107 info->par = &bochs->fb.helper; 108 info->par = &bochs->fb.helper;
108 109
109 ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj); 110 fb = drm_gem_fbdev_fb_create(bochs->dev, sizes, 0, gobj, NULL);
110 if (ret) 111 if (IS_ERR(fb)) {
111 return ret; 112 DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
112 113 return PTR_ERR(fb);
113 bochs->fb.size = size; 114 }
114 115
115 /* setup helper */ 116 /* setup helper */
116 fb = &bochs->fb.gfb.base;
117 bochs->fb.helper.fb = fb; 117 bochs->fb.helper.fb = fb;
118 118
119 strcpy(info->fix.id, "bochsdrmfb"); 119 strcpy(info->fix.id, "bochsdrmfb");
@@ -130,27 +130,6 @@ static int bochsfb_create(struct drm_fb_helper *helper,
130 drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node); 130 drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
131 info->fix.smem_start = 0; 131 info->fix.smem_start = 0;
132 info->fix.smem_len = size; 132 info->fix.smem_len = size;
133
134 bochs->fb.initialized = true;
135 return 0;
136}
137
138static int bochs_fbdev_destroy(struct bochs_device *bochs)
139{
140 struct bochs_framebuffer *gfb = &bochs->fb.gfb;
141
142 DRM_DEBUG_DRIVER("\n");
143
144 drm_fb_helper_unregister_fbi(&bochs->fb.helper);
145
146 if (gfb->obj) {
147 drm_gem_object_unreference_unlocked(gfb->obj);
148 gfb->obj = NULL;
149 }
150
151 drm_framebuffer_unregister_private(&gfb->base);
152 drm_framebuffer_cleanup(&gfb->base);
153
154 return 0; 133 return 0;
155} 134}
156 135
@@ -158,41 +137,17 @@ static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
158 .fb_probe = bochsfb_create, 137 .fb_probe = bochsfb_create,
159}; 138};
160 139
140const struct drm_mode_config_funcs bochs_mode_funcs = {
141 .fb_create = drm_gem_fb_create,
142};
143
161int bochs_fbdev_init(struct bochs_device *bochs) 144int bochs_fbdev_init(struct bochs_device *bochs)
162{ 145{
163 int ret; 146 return drm_fb_helper_fbdev_setup(bochs->dev, &bochs->fb.helper,
164 147 &bochs_fb_helper_funcs, 32, 1);
165 drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
166 &bochs_fb_helper_funcs);
167
168 ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1);
169 if (ret)
170 return ret;
171
172 ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
173 if (ret)
174 goto fini;
175
176 drm_helper_disable_unused_functions(bochs->dev);
177
178 ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32);
179 if (ret)
180 goto fini;
181
182 return 0;
183
184fini:
185 drm_fb_helper_fini(&bochs->fb.helper);
186 return ret;
187} 148}
188 149
189void bochs_fbdev_fini(struct bochs_device *bochs) 150void bochs_fbdev_fini(struct bochs_device *bochs)
190{ 151{
191 if (bochs->fb.initialized) 152 drm_fb_helper_fbdev_teardown(bochs->dev);
192 bochs_fbdev_destroy(bochs);
193
194 if (bochs->fb.helper.fbdev)
195 drm_fb_helper_fini(&bochs->fb.helper);
196
197 bochs->fb.initialized = false;
198} 153}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a39b0343c197..16e4f1caccca 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -47,7 +47,7 @@ static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
47 } 47 }
48} 48}
49 49
50int bochs_hw_init(struct drm_device *dev, uint32_t flags) 50int bochs_hw_init(struct drm_device *dev)
51{ 51{
52 struct bochs_device *bochs = dev->dev_private; 52 struct bochs_device *bochs = dev->dev_private;
53 struct pci_dev *pdev = dev->pdev; 53 struct pci_dev *pdev = dev->pdev;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index ca5a9afdd5cf..ea9a43d31bf1 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -35,14 +35,12 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
35{ 35{
36 struct bochs_device *bochs = 36 struct bochs_device *bochs =
37 container_of(crtc, struct bochs_device, crtc); 37 container_of(crtc, struct bochs_device, crtc);
38 struct bochs_framebuffer *bochs_fb;
39 struct bochs_bo *bo; 38 struct bochs_bo *bo;
40 u64 gpu_addr = 0; 39 u64 gpu_addr = 0;
41 int ret; 40 int ret;
42 41
43 if (old_fb) { 42 if (old_fb) {
44 bochs_fb = to_bochs_framebuffer(old_fb); 43 bo = gem_to_bochs_bo(old_fb->obj[0]);
45 bo = gem_to_bochs_bo(bochs_fb->obj);
46 ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 44 ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
47 if (ret) { 45 if (ret) {
48 DRM_ERROR("failed to reserve old_fb bo\n"); 46 DRM_ERROR("failed to reserve old_fb bo\n");
@@ -55,8 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
55 if (WARN_ON(crtc->primary->fb == NULL)) 53 if (WARN_ON(crtc->primary->fb == NULL))
56 return -EINVAL; 54 return -EINVAL;
57 55
58 bochs_fb = to_bochs_framebuffer(crtc->primary->fb); 56 bo = gem_to_bochs_bo(crtc->primary->fb->obj[0]);
59 bo = gem_to_bochs_bo(bochs_fb->obj);
60 ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 57 ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
61 if (ret) 58 if (ret)
62 return ret; 59 return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index c9c7097030ca..a61c1ecb2bdc 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -457,77 +457,3 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
457 drm_gem_object_unreference_unlocked(obj); 457 drm_gem_object_unreference_unlocked(obj);
458 return 0; 458 return 0;
459} 459}
460
461/* ---------------------------------------------------------------------- */
462
463static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
464{
465 struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
466
467 drm_gem_object_unreference_unlocked(bochs_fb->obj);
468 drm_framebuffer_cleanup(fb);
469 kfree(fb);
470}
471
472static const struct drm_framebuffer_funcs bochs_fb_funcs = {
473 .destroy = bochs_user_framebuffer_destroy,
474};
475
476int bochs_framebuffer_init(struct drm_device *dev,
477 struct bochs_framebuffer *gfb,
478 const struct drm_mode_fb_cmd2 *mode_cmd,
479 struct drm_gem_object *obj)
480{
481 int ret;
482
483 drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
484 gfb->obj = obj;
485 ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
486 if (ret) {
487 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
488 return ret;
489 }
490 return 0;
491}
492
493static struct drm_framebuffer *
494bochs_user_framebuffer_create(struct drm_device *dev,
495 struct drm_file *filp,
496 const struct drm_mode_fb_cmd2 *mode_cmd)
497{
498 struct drm_gem_object *obj;
499 struct bochs_framebuffer *bochs_fb;
500 int ret;
501
502 DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
503 mode_cmd->width, mode_cmd->height,
504 (mode_cmd->pixel_format) & 0xff,
505 (mode_cmd->pixel_format >> 8) & 0xff,
506 (mode_cmd->pixel_format >> 16) & 0xff,
507 (mode_cmd->pixel_format >> 24) & 0xff);
508
509 if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
510 return ERR_PTR(-ENOENT);
511
512 obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
513 if (obj == NULL)
514 return ERR_PTR(-ENOENT);
515
516 bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
517 if (!bochs_fb) {
518 drm_gem_object_unreference_unlocked(obj);
519 return ERR_PTR(-ENOMEM);
520 }
521
522 ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
523 if (ret) {
524 drm_gem_object_unreference_unlocked(obj);
525 kfree(bochs_fb);
526 return ERR_PTR(ret);
527 }
528 return &bochs_fb->base;
529}
530
531const struct drm_mode_config_funcs bochs_mode_funcs = {
532 .fb_create = bochs_user_framebuffer_create,
533};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index d68986cea132..2f21d3b6850b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -554,7 +554,7 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
554 if (retval < 0) 554 if (retval < 0)
555 return retval; 555 return retval;
556 556
557 dev_info(dp->dev, "Link Training Clock Recovery success\n"); 557 dev_dbg(dp->dev, "Link Training Clock Recovery success\n");
558 dp->link_train.lt_state = EQUALIZER_TRAINING; 558 dp->link_train.lt_state = EQUALIZER_TRAINING;
559 } else { 559 } else {
560 for (lane = 0; lane < lane_count; lane++) { 560 for (lane = 0; lane < lane_count; lane++) {
@@ -634,7 +634,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
634 if (retval < 0) 634 if (retval < 0)
635 return retval; 635 return retval;
636 636
637 dev_info(dp->dev, "Link Training success!\n"); 637 dev_dbg(dp->dev, "Link Training success!\n");
638 analogix_dp_get_link_bandwidth(dp, &reg); 638 analogix_dp_get_link_bandwidth(dp, &reg);
639 dp->link_train.link_rate = reg; 639 dp->link_train.link_rate = reg;
640 dev_dbg(dp->dev, "final bandwidth = %.2x\n", 640 dev_dbg(dp->dev, "final bandwidth = %.2x\n",
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index d0478abc01bd..7ada75919756 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -28,6 +28,7 @@
28 28
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/drm_atomic.h> 30#include <drm/drm_atomic.h>
31#include <drm/drm_atomic_uapi.h>
31#include <drm/drm_mode.h> 32#include <drm/drm_mode.h>
32#include <drm/drm_print.h> 33#include <drm/drm_print.h>
33#include <drm/drm_writeback.h> 34#include <drm/drm_writeback.h>
@@ -309,350 +310,6 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
309} 310}
310EXPORT_SYMBOL(drm_atomic_get_crtc_state); 311EXPORT_SYMBOL(drm_atomic_get_crtc_state);
311 312
312static void set_out_fence_for_crtc(struct drm_atomic_state *state,
313 struct drm_crtc *crtc, s32 __user *fence_ptr)
314{
315 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
316}
317
318static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
319 struct drm_crtc *crtc)
320{
321 s32 __user *fence_ptr;
322
323 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
324 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
325
326 return fence_ptr;
327}
328
329static int set_out_fence_for_connector(struct drm_atomic_state *state,
330 struct drm_connector *connector,
331 s32 __user *fence_ptr)
332{
333 unsigned int index = drm_connector_index(connector);
334
335 if (!fence_ptr)
336 return 0;
337
338 if (put_user(-1, fence_ptr))
339 return -EFAULT;
340
341 state->connectors[index].out_fence_ptr = fence_ptr;
342
343 return 0;
344}
345
346static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
347 struct drm_connector *connector)
348{
349 unsigned int index = drm_connector_index(connector);
350 s32 __user *fence_ptr;
351
352 fence_ptr = state->connectors[index].out_fence_ptr;
353 state->connectors[index].out_fence_ptr = NULL;
354
355 return fence_ptr;
356}
357
358/**
359 * drm_atomic_set_mode_for_crtc - set mode for CRTC
360 * @state: the CRTC whose incoming state to update
361 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
362 *
363 * Set a mode (originating from the kernel) on the desired CRTC state and update
364 * the enable property.
365 *
366 * RETURNS:
367 * Zero on success, error code on failure. Cannot return -EDEADLK.
368 */
369int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
370 const struct drm_display_mode *mode)
371{
372 struct drm_crtc *crtc = state->crtc;
373 struct drm_mode_modeinfo umode;
374
375 /* Early return for no change. */
376 if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
377 return 0;
378
379 drm_property_blob_put(state->mode_blob);
380 state->mode_blob = NULL;
381
382 if (mode) {
383 drm_mode_convert_to_umode(&umode, mode);
384 state->mode_blob =
385 drm_property_create_blob(state->crtc->dev,
386 sizeof(umode),
387 &umode);
388 if (IS_ERR(state->mode_blob))
389 return PTR_ERR(state->mode_blob);
390
391 drm_mode_copy(&state->mode, mode);
392 state->enable = true;
393 DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
394 mode->name, crtc->base.id, crtc->name, state);
395 } else {
396 memset(&state->mode, 0, sizeof(state->mode));
397 state->enable = false;
398 DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
399 crtc->base.id, crtc->name, state);
400 }
401
402 return 0;
403}
404EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
405
406/**
407 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
408 * @state: the CRTC whose incoming state to update
409 * @blob: pointer to blob property to use for mode
410 *
411 * Set a mode (originating from a blob property) on the desired CRTC state.
412 * This function will take a reference on the blob property for the CRTC state,
413 * and release the reference held on the state's existing mode property, if any
414 * was set.
415 *
416 * RETURNS:
417 * Zero on success, error code on failure. Cannot return -EDEADLK.
418 */
419int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
420 struct drm_property_blob *blob)
421{
422 struct drm_crtc *crtc = state->crtc;
423
424 if (blob == state->mode_blob)
425 return 0;
426
427 drm_property_blob_put(state->mode_blob);
428 state->mode_blob = NULL;
429
430 memset(&state->mode, 0, sizeof(state->mode));
431
432 if (blob) {
433 int ret;
434
435 if (blob->length != sizeof(struct drm_mode_modeinfo)) {
436 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
437 crtc->base.id, crtc->name,
438 blob->length);
439 return -EINVAL;
440 }
441
442 ret = drm_mode_convert_umode(crtc->dev,
443 &state->mode, blob->data);
444 if (ret) {
445 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
446 crtc->base.id, crtc->name,
447 ret, drm_get_mode_status_name(state->mode.status));
448 drm_mode_debug_printmodeline(&state->mode);
449 return -EINVAL;
450 }
451
452 state->mode_blob = drm_property_blob_get(blob);
453 state->enable = true;
454 DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
455 state->mode.name, crtc->base.id, crtc->name,
456 state);
457 } else {
458 state->enable = false;
459 DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
460 crtc->base.id, crtc->name, state);
461 }
462
463 return 0;
464}
465EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
466
467/**
468 * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
469 * @dev: DRM device
470 * @blob: a pointer to the member blob to be replaced
471 * @blob_id: ID of the new blob
472 * @expected_size: total expected size of the blob data (in bytes)
473 * @expected_elem_size: expected element size of the blob data (in bytes)
474 * @replaced: did the blob get replaced?
475 *
476 * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero
477 * @blob becomes NULL.
478 *
479 * If @expected_size is positive the new blob length is expected to be equal
480 * to @expected_size bytes. If @expected_elem_size is positive the new blob
481 * length is expected to be a multiple of @expected_elem_size bytes. Otherwise
482 * an error is returned.
483 *
484 * @replaced will indicate to the caller whether the blob was replaced or not.
485 * If the old and new blobs were in fact the same blob @replaced will be false
486 * otherwise it will be true.
487 *
488 * RETURNS:
489 * Zero on success, error code on failure.
490 */
491static int
492drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
493 struct drm_property_blob **blob,
494 uint64_t blob_id,
495 ssize_t expected_size,
496 ssize_t expected_elem_size,
497 bool *replaced)
498{
499 struct drm_property_blob *new_blob = NULL;
500
501 if (blob_id != 0) {
502 new_blob = drm_property_lookup_blob(dev, blob_id);
503 if (new_blob == NULL)
504 return -EINVAL;
505
506 if (expected_size > 0 &&
507 new_blob->length != expected_size) {
508 drm_property_blob_put(new_blob);
509 return -EINVAL;
510 }
511 if (expected_elem_size > 0 &&
512 new_blob->length % expected_elem_size != 0) {
513 drm_property_blob_put(new_blob);
514 return -EINVAL;
515 }
516 }
517
518 *replaced |= drm_property_replace_blob(blob, new_blob);
519 drm_property_blob_put(new_blob);
520
521 return 0;
522}
523
524/**
525 * drm_atomic_crtc_set_property - set property on CRTC
526 * @crtc: the drm CRTC to set a property on
527 * @state: the state object to update with the new property value
528 * @property: the property to set
529 * @val: the new property value
530 *
531 * This function handles generic/core properties and calls out to driver's
532 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
533 * consistent behavior you must call this function rather than the driver hook
534 * directly.
535 *
536 * RETURNS:
537 * Zero on success, error code on failure
538 */
539int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
540 struct drm_crtc_state *state, struct drm_property *property,
541 uint64_t val)
542{
543 struct drm_device *dev = crtc->dev;
544 struct drm_mode_config *config = &dev->mode_config;
545 bool replaced = false;
546 int ret;
547
548 if (property == config->prop_active)
549 state->active = val;
550 else if (property == config->prop_mode_id) {
551 struct drm_property_blob *mode =
552 drm_property_lookup_blob(dev, val);
553 ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
554 drm_property_blob_put(mode);
555 return ret;
556 } else if (property == config->degamma_lut_property) {
557 ret = drm_atomic_replace_property_blob_from_id(dev,
558 &state->degamma_lut,
559 val,
560 -1, sizeof(struct drm_color_lut),
561 &replaced);
562 state->color_mgmt_changed |= replaced;
563 return ret;
564 } else if (property == config->ctm_property) {
565 ret = drm_atomic_replace_property_blob_from_id(dev,
566 &state->ctm,
567 val,
568 sizeof(struct drm_color_ctm), -1,
569 &replaced);
570 state->color_mgmt_changed |= replaced;
571 return ret;
572 } else if (property == config->gamma_lut_property) {
573 ret = drm_atomic_replace_property_blob_from_id(dev,
574 &state->gamma_lut,
575 val,
576 -1, sizeof(struct drm_color_lut),
577 &replaced);
578 state->color_mgmt_changed |= replaced;
579 return ret;
580 } else if (property == config->prop_out_fence_ptr) {
581 s32 __user *fence_ptr = u64_to_user_ptr(val);
582
583 if (!fence_ptr)
584 return 0;
585
586 if (put_user(-1, fence_ptr))
587 return -EFAULT;
588
589 set_out_fence_for_crtc(state->state, crtc, fence_ptr);
590 } else if (crtc->funcs->atomic_set_property) {
591 return crtc->funcs->atomic_set_property(crtc, state, property, val);
592 } else {
593 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
594 crtc->base.id, crtc->name,
595 property->base.id, property->name);
596 return -EINVAL;
597 }
598
599 return 0;
600}
601EXPORT_SYMBOL(drm_atomic_crtc_set_property);
602
603/**
604 * drm_atomic_crtc_get_property - get property value from CRTC state
605 * @crtc: the drm CRTC to set a property on
606 * @state: the state object to get the property value from
607 * @property: the property to set
608 * @val: return location for the property value
609 *
610 * This function handles generic/core properties and calls out to driver's
611 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
612 * consistent behavior you must call this function rather than the driver hook
613 * directly.
614 *
615 * RETURNS:
616 * Zero on success, error code on failure
617 */
618static int
619drm_atomic_crtc_get_property(struct drm_crtc *crtc,
620 const struct drm_crtc_state *state,
621 struct drm_property *property, uint64_t *val)
622{
623 struct drm_device *dev = crtc->dev;
624 struct drm_mode_config *config = &dev->mode_config;
625
626 if (property == config->prop_active)
627 *val = state->active;
628 else if (property == config->prop_mode_id)
629 *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
630 else if (property == config->degamma_lut_property)
631 *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
632 else if (property == config->ctm_property)
633 *val = (state->ctm) ? state->ctm->base.id : 0;
634 else if (property == config->gamma_lut_property)
635 *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
636 else if (property == config->prop_out_fence_ptr)
637 *val = 0;
638 else if (crtc->funcs->atomic_get_property)
639 return crtc->funcs->atomic_get_property(crtc, state, property, val);
640 else
641 return -EINVAL;
642
643 return 0;
644}
645
646/**
647 * drm_atomic_crtc_check - check crtc state
648 * @crtc: crtc to check
649 * @state: crtc state to check
650 *
651 * Provides core sanity checks for crtc state.
652 *
653 * RETURNS:
654 * Zero on success, error code on failure
655 */
656static int drm_atomic_crtc_check(struct drm_crtc *crtc, 313static int drm_atomic_crtc_check(struct drm_crtc *crtc,
657 struct drm_crtc_state *state) 314 struct drm_crtc_state *state)
658{ 315{
@@ -728,16 +385,6 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
728 crtc->funcs->atomic_print_state(p, state); 385 crtc->funcs->atomic_print_state(p, state);
729} 386}
730 387
731/**
732 * drm_atomic_connector_check - check connector state
733 * @connector: connector to check
734 * @state: connector state to check
735 *
736 * Provides core sanity checks for connector state.
737 *
738 * RETURNS:
739 * Zero on success, error code on failure
740 */
741static int drm_atomic_connector_check(struct drm_connector *connector, 388static int drm_atomic_connector_check(struct drm_connector *connector,
742 struct drm_connector_state *state) 389 struct drm_connector_state *state)
743{ 390{
@@ -836,159 +483,6 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
836} 483}
837EXPORT_SYMBOL(drm_atomic_get_plane_state); 484EXPORT_SYMBOL(drm_atomic_get_plane_state);
838 485
839/**
840 * drm_atomic_plane_set_property - set property on plane
841 * @plane: the drm plane to set a property on
842 * @state: the state object to update with the new property value
843 * @property: the property to set
844 * @val: the new property value
845 *
846 * This function handles generic/core properties and calls out to driver's
847 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
848 * consistent behavior you must call this function rather than the driver hook
849 * directly.
850 *
851 * RETURNS:
852 * Zero on success, error code on failure
853 */
854static int drm_atomic_plane_set_property(struct drm_plane *plane,
855 struct drm_plane_state *state, struct drm_property *property,
856 uint64_t val)
857{
858 struct drm_device *dev = plane->dev;
859 struct drm_mode_config *config = &dev->mode_config;
860
861 if (property == config->prop_fb_id) {
862 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
863 drm_atomic_set_fb_for_plane(state, fb);
864 if (fb)
865 drm_framebuffer_put(fb);
866 } else if (property == config->prop_in_fence_fd) {
867 if (state->fence)
868 return -EINVAL;
869
870 if (U642I64(val) == -1)
871 return 0;
872
873 state->fence = sync_file_get_fence(val);
874 if (!state->fence)
875 return -EINVAL;
876
877 } else if (property == config->prop_crtc_id) {
878 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
879 return drm_atomic_set_crtc_for_plane(state, crtc);
880 } else if (property == config->prop_crtc_x) {
881 state->crtc_x = U642I64(val);
882 } else if (property == config->prop_crtc_y) {
883 state->crtc_y = U642I64(val);
884 } else if (property == config->prop_crtc_w) {
885 state->crtc_w = val;
886 } else if (property == config->prop_crtc_h) {
887 state->crtc_h = val;
888 } else if (property == config->prop_src_x) {
889 state->src_x = val;
890 } else if (property == config->prop_src_y) {
891 state->src_y = val;
892 } else if (property == config->prop_src_w) {
893 state->src_w = val;
894 } else if (property == config->prop_src_h) {
895 state->src_h = val;
896 } else if (property == plane->alpha_property) {
897 state->alpha = val;
898 } else if (property == plane->blend_mode_property) {
899 state->pixel_blend_mode = val;
900 } else if (property == plane->rotation_property) {
901 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
902 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
903 plane->base.id, plane->name, val);
904 return -EINVAL;
905 }
906 state->rotation = val;
907 } else if (property == plane->zpos_property) {
908 state->zpos = val;
909 } else if (property == plane->color_encoding_property) {
910 state->color_encoding = val;
911 } else if (property == plane->color_range_property) {
912 state->color_range = val;
913 } else if (plane->funcs->atomic_set_property) {
914 return plane->funcs->atomic_set_property(plane, state,
915 property, val);
916 } else {
 917		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
918 plane->base.id, plane->name,
919 property->base.id, property->name);
920 return -EINVAL;
921 }
922
923 return 0;
924}
925
926/**
927 * drm_atomic_plane_get_property - get property value from plane state
 928 * @plane: the drm plane to get a property value from
 929 * @state: the state object to get the property value from
 930 * @property: the property to get
931 * @val: return location for the property value
932 *
933 * This function handles generic/core properties and calls out to driver's
934 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
935 * consistent behavior you must call this function rather than the driver hook
936 * directly.
937 *
938 * RETURNS:
939 * Zero on success, error code on failure
940 */
941static int
942drm_atomic_plane_get_property(struct drm_plane *plane,
943 const struct drm_plane_state *state,
944 struct drm_property *property, uint64_t *val)
945{
946 struct drm_device *dev = plane->dev;
947 struct drm_mode_config *config = &dev->mode_config;
948
949 if (property == config->prop_fb_id) {
950 *val = (state->fb) ? state->fb->base.id : 0;
951 } else if (property == config->prop_in_fence_fd) {
952 *val = -1;
953 } else if (property == config->prop_crtc_id) {
954 *val = (state->crtc) ? state->crtc->base.id : 0;
955 } else if (property == config->prop_crtc_x) {
956 *val = I642U64(state->crtc_x);
957 } else if (property == config->prop_crtc_y) {
958 *val = I642U64(state->crtc_y);
959 } else if (property == config->prop_crtc_w) {
960 *val = state->crtc_w;
961 } else if (property == config->prop_crtc_h) {
962 *val = state->crtc_h;
963 } else if (property == config->prop_src_x) {
964 *val = state->src_x;
965 } else if (property == config->prop_src_y) {
966 *val = state->src_y;
967 } else if (property == config->prop_src_w) {
968 *val = state->src_w;
969 } else if (property == config->prop_src_h) {
970 *val = state->src_h;
971 } else if (property == plane->alpha_property) {
972 *val = state->alpha;
973 } else if (property == plane->blend_mode_property) {
974 *val = state->pixel_blend_mode;
975 } else if (property == plane->rotation_property) {
976 *val = state->rotation;
977 } else if (property == plane->zpos_property) {
978 *val = state->zpos;
979 } else if (property == plane->color_encoding_property) {
980 *val = state->color_encoding;
981 } else if (property == plane->color_range_property) {
982 *val = state->color_range;
983 } else if (plane->funcs->atomic_get_property) {
984 return plane->funcs->atomic_get_property(plane, state, property, val);
985 } else {
986 return -EINVAL;
987 }
988
989 return 0;
990}
991
992static bool 486static bool
993plane_switching_crtc(struct drm_atomic_state *state, 487plane_switching_crtc(struct drm_atomic_state *state,
994 struct drm_plane *plane, 488 struct drm_plane *plane,
@@ -1328,111 +822,6 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
1328} 822}
1329EXPORT_SYMBOL(drm_atomic_get_connector_state); 823EXPORT_SYMBOL(drm_atomic_get_connector_state);
1330 824
1331/**
1332 * drm_atomic_connector_set_property - set property on connector.
1333 * @connector: the drm connector to set a property on
1334 * @state: the state object to update with the new property value
1335 * @property: the property to set
1336 * @val: the new property value
1337 *
1338 * This function handles generic/core properties and calls out to driver's
1339 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
1340 * consistent behavior you must call this function rather than the driver hook
1341 * directly.
1342 *
1343 * RETURNS:
1344 * Zero on success, error code on failure
1345 */
1346static int drm_atomic_connector_set_property(struct drm_connector *connector,
1347 struct drm_connector_state *state, struct drm_property *property,
1348 uint64_t val)
1349{
1350 struct drm_device *dev = connector->dev;
1351 struct drm_mode_config *config = &dev->mode_config;
1352
1353 if (property == config->prop_crtc_id) {
1354 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
1355 return drm_atomic_set_crtc_for_connector(state, crtc);
1356 } else if (property == config->dpms_property) {
1357 /* setting DPMS property requires special handling, which
1358 * is done in legacy setprop path for us. Disallow (for
1359 * now?) atomic writes to DPMS property:
1360 */
1361 return -EINVAL;
1362 } else if (property == config->tv_select_subconnector_property) {
1363 state->tv.subconnector = val;
1364 } else if (property == config->tv_left_margin_property) {
1365 state->tv.margins.left = val;
1366 } else if (property == config->tv_right_margin_property) {
1367 state->tv.margins.right = val;
1368 } else if (property == config->tv_top_margin_property) {
1369 state->tv.margins.top = val;
1370 } else if (property == config->tv_bottom_margin_property) {
1371 state->tv.margins.bottom = val;
1372 } else if (property == config->tv_mode_property) {
1373 state->tv.mode = val;
1374 } else if (property == config->tv_brightness_property) {
1375 state->tv.brightness = val;
1376 } else if (property == config->tv_contrast_property) {
1377 state->tv.contrast = val;
1378 } else if (property == config->tv_flicker_reduction_property) {
1379 state->tv.flicker_reduction = val;
1380 } else if (property == config->tv_overscan_property) {
1381 state->tv.overscan = val;
1382 } else if (property == config->tv_saturation_property) {
1383 state->tv.saturation = val;
1384 } else if (property == config->tv_hue_property) {
1385 state->tv.hue = val;
1386 } else if (property == config->link_status_property) {
 1387		/* Never downgrade from GOOD to BAD on userspace's request here;
 1388		 * only hw issues can do that.
 1389		 *
 1390		 * For an atomic property userspace doesn't need to be able
 1391		 * to understand all the properties, but needs to be able to
 1392		 * restore the state it wants on VT switch. So if userspace
 1393		 * tries to change the link_status from GOOD to BAD, the driver
 1394		 * silently rejects it and returns 0. This prevents userspace
 1395		 * from accidentally breaking the display when it restores the
1396 * state.
1397 */
1398 if (state->link_status != DRM_LINK_STATUS_GOOD)
1399 state->link_status = val;
1400 } else if (property == config->aspect_ratio_property) {
1401 state->picture_aspect_ratio = val;
1402 } else if (property == config->content_type_property) {
1403 state->content_type = val;
1404 } else if (property == connector->scaling_mode_property) {
1405 state->scaling_mode = val;
1406 } else if (property == connector->content_protection_property) {
1407 if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1408 DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
1409 return -EINVAL;
1410 }
1411 state->content_protection = val;
1412 } else if (property == config->writeback_fb_id_property) {
1413 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
1414 int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
1415 if (fb)
1416 drm_framebuffer_put(fb);
1417 return ret;
1418 } else if (property == config->writeback_out_fence_ptr_property) {
1419 s32 __user *fence_ptr = u64_to_user_ptr(val);
1420
1421 return set_out_fence_for_connector(state->state, connector,
1422 fence_ptr);
1423 } else if (connector->funcs->atomic_set_property) {
1424 return connector->funcs->atomic_set_property(connector,
1425 state, property, val);
1426 } else {
 1427		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
1428 connector->base.id, connector->name,
1429 property->base.id, property->name);
1430 return -EINVAL;
1431 }
1432
1433 return 0;
1434}
1435
1436static void drm_atomic_connector_print_state(struct drm_printer *p, 825static void drm_atomic_connector_print_state(struct drm_printer *p,
1437 const struct drm_connector_state *state) 826 const struct drm_connector_state *state)
1438{ 827{
@@ -1450,360 +839,6 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
1450} 839}
1451 840
1452/** 841/**
1453 * drm_atomic_connector_get_property - get property value from connector state
 1454 * @connector: the drm connector to get a property value from
 1455 * @state: the state object to get the property value from
 1456 * @property: the property to get
1457 * @val: return location for the property value
1458 *
1459 * This function handles generic/core properties and calls out to driver's
1460 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
1461 * consistent behavior you must call this function rather than the driver hook
1462 * directly.
1463 *
1464 * RETURNS:
1465 * Zero on success, error code on failure
1466 */
1467static int
1468drm_atomic_connector_get_property(struct drm_connector *connector,
1469 const struct drm_connector_state *state,
1470 struct drm_property *property, uint64_t *val)
1471{
1472 struct drm_device *dev = connector->dev;
1473 struct drm_mode_config *config = &dev->mode_config;
1474
1475 if (property == config->prop_crtc_id) {
1476 *val = (state->crtc) ? state->crtc->base.id : 0;
1477 } else if (property == config->dpms_property) {
1478 *val = connector->dpms;
1479 } else if (property == config->tv_select_subconnector_property) {
1480 *val = state->tv.subconnector;
1481 } else if (property == config->tv_left_margin_property) {
1482 *val = state->tv.margins.left;
1483 } else if (property == config->tv_right_margin_property) {
1484 *val = state->tv.margins.right;
1485 } else if (property == config->tv_top_margin_property) {
1486 *val = state->tv.margins.top;
1487 } else if (property == config->tv_bottom_margin_property) {
1488 *val = state->tv.margins.bottom;
1489 } else if (property == config->tv_mode_property) {
1490 *val = state->tv.mode;
1491 } else if (property == config->tv_brightness_property) {
1492 *val = state->tv.brightness;
1493 } else if (property == config->tv_contrast_property) {
1494 *val = state->tv.contrast;
1495 } else if (property == config->tv_flicker_reduction_property) {
1496 *val = state->tv.flicker_reduction;
1497 } else if (property == config->tv_overscan_property) {
1498 *val = state->tv.overscan;
1499 } else if (property == config->tv_saturation_property) {
1500 *val = state->tv.saturation;
1501 } else if (property == config->tv_hue_property) {
1502 *val = state->tv.hue;
1503 } else if (property == config->link_status_property) {
1504 *val = state->link_status;
1505 } else if (property == config->aspect_ratio_property) {
1506 *val = state->picture_aspect_ratio;
1507 } else if (property == config->content_type_property) {
1508 *val = state->content_type;
1509 } else if (property == connector->scaling_mode_property) {
1510 *val = state->scaling_mode;
1511 } else if (property == connector->content_protection_property) {
1512 *val = state->content_protection;
1513 } else if (property == config->writeback_fb_id_property) {
1514 /* Writeback framebuffer is one-shot, write and forget */
1515 *val = 0;
1516 } else if (property == config->writeback_out_fence_ptr_property) {
1517 *val = 0;
1518 } else if (connector->funcs->atomic_get_property) {
1519 return connector->funcs->atomic_get_property(connector,
1520 state, property, val);
1521 } else {
1522 return -EINVAL;
1523 }
1524
1525 return 0;
1526}
1527
1528int drm_atomic_get_property(struct drm_mode_object *obj,
1529 struct drm_property *property, uint64_t *val)
1530{
1531 struct drm_device *dev = property->dev;
1532 int ret;
1533
1534 switch (obj->type) {
1535 case DRM_MODE_OBJECT_CONNECTOR: {
1536 struct drm_connector *connector = obj_to_connector(obj);
1537 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1538 ret = drm_atomic_connector_get_property(connector,
1539 connector->state, property, val);
1540 break;
1541 }
1542 case DRM_MODE_OBJECT_CRTC: {
1543 struct drm_crtc *crtc = obj_to_crtc(obj);
1544 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1545 ret = drm_atomic_crtc_get_property(crtc,
1546 crtc->state, property, val);
1547 break;
1548 }
1549 case DRM_MODE_OBJECT_PLANE: {
1550 struct drm_plane *plane = obj_to_plane(obj);
1551 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
1552 ret = drm_atomic_plane_get_property(plane,
1553 plane->state, property, val);
1554 break;
1555 }
1556 default:
1557 ret = -EINVAL;
1558 break;
1559 }
1560
1561 return ret;
1562}
1563
1564/**
1565 * drm_atomic_set_crtc_for_plane - set crtc for plane
1566 * @plane_state: the plane whose incoming state to update
1567 * @crtc: crtc to use for the plane
1568 *
1569 * Changing the assigned crtc for a plane requires us to grab the lock and state
1570 * for the new crtc, as needed. This function takes care of all these details
1571 * besides updating the pointer in the state object itself.
1572 *
1573 * Returns:
1574 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1575 * then the w/w mutex code has detected a deadlock and the entire atomic
1576 * sequence must be restarted. All other errors are fatal.
1577 */
1578int
1579drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
1580 struct drm_crtc *crtc)
1581{
1582 struct drm_plane *plane = plane_state->plane;
1583 struct drm_crtc_state *crtc_state;
 1584	/* Nothing to do for the same crtc */
1585 if (plane_state->crtc == crtc)
1586 return 0;
1587 if (plane_state->crtc) {
1588 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1589 plane_state->crtc);
1590 if (WARN_ON(IS_ERR(crtc_state)))
1591 return PTR_ERR(crtc_state);
1592
1593 crtc_state->plane_mask &= ~drm_plane_mask(plane);
1594 }
1595
1596 plane_state->crtc = crtc;
1597
1598 if (crtc) {
1599 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1600 crtc);
1601 if (IS_ERR(crtc_state))
1602 return PTR_ERR(crtc_state);
1603 crtc_state->plane_mask |= drm_plane_mask(plane);
1604 }
1605
1606 if (crtc)
1607 DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
1608 plane->base.id, plane->name, plane_state,
1609 crtc->base.id, crtc->name);
1610 else
1611 DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
1612 plane->base.id, plane->name, plane_state);
1613
1614 return 0;
1615}
1616EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
1617
1618/**
1619 * drm_atomic_set_fb_for_plane - set framebuffer for plane
1620 * @plane_state: atomic state object for the plane
1621 * @fb: fb to use for the plane
1622 *
1623 * Changing the assigned framebuffer for a plane requires us to grab a reference
1624 * to the new fb and drop the reference to the old fb, if there is one. This
1625 * function takes care of all these details besides updating the pointer in the
1626 * state object itself.
1627 */
1628void
1629drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
1630 struct drm_framebuffer *fb)
1631{
1632 struct drm_plane *plane = plane_state->plane;
1633
1634 if (fb)
1635 DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
1636 fb->base.id, plane->base.id, plane->name,
1637 plane_state);
1638 else
1639 DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
1640 plane->base.id, plane->name, plane_state);
1641
1642 drm_framebuffer_assign(&plane_state->fb, fb);
1643}
1644EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
1645
1646/**
1647 * drm_atomic_set_fence_for_plane - set fence for plane
1648 * @plane_state: atomic state object for the plane
1649 * @fence: dma_fence to use for the plane
1650 *
 1651 * Helper to set up the plane_state fence in case it is not set yet.
 1652 * By using this, drivers don't need to worry whether the user chose
 1653 * implicit or explicit fencing.
1654 *
1655 * This function will not set the fence to the state if it was set
1656 * via explicit fencing interfaces on the atomic ioctl. In that case it will
1657 * drop the reference to the fence as we are not storing it anywhere.
 1658 * Otherwise, if &drm_plane_state.fence is not set, this function just sets it
1659 * with the received implicit fence. In both cases this function consumes a
1660 * reference for @fence.
1661 *
1662 * This way explicit fencing can be used to overrule implicit fencing, which is
1663 * important to make explicit fencing use-cases work: One example is using one
1664 * buffer for 2 screens with different refresh rates. Implicit fencing will
1665 * clamp rendering to the refresh rate of the slower screen, whereas explicit
1666 * fence allows 2 independent render and display loops on a single buffer. If a
 1667 * driver obeys both implicit and explicit fences for plane updates, then
1668 * it will break all the benefits of explicit fencing.
1669 */
1670void
1671drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
1672 struct dma_fence *fence)
1673{
1674 if (plane_state->fence) {
1675 dma_fence_put(fence);
1676 return;
1677 }
1678
1679 plane_state->fence = fence;
1680}
1681EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
1682
1683/**
1684 * drm_atomic_set_crtc_for_connector - set crtc for connector
1685 * @conn_state: atomic state object for the connector
1686 * @crtc: crtc to use for the connector
1687 *
1688 * Changing the assigned crtc for a connector requires us to grab the lock and
1689 * state for the new crtc, as needed. This function takes care of all these
1690 * details besides updating the pointer in the state object itself.
1691 *
1692 * Returns:
1693 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1694 * then the w/w mutex code has detected a deadlock and the entire atomic
1695 * sequence must be restarted. All other errors are fatal.
1696 */
1697int
1698drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1699 struct drm_crtc *crtc)
1700{
1701 struct drm_connector *connector = conn_state->connector;
1702 struct drm_crtc_state *crtc_state;
1703
1704 if (conn_state->crtc == crtc)
1705 return 0;
1706
1707 if (conn_state->crtc) {
1708 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
1709 conn_state->crtc);
1710
1711 crtc_state->connector_mask &=
1712 ~drm_connector_mask(conn_state->connector);
1713
1714 drm_connector_put(conn_state->connector);
1715 conn_state->crtc = NULL;
1716 }
1717
1718 if (crtc) {
1719 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1720 if (IS_ERR(crtc_state))
1721 return PTR_ERR(crtc_state);
1722
1723 crtc_state->connector_mask |=
1724 drm_connector_mask(conn_state->connector);
1725
1726 drm_connector_get(conn_state->connector);
1727 conn_state->crtc = crtc;
1728
1729 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
1730 connector->base.id, connector->name,
1731 conn_state, crtc->base.id, crtc->name);
1732 } else {
1733 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
1734 connector->base.id, connector->name,
1735 conn_state);
1736 }
1737
1738 return 0;
1739}
1740EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
1741
1742/*
1743 * drm_atomic_get_writeback_job - return or allocate a writeback job
1744 * @conn_state: Connector state to get the job for
1745 *
1746 * Writeback jobs have a different lifetime to the atomic state they are
1747 * associated with. This convenience function takes care of allocating a job
1748 * if there isn't yet one associated with the connector state, otherwise
1749 * it just returns the existing job.
1750 *
1751 * Returns: The writeback job for the given connector state
1752 */
1753static struct drm_writeback_job *
1754drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
1755{
1756 WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1757
1758 if (!conn_state->writeback_job)
1759 conn_state->writeback_job =
1760 kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
1761
1762 return conn_state->writeback_job;
1763}
1764
1765/**
1766 * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
1767 * @conn_state: atomic state object for the connector
1768 * @fb: fb to use for the connector
1769 *
1770 * This is used to set the framebuffer for a writeback connector, which outputs
1771 * to a buffer instead of an actual physical connector.
1772 * Changing the assigned framebuffer requires us to grab a reference to the new
1773 * fb and drop the reference to the old fb, if there is one. This function
1774 * takes care of all these details besides updating the pointer in the
1775 * state object itself.
1776 *
1777 * Note: The only way conn_state can already have an fb set is if the commit
1778 * sets the property more than once.
1779 *
1780 * See also: drm_writeback_connector_init()
1781 *
1782 * Returns: 0 on success
1783 */
1784int drm_atomic_set_writeback_fb_for_connector(
1785 struct drm_connector_state *conn_state,
1786 struct drm_framebuffer *fb)
1787{
1788 struct drm_writeback_job *job =
1789 drm_atomic_get_writeback_job(conn_state);
1790 if (!job)
1791 return -ENOMEM;
1792
1793 drm_framebuffer_assign(&job->fb, fb);
1794
1795 if (fb)
1796 DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
1797 fb->base.id, conn_state);
1798 else
1799 DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
1800 conn_state);
1801
1802 return 0;
1803}
1804EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
1805
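Since writeback is driven entirely through these connector properties, a short userspace illustration may help. The following hypothetical libdrm sketch (not part of this patch) queues one writeback job on an already-configured CRTC; it assumes DRM_CLIENT_CAP_ATOMIC and DRM_CLIENT_CAP_WRITEBACK_CONNECTORS are enabled and that the WRITEBACK_FB_ID and WRITEBACK_OUT_FENCE_PTR property IDs were looked up earlier, e.g. via drmModeObjectGetProperties().

    /* Hypothetical userspace sketch: queue a writeback job and collect the
     * out-fence. The property-ID parameters are placeholders. */
    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int queue_writeback(int fd, uint32_t wb_conn_id, uint32_t fb_id,
                        uint32_t wb_fb_id_prop, uint32_t wb_out_fence_prop)
    {
            int out_fence = -1;
            drmModeAtomicReq *req = drmModeAtomicAlloc();
            int ret;

            if (!req)
                    return -ENOMEM;

            /* WRITEBACK_FB_ID: buffer the CRTC contents get written into. */
            drmModeAtomicAddProperty(req, wb_conn_id, wb_fb_id_prop, fb_id);
            /* WRITEBACK_OUT_FENCE_PTR: kernel stores a sync_file fd here. */
            drmModeAtomicAddProperty(req, wb_conn_id, wb_out_fence_prop,
                                     (uint64_t)(uintptr_t)&out_fence);

            ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
            drmModeAtomicFree(req);
            if (ret)
                    return ret;

            /* out_fence signals once the CRTC contents have hit the buffer. */
            return out_fence;
    }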
1806/**
1807 * drm_atomic_add_affected_connectors - add connectors for crtc 842 * drm_atomic_add_affected_connectors - add connectors for crtc
1808 * @state: atomic state 843 * @state: atomic state
1809 * @crtc: DRM crtc 844 * @crtc: DRM crtc
@@ -2039,7 +1074,7 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
2039} 1074}
2040EXPORT_SYMBOL(drm_atomic_nonblocking_commit); 1075EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
2041 1076
2042static void drm_atomic_print_state(const struct drm_atomic_state *state) 1077void drm_atomic_print_state(const struct drm_atomic_state *state)
2043{ 1078{
2044 struct drm_printer p = drm_info_printer(state->dev->dev); 1079 struct drm_printer p = drm_info_printer(state->dev->dev);
2045 struct drm_plane *plane; 1080 struct drm_plane *plane;
@@ -2146,544 +1181,3 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
2146} 1181}
2147#endif 1182#endif
2148 1183
2149/*
2150 * The big monster ioctl
2151 */
2152
2153static struct drm_pending_vblank_event *create_vblank_event(
2154 struct drm_crtc *crtc, uint64_t user_data)
2155{
2156 struct drm_pending_vblank_event *e = NULL;
2157
2158 e = kzalloc(sizeof *e, GFP_KERNEL);
2159 if (!e)
2160 return NULL;
2161
2162 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
2163 e->event.base.length = sizeof(e->event);
2164 e->event.vbl.crtc_id = crtc->base.id;
2165 e->event.vbl.user_data = user_data;
2166
2167 return e;
2168}
2169
2170int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
2171 struct drm_connector *connector,
2172 int mode)
2173{
2174 struct drm_connector *tmp_connector;
2175 struct drm_connector_state *new_conn_state;
2176 struct drm_crtc *crtc;
2177 struct drm_crtc_state *crtc_state;
2178 int i, ret, old_mode = connector->dpms;
2179 bool active = false;
2180
2181 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
2182 state->acquire_ctx);
2183 if (ret)
2184 return ret;
2185
2186 if (mode != DRM_MODE_DPMS_ON)
2187 mode = DRM_MODE_DPMS_OFF;
2188 connector->dpms = mode;
2189
2190 crtc = connector->state->crtc;
2191 if (!crtc)
2192 goto out;
2193 ret = drm_atomic_add_affected_connectors(state, crtc);
2194 if (ret)
2195 goto out;
2196
2197 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2198 if (IS_ERR(crtc_state)) {
2199 ret = PTR_ERR(crtc_state);
2200 goto out;
2201 }
2202
2203 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
2204 if (new_conn_state->crtc != crtc)
2205 continue;
2206 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
2207 active = true;
2208 break;
2209 }
2210 }
2211
2212 crtc_state->active = active;
2213 ret = drm_atomic_commit(state);
2214out:
2215 if (ret != 0)
2216 connector->dpms = old_mode;
2217 return ret;
2218}
2219
2220int drm_atomic_set_property(struct drm_atomic_state *state,
2221 struct drm_mode_object *obj,
2222 struct drm_property *prop,
2223 uint64_t prop_value)
2224{
2225 struct drm_mode_object *ref;
2226 int ret;
2227
2228 if (!drm_property_change_valid_get(prop, prop_value, &ref))
2229 return -EINVAL;
2230
2231 switch (obj->type) {
2232 case DRM_MODE_OBJECT_CONNECTOR: {
2233 struct drm_connector *connector = obj_to_connector(obj);
2234 struct drm_connector_state *connector_state;
2235
2236 connector_state = drm_atomic_get_connector_state(state, connector);
2237 if (IS_ERR(connector_state)) {
2238 ret = PTR_ERR(connector_state);
2239 break;
2240 }
2241
2242 ret = drm_atomic_connector_set_property(connector,
2243 connector_state, prop, prop_value);
2244 break;
2245 }
2246 case DRM_MODE_OBJECT_CRTC: {
2247 struct drm_crtc *crtc = obj_to_crtc(obj);
2248 struct drm_crtc_state *crtc_state;
2249
2250 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2251 if (IS_ERR(crtc_state)) {
2252 ret = PTR_ERR(crtc_state);
2253 break;
2254 }
2255
2256 ret = drm_atomic_crtc_set_property(crtc,
2257 crtc_state, prop, prop_value);
2258 break;
2259 }
2260 case DRM_MODE_OBJECT_PLANE: {
2261 struct drm_plane *plane = obj_to_plane(obj);
2262 struct drm_plane_state *plane_state;
2263
2264 plane_state = drm_atomic_get_plane_state(state, plane);
2265 if (IS_ERR(plane_state)) {
2266 ret = PTR_ERR(plane_state);
2267 break;
2268 }
2269
2270 ret = drm_atomic_plane_set_property(plane,
2271 plane_state, prop, prop_value);
2272 break;
2273 }
2274 default:
2275 ret = -EINVAL;
2276 break;
2277 }
2278
2279 drm_property_change_valid_put(prop, ref);
2280 return ret;
2281}
2282
2283/**
2284 * DOC: explicit fencing properties
2285 *
2286 * Explicit fencing allows userspace to control the buffer synchronization
 2287 * between devices. A fence or a group of fences is transferred to/from
 2288 * userspace using Sync File fds, and there are two DRM properties for that:
2289 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
2290 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
2291 *
 2292 * By contrast, with implicit fencing the kernel keeps track of any
2293 * ongoing rendering, and automatically ensures that the atomic update waits
2294 * for any pending rendering to complete. For shared buffers represented with
2295 * a &struct dma_buf this is tracked in &struct reservation_object.
2296 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
2297 * whereas explicit fencing is what Android wants.
2298 *
 2299 * "IN_FENCE_FD":
2300 * Use this property to pass a fence that DRM should wait on before
2301 * proceeding with the Atomic Commit request and show the framebuffer for
2302 * the plane on the screen. The fence can be either a normal fence or a
2303 * merged one, the sync_file framework will handle both cases and use a
2304 * fence_array if a merged fence is received. Passing -1 here means no
2305 * fences to wait on.
2306 *
2307 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
2308 * it will only check if the Sync File is a valid one.
2309 *
2310 * On the driver side the fence is stored on the @fence parameter of
2311 * &struct drm_plane_state. Drivers which also support implicit fencing
2312 * should set the implicit fence using drm_atomic_set_fence_for_plane(),
2313 * to make sure there's consistent behaviour between drivers in precedence
2314 * of implicit vs. explicit fencing.
2315 *
 2316 * "OUT_FENCE_PTR":
2317 * Use this property to pass a file descriptor pointer to DRM. Once the
2318 * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
2319 * the file descriptor number of a Sync File. This Sync File contains the
2320 * CRTC fence that will be signaled when all framebuffers present on the
 2321 * Atomic Commit request for that given CRTC are scanned out on the
2322 * screen.
2323 *
 2324 * The Atomic Commit request fails if an invalid pointer is passed. If the
 2325 * Atomic Commit request fails for any other reason, the out fence fd
 2326 * returned will be -1. On an Atomic Commit with the
2327 * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
2328 *
2329 * Note that out-fences don't have a special interface to drivers and are
2330 * internally represented by a &struct drm_pending_vblank_event in struct
2331 * &drm_crtc_state, which is also used by the nonblocking atomic commit
2332 * helpers and for the DRM event handling for existing userspace.
2333 */
2334
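To make the two properties above concrete, here is a hypothetical userspace sketch (libdrm) that hands an in-fence to a plane and requests an out-fence from the CRTC in the same commit. The FB_ID/CRTC_ID setup is assumed to happen elsewhere in the same request, and the two property-ID parameters are placeholders resolved at startup.

    /* Hypothetical sketch: explicit fencing from userspace. in_fence_fd is a
     * sync_file fd produced by the renderer, or -1 for "nothing to wait on". */
    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int commit_with_fences(int fd, uint32_t plane_id, uint32_t crtc_id,
                           uint32_t in_fence_fd_prop, uint32_t out_fence_ptr_prop,
                           int in_fence_fd, int *out_fence_fd)
    {
            drmModeAtomicReq *req = drmModeAtomicAlloc();
            int ret;

            if (!req)
                    return -ENOMEM;
            *out_fence_fd = -1;

            /* IN_FENCE_FD: scanout of this plane waits for the fence. */
            drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop,
                                     (uint64_t)(int64_t)in_fence_fd);
            /* OUT_FENCE_PTR: on success the kernel writes a sync_file fd here. */
            drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
                                     (uint64_t)(uintptr_t)out_fence_fd);

            ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
            drmModeAtomicFree(req);
            return ret;
    }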
2335struct drm_out_fence_state {
2336 s32 __user *out_fence_ptr;
2337 struct sync_file *sync_file;
2338 int fd;
2339};
2340
2341static int setup_out_fence(struct drm_out_fence_state *fence_state,
2342 struct dma_fence *fence)
2343{
2344 fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
2345 if (fence_state->fd < 0)
2346 return fence_state->fd;
2347
2348 if (put_user(fence_state->fd, fence_state->out_fence_ptr))
2349 return -EFAULT;
2350
2351 fence_state->sync_file = sync_file_create(fence);
2352 if (!fence_state->sync_file)
2353 return -ENOMEM;
2354
2355 return 0;
2356}
2357
2358static int prepare_signaling(struct drm_device *dev,
2359 struct drm_atomic_state *state,
2360 struct drm_mode_atomic *arg,
2361 struct drm_file *file_priv,
2362 struct drm_out_fence_state **fence_state,
2363 unsigned int *num_fences)
2364{
2365 struct drm_crtc *crtc;
2366 struct drm_crtc_state *crtc_state;
2367 struct drm_connector *conn;
2368 struct drm_connector_state *conn_state;
2369 int i, c = 0, ret;
2370
2371 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
2372 return 0;
2373
2374 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2375 s32 __user *fence_ptr;
2376
2377 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
2378
2379 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
2380 struct drm_pending_vblank_event *e;
2381
2382 e = create_vblank_event(crtc, arg->user_data);
2383 if (!e)
2384 return -ENOMEM;
2385
2386 crtc_state->event = e;
2387 }
2388
2389 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
2390 struct drm_pending_vblank_event *e = crtc_state->event;
2391
2392 if (!file_priv)
2393 continue;
2394
2395 ret = drm_event_reserve_init(dev, file_priv, &e->base,
2396 &e->event.base);
2397 if (ret) {
2398 kfree(e);
2399 crtc_state->event = NULL;
2400 return ret;
2401 }
2402 }
2403
2404 if (fence_ptr) {
2405 struct dma_fence *fence;
2406 struct drm_out_fence_state *f;
2407
2408 f = krealloc(*fence_state, sizeof(**fence_state) *
2409 (*num_fences + 1), GFP_KERNEL);
2410 if (!f)
2411 return -ENOMEM;
2412
2413 memset(&f[*num_fences], 0, sizeof(*f));
2414
2415 f[*num_fences].out_fence_ptr = fence_ptr;
2416 *fence_state = f;
2417
2418 fence = drm_crtc_create_fence(crtc);
2419 if (!fence)
2420 return -ENOMEM;
2421
2422 ret = setup_out_fence(&f[(*num_fences)++], fence);
2423 if (ret) {
2424 dma_fence_put(fence);
2425 return ret;
2426 }
2427
2428 crtc_state->event->base.fence = fence;
2429 }
2430
2431 c++;
2432 }
2433
2434 for_each_new_connector_in_state(state, conn, conn_state, i) {
2435 struct drm_writeback_connector *wb_conn;
2436 struct drm_writeback_job *job;
2437 struct drm_out_fence_state *f;
2438 struct dma_fence *fence;
2439 s32 __user *fence_ptr;
2440
2441 fence_ptr = get_out_fence_for_connector(state, conn);
2442 if (!fence_ptr)
2443 continue;
2444
2445 job = drm_atomic_get_writeback_job(conn_state);
2446 if (!job)
2447 return -ENOMEM;
2448
2449 f = krealloc(*fence_state, sizeof(**fence_state) *
2450 (*num_fences + 1), GFP_KERNEL);
2451 if (!f)
2452 return -ENOMEM;
2453
2454 memset(&f[*num_fences], 0, sizeof(*f));
2455
2456 f[*num_fences].out_fence_ptr = fence_ptr;
2457 *fence_state = f;
2458
2459 wb_conn = drm_connector_to_writeback(conn);
2460 fence = drm_writeback_get_out_fence(wb_conn);
2461 if (!fence)
2462 return -ENOMEM;
2463
2464 ret = setup_out_fence(&f[(*num_fences)++], fence);
2465 if (ret) {
2466 dma_fence_put(fence);
2467 return ret;
2468 }
2469
2470 job->out_fence = fence;
2471 }
2472
2473 /*
 2474	 * Having this flag set means userspace expects an event that can never
 2475	 * be delivered, since there is no CRTC to signal it from
2476 */
2477 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
2478 return -EINVAL;
2479
2480 return 0;
2481}
2482
2483static void complete_signaling(struct drm_device *dev,
2484 struct drm_atomic_state *state,
2485 struct drm_out_fence_state *fence_state,
2486 unsigned int num_fences,
2487 bool install_fds)
2488{
2489 struct drm_crtc *crtc;
2490 struct drm_crtc_state *crtc_state;
2491 int i;
2492
2493 if (install_fds) {
2494 for (i = 0; i < num_fences; i++)
2495 fd_install(fence_state[i].fd,
2496 fence_state[i].sync_file->file);
2497
2498 kfree(fence_state);
2499 return;
2500 }
2501
2502 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2503 struct drm_pending_vblank_event *event = crtc_state->event;
2504 /*
2505 * Free the allocated event. drm_atomic_helper_setup_commit
2506 * can allocate an event too, so only free it if it's ours
2507 * to prevent a double free in drm_atomic_state_clear.
2508 */
2509 if (event && (event->base.fence || event->base.file_priv)) {
2510 drm_event_cancel_free(dev, &event->base);
2511 crtc_state->event = NULL;
2512 }
2513 }
2514
2515 if (!fence_state)
2516 return;
2517
2518 for (i = 0; i < num_fences; i++) {
2519 if (fence_state[i].sync_file)
2520 fput(fence_state[i].sync_file->file);
2521 if (fence_state[i].fd >= 0)
2522 put_unused_fd(fence_state[i].fd);
2523
2524 /* If this fails log error to the user */
2525 if (fence_state[i].out_fence_ptr &&
2526 put_user(-1, fence_state[i].out_fence_ptr))
2527 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
2528 }
2529
2530 kfree(fence_state);
2531}
2532
2533int drm_mode_atomic_ioctl(struct drm_device *dev,
2534 void *data, struct drm_file *file_priv)
2535{
2536 struct drm_mode_atomic *arg = data;
2537 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
2538 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
2539 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
2540 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
2541 unsigned int copied_objs, copied_props;
2542 struct drm_atomic_state *state;
2543 struct drm_modeset_acquire_ctx ctx;
2544 struct drm_out_fence_state *fence_state;
2545 int ret = 0;
2546 unsigned int i, j, num_fences;
2547
2548 /* disallow for drivers not supporting atomic: */
2549 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
2550 return -EINVAL;
2551
2552 /* disallow for userspace that has not enabled atomic cap (even
2553 * though this may be a bit overkill, since legacy userspace
2554 * wouldn't know how to call this ioctl)
2555 */
2556 if (!file_priv->atomic)
2557 return -EINVAL;
2558
2559 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
2560 return -EINVAL;
2561
2562 if (arg->reserved)
2563 return -EINVAL;
2564
2565 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
2566 !dev->mode_config.async_page_flip)
2567 return -EINVAL;
2568
2569 /* can't test and expect an event at the same time. */
2570 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
2571 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
2572 return -EINVAL;
2573
2574 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2575
2576 state = drm_atomic_state_alloc(dev);
2577 if (!state)
2578 return -ENOMEM;
2579
2580 state->acquire_ctx = &ctx;
2581 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
2582
2583retry:
2584 copied_objs = 0;
2585 copied_props = 0;
2586 fence_state = NULL;
2587 num_fences = 0;
2588
2589 for (i = 0; i < arg->count_objs; i++) {
2590 uint32_t obj_id, count_props;
2591 struct drm_mode_object *obj;
2592
2593 if (get_user(obj_id, objs_ptr + copied_objs)) {
2594 ret = -EFAULT;
2595 goto out;
2596 }
2597
2598 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
2599 if (!obj) {
2600 ret = -ENOENT;
2601 goto out;
2602 }
2603
2604 if (!obj->properties) {
2605 drm_mode_object_put(obj);
2606 ret = -ENOENT;
2607 goto out;
2608 }
2609
2610 if (get_user(count_props, count_props_ptr + copied_objs)) {
2611 drm_mode_object_put(obj);
2612 ret = -EFAULT;
2613 goto out;
2614 }
2615
2616 copied_objs++;
2617
2618 for (j = 0; j < count_props; j++) {
2619 uint32_t prop_id;
2620 uint64_t prop_value;
2621 struct drm_property *prop;
2622
2623 if (get_user(prop_id, props_ptr + copied_props)) {
2624 drm_mode_object_put(obj);
2625 ret = -EFAULT;
2626 goto out;
2627 }
2628
2629 prop = drm_mode_obj_find_prop_id(obj, prop_id);
2630 if (!prop) {
2631 drm_mode_object_put(obj);
2632 ret = -ENOENT;
2633 goto out;
2634 }
2635
2636 if (copy_from_user(&prop_value,
2637 prop_values_ptr + copied_props,
2638 sizeof(prop_value))) {
2639 drm_mode_object_put(obj);
2640 ret = -EFAULT;
2641 goto out;
2642 }
2643
2644 ret = drm_atomic_set_property(state, obj, prop,
2645 prop_value);
2646 if (ret) {
2647 drm_mode_object_put(obj);
2648 goto out;
2649 }
2650
2651 copied_props++;
2652 }
2653
2654 drm_mode_object_put(obj);
2655 }
2656
2657 ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
2658 &num_fences);
2659 if (ret)
2660 goto out;
2661
2662 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
2663 ret = drm_atomic_check_only(state);
2664 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
2665 ret = drm_atomic_nonblocking_commit(state);
2666 } else {
2667 if (unlikely(drm_debug & DRM_UT_STATE))
2668 drm_atomic_print_state(state);
2669
2670 ret = drm_atomic_commit(state);
2671 }
2672
2673out:
2674 complete_signaling(dev, state, fence_state, num_fences, !ret);
2675
2676 if (ret == -EDEADLK) {
2677 drm_atomic_state_clear(state);
2678 ret = drm_modeset_backoff(&ctx);
2679 if (!ret)
2680 goto retry;
2681 }
2682
2683 drm_atomic_state_put(state);
2684
2685 drm_modeset_drop_locks(&ctx);
2686 drm_modeset_acquire_fini(&ctx);
2687
2688 return ret;
2689}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2c23a48482da..3cf1aa132778 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -27,6 +27,7 @@
27 27
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include <drm/drm_atomic.h> 29#include <drm/drm_atomic.h>
30#include <drm/drm_atomic_uapi.h>
30#include <drm/drm_plane_helper.h> 31#include <drm/drm_plane_helper.h>
31#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
32#include <drm/drm_atomic_helper.h> 33#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
new file mode 100644
index 000000000000..26690a664ec6
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -0,0 +1,1393 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 * Copyright (C) 2018 Intel Corp.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robdclark@gmail.com>
26 * Daniel Vetter <daniel.vetter@ffwll.ch>
27 */
28
29#include <drm/drm_atomic_uapi.h>
30#include <drm/drm_atomic.h>
31#include <drm/drm_print.h>
32#include <drm/drm_drv.h>
33#include <drm/drm_writeback.h>
34#include <drm/drm_vblank.h>
35
36#include <linux/dma-fence.h>
37#include <linux/uaccess.h>
38#include <linux/sync_file.h>
39#include <linux/file.h>
40
41#include "drm_crtc_internal.h"
42
43/**
44 * DOC: overview
45 *
46 * This file contains the marshalling and demarshalling glue for the atomic UAPI
 47 * in all its forms: the monster ATOMIC IOCTL itself, code for the GET_PROPERTY and
 48 * SET_PROPERTY IOCTLs, plus interface functions for compatibility helpers and
 49 * drivers with special needs that construct their own atomic updates, e.g.
 50 * for load detection or similar.
51 */
52
53/**
54 * drm_atomic_set_mode_for_crtc - set mode for CRTC
55 * @state: the CRTC whose incoming state to update
56 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
57 *
58 * Set a mode (originating from the kernel) on the desired CRTC state and update
59 * the enable property.
60 *
61 * RETURNS:
62 * Zero on success, error code on failure. Cannot return -EDEADLK.
63 */
64int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
65 const struct drm_display_mode *mode)
66{
67 struct drm_crtc *crtc = state->crtc;
68 struct drm_mode_modeinfo umode;
69
70 /* Early return for no change. */
71 if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
72 return 0;
73
74 drm_property_blob_put(state->mode_blob);
75 state->mode_blob = NULL;
76
77 if (mode) {
78 drm_mode_convert_to_umode(&umode, mode);
79 state->mode_blob =
80 drm_property_create_blob(state->crtc->dev,
81 sizeof(umode),
82 &umode);
83 if (IS_ERR(state->mode_blob))
84 return PTR_ERR(state->mode_blob);
85
86 drm_mode_copy(&state->mode, mode);
87 state->enable = true;
88 DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
89 mode->name, crtc->base.id, crtc->name, state);
90 } else {
91 memset(&state->mode, 0, sizeof(state->mode));
92 state->enable = false;
93 DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
94 crtc->base.id, crtc->name, state);
95 }
96
97 return 0;
98}
99EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
100
101/**
102 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
103 * @state: the CRTC whose incoming state to update
104 * @blob: pointer to blob property to use for mode
105 *
106 * Set a mode (originating from a blob property) on the desired CRTC state.
107 * This function will take a reference on the blob property for the CRTC state,
108 * and release the reference held on the state's existing mode property, if any
109 * was set.
110 *
111 * RETURNS:
112 * Zero on success, error code on failure. Cannot return -EDEADLK.
113 */
114int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
115 struct drm_property_blob *blob)
116{
117 struct drm_crtc *crtc = state->crtc;
118
119 if (blob == state->mode_blob)
120 return 0;
121
122 drm_property_blob_put(state->mode_blob);
123 state->mode_blob = NULL;
124
125 memset(&state->mode, 0, sizeof(state->mode));
126
127 if (blob) {
128 int ret;
129
130 if (blob->length != sizeof(struct drm_mode_modeinfo)) {
131 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
132 crtc->base.id, crtc->name,
133 blob->length);
134 return -EINVAL;
135 }
136
137 ret = drm_mode_convert_umode(crtc->dev,
138 &state->mode, blob->data);
139 if (ret) {
140 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
141 crtc->base.id, crtc->name,
142 ret, drm_get_mode_status_name(state->mode.status));
143 drm_mode_debug_printmodeline(&state->mode);
144 return -EINVAL;
145 }
146
147 state->mode_blob = drm_property_blob_get(blob);
148 state->enable = true;
149 DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
150 state->mode.name, crtc->base.id, crtc->name,
151 state);
152 } else {
153 state->enable = false;
154 DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
155 crtc->base.id, crtc->name, state);
156 }
157
158 return 0;
159}
160EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
161
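Userspace reaches the same code path as drm_atomic_set_mode_prop_for_crtc() by wrapping a drmModeModeInfo in a blob and attaching it to the CRTC's MODE_ID property. Below is a hypothetical sketch, assuming the MODE_ID and ACTIVE property IDs were resolved earlier and that the connector's CRTC_ID plus a primary plane are set up elsewhere in the same request.

    /* Hypothetical sketch: atomic modeset from userspace via a mode blob. */
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int atomic_modeset(int fd, uint32_t crtc_id, const drmModeModeInfo *mode,
                       uint32_t mode_id_prop, uint32_t active_prop)
    {
            uint32_t blob_id = 0;
            drmModeAtomicReq *req;
            int ret;

            /* MODE_ID takes a blob holding struct drm_mode_modeinfo; blobs of
             * any other length are rejected. */
            ret = drmModeCreatePropertyBlob(fd, mode, sizeof(*mode), &blob_id);
            if (ret)
                    return ret;

            req = drmModeAtomicAlloc();
            drmModeAtomicAddProperty(req, crtc_id, mode_id_prop, blob_id);
            drmModeAtomicAddProperty(req, crtc_id, active_prop, 1);

            /* Full modesets must be opted into explicitly. */
            ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);

            drmModeAtomicFree(req);
            drmModeDestroyPropertyBlob(fd, blob_id);
            return ret;
    }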
162/**
163 * drm_atomic_set_crtc_for_plane - set crtc for plane
164 * @plane_state: the plane whose incoming state to update
165 * @crtc: crtc to use for the plane
166 *
167 * Changing the assigned crtc for a plane requires us to grab the lock and state
168 * for the new crtc, as needed. This function takes care of all these details
169 * besides updating the pointer in the state object itself.
170 *
171 * Returns:
172 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
173 * then the w/w mutex code has detected a deadlock and the entire atomic
174 * sequence must be restarted. All other errors are fatal.
175 */
176int
177drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
178 struct drm_crtc *crtc)
179{
180 struct drm_plane *plane = plane_state->plane;
181 struct drm_crtc_state *crtc_state;
 182	/* Nothing to do for the same crtc */
183 if (plane_state->crtc == crtc)
184 return 0;
185 if (plane_state->crtc) {
186 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
187 plane_state->crtc);
188 if (WARN_ON(IS_ERR(crtc_state)))
189 return PTR_ERR(crtc_state);
190
191 crtc_state->plane_mask &= ~drm_plane_mask(plane);
192 }
193
194 plane_state->crtc = crtc;
195
196 if (crtc) {
197 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
198 crtc);
199 if (IS_ERR(crtc_state))
200 return PTR_ERR(crtc_state);
201 crtc_state->plane_mask |= drm_plane_mask(plane);
202 }
203
204 if (crtc)
205 DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
206 plane->base.id, plane->name, plane_state,
207 crtc->base.id, crtc->name);
208 else
209 DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
210 plane->base.id, plane->name, plane_state);
211
212 return 0;
213}
214EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
215
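The -EDEADLK contract spelled out above mainly matters to in-kernel callers that build their own updates. A minimal, hypothetical sketch of the usual acquire/backoff/retry loop around this helper might look as follows; the function and its error handling are illustrative, not taken from this patch.

    /* Hypothetical in-kernel sketch: hand-rolled atomic update showing the
     * ww-mutex backoff dance that the -EDEADLK return code requires. */
    #include <linux/err.h>
    #include <drm/drm_atomic.h>
    #include <drm/drm_atomic_uapi.h>
    #include <drm/drm_modeset_lock.h>

    static int example_show_fb_on_crtc(struct drm_device *dev,
                                       struct drm_plane *plane,
                                       struct drm_crtc *crtc,
                                       struct drm_framebuffer *fb)
    {
            struct drm_modeset_acquire_ctx ctx;
            struct drm_atomic_state *state;
            struct drm_plane_state *plane_state;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);

            state = drm_atomic_state_alloc(dev);
            if (!state) {
                    ret = -ENOMEM;
                    goto fini;
            }
            state->acquire_ctx = &ctx;

    retry:
            plane_state = drm_atomic_get_plane_state(state, plane);
            if (IS_ERR(plane_state)) {
                    ret = PTR_ERR(plane_state);
                    goto out;
            }

            /* May need the new crtc's state and lock, hence can hit -EDEADLK. */
            ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
            if (ret)
                    goto out;

            drm_atomic_set_fb_for_plane(plane_state, fb);
            plane_state->crtc_x = 0;
            plane_state->crtc_y = 0;
            plane_state->crtc_w = fb->width;
            plane_state->crtc_h = fb->height;
            plane_state->src_x = 0;
            plane_state->src_y = 0;
            plane_state->src_w = fb->width << 16;   /* 16.16 fixed point */
            plane_state->src_h = fb->height << 16;

            ret = drm_atomic_commit(state);

    out:
            if (ret == -EDEADLK) {
                    /* Deadlock detected: drop all state and locks, then retry. */
                    drm_atomic_state_clear(state);
                    drm_modeset_backoff(&ctx);
                    goto retry;
            }
            drm_atomic_state_put(state);
    fini:
            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
            return ret;
    }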
216/**
217 * drm_atomic_set_fb_for_plane - set framebuffer for plane
218 * @plane_state: atomic state object for the plane
219 * @fb: fb to use for the plane
220 *
221 * Changing the assigned framebuffer for a plane requires us to grab a reference
222 * to the new fb and drop the reference to the old fb, if there is one. This
223 * function takes care of all these details besides updating the pointer in the
224 * state object itself.
225 */
226void
227drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
228 struct drm_framebuffer *fb)
229{
230 struct drm_plane *plane = plane_state->plane;
231
232 if (fb)
233 DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
234 fb->base.id, plane->base.id, plane->name,
235 plane_state);
236 else
237 DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
238 plane->base.id, plane->name, plane_state);
239
240 drm_framebuffer_assign(&plane_state->fb, fb);
241}
242EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
243
244/**
245 * drm_atomic_set_fence_for_plane - set fence for plane
246 * @plane_state: atomic state object for the plane
247 * @fence: dma_fence to use for the plane
248 *
 249 * Helper to set up the plane_state fence in case it is not set yet.
 250 * By using this, drivers don't need to worry whether the user chose
 251 * implicit or explicit fencing.
252 *
253 * This function will not set the fence to the state if it was set
254 * via explicit fencing interfaces on the atomic ioctl. In that case it will
255 * drop the reference to the fence as we are not storing it anywhere.
 256 * Otherwise, if &drm_plane_state.fence is not set, this function just sets it
257 * with the received implicit fence. In both cases this function consumes a
258 * reference for @fence.
259 *
260 * This way explicit fencing can be used to overrule implicit fencing, which is
261 * important to make explicit fencing use-cases work: One example is using one
262 * buffer for 2 screens with different refresh rates. Implicit fencing will
263 * clamp rendering to the refresh rate of the slower screen, whereas explicit
264 * fence allows 2 independent render and display loops on a single buffer. If a
 265 * driver obeys both implicit and explicit fences for plane updates, then
266 * it will break all the benefits of explicit fencing.
267 */
268void
269drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
270 struct dma_fence *fence)
271{
272 if (plane_state->fence) {
273 dma_fence_put(fence);
274 return;
275 }
276
277 plane_state->fence = fence;
278}
279EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
280
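On the implicit side of that contract, a driver's &drm_plane_helper_funcs.prepare_fb hook is the usual place to feed the reservation fence in; the function above then discards it again if userspace already supplied an explicit IN_FENCE_FD. Below is a hypothetical sketch for a single GEM/dma-buf backed plane, roughly what the drm_gem_fb_prepare_fb() helper does at this point in time.

    /* Hypothetical prepare_fb sketch: pick up the implicit fence, if any. */
    #include <linux/dma-buf.h>
    #include <linux/reservation.h>
    #include <drm/drm_atomic_uapi.h>
    #include <drm/drm_gem.h>
    #include <drm/drm_gem_framebuffer_helper.h>

    static int example_plane_prepare_fb(struct drm_plane *plane,
                                        struct drm_plane_state *state)
    {
            struct dma_buf *dma_buf;
            struct dma_fence *fence;

            if (!state->fb)
                    return 0;

            /* Exclusive (write) fence of the buffer backing plane 0. */
            dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
            if (dma_buf) {
                    fence = reservation_object_get_excl_rcu(dma_buf->resv);
                    drm_atomic_set_fence_for_plane(state, fence);
            }

            return 0;
    }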
281/**
282 * drm_atomic_set_crtc_for_connector - set crtc for connector
283 * @conn_state: atomic state object for the connector
284 * @crtc: crtc to use for the connector
285 *
286 * Changing the assigned crtc for a connector requires us to grab the lock and
287 * state for the new crtc, as needed. This function takes care of all these
288 * details besides updating the pointer in the state object itself.
289 *
290 * Returns:
291 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
292 * then the w/w mutex code has detected a deadlock and the entire atomic
293 * sequence must be restarted. All other errors are fatal.
294 */
295int
296drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
297 struct drm_crtc *crtc)
298{
299 struct drm_connector *connector = conn_state->connector;
300 struct drm_crtc_state *crtc_state;
301
302 if (conn_state->crtc == crtc)
303 return 0;
304
305 if (conn_state->crtc) {
306 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
307 conn_state->crtc);
308
309 crtc_state->connector_mask &=
310 ~drm_connector_mask(conn_state->connector);
311
312 drm_connector_put(conn_state->connector);
313 conn_state->crtc = NULL;
314 }
315
316 if (crtc) {
317 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
318 if (IS_ERR(crtc_state))
319 return PTR_ERR(crtc_state);
320
321 crtc_state->connector_mask |=
322 drm_connector_mask(conn_state->connector);
323
324 drm_connector_get(conn_state->connector);
325 conn_state->crtc = crtc;
326
327 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
328 connector->base.id, connector->name,
329 conn_state, crtc->base.id, crtc->name);
330 } else {
331 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
332 connector->base.id, connector->name,
333 conn_state);
334 }
335
336 return 0;
337}
338EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
339
340static void set_out_fence_for_crtc(struct drm_atomic_state *state,
341 struct drm_crtc *crtc, s32 __user *fence_ptr)
342{
343 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
344}
345
346static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
347 struct drm_crtc *crtc)
348{
349 s32 __user *fence_ptr;
350
351 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
352 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
353
354 return fence_ptr;
355}
356
357static int set_out_fence_for_connector(struct drm_atomic_state *state,
358 struct drm_connector *connector,
359 s32 __user *fence_ptr)
360{
361 unsigned int index = drm_connector_index(connector);
362
363 if (!fence_ptr)
364 return 0;
365
366 if (put_user(-1, fence_ptr))
367 return -EFAULT;
368
369 state->connectors[index].out_fence_ptr = fence_ptr;
370
371 return 0;
372}
373
374static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
375 struct drm_connector *connector)
376{
377 unsigned int index = drm_connector_index(connector);
378 s32 __user *fence_ptr;
379
380 fence_ptr = state->connectors[index].out_fence_ptr;
381 state->connectors[index].out_fence_ptr = NULL;
382
383 return fence_ptr;
384}
385
386static int
387drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
388 struct drm_property_blob **blob,
389 uint64_t blob_id,
390 ssize_t expected_size,
391 ssize_t expected_elem_size,
392 bool *replaced)
393{
394 struct drm_property_blob *new_blob = NULL;
395
396 if (blob_id != 0) {
397 new_blob = drm_property_lookup_blob(dev, blob_id);
398 if (new_blob == NULL)
399 return -EINVAL;
400
401 if (expected_size > 0 &&
402 new_blob->length != expected_size) {
403 drm_property_blob_put(new_blob);
404 return -EINVAL;
405 }
406 if (expected_elem_size > 0 &&
407 new_blob->length % expected_elem_size != 0) {
408 drm_property_blob_put(new_blob);
409 return -EINVAL;
410 }
411 }
412
413 *replaced |= drm_property_replace_blob(blob, new_blob);
414 drm_property_blob_put(new_blob);
415
416 return 0;
417}
418
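The expected_elem_size check above is what enforces that, for example, a GAMMA_LUT blob contains a whole number of struct drm_color_lut entries. A hypothetical userspace sketch of building such a blob follows; the GAMMA_LUT property ID is assumed to be pre-resolved, and the blob should be destroyed once the commit that uses it has returned.

    /* Hypothetical sketch: attach an identity gamma LUT to a CRTC.
     * struct drm_color_lut is pulled in via the libdrm headers. */
    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int add_identity_gamma(int fd, drmModeAtomicReq *req, uint32_t crtc_id,
                           uint32_t gamma_lut_prop, unsigned int lut_size)
    {
            struct drm_color_lut *lut;
            uint32_t blob_id = 0;
            unsigned int i;
            int ret;

            if (lut_size < 2)
                    return -EINVAL;
            lut = calloc(lut_size, sizeof(*lut));
            if (!lut)
                    return -ENOMEM;

            for (i = 0; i < lut_size; i++) {
                    uint16_t v = 0xffff * i / (lut_size - 1);
                    lut[i].red = lut[i].green = lut[i].blue = v;
            }

            /* Length must be a multiple of sizeof(struct drm_color_lut). */
            ret = drmModeCreatePropertyBlob(fd, lut, lut_size * sizeof(*lut),
                                            &blob_id);
            free(lut);
            if (ret)
                    return ret;

            drmModeAtomicAddProperty(req, crtc_id, gamma_lut_prop, blob_id);
            return 0;
    }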
419static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
420 struct drm_crtc_state *state, struct drm_property *property,
421 uint64_t val)
422{
423 struct drm_device *dev = crtc->dev;
424 struct drm_mode_config *config = &dev->mode_config;
425 bool replaced = false;
426 int ret;
427
428 if (property == config->prop_active)
429 state->active = val;
430 else if (property == config->prop_mode_id) {
431 struct drm_property_blob *mode =
432 drm_property_lookup_blob(dev, val);
433 ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
434 drm_property_blob_put(mode);
435 return ret;
436 } else if (property == config->degamma_lut_property) {
437 ret = drm_atomic_replace_property_blob_from_id(dev,
438 &state->degamma_lut,
439 val,
440 -1, sizeof(struct drm_color_lut),
441 &replaced);
442 state->color_mgmt_changed |= replaced;
443 return ret;
444 } else if (property == config->ctm_property) {
445 ret = drm_atomic_replace_property_blob_from_id(dev,
446 &state->ctm,
447 val,
448 sizeof(struct drm_color_ctm), -1,
449 &replaced);
450 state->color_mgmt_changed |= replaced;
451 return ret;
452 } else if (property == config->gamma_lut_property) {
453 ret = drm_atomic_replace_property_blob_from_id(dev,
454 &state->gamma_lut,
455 val,
456 -1, sizeof(struct drm_color_lut),
457 &replaced);
458 state->color_mgmt_changed |= replaced;
459 return ret;
460 } else if (property == config->prop_out_fence_ptr) {
461 s32 __user *fence_ptr = u64_to_user_ptr(val);
462
463 if (!fence_ptr)
464 return 0;
465
466 if (put_user(-1, fence_ptr))
467 return -EFAULT;
468
469 set_out_fence_for_crtc(state->state, crtc, fence_ptr);
470 } else if (crtc->funcs->atomic_set_property) {
471 return crtc->funcs->atomic_set_property(crtc, state, property, val);
472 } else {
473		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
474 crtc->base.id, crtc->name,
475 property->base.id, property->name);
476 return -EINVAL;
477 }
478
479 return 0;
480}
481
482static int
483drm_atomic_crtc_get_property(struct drm_crtc *crtc,
484 const struct drm_crtc_state *state,
485 struct drm_property *property, uint64_t *val)
486{
487 struct drm_device *dev = crtc->dev;
488 struct drm_mode_config *config = &dev->mode_config;
489
490 if (property == config->prop_active)
491 *val = state->active;
492 else if (property == config->prop_mode_id)
493 *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
494 else if (property == config->degamma_lut_property)
495 *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
496 else if (property == config->ctm_property)
497 *val = (state->ctm) ? state->ctm->base.id : 0;
498 else if (property == config->gamma_lut_property)
499 *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
500 else if (property == config->prop_out_fence_ptr)
501 *val = 0;
502 else if (crtc->funcs->atomic_get_property)
503 return crtc->funcs->atomic_get_property(crtc, state, property, val);
504 else
505 return -EINVAL;
506
507 return 0;
508}
509
510static int drm_atomic_plane_set_property(struct drm_plane *plane,
511 struct drm_plane_state *state, struct drm_property *property,
512 uint64_t val)
513{
514 struct drm_device *dev = plane->dev;
515 struct drm_mode_config *config = &dev->mode_config;
516
517 if (property == config->prop_fb_id) {
518 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
519 drm_atomic_set_fb_for_plane(state, fb);
520 if (fb)
521 drm_framebuffer_put(fb);
522 } else if (property == config->prop_in_fence_fd) {
523 if (state->fence)
524 return -EINVAL;
525
526 if (U642I64(val) == -1)
527 return 0;
528
529 state->fence = sync_file_get_fence(val);
530 if (!state->fence)
531 return -EINVAL;
532
533 } else if (property == config->prop_crtc_id) {
534 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
535 return drm_atomic_set_crtc_for_plane(state, crtc);
536 } else if (property == config->prop_crtc_x) {
537 state->crtc_x = U642I64(val);
538 } else if (property == config->prop_crtc_y) {
539 state->crtc_y = U642I64(val);
540 } else if (property == config->prop_crtc_w) {
541 state->crtc_w = val;
542 } else if (property == config->prop_crtc_h) {
543 state->crtc_h = val;
544 } else if (property == config->prop_src_x) {
545 state->src_x = val;
546 } else if (property == config->prop_src_y) {
547 state->src_y = val;
548 } else if (property == config->prop_src_w) {
549 state->src_w = val;
550 } else if (property == config->prop_src_h) {
551 state->src_h = val;
552 } else if (property == plane->alpha_property) {
553 state->alpha = val;
554 } else if (property == plane->blend_mode_property) {
555 state->pixel_blend_mode = val;
556 } else if (property == plane->rotation_property) {
557 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
558 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
559 plane->base.id, plane->name, val);
560 return -EINVAL;
561 }
562 state->rotation = val;
563 } else if (property == plane->zpos_property) {
564 state->zpos = val;
565 } else if (property == plane->color_encoding_property) {
566 state->color_encoding = val;
567 } else if (property == plane->color_range_property) {
568 state->color_range = val;
569 } else if (plane->funcs->atomic_set_property) {
570 return plane->funcs->atomic_set_property(plane, state,
571 property, val);
572 } else {
573		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
574 plane->base.id, plane->name,
575 property->base.id, property->name);
576 return -EINVAL;
577 }
578
579 return 0;
580}
581
582static int
583drm_atomic_plane_get_property(struct drm_plane *plane,
584 const struct drm_plane_state *state,
585 struct drm_property *property, uint64_t *val)
586{
587 struct drm_device *dev = plane->dev;
588 struct drm_mode_config *config = &dev->mode_config;
589
590 if (property == config->prop_fb_id) {
591 *val = (state->fb) ? state->fb->base.id : 0;
592 } else if (property == config->prop_in_fence_fd) {
593 *val = -1;
594 } else if (property == config->prop_crtc_id) {
595 *val = (state->crtc) ? state->crtc->base.id : 0;
596 } else if (property == config->prop_crtc_x) {
597 *val = I642U64(state->crtc_x);
598 } else if (property == config->prop_crtc_y) {
599 *val = I642U64(state->crtc_y);
600 } else if (property == config->prop_crtc_w) {
601 *val = state->crtc_w;
602 } else if (property == config->prop_crtc_h) {
603 *val = state->crtc_h;
604 } else if (property == config->prop_src_x) {
605 *val = state->src_x;
606 } else if (property == config->prop_src_y) {
607 *val = state->src_y;
608 } else if (property == config->prop_src_w) {
609 *val = state->src_w;
610 } else if (property == config->prop_src_h) {
611 *val = state->src_h;
612 } else if (property == plane->alpha_property) {
613 *val = state->alpha;
614 } else if (property == plane->blend_mode_property) {
615 *val = state->pixel_blend_mode;
616 } else if (property == plane->rotation_property) {
617 *val = state->rotation;
618 } else if (property == plane->zpos_property) {
619 *val = state->zpos;
620 } else if (property == plane->color_encoding_property) {
621 *val = state->color_encoding;
622 } else if (property == plane->color_range_property) {
623 *val = state->color_range;
624 } else if (plane->funcs->atomic_get_property) {
625 return plane->funcs->atomic_get_property(plane, state, property, val);
626 } else {
627 return -EINVAL;
628 }
629
630 return 0;
631}
632
633static struct drm_writeback_job *
634drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
635{
636 WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
637
638 if (!conn_state->writeback_job)
639 conn_state->writeback_job =
640 kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
641
642 return conn_state->writeback_job;
643}
644
645static int drm_atomic_set_writeback_fb_for_connector(
646 struct drm_connector_state *conn_state,
647 struct drm_framebuffer *fb)
648{
649 struct drm_writeback_job *job =
650 drm_atomic_get_writeback_job(conn_state);
651 if (!job)
652 return -ENOMEM;
653
654 drm_framebuffer_assign(&job->fb, fb);
655
656 if (fb)
657 DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
658 fb->base.id, conn_state);
659 else
660 DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
661 conn_state);
662
663 return 0;
664}
665
666static int drm_atomic_connector_set_property(struct drm_connector *connector,
667 struct drm_connector_state *state, struct drm_property *property,
668 uint64_t val)
669{
670 struct drm_device *dev = connector->dev;
671 struct drm_mode_config *config = &dev->mode_config;
672
673 if (property == config->prop_crtc_id) {
674 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
675 return drm_atomic_set_crtc_for_connector(state, crtc);
676 } else if (property == config->dpms_property) {
677		/* Setting the DPMS property requires special handling, which
678		 * is done in the legacy setprop path for us. Disallow (for
679		 * now?) atomic writes to the DPMS property:
680 */
681 return -EINVAL;
682 } else if (property == config->tv_select_subconnector_property) {
683 state->tv.subconnector = val;
684 } else if (property == config->tv_left_margin_property) {
685 state->tv.margins.left = val;
686 } else if (property == config->tv_right_margin_property) {
687 state->tv.margins.right = val;
688 } else if (property == config->tv_top_margin_property) {
689 state->tv.margins.top = val;
690 } else if (property == config->tv_bottom_margin_property) {
691 state->tv.margins.bottom = val;
692 } else if (property == config->tv_mode_property) {
693 state->tv.mode = val;
694 } else if (property == config->tv_brightness_property) {
695 state->tv.brightness = val;
696 } else if (property == config->tv_contrast_property) {
697 state->tv.contrast = val;
698 } else if (property == config->tv_flicker_reduction_property) {
699 state->tv.flicker_reduction = val;
700 } else if (property == config->tv_overscan_property) {
701 state->tv.overscan = val;
702 } else if (property == config->tv_saturation_property) {
703 state->tv.saturation = val;
704 } else if (property == config->tv_hue_property) {
705 state->tv.hue = val;
706 } else if (property == config->link_status_property) {
707		/* Never downgrade from GOOD to BAD on userspace's request here;
708		 * only hw issues can do that.
709		 *
710		 * For an atomic property userspace doesn't need to be able
711		 * to understand all the properties, but it needs to be able to
712		 * restore the state it wants on VT switch. So if userspace
713		 * tries to change the link_status from GOOD to BAD, the driver
714		 * silently rejects it and returns 0. This prevents userspace
715		 * from accidentally breaking the display when it restores the
716		 * state.
717 */
718 if (state->link_status != DRM_LINK_STATUS_GOOD)
719 state->link_status = val;
720 } else if (property == config->aspect_ratio_property) {
721 state->picture_aspect_ratio = val;
722 } else if (property == config->content_type_property) {
723 state->content_type = val;
724 } else if (property == connector->scaling_mode_property) {
725 state->scaling_mode = val;
726 } else if (property == connector->content_protection_property) {
727 if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
728 DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
729 return -EINVAL;
730 }
731 state->content_protection = val;
732 } else if (property == config->writeback_fb_id_property) {
733 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
734 int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
735 if (fb)
736 drm_framebuffer_put(fb);
737 return ret;
738 } else if (property == config->writeback_out_fence_ptr_property) {
739 s32 __user *fence_ptr = u64_to_user_ptr(val);
740
741 return set_out_fence_for_connector(state->state, connector,
742 fence_ptr);
743 } else if (connector->funcs->atomic_set_property) {
744 return connector->funcs->atomic_set_property(connector,
745 state, property, val);
746 } else {
747		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
748 connector->base.id, connector->name,
749 property->base.id, property->name);
750 return -EINVAL;
751 }
752
753 return 0;
754}
755
756static int
757drm_atomic_connector_get_property(struct drm_connector *connector,
758 const struct drm_connector_state *state,
759 struct drm_property *property, uint64_t *val)
760{
761 struct drm_device *dev = connector->dev;
762 struct drm_mode_config *config = &dev->mode_config;
763
764 if (property == config->prop_crtc_id) {
765 *val = (state->crtc) ? state->crtc->base.id : 0;
766 } else if (property == config->dpms_property) {
767 *val = connector->dpms;
768 } else if (property == config->tv_select_subconnector_property) {
769 *val = state->tv.subconnector;
770 } else if (property == config->tv_left_margin_property) {
771 *val = state->tv.margins.left;
772 } else if (property == config->tv_right_margin_property) {
773 *val = state->tv.margins.right;
774 } else if (property == config->tv_top_margin_property) {
775 *val = state->tv.margins.top;
776 } else if (property == config->tv_bottom_margin_property) {
777 *val = state->tv.margins.bottom;
778 } else if (property == config->tv_mode_property) {
779 *val = state->tv.mode;
780 } else if (property == config->tv_brightness_property) {
781 *val = state->tv.brightness;
782 } else if (property == config->tv_contrast_property) {
783 *val = state->tv.contrast;
784 } else if (property == config->tv_flicker_reduction_property) {
785 *val = state->tv.flicker_reduction;
786 } else if (property == config->tv_overscan_property) {
787 *val = state->tv.overscan;
788 } else if (property == config->tv_saturation_property) {
789 *val = state->tv.saturation;
790 } else if (property == config->tv_hue_property) {
791 *val = state->tv.hue;
792 } else if (property == config->link_status_property) {
793 *val = state->link_status;
794 } else if (property == config->aspect_ratio_property) {
795 *val = state->picture_aspect_ratio;
796 } else if (property == config->content_type_property) {
797 *val = state->content_type;
798 } else if (property == connector->scaling_mode_property) {
799 *val = state->scaling_mode;
800 } else if (property == connector->content_protection_property) {
801 *val = state->content_protection;
802 } else if (property == config->writeback_fb_id_property) {
803 /* Writeback framebuffer is one-shot, write and forget */
804 *val = 0;
805 } else if (property == config->writeback_out_fence_ptr_property) {
806 *val = 0;
807 } else if (connector->funcs->atomic_get_property) {
808 return connector->funcs->atomic_get_property(connector,
809 state, property, val);
810 } else {
811 return -EINVAL;
812 }
813
814 return 0;
815}
816
817int drm_atomic_get_property(struct drm_mode_object *obj,
818 struct drm_property *property, uint64_t *val)
819{
820 struct drm_device *dev = property->dev;
821 int ret;
822
823 switch (obj->type) {
824 case DRM_MODE_OBJECT_CONNECTOR: {
825 struct drm_connector *connector = obj_to_connector(obj);
826 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
827 ret = drm_atomic_connector_get_property(connector,
828 connector->state, property, val);
829 break;
830 }
831 case DRM_MODE_OBJECT_CRTC: {
832 struct drm_crtc *crtc = obj_to_crtc(obj);
833 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
834 ret = drm_atomic_crtc_get_property(crtc,
835 crtc->state, property, val);
836 break;
837 }
838 case DRM_MODE_OBJECT_PLANE: {
839 struct drm_plane *plane = obj_to_plane(obj);
840 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
841 ret = drm_atomic_plane_get_property(plane,
842 plane->state, property, val);
843 break;
844 }
845 default:
846 ret = -EINVAL;
847 break;
848 }
849
850 return ret;
851}
852
853/*
854 * The big monster ioctl
855 */
856
857static struct drm_pending_vblank_event *create_vblank_event(
858 struct drm_crtc *crtc, uint64_t user_data)
859{
860 struct drm_pending_vblank_event *e = NULL;
861
862 e = kzalloc(sizeof *e, GFP_KERNEL);
863 if (!e)
864 return NULL;
865
866 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
867 e->event.base.length = sizeof(e->event);
868 e->event.vbl.crtc_id = crtc->base.id;
869 e->event.vbl.user_data = user_data;
870
871 return e;
872}
873
874int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
875 struct drm_connector *connector,
876 int mode)
877{
878 struct drm_connector *tmp_connector;
879 struct drm_connector_state *new_conn_state;
880 struct drm_crtc *crtc;
881 struct drm_crtc_state *crtc_state;
882 int i, ret, old_mode = connector->dpms;
883 bool active = false;
884
885 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
886 state->acquire_ctx);
887 if (ret)
888 return ret;
889
890 if (mode != DRM_MODE_DPMS_ON)
891 mode = DRM_MODE_DPMS_OFF;
892 connector->dpms = mode;
893
894 crtc = connector->state->crtc;
895 if (!crtc)
896 goto out;
897 ret = drm_atomic_add_affected_connectors(state, crtc);
898 if (ret)
899 goto out;
900
901 crtc_state = drm_atomic_get_crtc_state(state, crtc);
902 if (IS_ERR(crtc_state)) {
903 ret = PTR_ERR(crtc_state);
904 goto out;
905 }
906
907 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
908 if (new_conn_state->crtc != crtc)
909 continue;
910 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
911 active = true;
912 break;
913 }
914 }
915
916 crtc_state->active = active;
917 ret = drm_atomic_commit(state);
918out:
919 if (ret != 0)
920 connector->dpms = old_mode;
921 return ret;
922}
923
924int drm_atomic_set_property(struct drm_atomic_state *state,
925 struct drm_mode_object *obj,
926 struct drm_property *prop,
927 uint64_t prop_value)
928{
929 struct drm_mode_object *ref;
930 int ret;
931
932 if (!drm_property_change_valid_get(prop, prop_value, &ref))
933 return -EINVAL;
934
935 switch (obj->type) {
936 case DRM_MODE_OBJECT_CONNECTOR: {
937 struct drm_connector *connector = obj_to_connector(obj);
938 struct drm_connector_state *connector_state;
939
940 connector_state = drm_atomic_get_connector_state(state, connector);
941 if (IS_ERR(connector_state)) {
942 ret = PTR_ERR(connector_state);
943 break;
944 }
945
946 ret = drm_atomic_connector_set_property(connector,
947 connector_state, prop, prop_value);
948 break;
949 }
950 case DRM_MODE_OBJECT_CRTC: {
951 struct drm_crtc *crtc = obj_to_crtc(obj);
952 struct drm_crtc_state *crtc_state;
953
954 crtc_state = drm_atomic_get_crtc_state(state, crtc);
955 if (IS_ERR(crtc_state)) {
956 ret = PTR_ERR(crtc_state);
957 break;
958 }
959
960 ret = drm_atomic_crtc_set_property(crtc,
961 crtc_state, prop, prop_value);
962 break;
963 }
964 case DRM_MODE_OBJECT_PLANE: {
965 struct drm_plane *plane = obj_to_plane(obj);
966 struct drm_plane_state *plane_state;
967
968 plane_state = drm_atomic_get_plane_state(state, plane);
969 if (IS_ERR(plane_state)) {
970 ret = PTR_ERR(plane_state);
971 break;
972 }
973
974 ret = drm_atomic_plane_set_property(plane,
975 plane_state, prop, prop_value);
976 break;
977 }
978 default:
979 ret = -EINVAL;
980 break;
981 }
982
983 drm_property_change_valid_put(prop, ref);
984 return ret;
985}
986
987/**
988 * DOC: explicit fencing properties
989 *
 990 * Explicit fencing allows userspace to control the buffer synchronization
 991 * between devices. A fence or a group of fences is transferred to/from
 992 * userspace using Sync File fds, and there are two DRM properties for that:
 993 * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
 994 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
 995 *
 996 * In contrast, with implicit fencing the kernel keeps track of any
 997 * ongoing rendering, and automatically ensures that the atomic update waits
 998 * for any pending rendering to complete. For shared buffers represented with
 999 * a &struct dma_buf this is tracked in &struct reservation_object.
1000 * Implicit syncing is how Linux has traditionally worked (e.g. DRI2/3 on X.org),
1001 * whereas explicit fencing is what Android wants.
1002 *
1003 * "IN_FENCE_FD":
1004 *	Use this property to pass a fence that DRM should wait on before
1005 *	proceeding with the Atomic Commit request and showing the framebuffer
1006 *	for the plane on the screen. The fence can be either a normal fence or
1007 *	a merged one; the sync_file framework will handle both cases and use a
1008 *	fence_array if a merged fence is received. Passing -1 here means there
1009 *	are no fences to wait on.
1010 *
1011 *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag,
1012 *	it will only check whether the Sync File is a valid one.
1013 *
1014 *	On the driver side the fence is stored in the @fence field of
1015 *	&struct drm_plane_state. Drivers which also support implicit fencing
1016 *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
1017 *	so that the precedence of implicit vs. explicit fencing is consistent
1018 *	across drivers.
1019 *
1020 * "OUT_FENCE_PTR":
1021 *	Use this property to pass a file descriptor pointer to DRM. Once the
1022 *	Atomic Commit request call returns, OUT_FENCE_PTR will be filled with
1023 *	the file descriptor number of a Sync File. This Sync File contains the
1024 *	CRTC fence that will be signaled when all framebuffers present on the
1025 *	Atomic Commit request for that given CRTC are scanned out on the
1026 *	screen.
1027 *
1028 *	The Atomic Commit request fails if an invalid pointer is passed. If the
1029 *	Atomic Commit request fails for any other reason the out fence fd
1030 *	returned will be -1. On an Atomic Commit with the
1031 *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
1032 *
1033 * Note that out-fences don't have a special interface to drivers and are
1034 * internally represented by a &struct drm_pending_vblank_event in
1035 * &struct drm_crtc_state, which is also used by the nonblocking atomic commit
1036 * helpers and for the DRM event handling for existing userspace.
1037 */
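
As an illustration (not part of this patch), the two properties are typically used together from userspace roughly as follows. This is a minimal sketch using libdrm's atomic helpers; the object and property IDs are assumed to have been discovered beforehand via GETRESOURCES/OBJECT_GETPROPERTIES, and error handling is omitted:

/* Hedged sketch: feed a render fence in via IN_FENCE_FD and collect the
 * CRTC out-fence through OUT_FENCE_PTR (all IDs below are placeholders). */
#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int example_commit_with_fences(int drm_fd, uint32_t plane_id,
				      uint32_t crtc_id, uint32_t in_fence_prop,
				      uint32_t out_fence_prop, int render_fence)
{
	int out_fence = -1;
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	struct pollfd pfd;
	int ret;

	/* The plane will not be shown before render_fence signals. */
	drmModeAtomicAddProperty(req, plane_id, in_fence_prop, render_fence);
	/* The kernel writes a sync_file fd into out_fence on success. */
	drmModeAtomicAddProperty(req, crtc_id, out_fence_prop,
				 (uint64_t)(uintptr_t)&out_fence);

	ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	if (ret)
		return ret;

	/* A sync_file fd is pollable; POLLIN means the CRTC fence signalled,
	 * i.e. the committed framebuffers are being scanned out. */
	pfd.fd = out_fence;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);
	close(out_fence);
	return 0;
}
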
1038
1039struct drm_out_fence_state {
1040 s32 __user *out_fence_ptr;
1041 struct sync_file *sync_file;
1042 int fd;
1043};
1044
1045static int setup_out_fence(struct drm_out_fence_state *fence_state,
1046 struct dma_fence *fence)
1047{
1048 fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
1049 if (fence_state->fd < 0)
1050 return fence_state->fd;
1051
1052 if (put_user(fence_state->fd, fence_state->out_fence_ptr))
1053 return -EFAULT;
1054
1055 fence_state->sync_file = sync_file_create(fence);
1056 if (!fence_state->sync_file)
1057 return -ENOMEM;
1058
1059 return 0;
1060}
1061
1062static int prepare_signaling(struct drm_device *dev,
1063 struct drm_atomic_state *state,
1064 struct drm_mode_atomic *arg,
1065 struct drm_file *file_priv,
1066 struct drm_out_fence_state **fence_state,
1067 unsigned int *num_fences)
1068{
1069 struct drm_crtc *crtc;
1070 struct drm_crtc_state *crtc_state;
1071 struct drm_connector *conn;
1072 struct drm_connector_state *conn_state;
1073 int i, c = 0, ret;
1074
1075 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
1076 return 0;
1077
1078 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1079 s32 __user *fence_ptr;
1080
1081 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
1082
1083 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
1084 struct drm_pending_vblank_event *e;
1085
1086 e = create_vblank_event(crtc, arg->user_data);
1087 if (!e)
1088 return -ENOMEM;
1089
1090 crtc_state->event = e;
1091 }
1092
1093 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
1094 struct drm_pending_vblank_event *e = crtc_state->event;
1095
1096 if (!file_priv)
1097 continue;
1098
1099 ret = drm_event_reserve_init(dev, file_priv, &e->base,
1100 &e->event.base);
1101 if (ret) {
1102 kfree(e);
1103 crtc_state->event = NULL;
1104 return ret;
1105 }
1106 }
1107
1108 if (fence_ptr) {
1109 struct dma_fence *fence;
1110 struct drm_out_fence_state *f;
1111
1112 f = krealloc(*fence_state, sizeof(**fence_state) *
1113 (*num_fences + 1), GFP_KERNEL);
1114 if (!f)
1115 return -ENOMEM;
1116
1117 memset(&f[*num_fences], 0, sizeof(*f));
1118
1119 f[*num_fences].out_fence_ptr = fence_ptr;
1120 *fence_state = f;
1121
1122 fence = drm_crtc_create_fence(crtc);
1123 if (!fence)
1124 return -ENOMEM;
1125
1126 ret = setup_out_fence(&f[(*num_fences)++], fence);
1127 if (ret) {
1128 dma_fence_put(fence);
1129 return ret;
1130 }
1131
1132 crtc_state->event->base.fence = fence;
1133 }
1134
1135 c++;
1136 }
1137
1138 for_each_new_connector_in_state(state, conn, conn_state, i) {
1139 struct drm_writeback_connector *wb_conn;
1140 struct drm_writeback_job *job;
1141 struct drm_out_fence_state *f;
1142 struct dma_fence *fence;
1143 s32 __user *fence_ptr;
1144
1145 fence_ptr = get_out_fence_for_connector(state, conn);
1146 if (!fence_ptr)
1147 continue;
1148
1149 job = drm_atomic_get_writeback_job(conn_state);
1150 if (!job)
1151 return -ENOMEM;
1152
1153 f = krealloc(*fence_state, sizeof(**fence_state) *
1154 (*num_fences + 1), GFP_KERNEL);
1155 if (!f)
1156 return -ENOMEM;
1157
1158 memset(&f[*num_fences], 0, sizeof(*f));
1159
1160 f[*num_fences].out_fence_ptr = fence_ptr;
1161 *fence_state = f;
1162
1163 wb_conn = drm_connector_to_writeback(conn);
1164 fence = drm_writeback_get_out_fence(wb_conn);
1165 if (!fence)
1166 return -ENOMEM;
1167
1168 ret = setup_out_fence(&f[(*num_fences)++], fence);
1169 if (ret) {
1170 dma_fence_put(fence);
1171 return ret;
1172 }
1173
1174 job->out_fence = fence;
1175 }
1176
1177 /*
1178	 * Having this flag set but no CRTC in the state means userspace would
1179	 * wait for an event that can never be signalled, so reject the request.
1180 */
1181 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1182 return -EINVAL;
1183
1184 return 0;
1185}
1186
1187static void complete_signaling(struct drm_device *dev,
1188 struct drm_atomic_state *state,
1189 struct drm_out_fence_state *fence_state,
1190 unsigned int num_fences,
1191 bool install_fds)
1192{
1193 struct drm_crtc *crtc;
1194 struct drm_crtc_state *crtc_state;
1195 int i;
1196
1197 if (install_fds) {
1198 for (i = 0; i < num_fences; i++)
1199 fd_install(fence_state[i].fd,
1200 fence_state[i].sync_file->file);
1201
1202 kfree(fence_state);
1203 return;
1204 }
1205
1206 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1207 struct drm_pending_vblank_event *event = crtc_state->event;
1208 /*
1209 * Free the allocated event. drm_atomic_helper_setup_commit
1210 * can allocate an event too, so only free it if it's ours
1211 * to prevent a double free in drm_atomic_state_clear.
1212 */
1213 if (event && (event->base.fence || event->base.file_priv)) {
1214 drm_event_cancel_free(dev, &event->base);
1215 crtc_state->event = NULL;
1216 }
1217 }
1218
1219 if (!fence_state)
1220 return;
1221
1222 for (i = 0; i < num_fences; i++) {
1223 if (fence_state[i].sync_file)
1224 fput(fence_state[i].sync_file->file);
1225 if (fence_state[i].fd >= 0)
1226 put_unused_fd(fence_state[i].fd);
1227
1228		/* If this fails, log it; there's nothing more we can do */
1229 if (fence_state[i].out_fence_ptr &&
1230 put_user(-1, fence_state[i].out_fence_ptr))
1231 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
1232 }
1233
1234 kfree(fence_state);
1235}
1236
1237int drm_mode_atomic_ioctl(struct drm_device *dev,
1238 void *data, struct drm_file *file_priv)
1239{
1240 struct drm_mode_atomic *arg = data;
1241 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
1242 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
1243 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
1244 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
1245 unsigned int copied_objs, copied_props;
1246 struct drm_atomic_state *state;
1247 struct drm_modeset_acquire_ctx ctx;
1248 struct drm_out_fence_state *fence_state;
1249 int ret = 0;
1250 unsigned int i, j, num_fences;
1251
1252 /* disallow for drivers not supporting atomic: */
1253 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1254 return -EINVAL;
1255
1256 /* disallow for userspace that has not enabled atomic cap (even
1257 * though this may be a bit overkill, since legacy userspace
1258 * wouldn't know how to call this ioctl)
1259 */
1260 if (!file_priv->atomic)
1261 return -EINVAL;
1262
1263 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
1264 return -EINVAL;
1265
1266 if (arg->reserved)
1267 return -EINVAL;
1268
1269 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
1270 !dev->mode_config.async_page_flip)
1271 return -EINVAL;
1272
1273 /* can't test and expect an event at the same time. */
1274 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
1275 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1276 return -EINVAL;
1277
1278 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1279
1280 state = drm_atomic_state_alloc(dev);
1281 if (!state)
1282 return -ENOMEM;
1283
1284 state->acquire_ctx = &ctx;
1285 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1286
1287retry:
1288 copied_objs = 0;
1289 copied_props = 0;
1290 fence_state = NULL;
1291 num_fences = 0;
1292
1293 for (i = 0; i < arg->count_objs; i++) {
1294 uint32_t obj_id, count_props;
1295 struct drm_mode_object *obj;
1296
1297 if (get_user(obj_id, objs_ptr + copied_objs)) {
1298 ret = -EFAULT;
1299 goto out;
1300 }
1301
1302 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
1303 if (!obj) {
1304 ret = -ENOENT;
1305 goto out;
1306 }
1307
1308 if (!obj->properties) {
1309 drm_mode_object_put(obj);
1310 ret = -ENOENT;
1311 goto out;
1312 }
1313
1314 if (get_user(count_props, count_props_ptr + copied_objs)) {
1315 drm_mode_object_put(obj);
1316 ret = -EFAULT;
1317 goto out;
1318 }
1319
1320 copied_objs++;
1321
1322 for (j = 0; j < count_props; j++) {
1323 uint32_t prop_id;
1324 uint64_t prop_value;
1325 struct drm_property *prop;
1326
1327 if (get_user(prop_id, props_ptr + copied_props)) {
1328 drm_mode_object_put(obj);
1329 ret = -EFAULT;
1330 goto out;
1331 }
1332
1333 prop = drm_mode_obj_find_prop_id(obj, prop_id);
1334 if (!prop) {
1335 drm_mode_object_put(obj);
1336 ret = -ENOENT;
1337 goto out;
1338 }
1339
1340 if (copy_from_user(&prop_value,
1341 prop_values_ptr + copied_props,
1342 sizeof(prop_value))) {
1343 drm_mode_object_put(obj);
1344 ret = -EFAULT;
1345 goto out;
1346 }
1347
1348 ret = drm_atomic_set_property(state, obj, prop,
1349 prop_value);
1350 if (ret) {
1351 drm_mode_object_put(obj);
1352 goto out;
1353 }
1354
1355 copied_props++;
1356 }
1357
1358 drm_mode_object_put(obj);
1359 }
1360
1361 ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
1362 &num_fences);
1363 if (ret)
1364 goto out;
1365
1366 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
1367 ret = drm_atomic_check_only(state);
1368 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
1369 ret = drm_atomic_nonblocking_commit(state);
1370 } else {
1371 if (unlikely(drm_debug & DRM_UT_STATE))
1372 drm_atomic_print_state(state);
1373
1374 ret = drm_atomic_commit(state);
1375 }
1376
1377out:
1378 complete_signaling(dev, state, fence_state, num_fences, !ret);
1379
1380 if (ret == -EDEADLK) {
1381 drm_atomic_state_clear(state);
1382 ret = drm_modeset_backoff(&ctx);
1383 if (!ret)
1384 goto retry;
1385 }
1386
1387 drm_atomic_state_put(state);
1388
1389 drm_modeset_drop_locks(&ctx);
1390 drm_modeset_acquire_fini(&ctx);
1391
1392 return ret;
1393}
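
By way of example (not part of this patch), the ioctl above walks four flat arrays supplied by userspace: one object ID and one property count per object, plus flattened (property, value) pairs in object order. A caller bypassing libdrm's helpers might pack a single-property commit roughly like this; IDs are placeholders and error handling is elided:

/* Hedged sketch of the raw DRM_IOCTL_MODE_ATOMIC array layout consumed by
 * drm_mode_atomic_ioctl() above. */
#include <stdint.h>
#include <xf86drm.h>

static int example_one_prop_commit(int drm_fd, uint32_t obj_id,
				   uint32_t prop_id, uint64_t value)
{
	uint32_t objs[1] = { obj_id };
	uint32_t count_props[1] = { 1 };	/* one property for this object */
	uint32_t props[1] = { prop_id };
	uint64_t prop_values[1] = { value };
	struct drm_mode_atomic arg = {
		.flags = DRM_MODE_ATOMIC_ALLOW_MODESET,
		.count_objs = 1,
		.objs_ptr = (uintptr_t)objs,
		.count_props_ptr = (uintptr_t)count_props,
		.props_ptr = (uintptr_t)props,
		.prop_values_ptr = (uintptr_t)prop_values,
	};

	return drmIoctl(drm_fd, DRM_IOCTL_MODE_ATOMIC, &arg);
}
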
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 402b62d3f072..0c78ca386cbe 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -101,6 +101,28 @@
101 * Without this property the rectangle is only scaled, but not rotated or 101 * Without this property the rectangle is only scaled, but not rotated or
102 * reflected. 102 * reflected.
103 * 103 *
104 *	Possible values:
105 *
106 * "rotate-<degrees>":
107 *		Signals that a drm plane is rotated <degrees> degrees in the
108 *		counter-clockwise direction.
109 *
110 * "reflect-<axis>":
111 *		Signals that the contents of a drm plane are reflected along the
112 * <axis> axis, in the same way as mirroring.
113 *
114 * reflect-x::
115 *
116 * |o | | o|
117 * | | -> | |
118 * | v| |v |
119 *
120 * reflect-y::
121 *
122 * |o | | ^|
123 * | | -> | |
124 * | v| |o |
125 *
104 * zpos: 126 * zpos:
105 * Z position is set up with drm_plane_create_zpos_immutable_property() and 127 * Z position is set up with drm_plane_create_zpos_immutable_property() and
106 * drm_plane_create_zpos_property(). It controls the visibility of overlapping 128 * drm_plane_create_zpos_property(). It controls the visibility of overlapping
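
For illustration only (not part of this patch), the rotation and reflection values documented in the hunk above are exposed per plane via drm_plane_create_rotation_property(); the mask of supported rotations is driver specific. A hedged kernel-side sketch with an arbitrary mask:

/* Hedged sketch: advertise 0/90/180/270 rotation plus X reflection on a plane.
 * The helper stores the property in plane->rotation_property, which the
 * atomic uapi code reads when userspace sets "rotation". */
#include <drm/drm_blend.h>
#include <drm/drm_plane.h>

static int example_plane_add_rotation(struct drm_plane *plane)
{
	unsigned int supported = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
				 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
				 DRM_MODE_REFLECT_X;

	return drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						  supported);
}
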
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 1638bfe9627c..ba7025041e46 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -104,6 +104,10 @@ EXPORT_SYMBOL(drm_bridge_remove);
104 * If non-NULL the previous bridge must be already attached by a call to this 104 * If non-NULL the previous bridge must be already attached by a call to this
105 * function. 105 * function.
106 * 106 *
107 * Note that bridges attached to encoders are auto-detached during encoder
108 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
109 * *not* be balanced with a drm_bridge_detach() in driver code.
110 *
107 * RETURNS: 111 * RETURNS:
108 * Zero on success, error code on failure 112 * Zero on success, error code on failure
109 */ 113 */
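
As a sketch (not part of this patch), the note added above translates into driver code as an attach without a matching detach; the function names here are hypothetical:

/* Hedged sketch: attach a bridge during encoder setup and rely on
 * drm_encoder_cleanup() to auto-detach it later. */
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>

static int example_encoder_attach_bridge(struct drm_encoder *encoder,
					 struct drm_bridge *bridge)
{
	int ret;

	ret = drm_bridge_attach(encoder, bridge, NULL);
	if (ret)
		DRM_ERROR("failed to attach bridge: %d\n", ret);

	/* Intentionally no drm_bridge_detach() anywhere in the driver. */
	return ret;
}
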
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 6011d769d50b..526619f963e5 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -20,11 +20,15 @@
20 * OF THIS SOFTWARE. 20 * OF THIS SOFTWARE.
21 */ 21 */
22 22
23#include <drm/drmP.h>
24#include <drm/drm_connector.h> 23#include <drm/drm_connector.h>
25#include <drm/drm_edid.h> 24#include <drm/drm_edid.h>
26#include <drm/drm_encoder.h> 25#include <drm/drm_encoder.h>
27#include <drm/drm_utils.h> 26#include <drm/drm_utils.h>
27#include <drm/drm_print.h>
28#include <drm/drm_drv.h>
29#include <drm/drm_file.h>
30
31#include <linux/uaccess.h>
28 32
29#include "drm_crtc_internal.h" 33#include "drm_crtc_internal.h"
30#include "drm_internal.h" 34#include "drm_internal.h"
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bae43938c8f6..2f6c877299e4 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,7 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/export.h> 35#include <linux/export.h>
36#include <linux/dma-fence.h> 36#include <linux/dma-fence.h>
37#include <drm/drmP.h> 37#include <linux/uaccess.h>
38#include <drm/drm_crtc.h> 38#include <drm/drm_crtc.h>
39#include <drm/drm_edid.h> 39#include <drm/drm_edid.h>
40#include <drm/drm_fourcc.h> 40#include <drm/drm_fourcc.h>
@@ -42,6 +42,9 @@
42#include <drm/drm_atomic.h> 42#include <drm/drm_atomic.h>
43#include <drm/drm_auth.h> 43#include <drm/drm_auth.h>
44#include <drm/drm_debugfs_crc.h> 44#include <drm/drm_debugfs_crc.h>
45#include <drm/drm_drv.h>
46#include <drm/drm_print.h>
47#include <drm/drm_file.h>
45 48
46#include "drm_crtc_internal.h" 49#include "drm_crtc_internal.h"
47#include "drm_internal.h" 50#include "drm_internal.h"
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 5a84c3bc915d..ce75e9506e85 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -35,6 +35,7 @@
35 35
36#include <drm/drmP.h> 36#include <drm/drmP.h>
37#include <drm/drm_atomic.h> 37#include <drm/drm_atomic.h>
38#include <drm/drm_atomic_uapi.h>
38#include <drm/drm_crtc.h> 39#include <drm/drm_crtc.h>
39#include <drm/drm_encoder.h> 40#include <drm/drm_encoder.h>
40#include <drm/drm_fourcc.h> 41#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index b61322763394..86893448f486 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -31,6 +31,14 @@
31 * and are not exported to drivers. 31 * and are not exported to drivers.
32 */ 32 */
33 33
34enum drm_mode_status;
35enum drm_connector_force;
36
37struct drm_display_mode;
38struct work_struct;
39struct drm_connector;
40struct drm_bridge;
41struct edid;
34 42
35/* drm_crtc.c */ 43/* drm_crtc.c */
36int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, 44int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@@ -174,6 +182,8 @@ void drm_fb_release(struct drm_file *file_priv);
174 182
175int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or, 183int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
176 struct drm_file *file_priv); 184 struct drm_file *file_priv);
185int drm_mode_addfb2(struct drm_device *dev,
186 void *data, struct drm_file *file_priv);
177int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, 187int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
178 struct drm_file *file_priv); 188 struct drm_file *file_priv);
179 189
@@ -181,8 +191,8 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
181/* IOCTL */ 191/* IOCTL */
182int drm_mode_addfb_ioctl(struct drm_device *dev, 192int drm_mode_addfb_ioctl(struct drm_device *dev,
183 void *data, struct drm_file *file_priv); 193 void *data, struct drm_file *file_priv);
184int drm_mode_addfb2(struct drm_device *dev, 194int drm_mode_addfb2_ioctl(struct drm_device *dev,
185 void *data, struct drm_file *file_priv); 195 void *data, struct drm_file *file_priv);
186int drm_mode_rmfb_ioctl(struct drm_device *dev, 196int drm_mode_rmfb_ioctl(struct drm_device *dev,
187 void *data, struct drm_file *file_priv); 197 void *data, struct drm_file *file_priv);
188int drm_mode_getfb(struct drm_device *dev, 198int drm_mode_getfb(struct drm_device *dev,
@@ -196,6 +206,9 @@ struct drm_minor;
196int drm_atomic_debugfs_init(struct drm_minor *minor); 206int drm_atomic_debugfs_init(struct drm_minor *minor);
197#endif 207#endif
198 208
209void drm_atomic_print_state(const struct drm_atomic_state *state);
210
211/* drm_atomic_uapi.c */
199int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, 212int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
200 struct drm_connector *connector, 213 struct drm_connector *connector,
201 int mode); 214 int mode);
@@ -205,6 +218,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
205 uint64_t prop_value); 218 uint64_t prop_value);
206int drm_atomic_get_property(struct drm_mode_object *obj, 219int drm_atomic_get_property(struct drm_mode_object *obj,
207 struct drm_property *property, uint64_t *val); 220 struct drm_property *property, uint64_t *val);
221
222/* IOCTL */
208int drm_mode_atomic_ioctl(struct drm_device *dev, 223int drm_mode_atomic_ioctl(struct drm_device *dev,
209 void *data, struct drm_file *file_priv); 224 void *data, struct drm_file *file_priv);
210 225
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b0dd20bccb8..8e95d0f7c71d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2673,6 +2673,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
2673 2673
2674 info = fb_helper->fbdev; 2674 info = fb_helper->fbdev;
2675 info->var.pixclock = 0; 2675 info->var.pixclock = 0;
2676 /* don't leak any physical addresses to userspace */
2677 info->flags |= FBINFO_HIDE_SMEM_START;
2676 2678
2677 /* Need to drop locks to avoid recursive deadlock in 2679 /* Need to drop locks to avoid recursive deadlock in
2678 * register_framebuffer. This is ok because the only thing left to do is 2680 * register_framebuffer. This is ok because the only thing left to do is
@@ -2821,7 +2823,9 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
2821 * The caller must provide a &drm_fb_helper_funcs->fb_probe callback 2823 * The caller must provide a &drm_fb_helper_funcs->fb_probe callback
2822 * function. 2824 * function.
2823 * 2825 *
2824 * See also: drm_fb_helper_initial_config() 2826 * Use drm_fb_helper_fbdev_teardown() to destroy the fbdev.
2827 *
2828 * See also: drm_fb_helper_initial_config(), drm_fbdev_generic_setup().
2825 * 2829 *
2826 * Returns: 2830 * Returns:
2827 * Zero on success or negative error code on failure. 2831 * Zero on success or negative error code on failure.
@@ -3037,7 +3041,7 @@ static struct fb_deferred_io drm_fbdev_defio = {
3037 * @fb_helper: fbdev helper structure 3041 * @fb_helper: fbdev helper structure
3038 * @sizes: describes fbdev size and scanout surface size 3042 * @sizes: describes fbdev size and scanout surface size
3039 * 3043 *
3040 * This function uses the client API to crate a framebuffer backed by a dumb buffer. 3044 * This function uses the client API to create a framebuffer backed by a dumb buffer.
3041 * 3045 *
3042 * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, 3046 * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
3043 * fb_copyarea, fb_imageblit. 3047 * fb_copyarea, fb_imageblit.
@@ -3165,8 +3169,10 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
3165 if (dev->fb_helper) 3169 if (dev->fb_helper)
3166 return drm_fb_helper_hotplug_event(dev->fb_helper); 3170 return drm_fb_helper_hotplug_event(dev->fb_helper);
3167 3171
3168 if (!dev->mode_config.num_connector) 3172 if (!dev->mode_config.num_connector) {
3173 DRM_DEV_DEBUG(dev->dev, "No connectors found, will not create framebuffer!\n");
3169 return 0; 3174 return 0;
3175 }
3170 3176
3171 ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs, 3177 ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
3172 fb_helper->preferred_bpp, 0); 3178 fb_helper->preferred_bpp, 0);
@@ -3187,13 +3193,14 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
3187}; 3193};
3188 3194
3189/** 3195/**
3190 * drm_fb_helper_generic_fbdev_setup() - Setup generic fbdev emulation 3196 * drm_fbdev_generic_setup() - Setup generic fbdev emulation
3191 * @dev: DRM device 3197 * @dev: DRM device
3192 * @preferred_bpp: Preferred bits per pixel for the device. 3198 * @preferred_bpp: Preferred bits per pixel for the device.
3193 * @dev->mode_config.preferred_depth is used if this is zero. 3199 * @dev->mode_config.preferred_depth is used if this is zero.
3194 * 3200 *
3195 * This function sets up generic fbdev emulation for drivers that support 3201 * This function sets up generic fbdev emulation for drivers that support
3196 * dumb buffers with a virtual address and that can be mmap'ed. 3202 * dumb buffers with a virtual address and that can be mmap'ed. If the driver
3203 * does not support these functions, it could use drm_fb_helper_fbdev_setup().
3197 * 3204 *
3198 * Restore, hotplug events and teardown are all taken care of. Drivers that do 3205 * Restore, hotplug events and teardown are all taken care of. Drivers that do
3199 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. 3206 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -3206,6 +3213,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
3206 * This function is safe to call even when there are no connectors present. 3213 * This function is safe to call even when there are no connectors present.
3207 * Setup will be retried on the next hotplug event. 3214 * Setup will be retried on the next hotplug event.
3208 * 3215 *
3216 * The fbdev is destroyed by drm_dev_unregister().
3217 *
3209 * Returns: 3218 * Returns:
3210 * Zero on success or negative error code on failure. 3219 * Zero on success or negative error code on failure.
3211 */ 3220 */
@@ -3214,6 +3223,8 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
3214 struct drm_fb_helper *fb_helper; 3223 struct drm_fb_helper *fb_helper;
3215 int ret; 3224 int ret;
3216 3225
3226 WARN(dev->fb_helper, "fb_helper is already set!\n");
3227
3217 if (!drm_fbdev_emulation) 3228 if (!drm_fbdev_emulation)
3218 return 0; 3229 return 0;
3219 3230
@@ -3224,12 +3235,15 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
3224 ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); 3235 ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
3225 if (ret) { 3236 if (ret) {
3226 kfree(fb_helper); 3237 kfree(fb_helper);
3238 DRM_DEV_ERROR(dev->dev, "Failed to register client: %d\n", ret);
3227 return ret; 3239 return ret;
3228 } 3240 }
3229 3241
3230 fb_helper->preferred_bpp = preferred_bpp; 3242 fb_helper->preferred_bpp = preferred_bpp;
3231 3243
3232 drm_fbdev_client_hotplug(&fb_helper->client); 3244 ret = drm_fbdev_client_hotplug(&fb_helper->client);
3245 if (ret)
3246 DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
3233 3247
3234 return 0; 3248 return 0;
3235} 3249}
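
For illustration (not part of this patch), a driver using the generic fbdev client now only has to call drm_fbdev_generic_setup() once after registering the device; teardown is handled by drm_dev_unregister() per the doc above. A hedged sketch:

/* Hedged sketch: register the device, then hand fbdev emulation to the
 * generic client. Setup failures are only logged and retried on hotplug. */
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

static int example_register_with_fbdev(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* 32 bpp preferred; 0 would use dev->mode_config.preferred_depth. */
	drm_fbdev_generic_setup(drm, 32);
	return 0;
}
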
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 35c1e2742c27..be1d6aaef651 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -45,32 +45,49 @@ static char printable_char(int c)
45 */ 45 */
46uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) 46uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
47{ 47{
48 uint32_t fmt; 48 uint32_t fmt = DRM_FORMAT_INVALID;
49 49
50 switch (bpp) { 50 switch (bpp) {
51 case 8: 51 case 8:
52 fmt = DRM_FORMAT_C8; 52 if (depth == 8)
53 fmt = DRM_FORMAT_C8;
53 break; 54 break;
55
54 case 16: 56 case 16:
55 if (depth == 15) 57 switch (depth) {
58 case 15:
56 fmt = DRM_FORMAT_XRGB1555; 59 fmt = DRM_FORMAT_XRGB1555;
57 else 60 break;
61 case 16:
58 fmt = DRM_FORMAT_RGB565; 62 fmt = DRM_FORMAT_RGB565;
63 break;
64 default:
65 break;
66 }
59 break; 67 break;
68
60 case 24: 69 case 24:
61 fmt = DRM_FORMAT_RGB888; 70 if (depth == 24)
71 fmt = DRM_FORMAT_RGB888;
62 break; 72 break;
73
63 case 32: 74 case 32:
64 if (depth == 24) 75 switch (depth) {
76 case 24:
65 fmt = DRM_FORMAT_XRGB8888; 77 fmt = DRM_FORMAT_XRGB8888;
66 else if (depth == 30) 78 break;
79 case 30:
67 fmt = DRM_FORMAT_XRGB2101010; 80 fmt = DRM_FORMAT_XRGB2101010;
68 else 81 break;
82 case 32:
69 fmt = DRM_FORMAT_ARGB8888; 83 fmt = DRM_FORMAT_ARGB8888;
84 break;
85 default:
86 break;
87 }
70 break; 88 break;
89
71 default: 90 default:
72 DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
73 fmt = DRM_FORMAT_XRGB8888;
74 break; 91 break;
75 } 92 }
76 93
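
As an aside (not part of this patch), callers of drm_mode_legacy_fb_format() are now expected to check for DRM_FORMAT_INVALID instead of relying on the old XRGB8888 fallback, which is exactly what the ADDFB path in the next hunk does. A caller-side sketch:

/* Hedged sketch: map a legacy bpp/depth pair and reject unknown combinations. */
#include <drm/drm_fourcc.h>

static int example_pick_legacy_format(u32 bpp, u32 depth, u32 *fourcc)
{
	u32 fmt = drm_mode_legacy_fb_format(bpp, depth);

	if (fmt == DRM_FORMAT_INVALID)	/* e.g. bpp=16 with depth=24 */
		return -EINVAL;

	*fourcc = fmt;
	return 0;
}
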
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 781af1d42d76..6eaacd4eb8cc 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -25,6 +25,7 @@
25#include <drm/drm_auth.h> 25#include <drm/drm_auth.h>
26#include <drm/drm_framebuffer.h> 26#include <drm/drm_framebuffer.h>
27#include <drm/drm_atomic.h> 27#include <drm/drm_atomic.h>
28#include <drm/drm_atomic_uapi.h>
28#include <drm/drm_print.h> 29#include <drm/drm_print.h>
29 30
30#include "drm_internal.h" 31#include "drm_internal.h"
@@ -112,18 +113,34 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
112 struct drm_mode_fb_cmd2 r = {}; 113 struct drm_mode_fb_cmd2 r = {};
113 int ret; 114 int ret;
114 115
116 r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
117 if (r.pixel_format == DRM_FORMAT_INVALID) {
118 DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
119 return -EINVAL;
120 }
121
115 /* convert to new format and call new ioctl */ 122 /* convert to new format and call new ioctl */
116 r.fb_id = or->fb_id; 123 r.fb_id = or->fb_id;
117 r.width = or->width; 124 r.width = or->width;
118 r.height = or->height; 125 r.height = or->height;
119 r.pitches[0] = or->pitch; 126 r.pitches[0] = or->pitch;
120 r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
121 r.handles[0] = or->handle; 127 r.handles[0] = or->handle;
122 128
123 if (r.pixel_format == DRM_FORMAT_XRGB2101010 && 129 if (dev->mode_config.quirk_addfb_prefer_xbgr_30bpp &&
124 dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) 130 r.pixel_format == DRM_FORMAT_XRGB2101010)
125 r.pixel_format = DRM_FORMAT_XBGR2101010; 131 r.pixel_format = DRM_FORMAT_XBGR2101010;
126 132
133 if (dev->mode_config.quirk_addfb_prefer_host_byte_order) {
134 if (r.pixel_format == DRM_FORMAT_XRGB8888)
135 r.pixel_format = DRM_FORMAT_HOST_XRGB8888;
136 if (r.pixel_format == DRM_FORMAT_ARGB8888)
137 r.pixel_format = DRM_FORMAT_HOST_ARGB8888;
138 if (r.pixel_format == DRM_FORMAT_RGB565)
139 r.pixel_format = DRM_FORMAT_HOST_RGB565;
140 if (r.pixel_format == DRM_FORMAT_XRGB1555)
141 r.pixel_format = DRM_FORMAT_HOST_XRGB1555;
142 }
143
127 ret = drm_mode_addfb2(dev, &r, file_priv); 144 ret = drm_mode_addfb2(dev, &r, file_priv);
128 if (ret) 145 if (ret)
129 return ret; 146 return ret;
@@ -164,7 +181,7 @@ static int framebuffer_check(struct drm_device *dev,
164 int i; 181 int i;
165 182
166 /* check if the format is supported at all */ 183 /* check if the format is supported at all */
167 info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN); 184 info = __drm_format_info(r->pixel_format);
168 if (!info) { 185 if (!info) {
169 struct drm_format_name_buf format_name; 186 struct drm_format_name_buf format_name;
170 187
@@ -352,6 +369,30 @@ int drm_mode_addfb2(struct drm_device *dev,
352 return 0; 369 return 0;
353} 370}
354 371
372int drm_mode_addfb2_ioctl(struct drm_device *dev,
373 void *data, struct drm_file *file_priv)
374{
375#ifdef __BIG_ENDIAN
376 if (!dev->mode_config.quirk_addfb_prefer_host_byte_order) {
377 /*
378 * Drivers must set the
379 * quirk_addfb_prefer_host_byte_order quirk to make
380 * the drm_mode_addfb() compat code work correctly on
381 * bigendian machines.
382 *
383	 * If they don't, pixel_format values are interpreted
384	 * incorrectly for bug compatibility, which in turn
385	 * implies that the ADDFB2 ioctl does not work correctly
386	 * either. So block it to make userspace fall back to
387	 * ADDFB.
388 */
389		DRM_DEBUG_KMS("addfb2 broken on bigendian\n");
390 return -EINVAL;
391 }
392#endif
393 return drm_mode_addfb2(dev, data, file_priv);
394}
395
355struct drm_mode_rmfb_work { 396struct drm_mode_rmfb_work {
356 struct work_struct work; 397 struct work_struct work;
357 struct list_head fbs; 398 struct list_head fbs;
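
For illustration only (not part of this patch), the quirk consulted above is opt-in: a big-endian driver that wants host-byte-order behaviour sets it during mode-config init and advertises the matching DRM_FORMAT_HOST_* fourccs on its planes. A hedged sketch with hypothetical names:

/* Hedged sketch: opt a driver into host-byte-order ADDFB handling so that
 * drm_mode_addfb() picks DRM_FORMAT_HOST_* and ADDFB2 stays usable on
 * big-endian machines (see drm_mode_addfb2_ioctl() above). */
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode_config.h>

static const u32 example_plane_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,	/* XRGB8888 in CPU byte order */
	DRM_FORMAT_HOST_RGB565,
};

static void example_mode_config_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);
	dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
	/* example_plane_formats[] would then be passed to
	 * drm_universal_plane_init() when registering planes. */
}
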
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 2810d4131411..7607f9cd6f77 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -16,6 +16,7 @@
16 16
17#include <drm/drmP.h> 17#include <drm/drmP.h>
18#include <drm/drm_atomic.h> 18#include <drm/drm_atomic.h>
19#include <drm/drm_atomic_uapi.h>
19#include <drm/drm_fb_helper.h> 20#include <drm/drm_fb_helper.h>
20#include <drm/drm_fourcc.h> 21#include <drm/drm_fourcc.h>
21#include <drm/drm_framebuffer.h> 22#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 40179c5fc6b8..0c4eb4a9ab31 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -21,9 +21,14 @@
21 * OTHER DEALINGS IN THE SOFTWARE. 21 * OTHER DEALINGS IN THE SOFTWARE.
22 */ 22 */
23 23
24#include <drm/drm_ioctl.h>
25
24#define DRM_IF_MAJOR 1 26#define DRM_IF_MAJOR 1
25#define DRM_IF_MINOR 4 27#define DRM_IF_MINOR 4
26 28
29struct drm_prime_file_private;
30struct dma_buf;
31
27/* drm_file.c */ 32/* drm_file.c */
28extern struct mutex drm_global_mutex; 33extern struct mutex drm_global_mutex;
29struct drm_file *drm_file_alloc(struct drm_minor *minor); 34struct drm_file *drm_file_alloc(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ea10e9a26aad..6b4a633b4240 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -645,7 +645,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
645 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), 645 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
646 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), 646 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
647 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED), 647 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
648 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED), 648 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, DRM_UNLOCKED),
649 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED), 649 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
650 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), 650 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
651 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), 651 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 6153cbda239f..4a72c6829d73 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -20,8 +20,17 @@
20 * OF THIS SOFTWARE. 20 * OF THIS SOFTWARE.
21 */ 21 */
22 22
23#include <drm/drmP.h> 23#include <linux/slab.h>
24#include <linux/uaccess.h>
25
24#include <drm/drm_plane.h> 26#include <drm/drm_plane.h>
27#include <drm/drm_drv.h>
28#include <drm/drm_print.h>
29#include <drm/drm_framebuffer.h>
30#include <drm/drm_file.h>
31#include <drm/drm_crtc.h>
32#include <drm/drm_fourcc.h>
33#include <drm/drm_vblank.h>
25 34
26#include "drm_crtc_internal.h" 35#include "drm_crtc_internal.h"
27 36
@@ -463,7 +472,6 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
463 struct drm_file *file_priv) 472 struct drm_file *file_priv)
464{ 473{
465 struct drm_mode_get_plane_res *plane_resp = data; 474 struct drm_mode_get_plane_res *plane_resp = data;
466 struct drm_mode_config *config;
467 struct drm_plane *plane; 475 struct drm_plane *plane;
468 uint32_t __user *plane_ptr; 476 uint32_t __user *plane_ptr;
469 int count = 0; 477 int count = 0;
@@ -471,7 +479,6 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
471 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 479 if (!drm_core_check_feature(dev, DRIVER_MODESET))
472 return -EINVAL; 480 return -EINVAL;
473 481
474 config = &dev->mode_config;
475 plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr); 482 plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr);
476 483
477 /* 484 /*
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 621f17643bb0..a393756b664e 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -28,6 +28,7 @@
28#include <drm/drm_plane_helper.h> 28#include <drm/drm_plane_helper.h>
29#include <drm/drm_rect.h> 29#include <drm/drm_rect.h>
30#include <drm/drm_atomic.h> 30#include <drm/drm_atomic.h>
31#include <drm/drm_atomic_uapi.h>
31#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
32#include <drm/drm_encoder.h> 33#include <drm/drm_encoder.h>
33#include <drm/drm_atomic_helper.h> 34#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 3a8837c49639..e9ce623d049e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,6 +56,33 @@
56#include "drm_internal.h" 56#include "drm_internal.h"
57#include <drm/drm_syncobj.h> 57#include <drm/drm_syncobj.h>
58 58
59struct drm_syncobj_stub_fence {
60 struct dma_fence base;
61 spinlock_t lock;
62};
63
64static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
65{
66 return "syncobjstub";
67}
68
69static bool drm_syncobj_stub_fence_enable_signaling(struct dma_fence *fence)
70{
71 return !dma_fence_is_signaled(fence);
72}
73
74static void drm_syncobj_stub_fence_release(struct dma_fence *f)
75{
76 kfree(f);
77}
78static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
79 .get_driver_name = drm_syncobj_stub_fence_get_name,
80 .get_timeline_name = drm_syncobj_stub_fence_get_name,
81 .enable_signaling = drm_syncobj_stub_fence_enable_signaling,
82 .release = drm_syncobj_stub_fence_release,
83};
84
85
59/** 86/**
60 * drm_syncobj_find - lookup and reference a sync object. 87 * drm_syncobj_find - lookup and reference a sync object.
61 * @file_private: drm file private pointer 88 * @file_private: drm file private pointer
@@ -140,11 +167,13 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
140/** 167/**
141 * drm_syncobj_replace_fence - replace fence in a sync object. 168 * drm_syncobj_replace_fence - replace fence in a sync object.
142 * @syncobj: Sync object to replace fence in 169 * @syncobj: Sync object to replace fence in
170 * @point: timeline point
143 * @fence: fence to install in sync file. 171 * @fence: fence to install in sync file.
144 * 172 *
145 * This replaces the fence on a sync object. 173 * This replaces the fence on a sync object, or a timeline point fence.
146 */ 174 */
147void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, 175void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
176 u64 point,
148 struct dma_fence *fence) 177 struct dma_fence *fence)
149{ 178{
150 struct dma_fence *old_fence; 179 struct dma_fence *old_fence;
@@ -172,42 +201,19 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
172} 201}
173EXPORT_SYMBOL(drm_syncobj_replace_fence); 202EXPORT_SYMBOL(drm_syncobj_replace_fence);
174 203
175struct drm_syncobj_null_fence {
176 struct dma_fence base;
177 spinlock_t lock;
178};
179
180static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
181{
182 return "syncobjnull";
183}
184
185static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
186{
187 dma_fence_enable_sw_signaling(fence);
188 return !dma_fence_is_signaled(fence);
189}
190
191static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
192 .get_driver_name = drm_syncobj_null_fence_get_name,
193 .get_timeline_name = drm_syncobj_null_fence_get_name,
194 .enable_signaling = drm_syncobj_null_fence_enable_signaling,
195 .release = NULL,
196};
197
198static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) 204static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
199{ 205{
200 struct drm_syncobj_null_fence *fence; 206 struct drm_syncobj_stub_fence *fence;
201 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 207 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
202 if (fence == NULL) 208 if (fence == NULL)
203 return -ENOMEM; 209 return -ENOMEM;
204 210
205 spin_lock_init(&fence->lock); 211 spin_lock_init(&fence->lock);
206 dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops, 212 dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
207 &fence->lock, 0, 0); 213 &fence->lock, 0, 0);
208 dma_fence_signal(&fence->base); 214 dma_fence_signal(&fence->base);
209 215
210 drm_syncobj_replace_fence(syncobj, &fence->base); 216 drm_syncobj_replace_fence(syncobj, 0, &fence->base);
211 217
212 dma_fence_put(&fence->base); 218 dma_fence_put(&fence->base);
213 219
@@ -218,6 +224,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
218 * drm_syncobj_find_fence - lookup and reference the fence in a sync object 224 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
219 * @file_private: drm file private pointer 225 * @file_private: drm file private pointer
220 * @handle: sync object handle to lookup. 226 * @handle: sync object handle to lookup.
227 * @point: timeline point
221 * @fence: out parameter for the fence 228 * @fence: out parameter for the fence
222 * 229 *
223 * This is just a convenience function that combines drm_syncobj_find() and 230 * This is just a convenience function that combines drm_syncobj_find() and
@@ -228,7 +235,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
228 * dma_fence_put(). 235 * dma_fence_put().
229 */ 236 */
230int drm_syncobj_find_fence(struct drm_file *file_private, 237int drm_syncobj_find_fence(struct drm_file *file_private,
231 u32 handle, 238 u32 handle, u64 point,
232 struct dma_fence **fence) 239 struct dma_fence **fence)
233{ 240{
234 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); 241 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
@@ -257,7 +264,7 @@ void drm_syncobj_free(struct kref *kref)
257 struct drm_syncobj *syncobj = container_of(kref, 264 struct drm_syncobj *syncobj = container_of(kref,
258 struct drm_syncobj, 265 struct drm_syncobj,
259 refcount); 266 refcount);
260 drm_syncobj_replace_fence(syncobj, NULL); 267 drm_syncobj_replace_fence(syncobj, 0, NULL);
261 kfree(syncobj); 268 kfree(syncobj);
262} 269}
263EXPORT_SYMBOL(drm_syncobj_free); 270EXPORT_SYMBOL(drm_syncobj_free);
@@ -297,7 +304,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
297 } 304 }
298 305
299 if (fence) 306 if (fence)
300 drm_syncobj_replace_fence(syncobj, fence); 307 drm_syncobj_replace_fence(syncobj, 0, fence);
301 308
302 *out_syncobj = syncobj; 309 *out_syncobj = syncobj;
303 return 0; 310 return 0;
@@ -482,7 +489,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
482 return -ENOENT; 489 return -ENOENT;
483 } 490 }
484 491
485 drm_syncobj_replace_fence(syncobj, fence); 492 drm_syncobj_replace_fence(syncobj, 0, fence);
486 dma_fence_put(fence); 493 dma_fence_put(fence);
487 drm_syncobj_put(syncobj); 494 drm_syncobj_put(syncobj);
488 return 0; 495 return 0;
@@ -499,7 +506,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
499 if (fd < 0) 506 if (fd < 0)
500 return fd; 507 return fd;
501 508
502 ret = drm_syncobj_find_fence(file_private, handle, &fence); 509 ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);
503 if (ret) 510 if (ret)
504 goto err_put_fd; 511 goto err_put_fd;
505 512
@@ -964,7 +971,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
964 return ret; 971 return ret;
965 972
966 for (i = 0; i < args->count_handles; i++) 973 for (i = 0; i < args->count_handles; i++)
967 drm_syncobj_replace_fence(syncobjs[i], NULL); 974 drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
968 975
969 drm_syncobj_array_free(syncobjs, args->count_handles); 976 drm_syncobj_array_free(syncobjs, args->count_handles);
970 977
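
For reference, drm_syncobj_replace_fence() and drm_syncobj_find_fence() now carry a timeline point, and every pre-existing binary-syncobj caller simply passes 0. A minimal sketch of an updated call site; file_priv, args and done_fence stand in for whatever the driver already holds:

        struct drm_syncobj *out_sync;
        struct dma_fence *in_fence;
        int ret;

        /* point 0 keeps the old binary-syncobj behaviour */
        ret = drm_syncobj_find_fence(file_priv, args->in_sync, 0, &in_fence);
        if (ret)
                return ret;

        out_sync = drm_syncobj_find(file_priv, args->out_sync);
        if (out_sync) {
                drm_syncobj_replace_fence(out_sync, 0, done_fence);
                drm_syncobj_put(out_sync);
        }
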
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2ccb982a5dba..7ea442033a57 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -52,6 +52,7 @@
52#include <drm/drm_gem.h> 52#include <drm/drm_gem.h>
53#include <drm/drm_auth.h> 53#include <drm/drm_auth.h>
54#include <drm/drm_cache.h> 54#include <drm/drm_cache.h>
55#include <drm/drm_util.h>
55 56
56#include "i915_params.h" 57#include "i915_params.h"
57#include "i915_reg.h" 58#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7d0b3a2c30e2..22b4cb775576 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2181,7 +2181,7 @@ signal_fence_array(struct i915_execbuffer *eb,
2181 if (!(flags & I915_EXEC_FENCE_SIGNAL)) 2181 if (!(flags & I915_EXEC_FENCE_SIGNAL))
2182 continue; 2182 continue;
2183 2183
2184 drm_syncobj_replace_fence(syncobj, fence); 2184 drm_syncobj_replace_fence(syncobj, 0, fence);
2185 } 2185 }
2186} 2186}
2187 2187
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1bd14c61dab5..b2bab57cd113 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,6 +46,7 @@
46#include <drm/drm_crtc_helper.h> 46#include <drm/drm_crtc_helper.h>
47#include <drm/drm_plane_helper.h> 47#include <drm/drm_plane_helper.h>
48#include <drm/drm_rect.h> 48#include <drm/drm_rect.h>
49#include <drm/drm_atomic_uapi.h>
49#include <linux/dma_remapping.h> 50#include <linux/dma_remapping.h>
50#include <linux/reservation.h> 51#include <linux/reservation.h>
51 52
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index e20e6a36a748..ed474da6c200 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -25,6 +25,8 @@
25#ifndef _INTEL_DISPLAY_H_ 25#ifndef _INTEL_DISPLAY_H_
26#define _INTEL_DISPLAY_H_ 26#define _INTEL_DISPLAY_H_
27 27
28#include <drm/drm_util.h>
29
28enum i915_gpio { 30enum i915_gpio {
29 GPIOA, 31 GPIOA,
30 GPIOB, 32 GPIOB,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3f6920dd7880..2dfa585712c2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,6 +2,8 @@
2#ifndef _INTEL_RINGBUFFER_H_ 2#ifndef _INTEL_RINGBUFFER_H_
3#define _INTEL_RINGBUFFER_H_ 3#define _INTEL_RINGBUFFER_H_
4 4
5#include <drm/drm_util.h>
6
5#include <linux/hashtable.h> 7#include <linux/hashtable.h>
6#include <linux/seqlock.h> 8#include <linux/seqlock.h>
7 9
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index b640e39ebaca..015341e2dd4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -21,6 +21,8 @@
21#include <linux/debugfs.h> 21#include <linux/debugfs.h>
22#include <linux/dma-buf.h> 22#include <linux/dma-buf.h>
23 23
24#include <drm/drm_atomic_uapi.h>
25
24#include "msm_drv.h" 26#include "msm_drv.h"
25#include "dpu_kms.h" 27#include "dpu_kms.h"
26#include "dpu_formats.h" 28#include "dpu_formats.h"
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index c1f1779c980f..4bcdeca7479d 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -15,6 +15,8 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <drm/drm_atomic_uapi.h>
19
18#include "msm_drv.h" 20#include "msm_drv.h"
19#include "msm_gem.h" 21#include "msm_gem.h"
20#include "msm_kms.h" 22#include "msm_kms.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8412119bd940..a9bb656058e5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2174,7 +2174,7 @@ nv50_display_create(struct drm_device *dev)
2174 nouveau_display(dev)->fini = nv50_display_fini; 2174 nouveau_display(dev)->fini = nv50_display_fini;
2175 disp->disp = &nouveau_display(dev)->disp; 2175 disp->disp = &nouveau_display(dev)->disp;
2176 dev->mode_config.funcs = &nv50_disp_func; 2176 dev->mode_config.funcs = &nv50_disp_func;
2177 dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; 2177 dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
2178 2178
2179 /* small shared memory area we use for notifiers and semaphores */ 2179 /* small shared memory area we use for notifiers and semaphores */
2180 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, 2180 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
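
The DRIVER_PREFER_XBGR_30BPP feature flag becomes a per-device mode_config quirk, presumably so the legacy ADDFB path can consult it without a driver feature bit. A sketch of the opt-in, done once while setting up mode config:

        drm_mode_config_init(dev);
        /* hint the legacy addfb path to prefer XBGR ordering for 30 bpp */
        dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
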
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index dc7454e7f19a..0acc07555bcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -32,6 +32,8 @@
32#include <drm/drm_edid.h> 32#include <drm/drm_edid.h>
33#include <drm/drm_encoder.h> 33#include <drm/drm_encoder.h>
34#include <drm/drm_dp_helper.h> 34#include <drm/drm_dp_helper.h>
35#include <drm/drm_util.h>
36
35#include "nouveau_crtc.h" 37#include "nouveau_crtc.h"
36#include "nouveau_encoder.h" 38#include "nouveau_encoder.h"
37 39
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 844498c4267c..20a260887be3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -379,7 +379,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
379 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 379 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
380 FBINFO_HWACCEL_FILLRECT | 380 FBINFO_HWACCEL_FILLRECT |
381 FBINFO_HWACCEL_IMAGEBLIT; 381 FBINFO_HWACCEL_IMAGEBLIT;
382 info->flags |= FBINFO_CAN_FORCE_OUTPUT;
383 info->fbops = &nouveau_fbcon_sw_ops; 382 info->fbops = &nouveau_fbcon_sw_ops;
384 info->fix.smem_start = fb->nvbo->bo.mem.bus.base + 383 info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
385 fb->nvbo->bo.mem.bus.offset; 384 fb->nvbo->bo.mem.bus.offset;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 01704a7f07cb..87d16a0ce01e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -28,6 +28,7 @@
28#include <drm/drm_plane_helper.h> 28#include <drm/drm_plane_helper.h>
29#include <drm/drm_atomic_helper.h> 29#include <drm/drm_atomic_helper.h>
30#include <drm/drm_atomic.h> 30#include <drm/drm_atomic.h>
31#include <drm/drm_gem_framebuffer_helper.h>
31 32
32#include "qxl_drv.h" 33#include "qxl_drv.h"
33#include "qxl_object.h" 34#include "qxl_object.h"
@@ -388,17 +389,6 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
388 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 389 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
389}; 390};
390 391
391void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
392{
393 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
394 struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
395
396 WARN_ON(bo->shadow);
397 drm_gem_object_put_unlocked(qxl_fb->obj);
398 drm_framebuffer_cleanup(fb);
399 kfree(qxl_fb);
400}
401
402static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, 392static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
403 struct drm_file *file_priv, 393 struct drm_file *file_priv,
404 unsigned flags, unsigned color, 394 unsigned flags, unsigned color,
@@ -406,15 +396,14 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
406 unsigned num_clips) 396 unsigned num_clips)
407{ 397{
408 /* TODO: vmwgfx where this was cribbed from had locking. Why? */ 398 /* TODO: vmwgfx where this was cribbed from had locking. Why? */
409 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); 399 struct qxl_device *qdev = fb->dev->dev_private;
410 struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
411 struct drm_clip_rect norect; 400 struct drm_clip_rect norect;
412 struct qxl_bo *qobj; 401 struct qxl_bo *qobj;
413 int inc = 1; 402 int inc = 1;
414 403
415 drm_modeset_lock_all(fb->dev); 404 drm_modeset_lock_all(fb->dev);
416 405
417 qobj = gem_to_qxl_bo(qxl_fb->obj); 406 qobj = gem_to_qxl_bo(fb->obj[0]);
418 /* if we aren't primary surface ignore this */ 407 /* if we aren't primary surface ignore this */
419 if (!qobj->is_primary) { 408 if (!qobj->is_primary) {
420 drm_modeset_unlock_all(fb->dev); 409 drm_modeset_unlock_all(fb->dev);
@@ -432,7 +421,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
432 inc = 2; /* skip source rects */ 421 inc = 2; /* skip source rects */
433 } 422 }
434 423
435 qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, 424 qxl_draw_dirty_fb(qdev, fb, qobj, flags, color,
436 clips, num_clips, inc); 425 clips, num_clips, inc);
437 426
438 drm_modeset_unlock_all(fb->dev); 427 drm_modeset_unlock_all(fb->dev);
@@ -441,31 +430,11 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
441} 430}
442 431
443static const struct drm_framebuffer_funcs qxl_fb_funcs = { 432static const struct drm_framebuffer_funcs qxl_fb_funcs = {
444 .destroy = qxl_user_framebuffer_destroy, 433 .destroy = drm_gem_fb_destroy,
445 .dirty = qxl_framebuffer_surface_dirty, 434 .dirty = qxl_framebuffer_surface_dirty,
446/* TODO? 435 .create_handle = drm_gem_fb_create_handle,
447 * .create_handle = qxl_user_framebuffer_create_handle, */
448}; 436};
449 437
450int
451qxl_framebuffer_init(struct drm_device *dev,
452 struct qxl_framebuffer *qfb,
453 const struct drm_mode_fb_cmd2 *mode_cmd,
454 struct drm_gem_object *obj,
455 const struct drm_framebuffer_funcs *funcs)
456{
457 int ret;
458
459 qfb->obj = obj;
460 drm_helper_mode_fill_fb_struct(dev, &qfb->base, mode_cmd);
461 ret = drm_framebuffer_init(dev, &qfb->base, funcs);
462 if (ret) {
463 qfb->obj = NULL;
464 return ret;
465 }
466 return 0;
467}
468
469static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, 438static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
470 struct drm_crtc_state *old_state) 439 struct drm_crtc_state *old_state)
471{ 440{
@@ -488,14 +457,12 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
488 struct drm_plane_state *state) 457 struct drm_plane_state *state)
489{ 458{
490 struct qxl_device *qdev = plane->dev->dev_private; 459 struct qxl_device *qdev = plane->dev->dev_private;
491 struct qxl_framebuffer *qfb;
492 struct qxl_bo *bo; 460 struct qxl_bo *bo;
493 461
494 if (!state->crtc || !state->fb) 462 if (!state->crtc || !state->fb)
495 return 0; 463 return 0;
496 464
497 qfb = to_qxl_framebuffer(state->fb); 465 bo = gem_to_qxl_bo(state->fb->obj[0]);
498 bo = gem_to_qxl_bo(qfb->obj);
499 466
500 if (bo->surf.stride * bo->surf.height > qdev->vram_size) { 467 if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
501 DRM_ERROR("Mode doesn't fit in vram size (vgamem)"); 468 DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
@@ -556,23 +523,19 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
556 struct drm_plane_state *old_state) 523 struct drm_plane_state *old_state)
557{ 524{
558 struct qxl_device *qdev = plane->dev->dev_private; 525 struct qxl_device *qdev = plane->dev->dev_private;
559 struct qxl_framebuffer *qfb = 526 struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
560 to_qxl_framebuffer(plane->state->fb);
561 struct qxl_framebuffer *qfb_old;
562 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
563 struct qxl_bo *bo_old; 527 struct qxl_bo *bo_old;
564 struct drm_clip_rect norect = { 528 struct drm_clip_rect norect = {
565 .x1 = 0, 529 .x1 = 0,
566 .y1 = 0, 530 .y1 = 0,
567 .x2 = qfb->base.width, 531 .x2 = plane->state->fb->width,
568 .y2 = qfb->base.height 532 .y2 = plane->state->fb->height
569 }; 533 };
570 int ret; 534 int ret;
571 bool same_shadow = false; 535 bool same_shadow = false;
572 536
573 if (old_state->fb) { 537 if (old_state->fb) {
574 qfb_old = to_qxl_framebuffer(old_state->fb); 538 bo_old = gem_to_qxl_bo(old_state->fb->obj[0]);
575 bo_old = gem_to_qxl_bo(qfb_old->obj);
576 } else { 539 } else {
577 bo_old = NULL; 540 bo_old = NULL;
578 } 541 }
@@ -602,7 +565,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
602 bo->is_primary = true; 565 bo->is_primary = true;
603 } 566 }
604 567
605 qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); 568 qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1);
606} 569}
607 570
608static void qxl_primary_atomic_disable(struct drm_plane *plane, 571static void qxl_primary_atomic_disable(struct drm_plane *plane,
@@ -611,9 +574,7 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
611 struct qxl_device *qdev = plane->dev->dev_private; 574 struct qxl_device *qdev = plane->dev->dev_private;
612 575
613 if (old_state->fb) { 576 if (old_state->fb) {
614 struct qxl_framebuffer *qfb = 577 struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
615 to_qxl_framebuffer(old_state->fb);
616 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
617 578
618 if (bo->is_primary) { 579 if (bo->is_primary) {
619 qxl_io_destroy_primary(qdev); 580 qxl_io_destroy_primary(qdev);
@@ -645,7 +606,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
645 return; 606 return;
646 607
647 if (fb != old_state->fb) { 608 if (fb != old_state->fb) {
648 obj = to_qxl_framebuffer(fb)->obj; 609 obj = fb->obj[0];
649 user_bo = gem_to_qxl_bo(obj); 610 user_bo = gem_to_qxl_bo(obj);
650 611
651	/* pinning is done in the prepare/cleanup framebuffer */ 612	/* pinning is done in the prepare/cleanup framebuffer */
@@ -765,13 +726,13 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
765 if (!new_state->fb) 726 if (!new_state->fb)
766 return 0; 727 return 0;
767 728
768 obj = to_qxl_framebuffer(new_state->fb)->obj; 729 obj = new_state->fb->obj[0];
769 user_bo = gem_to_qxl_bo(obj); 730 user_bo = gem_to_qxl_bo(obj);
770 731
771 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 732 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
772 user_bo->is_dumb && !user_bo->shadow) { 733 user_bo->is_dumb && !user_bo->shadow) {
773 if (plane->state->fb) { 734 if (plane->state->fb) {
774 obj = to_qxl_framebuffer(plane->state->fb)->obj; 735 obj = plane->state->fb->obj[0];
775 old_bo = gem_to_qxl_bo(obj); 736 old_bo = gem_to_qxl_bo(obj);
776 } 737 }
777 if (old_bo && old_bo->shadow && 738 if (old_bo && old_bo->shadow &&
@@ -815,7 +776,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
815 return; 776 return;
816 } 777 }
817 778
818 obj = to_qxl_framebuffer(old_state->fb)->obj; 779 obj = old_state->fb->obj[0];
819 user_bo = gem_to_qxl_bo(obj); 780 user_bo = gem_to_qxl_bo(obj);
820 qxl_bo_unpin(user_bo); 781 qxl_bo_unpin(user_bo);
821 782
@@ -1115,26 +1076,8 @@ qxl_user_framebuffer_create(struct drm_device *dev,
1115 struct drm_file *file_priv, 1076 struct drm_file *file_priv,
1116 const struct drm_mode_fb_cmd2 *mode_cmd) 1077 const struct drm_mode_fb_cmd2 *mode_cmd)
1117{ 1078{
1118 struct drm_gem_object *obj; 1079 return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
1119 struct qxl_framebuffer *qxl_fb; 1080 &qxl_fb_funcs);
1120 int ret;
1121
1122 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
1123 if (!obj)
1124 return NULL;
1125
1126 qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
1127 if (qxl_fb == NULL)
1128 return NULL;
1129
1130 ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
1131 if (ret) {
1132 kfree(qxl_fb);
1133 drm_gem_object_put_unlocked(obj);
1134 return NULL;
1135 }
1136
1137 return &qxl_fb->base;
1138} 1081}
1139 1082
1140static const struct drm_mode_config_funcs qxl_mode_funcs = { 1083static const struct drm_mode_config_funcs qxl_mode_funcs = {
@@ -1221,7 +1164,6 @@ int qxl_modeset_init(struct qxl_device *qdev)
1221 } 1164 }
1222 1165
1223 qxl_display_read_client_monitors_config(qdev); 1166 qxl_display_read_client_monitors_config(qdev);
1224 qdev->mode_info.mode_config_initialized = true;
1225 1167
1226 drm_mode_config_reset(&qdev->ddev); 1168 drm_mode_config_reset(&qdev->ddev);
1227 1169
@@ -1237,8 +1179,5 @@ void qxl_modeset_fini(struct qxl_device *qdev)
1237 qxl_fbdev_fini(qdev); 1179 qxl_fbdev_fini(qdev);
1238 1180
1239 qxl_destroy_monitors_object(qdev); 1181 qxl_destroy_monitors_object(qdev);
1240 if (qdev->mode_info.mode_config_initialized) { 1182 drm_mode_config_cleanup(&qdev->ddev);
1241 drm_mode_config_cleanup(&qdev->ddev);
1242 qdev->mode_info.mode_config_initialized = false;
1243 }
1244} 1183}
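
Condensed, the qxl framebuffer code now follows the standard GEM-framebuffer-helper pattern: keep the driver-specific .dirty hook, reuse the generic destroy/create_handle helpers, and let drm_gem_fb_create_with_funcs() do the lookup and allocation that qxl_framebuffer_init() used to open-code. A sketch of that shape, using the names from this patch:

        static const struct drm_framebuffer_funcs qxl_fb_funcs = {
                .destroy       = drm_gem_fb_destroy,
                .create_handle = drm_gem_fb_create_handle,
                .dirty         = qxl_framebuffer_surface_dirty,
        };

        static struct drm_framebuffer *
        qxl_user_framebuffer_create(struct drm_device *dev,
                                    struct drm_file *file_priv,
                                    const struct drm_mode_fb_cmd2 *mode_cmd)
        {
                return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
                                                    &qxl_fb_funcs);
        }
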
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 4d8681e84e68..cc5b32e749ce 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -262,7 +262,7 @@ out_free_drawable:
262 * by treating them differently in the server. 262 * by treating them differently in the server.
263 */ 263 */
264void qxl_draw_dirty_fb(struct qxl_device *qdev, 264void qxl_draw_dirty_fb(struct qxl_device *qdev,
265 struct qxl_framebuffer *qxl_fb, 265 struct drm_framebuffer *fb,
266 struct qxl_bo *bo, 266 struct qxl_bo *bo,
267 unsigned flags, unsigned color, 267 unsigned flags, unsigned color,
268 struct drm_clip_rect *clips, 268 struct drm_clip_rect *clips,
@@ -281,9 +281,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
281 struct qxl_drawable *drawable; 281 struct qxl_drawable *drawable;
282 struct qxl_rect drawable_rect; 282 struct qxl_rect drawable_rect;
283 struct qxl_rect *rects; 283 struct qxl_rect *rects;
284 int stride = qxl_fb->base.pitches[0]; 284 int stride = fb->pitches[0];
285 /* depth is not actually interesting, we don't mask with it */ 285 /* depth is not actually interesting, we don't mask with it */
286 int depth = qxl_fb->base.format->cpp[0] * 8; 286 int depth = fb->format->cpp[0] * 8;
287 uint8_t *surface_base; 287 uint8_t *surface_base;
288 struct qxl_release *release; 288 struct qxl_release *release;
289 struct qxl_bo *clips_bo; 289 struct qxl_bo *clips_bo;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 01220d386b0a..8ff70a7281a7 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,6 +38,7 @@
38 38
39#include <drm/drm_crtc.h> 39#include <drm/drm_crtc.h>
40#include <drm/drm_encoder.h> 40#include <drm/drm_encoder.h>
41#include <drm/drm_fb_helper.h>
41#include <drm/drm_gem.h> 42#include <drm/drm_gem.h>
42#include <drm/drmP.h> 43#include <drm/drmP.h>
43#include <drm/ttm/ttm_bo_api.h> 44#include <drm/ttm/ttm_bo_api.h>
@@ -121,15 +122,9 @@ struct qxl_output {
121 struct drm_encoder enc; 122 struct drm_encoder enc;
122}; 123};
123 124
124struct qxl_framebuffer {
125 struct drm_framebuffer base;
126 struct drm_gem_object *obj;
127};
128
129#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base) 125#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
130#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base) 126#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
131#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) 127#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
132#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
133 128
134struct qxl_mman { 129struct qxl_mman {
135 struct ttm_bo_global_ref bo_global_ref; 130 struct ttm_bo_global_ref bo_global_ref;
@@ -138,13 +133,6 @@ struct qxl_mman {
138 struct ttm_bo_device bdev; 133 struct ttm_bo_device bdev;
139}; 134};
140 135
141struct qxl_mode_info {
142 bool mode_config_initialized;
143
144 /* pointer to fbdev info structure */
145 struct qxl_fbdev *qfbdev;
146};
147
148 136
149struct qxl_memslot { 137struct qxl_memslot {
150 uint8_t generation; 138 uint8_t generation;
@@ -232,10 +220,9 @@ struct qxl_device {
232 void *ram; 220 void *ram;
233 struct qxl_mman mman; 221 struct qxl_mman mman;
234 struct qxl_gem gem; 222 struct qxl_gem gem;
235 struct qxl_mode_info mode_info;
236 223
237 struct fb_info *fbdev_info; 224 struct drm_fb_helper fb_helper;
238 struct qxl_framebuffer *fbdev_qfb; 225
239 void *ram_physical; 226 void *ram_physical;
240 227
241 struct qxl_ring *release_ring; 228 struct qxl_ring *release_ring;
@@ -349,19 +336,8 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
349 336
350int qxl_fbdev_init(struct qxl_device *qdev); 337int qxl_fbdev_init(struct qxl_device *qdev);
351void qxl_fbdev_fini(struct qxl_device *qdev); 338void qxl_fbdev_fini(struct qxl_device *qdev);
352int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
353 struct drm_file *file_priv,
354 uint32_t *handle);
355void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
356 339
357/* qxl_display.c */ 340/* qxl_display.c */
358void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb);
359int
360qxl_framebuffer_init(struct drm_device *dev,
361 struct qxl_framebuffer *rfb,
362 const struct drm_mode_fb_cmd2 *mode_cmd,
363 struct drm_gem_object *obj,
364 const struct drm_framebuffer_funcs *funcs);
365void qxl_display_read_client_monitors_config(struct qxl_device *qdev); 341void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
366int qxl_create_monitors_object(struct qxl_device *qdev); 342int qxl_create_monitors_object(struct qxl_device *qdev);
367int qxl_destroy_monitors_object(struct qxl_device *qdev); 343int qxl_destroy_monitors_object(struct qxl_device *qdev);
@@ -471,7 +447,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
471 int stride /* filled in if 0 */); 447 int stride /* filled in if 0 */);
472 448
473void qxl_draw_dirty_fb(struct qxl_device *qdev, 449void qxl_draw_dirty_fb(struct qxl_device *qdev,
474 struct qxl_framebuffer *qxl_fb, 450 struct drm_framebuffer *fb,
475 struct qxl_bo *bo, 451 struct qxl_bo *bo,
476 unsigned flags, unsigned color, 452 unsigned flags, unsigned color,
477 struct drm_clip_rect *clips, 453 struct drm_clip_rect *clips,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index ca465c0d49fa..2294b7f14fdf 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -30,24 +30,12 @@
30#include <drm/drm_crtc.h> 30#include <drm/drm_crtc.h>
31#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
32#include <drm/drm_fb_helper.h> 32#include <drm/drm_fb_helper.h>
33#include <drm/drm_gem_framebuffer_helper.h>
33 34
34#include "qxl_drv.h" 35#include "qxl_drv.h"
35 36
36#include "qxl_object.h" 37#include "qxl_object.h"
37 38
38#define QXL_DIRTY_DELAY (HZ / 30)
39
40struct qxl_fbdev {
41 struct drm_fb_helper helper;
42 struct qxl_framebuffer qfb;
43 struct qxl_device *qdev;
44
45 spinlock_t delayed_ops_lock;
46 struct list_head delayed_ops;
47 void *shadow;
48 int size;
49};
50
51static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, 39static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
52 struct qxl_device *qdev, struct fb_info *info, 40 struct qxl_device *qdev, struct fb_info *info,
53 const struct fb_image *image) 41 const struct fb_image *image)
@@ -73,13 +61,6 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
73 } 61 }
74} 62}
75 63
76#ifdef CONFIG_DRM_FBDEV_EMULATION
77static struct fb_deferred_io qxl_defio = {
78 .delay = QXL_DIRTY_DELAY,
79 .deferred_io = drm_fb_helper_deferred_io,
80};
81#endif
82
83static struct fb_ops qxlfb_ops = { 64static struct fb_ops qxlfb_ops = {
84 .owner = THIS_MODULE, 65 .owner = THIS_MODULE,
85 DRM_FB_HELPER_DEFAULT_OPS, 66 DRM_FB_HELPER_DEFAULT_OPS,
@@ -98,26 +79,10 @@ static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
98 drm_gem_object_put_unlocked(gobj); 79 drm_gem_object_put_unlocked(gobj);
99} 80}
100 81
101int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, 82static int qxlfb_create_pinned_object(struct qxl_device *qdev,
102 struct drm_file *file_priv,
103 uint32_t *handle)
104{
105 int r;
106 struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
107
108 BUG_ON(!gobj);
109 /* drm_get_handle_create adds a reference - good */
110 r = drm_gem_handle_create(file_priv, gobj, handle);
111 if (r)
112 return r;
113 return 0;
114}
115
116static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
117 const struct drm_mode_fb_cmd2 *mode_cmd, 83 const struct drm_mode_fb_cmd2 *mode_cmd,
118 struct drm_gem_object **gobj_p) 84 struct drm_gem_object **gobj_p)
119{ 85{
120 struct qxl_device *qdev = qfbdev->qdev;
121 struct drm_gem_object *gobj = NULL; 86 struct drm_gem_object *gobj = NULL;
122 struct qxl_bo *qbo = NULL; 87 struct qxl_bo *qbo = NULL;
123 int ret; 88 int ret;
@@ -174,13 +139,12 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
174 unsigned num_clips) 139 unsigned num_clips)
175{ 140{
176 struct qxl_device *qdev = fb->dev->dev_private; 141 struct qxl_device *qdev = fb->dev->dev_private;
177 struct fb_info *info = qdev->fbdev_info; 142 struct fb_info *info = qdev->fb_helper.fbdev;
178 struct qxl_fbdev *qfbdev = info->par;
179 struct qxl_fb_image qxl_fb_image; 143 struct qxl_fb_image qxl_fb_image;
180 struct fb_image *image = &qxl_fb_image.fb_image; 144 struct fb_image *image = &qxl_fb_image.fb_image;
181 145
182 /* TODO: hard coding 32 bpp */ 146 /* TODO: hard coding 32 bpp */
183 int stride = qfbdev->qfb.base.pitches[0]; 147 int stride = fb->pitches[0];
184 148
185 /* 149 /*
186 * we are using a shadow draw buffer, at qdev->surface0_shadow 150 * we are using a shadow draw buffer, at qdev->surface0_shadow
@@ -199,7 +163,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
199 image->cmap.green = NULL; 163 image->cmap.green = NULL;
200 image->cmap.blue = NULL; 164 image->cmap.blue = NULL;
201 image->cmap.transp = NULL; 165 image->cmap.transp = NULL;
202 image->data = qfbdev->shadow + (clips->x1 * 4) + (stride * clips->y1); 166 image->data = info->screen_base + (clips->x1 * 4) + (stride * clips->y1);
203 167
204 qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL); 168 qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
205 qxl_draw_opaque_fb(&qxl_fb_image, stride); 169 qxl_draw_opaque_fb(&qxl_fb_image, stride);
@@ -208,21 +172,22 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
208} 172}
209 173
210static const struct drm_framebuffer_funcs qxlfb_fb_funcs = { 174static const struct drm_framebuffer_funcs qxlfb_fb_funcs = {
211 .destroy = qxl_user_framebuffer_destroy, 175 .destroy = drm_gem_fb_destroy,
176 .create_handle = drm_gem_fb_create_handle,
212 .dirty = qxlfb_framebuffer_dirty, 177 .dirty = qxlfb_framebuffer_dirty,
213}; 178};
214 179
215static int qxlfb_create(struct qxl_fbdev *qfbdev, 180static int qxlfb_create(struct drm_fb_helper *helper,
216 struct drm_fb_helper_surface_size *sizes) 181 struct drm_fb_helper_surface_size *sizes)
217{ 182{
218 struct qxl_device *qdev = qfbdev->qdev; 183 struct qxl_device *qdev =
184 container_of(helper, struct qxl_device, fb_helper);
219 struct fb_info *info; 185 struct fb_info *info;
220 struct drm_framebuffer *fb = NULL; 186 struct drm_framebuffer *fb = NULL;
221 struct drm_mode_fb_cmd2 mode_cmd; 187 struct drm_mode_fb_cmd2 mode_cmd;
222 struct drm_gem_object *gobj = NULL; 188 struct drm_gem_object *gobj = NULL;
223 struct qxl_bo *qbo = NULL; 189 struct qxl_bo *qbo = NULL;
224 int ret; 190 int ret;
225 int size;
226 int bpp = sizes->surface_bpp; 191 int bpp = sizes->surface_bpp;
227 int depth = sizes->surface_depth; 192 int depth = sizes->surface_depth;
228 void *shadow; 193 void *shadow;
@@ -233,7 +198,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
233 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64); 198 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
234 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 199 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
235 200
236 ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj); 201 ret = qxlfb_create_pinned_object(qdev, &mode_cmd, &gobj);
237 if (ret < 0) 202 if (ret < 0)
238 return ret; 203 return ret;
239 204
@@ -247,25 +212,26 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
247 DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", 212 DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
248 qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo), 213 qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo),
249 qbo->kptr, shadow); 214 qbo->kptr, shadow);
250 size = mode_cmd.pitches[0] * mode_cmd.height;
251 215
252 info = drm_fb_helper_alloc_fbi(&qfbdev->helper); 216 info = drm_fb_helper_alloc_fbi(helper);
253 if (IS_ERR(info)) { 217 if (IS_ERR(info)) {
254 ret = PTR_ERR(info); 218 ret = PTR_ERR(info);
255 goto out_unref; 219 goto out_unref;
256 } 220 }
257 221
258 info->par = qfbdev; 222 info->par = helper;
259
260 qxl_framebuffer_init(&qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
261 &qxlfb_fb_funcs);
262 223
263 fb = &qfbdev->qfb.base; 224 fb = drm_gem_fbdev_fb_create(&qdev->ddev, sizes, 64, gobj,
225 &qxlfb_fb_funcs);
226 if (IS_ERR(fb)) {
227 DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
228 ret = PTR_ERR(fb);
229 goto out_unref;
230 }
264 231
265 /* setup helper with fb data */ 232 /* setup helper with fb data */
266 qfbdev->helper.fb = fb; 233 qdev->fb_helper.fb = fb;
267 234
268 qfbdev->shadow = shadow;
269 strcpy(info->fix.id, "qxldrmfb"); 235 strcpy(info->fix.id, "qxldrmfb");
270 236
271 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); 237 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
@@ -278,10 +244,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
278 */ 244 */
279 info->fix.smem_start = qdev->vram_base; /* TODO - correct? */ 245 info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
280 info->fix.smem_len = gobj->size; 246 info->fix.smem_len = gobj->size;
281 info->screen_base = qfbdev->shadow; 247 info->screen_base = shadow;
282 info->screen_size = gobj->size; 248 info->screen_size = gobj->size;
283 249
284 drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width, 250 drm_fb_helper_fill_var(info, &qdev->fb_helper, sizes->fb_width,
285 sizes->fb_height); 251 sizes->fb_height);
286 252
287 /* setup aperture base/size for vesafb takeover */ 253 /* setup aperture base/size for vesafb takeover */
@@ -296,13 +262,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
296 goto out_unref; 262 goto out_unref;
297 } 263 }
298 264
299#ifdef CONFIG_DRM_FBDEV_EMULATION 265 /* XXX error handling. */
300 info->fbdefio = &qxl_defio; 266 drm_fb_helper_defio_init(helper);
301 fb_deferred_io_init(info);
302#endif
303 267
304 qdev->fbdev_info = info;
305 qdev->fbdev_qfb = &qfbdev->qfb;
306 DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size); 268 DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
307 DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", 269 DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n",
308 fb->format->depth, fb->pitches[0], fb->width, fb->height); 270 fb->format->depth, fb->pitches[0], fb->width, fb->height);
@@ -313,119 +275,26 @@ out_unref:
313 qxl_bo_kunmap(qbo); 275 qxl_bo_kunmap(qbo);
314 qxl_bo_unpin(qbo); 276 qxl_bo_unpin(qbo);
315 } 277 }
316 if (fb && ret) {
317 drm_gem_object_put_unlocked(gobj);
318 drm_framebuffer_cleanup(fb);
319 kfree(fb);
320 }
321 drm_gem_object_put_unlocked(gobj); 278 drm_gem_object_put_unlocked(gobj);
322 return ret; 279 return ret;
323} 280}
324 281
325static int qxl_fb_find_or_create_single(
326 struct drm_fb_helper *helper,
327 struct drm_fb_helper_surface_size *sizes)
328{
329 struct qxl_fbdev *qfbdev =
330 container_of(helper, struct qxl_fbdev, helper);
331 int new_fb = 0;
332 int ret;
333
334 if (!helper->fb) {
335 ret = qxlfb_create(qfbdev, sizes);
336 if (ret)
337 return ret;
338 new_fb = 1;
339 }
340 return new_fb;
341}
342
343static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
344{
345 struct qxl_framebuffer *qfb = &qfbdev->qfb;
346
347 drm_fb_helper_unregister_fbi(&qfbdev->helper);
348
349 if (qfb->obj) {
350 qxlfb_destroy_pinned_object(qfb->obj);
351 qfb->obj = NULL;
352 }
353 drm_fb_helper_fini(&qfbdev->helper);
354 vfree(qfbdev->shadow);
355 drm_framebuffer_cleanup(&qfb->base);
356
357 return 0;
358}
359
360static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { 282static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
361 .fb_probe = qxl_fb_find_or_create_single, 283 .fb_probe = qxlfb_create,
362}; 284};
363 285
364int qxl_fbdev_init(struct qxl_device *qdev) 286int qxl_fbdev_init(struct qxl_device *qdev)
365{ 287{
366 int ret = 0; 288 return drm_fb_helper_fbdev_setup(&qdev->ddev, &qdev->fb_helper,
367 289 &qxl_fb_helper_funcs, 32,
368#ifdef CONFIG_DRM_FBDEV_EMULATION 290 QXLFB_CONN_LIMIT);
369 struct qxl_fbdev *qfbdev;
370 int bpp_sel = 32; /* TODO: parameter from somewhere? */
371
372 qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
373 if (!qfbdev)
374 return -ENOMEM;
375
376 qfbdev->qdev = qdev;
377 qdev->mode_info.qfbdev = qfbdev;
378 spin_lock_init(&qfbdev->delayed_ops_lock);
379 INIT_LIST_HEAD(&qfbdev->delayed_ops);
380
381 drm_fb_helper_prepare(&qdev->ddev, &qfbdev->helper,
382 &qxl_fb_helper_funcs);
383
384 ret = drm_fb_helper_init(&qdev->ddev, &qfbdev->helper,
385 QXLFB_CONN_LIMIT);
386 if (ret)
387 goto free;
388
389 ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
390 if (ret)
391 goto fini;
392
393 ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
394 if (ret)
395 goto fini;
396
397 return 0;
398
399fini:
400 drm_fb_helper_fini(&qfbdev->helper);
401free:
402 kfree(qfbdev);
403#endif
404
405 return ret;
406} 291}
407 292
408void qxl_fbdev_fini(struct qxl_device *qdev) 293void qxl_fbdev_fini(struct qxl_device *qdev)
409{ 294{
410 if (!qdev->mode_info.qfbdev) 295 struct fb_info *fbi = qdev->fb_helper.fbdev;
411 return; 296 void *shadow = fbi ? fbi->screen_buffer : NULL;
412 297
413 qxl_fbdev_destroy(&qdev->ddev, qdev->mode_info.qfbdev); 298 drm_fb_helper_fbdev_teardown(&qdev->ddev);
414 kfree(qdev->mode_info.qfbdev); 299 vfree(shadow);
415 qdev->mode_info.qfbdev = NULL;
416}
417
418void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
419{
420 if (!qdev->mode_info.qfbdev)
421 return;
422
423 drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
424}
425
426bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
427{
428 if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
429 return true;
430 return false;
431} 300}
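
The fbdev side shrinks to the generic setup/teardown helpers plus a single fb_probe hook; the per-driver qxl_fbdev wrapper, the deferred-io boilerplate and the suspend hook all go away. Condensed from the patch (the real fini additionally vfree()s the shadow buffer that fb_probe allocated):

        static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
                .fb_probe = qxlfb_create,
        };

        int qxl_fbdev_init(struct qxl_device *qdev)
        {
                return drm_fb_helper_fbdev_setup(&qdev->ddev, &qdev->fb_helper,
                                                 &qxl_fb_helper_funcs, 32,
                                                 QXLFB_CONN_LIMIT);
        }

        void qxl_fbdev_fini(struct qxl_device *qdev)
        {
                drm_fb_helper_fbdev_teardown(&qdev->ddev);
        }
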
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 5864cb452c5c..941f35233b1f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,6 +448,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
448 return 0; 448 return 0;
449} 449}
450 450
451static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
452{
453 rockchip_drm_platform_remove(pdev);
454}
455
451static const struct of_device_id rockchip_drm_dt_ids[] = { 456static const struct of_device_id rockchip_drm_dt_ids[] = {
452 { .compatible = "rockchip,display-subsystem", }, 457 { .compatible = "rockchip,display-subsystem", },
453 { /* sentinel */ }, 458 { /* sentinel */ },
@@ -457,6 +462,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
457static struct platform_driver rockchip_drm_platform_driver = { 462static struct platform_driver rockchip_drm_platform_driver = {
458 .probe = rockchip_drm_platform_probe, 463 .probe = rockchip_drm_platform_probe,
459 .remove = rockchip_drm_platform_remove, 464 .remove = rockchip_drm_platform_remove,
465 .shutdown = rockchip_drm_platform_shutdown,
460 .driver = { 466 .driver = {
461 .name = "rockchip-drm", 467 .name = "rockchip-drm",
462 .of_match_table = rockchip_drm_dt_ids, 468 .of_match_table = rockchip_drm_dt_ids,
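
The new hook simply routes platform shutdown through the existing remove path, presumably so the display pipeline is torn down cleanly on reboot or poweroff rather than left scanning out; it is wired up via the .shutdown callback shown in the hunk above:

        static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
        {
                rockchip_drm_platform_remove(pdev);
        }
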
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 0cebb2db5b99..c78cd35a1294 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -12,6 +12,7 @@
12 12
13#include <drm/drmP.h> 13#include <drm/drmP.h>
14#include <drm/drm_atomic_helper.h> 14#include <drm/drm_atomic_helper.h>
15#include <drm/drm_connector.h>
15#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
17#include <drm/drm_encoder.h> 18#include <drm/drm_encoder.h>
@@ -277,10 +278,64 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
277 SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); 278 SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
278} 279}
279 280
281static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
282 const struct drm_connector *connector)
283{
284 u32 bus_format = 0;
285 u32 val = 0;
286
287 /* XXX Would this ever happen? */
288 if (!connector)
289 return;
290
291 /*
292 * FIXME: Undocumented bits
293 *
294 * The whole dithering process and these parameters are not
295 * explained in the vendor documents or BSP kernel code.
296 */
297 regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PR_REG, 0x11111111);
298 regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PG_REG, 0x11111111);
299 regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PB_REG, 0x11111111);
300 regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LR_REG, 0x11111111);
301 regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LG_REG, 0x11111111);
302 regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LB_REG, 0x11111111);
303 regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL0_REG, 0x01010000);
304 regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL1_REG, 0x15151111);
305 regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL2_REG, 0x57575555);
306 regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL3_REG, 0x7f7f7777);
307
308 /* Do dithering if panel only supports 6 bits per color */
309 if (connector->display_info.bpc == 6)
310 val |= SUN4I_TCON0_FRM_CTL_EN;
311
312 if (connector->display_info.num_bus_formats == 1)
313 bus_format = connector->display_info.bus_formats[0];
314
315 /* Check the connection format */
316 switch (bus_format) {
317 case MEDIA_BUS_FMT_RGB565_1X16:
318 /* R and B components are only 5 bits deep */
319 val |= SUN4I_TCON0_FRM_CTL_MODE_R;
320 val |= SUN4I_TCON0_FRM_CTL_MODE_B;
321 case MEDIA_BUS_FMT_RGB666_1X18:
322 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
323 /* Fall through: enable dithering */
324 val |= SUN4I_TCON0_FRM_CTL_EN;
325 break;
326 }
327
328 /* Write dithering settings */
329 regmap_write(tcon->regs, SUN4I_TCON_FRM_CTL_REG, val);
330}
331
280static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon, 332static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
281 struct mipi_dsi_device *device, 333 const struct drm_encoder *encoder,
282 const struct drm_display_mode *mode) 334 const struct drm_display_mode *mode)
283{ 335{
336 /* TODO support normal CPU interface modes */
337 struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
338 struct mipi_dsi_device *device = dsi->device;
284 u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format); 339 u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format);
285 u8 lanes = device->lanes; 340 u8 lanes = device->lanes;
286 u32 block_space, start_delay; 341 u32 block_space, start_delay;
@@ -291,6 +346,9 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
291 346
292 sun4i_tcon0_mode_set_common(tcon, mode); 347 sun4i_tcon0_mode_set_common(tcon, mode);
293 348
349 /* Set dithering if needed */
350 sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder));
351
294 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, 352 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
295 SUN4I_TCON0_CTL_IF_MASK, 353 SUN4I_TCON0_CTL_IF_MASK,
296 SUN4I_TCON0_CTL_IF_8080); 354 SUN4I_TCON0_CTL_IF_8080);
@@ -356,6 +414,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
356 tcon->dclk_max_div = 7; 414 tcon->dclk_max_div = 7;
357 sun4i_tcon0_mode_set_common(tcon, mode); 415 sun4i_tcon0_mode_set_common(tcon, mode);
358 416
417 /* Set dithering if needed */
418 sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder));
419
359 /* Adjust clock delay */ 420 /* Adjust clock delay */
360 clk_delay = sun4i_tcon_get_clk_delay(mode, 0); 421 clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
361 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, 422 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
@@ -429,6 +490,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
429 tcon->dclk_max_div = 127; 490 tcon->dclk_max_div = 127;
430 sun4i_tcon0_mode_set_common(tcon, mode); 491 sun4i_tcon0_mode_set_common(tcon, mode);
431 492
493 /* Set dithering if needed */
494 sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
495
432 /* Adjust clock delay */ 496 /* Adjust clock delay */
433 clk_delay = sun4i_tcon_get_clk_delay(mode, 0); 497 clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
434 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, 498 regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
@@ -610,16 +674,10 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
610 const struct drm_encoder *encoder, 674 const struct drm_encoder *encoder,
611 const struct drm_display_mode *mode) 675 const struct drm_display_mode *mode)
612{ 676{
613 struct sun6i_dsi *dsi;
614
615 switch (encoder->encoder_type) { 677 switch (encoder->encoder_type) {
616 case DRM_MODE_ENCODER_DSI: 678 case DRM_MODE_ENCODER_DSI:
617 /* 679 /* DSI is tied to special case of CPU interface */
618 * This is not really elegant, but it's the "cleaner" 680 sun4i_tcon0_mode_set_cpu(tcon, encoder, mode);
619 * way I could think of...
620 */
621 dsi = encoder_to_sun6i_dsi(encoder);
622 sun4i_tcon0_mode_set_cpu(tcon, dsi->device, mode);
623 break; 681 break;
624 case DRM_MODE_ENCODER_LVDS: 682 case DRM_MODE_ENCODER_LVDS:
625 sun4i_tcon0_mode_set_lvds(tcon, encoder, mode); 683 sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
@@ -916,7 +974,8 @@ static bool sun4i_tcon_connected_to_tcon_top(struct device_node *node)
916 974
917 remote = of_graph_get_remote_node(node, 0, -1); 975 remote = of_graph_get_remote_node(node, 0, -1);
918 if (remote) { 976 if (remote) {
919 ret = !!of_match_node(sun8i_tcon_top_of_table, remote); 977 ret = !!(IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
978 of_match_node(sun8i_tcon_top_of_table, remote));
920 of_node_put(remote); 979 of_node_put(remote);
921 } 980 }
922 981
@@ -1344,13 +1403,20 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
1344 if (!pdev) 1403 if (!pdev)
1345 return -EINVAL; 1404 return -EINVAL;
1346 1405
1347 if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) { 1406 if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
1407 encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
1348 ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id); 1408 ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
1349 if (ret) 1409 if (ret)
1350 return ret; 1410 return ret;
1351 } 1411 }
1352 1412
1353 return sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id); 1413 if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
1414 ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
1415 if (ret)
1416 return ret;
1417 }
1418
1419 return 0;
1354} 1420}
1355 1421
1356static const struct sun4i_tcon_quirks sun4i_a10_quirks = { 1422static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index f6a071cd5a6f..3d492c8be1fc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -37,18 +37,21 @@
37#define SUN4I_TCON_GINT1_REG 0x8 37#define SUN4I_TCON_GINT1_REG 0x8
38 38
39#define SUN4I_TCON_FRM_CTL_REG 0x10 39#define SUN4I_TCON_FRM_CTL_REG 0x10
40#define SUN4I_TCON_FRM_CTL_EN BIT(31) 40#define SUN4I_TCON0_FRM_CTL_EN BIT(31)
41 41#define SUN4I_TCON0_FRM_CTL_MODE_R BIT(6)
42#define SUN4I_TCON_FRM_SEED_PR_REG 0x14 42#define SUN4I_TCON0_FRM_CTL_MODE_G BIT(5)
43#define SUN4I_TCON_FRM_SEED_PG_REG 0x18 43#define SUN4I_TCON0_FRM_CTL_MODE_B BIT(4)
44#define SUN4I_TCON_FRM_SEED_PB_REG 0x1c 44
45#define SUN4I_TCON_FRM_SEED_LR_REG 0x20 45#define SUN4I_TCON0_FRM_SEED_PR_REG 0x14
46#define SUN4I_TCON_FRM_SEED_LG_REG 0x24 46#define SUN4I_TCON0_FRM_SEED_PG_REG 0x18
47#define SUN4I_TCON_FRM_SEED_LB_REG 0x28 47#define SUN4I_TCON0_FRM_SEED_PB_REG 0x1c
48#define SUN4I_TCON_FRM_TBL0_REG 0x2c 48#define SUN4I_TCON0_FRM_SEED_LR_REG 0x20
49#define SUN4I_TCON_FRM_TBL1_REG 0x30 49#define SUN4I_TCON0_FRM_SEED_LG_REG 0x24
50#define SUN4I_TCON_FRM_TBL2_REG 0x34 50#define SUN4I_TCON0_FRM_SEED_LB_REG 0x28
51#define SUN4I_TCON_FRM_TBL3_REG 0x38 51#define SUN4I_TCON0_FRM_TBL0_REG 0x2c
52#define SUN4I_TCON0_FRM_TBL1_REG 0x30
53#define SUN4I_TCON0_FRM_TBL2_REG 0x34
54#define SUN4I_TCON0_FRM_TBL3_REG 0x38
52 55
53#define SUN4I_TCON0_CTL_REG 0x40 56#define SUN4I_TCON0_CTL_REG 0x40
54#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31) 57#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31)
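
The core of the new dithering support is a small decision on the connector's bus format: 6-bpc panels always get dithering, and RGB565 additionally flags the R and B channels as 5 bits wide before deliberately falling through to the 18-bit cases to enable dithering as well. A trimmed sketch of that switch (the seed/table register writes from the patch are omitted):

        u32 val = 0;

        if (connector->display_info.bpc == 6)
                val |= SUN4I_TCON0_FRM_CTL_EN;

        switch (bus_format) {
        case MEDIA_BUS_FMT_RGB565_1X16:
                /* R and B components are only 5 bits deep */
                val |= SUN4I_TCON0_FRM_CTL_MODE_R | SUN4I_TCON0_FRM_CTL_MODE_B;
                /* fall through */
        case MEDIA_BUS_FMT_RGB666_1X18:
        case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
                val |= SUN4I_TCON0_FRM_CTL_EN;
                break;
        }

        regmap_write(tcon->regs, SUN4I_TCON_FRM_CTL_REG, val);
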
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5ce24098a5fd..70c54774400b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -521,12 +521,12 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
521 kref_init(&exec->refcount); 521 kref_init(&exec->refcount);
522 522
523 ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl, 523 ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
524 &exec->bin.in_fence); 524 0, &exec->bin.in_fence);
525 if (ret == -EINVAL) 525 if (ret == -EINVAL)
526 goto fail; 526 goto fail;
527 527
528 ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl, 528 ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
529 &exec->render.in_fence); 529 0, &exec->render.in_fence);
530 if (ret == -EINVAL) 530 if (ret == -EINVAL)
531 goto fail; 531 goto fail;
532 532
@@ -584,7 +584,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
584 /* Update the return sync object for the */ 584 /* Update the return sync object for the */
585 sync_out = drm_syncobj_find(file_priv, args->out_sync); 585 sync_out = drm_syncobj_find(file_priv, args->out_sync);
586 if (sync_out) { 586 if (sync_out) {
587 drm_syncobj_replace_fence(sync_out, 587 drm_syncobj_replace_fence(sync_out, 0,
588 &exec->render.base.s_fence->finished); 588 &exec->render.base.s_fence->finished);
589 drm_syncobj_put(sync_out); 589 drm_syncobj_put(sync_out);
590 } 590 }
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0e6a121858d1..3ce136ba8791 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
35#include <drm/drm_atomic.h> 35#include <drm/drm_atomic.h>
36#include <drm/drm_atomic_helper.h> 36#include <drm/drm_atomic_helper.h>
37#include <drm/drm_crtc_helper.h> 37#include <drm/drm_crtc_helper.h>
38#include <drm/drm_atomic_uapi.h>
38#include <linux/clk.h> 39#include <linux/clk.h>
39#include <drm/drm_fb_cma_helper.h> 40#include <drm/drm_fb_cma_helper.h>
40#include <linux/component.h> 41#include <linux/component.h>
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 7910b9acedd6..5b22e996af6c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
681 exec->fence = &fence->base; 681 exec->fence = &fence->base;
682 682
683 if (out_sync) 683 if (out_sync)
684 drm_syncobj_replace_fence(out_sync, exec->fence); 684 drm_syncobj_replace_fence(out_sync, 0, exec->fence);
685 685
686 vc4_update_bo_seqnos(exec, seqno); 686 vc4_update_bo_seqnos(exec, seqno);
687 687
@@ -1173,7 +1173,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1173 1173
1174 if (args->in_sync) { 1174 if (args->in_sync) {
1175 ret = drm_syncobj_find_fence(file_priv, args->in_sync, 1175 ret = drm_syncobj_find_fence(file_priv, args->in_sync,
1176 &in_fence); 1176 0, &in_fence);
1177 if (ret) 1177 if (ret)
1178 goto fail; 1178 goto fail;
1179 1179
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cf78f74bb87f..f39ee212412d 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -22,6 +22,7 @@
22#include <drm/drm_atomic_helper.h> 22#include <drm/drm_atomic_helper.h>
23#include <drm/drm_fb_cma_helper.h> 23#include <drm/drm_fb_cma_helper.h>
24#include <drm/drm_plane_helper.h> 24#include <drm/drm_plane_helper.h>
25#include <drm/drm_atomic_uapi.h>
25 26
26#include "uapi/drm/vc4_drm.h" 27#include "uapi/drm/vc4_drm.h"
27#include "vc4_drv.h" 28#include "vc4_drv.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 9f1e0a669d4c..0379d6897659 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -75,12 +75,9 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
75 struct drm_gem_object *obj) 75 struct drm_gem_object *obj)
76{ 76{
77 int ret; 77 int ret;
78 struct virtio_gpu_object *bo;
79 78
80 vgfb->base.obj[0] = obj; 79 vgfb->base.obj[0] = obj;
81 80
82 bo = gem_to_virtio_gpu_obj(obj);
83
84 drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd); 81 drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
85 82
86 ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); 83 ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
index 68db42f15086..0a2745646dfa 100644
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -1,36 +1,143 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include "vkms_drv.h" 2#include "vkms_drv.h"
3#include <linux/crc32.h> 3#include <linux/crc32.h>
4#include <drm/drm_atomic.h>
5#include <drm/drm_atomic_helper.h>
4#include <drm/drm_gem_framebuffer_helper.h> 6#include <drm/drm_gem_framebuffer_helper.h>
5 7
6static uint32_t _vkms_get_crc(struct vkms_crc_data *crc_data) 8/**
9 * compute_crc - Compute CRC value on output frame
10 *
11 * @vaddr_out: address to final framebuffer
12 * @crc_out: framebuffer's metadata
13 *
14 * Returns the CRC value computed using crc32 on the visible portion of
15 * the final framebuffer at vaddr_out.
16 */
17static uint32_t compute_crc(void *vaddr_out, struct vkms_crc_data *crc_out)
18{
19 int i, j, src_offset;
20 int x_src = crc_out->src.x1 >> 16;
21 int y_src = crc_out->src.y1 >> 16;
22 int h_src = drm_rect_height(&crc_out->src) >> 16;
23 int w_src = drm_rect_width(&crc_out->src) >> 16;
24 u32 crc = 0;
25
26 for (i = y_src; i < y_src + h_src; ++i) {
27 for (j = x_src; j < x_src + w_src; ++j) {
28 src_offset = crc_out->offset
29 + (i * crc_out->pitch)
30 + (j * crc_out->cpp);
31 /* XRGB format ignores Alpha channel */
32 memset(vaddr_out + src_offset + 24, 0, 8);
33 crc = crc32_le(crc, vaddr_out + src_offset,
34 sizeof(u32));
35 }
36 }
37
38 return crc;
39}
40
41/**
42 * blend - blend the value at vaddr_src with the value at vaddr_dst
43 * @vaddr_dst: destination address
44 * @vaddr_src: source address
45 * @crc_dst: destination framebuffer's metadata
46 * @crc_src: source framebuffer's metadata
47 *
48 * Blend value at vaddr_src with value at vaddr_dst.
49 * Currently, this function writes the value at vaddr_src over the value
50 * at vaddr_dst, using the buffers' metadata to locate the new values
51 * from vaddr_src and their destination at vaddr_dst.
52 *
53 * Todo: Use the alpha value to blend vaddr_src with vaddr_dst
54 * instead of overwriting it.
55 */
56static void blend(void *vaddr_dst, void *vaddr_src,
57 struct vkms_crc_data *crc_dst,
58 struct vkms_crc_data *crc_src)
7{ 59{
8 struct drm_framebuffer *fb = &crc_data->fb; 60 int i, j, j_dst, i_dst;
61 int offset_src, offset_dst;
62
63 int x_src = crc_src->src.x1 >> 16;
64 int y_src = crc_src->src.y1 >> 16;
65
66 int x_dst = crc_src->dst.x1;
67 int y_dst = crc_src->dst.y1;
68 int h_dst = drm_rect_height(&crc_src->dst);
69 int w_dst = drm_rect_width(&crc_src->dst);
70
71 int y_limit = y_src + h_dst;
72 int x_limit = x_src + w_dst;
73
74 for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
75 for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
76 offset_dst = crc_dst->offset
77 + (i_dst * crc_dst->pitch)
78 + (j_dst++ * crc_dst->cpp);
79 offset_src = crc_src->offset
80 + (i * crc_src->pitch)
81 + (j * crc_src->cpp);
82
83 memcpy(vaddr_dst + offset_dst,
84 vaddr_src + offset_src, sizeof(u32));
85 }
86 i_dst++;
87 }
88}
89
90static void compose_cursor(struct vkms_crc_data *cursor_crc,
91 struct vkms_crc_data *primary_crc, void *vaddr_out)
92{
93 struct drm_gem_object *cursor_obj;
94 struct vkms_gem_object *cursor_vkms_obj;
95
96 cursor_obj = drm_gem_fb_get_obj(&cursor_crc->fb, 0);
97 cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
98
99 mutex_lock(&cursor_vkms_obj->pages_lock);
100 if (!cursor_vkms_obj->vaddr) {
101 DRM_WARN("cursor plane vaddr is NULL");
102 goto out;
103 }
104
105 blend(vaddr_out, cursor_vkms_obj->vaddr, primary_crc, cursor_crc);
106
107out:
108 mutex_unlock(&cursor_vkms_obj->pages_lock);
109}
110
111static uint32_t _vkms_get_crc(struct vkms_crc_data *primary_crc,
112 struct vkms_crc_data *cursor_crc)
113{
114 struct drm_framebuffer *fb = &primary_crc->fb;
9 struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); 115 struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
10 struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj); 116 struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
117 void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
11 u32 crc = 0; 118 u32 crc = 0;
12 int i = 0;
13 unsigned int x = crc_data->src.x1 >> 16;
14 unsigned int y = crc_data->src.y1 >> 16;
15 unsigned int height = drm_rect_height(&crc_data->src) >> 16;
16 unsigned int width = drm_rect_width(&crc_data->src) >> 16;
17 unsigned int cpp = fb->format->cpp[0];
18 unsigned int src_offset;
19 unsigned int size_byte = width * cpp;
20 void *vaddr;
21 119
22 mutex_lock(&vkms_obj->pages_lock); 120 if (!vaddr_out) {
23 vaddr = vkms_obj->vaddr; 121 DRM_ERROR("Failed to allocate memory for output frame.");
24 if (WARN_ON(!vaddr)) 122 return 0;
25 goto out; 123 }
26 124
27 for (i = y; i < y + height; i++) { 125 mutex_lock(&vkms_obj->pages_lock);
28 src_offset = fb->offsets[0] + (i * fb->pitches[0]) + (x * cpp); 126 if (WARN_ON(!vkms_obj->vaddr)) {
29 crc = crc32_le(crc, vaddr + src_offset, size_byte); 127 mutex_unlock(&vkms_obj->pages_lock);
128 return crc;
30 } 129 }
31 130
32out: 131 memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
33 mutex_unlock(&vkms_obj->pages_lock); 132 mutex_unlock(&vkms_obj->pages_lock);
133
134 if (cursor_crc)
135 compose_cursor(cursor_crc, primary_crc, vaddr_out);
136
137 crc = compute_crc(vaddr_out, primary_crc);
138
139 kfree(vaddr_out);
140
34 return crc; 141 return crc;
35} 142}
36 143
@@ -53,6 +160,7 @@ void vkms_crc_work_handle(struct work_struct *work)
53 struct vkms_device *vdev = container_of(out, struct vkms_device, 160 struct vkms_device *vdev = container_of(out, struct vkms_device,
54 output); 161 output);
55 struct vkms_crc_data *primary_crc = NULL; 162 struct vkms_crc_data *primary_crc = NULL;
163 struct vkms_crc_data *cursor_crc = NULL;
56 struct drm_plane *plane; 164 struct drm_plane *plane;
57 u32 crc32 = 0; 165 u32 crc32 = 0;
58 u64 frame_start, frame_end; 166 u64 frame_start, frame_end;
@@ -77,14 +185,14 @@ void vkms_crc_work_handle(struct work_struct *work)
77 if (drm_framebuffer_read_refcount(&crc_data->fb) == 0) 185 if (drm_framebuffer_read_refcount(&crc_data->fb) == 0)
78 continue; 186 continue;
79 187
80 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 188 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
81 primary_crc = crc_data; 189 primary_crc = crc_data;
82 break; 190 else
83 } 191 cursor_crc = crc_data;
84 } 192 }
85 193
86 if (primary_crc) 194 if (primary_crc)
87 crc32 = _vkms_get_crc(primary_crc); 195 crc32 = _vkms_get_crc(primary_crc, cursor_crc);
88 196
89 frame_end = drm_crtc_accurate_vblank_count(crtc); 197 frame_end = drm_crtc_accurate_vblank_count(crtc);
90 198
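
The new compute_crc()/blend() pair above works on raw XRGB8888 bytes: the cursor pixels are copied over a snapshot of the primary framebuffer, the unused X/alpha byte is masked, and crc32 is run over the visible rectangle. The following is a minimal userspace sketch of that idea only, not the kernel code: zlib's crc32() stands in for the kernel's crc32_le(), and the buffer size, crop rectangle and fill pattern are invented for illustration. Build with something like "cc -o crc_sketch crc_sketch.c -lz".

/*
 * Sketch only: mirrors the crop + alpha-mask + CRC idea of compute_crc()
 * above; it is not the kernel code.  zlib's crc32() stands in for the
 * kernel's crc32_le(); buffer size, crop rectangle and fill pattern are
 * invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <zlib.h>

#define WIDTH  64
#define HEIGHT 64
#define CPP    4                        /* XRGB8888: 4 bytes per pixel */

int main(void)
{
        uint8_t *fb = calloc(WIDTH * HEIGHT, CPP);
        unsigned int pitch = WIDTH * CPP;
        unsigned int x = 8, y = 8, w = 32, h = 32;   /* visible rectangle */
        uLong crc = crc32(0L, Z_NULL, 0);
        unsigned int i, j;

        if (!fb)
                return 1;

        for (i = 0; i < WIDTH * HEIGHT * CPP; i++)   /* arbitrary pattern */
                fb[i] = (uint8_t)i;

        for (i = y; i < y + h; i++) {
                for (j = x; j < x + w; j++) {
                        uint8_t *px = fb + i * pitch + j * CPP;

                        px[3] = 0;   /* X byte is ignored (little-endian host) */
                        crc = crc32(crc, px, CPP);
                }
        }

        printf("crc32 over visible region: 0x%08lx\n", crc);
        free(fb);
        return 0;
}
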
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index bd9d4b2389bd..07cfde1b4132 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -5,6 +5,15 @@
5 * (at your option) any later version. 5 * (at your option) any later version.
6 */ 6 */
7 7
8/**
9 * DOC: vkms (Virtual Kernel Modesetting)
10 *
11 * vkms is a software-only model of a kms driver that is useful for testing,
 12 * or for running X (or similar) on headless machines while still being able
 13 * to use the GPU. vkms aims to enable a virtual display without the need for
14 * a hardware display capability.
15 */
16
8#include <linux/module.h> 17#include <linux/module.h>
9#include <drm/drm_gem.h> 18#include <drm/drm_gem.h>
10#include <drm/drm_crtc_helper.h> 19#include <drm/drm_crtc_helper.h>
@@ -21,6 +30,10 @@
21 30
22static struct vkms_device *vkms_device; 31static struct vkms_device *vkms_device;
23 32
33bool enable_cursor;
34module_param_named(enable_cursor, enable_cursor, bool, 0444);
35MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
36
24static const struct file_operations vkms_driver_fops = { 37static const struct file_operations vkms_driver_fops = {
25 .owner = THIS_MODULE, 38 .owner = THIS_MODULE,
26 .open = drm_open, 39 .open = drm_open,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 80af6d3a65e7..1c93990693e3 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -7,8 +7,8 @@
7#include <drm/drm_encoder.h> 7#include <drm/drm_encoder.h>
8#include <linux/hrtimer.h> 8#include <linux/hrtimer.h>
9 9
10#define XRES_MIN 32 10#define XRES_MIN 20
11#define YRES_MIN 32 11#define YRES_MIN 20
12 12
13#define XRES_DEF 1024 13#define XRES_DEF 1024
14#define YRES_DEF 768 14#define YRES_DEF 768
@@ -16,13 +16,22 @@
16#define XRES_MAX 8192 16#define XRES_MAX 8192
17#define YRES_MAX 8192 17#define YRES_MAX 8192
18 18
19extern bool enable_cursor;
20
19static const u32 vkms_formats[] = { 21static const u32 vkms_formats[] = {
20 DRM_FORMAT_XRGB8888, 22 DRM_FORMAT_XRGB8888,
21}; 23};
22 24
25static const u32 vkms_cursor_formats[] = {
26 DRM_FORMAT_ARGB8888,
27};
28
23struct vkms_crc_data { 29struct vkms_crc_data {
24 struct drm_rect src;
25 struct drm_framebuffer fb; 30 struct drm_framebuffer fb;
31 struct drm_rect src, dst;
32 unsigned int offset;
33 unsigned int pitch;
34 unsigned int cpp;
26}; 35};
27 36
28/** 37/**
@@ -104,7 +113,8 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
104 113
105int vkms_output_init(struct vkms_device *vkmsdev); 114int vkms_output_init(struct vkms_device *vkmsdev);
106 115
107struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev); 116struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
117 enum drm_plane_type type);
108 118
109/* Gem stuff */ 119/* Gem stuff */
110struct drm_gem_object *vkms_gem_create(struct drm_device *dev, 120struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 901012cb1af1..271a0eb9042c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -49,14 +49,22 @@ int vkms_output_init(struct vkms_device *vkmsdev)
49 struct drm_connector *connector = &output->connector; 49 struct drm_connector *connector = &output->connector;
50 struct drm_encoder *encoder = &output->encoder; 50 struct drm_encoder *encoder = &output->encoder;
51 struct drm_crtc *crtc = &output->crtc; 51 struct drm_crtc *crtc = &output->crtc;
52 struct drm_plane *primary; 52 struct drm_plane *primary, *cursor = NULL;
53 int ret; 53 int ret;
54 54
55 primary = vkms_plane_init(vkmsdev); 55 primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
56 if (IS_ERR(primary)) 56 if (IS_ERR(primary))
57 return PTR_ERR(primary); 57 return PTR_ERR(primary);
58 58
59 ret = vkms_crtc_init(dev, crtc, primary, NULL); 59 if (enable_cursor) {
60 cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
61 if (IS_ERR(cursor)) {
62 ret = PTR_ERR(cursor);
63 goto err_cursor;
64 }
65 }
66
67 ret = vkms_crtc_init(dev, crtc, primary, cursor);
60 if (ret) 68 if (ret)
61 goto err_crtc; 69 goto err_crtc;
62 70
@@ -106,6 +114,11 @@ err_connector:
106 drm_crtc_cleanup(crtc); 114 drm_crtc_cleanup(crtc);
107 115
108err_crtc: 116err_crtc:
117 if (enable_cursor)
118 drm_plane_cleanup(cursor);
119
120err_cursor:
109 drm_plane_cleanup(primary); 121 drm_plane_cleanup(primary);
122
110 return ret; 123 return ret;
111} 124}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index c91661631c76..7041007396ae 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -81,26 +81,33 @@ static const struct drm_plane_funcs vkms_plane_funcs = {
81 .atomic_destroy_state = vkms_plane_destroy_state, 81 .atomic_destroy_state = vkms_plane_destroy_state,
82}; 82};
83 83
84static void vkms_primary_plane_update(struct drm_plane *plane, 84static void vkms_plane_atomic_update(struct drm_plane *plane,
85 struct drm_plane_state *old_state) 85 struct drm_plane_state *old_state)
86{ 86{
87 struct vkms_plane_state *vkms_plane_state; 87 struct vkms_plane_state *vkms_plane_state;
88 struct drm_framebuffer *fb = plane->state->fb;
88 struct vkms_crc_data *crc_data; 89 struct vkms_crc_data *crc_data;
89 90
90 if (!plane->state->crtc || !plane->state->fb) 91 if (!plane->state->crtc || !fb)
91 return; 92 return;
92 93
93 vkms_plane_state = to_vkms_plane_state(plane->state); 94 vkms_plane_state = to_vkms_plane_state(plane->state);
95
94 crc_data = vkms_plane_state->crc_data; 96 crc_data = vkms_plane_state->crc_data;
95 memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect)); 97 memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect));
96 memcpy(&crc_data->fb, plane->state->fb, sizeof(struct drm_framebuffer)); 98 memcpy(&crc_data->dst, &plane->state->dst, sizeof(struct drm_rect));
99 memcpy(&crc_data->fb, fb, sizeof(struct drm_framebuffer));
97 drm_framebuffer_get(&crc_data->fb); 100 drm_framebuffer_get(&crc_data->fb);
101 crc_data->offset = fb->offsets[0];
102 crc_data->pitch = fb->pitches[0];
103 crc_data->cpp = fb->format->cpp[0];
98} 104}
99 105
100static int vkms_plane_atomic_check(struct drm_plane *plane, 106static int vkms_plane_atomic_check(struct drm_plane *plane,
101 struct drm_plane_state *state) 107 struct drm_plane_state *state)
102{ 108{
103 struct drm_crtc_state *crtc_state; 109 struct drm_crtc_state *crtc_state;
110 bool can_position = false;
104 int ret; 111 int ret;
105 112
106 if (!state->fb | !state->crtc) 113 if (!state->fb | !state->crtc)
@@ -110,15 +117,18 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
110 if (IS_ERR(crtc_state)) 117 if (IS_ERR(crtc_state))
111 return PTR_ERR(crtc_state); 118 return PTR_ERR(crtc_state);
112 119
120 if (plane->type == DRM_PLANE_TYPE_CURSOR)
121 can_position = true;
122
113 ret = drm_atomic_helper_check_plane_state(state, crtc_state, 123 ret = drm_atomic_helper_check_plane_state(state, crtc_state,
114 DRM_PLANE_HELPER_NO_SCALING, 124 DRM_PLANE_HELPER_NO_SCALING,
115 DRM_PLANE_HELPER_NO_SCALING, 125 DRM_PLANE_HELPER_NO_SCALING,
116 false, true); 126 can_position, true);
117 if (ret != 0) 127 if (ret != 0)
118 return ret; 128 return ret;
119 129
120 /* for now primary plane must be visible and full screen */ 130 /* for now primary plane must be visible and full screen */
121 if (!state->visible) 131 if (!state->visible && !can_position)
122 return -EINVAL; 132 return -EINVAL;
123 133
124 return 0; 134 return 0;
@@ -156,15 +166,17 @@ static void vkms_cleanup_fb(struct drm_plane *plane,
156} 166}
157 167
158static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { 168static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
159 .atomic_update = vkms_primary_plane_update, 169 .atomic_update = vkms_plane_atomic_update,
160 .atomic_check = vkms_plane_atomic_check, 170 .atomic_check = vkms_plane_atomic_check,
161 .prepare_fb = vkms_prepare_fb, 171 .prepare_fb = vkms_prepare_fb,
162 .cleanup_fb = vkms_cleanup_fb, 172 .cleanup_fb = vkms_cleanup_fb,
163}; 173};
164 174
165struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev) 175struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
176 enum drm_plane_type type)
166{ 177{
167 struct drm_device *dev = &vkmsdev->drm; 178 struct drm_device *dev = &vkmsdev->drm;
179 const struct drm_plane_helper_funcs *funcs;
168 struct drm_plane *plane; 180 struct drm_plane *plane;
169 const u32 *formats; 181 const u32 *formats;
170 int ret, nformats; 182 int ret, nformats;
@@ -173,19 +185,26 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
173 if (!plane) 185 if (!plane)
174 return ERR_PTR(-ENOMEM); 186 return ERR_PTR(-ENOMEM);
175 187
176 formats = vkms_formats; 188 if (type == DRM_PLANE_TYPE_CURSOR) {
177 nformats = ARRAY_SIZE(vkms_formats); 189 formats = vkms_cursor_formats;
190 nformats = ARRAY_SIZE(vkms_cursor_formats);
191 funcs = &vkms_primary_helper_funcs;
192 } else {
193 formats = vkms_formats;
194 nformats = ARRAY_SIZE(vkms_formats);
195 funcs = &vkms_primary_helper_funcs;
196 }
178 197
179 ret = drm_universal_plane_init(dev, plane, 0, 198 ret = drm_universal_plane_init(dev, plane, 0,
180 &vkms_plane_funcs, 199 &vkms_plane_funcs,
181 formats, nformats, 200 formats, nformats,
182 NULL, DRM_PLANE_TYPE_PRIMARY, NULL); 201 NULL, type, NULL);
183 if (ret) { 202 if (ret) {
184 kfree(plane); 203 kfree(plane);
185 return ERR_PTR(ret); 204 return ERR_PTR(ret);
186 } 205 }
187 206
188 drm_plane_helper_add(plane, &vkms_primary_helper_funcs); 207 drm_plane_helper_add(plane, funcs);
189 208
190 return plane; 209 return plane;
191} 210}
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
index 43c39eca4ae1..034f8ffa8f20 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -155,8 +155,7 @@ static int vboxfb_create(struct drm_fb_helper *helper,
155 * The last flag forces a mode set on VT switches even if the kernel 155 * The last flag forces a mode set on VT switches even if the kernel
156 * does not think it is needed. 156 * does not think it is needed.
157 */ 157 */
158 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT | 158 info->flags = FBINFO_DEFAULT | FBINFO_MISC_ALWAYS_SETPAR;
159 FBINFO_MISC_ALWAYS_SETPAR;
160 info->fbops = &vboxfb_ops; 159 info->fbops = &vboxfb_ops;
161 160
162 /* 161 /*
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5f1183b0b89d..55370e651db3 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1004,9 +1004,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
1004 clear_buffer_attributes(vc); 1004 clear_buffer_attributes(vc);
1005 } 1005 }
1006 1006
1007 /* Forcibly update if we're panicing */ 1007 if (update && vc->vc_mode != KD_GRAPHICS)
1008 if ((update && vc->vc_mode != KD_GRAPHICS) ||
1009 vt_force_oops_output(vc))
1010 do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2); 1008 do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
1011 } 1009 }
1012 set_cursor(vc); 1010 set_cursor(vc);
@@ -1046,7 +1044,6 @@ static void visual_init(struct vc_data *vc, int num, int init)
1046 vc->vc_hi_font_mask = 0; 1044 vc->vc_hi_font_mask = 0;
1047 vc->vc_complement_mask = 0; 1045 vc->vc_complement_mask = 0;
1048 vc->vc_can_do_color = 0; 1046 vc->vc_can_do_color = 0;
1049 vc->vc_panic_force_write = false;
1050 vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS; 1047 vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
1051 vc->vc_sw->con_init(vc, init); 1048 vc->vc_sw->con_init(vc, init);
1052 if (!vc->vc_complement_mask) 1049 if (!vc->vc_complement_mask)
@@ -2911,7 +2908,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2911 goto quit; 2908 goto quit;
2912 } 2909 }
2913 2910
2914 if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc)) 2911 if (vc->vc_mode != KD_TEXT)
2915 goto quit; 2912 goto quit;
2916 2913
2917 /* undraw cursor first */ 2914 /* undraw cursor first */
@@ -4229,8 +4226,7 @@ void do_unblank_screen(int leaving_gfx)
4229 return; 4226 return;
4230 } 4227 }
4231 vc = vc_cons[fg_console].d; 4228 vc = vc_cons[fg_console].d;
4232 /* Try to unblank in oops case too */ 4229 if (vc->vc_mode != KD_TEXT)
4233 if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
4234 return; /* but leave console_blanked != 0 */ 4230 return; /* but leave console_blanked != 0 */
4235 4231
4236 if (blankinterval) { 4232 if (blankinterval) {
@@ -4239,7 +4235,7 @@ void do_unblank_screen(int leaving_gfx)
4239 } 4235 }
4240 4236
4241 console_blanked = 0; 4237 console_blanked = 0;
4242 if (vc->vc_sw->con_blank(vc, 0, leaving_gfx) || vt_force_oops_output(vc)) 4238 if (vc->vc_sw->con_blank(vc, 0, leaving_gfx))
4243 /* Low-level driver cannot restore -> do it ourselves */ 4239 /* Low-level driver cannot restore -> do it ourselves */
4244 update_screen(vc); 4240 update_screen(vc);
4245 if (console_blank_hook) 4241 if (console_blank_hook)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 75ebbbf0a1fb..8958ccc8b1ac 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -284,8 +284,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
284 struct fbcon_ops *ops = info->fbcon_par; 284 struct fbcon_ops *ops = info->fbcon_par;
285 285
286 return (info->state != FBINFO_STATE_RUNNING || 286 return (info->state != FBINFO_STATE_RUNNING ||
287 vc->vc_mode != KD_TEXT || ops->graphics) && 287 vc->vc_mode != KD_TEXT || ops->graphics);
288 !vt_force_oops_output(vc);
289} 288}
290 289
291static int get_color(struct vc_data *vc, struct fb_info *info, 290static int get_color(struct vc_data *vc, struct fb_info *info,
@@ -1104,7 +1103,6 @@ static void fbcon_init(struct vc_data *vc, int init)
1104 if (p->userfont) 1103 if (p->userfont)
1105 charcnt = FNTCHARCNT(p->fontdata); 1104 charcnt = FNTCHARCNT(p->fontdata);
1106 1105
1107 vc->vc_panic_force_write = !!(info->flags & FBINFO_CAN_FORCE_OUTPUT);
1108 vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); 1106 vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
1109 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; 1107 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
1110 if (charcnt == 256) { 1108 if (charcnt == 256) {
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 5ffadc8e681d..861bf8081619 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1117,6 +1117,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1117 if (!lock_fb_info(info)) 1117 if (!lock_fb_info(info))
1118 return -ENODEV; 1118 return -ENODEV;
1119 fix = info->fix; 1119 fix = info->fix;
1120 if (info->flags & FBINFO_HIDE_SMEM_START)
1121 fix.smem_start = 0;
1120 unlock_fb_info(info); 1122 unlock_fb_info(info);
1121 1123
1122 ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0; 1124 ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
@@ -1327,6 +1329,8 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
1327 if (!lock_fb_info(info)) 1329 if (!lock_fb_info(info))
1328 return -ENODEV; 1330 return -ENODEV;
1329 fix = info->fix; 1331 fix = info->fix;
1332 if (info->flags & FBINFO_HIDE_SMEM_START)
1333 fix.smem_start = 0;
1330 unlock_fb_info(info); 1334 unlock_fb_info(info);
1331 return do_fscreeninfo_to_user(&fix, compat_ptr(arg)); 1335 return do_fscreeninfo_to_user(&fix, compat_ptr(arg));
1332} 1336}
@@ -1834,11 +1838,11 @@ EXPORT_SYMBOL(remove_conflicting_framebuffers);
1834/** 1838/**
1835 * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices 1839 * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
1836 * @pdev: PCI device 1840 * @pdev: PCI device
1837 * @resource_id: index of PCI BAR configuring framebuffer memory 1841 * @res_id: index of PCI BAR configuring framebuffer memory
1838 * @name: requesting driver name 1842 * @name: requesting driver name
1839 * 1843 *
1840 * This function removes framebuffer devices (eg. initialized by firmware) 1844 * This function removes framebuffer devices (eg. initialized by firmware)
1841 * using memory range configured for @pdev's BAR @resource_id. 1845 * using memory range configured for @pdev's BAR @res_id.
1842 * 1846 *
1843 * The function assumes that PCI device with shadowed ROM drives a primary 1847 * The function assumes that PCI device with shadowed ROM drives a primary
1844 * display and so kicks out vga16fb. 1848 * display and so kicks out vga16fb.
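
The FBIOGET_FSCREENINFO hunks above are easy to observe from userspace once a driver sets FBINFO_HIDE_SMEM_START (see the fb.h hunk further down): smem_start simply reads back as 0. A small, purely illustrative check, assuming /dev/fb0 exists:

/*
 * Illustrative only: queries the fixed screen info of /dev/fb0 and prints
 * smem_start, which the hunks above force to 0 when the driver sets
 * FBINFO_HIDE_SMEM_START.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
        struct fb_fix_screeninfo fix;
        int fd = open("/dev/fb0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/fb0");
                return 1;
        }
        if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0) {
                perror("FBIOGET_FSCREENINFO");
                close(fd);
                return 1;
        }

        /* 0 means the physical address is hidden; share via dma-buf instead */
        printf("id=%.16s smem_start=0x%lx smem_len=%u\n",
               fix.id, fix.smem_start, fix.smem_len);
        close(fd);
        return 0;
}
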
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f7a19c2a7a80..05350424a4d3 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -110,7 +110,4 @@ static inline bool drm_can_sleep(void)
110 return true; 110 return true;
111} 111}
112 112
113/* helper for handling conditionals in various for_each macros */
114#define for_each_if(condition) if (!(condition)) {} else
115
116#endif 113#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index da9d95a19580..d6adebcd6ea4 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -29,6 +29,7 @@
29#define DRM_ATOMIC_H_ 29#define DRM_ATOMIC_H_
30 30
31#include <drm/drm_crtc.h> 31#include <drm/drm_crtc.h>
32#include <drm/drm_util.h>
32 33
33/** 34/**
34 * struct drm_crtc_commit - track modeset commits on a CRTC 35 * struct drm_crtc_commit - track modeset commits on a CRTC
@@ -373,9 +374,6 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state);
373struct drm_crtc_state * __must_check 374struct drm_crtc_state * __must_check
374drm_atomic_get_crtc_state(struct drm_atomic_state *state, 375drm_atomic_get_crtc_state(struct drm_atomic_state *state,
375 struct drm_crtc *crtc); 376 struct drm_crtc *crtc);
376int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
377 struct drm_crtc_state *state, struct drm_property *property,
378 uint64_t val);
379struct drm_plane_state * __must_check 377struct drm_plane_state * __must_check
380drm_atomic_get_plane_state(struct drm_atomic_state *state, 378drm_atomic_get_plane_state(struct drm_atomic_state *state,
381 struct drm_plane *plane); 379 struct drm_plane *plane);
@@ -587,25 +585,6 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
587} 585}
588 586
589int __must_check 587int __must_check
590drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
591 const struct drm_display_mode *mode);
592int __must_check
593drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
594 struct drm_property_blob *blob);
595int __must_check
596drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
597 struct drm_crtc *crtc);
598void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
599 struct drm_framebuffer *fb);
600void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
601 struct dma_fence *fence);
602int __must_check
603drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
604 struct drm_crtc *crtc);
605int drm_atomic_set_writeback_fb_for_connector(
606 struct drm_connector_state *conn_state,
607 struct drm_framebuffer *fb);
608int __must_check
609drm_atomic_add_affected_connectors(struct drm_atomic_state *state, 588drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
610 struct drm_crtc *crtc); 589 struct drm_crtc *crtc);
611int __must_check 590int __must_check
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index f4c7ed876c97..657af7b39379 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -31,6 +31,7 @@
31#include <drm/drm_crtc.h> 31#include <drm/drm_crtc.h>
32#include <drm/drm_modeset_helper_vtables.h> 32#include <drm/drm_modeset_helper_vtables.h>
33#include <drm/drm_modeset_helper.h> 33#include <drm/drm_modeset_helper.h>
34#include <drm/drm_util.h>
34 35
35struct drm_atomic_state; 36struct drm_atomic_state;
36struct drm_private_obj; 37struct drm_private_obj;
diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h
new file mode 100644
index 000000000000..8cec52ad1277
--- /dev/null
+++ b/include/drm/drm_atomic_uapi.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 * Copyright (C) 2018 Intel Corp.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Rob Clark <robdclark@gmail.com>
26 * Daniel Vetter <daniel.vetter@ffwll.ch>
27 */
28
29#ifndef DRM_ATOMIC_UAPI_H_
30#define DRM_ATOMIC_UAPI_H_
31
32struct drm_crtc_state;
33struct drm_display_mode;
34struct drm_property_blob;
35struct drm_plane_state;
36struct drm_crtc;
37struct drm_connector_state;
38struct dma_fence;
39struct drm_framebuffer;
40
41int __must_check
42drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
43 const struct drm_display_mode *mode);
44int __must_check
45drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
46 struct drm_property_blob *blob);
47int __must_check
48drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
49 struct drm_crtc *crtc);
50void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
51 struct drm_framebuffer *fb);
52void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
53 struct dma_fence *fence);
54int __must_check
55drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
56 struct drm_crtc *crtc);
57
58#endif
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 44f04233e3db..90ef9996d9a4 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -24,6 +24,7 @@
24#define __DRM_COLOR_MGMT_H__ 24#define __DRM_COLOR_MGMT_H__
25 25
26#include <linux/ctype.h> 26#include <linux/ctype.h>
27#include <drm/drm_property.h>
27 28
28struct drm_crtc; 29struct drm_crtc;
29struct drm_plane; 30struct drm_plane;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 97ea41dc678f..91a877fa00cb 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -28,6 +28,7 @@
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/hdmi.h> 29#include <linux/hdmi.h>
30#include <drm/drm_mode_object.h> 30#include <drm/drm_mode_object.h>
31#include <drm/drm_util.h>
31 32
32#include <uapi/drm/drm_mode.h> 33#include <uapi/drm/drm_mode.h>
33 34
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 46a8009784df..23b9678137a6 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -56,7 +56,6 @@ struct drm_printer;
56#define DRIVER_ATOMIC 0x10000 56#define DRIVER_ATOMIC 0x10000
57#define DRIVER_KMS_LEGACY_CONTEXT 0x20000 57#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
58#define DRIVER_SYNCOBJ 0x40000 58#define DRIVER_SYNCOBJ 0x40000
59#define DRIVER_PREFER_XBGR_30BPP 0x80000
60 59
61/** 60/**
62 * struct drm_driver - DRM driver structure 61 * struct drm_driver - DRM driver structure
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 4f597c0730b4..70cfca03d812 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -28,6 +28,7 @@
28#include <drm/drm_crtc.h> 28#include <drm/drm_crtc.h>
29#include <drm/drm_mode.h> 29#include <drm/drm_mode.h>
30#include <drm/drm_mode_object.h> 30#include <drm/drm_mode_object.h>
31#include <drm/drm_util.h>
31 32
32struct drm_encoder; 33struct drm_encoder;
33 34
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 8b6ab3200a2c..bb9acea61369 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -604,6 +604,16 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
604 604
605#endif 605#endif
606 606
607/**
608 * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
609 * @a: memory range, users of which are to be removed
610 * @name: requesting driver name
611 * @primary: also kick vga16fb if present
612 *
613 * This function removes framebuffer devices (initialized by firmware/bootloader)
 614 * which use the memory range described by @a. If @a is NULL, all such devices are
615 * removed.
616 */
607static inline int 617static inline int
608drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, 618drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
609 const char *name, bool primary) 619 const char *name, bool primary)
@@ -615,6 +625,18 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
615#endif 625#endif
616} 626}
617 627
628/**
629 * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
630 * @pdev: PCI device
631 * @resource_id: index of PCI BAR configuring framebuffer memory
632 * @name: requesting driver name
633 *
634 * This function removes framebuffer devices (eg. initialized by firmware)
635 * using memory range configured for @pdev's BAR @resource_id.
636 *
637 * The function assumes that PCI device with shadowed ROM drives a primary
638 * display and so kicks out vga16fb.
639 */
618static inline int 640static inline int
619drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, 641drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
620 int resource_id, 642 int resource_id,
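
For context, the two wrappers documented above are meant to be called early in a driver's probe path, before the driver maps the BAR itself. Below is a hedged sketch of the PCI variant; the name "exampledrmfb", the probe function and BAR index 0 are placeholders, not taken from any in-tree driver.

/*
 * Sketch, not from the tree: shows where a PCI DRM driver would call the
 * helper documented above.  A real driver passes its own name and the BAR
 * that backs the firmware framebuffer.
 */
#include <linux/pci.h>
#include <drm/drm_fb_helper.h>

static int example_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        int ret;

        /* kick out firmware framebuffers (efifb, vesafb, ...) first */
        ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
                                                                "exampledrmfb");
        if (ret)
                return ret;

        /* ... normal device setup continues here ... */
        return 0;
}
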
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index f9c15845f465..fac831c40106 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,6 +25,28 @@
25#include <linux/types.h> 25#include <linux/types.h>
26#include <uapi/drm/drm_fourcc.h> 26#include <uapi/drm/drm_fourcc.h>
27 27
28/*
29 * DRM formats are little endian. Define host endian variants for the
30 * most common formats here, to reduce the #ifdefs needed in drivers.
31 *
32 * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in
33 * case the format can't be specified otherwise, so we don't end up
34 * with two values describing the same format.
35 */
36#ifdef __BIG_ENDIAN
37# define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \
38 DRM_FORMAT_BIG_ENDIAN)
39# define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \
40 DRM_FORMAT_BIG_ENDIAN)
41# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888
42# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888
43#else
44# define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555
45# define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565
46# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888
47# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888
48#endif
49
28struct drm_device; 50struct drm_device;
29struct drm_mode_fb_cmd2; 51struct drm_mode_fb_cmd2;
30 52
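
The host-endian aliases above can be sanity-checked outside the kernel: a native u32 XRGB8888 pixel written by a big-endian CPU has the same in-memory byte order as little-endian BGRX8888, which is exactly the value the macro resolves to. The standalone illustration below deliberately re-declares the fourcc values rather than including the kernel-internal header, and uses the compiler's __BYTE_ORDER__ as a stand-in for the kernel's __BIG_ENDIAN test.

/*
 * Standalone illustration of the DRM_FORMAT_HOST_XRGB8888 mapping above.
 * fourcc_code() and the two format values are copied from the uapi header;
 * the endian test uses the compiler's __BYTE_ORDER__ instead of the kernel's
 * __BIG_ENDIAN.
 */
#include <stdint.h>
#include <stdio.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                                 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define DRM_FORMAT_XRGB8888     fourcc_code('X', 'R', '2', '4')
#define DRM_FORMAT_BGRX8888     fourcc_code('B', 'X', '2', '4')

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define DRM_FORMAT_HOST_XRGB8888       DRM_FORMAT_BGRX8888
#else
# define DRM_FORMAT_HOST_XRGB8888       DRM_FORMAT_XRGB8888
#endif

int main(void)
{
        uint32_t fmt = DRM_FORMAT_HOST_XRGB8888;

        /* decode the fourcc back into its four characters */
        printf("host XRGB8888 fourcc: %c%c%c%c\n",
               fmt & 0xff, (fmt >> 8) & 0xff, (fmt >> 16) & 0xff, fmt >> 24);
        return 0;
}
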
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index a0b202e1d69a..928e4172a0bb 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -809,6 +809,21 @@ struct drm_mode_config {
809 809
810 /* dumb ioctl parameters */ 810 /* dumb ioctl parameters */
811 uint32_t preferred_depth, prefer_shadow; 811 uint32_t preferred_depth, prefer_shadow;
812 bool quirk_addfb_prefer_xbgr_30bpp;
813
814 /**
815 * @quirk_addfb_prefer_host_byte_order:
816 *
 817 * When set to true, drm_mode_addfb() will pick host byte order
818 * pixel_format when calling drm_mode_addfb2(). This is how
819 * drm_mode_addfb() should have worked from day one. It
820 * didn't though, so we ended up with quirks in both kernel
821 * and userspace drivers to deal with the broken behavior.
822 * Simply fixing drm_mode_addfb() unconditionally would break
 823 * these drivers, so add a quirk bit here to let drivers
 824 * opt in.
825 */
826 bool quirk_addfb_prefer_host_byte_order;
812 827
813 /** 828 /**
814 * @async_page_flip: Does this device support async flips on the primary 829 * @async_page_flip: Does this device support async flips on the primary
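
A hedged sketch of where a driver would set the new opt-in bit, modelled on what the byte-order work in this series does for drivers such as bochs; the function name is made up and this is not a complete modeset init.

/*
 * Sketch only: where a driver would set the opt-in bit during modeset init.
 * example_mode_config_init() is a made-up name.
 */
#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>

static void example_mode_config_init(struct drm_device *dev)
{
        drm_mode_config_init(dev);

        /* drm_mode_addfb() will now pick a host-byte-order pixel format */
        dev->mode_config.quirk_addfb_prefer_host_byte_order = true;

        /* ... min/max sizes, mode_config funcs, etc. ... */
}
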
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 16f5b66684ca..0a0834bef8bd 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -27,6 +27,9 @@
27#include <linux/ctype.h> 27#include <linux/ctype.h>
28#include <drm/drm_mode_object.h> 28#include <drm/drm_mode_object.h>
29#include <drm/drm_color_mgmt.h> 29#include <drm/drm_color_mgmt.h>
30#include <drm/drm_rect.h>
31#include <drm/drm_modeset_lock.h>
32#include <drm/drm_util.h>
30 33
31struct drm_crtc; 34struct drm_crtc;
32struct drm_printer; 35struct drm_printer;
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index c030f6ccab99..5b9efff35d6d 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -27,6 +27,8 @@
27#include <linux/ctype.h> 27#include <linux/ctype.h>
28#include <drm/drm_mode_object.h> 28#include <drm/drm_mode_object.h>
29 29
30#include <uapi/drm/drm_mode.h>
31
30/** 32/**
31 * struct drm_property_enum - symbolic values for enumerations 33 * struct drm_property_enum - symbolic values for enumerations
32 * @value: numeric property value for this enum entry 34 * @value: numeric property value for this enum entry
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index e419c79ba94d..425432b85a87 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -131,10 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
131 131
132struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, 132struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
133 u32 handle); 133 u32 handle);
134void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, 134void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
135 struct dma_fence *fence); 135 struct dma_fence *fence);
136int drm_syncobj_find_fence(struct drm_file *file_private, 136int drm_syncobj_find_fence(struct drm_file *file_private,
137 u32 handle, 137 u32 handle, u64 point,
138 struct dma_fence **fence); 138 struct dma_fence **fence);
139void drm_syncobj_free(struct kref *kref); 139void drm_syncobj_free(struct kref *kref);
140int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, 140int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
new file mode 100644
index 000000000000..88abdca89baa
--- /dev/null
+++ b/include/drm/drm_util.h
@@ -0,0 +1,32 @@
1/*
2 * Internal Header for the Direct Rendering Manager
3 *
4 * Copyright 2018 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef _DRM_UTIL_H_
27#define _DRM_UTIL_H_
28
29/* helper for handling conditionals in various for_each macros */
30#define for_each_if(condition) if (!(condition)) {} else
31
32#endif
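
for_each_if() exists so that filtering for_each_* macros can hide an if inside the loop body without stealing an else written by the caller. A standalone demonstration follows; the for_each_even() iterator is invented for the example.

/*
 * Standalone demonstration of why for_each_if() is spelled with a trailing
 * "else": the hidden if inside a filtering iterator already owns an else, so
 * an else written by the caller still binds to the caller's own if.
 */
#include <stdio.h>

#define for_each_if(condition) if (!(condition)) {} else

#define for_each_even(i, n) \
        for ((i) = 0; (i) < (n); (i)++) \
                for_each_if(((i) % 2) == 0)

int main(void)
{
        int i, enabled = 0;

        if (enabled)
                for_each_even(i, 10)
                        printf("%d is even\n", i);
        else
                printf("iteration disabled\n");   /* binds to "if (enabled)" */

        return 0;
}
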
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index fea64f2692a0..ab137f97ecbd 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -141,7 +141,6 @@ struct vc_data {
141 struct uni_pagedir *vc_uni_pagedir; 141 struct uni_pagedir *vc_uni_pagedir;
142 struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ 142 struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
143 struct uni_screen *vc_uni_screen; /* unicode screen content */ 143 struct uni_screen *vc_uni_screen; /* unicode screen content */
144 bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
145 /* additional information is in vt_kern.h */ 144 /* additional information is in vt_kern.h */
146}; 145};
147 146
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3cd375dafd0e..a3cab6dc9b44 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -456,10 +456,13 @@ struct fb_tile_ops {
456 * and host endianness. Drivers should not use this flag. 456 * and host endianness. Drivers should not use this flag.
457 */ 457 */
458#define FBINFO_BE_MATH 0x100000 458#define FBINFO_BE_MATH 0x100000
459/*
460 * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
461 * drivers to stop userspace from trying to share buffers behind the kernel's
462 * back. Instead dma-buf based buffer sharing should be used.
463 */
464#define FBINFO_HIDE_SMEM_START 0x200000
459 465
460/* report to the VT layer that this fb driver can accept forced console
461 output like oopses */
462#define FBINFO_CAN_FORCE_OUTPUT 0x200000
463 466
464struct fb_info { 467struct fb_info {
465 atomic_t count; 468 atomic_t count;
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 3fd07912909c..8dc77e40bc03 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -135,13 +135,6 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
135 int deflt); 135 int deflt);
136int vty_init(const struct file_operations *console_fops); 136int vty_init(const struct file_operations *console_fops);
137 137
138static inline bool vt_force_oops_output(struct vc_data *vc)
139{
140 if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0)
141 return true;
142 return false;
143}
144
145extern char vt_dont_switch; 138extern char vt_dont_switch;
146extern int default_utf8; 139extern int default_utf8;
147extern int global_cursor_default; 140extern int global_cursor_default;
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2ed46e9ae16a..139632b87181 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -71,6 +71,9 @@ extern "C" {
71 71
72#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */ 72#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
73 73
74/* Reserve 0 for the invalid format specifier */
75#define DRM_FORMAT_INVALID 0
76
74/* color index */ 77/* color index */
75#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ 78#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
76 79
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 8d67243952f4..d3e0fe31efc5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -186,8 +186,9 @@ extern "C" {
186/* 186/*
187 * DRM_MODE_REFLECT_<axis> 187 * DRM_MODE_REFLECT_<axis>
188 * 188 *
189 * Signals that the contents of a drm plane is reflected in the <axis> axis, 189 * Signals that the contents of a drm plane is reflected along the <axis> axis,
190 * in the same way as mirroring. 190 * in the same way as mirroring.
191 * See kerneldoc chapter "Plane Composition Properties" for more details.
191 * 192 *
192 * This define is provided as a convenience, looking up the property id 193 * This define is provided as a convenience, looking up the property id
193 * using the name->prop id lookup is the preferred method. 194 * using the name->prop id lookup is the preferred method.