aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2018-11-21 21:54:33 -0500
committerDave Airlie <airlied@redhat.com>2018-11-21 21:54:38 -0500
commitb239499f927f79401d51a677bc640980ca630604 (patch)
tree2a434f67ac8eda54e442f9a52a508988c9bbd2e4 /drivers
parent9235dd441af43599b9cdcce599a3da4083fcad3c (diff)
parent0081cdfe63f0b5e72b14d13f45a93ca7b0b8092f (diff)
Merge tag 'drm-misc-next-2018-11-21' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v4.21, part 2: UAPI Changes: - Remove syncobj timeline support from drm. Cross-subsystem Changes: - Document canvas provider node in the DT bindings. - Improve documentation for TPO TPG110 DT bindings. Core Changes: - Use explicit state in drm atomic functions. - Add panel quirk for new GPD Win2 firmware. - Add DRM_FORMAT_XYUV8888. - Set the default import/export function in prime to drm_gem_prime_import/export. - Add a separate drm_gem_object_funcs, to stop relying on dev->driver->*gem* functions. - Make sure that tinydrm sets the virtual address also on imported buffers. Driver Changes: - Support active-low data enable signal in sun4i. - Fix scaling in vc4. - Use canvas provider node in meson. - Remove unused variables in sti and qxl and cirrus. - Add overlay plane support and primary plane scaling to meson. - i2c fixes in drm/bridge/sii902x - Fix mailbox read size in rockchip. - Spelling fix in panel/s6d16d0. - Remove unnecessary null check from qxl_bo_unref. - Remove unused arguments from qxl_bo_pin. - Fix qxl cursor pinning. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/9c0409e3-a85f-d2af-b4eb-baf1eb8bbae4@linux.intel.com
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c247
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c3
-rw-r--r--drivers/gpu/drm/drm_atomic.c115
-rw-r--r--drivers/gpu/drm/drm_client.c12
-rw-r--r--drivers/gpu/drm/drm_fourcc.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c109
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c86
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c2
-rw-r--r--drivers/gpu/drm/drm_prime.c79
-rw-r--r--drivers/gpu/drm/drm_syncobj.c359
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/meson/Kconfig1
-rw-r--r--drivers/gpu/drm/meson/Makefile2
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.c7
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.h11
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c265
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c74
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h66
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c586
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.h14
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c185
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h3
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c15
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c90
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c18
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c22
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c4
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c29
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c71
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c6
-rw-r--r--drivers/gpu/drm/tinydrm/hx8357d.c4
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c5
-rw-r--r--drivers/gpu/drm/tinydrm/ili9341.c4
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c6
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c10
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c4
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c5
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c29
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c108
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c19
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c46
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c2
55 files changed, 2042 insertions, 746 deletions
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 9eeb8ef0b174..2fee47b0d50b 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -95,6 +95,7 @@ config DRM_SII902X
95 depends on OF 95 depends on OF
96 select DRM_KMS_HELPER 96 select DRM_KMS_HELPER
97 select REGMAP_I2C 97 select REGMAP_I2C
98 select I2C_MUX
98 ---help--- 99 ---help---
99 Silicon Image sii902x bridge chip driver. 100 Silicon Image sii902x bridge chip driver.
100 101
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index e59a13542333..bfa902013aa4 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2018 Renesas Electronics
3 *
2 * Copyright (C) 2016 Atmel 4 * Copyright (C) 2016 Atmel
3 * Bo Shen <voice.shen@atmel.com> 5 * Bo Shen <voice.shen@atmel.com>
4 * 6 *
@@ -21,6 +23,7 @@
21 */ 23 */
22 24
23#include <linux/gpio/consumer.h> 25#include <linux/gpio/consumer.h>
26#include <linux/i2c-mux.h>
24#include <linux/i2c.h> 27#include <linux/i2c.h>
25#include <linux/module.h> 28#include <linux/module.h>
26#include <linux/regmap.h> 29#include <linux/regmap.h>
@@ -86,8 +89,49 @@ struct sii902x {
86 struct drm_bridge bridge; 89 struct drm_bridge bridge;
87 struct drm_connector connector; 90 struct drm_connector connector;
88 struct gpio_desc *reset_gpio; 91 struct gpio_desc *reset_gpio;
92 struct i2c_mux_core *i2cmux;
89}; 93};
90 94
95static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val)
96{
97 union i2c_smbus_data data;
98 int ret;
99
100 ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
101 I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &data);
102
103 if (ret < 0)
104 return ret;
105
106 *val = data.byte;
107 return 0;
108}
109
110static int sii902x_write_unlocked(struct i2c_client *i2c, u8 reg, u8 val)
111{
112 union i2c_smbus_data data;
113
114 data.byte = val;
115
116 return __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
117 I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA,
118 &data);
119}
120
121static int sii902x_update_bits_unlocked(struct i2c_client *i2c, u8 reg, u8 mask,
122 u8 val)
123{
124 int ret;
125 u8 status;
126
127 ret = sii902x_read_unlocked(i2c, reg, &status);
128 if (ret)
129 return ret;
130 status &= ~mask;
131 status |= val & mask;
132 return sii902x_write_unlocked(i2c, reg, status);
133}
134
91static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge) 135static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
92{ 136{
93 return container_of(bridge, struct sii902x, bridge); 137 return container_of(bridge, struct sii902x, bridge);
@@ -135,41 +179,11 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
135static int sii902x_get_modes(struct drm_connector *connector) 179static int sii902x_get_modes(struct drm_connector *connector)
136{ 180{
137 struct sii902x *sii902x = connector_to_sii902x(connector); 181 struct sii902x *sii902x = connector_to_sii902x(connector);
138 struct regmap *regmap = sii902x->regmap;
139 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; 182 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
140 struct device *dev = &sii902x->i2c->dev;
141 unsigned long timeout;
142 unsigned int retries;
143 unsigned int status;
144 struct edid *edid; 183 struct edid *edid;
145 int num = 0; 184 int num = 0, ret;
146 int ret;
147
148 ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
149 SII902X_SYS_CTRL_DDC_BUS_REQ,
150 SII902X_SYS_CTRL_DDC_BUS_REQ);
151 if (ret)
152 return ret;
153
154 timeout = jiffies +
155 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
156 do {
157 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
158 if (ret)
159 return ret;
160 } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
161 time_before(jiffies, timeout));
162 185
163 if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { 186 edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
164 dev_err(dev, "failed to acquire the i2c bus\n");
165 return -ETIMEDOUT;
166 }
167
168 ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
169 if (ret)
170 return ret;
171
172 edid = drm_get_edid(connector, sii902x->i2c->adapter);
173 drm_connector_update_edid_property(connector, edid); 187 drm_connector_update_edid_property(connector, edid);
174 if (edid) { 188 if (edid) {
175 num = drm_add_edid_modes(connector, edid); 189 num = drm_add_edid_modes(connector, edid);
@@ -181,42 +195,6 @@ static int sii902x_get_modes(struct drm_connector *connector)
181 if (ret) 195 if (ret)
182 return ret; 196 return ret;
183 197
184 /*
185 * Sometimes the I2C bus can stall after failure to use the
186 * EDID channel. Retry a few times to see if things clear
187 * up, else continue anyway.
188 */
189 retries = 5;
190 do {
191 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
192 &status);
193 retries--;
194 } while (ret && retries);
195 if (ret)
196 dev_err(dev, "failed to read status (%d)\n", ret);
197
198 ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
199 SII902X_SYS_CTRL_DDC_BUS_REQ |
200 SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
201 if (ret)
202 return ret;
203
204 timeout = jiffies +
205 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
206 do {
207 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
208 if (ret)
209 return ret;
210 } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
211 SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
212 time_before(jiffies, timeout));
213
214 if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
215 SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
216 dev_err(dev, "failed to release the i2c bus\n");
217 return -ETIMEDOUT;
218 }
219
220 return num; 198 return num;
221} 199}
222 200
@@ -366,6 +344,121 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
366 return IRQ_HANDLED; 344 return IRQ_HANDLED;
367} 345}
368 346
347/*
348 * The purpose of sii902x_i2c_bypass_select is to enable the pass through
349 * mode of the HDMI transmitter. Do not use regmap from within this function,
350 * only use sii902x_*_unlocked functions to read/modify/write registers.
351 * We are holding the parent adapter lock here, keep this in mind before
352 * adding more i2c transactions.
353 *
354 * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
355 * in this driver, we need to make sure that we only touch 0x1A[2:1] from
356 * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
357 * we leave the remaining bits as we have found them.
358 */
359static int sii902x_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
360{
361 struct sii902x *sii902x = i2c_mux_priv(mux);
362 struct device *dev = &sii902x->i2c->dev;
363 unsigned long timeout;
364 u8 status;
365 int ret;
366
367 ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
368 SII902X_SYS_CTRL_DDC_BUS_REQ,
369 SII902X_SYS_CTRL_DDC_BUS_REQ);
370 if (ret)
371 return ret;
372
373 timeout = jiffies +
374 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
375 do {
376 ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
377 &status);
378 if (ret)
379 return ret;
380 } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
381 time_before(jiffies, timeout));
382
383 if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
384 dev_err(dev, "Failed to acquire the i2c bus\n");
385 return -ETIMEDOUT;
386 }
387
388 return sii902x_write_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
389 status);
390}
391
392/*
393 * The purpose of sii902x_i2c_bypass_deselect is to disable the pass through
394 * mode of the HDMI transmitter. Do not use regmap from within this function,
395 * only use sii902x_*_unlocked functions to read/modify/write registers.
396 * We are holding the parent adapter lock here, keep this in mind before
397 * adding more i2c transactions.
398 *
399 * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
400 * in this driver, we need to make sure that we only touch 0x1A[2:1] from
401 * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
402 * we leave the remaining bits as we have found them.
403 */
404static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
405{
406 struct sii902x *sii902x = i2c_mux_priv(mux);
407 struct device *dev = &sii902x->i2c->dev;
408 unsigned long timeout;
409 unsigned int retries;
410 u8 status;
411 int ret;
412
413 /*
414 * When the HDMI transmitter is in pass through mode, we need an
415 * (undocumented) additional delay between STOP and START conditions
416 * to guarantee the bus won't get stuck.
417 */
418 udelay(30);
419
420 /*
421 * Sometimes the I2C bus can stall after failure to use the
422 * EDID channel. Retry a few times to see if things clear
423 * up, else continue anyway.
424 */
425 retries = 5;
426 do {
427 ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
428 &status);
429 retries--;
430 } while (ret && retries);
431 if (ret) {
432 dev_err(dev, "failed to read status (%d)\n", ret);
433 return ret;
434 }
435
436 ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
437 SII902X_SYS_CTRL_DDC_BUS_REQ |
438 SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
439 if (ret)
440 return ret;
441
442 timeout = jiffies +
443 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
444 do {
445 ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
446 &status);
447 if (ret)
448 return ret;
449 } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
450 SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
451 time_before(jiffies, timeout));
452
453 if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
454 SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
455 dev_err(dev, "failed to release the i2c bus\n");
456 return -ETIMEDOUT;
457 }
458
459 return 0;
460}
461
369static int sii902x_probe(struct i2c_client *client, 462static int sii902x_probe(struct i2c_client *client,
370 const struct i2c_device_id *id) 463 const struct i2c_device_id *id)
371{ 464{
@@ -375,6 +468,13 @@ static int sii902x_probe(struct i2c_client *client,
375 u8 chipid[4]; 468 u8 chipid[4];
376 int ret; 469 int ret;
377 470
471 ret = i2c_check_functionality(client->adapter,
472 I2C_FUNC_SMBUS_BYTE_DATA);
473 if (!ret) {
474 dev_err(dev, "I2C adapter not suitable\n");
475 return -EIO;
476 }
477
378 sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); 478 sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
379 if (!sii902x) 479 if (!sii902x)
380 return -ENOMEM; 480 return -ENOMEM;
@@ -433,7 +533,15 @@ static int sii902x_probe(struct i2c_client *client,
433 533
434 i2c_set_clientdata(client, sii902x); 534 i2c_set_clientdata(client, sii902x);
435 535
436 return 0; 536 sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
537 1, 0, I2C_MUX_GATE,
538 sii902x_i2c_bypass_select,
539 sii902x_i2c_bypass_deselect);
540 if (!sii902x->i2cmux)
541 return -ENOMEM;
542
543 sii902x->i2cmux->priv = sii902x;
544 return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
437} 545}
438 546
439static int sii902x_remove(struct i2c_client *client) 547static int sii902x_remove(struct i2c_client *client)
@@ -441,6 +549,7 @@ static int sii902x_remove(struct i2c_client *client)
441{ 549{
442 struct sii902x *sii902x = i2c_get_clientdata(client); 550 struct sii902x *sii902x = i2c_get_clientdata(client);
443 551
552 i2c_mux_del_adapters(sii902x->i2cmux);
444 drm_bridge_remove(&sii902x->bridge); 553 drm_bridge_remove(&sii902x->bridge);
445 554
446 return 0; 555 return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 68ab1821e15b..4dd499c7d1ba 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -169,7 +169,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
169 struct drm_mode_fb_cmd2 mode_cmd; 169 struct drm_mode_fb_cmd2 mode_cmd;
170 void *sysram; 170 void *sysram;
171 struct drm_gem_object *gobj = NULL; 171 struct drm_gem_object *gobj = NULL;
172 struct cirrus_bo *bo = NULL;
173 int size, ret; 172 int size, ret;
174 173
175 mode_cmd.width = sizes->surface_width; 174 mode_cmd.width = sizes->surface_width;
@@ -185,8 +184,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
185 return ret; 184 return ret;
186 } 185 }
187 186
188 bo = gem_to_cirrus_bo(gobj);
189
190 sysram = vmalloc(size); 187 sysram = vmalloc(size);
191 if (!sysram) 188 if (!sysram)
192 return -ENOMEM; 189 return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3dbfbddae7e6..7e23b150ca80 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -315,9 +315,11 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
315} 315}
316EXPORT_SYMBOL(drm_atomic_get_crtc_state); 316EXPORT_SYMBOL(drm_atomic_get_crtc_state);
317 317
318static int drm_atomic_crtc_check(struct drm_crtc *crtc, 318static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
319 struct drm_crtc_state *state) 319 const struct drm_crtc_state *new_crtc_state)
320{ 320{
321 struct drm_crtc *crtc = new_crtc_state->crtc;
322
321 /* NOTE: we explicitly don't enforce constraints such as primary 323 /* NOTE: we explicitly don't enforce constraints such as primary
322 * layer covering entire screen, since that is something we want 324 * layer covering entire screen, since that is something we want
323 * to allow (on hw that supports it). For hw that does not, it 325 * to allow (on hw that supports it). For hw that does not, it
@@ -326,7 +328,7 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
326 * TODO: Add generic modeset state checks once we support those. 328 * TODO: Add generic modeset state checks once we support those.
327 */ 329 */
328 330
329 if (state->active && !state->enable) { 331 if (new_crtc_state->active && !new_crtc_state->enable) {
330 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", 332 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
331 crtc->base.id, crtc->name); 333 crtc->base.id, crtc->name);
332 return -EINVAL; 334 return -EINVAL;
@@ -336,14 +338,14 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
336 * as this is a kernel-internal detail that userspace should never 338 * as this is a kernel-internal detail that userspace should never
337 * be able to trigger. */ 339 * be able to trigger. */
338 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 340 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
339 WARN_ON(state->enable && !state->mode_blob)) { 341 WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
340 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", 342 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
341 crtc->base.id, crtc->name); 343 crtc->base.id, crtc->name);
342 return -EINVAL; 344 return -EINVAL;
343 } 345 }
344 346
345 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 347 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
346 WARN_ON(!state->enable && state->mode_blob)) { 348 WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
347 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", 349 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
348 crtc->base.id, crtc->name); 350 crtc->base.id, crtc->name);
349 return -EINVAL; 351 return -EINVAL;
@@ -359,7 +361,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
359 * and legacy page_flip IOCTL which also reject service on a disabled 361 * and legacy page_flip IOCTL which also reject service on a disabled
360 * pipe. 362 * pipe.
361 */ 363 */
362 if (state->event && !state->active && !crtc->state->active) { 364 if (new_crtc_state->event &&
365 !new_crtc_state->active && !old_crtc_state->active) {
363 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", 366 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
364 crtc->base.id, crtc->name); 367 crtc->base.id, crtc->name);
365 return -EINVAL; 368 return -EINVAL;
@@ -489,14 +492,13 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
489EXPORT_SYMBOL(drm_atomic_get_plane_state); 492EXPORT_SYMBOL(drm_atomic_get_plane_state);
490 493
491static bool 494static bool
492plane_switching_crtc(struct drm_atomic_state *state, 495plane_switching_crtc(const struct drm_plane_state *old_plane_state,
493 struct drm_plane *plane, 496 const struct drm_plane_state *new_plane_state)
494 struct drm_plane_state *plane_state)
495{ 497{
496 if (!plane->state->crtc || !plane_state->crtc) 498 if (!old_plane_state->crtc || !new_plane_state->crtc)
497 return false; 499 return false;
498 500
499 if (plane->state->crtc == plane_state->crtc) 501 if (old_plane_state->crtc == new_plane_state->crtc)
500 return false; 502 return false;
501 503
502 /* This could be refined, but currently there's no helper or driver code 504 /* This could be refined, but currently there's no helper or driver code
@@ -509,88 +511,95 @@ plane_switching_crtc(struct drm_atomic_state *state,
509 511
510/** 512/**
511 * drm_atomic_plane_check - check plane state 513 * drm_atomic_plane_check - check plane state
512 * @plane: plane to check 514 * @old_plane_state: old plane state to check
513 * @state: plane state to check 515 * @new_plane_state: new plane state to check
514 * 516 *
515 * Provides core sanity checks for plane state. 517 * Provides core sanity checks for plane state.
516 * 518 *
517 * RETURNS: 519 * RETURNS:
518 * Zero on success, error code on failure 520 * Zero on success, error code on failure
519 */ 521 */
520static int drm_atomic_plane_check(struct drm_plane *plane, 522static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
521 struct drm_plane_state *state) 523 const struct drm_plane_state *new_plane_state)
522{ 524{
525 struct drm_plane *plane = new_plane_state->plane;
526 struct drm_crtc *crtc = new_plane_state->crtc;
527 const struct drm_framebuffer *fb = new_plane_state->fb;
523 unsigned int fb_width, fb_height; 528 unsigned int fb_width, fb_height;
524 int ret; 529 int ret;
525 530
526 /* either *both* CRTC and FB must be set, or neither */ 531 /* either *both* CRTC and FB must be set, or neither */
527 if (state->crtc && !state->fb) { 532 if (crtc && !fb) {
528 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n", 533 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
529 plane->base.id, plane->name); 534 plane->base.id, plane->name);
530 return -EINVAL; 535 return -EINVAL;
531 } else if (state->fb && !state->crtc) { 536 } else if (fb && !crtc) {
532 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n", 537 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
533 plane->base.id, plane->name); 538 plane->base.id, plane->name);
534 return -EINVAL; 539 return -EINVAL;
535 } 540 }
536 541
537 /* if disabled, we don't care about the rest of the state: */ 542 /* if disabled, we don't care about the rest of the state: */
538 if (!state->crtc) 543 if (!crtc)
539 return 0; 544 return 0;
540 545
541 /* Check whether this plane is usable on this CRTC */ 546 /* Check whether this plane is usable on this CRTC */
542 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { 547 if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
543 DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", 548 DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
544 state->crtc->base.id, state->crtc->name, 549 crtc->base.id, crtc->name,
545 plane->base.id, plane->name); 550 plane->base.id, plane->name);
546 return -EINVAL; 551 return -EINVAL;
547 } 552 }
548 553
549 /* Check whether this plane supports the fb pixel format. */ 554 /* Check whether this plane supports the fb pixel format. */
550 ret = drm_plane_check_pixel_format(plane, state->fb->format->format, 555 ret = drm_plane_check_pixel_format(plane, fb->format->format,
551 state->fb->modifier); 556 fb->modifier);
552 if (ret) { 557 if (ret) {
553 struct drm_format_name_buf format_name; 558 struct drm_format_name_buf format_name;
554 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n", 559 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
555 plane->base.id, plane->name, 560 plane->base.id, plane->name,
556 drm_get_format_name(state->fb->format->format, 561 drm_get_format_name(fb->format->format,
557 &format_name), 562 &format_name),
558 state->fb->modifier); 563 fb->modifier);
559 return ret; 564 return ret;
560 } 565 }
561 566
562 /* Give drivers some help against integer overflows */ 567 /* Give drivers some help against integer overflows */
563 if (state->crtc_w > INT_MAX || 568 if (new_plane_state->crtc_w > INT_MAX ||
564 state->crtc_x > INT_MAX - (int32_t) state->crtc_w || 569 new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
565 state->crtc_h > INT_MAX || 570 new_plane_state->crtc_h > INT_MAX ||
566 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { 571 new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
567 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", 572 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
568 plane->base.id, plane->name, 573 plane->base.id, plane->name,
569 state->crtc_w, state->crtc_h, 574 new_plane_state->crtc_w, new_plane_state->crtc_h,
570 state->crtc_x, state->crtc_y); 575 new_plane_state->crtc_x, new_plane_state->crtc_y);
571 return -ERANGE; 576 return -ERANGE;
572 } 577 }
573 578
574 fb_width = state->fb->width << 16; 579 fb_width = fb->width << 16;
575 fb_height = state->fb->height << 16; 580 fb_height = fb->height << 16;
576 581
577 /* Make sure source coordinates are inside the fb. */ 582 /* Make sure source coordinates are inside the fb. */
578 if (state->src_w > fb_width || 583 if (new_plane_state->src_w > fb_width ||
579 state->src_x > fb_width - state->src_w || 584 new_plane_state->src_x > fb_width - new_plane_state->src_w ||
580 state->src_h > fb_height || 585 new_plane_state->src_h > fb_height ||
581 state->src_y > fb_height - state->src_h) { 586 new_plane_state->src_y > fb_height - new_plane_state->src_h) {
582 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates " 587 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
583 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", 588 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
584 plane->base.id, plane->name, 589 plane->base.id, plane->name,
585 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, 590 new_plane_state->src_w >> 16,
586 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, 591 ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
587 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, 592 new_plane_state->src_h >> 16,
588 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10, 593 ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
589 state->fb->width, state->fb->height); 594 new_plane_state->src_x >> 16,
595 ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
596 new_plane_state->src_y >> 16,
597 ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
598 fb->width, fb->height);
590 return -ENOSPC; 599 return -ENOSPC;
591 } 600 }
592 601
593 if (plane_switching_crtc(state->state, plane, state)) { 602 if (plane_switching_crtc(old_plane_state, new_plane_state)) {
594 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", 603 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
595 plane->base.id, plane->name); 604 plane->base.id, plane->name);
596 return -EINVAL; 605 return -EINVAL;
@@ -927,6 +936,8 @@ int
927drm_atomic_add_affected_planes(struct drm_atomic_state *state, 936drm_atomic_add_affected_planes(struct drm_atomic_state *state,
928 struct drm_crtc *crtc) 937 struct drm_crtc *crtc)
929{ 938{
939 const struct drm_crtc_state *old_crtc_state =
940 drm_atomic_get_old_crtc_state(state, crtc);
930 struct drm_plane *plane; 941 struct drm_plane *plane;
931 942
932 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); 943 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
@@ -934,7 +945,7 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
934 DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n", 945 DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
935 crtc->base.id, crtc->name, state); 946 crtc->base.id, crtc->name, state);
936 947
937 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 948 drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
938 struct drm_plane_state *plane_state = 949 struct drm_plane_state *plane_state =
939 drm_atomic_get_plane_state(state, plane); 950 drm_atomic_get_plane_state(state, plane);
940 951
@@ -961,17 +972,19 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
961 struct drm_device *dev = state->dev; 972 struct drm_device *dev = state->dev;
962 struct drm_mode_config *config = &dev->mode_config; 973 struct drm_mode_config *config = &dev->mode_config;
963 struct drm_plane *plane; 974 struct drm_plane *plane;
964 struct drm_plane_state *plane_state; 975 struct drm_plane_state *old_plane_state;
976 struct drm_plane_state *new_plane_state;
965 struct drm_crtc *crtc; 977 struct drm_crtc *crtc;
966 struct drm_crtc_state *crtc_state; 978 struct drm_crtc_state *old_crtc_state;
979 struct drm_crtc_state *new_crtc_state;
967 struct drm_connector *conn; 980 struct drm_connector *conn;
968 struct drm_connector_state *conn_state; 981 struct drm_connector_state *conn_state;
969 int i, ret = 0; 982 int i, ret = 0;
970 983
971 DRM_DEBUG_ATOMIC("checking %p\n", state); 984 DRM_DEBUG_ATOMIC("checking %p\n", state);
972 985
973 for_each_new_plane_in_state(state, plane, plane_state, i) { 986 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
974 ret = drm_atomic_plane_check(plane, plane_state); 987 ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
975 if (ret) { 988 if (ret) {
976 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", 989 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
977 plane->base.id, plane->name); 990 plane->base.id, plane->name);
@@ -979,8 +992,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
979 } 992 }
980 } 993 }
981 994
982 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 995 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
983 ret = drm_atomic_crtc_check(crtc, crtc_state); 996 ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
984 if (ret) { 997 if (ret) {
985 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", 998 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
986 crtc->base.id, crtc->name); 999 crtc->base.id, crtc->name);
@@ -1008,8 +1021,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1008 } 1021 }
1009 1022
1010 if (!state->allow_modeset) { 1023 if (!state->allow_modeset) {
1011 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1024 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1012 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1025 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
1013 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", 1026 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
1014 crtc->base.id, crtc->name); 1027 crtc->base.id, crtc->name);
1015 return -EINVAL; 1028 return -EINVAL;
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fc03d26fcacc..9b2bd28dde0a 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -81,8 +81,7 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
81{ 81{
82 int ret; 82 int ret;
83 83
84 if (!drm_core_check_feature(dev, DRIVER_MODESET) || 84 if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
85 !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
86 return -EOPNOTSUPP; 85 return -EOPNOTSUPP;
87 86
88 if (funcs && !try_module_get(funcs->owner)) 87 if (funcs && !try_module_get(funcs->owner))
@@ -229,8 +228,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
229{ 228{
230 struct drm_device *dev = buffer->client->dev; 229 struct drm_device *dev = buffer->client->dev;
231 230
232 if (buffer->vaddr && dev->driver->gem_prime_vunmap) 231 drm_gem_vunmap(buffer->gem, buffer->vaddr);
233 dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
234 232
235 if (buffer->gem) 233 if (buffer->gem)
236 drm_gem_object_put_unlocked(buffer->gem); 234 drm_gem_object_put_unlocked(buffer->gem);
@@ -283,9 +281,9 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
283 * fd_install step out of the driver backend hooks, to make that 281 * fd_install step out of the driver backend hooks, to make that
284 * final step optional for internal users. 282 * final step optional for internal users.
285 */ 283 */
286 vaddr = dev->driver->gem_prime_vmap(obj); 284 vaddr = drm_gem_vmap(obj);
287 if (!vaddr) { 285 if (IS_ERR(vaddr)) {
288 ret = -ENOMEM; 286 ret = PTR_ERR(vaddr);
289 goto err_delete; 287 goto err_delete;
290 } 288 }
291 289
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index f523948c82b1..d90ee03a84c6 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -224,6 +224,7 @@ const struct drm_format_info *__drm_format_info(u32 format)
224 { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 224 { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
225 { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 225 { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
226 { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 226 { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
227 { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
227 { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, 228 { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
228 { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1, 229 { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
229 .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, 230 .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 512078ebd97b..8b55ece97967 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -257,7 +257,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
257 struct drm_gem_object *obj = ptr; 257 struct drm_gem_object *obj = ptr;
258 struct drm_device *dev = obj->dev; 258 struct drm_device *dev = obj->dev;
259 259
260 if (dev->driver->gem_close_object) 260 if (obj->funcs && obj->funcs->close)
261 obj->funcs->close(obj, file_priv);
262 else if (dev->driver->gem_close_object)
261 dev->driver->gem_close_object(obj, file_priv); 263 dev->driver->gem_close_object(obj, file_priv);
262 264
263 if (drm_core_check_feature(dev, DRIVER_PRIME)) 265 if (drm_core_check_feature(dev, DRIVER_PRIME))
@@ -410,7 +412,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
410 if (ret) 412 if (ret)
411 goto err_remove; 413 goto err_remove;
412 414
413 if (dev->driver->gem_open_object) { 415 if (obj->funcs && obj->funcs->open) {
416 ret = obj->funcs->open(obj, file_priv);
417 if (ret)
418 goto err_revoke;
419 } else if (dev->driver->gem_open_object) {
414 ret = dev->driver->gem_open_object(obj, file_priv); 420 ret = dev->driver->gem_open_object(obj, file_priv);
415 if (ret) 421 if (ret)
416 goto err_revoke; 422 goto err_revoke;
@@ -835,7 +841,9 @@ drm_gem_object_free(struct kref *kref)
835 container_of(kref, struct drm_gem_object, refcount); 841 container_of(kref, struct drm_gem_object, refcount);
836 struct drm_device *dev = obj->dev; 842 struct drm_device *dev = obj->dev;
837 843
838 if (dev->driver->gem_free_object_unlocked) { 844 if (obj->funcs) {
845 obj->funcs->free(obj);
846 } else if (dev->driver->gem_free_object_unlocked) {
839 dev->driver->gem_free_object_unlocked(obj); 847 dev->driver->gem_free_object_unlocked(obj);
840 } else if (dev->driver->gem_free_object) { 848 } else if (dev->driver->gem_free_object) {
841 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 849 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -864,13 +872,13 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
864 872
865 dev = obj->dev; 873 dev = obj->dev;
866 874
867 if (dev->driver->gem_free_object_unlocked) { 875 if (dev->driver->gem_free_object) {
868 kref_put(&obj->refcount, drm_gem_object_free);
869 } else {
870 might_lock(&dev->struct_mutex); 876 might_lock(&dev->struct_mutex);
871 if (kref_put_mutex(&obj->refcount, drm_gem_object_free, 877 if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
872 &dev->struct_mutex)) 878 &dev->struct_mutex))
873 mutex_unlock(&dev->struct_mutex); 879 mutex_unlock(&dev->struct_mutex);
880 } else {
881 kref_put(&obj->refcount, drm_gem_object_free);
874 } 882 }
875} 883}
876EXPORT_SYMBOL(drm_gem_object_put_unlocked); 884EXPORT_SYMBOL(drm_gem_object_put_unlocked);
@@ -960,11 +968,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
960 if (obj_size < vma->vm_end - vma->vm_start) 968 if (obj_size < vma->vm_end - vma->vm_start)
961 return -EINVAL; 969 return -EINVAL;
962 970
963 if (!dev->driver->gem_vm_ops) 971 if (obj->funcs && obj->funcs->vm_ops)
972 vma->vm_ops = obj->funcs->vm_ops;
973 else if (dev->driver->gem_vm_ops)
974 vma->vm_ops = dev->driver->gem_vm_ops;
975 else
964 return -EINVAL; 976 return -EINVAL;
965 977
966 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 978 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
967 vma->vm_ops = dev->driver->gem_vm_ops;
968 vma->vm_private_data = obj; 979 vma->vm_private_data = obj;
969 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 980 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
970 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); 981 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -1066,6 +1077,86 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1066 drm_printf_indent(p, indent, "imported=%s\n", 1077 drm_printf_indent(p, indent, "imported=%s\n",
1067 obj->import_attach ? "yes" : "no"); 1078 obj->import_attach ? "yes" : "no");
1068 1079
1069 if (obj->dev->driver->gem_print_info) 1080 if (obj->funcs && obj->funcs->print_info)
1081 obj->funcs->print_info(p, indent, obj);
1082 else if (obj->dev->driver->gem_print_info)
1070 obj->dev->driver->gem_print_info(p, indent, obj); 1083 obj->dev->driver->gem_print_info(p, indent, obj);
1071} 1084}
1085
1086/**
1087 * drm_gem_pin - Pin backing buffer in memory
1088 * @obj: GEM object
1089 *
1090 * Make sure the backing buffer is pinned in memory.
1091 *
1092 * Returns:
1093 * 0 on success or a negative error code on failure.
1094 */
1095int drm_gem_pin(struct drm_gem_object *obj)
1096{
1097 if (obj->funcs && obj->funcs->pin)
1098 return obj->funcs->pin(obj);
1099 else if (obj->dev->driver->gem_prime_pin)
1100 return obj->dev->driver->gem_prime_pin(obj);
1101 else
1102 return 0;
1103}
1104EXPORT_SYMBOL(drm_gem_pin);
1105
1106/**
1107 * drm_gem_unpin - Unpin backing buffer from memory
1108 * @obj: GEM object
1109 *
1110 * Relax the requirement that the backing buffer is pinned in memory.
1111 */
1112void drm_gem_unpin(struct drm_gem_object *obj)
1113{
1114 if (obj->funcs && obj->funcs->unpin)
1115 obj->funcs->unpin(obj);
1116 else if (obj->dev->driver->gem_prime_unpin)
1117 obj->dev->driver->gem_prime_unpin(obj);
1118}
1119EXPORT_SYMBOL(drm_gem_unpin);
1120
1121/**
1122 * drm_gem_vmap - Map buffer into kernel virtual address space
1123 * @obj: GEM object
1124 *
1125 * Returns:
1126 * A virtual pointer to a newly created GEM object or an ERR_PTR-encoded negative
1127 * error code on failure.
1128 */
1129void *drm_gem_vmap(struct drm_gem_object *obj)
1130{
1131 void *vaddr;
1132
1133 if (obj->funcs && obj->funcs->vmap)
1134 vaddr = obj->funcs->vmap(obj);
1135 else if (obj->dev->driver->gem_prime_vmap)
1136 vaddr = obj->dev->driver->gem_prime_vmap(obj);
1137 else
1138 vaddr = ERR_PTR(-EOPNOTSUPP);
1139
1140 if (!vaddr)
1141 vaddr = ERR_PTR(-ENOMEM);
1142
1143 return vaddr;
1144}
1145EXPORT_SYMBOL(drm_gem_vmap);
1146
1147/**
1148 * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
1149 * @obj: GEM object
1150 * @vaddr: Virtual address (can be NULL)
1151 */
1152void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
1153{
1154 if (!vaddr)
1155 return;
1156
1157 if (obj->funcs && obj->funcs->vunmap)
1158 obj->funcs->vunmap(obj, vaddr);
1159 else if (obj->dev->driver->gem_prime_vunmap)
1160 obj->dev->driver->gem_prime_vunmap(obj, vaddr);
1161}
1162EXPORT_SYMBOL(drm_gem_vunmap);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1d2ced882b66..cc26625b4b33 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -176,6 +176,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
176 * 176 *
177 * This function frees the backing memory of the CMA GEM object, cleans up the 177 * This function frees the backing memory of the CMA GEM object, cleans up the
178 * GEM object state and frees the memory used to store the object itself. 178 * GEM object state and frees the memory used to store the object itself.
179 * If the buffer is imported and the virtual address is set, it is released.
179 * Drivers using the CMA helpers should set this as their 180 * Drivers using the CMA helpers should set this as their
180 * &drm_driver.gem_free_object_unlocked callback. 181 * &drm_driver.gem_free_object_unlocked callback.
181 */ 182 */
@@ -189,6 +190,8 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
189 dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, 190 dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
190 cma_obj->vaddr, cma_obj->paddr); 191 cma_obj->vaddr, cma_obj->paddr);
191 } else if (gem_obj->import_attach) { 192 } else if (gem_obj->import_attach) {
193 if (cma_obj->vaddr)
194 dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
192 drm_prime_gem_destroy(gem_obj, cma_obj->sgt); 195 drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
193 } 196 }
194 197
@@ -575,3 +578,86 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
575 /* Nothing to do */ 578 /* Nothing to do */
576} 579}
577EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap); 580EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
581
582static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
583 .free = drm_gem_cma_free_object,
584 .print_info = drm_gem_cma_print_info,
585 .get_sg_table = drm_gem_cma_prime_get_sg_table,
586 .vmap = drm_gem_cma_prime_vmap,
587 .vm_ops = &drm_gem_cma_vm_ops,
588};
589
590/**
591 * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
592 * default function table
593 * @dev: DRM device
594 * @size: Size of the object to allocate
595 *
596 * This sets the GEM object functions to the default CMA helper functions.
597 * This function can be used as the &drm_driver.gem_create_object callback.
598 *
599 * Returns:
600 * A pointer to a allocated GEM object or an error pointer on failure.
601 */
602struct drm_gem_object *
603drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
604{
605 struct drm_gem_cma_object *cma_obj;
606
607 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
608 if (!cma_obj)
609 return NULL;
610
611 cma_obj->base.funcs = &drm_cma_gem_default_funcs;
612
613 return &cma_obj->base;
614}
615EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
616
617/**
618 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
619 * scatter/gather table and get the virtual address of the buffer
620 * @dev: DRM device
621 * @attach: DMA-BUF attachment
622 * @sgt: Scatter/gather table of pinned pages
623 *
624 * This function imports a scatter/gather table using
625 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
626 * virtual address. This ensures that a CMA GEM object always has its virtual
627 * address set. This address is released when the object is freed.
628 *
629 * This function can be used as the &drm_driver.gem_prime_import_sg_table
630 * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set
631 * the necessary DRM driver operations.
632 *
633 * Returns:
634 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
635 * error code on failure.
636 */
637struct drm_gem_object *
638drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
639 struct dma_buf_attachment *attach,
640 struct sg_table *sgt)
641{
642 struct drm_gem_cma_object *cma_obj;
643 struct drm_gem_object *obj;
644 void *vaddr;
645
646 vaddr = dma_buf_vmap(attach->dmabuf);
647 if (!vaddr) {
648 DRM_ERROR("Failed to vmap PRIME buffer\n");
649 return ERR_PTR(-ENOMEM);
650 }
651
652 obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
653 if (IS_ERR(obj)) {
654 dma_buf_vunmap(attach->dmabuf, vaddr);
655 return obj;
656 }
657
658 cma_obj = to_drm_gem_cma_obj(obj);
659 cma_obj->vaddr = vaddr;
660
661 return obj;
662}
663EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ab4e70e63f6e..52e445bb1aa5 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -63,7 +63,7 @@ static const struct drm_dmi_panel_orientation_data gpd_win2 = {
63 .width = 720, 63 .width = 720,
64 .height = 1280, 64 .height = 1280,
65 .bios_dates = (const char * const []){ 65 .bios_dates = (const char * const []){
66 "12/07/2017", "05/24/2018", NULL }, 66 "12/07/2017", "05/24/2018", "06/29/2018", NULL },
67 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, 67 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
68}; 68};
69 69
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 8d54d51a6b6b..5737cb8c6f03 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -199,7 +199,6 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
199{ 199{
200 struct drm_prime_attachment *prime_attach; 200 struct drm_prime_attachment *prime_attach;
201 struct drm_gem_object *obj = dma_buf->priv; 201 struct drm_gem_object *obj = dma_buf->priv;
202 struct drm_device *dev = obj->dev;
203 202
204 prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL); 203 prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
205 if (!prime_attach) 204 if (!prime_attach)
@@ -208,10 +207,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
208 prime_attach->dir = DMA_NONE; 207 prime_attach->dir = DMA_NONE;
209 attach->priv = prime_attach; 208 attach->priv = prime_attach;
210 209
211 if (!dev->driver->gem_prime_pin) 210 return drm_gem_pin(obj);
212 return 0;
213
214 return dev->driver->gem_prime_pin(obj);
215} 211}
216EXPORT_SYMBOL(drm_gem_map_attach); 212EXPORT_SYMBOL(drm_gem_map_attach);
217 213
@@ -228,7 +224,6 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
228{ 224{
229 struct drm_prime_attachment *prime_attach = attach->priv; 225 struct drm_prime_attachment *prime_attach = attach->priv;
230 struct drm_gem_object *obj = dma_buf->priv; 226 struct drm_gem_object *obj = dma_buf->priv;
231 struct drm_device *dev = obj->dev;
232 227
233 if (prime_attach) { 228 if (prime_attach) {
234 struct sg_table *sgt = prime_attach->sgt; 229 struct sg_table *sgt = prime_attach->sgt;
@@ -247,8 +242,7 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
247 attach->priv = NULL; 242 attach->priv = NULL;
248 } 243 }
249 244
250 if (dev->driver->gem_prime_unpin) 245 drm_gem_unpin(obj);
251 dev->driver->gem_prime_unpin(obj);
252} 246}
253EXPORT_SYMBOL(drm_gem_map_detach); 247EXPORT_SYMBOL(drm_gem_map_detach);
254 248
@@ -310,7 +304,10 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
310 if (WARN_ON(prime_attach->dir != DMA_NONE)) 304 if (WARN_ON(prime_attach->dir != DMA_NONE))
311 return ERR_PTR(-EBUSY); 305 return ERR_PTR(-EBUSY);
312 306
313 sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 307 if (obj->funcs)
308 sgt = obj->funcs->get_sg_table(obj);
309 else
310 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
314 311
315 if (!IS_ERR(sgt)) { 312 if (!IS_ERR(sgt)) {
316 if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, 313 if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
@@ -406,12 +403,13 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
406void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) 403void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
407{ 404{
408 struct drm_gem_object *obj = dma_buf->priv; 405 struct drm_gem_object *obj = dma_buf->priv;
409 struct drm_device *dev = obj->dev; 406 void *vaddr;
410 407
411 if (dev->driver->gem_prime_vmap) 408 vaddr = drm_gem_vmap(obj);
412 return dev->driver->gem_prime_vmap(obj); 409 if (IS_ERR(vaddr))
413 else 410 vaddr = NULL;
414 return NULL; 411
412 return vaddr;
415} 413}
416EXPORT_SYMBOL(drm_gem_dmabuf_vmap); 414EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
417 415
@@ -426,10 +424,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
426void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) 424void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
427{ 425{
428 struct drm_gem_object *obj = dma_buf->priv; 426 struct drm_gem_object *obj = dma_buf->priv;
429 struct drm_device *dev = obj->dev;
430 427
431 if (dev->driver->gem_prime_vunmap) 428 drm_gem_vunmap(obj, vaddr);
432 dev->driver->gem_prime_vunmap(obj, vaddr);
433} 429}
434EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); 430EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
435 431
@@ -529,7 +525,12 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
529 return dmabuf; 525 return dmabuf;
530 } 526 }
531 527
532 dmabuf = dev->driver->gem_prime_export(dev, obj, flags); 528 if (obj->funcs && obj->funcs->export)
529 dmabuf = obj->funcs->export(obj, flags);
530 else if (dev->driver->gem_prime_export)
531 dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
532 else
533 dmabuf = drm_gem_prime_export(dev, obj, flags);
533 if (IS_ERR(dmabuf)) { 534 if (IS_ERR(dmabuf)) {
534 /* normally the created dma-buf takes ownership of the ref, 535 /* normally the created dma-buf takes ownership of the ref,
535 * but if that fails then drop the ref 536 * but if that fails then drop the ref
@@ -649,6 +650,43 @@ out_unlock:
649EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 650EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
650 651
651/** 652/**
653 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
654 * @obj: GEM object
655 * @vma: Virtual address range
656 *
657 * This function sets up a userspace mapping for PRIME exported buffers using
658 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
659 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
660 * called to set up the mapping.
661 *
662 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
663 */
664int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
665{
666 /* Used by drm_gem_mmap() to lookup the GEM object */
667 struct drm_file priv = {
668 .minor = obj->dev->primary,
669 };
670 struct file fil = {
671 .private_data = &priv,
672 };
673 int ret;
674
675 ret = drm_vma_node_allow(&obj->vma_node, &priv);
676 if (ret)
677 return ret;
678
679 vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
680
681 ret = obj->dev->driver->fops->mmap(&fil, vma);
682
683 drm_vma_node_revoke(&obj->vma_node, &priv);
684
685 return ret;
686}
687EXPORT_SYMBOL(drm_gem_prime_mmap);
688
689/**
652 * drm_gem_prime_import_dev - core implementation of the import callback 690 * drm_gem_prime_import_dev - core implementation of the import callback
653 * @dev: drm_device to import into 691 * @dev: drm_device to import into
654 * @dma_buf: dma-buf object to import 692 * @dma_buf: dma-buf object to import
@@ -762,7 +800,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
762 800
763 /* never seen this one, need to import */ 801 /* never seen this one, need to import */
764 mutex_lock(&dev->object_name_lock); 802 mutex_lock(&dev->object_name_lock);
765 obj = dev->driver->gem_prime_import(dev, dma_buf); 803 if (dev->driver->gem_prime_import)
804 obj = dev->driver->gem_prime_import(dev, dma_buf);
805 else
806 obj = drm_gem_prime_import(dev, dma_buf);
766 if (IS_ERR(obj)) { 807 if (IS_ERR(obj)) {
767 ret = PTR_ERR(obj); 808 ret = PTR_ERR(obj);
768 goto out_unlock; 809 goto out_unlock;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index da8175d9c6ff..e2c5b3ca4824 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,9 +56,6 @@
56#include "drm_internal.h" 56#include "drm_internal.h"
57#include <drm/drm_syncobj.h> 57#include <drm/drm_syncobj.h>
58 58
59/* merge normal syncobj to timeline syncobj, the point interval is 1 */
60#define DRM_SYNCOBJ_BINARY_POINT 1
61
62struct drm_syncobj_stub_fence { 59struct drm_syncobj_stub_fence {
63 struct dma_fence base; 60 struct dma_fence base;
64 spinlock_t lock; 61 spinlock_t lock;
@@ -74,29 +71,7 @@ static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
74 .get_timeline_name = drm_syncobj_stub_fence_get_name, 71 .get_timeline_name = drm_syncobj_stub_fence_get_name,
75}; 72};
76 73
77struct drm_syncobj_signal_pt {
78 struct dma_fence_array *fence_array;
79 u64 value;
80 struct list_head list;
81};
82
83static DEFINE_SPINLOCK(signaled_fence_lock);
84static struct dma_fence signaled_fence;
85 74
86static struct dma_fence *drm_syncobj_get_stub_fence(void)
87{
88 spin_lock(&signaled_fence_lock);
89 if (!signaled_fence.ops) {
90 dma_fence_init(&signaled_fence,
91 &drm_syncobj_stub_fence_ops,
92 &signaled_fence_lock,
93 0, 0);
94 dma_fence_signal_locked(&signaled_fence);
95 }
96 spin_unlock(&signaled_fence_lock);
97
98 return dma_fence_get(&signaled_fence);
99}
100/** 75/**
101 * drm_syncobj_find - lookup and reference a sync object. 76 * drm_syncobj_find - lookup and reference a sync object.
102 * @file_private: drm file private pointer 77 * @file_private: drm file private pointer
@@ -123,27 +98,6 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
123} 98}
124EXPORT_SYMBOL(drm_syncobj_find); 99EXPORT_SYMBOL(drm_syncobj_find);
125 100
126static struct dma_fence *
127drm_syncobj_find_signal_pt_for_point(struct drm_syncobj *syncobj,
128 uint64_t point)
129{
130 struct drm_syncobj_signal_pt *signal_pt;
131
132 if ((syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) &&
133 (point <= syncobj->timeline))
134 return drm_syncobj_get_stub_fence();
135
136 list_for_each_entry(signal_pt, &syncobj->signal_pt_list, list) {
137 if (point > signal_pt->value)
138 continue;
139 if ((syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) &&
140 (point != signal_pt->value))
141 continue;
142 return dma_fence_get(&signal_pt->fence_array->base);
143 }
144 return NULL;
145}
146
147static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj, 101static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
148 struct drm_syncobj_cb *cb, 102 struct drm_syncobj_cb *cb,
149 drm_syncobj_func_t func) 103 drm_syncobj_func_t func)
@@ -152,158 +106,53 @@ static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
152 list_add_tail(&cb->node, &syncobj->cb_list); 106 list_add_tail(&cb->node, &syncobj->cb_list);
153} 107}
154 108
155static void drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, 109static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
156 struct dma_fence **fence, 110 struct dma_fence **fence,
157 struct drm_syncobj_cb *cb, 111 struct drm_syncobj_cb *cb,
158 drm_syncobj_func_t func) 112 drm_syncobj_func_t func)
159{ 113{
160 u64 pt_value = 0; 114 int ret;
161
162 WARN_ON(*fence);
163
164 if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
165 /*BINARY syncobj always wait on last pt */
166 pt_value = syncobj->signal_point;
167 115
168 if (pt_value == 0) 116 *fence = drm_syncobj_fence_get(syncobj);
169 pt_value += DRM_SYNCOBJ_BINARY_POINT; 117 if (*fence)
170 } 118 return 1;
171 119
172 mutex_lock(&syncobj->cb_mutex); 120 spin_lock(&syncobj->lock);
173 spin_lock(&syncobj->pt_lock); 121 /* We've already tried once to get a fence and failed. Now that we
174 *fence = drm_syncobj_find_signal_pt_for_point(syncobj, pt_value); 122 * have the lock, try one more time just to be sure we don't add a
175 spin_unlock(&syncobj->pt_lock); 123 * callback when a fence has already been set.
176 if (!*fence) 124 */
125 if (syncobj->fence) {
126 *fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
127 lockdep_is_held(&syncobj->lock)));
128 ret = 1;
129 } else {
130 *fence = NULL;
177 drm_syncobj_add_callback_locked(syncobj, cb, func); 131 drm_syncobj_add_callback_locked(syncobj, cb, func);
178 mutex_unlock(&syncobj->cb_mutex); 132 ret = 0;
179} 133 }
180 134 spin_unlock(&syncobj->lock);
181static void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
182 struct drm_syncobj_cb *cb)
183{
184 mutex_lock(&syncobj->cb_mutex);
185 list_del_init(&cb->node);
186 mutex_unlock(&syncobj->cb_mutex);
187}
188 135
189static void drm_syncobj_init(struct drm_syncobj *syncobj) 136 return ret;
190{
191 spin_lock(&syncobj->pt_lock);
192 syncobj->timeline_context = dma_fence_context_alloc(1);
193 syncobj->timeline = 0;
194 syncobj->signal_point = 0;
195 init_waitqueue_head(&syncobj->wq);
196
197 INIT_LIST_HEAD(&syncobj->signal_pt_list);
198 spin_unlock(&syncobj->pt_lock);
199} 137}
200 138
201static void drm_syncobj_fini(struct drm_syncobj *syncobj) 139void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
140 struct drm_syncobj_cb *cb,
141 drm_syncobj_func_t func)
202{ 142{
203 struct drm_syncobj_signal_pt *signal_pt = NULL, *tmp; 143 spin_lock(&syncobj->lock);
204 144 drm_syncobj_add_callback_locked(syncobj, cb, func);
205 spin_lock(&syncobj->pt_lock); 145 spin_unlock(&syncobj->lock);
206 list_for_each_entry_safe(signal_pt, tmp,
207 &syncobj->signal_pt_list, list) {
208 list_del(&signal_pt->list);
209 dma_fence_put(&signal_pt->fence_array->base);
210 kfree(signal_pt);
211 }
212 spin_unlock(&syncobj->pt_lock);
213} 146}
214 147
215static int drm_syncobj_create_signal_pt(struct drm_syncobj *syncobj, 148void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
216 struct dma_fence *fence, 149 struct drm_syncobj_cb *cb)
217 u64 point)
218{ 150{
219 struct drm_syncobj_signal_pt *signal_pt = 151 spin_lock(&syncobj->lock);
220 kzalloc(sizeof(struct drm_syncobj_signal_pt), GFP_KERNEL); 152 list_del_init(&cb->node);
221 struct drm_syncobj_signal_pt *tail_pt; 153 spin_unlock(&syncobj->lock);
222 struct dma_fence **fences;
223 int num_fences = 0;
224 int ret = 0, i;
225
226 if (!signal_pt)
227 return -ENOMEM;
228 if (!fence)
229 goto out;
230
231 fences = kmalloc_array(sizeof(void *), 2, GFP_KERNEL);
232 if (!fences) {
233 ret = -ENOMEM;
234 goto out;
235 }
236 fences[num_fences++] = dma_fence_get(fence);
237 /* timeline syncobj must take this dependency */
238 if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
239 spin_lock(&syncobj->pt_lock);
240 if (!list_empty(&syncobj->signal_pt_list)) {
241 tail_pt = list_last_entry(&syncobj->signal_pt_list,
242 struct drm_syncobj_signal_pt, list);
243 fences[num_fences++] =
244 dma_fence_get(&tail_pt->fence_array->base);
245 }
246 spin_unlock(&syncobj->pt_lock);
247 }
248 signal_pt->fence_array = dma_fence_array_create(num_fences, fences,
249 syncobj->timeline_context,
250 point, false);
251 if (!signal_pt->fence_array) {
252 ret = -ENOMEM;
253 goto fail;
254 }
255
256 spin_lock(&syncobj->pt_lock);
257 if (syncobj->signal_point >= point) {
258 DRM_WARN("A later signal is ready!");
259 spin_unlock(&syncobj->pt_lock);
260 goto exist;
261 }
262 signal_pt->value = point;
263 list_add_tail(&signal_pt->list, &syncobj->signal_pt_list);
264 syncobj->signal_point = point;
265 spin_unlock(&syncobj->pt_lock);
266 wake_up_all(&syncobj->wq);
267
268 return 0;
269exist:
270 dma_fence_put(&signal_pt->fence_array->base);
271fail:
272 for (i = 0; i < num_fences; i++)
273 dma_fence_put(fences[i]);
274 kfree(fences);
275out:
276 kfree(signal_pt);
277 return ret;
278} 154}
279 155
280static void drm_syncobj_garbage_collection(struct drm_syncobj *syncobj)
281{
282 struct drm_syncobj_signal_pt *signal_pt, *tmp, *tail_pt;
283
284 spin_lock(&syncobj->pt_lock);
285 tail_pt = list_last_entry(&syncobj->signal_pt_list,
286 struct drm_syncobj_signal_pt,
287 list);
288 list_for_each_entry_safe(signal_pt, tmp,
289 &syncobj->signal_pt_list, list) {
290 if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY &&
291 signal_pt == tail_pt)
292 continue;
293 if (dma_fence_is_signaled(&signal_pt->fence_array->base)) {
294 syncobj->timeline = signal_pt->value;
295 list_del(&signal_pt->list);
296 dma_fence_put(&signal_pt->fence_array->base);
297 kfree(signal_pt);
298 } else {
299 /*signal_pt is in order in list, from small to big, so
300 * the later must not be signal either */
301 break;
302 }
303 }
304
305 spin_unlock(&syncobj->pt_lock);
306}
307/** 156/**
308 * drm_syncobj_replace_fence - replace fence in a sync object. 157 * drm_syncobj_replace_fence - replace fence in a sync object.
309 * @syncobj: Sync object to replace fence in 158 * @syncobj: Sync object to replace fence in
@@ -316,30 +165,28 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
316 u64 point, 165 u64 point,
317 struct dma_fence *fence) 166 struct dma_fence *fence)
318{ 167{
319 u64 pt_value = point; 168 struct dma_fence *old_fence;
320 169 struct drm_syncobj_cb *cur, *tmp;
321 drm_syncobj_garbage_collection(syncobj); 170
322 if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) { 171 if (fence)
323 if (!fence) { 172 dma_fence_get(fence);
324 drm_syncobj_fini(syncobj); 173
325 drm_syncobj_init(syncobj); 174 spin_lock(&syncobj->lock);
326 return;
327 }
328 pt_value = syncobj->signal_point +
329 DRM_SYNCOBJ_BINARY_POINT;
330 }
331 drm_syncobj_create_signal_pt(syncobj, fence, pt_value);
332 if (fence) {
333 struct drm_syncobj_cb *cur, *tmp;
334 LIST_HEAD(cb_list);
335 175
336 mutex_lock(&syncobj->cb_mutex); 176 old_fence = rcu_dereference_protected(syncobj->fence,
177 lockdep_is_held(&syncobj->lock));
178 rcu_assign_pointer(syncobj->fence, fence);
179
180 if (fence != old_fence) {
337 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { 181 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
338 list_del_init(&cur->node); 182 list_del_init(&cur->node);
339 cur->func(syncobj, cur); 183 cur->func(syncobj, cur);
340 } 184 }
341 mutex_unlock(&syncobj->cb_mutex);
342 } 185 }
186
187 spin_unlock(&syncobj->lock);
188
189 dma_fence_put(old_fence);
343} 190}
344EXPORT_SYMBOL(drm_syncobj_replace_fence); 191EXPORT_SYMBOL(drm_syncobj_replace_fence);
345 192
@@ -362,64 +209,6 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
362 return 0; 209 return 0;
363} 210}
364 211
365static int
366drm_syncobj_point_get(struct drm_syncobj *syncobj, u64 point, u64 flags,
367 struct dma_fence **fence)
368{
369 int ret = 0;
370
371 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
372 ret = wait_event_interruptible(syncobj->wq,
373 point <= syncobj->signal_point);
374 if (ret < 0)
375 return ret;
376 }
377 spin_lock(&syncobj->pt_lock);
378 *fence = drm_syncobj_find_signal_pt_for_point(syncobj, point);
379 if (!*fence)
380 ret = -EINVAL;
381 spin_unlock(&syncobj->pt_lock);
382 return ret;
383}
384
385/**
386 * drm_syncobj_search_fence - lookup and reference the fence in a sync object or
387 * in a timeline point
388 * @syncobj: sync object pointer
389 * @point: timeline point
390 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
391 * @fence: out parameter for the fence
392 *
393 * if flags is DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, the function will block
394 * here until specific timeline points is reached.
395 * if not, you need a submit thread and block in userspace until all future
396 * timeline points have materialized, only then you can submit to the kernel,
397 * otherwise, function will fail to return fence.
398 *
399 * Returns 0 on success or a negative error value on failure. On success @fence
400 * contains a reference to the fence, which must be released by calling
401 * dma_fence_put().
402 */
403int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point,
404 u64 flags, struct dma_fence **fence)
405{
406 u64 pt_value = point;
407
408 if (!syncobj)
409 return -ENOENT;
410
411 drm_syncobj_garbage_collection(syncobj);
412 if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
413 /*BINARY syncobj always wait on last pt */
414 pt_value = syncobj->signal_point;
415
416 if (pt_value == 0)
417 pt_value += DRM_SYNCOBJ_BINARY_POINT;
418 }
419 return drm_syncobj_point_get(syncobj, pt_value, flags, fence);
420}
421EXPORT_SYMBOL(drm_syncobj_search_fence);
422
423/** 212/**
424 * drm_syncobj_find_fence - lookup and reference the fence in a sync object 213 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
425 * @file_private: drm file private pointer 214 * @file_private: drm file private pointer
@@ -429,7 +218,7 @@ EXPORT_SYMBOL(drm_syncobj_search_fence);
429 * @fence: out parameter for the fence 218 * @fence: out parameter for the fence
430 * 219 *
431 * This is just a convenience function that combines drm_syncobj_find() and 220 * This is just a convenience function that combines drm_syncobj_find() and
432 * drm_syncobj_lookup_fence(). 221 * drm_syncobj_fence_get().
433 * 222 *
434 * Returns 0 on success or a negative error value on failure. On success @fence 223 * Returns 0 on success or a negative error value on failure. On success @fence
435 * contains a reference to the fence, which must be released by calling 224 * contains a reference to the fence, which must be released by calling
@@ -440,11 +229,16 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
440 struct dma_fence **fence) 229 struct dma_fence **fence)
441{ 230{
442 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); 231 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
443 int ret; 232 int ret = 0;
444 233
445 ret = drm_syncobj_search_fence(syncobj, point, flags, fence); 234 if (!syncobj)
446 if (syncobj) 235 return -ENOENT;
447 drm_syncobj_put(syncobj); 236
237 *fence = drm_syncobj_fence_get(syncobj);
238 if (!*fence) {
239 ret = -EINVAL;
240 }
241 drm_syncobj_put(syncobj);
448 return ret; 242 return ret;
449} 243}
450EXPORT_SYMBOL(drm_syncobj_find_fence); 244EXPORT_SYMBOL(drm_syncobj_find_fence);
@@ -460,7 +254,7 @@ void drm_syncobj_free(struct kref *kref)
460 struct drm_syncobj *syncobj = container_of(kref, 254 struct drm_syncobj *syncobj = container_of(kref,
461 struct drm_syncobj, 255 struct drm_syncobj,
462 refcount); 256 refcount);
463 drm_syncobj_fini(syncobj); 257 drm_syncobj_replace_fence(syncobj, 0, NULL);
464 kfree(syncobj); 258 kfree(syncobj);
465} 259}
466EXPORT_SYMBOL(drm_syncobj_free); 260EXPORT_SYMBOL(drm_syncobj_free);
@@ -489,13 +283,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
489 283
490 kref_init(&syncobj->refcount); 284 kref_init(&syncobj->refcount);
491 INIT_LIST_HEAD(&syncobj->cb_list); 285 INIT_LIST_HEAD(&syncobj->cb_list);
492 spin_lock_init(&syncobj->pt_lock); 286 spin_lock_init(&syncobj->lock);
493 mutex_init(&syncobj->cb_mutex);
494 if (flags & DRM_SYNCOBJ_CREATE_TYPE_TIMELINE)
495 syncobj->type = DRM_SYNCOBJ_TYPE_TIMELINE;
496 else
497 syncobj->type = DRM_SYNCOBJ_TYPE_BINARY;
498 drm_syncobj_init(syncobj);
499 287
500 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { 288 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
501 ret = drm_syncobj_assign_null_handle(syncobj); 289 ret = drm_syncobj_assign_null_handle(syncobj);
@@ -778,8 +566,7 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
778 return -EOPNOTSUPP; 566 return -EOPNOTSUPP;
779 567
780 /* no valid flags yet */ 568 /* no valid flags yet */
781 if (args->flags & ~(DRM_SYNCOBJ_CREATE_SIGNALED | 569 if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
782 DRM_SYNCOBJ_CREATE_TYPE_TIMELINE))
783 return -EINVAL; 570 return -EINVAL;
784 571
785 return drm_syncobj_create_as_handle(file_private, 572 return drm_syncobj_create_as_handle(file_private,
@@ -872,8 +659,9 @@ static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
872 struct syncobj_wait_entry *wait = 659 struct syncobj_wait_entry *wait =
873 container_of(cb, struct syncobj_wait_entry, syncobj_cb); 660 container_of(cb, struct syncobj_wait_entry, syncobj_cb);
874 661
875 drm_syncobj_search_fence(syncobj, 0, 0, &wait->fence); 662 /* This happens inside the syncobj lock */
876 663 wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
664 lockdep_is_held(&syncobj->lock)));
877 wake_up_process(wait->task); 665 wake_up_process(wait->task);
878} 666}
879 667
@@ -899,8 +687,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
899 signaled_count = 0; 687 signaled_count = 0;
900 for (i = 0; i < count; ++i) { 688 for (i = 0; i < count; ++i) {
901 entries[i].task = current; 689 entries[i].task = current;
902 drm_syncobj_search_fence(syncobjs[i], 0, 0, 690 entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
903 &entries[i].fence);
904 if (!entries[i].fence) { 691 if (!entries[i].fence) {
905 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 692 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
906 continue; 693 continue;
@@ -931,9 +718,6 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
931 718
932 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 719 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
933 for (i = 0; i < count; ++i) { 720 for (i = 0; i < count; ++i) {
934 if (entries[i].fence)
935 continue;
936
937 drm_syncobj_fence_get_or_add_callback(syncobjs[i], 721 drm_syncobj_fence_get_or_add_callback(syncobjs[i],
938 &entries[i].fence, 722 &entries[i].fence,
939 &entries[i].syncobj_cb, 723 &entries[i].syncobj_cb,
@@ -1165,13 +949,12 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1165 if (ret < 0) 949 if (ret < 0)
1166 return ret; 950 return ret;
1167 951
1168 for (i = 0; i < args->count_handles; i++) { 952 for (i = 0; i < args->count_handles; i++)
1169 drm_syncobj_fini(syncobjs[i]); 953 drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
1170 drm_syncobj_init(syncobjs[i]); 954
1171 }
1172 drm_syncobj_array_free(syncobjs, args->count_handles); 955 drm_syncobj_array_free(syncobjs, args->count_handles);
1173 956
1174 return ret; 957 return 0;
1175} 958}
1176 959
1177int 960int
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a1c04db6c80..1aaccbe7e1de 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2157,7 +2157,7 @@ await_fence_array(struct i915_execbuffer *eb,
2157 if (!(flags & I915_EXEC_FENCE_WAIT)) 2157 if (!(flags & I915_EXEC_FENCE_WAIT))
2158 continue; 2158 continue;
2159 2159
2160 drm_syncobj_search_fence(syncobj, 0, 0, &fence); 2160 fence = drm_syncobj_fence_get(syncobj);
2161 if (!fence) 2161 if (!fence)
2162 return -EINVAL; 2162 return -EINVAL;
2163 2163
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 3ce51d8dfe1c..c28b69f48555 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -7,6 +7,7 @@ config DRM_MESON
7 select DRM_GEM_CMA_HELPER 7 select DRM_GEM_CMA_HELPER
8 select VIDEOMODE_HELPERS 8 select VIDEOMODE_HELPERS
9 select REGMAP_MMIO 9 select REGMAP_MMIO
10 select MESON_CANVAS
10 11
11config DRM_MESON_DW_HDMI 12config DRM_MESON_DW_HDMI
12 tristate "HDMI Synopsys Controller support for Amlogic Meson Display" 13 tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c5c4cc362f02..7709f2fbb9f7 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
1meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o 1meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
2meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o 2meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
3 3
4obj-$(CONFIG_DRM_MESON) += meson-drm.o 4obj-$(CONFIG_DRM_MESON) += meson-drm.o
5obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o 5obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
index 08f6073d967e..5de11aa7c775 100644
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -39,6 +39,7 @@
39#define CANVAS_WIDTH_HBIT 0 39#define CANVAS_WIDTH_HBIT 0
40#define CANVAS_HEIGHT_BIT 9 40#define CANVAS_HEIGHT_BIT 9
41#define CANVAS_BLKMODE_BIT 24 41#define CANVAS_BLKMODE_BIT 24
42#define CANVAS_ENDIAN_BIT 26
42#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */ 43#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
43#define CANVAS_LUT_WR_EN (0x2 << 8) 44#define CANVAS_LUT_WR_EN (0x2 << 8)
44#define CANVAS_LUT_RD_EN (0x1 << 8) 45#define CANVAS_LUT_RD_EN (0x1 << 8)
@@ -47,7 +48,8 @@ void meson_canvas_setup(struct meson_drm *priv,
47 uint32_t canvas_index, uint32_t addr, 48 uint32_t canvas_index, uint32_t addr,
48 uint32_t stride, uint32_t height, 49 uint32_t stride, uint32_t height,
49 unsigned int wrap, 50 unsigned int wrap,
50 unsigned int blkmode) 51 unsigned int blkmode,
52 unsigned int endian)
51{ 53{
52 unsigned int val; 54 unsigned int val;
53 55
@@ -60,7 +62,8 @@ void meson_canvas_setup(struct meson_drm *priv,
60 CANVAS_WIDTH_HBIT) | 62 CANVAS_WIDTH_HBIT) |
61 (height << CANVAS_HEIGHT_BIT) | 63 (height << CANVAS_HEIGHT_BIT) |
62 (wrap << 22) | 64 (wrap << 22) |
63 (blkmode << CANVAS_BLKMODE_BIT)); 65 (blkmode << CANVAS_BLKMODE_BIT) |
66 (endian << CANVAS_ENDIAN_BIT));
64 67
65 regmap_write(priv->dmc, DMC_CAV_LUT_ADDR, 68 regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
66 CANVAS_LUT_WR_EN | canvas_index); 69 CANVAS_LUT_WR_EN | canvas_index);
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
index af1759da4b27..85dbf26e2826 100644
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ b/drivers/gpu/drm/meson/meson_canvas.h
@@ -23,6 +23,9 @@
23#define __MESON_CANVAS_H 23#define __MESON_CANVAS_H
24 24
25#define MESON_CANVAS_ID_OSD1 0x4e 25#define MESON_CANVAS_ID_OSD1 0x4e
26#define MESON_CANVAS_ID_VD1_0 0x60
27#define MESON_CANVAS_ID_VD1_1 0x61
28#define MESON_CANVAS_ID_VD1_2 0x62
26 29
27/* Canvas configuration. */ 30/* Canvas configuration. */
28#define MESON_CANVAS_WRAP_NONE 0x00 31#define MESON_CANVAS_WRAP_NONE 0x00
@@ -33,10 +36,16 @@
33#define MESON_CANVAS_BLKMODE_32x32 0x01 36#define MESON_CANVAS_BLKMODE_32x32 0x01
34#define MESON_CANVAS_BLKMODE_64x64 0x02 37#define MESON_CANVAS_BLKMODE_64x64 0x02
35 38
39#define MESON_CANVAS_ENDIAN_SWAP16 0x1
40#define MESON_CANVAS_ENDIAN_SWAP32 0x3
41#define MESON_CANVAS_ENDIAN_SWAP64 0x7
42#define MESON_CANVAS_ENDIAN_SWAP128 0xf
43
36void meson_canvas_setup(struct meson_drm *priv, 44void meson_canvas_setup(struct meson_drm *priv,
37 uint32_t canvas_index, uint32_t addr, 45 uint32_t canvas_index, uint32_t addr,
38 uint32_t stride, uint32_t height, 46 uint32_t stride, uint32_t height,
39 unsigned int wrap, 47 unsigned int wrap,
40 unsigned int blkmode); 48 unsigned int blkmode,
49 unsigned int endian);
41 50
42#endif /* __MESON_CANVAS_H */ 51#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 05520202c967..d78168f979db 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/bitfield.h>
28#include <drm/drmP.h> 29#include <drm/drmP.h>
29#include <drm/drm_atomic.h> 30#include <drm/drm_atomic.h>
30#include <drm/drm_atomic_helper.h> 31#include <drm/drm_atomic_helper.h>
@@ -98,6 +99,10 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
98 writel(crtc_state->mode.hdisplay, 99 writel(crtc_state->mode.hdisplay,
99 priv->io_base + _REG(VPP_POSTBLEND_H_SIZE)); 100 priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
100 101
102 /* VD1 Preblend vertical start/end */
103 writel(FIELD_PREP(GENMASK(11, 0), 2303),
104 priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
105
101 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE, 106 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
102 priv->io_base + _REG(VPP_MISC)); 107 priv->io_base + _REG(VPP_MISC));
103 108
@@ -110,11 +115,17 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
110 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 115 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
111 struct meson_drm *priv = meson_crtc->priv; 116 struct meson_drm *priv = meson_crtc->priv;
112 117
118 DRM_DEBUG_DRIVER("\n");
119
113 priv->viu.osd1_enabled = false; 120 priv->viu.osd1_enabled = false;
114 priv->viu.osd1_commit = false; 121 priv->viu.osd1_commit = false;
115 122
123 priv->viu.vd1_enabled = false;
124 priv->viu.vd1_commit = false;
125
116 /* Disable VPP Postblend */ 126 /* Disable VPP Postblend */
117 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0, 127 writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND |
128 VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0,
118 priv->io_base + _REG(VPP_MISC)); 129 priv->io_base + _REG(VPP_MISC));
119 130
120 if (crtc->state->event && !crtc->state->active) { 131 if (crtc->state->event && !crtc->state->active) {
@@ -149,6 +160,7 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
149 struct meson_drm *priv = meson_crtc->priv; 160 struct meson_drm *priv = meson_crtc->priv;
150 161
151 priv->viu.osd1_commit = true; 162 priv->viu.osd1_commit = true;
163 priv->viu.vd1_commit = true;
152} 164}
153 165
154static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = { 166static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
@@ -177,26 +189,37 @@ void meson_crtc_irq(struct meson_drm *priv)
177 priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3)); 189 priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
178 writel_relaxed(priv->viu.osd1_blk0_cfg[4], 190 writel_relaxed(priv->viu.osd1_blk0_cfg[4],
179 priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4)); 191 priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
180 192 writel_relaxed(priv->viu.osd_sc_ctrl0,
181 /* If output is interlace, make use of the Scaler */ 193 priv->io_base + _REG(VPP_OSD_SC_CTRL0));
182 if (priv->viu.osd1_interlace) { 194 writel_relaxed(priv->viu.osd_sc_i_wh_m1,
183 struct drm_plane *plane = priv->primary_plane; 195 priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
184 struct drm_plane_state *state = plane->state; 196 writel_relaxed(priv->viu.osd_sc_o_h_start_end,
185 struct drm_rect dest = { 197 priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
186 .x1 = state->crtc_x, 198 writel_relaxed(priv->viu.osd_sc_o_v_start_end,
187 .y1 = state->crtc_y, 199 priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
188 .x2 = state->crtc_x + state->crtc_w, 200 writel_relaxed(priv->viu.osd_sc_v_ini_phase,
189 .y2 = state->crtc_y + state->crtc_h, 201 priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
190 }; 202 writel_relaxed(priv->viu.osd_sc_v_phase_step,
191 203 priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
192 meson_vpp_setup_interlace_vscaler_osd1(priv, &dest); 204 writel_relaxed(priv->viu.osd_sc_h_ini_phase,
193 } else 205 priv->io_base + _REG(VPP_OSD_HSC_INI_PHASE));
194 meson_vpp_disable_interlace_vscaler_osd1(priv); 206 writel_relaxed(priv->viu.osd_sc_h_phase_step,
195 207 priv->io_base + _REG(VPP_OSD_HSC_PHASE_STEP));
196 meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, 208 writel_relaxed(priv->viu.osd_sc_h_ctrl0,
197 priv->viu.osd1_addr, priv->viu.osd1_stride, 209 priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
198 priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, 210 writel_relaxed(priv->viu.osd_sc_v_ctrl0,
199 MESON_CANVAS_BLKMODE_LINEAR); 211 priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
212
213 if (priv->canvas)
214 meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
215 priv->viu.osd1_addr, priv->viu.osd1_stride,
216 priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
217 MESON_CANVAS_BLKMODE_LINEAR, 0);
218 else
219 meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
220 priv->viu.osd1_addr, priv->viu.osd1_stride,
221 priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
222 MESON_CANVAS_BLKMODE_LINEAR, 0);
200 223
201 /* Enable OSD1 */ 224 /* Enable OSD1 */
202 writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, 225 writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
@@ -205,6 +228,206 @@ void meson_crtc_irq(struct meson_drm *priv)
205 priv->viu.osd1_commit = false; 228 priv->viu.osd1_commit = false;
206 } 229 }
207 230
231 /* Update the VD1 registers */
232 if (priv->viu.vd1_enabled && priv->viu.vd1_commit) {
233
234 switch (priv->viu.vd1_planes) {
235 case 3:
236 if (priv->canvas)
237 meson_canvas_config(priv->canvas,
238 priv->canvas_id_vd1_2,
239 priv->viu.vd1_addr2,
240 priv->viu.vd1_stride2,
241 priv->viu.vd1_height2,
242 MESON_CANVAS_WRAP_NONE,
243 MESON_CANVAS_BLKMODE_LINEAR,
244 MESON_CANVAS_ENDIAN_SWAP64);
245 else
246 meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
247 priv->viu.vd1_addr2,
248 priv->viu.vd1_stride2,
249 priv->viu.vd1_height2,
250 MESON_CANVAS_WRAP_NONE,
251 MESON_CANVAS_BLKMODE_LINEAR,
252 MESON_CANVAS_ENDIAN_SWAP64);
253 /* fallthrough */
254 case 2:
255 if (priv->canvas)
256 meson_canvas_config(priv->canvas,
257 priv->canvas_id_vd1_1,
258 priv->viu.vd1_addr1,
259 priv->viu.vd1_stride1,
260 priv->viu.vd1_height1,
261 MESON_CANVAS_WRAP_NONE,
262 MESON_CANVAS_BLKMODE_LINEAR,
263 MESON_CANVAS_ENDIAN_SWAP64);
264 else
265 meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
266 priv->viu.vd1_addr2,
267 priv->viu.vd1_stride2,
268 priv->viu.vd1_height2,
269 MESON_CANVAS_WRAP_NONE,
270 MESON_CANVAS_BLKMODE_LINEAR,
271 MESON_CANVAS_ENDIAN_SWAP64);
272 /* fallthrough */
273 case 1:
274 if (priv->canvas)
275 meson_canvas_config(priv->canvas,
276 priv->canvas_id_vd1_0,
277 priv->viu.vd1_addr0,
278 priv->viu.vd1_stride0,
279 priv->viu.vd1_height0,
280 MESON_CANVAS_WRAP_NONE,
281 MESON_CANVAS_BLKMODE_LINEAR,
282 MESON_CANVAS_ENDIAN_SWAP64);
283 else
284 meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
285 priv->viu.vd1_addr2,
286 priv->viu.vd1_stride2,
287 priv->viu.vd1_height2,
288 MESON_CANVAS_WRAP_NONE,
289 MESON_CANVAS_BLKMODE_LINEAR,
290 MESON_CANVAS_ENDIAN_SWAP64);
291 };
292
293 writel_relaxed(priv->viu.vd1_if0_gen_reg,
294 priv->io_base + _REG(VD1_IF0_GEN_REG));
295 writel_relaxed(priv->viu.vd1_if0_gen_reg,
296 priv->io_base + _REG(VD2_IF0_GEN_REG));
297 writel_relaxed(priv->viu.vd1_if0_gen_reg2,
298 priv->io_base + _REG(VD1_IF0_GEN_REG2));
299 writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
300 priv->io_base + _REG(VIU_VD1_FMT_CTRL));
301 writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
302 priv->io_base + _REG(VIU_VD2_FMT_CTRL));
303 writel_relaxed(priv->viu.viu_vd1_fmt_w,
304 priv->io_base + _REG(VIU_VD1_FMT_W));
305 writel_relaxed(priv->viu.viu_vd1_fmt_w,
306 priv->io_base + _REG(VIU_VD2_FMT_W));
307 writel_relaxed(priv->viu.vd1_if0_canvas0,
308 priv->io_base + _REG(VD1_IF0_CANVAS0));
309 writel_relaxed(priv->viu.vd1_if0_canvas0,
310 priv->io_base + _REG(VD1_IF0_CANVAS1));
311 writel_relaxed(priv->viu.vd1_if0_canvas0,
312 priv->io_base + _REG(VD2_IF0_CANVAS0));
313 writel_relaxed(priv->viu.vd1_if0_canvas0,
314 priv->io_base + _REG(VD2_IF0_CANVAS1));
315 writel_relaxed(priv->viu.vd1_if0_luma_x0,
316 priv->io_base + _REG(VD1_IF0_LUMA_X0));
317 writel_relaxed(priv->viu.vd1_if0_luma_x0,
318 priv->io_base + _REG(VD1_IF0_LUMA_X1));
319 writel_relaxed(priv->viu.vd1_if0_luma_x0,
320 priv->io_base + _REG(VD2_IF0_LUMA_X0));
321 writel_relaxed(priv->viu.vd1_if0_luma_x0,
322 priv->io_base + _REG(VD2_IF0_LUMA_X1));
323 writel_relaxed(priv->viu.vd1_if0_luma_y0,
324 priv->io_base + _REG(VD1_IF0_LUMA_Y0));
325 writel_relaxed(priv->viu.vd1_if0_luma_y0,
326 priv->io_base + _REG(VD1_IF0_LUMA_Y1));
327 writel_relaxed(priv->viu.vd1_if0_luma_y0,
328 priv->io_base + _REG(VD2_IF0_LUMA_Y0));
329 writel_relaxed(priv->viu.vd1_if0_luma_y0,
330 priv->io_base + _REG(VD2_IF0_LUMA_Y1));
331 writel_relaxed(priv->viu.vd1_if0_chroma_x0,
332 priv->io_base + _REG(VD1_IF0_CHROMA_X0));
333 writel_relaxed(priv->viu.vd1_if0_chroma_x0,
334 priv->io_base + _REG(VD1_IF0_CHROMA_X1));
335 writel_relaxed(priv->viu.vd1_if0_chroma_x0,
336 priv->io_base + _REG(VD2_IF0_CHROMA_X0));
337 writel_relaxed(priv->viu.vd1_if0_chroma_x0,
338 priv->io_base + _REG(VD2_IF0_CHROMA_X1));
339 writel_relaxed(priv->viu.vd1_if0_chroma_y0,
340 priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
341 writel_relaxed(priv->viu.vd1_if0_chroma_y0,
342 priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
343 writel_relaxed(priv->viu.vd1_if0_chroma_y0,
344 priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
345 writel_relaxed(priv->viu.vd1_if0_chroma_y0,
346 priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
347 writel_relaxed(priv->viu.vd1_if0_repeat_loop,
348 priv->io_base + _REG(VD1_IF0_RPT_LOOP));
349 writel_relaxed(priv->viu.vd1_if0_repeat_loop,
350 priv->io_base + _REG(VD2_IF0_RPT_LOOP));
351 writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
352 priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
353 writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
354 priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
355 writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
356 priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
357 writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
358 priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
359 writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
360 priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
361 writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
362 priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
363 writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
364 priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
365 writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
366 priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
367 writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
368 writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
369 writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
370 writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
371 writel_relaxed(priv->viu.vd1_range_map_y,
372 priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
373 writel_relaxed(priv->viu.vd1_range_map_cb,
374 priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
375 writel_relaxed(priv->viu.vd1_range_map_cr,
376 priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
377 writel_relaxed(0x78404,
378 priv->io_base + _REG(VPP_SC_MISC));
379 writel_relaxed(priv->viu.vpp_pic_in_height,
380 priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
381 writel_relaxed(priv->viu.vpp_postblend_vd1_h_start_end,
382 priv->io_base + _REG(VPP_POSTBLEND_VD1_H_START_END));
383 writel_relaxed(priv->viu.vpp_blend_vd2_h_start_end,
384 priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
385 writel_relaxed(priv->viu.vpp_postblend_vd1_v_start_end,
386 priv->io_base + _REG(VPP_POSTBLEND_VD1_V_START_END));
387 writel_relaxed(priv->viu.vpp_blend_vd2_v_start_end,
388 priv->io_base + _REG(VPP_BLEND_VD2_V_START_END));
389 writel_relaxed(priv->viu.vpp_hsc_region12_startp,
390 priv->io_base + _REG(VPP_HSC_REGION12_STARTP));
391 writel_relaxed(priv->viu.vpp_hsc_region34_startp,
392 priv->io_base + _REG(VPP_HSC_REGION34_STARTP));
393 writel_relaxed(priv->viu.vpp_hsc_region4_endp,
394 priv->io_base + _REG(VPP_HSC_REGION4_ENDP));
395 writel_relaxed(priv->viu.vpp_hsc_start_phase_step,
396 priv->io_base + _REG(VPP_HSC_START_PHASE_STEP));
397 writel_relaxed(priv->viu.vpp_hsc_region1_phase_slope,
398 priv->io_base + _REG(VPP_HSC_REGION1_PHASE_SLOPE));
399 writel_relaxed(priv->viu.vpp_hsc_region3_phase_slope,
400 priv->io_base + _REG(VPP_HSC_REGION3_PHASE_SLOPE));
401 writel_relaxed(priv->viu.vpp_line_in_length,
402 priv->io_base + _REG(VPP_LINE_IN_LENGTH));
403 writel_relaxed(priv->viu.vpp_preblend_h_size,
404 priv->io_base + _REG(VPP_PREBLEND_H_SIZE));
405 writel_relaxed(priv->viu.vpp_vsc_region12_startp,
406 priv->io_base + _REG(VPP_VSC_REGION12_STARTP));
407 writel_relaxed(priv->viu.vpp_vsc_region34_startp,
408 priv->io_base + _REG(VPP_VSC_REGION34_STARTP));
409 writel_relaxed(priv->viu.vpp_vsc_region4_endp,
410 priv->io_base + _REG(VPP_VSC_REGION4_ENDP));
411 writel_relaxed(priv->viu.vpp_vsc_start_phase_step,
412 priv->io_base + _REG(VPP_VSC_START_PHASE_STEP));
413 writel_relaxed(priv->viu.vpp_vsc_ini_phase,
414 priv->io_base + _REG(VPP_VSC_INI_PHASE));
415 writel_relaxed(priv->viu.vpp_vsc_phase_ctrl,
416 priv->io_base + _REG(VPP_VSC_PHASE_CTRL));
417 writel_relaxed(priv->viu.vpp_hsc_phase_ctrl,
418 priv->io_base + _REG(VPP_HSC_PHASE_CTRL));
419 writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
420
421 /* Enable VD1 */
422 writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
423 VPP_COLOR_MNG_ENABLE,
424 VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
425 VPP_COLOR_MNG_ENABLE,
426 priv->io_base + _REG(VPP_MISC));
427
428 priv->viu.vd1_commit = false;
429 }
430
208 drm_crtc_handle_vblank(priv->crtc); 431 drm_crtc_handle_vblank(priv->crtc);
209 432
210 spin_lock_irqsave(&priv->drm->event_lock, flags); 433 spin_lock_irqsave(&priv->drm->event_lock, flags);
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 348b5a198b9d..3ee4d4a4ecba 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -41,6 +41,7 @@
41 41
42#include "meson_drv.h" 42#include "meson_drv.h"
43#include "meson_plane.h" 43#include "meson_plane.h"
44#include "meson_overlay.h"
44#include "meson_crtc.h" 45#include "meson_crtc.h"
45#include "meson_venc_cvbs.h" 46#include "meson_venc_cvbs.h"
46 47
@@ -208,24 +209,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
208 goto free_drm; 209 goto free_drm;
209 } 210 }
210 211
211 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); 212 priv->canvas = meson_canvas_get(dev);
212 if (!res) { 213 if (!IS_ERR(priv->canvas)) {
213 ret = -EINVAL; 214 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
214 goto free_drm; 215 if (ret)
215 } 216 goto free_drm;
216 /* Simply ioremap since it may be a shared register zone */ 217 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
217 regs = devm_ioremap(dev, res->start, resource_size(res)); 218 if (ret) {
218 if (!regs) { 219 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
219 ret = -EADDRNOTAVAIL; 220 goto free_drm;
220 goto free_drm; 221 }
221 } 222 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
223 if (ret) {
224 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
225 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
226 goto free_drm;
227 }
228 ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
229 if (ret) {
230 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
231 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
232 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
233 goto free_drm;
234 }
235 } else {
236 priv->canvas = NULL;
222 237
223 priv->dmc = devm_regmap_init_mmio(dev, regs, 238 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
224 &meson_regmap_config); 239 if (!res) {
225 if (IS_ERR(priv->dmc)) { 240 ret = -EINVAL;
226 dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); 241 goto free_drm;
227 ret = PTR_ERR(priv->dmc); 242 }
228 goto free_drm; 243 /* Simply ioremap since it may be a shared register zone */
244 regs = devm_ioremap(dev, res->start, resource_size(res));
245 if (!regs) {
246 ret = -EADDRNOTAVAIL;
247 goto free_drm;
248 }
249
250 priv->dmc = devm_regmap_init_mmio(dev, regs,
251 &meson_regmap_config);
252 if (IS_ERR(priv->dmc)) {
253 dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
254 ret = PTR_ERR(priv->dmc);
255 goto free_drm;
256 }
229 } 257 }
230 258
231 priv->vsync_irq = platform_get_irq(pdev, 0); 259 priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -264,6 +292,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
264 if (ret) 292 if (ret)
265 goto free_drm; 293 goto free_drm;
266 294
295 ret = meson_overlay_create(priv);
296 if (ret)
297 goto free_drm;
298
267 ret = meson_crtc_create(priv); 299 ret = meson_crtc_create(priv);
268 if (ret) 300 if (ret)
269 goto free_drm; 301 goto free_drm;
@@ -300,6 +332,14 @@ static int meson_drv_bind(struct device *dev)
300static void meson_drv_unbind(struct device *dev) 332static void meson_drv_unbind(struct device *dev)
301{ 333{
302 struct drm_device *drm = dev_get_drvdata(dev); 334 struct drm_device *drm = dev_get_drvdata(dev);
335 struct meson_drm *priv = drm->dev_private;
336
337 if (priv->canvas) {
338 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
339 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
340 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
341 meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
342 }
303 343
304 drm_dev_unregister(drm); 344 drm_dev_unregister(drm);
305 drm_kms_helper_poll_fini(drm); 345 drm_kms_helper_poll_fini(drm);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index aab96260da9f..4dccf4cd042a 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -22,6 +22,7 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/regmap.h> 23#include <linux/regmap.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/soc/amlogic/meson-canvas.h>
25#include <drm/drmP.h> 26#include <drm/drmP.h>
26 27
27struct meson_drm { 28struct meson_drm {
@@ -31,9 +32,16 @@ struct meson_drm {
31 struct regmap *dmc; 32 struct regmap *dmc;
32 int vsync_irq; 33 int vsync_irq;
33 34
35 struct meson_canvas *canvas;
36 u8 canvas_id_osd1;
37 u8 canvas_id_vd1_0;
38 u8 canvas_id_vd1_1;
39 u8 canvas_id_vd1_2;
40
34 struct drm_device *drm; 41 struct drm_device *drm;
35 struct drm_crtc *crtc; 42 struct drm_crtc *crtc;
36 struct drm_plane *primary_plane; 43 struct drm_plane *primary_plane;
44 struct drm_plane *overlay_plane;
37 45
38 /* Components Data */ 46 /* Components Data */
39 struct { 47 struct {
@@ -45,6 +53,64 @@ struct meson_drm {
45 uint32_t osd1_addr; 53 uint32_t osd1_addr;
46 uint32_t osd1_stride; 54 uint32_t osd1_stride;
47 uint32_t osd1_height; 55 uint32_t osd1_height;
56 uint32_t osd_sc_ctrl0;
57 uint32_t osd_sc_i_wh_m1;
58 uint32_t osd_sc_o_h_start_end;
59 uint32_t osd_sc_o_v_start_end;
60 uint32_t osd_sc_v_ini_phase;
61 uint32_t osd_sc_v_phase_step;
62 uint32_t osd_sc_h_ini_phase;
63 uint32_t osd_sc_h_phase_step;
64 uint32_t osd_sc_h_ctrl0;
65 uint32_t osd_sc_v_ctrl0;
66
67 bool vd1_enabled;
68 bool vd1_commit;
69 unsigned int vd1_planes;
70 uint32_t vd1_if0_gen_reg;
71 uint32_t vd1_if0_luma_x0;
72 uint32_t vd1_if0_luma_y0;
73 uint32_t vd1_if0_chroma_x0;
74 uint32_t vd1_if0_chroma_y0;
75 uint32_t vd1_if0_repeat_loop;
76 uint32_t vd1_if0_luma0_rpt_pat;
77 uint32_t vd1_if0_chroma0_rpt_pat;
78 uint32_t vd1_range_map_y;
79 uint32_t vd1_range_map_cb;
80 uint32_t vd1_range_map_cr;
81 uint32_t viu_vd1_fmt_w;
82 uint32_t vd1_if0_canvas0;
83 uint32_t vd1_if0_gen_reg2;
84 uint32_t viu_vd1_fmt_ctrl;
85 uint32_t vd1_addr0;
86 uint32_t vd1_addr1;
87 uint32_t vd1_addr2;
88 uint32_t vd1_stride0;
89 uint32_t vd1_stride1;
90 uint32_t vd1_stride2;
91 uint32_t vd1_height0;
92 uint32_t vd1_height1;
93 uint32_t vd1_height2;
94 uint32_t vpp_pic_in_height;
95 uint32_t vpp_postblend_vd1_h_start_end;
96 uint32_t vpp_postblend_vd1_v_start_end;
97 uint32_t vpp_hsc_region12_startp;
98 uint32_t vpp_hsc_region34_startp;
99 uint32_t vpp_hsc_region4_endp;
100 uint32_t vpp_hsc_start_phase_step;
101 uint32_t vpp_hsc_region1_phase_slope;
102 uint32_t vpp_hsc_region3_phase_slope;
103 uint32_t vpp_line_in_length;
104 uint32_t vpp_preblend_h_size;
105 uint32_t vpp_vsc_region12_startp;
106 uint32_t vpp_vsc_region34_startp;
107 uint32_t vpp_vsc_region4_endp;
108 uint32_t vpp_vsc_start_phase_step;
109 uint32_t vpp_vsc_ini_phase;
110 uint32_t vpp_vsc_phase_ctrl;
111 uint32_t vpp_hsc_phase_ctrl;
112 uint32_t vpp_blend_vd2_h_start_end;
113 uint32_t vpp_blend_vd2_v_start_end;
48 } viu; 114 } viu;
49 115
50 struct { 116 struct {
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
new file mode 100644
index 000000000000..9aebc5e4b418
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -0,0 +1,586 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2018 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
6 */
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/mutex.h>
11#include <linux/bitfield.h>
12#include <linux/platform_device.h>
13#include <drm/drmP.h>
14#include <drm/drm_atomic.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_plane_helper.h>
17#include <drm/drm_gem_cma_helper.h>
18#include <drm/drm_fb_cma_helper.h>
19#include <drm/drm_rect.h>
20
21#include "meson_overlay.h"
22#include "meson_vpp.h"
23#include "meson_viu.h"
24#include "meson_canvas.h"
25#include "meson_registers.h"
26
27/* VD1_IF0_GEN_REG */
28#define VD_URGENT_CHROMA BIT(28)
29#define VD_URGENT_LUMA BIT(27)
30#define VD_HOLD_LINES(lines) FIELD_PREP(GENMASK(24, 19), lines)
31#define VD_DEMUX_MODE_RGB BIT(16)
32#define VD_BYTES_PER_PIXEL(val) FIELD_PREP(GENMASK(15, 14), val)
33#define VD_CHRO_RPT_LASTL_CTRL BIT(6)
34#define VD_LITTLE_ENDIAN BIT(4)
35#define VD_SEPARATE_EN BIT(1)
36#define VD_ENABLE BIT(0)
37
38/* VD1_IF0_CANVAS0 */
39#define CANVAS_ADDR2(addr) FIELD_PREP(GENMASK(23, 16), addr)
40#define CANVAS_ADDR1(addr) FIELD_PREP(GENMASK(15, 8), addr)
41#define CANVAS_ADDR0(addr) FIELD_PREP(GENMASK(7, 0), addr)
42
43/* VD1_IF0_LUMA_X0 VD1_IF0_CHROMA_X0 */
44#define VD_X_START(value) FIELD_PREP(GENMASK(14, 0), value)
45#define VD_X_END(value) FIELD_PREP(GENMASK(30, 16), value)
46
47/* VD1_IF0_LUMA_Y0 VD1_IF0_CHROMA_Y0 */
48#define VD_Y_START(value) FIELD_PREP(GENMASK(12, 0), value)
49#define VD_Y_END(value) FIELD_PREP(GENMASK(28, 16), value)
50
51/* VD1_IF0_GEN_REG2 */
52#define VD_COLOR_MAP(value) FIELD_PREP(GENMASK(1, 0), value)
53
54/* VIU_VD1_FMT_CTRL */
55#define VD_HORZ_Y_C_RATIO(value) FIELD_PREP(GENMASK(22, 21), value)
56#define VD_HORZ_FMT_EN BIT(20)
57#define VD_VERT_RPT_LINE0 BIT(16)
58#define VD_VERT_INITIAL_PHASE(value) FIELD_PREP(GENMASK(11, 8), value)
59#define VD_VERT_PHASE_STEP(value) FIELD_PREP(GENMASK(7, 1), value)
60#define VD_VERT_FMT_EN BIT(0)
61
62/* VPP_POSTBLEND_VD1_H_START_END */
63#define VD_H_END(value) FIELD_PREP(GENMASK(11, 0), value)
64#define VD_H_START(value) FIELD_PREP(GENMASK(27, 16), value)
65
66/* VPP_POSTBLEND_VD1_V_START_END */
67#define VD_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
68#define VD_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
69
70/* VPP_BLEND_VD2_V_START_END */
71#define VD2_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
72#define VD2_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
73
74/* VIU_VD1_FMT_W */
75#define VD_V_WIDTH(value) FIELD_PREP(GENMASK(11, 0), value)
76#define VD_H_WIDTH(value) FIELD_PREP(GENMASK(27, 16), value)
77
78/* VPP_HSC_REGION12_STARTP VPP_HSC_REGION34_STARTP */
79#define VD_REGION24_START(value) FIELD_PREP(GENMASK(11, 0), value)
80#define VD_REGION13_END(value) FIELD_PREP(GENMASK(27, 16), value)
81
82struct meson_overlay {
83 struct drm_plane base;
84 struct meson_drm *priv;
85};
86#define to_meson_overlay(x) container_of(x, struct meson_overlay, base)
87
88#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
89
90static int meson_overlay_atomic_check(struct drm_plane *plane,
91 struct drm_plane_state *state)
92{
93 struct drm_crtc_state *crtc_state;
94
95 if (!state->crtc)
96 return 0;
97
98 crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
99 if (IS_ERR(crtc_state))
100 return PTR_ERR(crtc_state);
101
102 return drm_atomic_helper_check_plane_state(state, crtc_state,
103 FRAC_16_16(1, 5),
104 FRAC_16_16(5, 1),
105 true, true);
106}
107
108/* Takes a fixed 16.16 number and converts it to integer. */
109static inline int64_t fixed16_to_int(int64_t value)
110{
111 return value >> 16;
112}
113
114static const uint8_t skip_tab[6] = {
115 0x24, 0x04, 0x68, 0x48, 0x28, 0x08,
116};
117
118static void meson_overlay_get_vertical_phase(unsigned int ratio_y, int *phase,
119 int *repeat, bool interlace)
120{
121 int offset_in = 0;
122 int offset_out = 0;
123 int repeat_skip = 0;
124
125 if (!interlace && ratio_y > (1 << 18))
126 offset_out = (1 * ratio_y) >> 10;
127
128 while ((offset_in + (4 << 8)) <= offset_out) {
129 repeat_skip++;
130 offset_in += 4 << 8;
131 }
132
133 *phase = (offset_out - offset_in) >> 2;
134
135 if (*phase > 0x100)
136 repeat_skip++;
137
138 *phase = *phase & 0xff;
139
140 if (repeat_skip > 5)
141 repeat_skip = 5;
142
143 *repeat = skip_tab[repeat_skip];
144}
145
146static void meson_overlay_setup_scaler_params(struct meson_drm *priv,
147 struct drm_plane *plane,
148 bool interlace_mode)
149{
150 struct drm_crtc_state *crtc_state = priv->crtc->state;
151 int video_top, video_left, video_width, video_height;
152 struct drm_plane_state *state = plane->state;
153 unsigned int vd_start_lines, vd_end_lines;
154 unsigned int hd_start_lines, hd_end_lines;
155 unsigned int crtc_height, crtc_width;
156 unsigned int vsc_startp, vsc_endp;
157 unsigned int hsc_startp, hsc_endp;
158 unsigned int crop_top, crop_left;
159 int vphase, vphase_repeat_skip;
160 unsigned int ratio_x, ratio_y;
161 int temp_height, temp_width;
162 unsigned int w_in, h_in;
163 int temp, start, end;
164
165 if (!crtc_state) {
166 DRM_ERROR("Invalid crtc_state\n");
167 return;
168 }
169
170 crtc_height = crtc_state->mode.vdisplay;
171 crtc_width = crtc_state->mode.hdisplay;
172
173 w_in = fixed16_to_int(state->src_w);
174 h_in = fixed16_to_int(state->src_h);
175 crop_top = fixed16_to_int(state->src_x);
176 crop_left = fixed16_to_int(state->src_x);
177
178 video_top = state->crtc_y;
179 video_left = state->crtc_x;
180 video_width = state->crtc_w;
181 video_height = state->crtc_h;
182
183 DRM_DEBUG("crtc_width %d crtc_height %d interlace %d\n",
184 crtc_width, crtc_height, interlace_mode);
185 DRM_DEBUG("w_in %d h_in %d crop_top %d crop_left %d\n",
186 w_in, h_in, crop_top, crop_left);
187 DRM_DEBUG("video top %d left %d width %d height %d\n",
188 video_top, video_left, video_width, video_height);
189
190 ratio_x = (w_in << 18) / video_width;
191 ratio_y = (h_in << 18) / video_height;
192
193 if (ratio_x * video_width < (w_in << 18))
194 ratio_x++;
195
196 DRM_DEBUG("ratio x 0x%x y 0x%x\n", ratio_x, ratio_y);
197
198 meson_overlay_get_vertical_phase(ratio_y, &vphase, &vphase_repeat_skip,
199 interlace_mode);
200
201 DRM_DEBUG("vphase 0x%x skip %d\n", vphase, vphase_repeat_skip);
202
203 /* Vertical */
204
205 start = video_top + video_height / 2 - ((h_in << 17) / ratio_y);
206 end = (h_in << 18) / ratio_y + start - 1;
207
208 if (video_top < 0 && start < 0)
209 vd_start_lines = (-(start) * ratio_y) >> 18;
210 else if (start < video_top)
211 vd_start_lines = ((video_top - start) * ratio_y) >> 18;
212 else
213 vd_start_lines = 0;
214
215 if (video_top < 0)
216 temp_height = min_t(unsigned int,
217 video_top + video_height - 1,
218 crtc_height - 1);
219 else
220 temp_height = min_t(unsigned int,
221 video_top + video_height - 1,
222 crtc_height - 1) - video_top + 1;
223
224 temp = vd_start_lines + (temp_height * ratio_y >> 18);
225 vd_end_lines = (temp <= (h_in - 1)) ? temp : (h_in - 1);
226
227 vd_start_lines += crop_left;
228 vd_end_lines += crop_left;
229
230 /*
231 * TOFIX: Input frames are handled and scaled like progressive frames,
 232 * proper handling of interlaced field input frames needs to be figured
 233 * out using the proper framebuffer flags set by userspace.
234 */
235 if (interlace_mode) {
236 start >>= 1;
237 end >>= 1;
238 }
239
240 vsc_startp = max_t(int, start,
241 max_t(int, 0, video_top));
242 vsc_endp = min_t(int, end,
243 min_t(int, crtc_height - 1,
244 video_top + video_height - 1));
245
246 DRM_DEBUG("vsc startp %d endp %d start_lines %d end_lines %d\n",
247 vsc_startp, vsc_endp, vd_start_lines, vd_end_lines);
248
249 /* Horizontal */
250
251 start = video_left + video_width / 2 - ((w_in << 17) / ratio_x);
252 end = (w_in << 18) / ratio_x + start - 1;
253
254 if (video_left < 0 && start < 0)
255 hd_start_lines = (-(start) * ratio_x) >> 18;
256 else if (start < video_left)
257 hd_start_lines = ((video_left - start) * ratio_x) >> 18;
258 else
259 hd_start_lines = 0;
260
261 if (video_left < 0)
262 temp_width = min_t(unsigned int,
263 video_left + video_width - 1,
264 crtc_width - 1);
265 else
266 temp_width = min_t(unsigned int,
267 video_left + video_width - 1,
268 crtc_width - 1) - video_left + 1;
269
270 temp = hd_start_lines + (temp_width * ratio_x >> 18);
271 hd_end_lines = (temp <= (w_in - 1)) ? temp : (w_in - 1);
272
273 priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
274 hsc_startp = max_t(int, start, max_t(int, 0, video_left));
275 hsc_endp = min_t(int, end, min_t(int, crtc_width - 1,
276 video_left + video_width - 1));
277
278 hd_start_lines += crop_top;
279 hd_end_lines += crop_top;
280
281 DRM_DEBUG("hsc startp %d endp %d start_lines %d end_lines %d\n",
282 hsc_startp, hsc_endp, hd_start_lines, hd_end_lines);
283
284 priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
285
286 priv->viu.vpp_vsc_ini_phase = vphase << 8;
287 priv->viu.vpp_vsc_phase_ctrl = (1 << 13) | (4 << 8) |
288 vphase_repeat_skip;
289
290 priv->viu.vd1_if0_luma_x0 = VD_X_START(hd_start_lines) |
291 VD_X_END(hd_end_lines);
292 priv->viu.vd1_if0_chroma_x0 = VD_X_START(hd_start_lines >> 1) |
293 VD_X_END(hd_end_lines >> 1);
294
295 priv->viu.viu_vd1_fmt_w =
296 VD_H_WIDTH(hd_end_lines - hd_start_lines + 1) |
297 VD_V_WIDTH(hd_end_lines/2 - hd_start_lines/2 + 1);
298
299 priv->viu.vd1_if0_luma_y0 = VD_Y_START(vd_start_lines) |
300 VD_Y_END(vd_end_lines);
301
302 priv->viu.vd1_if0_chroma_y0 = VD_Y_START(vd_start_lines >> 1) |
303 VD_Y_END(vd_end_lines >> 1);
304
305 priv->viu.vpp_pic_in_height = h_in;
306
307 priv->viu.vpp_postblend_vd1_h_start_end = VD_H_START(hsc_startp) |
308 VD_H_END(hsc_endp);
309 priv->viu.vpp_blend_vd2_h_start_end = VD_H_START(hd_start_lines) |
310 VD_H_END(hd_end_lines);
311 priv->viu.vpp_hsc_region12_startp = VD_REGION13_END(0) |
312 VD_REGION24_START(hsc_startp);
313 priv->viu.vpp_hsc_region34_startp =
314 VD_REGION13_END(hsc_startp) |
315 VD_REGION24_START(hsc_endp - hsc_startp);
316 priv->viu.vpp_hsc_region4_endp = hsc_endp - hsc_startp;
317 priv->viu.vpp_hsc_start_phase_step = ratio_x << 6;
318 priv->viu.vpp_hsc_region1_phase_slope = 0;
319 priv->viu.vpp_hsc_region3_phase_slope = 0;
320 priv->viu.vpp_hsc_phase_ctrl = (1 << 21) | (4 << 16);
321
322 priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
323 priv->viu.vpp_preblend_h_size = hd_end_lines - hd_start_lines + 1;
324
325 priv->viu.vpp_postblend_vd1_v_start_end = VD_V_START(vsc_startp) |
326 VD_V_END(vsc_endp);
327 priv->viu.vpp_blend_vd2_v_start_end =
328 VD2_V_START((vd_end_lines + 1) >> 1) |
329 VD2_V_END(vd_end_lines);
330
331 priv->viu.vpp_vsc_region12_startp = 0;
332 priv->viu.vpp_vsc_region34_startp =
333 VD_REGION13_END(vsc_endp - vsc_startp) |
334 VD_REGION24_START(vsc_endp - vsc_startp);
335 priv->viu.vpp_vsc_region4_endp = vsc_endp - vsc_startp;
336 priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
337}
338
339static void meson_overlay_atomic_update(struct drm_plane *plane,
340 struct drm_plane_state *old_state)
341{
342 struct meson_overlay *meson_overlay = to_meson_overlay(plane);
343 struct drm_plane_state *state = plane->state;
344 struct drm_framebuffer *fb = state->fb;
345 struct meson_drm *priv = meson_overlay->priv;
346 struct drm_gem_cma_object *gem;
347 unsigned long flags;
348 bool interlace_mode;
349
350 DRM_DEBUG_DRIVER("\n");
351
 352 /* Fallback if canvas provider is not available */
353 if (!priv->canvas) {
354 priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
355 priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
356 priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
357 }
358
359 interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
360
361 spin_lock_irqsave(&priv->drm->event_lock, flags);
362
363 priv->viu.vd1_if0_gen_reg = VD_URGENT_CHROMA |
364 VD_URGENT_LUMA |
365 VD_HOLD_LINES(9) |
366 VD_CHRO_RPT_LASTL_CTRL |
367 VD_ENABLE;
368
369 /* Setup scaler params */
370 meson_overlay_setup_scaler_params(priv, plane, interlace_mode);
371
372 priv->viu.vd1_if0_repeat_loop = 0;
373 priv->viu.vd1_if0_luma0_rpt_pat = interlace_mode ? 8 : 0;
374 priv->viu.vd1_if0_chroma0_rpt_pat = interlace_mode ? 8 : 0;
375 priv->viu.vd1_range_map_y = 0;
376 priv->viu.vd1_range_map_cb = 0;
377 priv->viu.vd1_range_map_cr = 0;
378
379 /* Default values for RGB888/YUV444 */
380 priv->viu.vd1_if0_gen_reg2 = 0;
381 priv->viu.viu_vd1_fmt_ctrl = 0;
382
383 switch (fb->format->format) {
384 /* TOFIX DRM_FORMAT_RGB888 should be supported */
385 case DRM_FORMAT_YUYV:
386 priv->viu.vd1_if0_gen_reg |= VD_BYTES_PER_PIXEL(1);
387 priv->viu.vd1_if0_canvas0 =
388 CANVAS_ADDR2(priv->canvas_id_vd1_0) |
389 CANVAS_ADDR1(priv->canvas_id_vd1_0) |
390 CANVAS_ADDR0(priv->canvas_id_vd1_0);
391 priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
392 VD_HORZ_FMT_EN |
393 VD_VERT_RPT_LINE0 |
394 VD_VERT_INITIAL_PHASE(12) |
395 VD_VERT_PHASE_STEP(16) | /* /2 */
396 VD_VERT_FMT_EN;
397 break;
398 case DRM_FORMAT_NV12:
399 case DRM_FORMAT_NV21:
400 priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
401 priv->viu.vd1_if0_canvas0 =
402 CANVAS_ADDR2(priv->canvas_id_vd1_1) |
403 CANVAS_ADDR1(priv->canvas_id_vd1_1) |
404 CANVAS_ADDR0(priv->canvas_id_vd1_0);
405 if (fb->format->format == DRM_FORMAT_NV12)
406 priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(1);
407 else
408 priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(2);
409 priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
410 VD_HORZ_FMT_EN |
411 VD_VERT_RPT_LINE0 |
412 VD_VERT_INITIAL_PHASE(12) |
413 VD_VERT_PHASE_STEP(8) | /* /4 */
414 VD_VERT_FMT_EN;
415 break;
416 case DRM_FORMAT_YUV444:
417 case DRM_FORMAT_YUV422:
418 case DRM_FORMAT_YUV420:
419 case DRM_FORMAT_YUV411:
420 case DRM_FORMAT_YUV410:
421 priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
422 priv->viu.vd1_if0_canvas0 =
423 CANVAS_ADDR2(priv->canvas_id_vd1_2) |
424 CANVAS_ADDR1(priv->canvas_id_vd1_1) |
425 CANVAS_ADDR0(priv->canvas_id_vd1_0);
426 switch (fb->format->format) {
427 case DRM_FORMAT_YUV422:
428 priv->viu.viu_vd1_fmt_ctrl =
429 VD_HORZ_Y_C_RATIO(1) | /* /2 */
430 VD_HORZ_FMT_EN |
431 VD_VERT_RPT_LINE0 |
432 VD_VERT_INITIAL_PHASE(12) |
433 VD_VERT_PHASE_STEP(16) | /* /2 */
434 VD_VERT_FMT_EN;
435 break;
436 case DRM_FORMAT_YUV420:
437 priv->viu.viu_vd1_fmt_ctrl =
438 VD_HORZ_Y_C_RATIO(1) | /* /2 */
439 VD_HORZ_FMT_EN |
440 VD_VERT_RPT_LINE0 |
441 VD_VERT_INITIAL_PHASE(12) |
442 VD_VERT_PHASE_STEP(8) | /* /4 */
443 VD_VERT_FMT_EN;
444 break;
445 case DRM_FORMAT_YUV411:
446 priv->viu.viu_vd1_fmt_ctrl =
447 VD_HORZ_Y_C_RATIO(2) | /* /4 */
448 VD_HORZ_FMT_EN |
449 VD_VERT_RPT_LINE0 |
450 VD_VERT_INITIAL_PHASE(12) |
451 VD_VERT_PHASE_STEP(16) | /* /2 */
452 VD_VERT_FMT_EN;
453 break;
454 case DRM_FORMAT_YUV410:
455 priv->viu.viu_vd1_fmt_ctrl =
456 VD_HORZ_Y_C_RATIO(2) | /* /4 */
457 VD_HORZ_FMT_EN |
458 VD_VERT_RPT_LINE0 |
459 VD_VERT_INITIAL_PHASE(12) |
460 VD_VERT_PHASE_STEP(8) | /* /4 */
461 VD_VERT_FMT_EN;
462 break;
463 }
464 break;
465 }
466
467 /* Update Canvas with buffer address */
468 priv->viu.vd1_planes = drm_format_num_planes(fb->format->format);
469
470 switch (priv->viu.vd1_planes) {
471 case 3:
472 gem = drm_fb_cma_get_gem_obj(fb, 2);
473 priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
474 priv->viu.vd1_stride2 = fb->pitches[2];
475 priv->viu.vd1_height2 =
476 drm_format_plane_height(fb->height,
477 fb->format->format, 2);
478 DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n",
479 priv->viu.vd1_addr2,
480 priv->viu.vd1_stride2,
481 priv->viu.vd1_height2);
482 /* fallthrough */
483 case 2:
484 gem = drm_fb_cma_get_gem_obj(fb, 1);
485 priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
486 priv->viu.vd1_stride1 = fb->pitches[1];
487 priv->viu.vd1_height1 =
488 drm_format_plane_height(fb->height,
489 fb->format->format, 1);
490 DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n",
491 priv->viu.vd1_addr1,
492 priv->viu.vd1_stride1,
493 priv->viu.vd1_height1);
494 /* fallthrough */
495 case 1:
496 gem = drm_fb_cma_get_gem_obj(fb, 0);
497 priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
498 priv->viu.vd1_stride0 = fb->pitches[0];
499 priv->viu.vd1_height0 =
500 drm_format_plane_height(fb->height,
501 fb->format->format, 0);
502 DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n",
503 priv->viu.vd1_addr0,
504 priv->viu.vd1_stride0,
505 priv->viu.vd1_height0);
506 }
507
508 priv->viu.vd1_enabled = true;
509
510 spin_unlock_irqrestore(&priv->drm->event_lock, flags);
511
512 DRM_DEBUG_DRIVER("\n");
513}
514
515static void meson_overlay_atomic_disable(struct drm_plane *plane,
516 struct drm_plane_state *old_state)
517{
518 struct meson_overlay *meson_overlay = to_meson_overlay(plane);
519 struct meson_drm *priv = meson_overlay->priv;
520
521 DRM_DEBUG_DRIVER("\n");
522
523 priv->viu.vd1_enabled = false;
524
525 /* Disable VD1 */
526 writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
527 priv->io_base + _REG(VPP_MISC));
528
529}
530
531static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = {
532 .atomic_check = meson_overlay_atomic_check,
533 .atomic_disable = meson_overlay_atomic_disable,
534 .atomic_update = meson_overlay_atomic_update,
535};
536
537static const struct drm_plane_funcs meson_overlay_funcs = {
538 .update_plane = drm_atomic_helper_update_plane,
539 .disable_plane = drm_atomic_helper_disable_plane,
540 .destroy = drm_plane_cleanup,
541 .reset = drm_atomic_helper_plane_reset,
542 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
543 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
544};
545
546static const uint32_t supported_drm_formats[] = {
547 DRM_FORMAT_YUYV,
548 DRM_FORMAT_NV12,
549 DRM_FORMAT_NV21,
550 DRM_FORMAT_YUV444,
551 DRM_FORMAT_YUV422,
552 DRM_FORMAT_YUV420,
553 DRM_FORMAT_YUV411,
554 DRM_FORMAT_YUV410,
555};
556
557int meson_overlay_create(struct meson_drm *priv)
558{
559 struct meson_overlay *meson_overlay;
560 struct drm_plane *plane;
561
562 DRM_DEBUG_DRIVER("\n");
563
564 meson_overlay = devm_kzalloc(priv->drm->dev, sizeof(*meson_overlay),
565 GFP_KERNEL);
566 if (!meson_overlay)
567 return -ENOMEM;
568
569 meson_overlay->priv = priv;
570 plane = &meson_overlay->base;
571
572 drm_universal_plane_init(priv->drm, plane, 0xFF,
573 &meson_overlay_funcs,
574 supported_drm_formats,
575 ARRAY_SIZE(supported_drm_formats),
576 NULL,
577 DRM_PLANE_TYPE_OVERLAY, "meson_overlay_plane");
578
579 drm_plane_helper_add(plane, &meson_overlay_helper_funcs);
580
581 priv->overlay_plane = plane;
582
583 DRM_DEBUG_DRIVER("\n");
584
585 return 0;
586}
diff --git a/drivers/gpu/drm/meson/meson_overlay.h b/drivers/gpu/drm/meson/meson_overlay.h
new file mode 100644
index 000000000000..dae24f5ac63d
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.h
@@ -0,0 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright (C) 2018 BayLibre, SAS
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 */
6
7#ifndef __MESON_OVERLAY_H
8#define __MESON_OVERLAY_H
9
10#include "meson_drv.h"
11
12int meson_overlay_create(struct meson_drm *priv);
13
14#endif /* __MESON_OVERLAY_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 12c80dfcff59..12a47b4f65a5 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/bitfield.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <drm/drmP.h> 29#include <drm/drmP.h>
29#include <drm/drm_atomic.h> 30#include <drm/drm_atomic.h>
@@ -39,12 +40,50 @@
39#include "meson_canvas.h" 40#include "meson_canvas.h"
40#include "meson_registers.h" 41#include "meson_registers.h"
41 42
43/* OSD_SCI_WH_M1 */
44#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
45#define SCI_WH_M1_H(h) FIELD_PREP(GENMASK(12, 0), h)
46
47/* OSD_SCO_H_START_END */
48/* OSD_SCO_V_START_END */
49#define SCO_HV_START(start) FIELD_PREP(GENMASK(27, 16), start)
50#define SCO_HV_END(end) FIELD_PREP(GENMASK(11, 0), end)
51
52/* OSD_SC_CTRL0 */
53#define SC_CTRL0_PATH_EN BIT(3)
54#define SC_CTRL0_SEL_OSD1 BIT(2)
55
56/* OSD_VSC_CTRL0 */
57#define VSC_BANK_LEN(value) FIELD_PREP(GENMASK(2, 0), value)
58#define VSC_TOP_INI_RCV_NUM(value) FIELD_PREP(GENMASK(6, 3), value)
59#define VSC_TOP_RPT_L0_NUM(value) FIELD_PREP(GENMASK(9, 8), value)
60#define VSC_BOT_INI_RCV_NUM(value) FIELD_PREP(GENMASK(14, 11), value)
61#define VSC_BOT_RPT_L0_NUM(value) FIELD_PREP(GENMASK(17, 16), value)
62#define VSC_PROG_INTERLACE BIT(23)
63#define VSC_VERTICAL_SCALER_EN BIT(24)
64
65/* OSD_VSC_INI_PHASE */
66#define VSC_INI_PHASE_BOT(bottom) FIELD_PREP(GENMASK(31, 16), bottom)
67#define VSC_INI_PHASE_TOP(top) FIELD_PREP(GENMASK(15, 0), top)
68
69/* OSD_HSC_CTRL0 */
70#define HSC_BANK_LENGTH(value) FIELD_PREP(GENMASK(2, 0), value)
71#define HSC_INI_RCV_NUM0(value) FIELD_PREP(GENMASK(6, 3), value)
72#define HSC_RPT_P0_NUM0(value) FIELD_PREP(GENMASK(9, 8), value)
73#define HSC_HORIZ_SCALER_EN BIT(22)
74
75/* VPP_OSD_VSC_PHASE_STEP */
76/* VPP_OSD_HSC_PHASE_STEP */
77#define SC_PHASE_STEP(value) FIELD_PREP(GENMASK(27, 0), value)
78
42struct meson_plane { 79struct meson_plane {
43 struct drm_plane base; 80 struct drm_plane base;
44 struct meson_drm *priv; 81 struct meson_drm *priv;
45}; 82};
46#define to_meson_plane(x) container_of(x, struct meson_plane, base) 83#define to_meson_plane(x) container_of(x, struct meson_plane, base)
47 84
85#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
86
48static int meson_plane_atomic_check(struct drm_plane *plane, 87static int meson_plane_atomic_check(struct drm_plane *plane,
49 struct drm_plane_state *state) 88 struct drm_plane_state *state)
50{ 89{
@@ -57,10 +96,15 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
57 if (IS_ERR(crtc_state)) 96 if (IS_ERR(crtc_state))
58 return PTR_ERR(crtc_state); 97 return PTR_ERR(crtc_state);
59 98
99 /*
100 * Only allow :
101 * - Upscaling up to 5x, vertical and horizontal
102 * - Final coordinates must match crtc size
103 */
60 return drm_atomic_helper_check_plane_state(state, crtc_state, 104 return drm_atomic_helper_check_plane_state(state, crtc_state,
105 FRAC_16_16(1, 5),
61 DRM_PLANE_HELPER_NO_SCALING, 106 DRM_PLANE_HELPER_NO_SCALING,
62 DRM_PLANE_HELPER_NO_SCALING, 107 false, true);
63 true, true);
64} 108}
65 109
66/* Takes a fixed 16.16 number and converts it to integer. */ 110/* Takes a fixed 16.16 number and converts it to integer. */
@@ -74,22 +118,20 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
74{ 118{
75 struct meson_plane *meson_plane = to_meson_plane(plane); 119 struct meson_plane *meson_plane = to_meson_plane(plane);
76 struct drm_plane_state *state = plane->state; 120 struct drm_plane_state *state = plane->state;
77 struct drm_framebuffer *fb = state->fb; 121 struct drm_rect dest = drm_plane_state_dest(state);
78 struct meson_drm *priv = meson_plane->priv; 122 struct meson_drm *priv = meson_plane->priv;
123 struct drm_framebuffer *fb = state->fb;
79 struct drm_gem_cma_object *gem; 124 struct drm_gem_cma_object *gem;
80 struct drm_rect src = {
81 .x1 = (state->src_x),
82 .y1 = (state->src_y),
83 .x2 = (state->src_x + state->src_w),
84 .y2 = (state->src_y + state->src_h),
85 };
86 struct drm_rect dest = {
87 .x1 = state->crtc_x,
88 .y1 = state->crtc_y,
89 .x2 = state->crtc_x + state->crtc_w,
90 .y2 = state->crtc_y + state->crtc_h,
91 };
92 unsigned long flags; 125 unsigned long flags;
126 int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
127 int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
128 int hsc_ini_rcv_num, hsc_ini_rpt_p0_num;
129 int hf_phase_step, vf_phase_step;
130 int src_w, src_h, dst_w, dst_h;
131 int bot_ini_phase;
132 int hf_bank_len;
133 int vf_bank_len;
134 u8 canvas_id_osd1;
93 135
94 /* 136 /*
95 * Update Coordinates 137 * Update Coordinates
@@ -104,8 +146,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
104 (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | 146 (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
105 OSD_BLK0_ENABLE; 147 OSD_BLK0_ENABLE;
106 148
149 if (priv->canvas)
150 canvas_id_osd1 = priv->canvas_id_osd1;
151 else
152 canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
153
107 /* Set up BLK0 to point to the right canvas */ 154 /* Set up BLK0 to point to the right canvas */
108 priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) | 155 priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
109 OSD_ENDIANNESS_LE); 156 OSD_ENDIANNESS_LE);
110 157
111 /* On GXBB, Use the old non-HDR RGB2YUV converter */ 158 /* On GXBB, Use the old non-HDR RGB2YUV converter */
@@ -137,23 +184,115 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
137 break; 184 break;
138 }; 185 };
139 186
187 /* Default scaler parameters */
188 vsc_bot_rcv_num = 0;
189 vsc_bot_rpt_p0_num = 0;
190 hf_bank_len = 4;
191 vf_bank_len = 4;
192
140 if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { 193 if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
141 priv->viu.osd1_interlace = true; 194 vsc_bot_rcv_num = 6;
195 vsc_bot_rpt_p0_num = 2;
196 }
197
198 hsc_ini_rcv_num = hf_bank_len;
199 vsc_ini_rcv_num = vf_bank_len;
200 hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
201 vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
142 202
203 src_w = fixed16_to_int(state->src_w);
204 src_h = fixed16_to_int(state->src_h);
205 dst_w = state->crtc_w;
206 dst_h = state->crtc_h;
207
208 /*
209 * When the output is interlaced, the OSD must switch between
210 * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
211 * at each vsync.
 212 * But the vertical scaler can provide such functionality if it
 213 * is configured for 2:1 scaling with interlace options enabled.
214 */
215 if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
143 dest.y1 /= 2; 216 dest.y1 /= 2;
144 dest.y2 /= 2; 217 dest.y2 /= 2;
145 } else 218 dst_h /= 2;
146 priv->viu.osd1_interlace = false; 219 }
220
221 hf_phase_step = ((src_w << 18) / dst_w) << 6;
222 vf_phase_step = (src_h << 20) / dst_h;
223
224 if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
225 bot_ini_phase = ((vf_phase_step / 2) >> 4);
226 else
227 bot_ini_phase = 0;
228
229 vf_phase_step = (vf_phase_step << 4);
230
231 /* In interlaced mode, scaler is always active */
232 if (src_h != dst_h || src_w != dst_w) {
233 priv->viu.osd_sc_i_wh_m1 = SCI_WH_M1_W(src_w - 1) |
234 SCI_WH_M1_H(src_h - 1);
235 priv->viu.osd_sc_o_h_start_end = SCO_HV_START(dest.x1) |
236 SCO_HV_END(dest.x2 - 1);
237 priv->viu.osd_sc_o_v_start_end = SCO_HV_START(dest.y1) |
238 SCO_HV_END(dest.y2 - 1);
239 /* Enable OSD Scaler */
240 priv->viu.osd_sc_ctrl0 = SC_CTRL0_PATH_EN | SC_CTRL0_SEL_OSD1;
241 } else {
242 priv->viu.osd_sc_i_wh_m1 = 0;
243 priv->viu.osd_sc_o_h_start_end = 0;
244 priv->viu.osd_sc_o_v_start_end = 0;
245 priv->viu.osd_sc_ctrl0 = 0;
246 }
247
248 /* In interlaced mode, vertical scaler is always active */
249 if (src_h != dst_h) {
250 priv->viu.osd_sc_v_ctrl0 =
251 VSC_BANK_LEN(vf_bank_len) |
252 VSC_TOP_INI_RCV_NUM(vsc_ini_rcv_num) |
253 VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
254 VSC_VERTICAL_SCALER_EN;
255
256 if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
257 priv->viu.osd_sc_v_ctrl0 |=
258 VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
259 VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
260 VSC_PROG_INTERLACE;
261
262 priv->viu.osd_sc_v_phase_step = SC_PHASE_STEP(vf_phase_step);
263 priv->viu.osd_sc_v_ini_phase = VSC_INI_PHASE_BOT(bot_ini_phase);
264 } else {
265 priv->viu.osd_sc_v_ctrl0 = 0;
266 priv->viu.osd_sc_v_phase_step = 0;
267 priv->viu.osd_sc_v_ini_phase = 0;
268 }
269
270 /* Horizontal scaler is only used if width does not match */
271 if (src_w != dst_w) {
272 priv->viu.osd_sc_h_ctrl0 =
273 HSC_BANK_LENGTH(hf_bank_len) |
274 HSC_INI_RCV_NUM0(hsc_ini_rcv_num) |
275 HSC_RPT_P0_NUM0(hsc_ini_rpt_p0_num) |
276 HSC_HORIZ_SCALER_EN;
277 priv->viu.osd_sc_h_phase_step = SC_PHASE_STEP(hf_phase_step);
278 priv->viu.osd_sc_h_ini_phase = 0;
279 } else {
280 priv->viu.osd_sc_h_ctrl0 = 0;
281 priv->viu.osd_sc_h_phase_step = 0;
282 priv->viu.osd_sc_h_ini_phase = 0;
283 }
147 284
148 /* 285 /*
149 * The format of these registers is (x2 << 16 | x1), 286 * The format of these registers is (x2 << 16 | x1),
150 * where x2 is exclusive. 287 * where x2 is exclusive.
151 * e.g. +30x1920 would be (1919 << 16) | 30 288 * e.g. +30x1920 would be (1919 << 16) | 30
152 */ 289 */
153 priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) | 290 priv->viu.osd1_blk0_cfg[1] =
154 fixed16_to_int(src.x1); 291 ((fixed16_to_int(state->src.x2) - 1) << 16) |
155 priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) | 292 fixed16_to_int(state->src.x1);
156 fixed16_to_int(src.y1); 293 priv->viu.osd1_blk0_cfg[2] =
294 ((fixed16_to_int(state->src.y2) - 1) << 16) |
295 fixed16_to_int(state->src.y1);
157 priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1; 296 priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
158 priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1; 297 priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
159 298
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index bca87143e548..5c7e02c703bc 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -286,6 +286,7 @@
286#define VIU_OSD1_MATRIX_COEF22_30 0x1a9d 286#define VIU_OSD1_MATRIX_COEF22_30 0x1a9d
287#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e 287#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
288#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f 288#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
289#define VD1_IF0_GEN_REG3 0x1aa7
289#define VIU_OSD1_EOTF_CTL 0x1ad4 290#define VIU_OSD1_EOTF_CTL 0x1ad4
290#define VIU_OSD1_EOTF_COEF00_01 0x1ad5 291#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
291#define VIU_OSD1_EOTF_COEF02_10 0x1ad6 292#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -297,6 +298,7 @@
297#define VIU_OSD1_OETF_CTL 0x1adc 298#define VIU_OSD1_OETF_CTL 0x1adc
298#define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add 299#define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add
299#define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade 300#define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade
301#define AFBC_ENABLE 0x1ae0
300 302
301/* vpp */ 303/* vpp */
302#define VPP_DUMMY_DATA 0x1d00 304#define VPP_DUMMY_DATA 0x1d00
@@ -349,6 +351,7 @@
349#define VPP_VD2_PREBLEND BIT(15) 351#define VPP_VD2_PREBLEND BIT(15)
350#define VPP_OSD1_PREBLEND BIT(16) 352#define VPP_OSD1_PREBLEND BIT(16)
351#define VPP_OSD2_PREBLEND BIT(17) 353#define VPP_OSD2_PREBLEND BIT(17)
354#define VPP_COLOR_MNG_ENABLE BIT(28)
352#define VPP_OFIFO_SIZE 0x1d27 355#define VPP_OFIFO_SIZE 0x1d27
353#define VPP_FIFO_STATUS 0x1d28 356#define VPP_FIFO_STATUS 0x1d28
354#define VPP_SMOKE_CTRL 0x1d29 357#define VPP_SMOKE_CTRL 0x1d29
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 6bcfa527c180..2dffb987ec65 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -329,6 +329,21 @@ void meson_viu_init(struct meson_drm *priv)
329 0xff << OSD_REPLACE_SHIFT, 329 0xff << OSD_REPLACE_SHIFT,
330 priv->io_base + _REG(VIU_OSD2_CTRL_STAT2)); 330 priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
331 331
332 /* Disable VD1 AFBC */
333 /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */
334 writel_bits_relaxed(0x7 << 16, 0,
335 priv->io_base + _REG(VIU_MISC_CTRL0));
336 /* afbc vd1 set=0 */
337 writel_bits_relaxed(BIT(20), 0,
338 priv->io_base + _REG(VIU_MISC_CTRL0));
339 writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
340
341 writel_relaxed(0x00FF00C0,
342 priv->io_base + _REG(VD1_IF0_LUMA_FIFO_SIZE));
343 writel_relaxed(0x00FF00C0,
344 priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
345
346
332 priv->viu.osd1_enabled = false; 347 priv->viu.osd1_enabled = false;
333 priv->viu.osd1_commit = false; 348 priv->viu.osd1_commit = false;
334 priv->viu.osd1_interlace = false; 349 priv->viu.osd1_interlace = false;
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 27356f81a0ab..f9efb431e953 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -51,52 +51,6 @@ void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
51 writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL)); 51 writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
52} 52}
53 53
54/*
55 * When the output is interlaced, the OSD must switch between
56 * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
57 * at each vsync.
58 * But the vertical scaler can provide such funtionnality if
59 * is configured for 2:1 scaling with interlace options enabled.
60 */
61void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
62 struct drm_rect *input)
63{
64 writel_relaxed(BIT(3) /* Enable scaler */ |
65 BIT(2), /* Select OSD1 */
66 priv->io_base + _REG(VPP_OSD_SC_CTRL0));
67
68 writel_relaxed(((drm_rect_width(input) - 1) << 16) |
69 (drm_rect_height(input) - 1),
70 priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
71 /* 2:1 scaling */
72 writel_relaxed(((input->x1) << 16) | (input->x2),
73 priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
74 writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1),
75 priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
76
77 /* 2:1 scaling values */
78 writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
79 writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
80
81 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
82
83 writel_relaxed((4 << 0) /* osd_vsc_bank_length */ |
84 (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ |
85 (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ |
86 (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ |
87 (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ |
88 BIT(23) /* osd_prog_interlace */ |
89 BIT(24), /* Enable vertical scaler */
90 priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
91}
92
93void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv)
94{
95 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
96 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
97 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
98}
99
100static unsigned int vpp_filter_coefs_4point_bspline[] = { 54static unsigned int vpp_filter_coefs_4point_bspline[] = {
101 0x15561500, 0x14561600, 0x13561700, 0x12561800, 55 0x15561500, 0x14561600, 0x13561700, 0x12561800,
102 0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00, 56 0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
@@ -122,6 +76,31 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
122 priv->io_base + _REG(VPP_OSD_SCALE_COEF)); 76 priv->io_base + _REG(VPP_OSD_SCALE_COEF));
123} 77}
124 78
79static const uint32_t vpp_filter_coefs_bicubic[] = {
80 0x00800000, 0x007f0100, 0xff7f0200, 0xfe7f0300,
81 0xfd7e0500, 0xfc7e0600, 0xfb7d0800, 0xfb7c0900,
82 0xfa7b0b00, 0xfa7a0dff, 0xf9790fff, 0xf97711ff,
83 0xf87613ff, 0xf87416fe, 0xf87218fe, 0xf8701afe,
84 0xf76f1dfd, 0xf76d1ffd, 0xf76b21fd, 0xf76824fd,
85 0xf76627fc, 0xf76429fc, 0xf7612cfc, 0xf75f2ffb,
86 0xf75d31fb, 0xf75a34fb, 0xf75837fa, 0xf7553afa,
87 0xf8523cfa, 0xf8503ff9, 0xf84d42f9, 0xf84a45f9,
88 0xf84848f8
89};
90
91static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
92 const unsigned int *coefs,
93 bool is_horizontal)
94{
95 int i;
96
97 writel_relaxed(is_horizontal ? BIT(8) : 0,
98 priv->io_base + _REG(VPP_SCALE_COEF_IDX));
99 for (i = 0; i < 33; i++)
100 writel_relaxed(coefs[i],
101 priv->io_base + _REG(VPP_SCALE_COEF));
102}
103
125void meson_vpp_init(struct meson_drm *priv) 104void meson_vpp_init(struct meson_drm *priv)
126{ 105{
127 /* set dummy data default YUV black */ 106 /* set dummy data default YUV black */
@@ -150,17 +129,34 @@ void meson_vpp_init(struct meson_drm *priv)
150 129
151 /* Force all planes off */ 130 /* Force all planes off */
152 writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND | 131 writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
153 VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0, 132 VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
133 VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
154 priv->io_base + _REG(VPP_MISC)); 134 priv->io_base + _REG(VPP_MISC));
155 135
136 /* Setup default VD settings */
137 writel_relaxed(4096,
138 priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
139 writel_relaxed(4096,
140 priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
141
156 /* Disable Scalers */ 142 /* Disable Scalers */
157 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0)); 143 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
158 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 144 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
159 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0)); 145 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
146 writel_relaxed(4 | (4 << 8) | BIT(15),
147 priv->io_base + _REG(VPP_SC_MISC));
148
149 writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL));
160 150
161 /* Write in the proper filter coefficients. */ 151 /* Write in the proper filter coefficients. */
162 meson_vpp_write_scaling_filter_coefs(priv, 152 meson_vpp_write_scaling_filter_coefs(priv,
163 vpp_filter_coefs_4point_bspline, false); 153 vpp_filter_coefs_4point_bspline, false);
164 meson_vpp_write_scaling_filter_coefs(priv, 154 meson_vpp_write_scaling_filter_coefs(priv,
165 vpp_filter_coefs_4point_bspline, true); 155 vpp_filter_coefs_4point_bspline, true);
156
157 /* Write the VD proper filter coefficients. */
158 meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
159 false);
160 meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
161 true);
166} 162}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index fa8bfa7c492d..33c22ee036f8 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -96,7 +96,7 @@ static int s6d16d0_prepare(struct drm_panel *panel)
96 ret = mipi_dsi_dcs_set_tear_on(dsi, 96 ret = mipi_dsi_dcs_set_tear_on(dsi,
97 MIPI_DSI_DCS_TEAR_MODE_VBLANK); 97 MIPI_DSI_DCS_TEAR_MODE_VBLANK);
98 if (ret) { 98 if (ret) {
99 DRM_DEV_ERROR(s6->dev, "failed to enble vblank TE (%d)\n", 99 DRM_DEV_ERROR(s6->dev, "failed to enable vblank TE (%d)\n",
100 ret); 100 ret);
101 return ret; 101 return ret;
102 } 102 }
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 2ce9a8dcec84..ce0b9c40fc21 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -622,10 +622,14 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
622 if (ret) 622 if (ret)
623 goto out_kunmap; 623 goto out_kunmap;
624 624
625 ret = qxl_release_reserve_list(release, true); 625 ret = qxl_bo_pin(cursor_bo);
626 if (ret) 626 if (ret)
627 goto out_free_bo; 627 goto out_free_bo;
628 628
629 ret = qxl_release_reserve_list(release, true);
630 if (ret)
631 goto out_unpin;
632
629 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 633 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
630 if (ret) 634 if (ret)
631 goto out_backoff; 635 goto out_backoff;
@@ -670,15 +674,17 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
670 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 674 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
671 qxl_release_fence_buffer_objects(release); 675 qxl_release_fence_buffer_objects(release);
672 676
673 if (old_cursor_bo) 677 if (old_cursor_bo != NULL)
674 qxl_bo_unref(&old_cursor_bo); 678 qxl_bo_unpin(old_cursor_bo);
675 679 qxl_bo_unref(&old_cursor_bo);
676 qxl_bo_unref(&cursor_bo); 680 qxl_bo_unref(&cursor_bo);
677 681
678 return; 682 return;
679 683
680out_backoff: 684out_backoff:
681 qxl_release_backoff_reserve_list(release); 685 qxl_release_backoff_reserve_list(release);
686out_unpin:
687 qxl_bo_unpin(cursor_bo);
682out_free_bo: 688out_free_bo:
683 qxl_bo_unref(&cursor_bo); 689 qxl_bo_unref(&cursor_bo);
684out_kunmap: 690out_kunmap:
@@ -757,7 +763,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
757 } 763 }
758 } 764 }
759 765
760 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); 766 ret = qxl_bo_pin(user_bo);
761 if (ret) 767 if (ret)
762 return ret; 768 return ret;
763 769
@@ -1104,7 +1110,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
1104 } 1110 }
1105 qdev->monitors_config_bo = gem_to_qxl_bo(gobj); 1111 qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
1106 1112
1107 ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL); 1113 ret = qxl_bo_pin(qdev->monitors_config_bo);
1108 if (ret) 1114 if (ret)
1109 return ret; 1115 return ret;
1110 1116
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index c34e45662965..c408bb83c7a9 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -247,8 +247,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
247 qxl_release_fence_buffer_objects(release); 247 qxl_release_fence_buffer_objects(release);
248 248
249out_free_palette: 249out_free_palette:
250 if (palette_bo) 250 qxl_bo_unref(&palette_bo);
251 qxl_bo_unref(&palette_bo);
252out_free_image: 251out_free_image:
253 qxl_image_free_objects(qdev, dimage); 252 qxl_image_free_objects(qdev, dimage);
254out_free_drawable: 253out_free_drawable:
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 7e047c985ea6..a819d24225d2 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -111,7 +111,7 @@ static int qxlfb_create_pinned_object(struct qxl_device *qdev,
111 qbo->surf.stride = mode_cmd->pitches[0]; 111 qbo->surf.stride = mode_cmd->pitches[0];
112 qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB; 112 qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
113 113
114 ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL); 114 ret = qxl_bo_pin(qbo);
115 if (ret) { 115 if (ret) {
116 goto out_unref; 116 goto out_unref;
117 } 117 }
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index f6975d7c7d10..15238a413f9d 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -313,10 +313,8 @@ error:
313 313
314void qxl_device_fini(struct qxl_device *qdev) 314void qxl_device_fini(struct qxl_device *qdev)
315{ 315{
316 if (qdev->current_release_bo[0]) 316 qxl_bo_unref(&qdev->current_release_bo[0]);
317 qxl_bo_unref(&qdev->current_release_bo[0]); 317 qxl_bo_unref(&qdev->current_release_bo[1]);
318 if (qdev->current_release_bo[1])
319 qxl_bo_unref(&qdev->current_release_bo[1]);
320 flush_work(&qdev->gc_work); 318 flush_work(&qdev->gc_work);
321 qxl_ring_free(qdev->command_ring); 319 qxl_ring_free(qdev->command_ring);
322 qxl_ring_free(qdev->cursor_ring); 320 qxl_ring_free(qdev->cursor_ring);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index f67a3c535afb..91f3bbc73ecc 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -186,13 +186,9 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
186 struct qxl_bo *bo, void *pmap) 186 struct qxl_bo *bo, void *pmap)
187{ 187{
188 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; 188 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
189 struct io_mapping *map;
190 189
191 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 190 if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
192 map = qdev->vram_mapping; 191 (bo->tbo.mem.mem_type != TTM_PL_PRIV))
193 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
194 map = qdev->surface_mapping;
195 else
196 goto fallback; 192 goto fallback;
197 193
198 io_mapping_unmap_atomic(pmap); 194 io_mapping_unmap_atomic(pmap);
@@ -200,7 +196,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
200 (void) ttm_mem_io_lock(man, false); 196 (void) ttm_mem_io_lock(man, false);
201 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); 197 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
202 ttm_mem_io_unlock(man); 198 ttm_mem_io_unlock(man);
203 return ; 199 return;
204 fallback: 200 fallback:
205 qxl_bo_kunmap(bo); 201 qxl_bo_kunmap(bo);
206} 202}
@@ -220,7 +216,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
220 return bo; 216 return bo;
221} 217}
222 218
223static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 219static int __qxl_bo_pin(struct qxl_bo *bo)
224{ 220{
225 struct ttm_operation_ctx ctx = { false, false }; 221 struct ttm_operation_ctx ctx = { false, false };
226 struct drm_device *ddev = bo->gem_base.dev; 222 struct drm_device *ddev = bo->gem_base.dev;
@@ -228,16 +224,12 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
228 224
229 if (bo->pin_count) { 225 if (bo->pin_count) {
230 bo->pin_count++; 226 bo->pin_count++;
231 if (gpu_addr)
232 *gpu_addr = qxl_bo_gpu_offset(bo);
233 return 0; 227 return 0;
234 } 228 }
235 qxl_ttm_placement_from_domain(bo, domain, true); 229 qxl_ttm_placement_from_domain(bo, bo->type, true);
236 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 230 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
237 if (likely(r == 0)) { 231 if (likely(r == 0)) {
238 bo->pin_count = 1; 232 bo->pin_count = 1;
239 if (gpu_addr != NULL)
240 *gpu_addr = qxl_bo_gpu_offset(bo);
241 } 233 }
242 if (unlikely(r != 0)) 234 if (unlikely(r != 0))
243 dev_err(ddev->dev, "%p pin failed\n", bo); 235 dev_err(ddev->dev, "%p pin failed\n", bo);
@@ -270,7 +262,7 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
270 * beforehand, use the internal version directly __qxl_bo_pin. 262 * beforehand, use the internal version directly __qxl_bo_pin.
271 * 263 *
272 */ 264 */
273int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 265int qxl_bo_pin(struct qxl_bo *bo)
274{ 266{
275 int r; 267 int r;
276 268
@@ -278,7 +270,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
278 if (r) 270 if (r)
279 return r; 271 return r;
280 272
281 r = __qxl_bo_pin(bo, bo->type, NULL); 273 r = __qxl_bo_pin(bo);
282 qxl_bo_unreserve(bo); 274 qxl_bo_unreserve(bo);
283 return r; 275 return r;
284} 276}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index b40fc9a10406..255b914e2a7b 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -97,7 +97,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int pa
97void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); 97void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
98extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); 98extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
99extern void qxl_bo_unref(struct qxl_bo **bo); 99extern void qxl_bo_unref(struct qxl_bo **bo);
100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); 100extern int qxl_bo_pin(struct qxl_bo *bo);
101extern int qxl_bo_unpin(struct qxl_bo *bo); 101extern int qxl_bo_unpin(struct qxl_bo *bo);
102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); 102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); 103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 3813ec198900..0a693fede05e 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -427,8 +427,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
427 struct ttm_buffer_object *bo; 427 struct ttm_buffer_object *bo;
428 struct ttm_bo_global *glob; 428 struct ttm_bo_global *glob;
429 struct ttm_bo_device *bdev; 429 struct ttm_bo_device *bdev;
430 struct ttm_bo_driver *driver;
431 struct qxl_bo *qbo;
432 struct ttm_validate_buffer *entry; 430 struct ttm_validate_buffer *entry;
433 struct qxl_device *qdev; 431 struct qxl_device *qdev;
434 432
@@ -449,14 +447,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
449 release->id | 0xf0000000, release->base.seqno); 447 release->id | 0xf0000000, release->base.seqno);
450 trace_dma_fence_emit(&release->base); 448 trace_dma_fence_emit(&release->base);
451 449
452 driver = bdev->driver;
453 glob = bdev->glob; 450 glob = bdev->glob;
454 451
455 spin_lock(&glob->lru_lock); 452 spin_lock(&glob->lru_lock);
456 453
457 list_for_each_entry(entry, &release->bos, head) { 454 list_for_each_entry(entry, &release->bos, head) {
458 bo = entry->bo; 455 bo = entry->bo;
459 qbo = to_qxl_bo(bo);
460 456
461 reservation_object_add_shared_fence(bo->resv, &release->base); 457 reservation_object_add_shared_fence(bo->resv, &release->base);
462 ttm_bo_add_to_lru(bo); 458 ttm_bo_add_to_lru(bo);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 3105965fc260..5a485489a1e2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
147} 147}
148 148
149static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp, 149static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
150 u8 *buff, u8 buff_size) 150 u8 *buff, u16 buff_size)
151{ 151{
152 u32 i; 152 u32 i;
153 int ret; 153 int ret;
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 61c2379fba87..ed76e52eb213 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -252,10 +252,8 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
252 struct sti_compositor *compo; 252 struct sti_compositor *compo;
253 struct drm_crtc *crtc = data; 253 struct drm_crtc *crtc = data;
254 struct sti_mixer *mixer; 254 struct sti_mixer *mixer;
255 struct sti_private *priv;
256 unsigned int pipe; 255 unsigned int pipe;
257 256
258 priv = crtc->dev->dev_private;
259 pipe = drm_crtc_index(crtc); 257 pipe = drm_crtc_index(crtc);
260 compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]); 258 compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
261 mixer = compo->mixer[pipe]; 259 mixer = compo->mixer[pipe];
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index f949287d926c..0420f5c978b9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -478,8 +478,11 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
478} 478}
479 479
480static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, 480static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
481 const struct drm_encoder *encoder,
481 const struct drm_display_mode *mode) 482 const struct drm_display_mode *mode)
482{ 483{
484 struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
485 struct drm_display_info display_info = connector->display_info;
483 unsigned int bp, hsync, vsync; 486 unsigned int bp, hsync, vsync;
484 u8 clk_delay; 487 u8 clk_delay;
485 u32 val = 0; 488 u32 val = 0;
@@ -491,8 +494,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
491 sun4i_tcon0_mode_set_common(tcon, mode); 494 sun4i_tcon0_mode_set_common(tcon, mode);
492 495
493 /* Set dithering if needed */ 496 /* Set dithering if needed */
494 if (tcon->panel) 497 sun4i_tcon0_mode_set_dithering(tcon, connector);
495 sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
496 498
497 /* Adjust clock delay */ 499 /* Adjust clock delay */
498 clk_delay = sun4i_tcon_get_clk_delay(mode, 0); 500 clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -541,6 +543,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
541 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 543 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
542 val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; 544 val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
543 545
546 if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
547 val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
548
544 /* 549 /*
545 * On A20 and similar SoCs, the only way to achieve Positive Edge 550 * On A20 and similar SoCs, the only way to achieve Positive Edge
546 * (Rising Edge), is setting dclk clock phase to 2/3(240°). 551 * (Rising Edge), is setting dclk clock phase to 2/3(240°).
@@ -556,20 +561,16 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
556 * Following code is a way to avoid quirks all around TCON 561 * Following code is a way to avoid quirks all around TCON
557 * and DOTCLOCK drivers. 562 * and DOTCLOCK drivers.
558 */ 563 */
559 if (tcon->panel) { 564 if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
560 struct drm_panel *panel = tcon->panel; 565 clk_set_phase(tcon->dclk, 240);
561 struct drm_connector *connector = panel->connector;
562 struct drm_display_info display_info = connector->display_info;
563 566
564 if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) 567 if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
565 clk_set_phase(tcon->dclk, 240); 568 clk_set_phase(tcon->dclk, 0);
566
567 if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
568 clk_set_phase(tcon->dclk, 0);
569 }
570 569
571 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, 570 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
572 SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE, 571 SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
572 SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
573 SUN4I_TCON0_IO_POL_DE_NEGATIVE,
573 val); 574 val);
574 575
575 /* Map output pins to channel 0 */ 576 /* Map output pins to channel 0 */
@@ -684,7 +685,7 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
684 sun4i_tcon0_mode_set_lvds(tcon, encoder, mode); 685 sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
685 break; 686 break;
686 case DRM_MODE_ENCODER_NONE: 687 case DRM_MODE_ENCODER_NONE:
687 sun4i_tcon0_mode_set_rgb(tcon, mode); 688 sun4i_tcon0_mode_set_rgb(tcon, encoder, mode);
688 sun4i_tcon_set_mux(tcon, 0, encoder); 689 sun4i_tcon_set_mux(tcon, 0, encoder);
689 break; 690 break;
690 case DRM_MODE_ENCODER_TVDAC: 691 case DRM_MODE_ENCODER_TVDAC:
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 3d492c8be1fc..b5214d71610f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -116,6 +116,7 @@
116 116
117#define SUN4I_TCON0_IO_POL_REG 0x88 117#define SUN4I_TCON0_IO_POL_REG 0x88
118#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28) 118#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
119#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
119#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25) 120#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
120#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24) 121#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
121 122
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 9af51d982a33..01a6f2d42440 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -36,77 +36,6 @@
36 * and registers the DRM device using devm_tinydrm_register(). 36 * and registers the DRM device using devm_tinydrm_register().
37 */ 37 */
38 38
39/**
40 * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
41 * another driver's scatter/gather table of pinned pages
42 * @drm: DRM device to import into
43 * @attach: DMA-BUF attachment
44 * @sgt: Scatter/gather table of pinned pages
45 *
46 * This function imports a scatter/gather table exported via DMA-BUF by
47 * another driver using drm_gem_cma_prime_import_sg_table(). It sets the
48 * kernel virtual address on the CMA object. Drivers should use this as their
49 * &drm_driver->gem_prime_import_sg_table callback if they need the virtual
50 * address. tinydrm_gem_cma_free_object() should be used in combination with
51 * this function.
52 *
53 * Returns:
54 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
55 * error code on failure.
56 */
57struct drm_gem_object *
58tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
59 struct dma_buf_attachment *attach,
60 struct sg_table *sgt)
61{
62 struct drm_gem_cma_object *cma_obj;
63 struct drm_gem_object *obj;
64 void *vaddr;
65
66 vaddr = dma_buf_vmap(attach->dmabuf);
67 if (!vaddr) {
68 DRM_ERROR("Failed to vmap PRIME buffer\n");
69 return ERR_PTR(-ENOMEM);
70 }
71
72 obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt);
73 if (IS_ERR(obj)) {
74 dma_buf_vunmap(attach->dmabuf, vaddr);
75 return obj;
76 }
77
78 cma_obj = to_drm_gem_cma_obj(obj);
79 cma_obj->vaddr = vaddr;
80
81 return obj;
82}
83EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
84
85/**
86 * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM
87 * object
88 * @gem_obj: GEM object to free
89 *
90 * This function frees the backing memory of the CMA GEM object, cleans up the
91 * GEM object state and frees the memory used to store the object itself using
92 * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
93 * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
94 * can use this as their &drm_driver->gem_free_object_unlocked callback.
95 */
96void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
97{
98 if (gem_obj->import_attach) {
99 struct drm_gem_cma_object *cma_obj;
100
101 cma_obj = to_drm_gem_cma_obj(gem_obj);
102 dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
103 cma_obj->vaddr = NULL;
104 }
105
106 drm_gem_cma_free_object(gem_obj);
107}
108EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object);
109
110static struct drm_framebuffer * 39static struct drm_framebuffer *
111tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv, 40tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
112 const struct drm_mode_fb_cmd2 *mode_cmd) 41 const struct drm_mode_fb_cmd2 *mode_cmd)
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index dcd390163a4a..bf6bfbc5d412 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -9,12 +9,18 @@
9 9
10#include <linux/backlight.h> 10#include <linux/backlight.h>
11#include <linux/dma-buf.h> 11#include <linux/dma-buf.h>
12#include <linux/module.h>
12#include <linux/pm.h> 13#include <linux/pm.h>
13#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
14#include <linux/swab.h> 15#include <linux/swab.h>
15 16
17#include <drm/drm_device.h>
18#include <drm/drm_drv.h>
19#include <drm/drm_fourcc.h>
20#include <drm/drm_print.h>
16#include <drm/tinydrm/tinydrm.h> 21#include <drm/tinydrm/tinydrm.h>
17#include <drm/tinydrm/tinydrm-helpers.h> 22#include <drm/tinydrm/tinydrm-helpers.h>
23#include <uapi/drm/drm.h>
18 24
19static unsigned int spi_max; 25static unsigned int spi_max;
20module_param(spi_max, uint, 0400); 26module_param(spi_max, uint, 0400);
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
index c3e51c2baebc..81a2bbeb25d4 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -16,7 +16,7 @@
16#include <linux/property.h> 16#include <linux/property.h>
17#include <linux/spi/spi.h> 17#include <linux/spi/spi.h>
18 18
19#include <drm/drm_fb_helper.h> 19#include <drm/drm_gem_cma_helper.h>
20#include <drm/drm_gem_framebuffer_helper.h> 20#include <drm/drm_gem_framebuffer_helper.h>
21#include <drm/drm_modeset_helper.h> 21#include <drm/drm_modeset_helper.h>
22#include <drm/tinydrm/mipi-dbi.h> 22#include <drm/tinydrm/mipi-dbi.h>
@@ -188,7 +188,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
188static struct drm_driver hx8357d_driver = { 188static struct drm_driver hx8357d_driver = {
189 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, 189 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
190 .fops = &hx8357d_fops, 190 .fops = &hx8357d_fops,
191 TINYDRM_GEM_DRIVER_OPS, 191 DRM_GEM_CMA_VMAP_DRIVER_OPS,
192 .debugfs_init = mipi_dbi_debugfs_init, 192 .debugfs_init = mipi_dbi_debugfs_init,
193 .name = "hx8357d", 193 .name = "hx8357d",
194 .desc = "HX8357D", 194 .desc = "HX8357D",
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 455fefe012f5..78f7c2d1b449 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,7 +20,8 @@
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21#include <video/mipi_display.h> 21#include <video/mipi_display.h>
22 22
23#include <drm/drm_fb_helper.h> 23#include <drm/drm_fb_cma_helper.h>
24#include <drm/drm_gem_cma_helper.h>
24#include <drm/drm_gem_framebuffer_helper.h> 25#include <drm/drm_gem_framebuffer_helper.h>
25#include <drm/tinydrm/mipi-dbi.h> 26#include <drm/tinydrm/mipi-dbi.h>
26#include <drm/tinydrm/tinydrm-helpers.h> 27#include <drm/tinydrm/tinydrm-helpers.h>
@@ -367,7 +368,7 @@ static struct drm_driver ili9225_driver = {
367 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 368 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
368 DRIVER_ATOMIC, 369 DRIVER_ATOMIC,
369 .fops = &ili9225_fops, 370 .fops = &ili9225_fops,
370 TINYDRM_GEM_DRIVER_OPS, 371 DRM_GEM_CMA_VMAP_DRIVER_OPS,
371 .name = "ili9225", 372 .name = "ili9225",
372 .desc = "Ilitek ILI9225", 373 .desc = "Ilitek ILI9225",
373 .date = "20171106", 374 .date = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 6701037749a7..51395bdc6ca2 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,7 @@
15#include <linux/property.h> 15#include <linux/property.h>
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17 17
18#include <drm/drm_fb_helper.h> 18#include <drm/drm_gem_cma_helper.h>
19#include <drm/drm_gem_framebuffer_helper.h> 19#include <drm/drm_gem_framebuffer_helper.h>
20#include <drm/drm_modeset_helper.h> 20#include <drm/drm_modeset_helper.h>
21#include <drm/tinydrm/mipi-dbi.h> 21#include <drm/tinydrm/mipi-dbi.h>
@@ -144,7 +144,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
144static struct drm_driver ili9341_driver = { 144static struct drm_driver ili9341_driver = {
145 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, 145 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
146 .fops = &ili9341_fops, 146 .fops = &ili9341_fops,
147 TINYDRM_GEM_DRIVER_OPS, 147 DRM_GEM_CMA_VMAP_DRIVER_OPS,
148 .debugfs_init = mipi_dbi_debugfs_init, 148 .debugfs_init = mipi_dbi_debugfs_init,
149 .name = "ili9341", 149 .name = "ili9341",
150 .desc = "Ilitek ILI9341", 150 .desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d7bb4c5e6657..3fa62e77c30b 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,9 +17,9 @@
17#include <linux/regulator/consumer.h> 17#include <linux/regulator/consumer.h>
18#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
19 19
20#include <drm/drm_fb_helper.h> 20#include <drm/drm_gem_cma_helper.h>
21#include <drm/drm_modeset_helper.h>
22#include <drm/drm_gem_framebuffer_helper.h> 21#include <drm/drm_gem_framebuffer_helper.h>
22#include <drm/drm_modeset_helper.h>
23#include <drm/tinydrm/mipi-dbi.h> 23#include <drm/tinydrm/mipi-dbi.h>
24#include <drm/tinydrm/tinydrm-helpers.h> 24#include <drm/tinydrm/tinydrm-helpers.h>
25#include <video/mipi_display.h> 25#include <video/mipi_display.h>
@@ -153,7 +153,7 @@ static struct drm_driver mi0283qt_driver = {
153 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 153 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
154 DRIVER_ATOMIC, 154 DRIVER_ATOMIC,
155 .fops = &mi0283qt_fops, 155 .fops = &mi0283qt_fops,
156 TINYDRM_GEM_DRIVER_OPS, 156 DRM_GEM_CMA_VMAP_DRIVER_OPS,
157 .debugfs_init = mipi_dbi_debugfs_init, 157 .debugfs_init = mipi_dbi_debugfs_init,
158 .name = "mi0283qt", 158 .name = "mi0283qt",
159 .desc = "Multi-Inno MI0283QT", 159 .desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 1bb870021f6e..3a05e56f9b0d 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -9,15 +9,19 @@
9 * (at your option) any later version. 9 * (at your option) any later version.
10 */ 10 */
11 11
12#include <drm/drm_gem_framebuffer_helper.h>
13#include <drm/tinydrm/mipi-dbi.h>
14#include <drm/tinydrm/tinydrm-helpers.h>
15#include <linux/debugfs.h> 12#include <linux/debugfs.h>
16#include <linux/dma-buf.h> 13#include <linux/dma-buf.h>
17#include <linux/gpio/consumer.h> 14#include <linux/gpio/consumer.h>
18#include <linux/module.h> 15#include <linux/module.h>
19#include <linux/regulator/consumer.h> 16#include <linux/regulator/consumer.h>
20#include <linux/spi/spi.h> 17#include <linux/spi/spi.h>
18
19#include <drm/drm_fb_cma_helper.h>
20#include <drm/drm_gem_cma_helper.h>
21#include <drm/drm_gem_framebuffer_helper.h>
22#include <drm/tinydrm/mipi-dbi.h>
23#include <drm/tinydrm/tinydrm-helpers.h>
24#include <uapi/drm/drm.h>
21#include <video/mipi_display.h> 25#include <video/mipi_display.h>
22 26
23#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */ 27#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 50a1d4216ce7..07f45a008a0f 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,6 +26,8 @@
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/thermal.h> 27#include <linux/thermal.h>
28 28
29#include <drm/drm_fb_cma_helper.h>
30#include <drm/drm_gem_cma_helper.h>
29#include <drm/drm_gem_framebuffer_helper.h> 31#include <drm/drm_gem_framebuffer_helper.h>
30#include <drm/tinydrm/tinydrm.h> 32#include <drm/tinydrm/tinydrm.h>
31#include <drm/tinydrm/tinydrm-helpers.h> 33#include <drm/tinydrm/tinydrm-helpers.h>
@@ -882,7 +884,7 @@ static struct drm_driver repaper_driver = {
882 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 884 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
883 DRIVER_ATOMIC, 885 DRIVER_ATOMIC,
884 .fops = &repaper_fops, 886 .fops = &repaper_fops,
885 TINYDRM_GEM_DRIVER_OPS, 887 DRM_GEM_CMA_VMAP_DRIVER_OPS,
886 .name = "repaper", 888 .name = "repaper",
887 .desc = "Pervasive Displays RePaper e-ink panels", 889 .desc = "Pervasive Displays RePaper e-ink panels",
888 .date = "20170405", 890 .date = "20170405",
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 2fcbc3067d71..a6a8a1081b73 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,7 +17,8 @@
17#include <linux/spi/spi.h> 17#include <linux/spi/spi.h>
18#include <video/mipi_display.h> 18#include <video/mipi_display.h>
19 19
20#include <drm/drm_fb_helper.h> 20#include <drm/drm_fb_cma_helper.h>
21#include <drm/drm_gem_cma_helper.h>
21#include <drm/drm_gem_framebuffer_helper.h> 22#include <drm/drm_gem_framebuffer_helper.h>
22#include <drm/tinydrm/mipi-dbi.h> 23#include <drm/tinydrm/mipi-dbi.h>
23#include <drm/tinydrm/tinydrm-helpers.h> 24#include <drm/tinydrm/tinydrm-helpers.h>
@@ -303,7 +304,7 @@ static struct drm_driver st7586_driver = {
303 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 304 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
304 DRIVER_ATOMIC, 305 DRIVER_ATOMIC,
305 .fops = &st7586_fops, 306 .fops = &st7586_fops,
306 TINYDRM_GEM_DRIVER_OPS, 307 DRM_GEM_CMA_VMAP_DRIVER_OPS,
307 .debugfs_init = mipi_dbi_debugfs_init, 308 .debugfs_init = mipi_dbi_debugfs_init,
308 .name = "st7586", 309 .name = "st7586",
309 .desc = "Sitronix ST7586", 310 .desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3081bc57c116..b39779e0dcd8 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,7 @@
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <video/mipi_display.h> 15#include <video/mipi_display.h>
16 16
17#include <drm/drm_fb_helper.h> 17#include <drm/drm_gem_cma_helper.h>
18#include <drm/drm_gem_framebuffer_helper.h> 18#include <drm/drm_gem_framebuffer_helper.h>
19#include <drm/tinydrm/mipi-dbi.h> 19#include <drm/tinydrm/mipi-dbi.h>
20#include <drm/tinydrm/tinydrm-helpers.h> 20#include <drm/tinydrm/tinydrm-helpers.h>
@@ -119,7 +119,7 @@ static struct drm_driver st7735r_driver = {
119 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 119 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
120 DRIVER_ATOMIC, 120 DRIVER_ATOMIC,
121 .fops = &st7735r_fops, 121 .fops = &st7735r_fops,
122 TINYDRM_GEM_DRIVER_OPS, 122 DRM_GEM_CMA_VMAP_DRIVER_OPS,
123 .debugfs_init = mipi_dbi_debugfs_init, 123 .debugfs_init = mipi_dbi_debugfs_init,
124 .name = "st7735r", 124 .name = "st7735r",
125 .desc = "Sitronix ST7735R", 125 .desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 98fae4daa08c..1728fb7d00ba 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -129,12 +129,12 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
129 129
130static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst) 130static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
131{ 131{
132 if (dst > src) 132 if (dst == src)
133 return VC4_SCALING_NONE;
134 if (3 * dst >= 2 * src)
133 return VC4_SCALING_PPF; 135 return VC4_SCALING_PPF;
134 else if (dst < src)
135 return VC4_SCALING_TPZ;
136 else 136 else
137 return VC4_SCALING_NONE; 137 return VC4_SCALING_TPZ;
138} 138}
139 139
140static bool plane_enabled(struct drm_plane_state *state) 140static bool plane_enabled(struct drm_plane_state *state)
@@ -341,12 +341,14 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
341 vc4_get_scaling_mode(vc4_state->src_h[1], 341 vc4_get_scaling_mode(vc4_state->src_h[1],
342 vc4_state->crtc_h); 342 vc4_state->crtc_h);
343 343
344 /* YUV conversion requires that horizontal scaling be enabled, 344 /* YUV conversion requires that horizontal scaling be enabled
345 * even on a plane that's otherwise 1:1. Looks like only PPF 345 * on the UV plane even if vc4_get_scaling_mode() returned
346 * works in that case, so let's pick that one. 346 * VC4_SCALING_NONE (which can happen when the down-scaling
347 * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
348 * case.
347 */ 349 */
348 if (vc4_state->is_unity) 350 if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
349 vc4_state->x_scaling[0] = VC4_SCALING_PPF; 351 vc4_state->x_scaling[1] = VC4_SCALING_PPF;
350 } else { 352 } else {
351 vc4_state->is_yuv = false; 353 vc4_state->is_yuv = false;
352 vc4_state->x_scaling[1] = VC4_SCALING_NONE; 354 vc4_state->x_scaling[1] = VC4_SCALING_NONE;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 2a8aaea72af3..9db568054d66 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -47,8 +47,8 @@
47#define DRIVER_DATE "0" 47#define DRIVER_DATE "0"
48 48
49#define DRIVER_MAJOR 0 49#define DRIVER_MAJOR 0
50#define DRIVER_MINOR 0 50#define DRIVER_MINOR 1
51#define DRIVER_PATCHLEVEL 1 51#define DRIVER_PATCHLEVEL 0
52 52
53/* virtgpu_drm_bus.c */ 53/* virtgpu_drm_bus.c */
54int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); 54int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
@@ -131,6 +131,7 @@ struct virtio_gpu_framebuffer {
131 int x1, y1, x2, y2; /* dirty rect */ 131 int x1, y1, x2, y2; /* dirty rect */
132 spinlock_t dirty_lock; 132 spinlock_t dirty_lock;
133 uint32_t hw_res_handle; 133 uint32_t hw_res_handle;
134 struct virtio_gpu_fence *fence;
134}; 135};
135#define to_virtio_gpu_framebuffer(x) \ 136#define to_virtio_gpu_framebuffer(x) \
136 container_of(x, struct virtio_gpu_framebuffer, base) 137 container_of(x, struct virtio_gpu_framebuffer, base)
@@ -346,6 +347,9 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
346int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma); 347int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
347 348
348/* virtio_gpu_fence.c */ 349/* virtio_gpu_fence.c */
350struct virtio_gpu_fence *virtio_gpu_fence_alloc(
351 struct virtio_gpu_device *vgdev);
352void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
349int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, 353int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
350 struct virtio_gpu_ctrl_hdr *cmd_hdr, 354 struct virtio_gpu_ctrl_hdr *cmd_hdr,
351 struct virtio_gpu_fence **fence); 355 struct virtio_gpu_fence **fence);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 00c742a441bf..6b5d92215cfb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -67,6 +67,28 @@ static const struct dma_fence_ops virtio_fence_ops = {
67 .timeline_value_str = virtio_timeline_value_str, 67 .timeline_value_str = virtio_timeline_value_str,
68}; 68};
69 69
70struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
71{
72 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
73 struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
74 GFP_ATOMIC);
75 if (!fence)
76 return fence;
77
78 fence->drv = drv;
79 dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
80
81 return fence;
82}
83
84void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
85{
86 if (!fence)
87 return;
88
89 dma_fence_put(&fence->f);
90}
91
70int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, 92int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
71 struct virtio_gpu_ctrl_hdr *cmd_hdr, 93 struct virtio_gpu_ctrl_hdr *cmd_hdr,
72 struct virtio_gpu_fence **fence) 94 struct virtio_gpu_fence **fence)
@@ -74,15 +96,8 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
74 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; 96 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
75 unsigned long irq_flags; 97 unsigned long irq_flags;
76 98
77 *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
78 if ((*fence) == NULL)
79 return -ENOMEM;
80
81 spin_lock_irqsave(&drv->lock, irq_flags); 99 spin_lock_irqsave(&drv->lock, irq_flags);
82 (*fence)->drv = drv;
83 (*fence)->seq = ++drv->sync_seq; 100 (*fence)->seq = ++drv->sync_seq;
84 dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
85 drv->context, (*fence)->seq);
86 dma_fence_get(&(*fence)->f); 101 dma_fence_get(&(*fence)->f);
87 list_add_tail(&(*fence)->node, &drv->fences); 102 list_add_tail(&(*fence)->node, &drv->fences);
88 spin_unlock_irqrestore(&drv->lock, irq_flags); 103 spin_unlock_irqrestore(&drv->lock, irq_flags);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index bc5afa4f906e..340f2513d829 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -28,6 +28,7 @@
28#include <drm/drmP.h> 28#include <drm/drmP.h>
29#include <drm/virtgpu_drm.h> 29#include <drm/virtgpu_drm.h>
30#include <drm/ttm/ttm_execbuf_util.h> 30#include <drm/ttm/ttm_execbuf_util.h>
31#include <linux/sync_file.h>
31 32
32#include "virtgpu_drv.h" 33#include "virtgpu_drv.h"
33 34
@@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
105 struct virtio_gpu_device *vgdev = dev->dev_private; 106 struct virtio_gpu_device *vgdev = dev->dev_private;
106 struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; 107 struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
107 struct drm_gem_object *gobj; 108 struct drm_gem_object *gobj;
108 struct virtio_gpu_fence *fence; 109 struct virtio_gpu_fence *out_fence;
109 struct virtio_gpu_object *qobj; 110 struct virtio_gpu_object *qobj;
110 int ret; 111 int ret;
111 uint32_t *bo_handles = NULL; 112 uint32_t *bo_handles = NULL;
@@ -114,11 +115,46 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
114 struct ttm_validate_buffer *buflist = NULL; 115 struct ttm_validate_buffer *buflist = NULL;
115 int i; 116 int i;
116 struct ww_acquire_ctx ticket; 117 struct ww_acquire_ctx ticket;
118 struct sync_file *sync_file;
119 int in_fence_fd = exbuf->fence_fd;
120 int out_fence_fd = -1;
117 void *buf; 121 void *buf;
118 122
119 if (vgdev->has_virgl_3d == false) 123 if (vgdev->has_virgl_3d == false)
120 return -ENOSYS; 124 return -ENOSYS;
121 125
126 if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
127 return -EINVAL;
128
129 exbuf->fence_fd = -1;
130
131 if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
132 struct dma_fence *in_fence;
133
134 in_fence = sync_file_get_fence(in_fence_fd);
135
136 if (!in_fence)
137 return -EINVAL;
138
139 /*
140 * Wait if the fence is from a foreign context, or if the fence
141 * array contains any fence from a foreign context.
142 */
143 ret = 0;
144 if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
145 ret = dma_fence_wait(in_fence, true);
146
147 dma_fence_put(in_fence);
148 if (ret)
149 return ret;
150 }
151
152 if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
153 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
154 if (out_fence_fd < 0)
155 return out_fence_fd;
156 }
157
122 INIT_LIST_HEAD(&validate_list); 158 INIT_LIST_HEAD(&validate_list);
123 if (exbuf->num_bo_handles) { 159 if (exbuf->num_bo_handles) {
124 160
@@ -128,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
128 sizeof(struct ttm_validate_buffer), 164 sizeof(struct ttm_validate_buffer),
129 GFP_KERNEL | __GFP_ZERO); 165 GFP_KERNEL | __GFP_ZERO);
130 if (!bo_handles || !buflist) { 166 if (!bo_handles || !buflist) {
131 kvfree(bo_handles); 167 ret = -ENOMEM;
132 kvfree(buflist); 168 goto out_unused_fd;
133 return -ENOMEM;
134 } 169 }
135 170
136 user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles; 171 user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
137 if (copy_from_user(bo_handles, user_bo_handles, 172 if (copy_from_user(bo_handles, user_bo_handles,
138 exbuf->num_bo_handles * sizeof(uint32_t))) { 173 exbuf->num_bo_handles * sizeof(uint32_t))) {
139 ret = -EFAULT; 174 ret = -EFAULT;
140 kvfree(bo_handles); 175 goto out_unused_fd;
141 kvfree(buflist);
142 return ret;
143 } 176 }
144 177
145 for (i = 0; i < exbuf->num_bo_handles; i++) { 178 for (i = 0; i < exbuf->num_bo_handles; i++) {
146 gobj = drm_gem_object_lookup(drm_file, bo_handles[i]); 179 gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
147 if (!gobj) { 180 if (!gobj) {
148 kvfree(bo_handles); 181 ret = -ENOENT;
149 kvfree(buflist); 182 goto out_unused_fd;
150 return -ENOENT;
151 } 183 }
152 184
153 qobj = gem_to_virtio_gpu_obj(gobj); 185 qobj = gem_to_virtio_gpu_obj(gobj);
@@ -156,6 +188,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
156 list_add(&buflist[i].head, &validate_list); 188 list_add(&buflist[i].head, &validate_list);
157 } 189 }
158 kvfree(bo_handles); 190 kvfree(bo_handles);
191 bo_handles = NULL;
159 } 192 }
160 193
161 ret = virtio_gpu_object_list_validate(&ticket, &validate_list); 194 ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -168,22 +201,48 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
168 ret = PTR_ERR(buf); 201 ret = PTR_ERR(buf);
169 goto out_unresv; 202 goto out_unresv;
170 } 203 }
204
205 out_fence = virtio_gpu_fence_alloc(vgdev);
206 if(!out_fence) {
207 ret = -ENOMEM;
208 goto out_memdup;
209 }
210
211 if (out_fence_fd >= 0) {
212 sync_file = sync_file_create(&out_fence->f);
213 if (!sync_file) {
214 dma_fence_put(&out_fence->f);
215 ret = -ENOMEM;
216 goto out_memdup;
217 }
218
219 exbuf->fence_fd = out_fence_fd;
220 fd_install(out_fence_fd, sync_file->file);
221 }
222
171 virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, 223 virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
172 vfpriv->ctx_id, &fence); 224 vfpriv->ctx_id, &out_fence);
173 225
174 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f); 226 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
175 227
176 /* fence the command bo */ 228 /* fence the command bo */
177 virtio_gpu_unref_list(&validate_list); 229 virtio_gpu_unref_list(&validate_list);
178 kvfree(buflist); 230 kvfree(buflist);
179 dma_fence_put(&fence->f);
180 return 0; 231 return 0;
181 232
233out_memdup:
234 kfree(buf);
182out_unresv: 235out_unresv:
183 ttm_eu_backoff_reservation(&ticket, &validate_list); 236 ttm_eu_backoff_reservation(&ticket, &validate_list);
184out_free: 237out_free:
185 virtio_gpu_unref_list(&validate_list); 238 virtio_gpu_unref_list(&validate_list);
239out_unused_fd:
240 kvfree(bo_handles);
186 kvfree(buflist); 241 kvfree(buflist);
242
243 if (out_fence_fd >= 0)
244 put_unused_fd(out_fence_fd);
245
187 return ret; 246 return ret;
188} 247}
189 248
@@ -283,11 +342,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
283 rc_3d.nr_samples = cpu_to_le32(rc->nr_samples); 342 rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
284 rc_3d.flags = cpu_to_le32(rc->flags); 343 rc_3d.flags = cpu_to_le32(rc->flags);
285 344
345 fence = virtio_gpu_fence_alloc(vgdev);
346 if (!fence) {
347 ret = -ENOMEM;
348 goto fail_backoff;
349 }
350
286 virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL); 351 virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
287 ret = virtio_gpu_object_attach(vgdev, qobj, &fence); 352 ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
288 if (ret) { 353 if (ret) {
289 ttm_eu_backoff_reservation(&ticket, &validate_list); 354 virtio_gpu_fence_cleanup(fence);
290 goto fail_unref; 355 goto fail_backoff;
291 } 356 }
292 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f); 357 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
293 } 358 }
@@ -312,6 +377,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
312 dma_fence_put(&fence->f); 377 dma_fence_put(&fence->f);
313 } 378 }
314 return 0; 379 return 0;
380fail_backoff:
381 ttm_eu_backoff_reservation(&ticket, &validate_list);
315fail_unref: 382fail_unref:
316 if (vgdev->has_virgl_3d) { 383 if (vgdev->has_virgl_3d) {
317 virtio_gpu_unref_list(&validate_list); 384 virtio_gpu_unref_list(&validate_list);
@@ -374,6 +441,12 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
374 goto out_unres; 441 goto out_unres;
375 442
376 convert_to_hw_box(&box, &args->box); 443 convert_to_hw_box(&box, &args->box);
444
445 fence = virtio_gpu_fence_alloc(vgdev);
446 if (!fence) {
447 ret = -ENOMEM;
448 goto out_unres;
449 }
377 virtio_gpu_cmd_transfer_from_host_3d 450 virtio_gpu_cmd_transfer_from_host_3d
378 (vgdev, qobj->hw_res_handle, 451 (vgdev, qobj->hw_res_handle,
379 vfpriv->ctx_id, offset, args->level, 452 vfpriv->ctx_id, offset, args->level,
@@ -423,6 +496,11 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
423 (vgdev, qobj, offset, 496 (vgdev, qobj, offset,
424 box.w, box.h, box.x, box.y, NULL); 497 box.w, box.h, box.x, box.y, NULL);
425 } else { 498 } else {
499 fence = virtio_gpu_fence_alloc(vgdev);
500 if (!fence) {
501 ret = -ENOMEM;
502 goto out_unres;
503 }
426 virtio_gpu_cmd_transfer_to_host_3d 504 virtio_gpu_cmd_transfer_to_host_3d
427 (vgdev, qobj, 505 (vgdev, qobj,
428 vfpriv ? vfpriv->ctx_id : 0, offset, 506 vfpriv ? vfpriv->ctx_id : 0, offset,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index bf609dcae224..691b842d5f3a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -55,10 +55,11 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
55static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev, 55static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
56 uint32_t nlen, const char *name) 56 uint32_t nlen, const char *name)
57{ 57{
58 int handle = ida_alloc_min(&vgdev->ctx_id_ida, 1, GFP_KERNEL); 58 int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
59 59
60 if (handle < 0) 60 if (handle < 0)
61 return handle; 61 return handle;
62 handle += 1;
62 virtio_gpu_cmd_context_create(vgdev, handle, nlen, name); 63 virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
63 return handle; 64 return handle;
64} 65}
@@ -67,7 +68,7 @@ static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
67 uint32_t ctx_id) 68 uint32_t ctx_id)
68{ 69{
69 virtio_gpu_cmd_context_destroy(vgdev, ctx_id); 70 virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
70 ida_free(&vgdev->ctx_id_ida, ctx_id); 71 ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
71} 72}
72 73
73static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, 74static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -266,8 +267,10 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
266 267
267 get_task_comm(dbgname, current); 268 get_task_comm(dbgname, current);
268 id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname); 269 id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
269 if (id < 0) 270 if (id < 0) {
271 kfree(vfpriv);
270 return id; 272 return id;
273 }
271 274
272 vfpriv->ctx_id = id; 275 vfpriv->ctx_id = id;
273 file->driver_priv = vfpriv; 276 file->driver_priv = vfpriv;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 77eac4eb06b1..f39a183d59c2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -25,16 +25,21 @@
25 25
26#include "virtgpu_drv.h" 26#include "virtgpu_drv.h"
27 27
28static void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, 28static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
29 uint32_t *resid) 29 uint32_t *resid)
30{ 30{
31 int handle = ida_alloc_min(&vgdev->resource_ida, 1, GFP_KERNEL); 31 int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
32 *resid = handle; 32
33 if (handle < 0)
34 return handle;
35
36 *resid = handle + 1;
37 return 0;
33} 38}
34 39
35static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) 40static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
36{ 41{
37 ida_free(&vgdev->resource_ida, id); 42 ida_free(&vgdev->resource_ida, id - 1);
38} 43}
39 44
40static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) 45static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
@@ -94,7 +99,11 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
94 bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL); 99 bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
95 if (bo == NULL) 100 if (bo == NULL)
96 return -ENOMEM; 101 return -ENOMEM;
97 virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle); 102 ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
103 if (ret < 0) {
104 kfree(bo);
105 return ret;
106 }
98 size = roundup(size, PAGE_SIZE); 107 size = roundup(size, PAGE_SIZE);
99 ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); 108 ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
100 if (ret != 0) { 109 if (ret != 0) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a9f4ae7d4483..b84ac8c25856 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,6 +137,41 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
137 plane->state->src_h >> 16); 137 plane->state->src_h >> 16);
138} 138}
139 139
140static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
141 struct drm_plane_state *new_state)
142{
143 struct drm_device *dev = plane->dev;
144 struct virtio_gpu_device *vgdev = dev->dev_private;
145 struct virtio_gpu_framebuffer *vgfb;
146 struct virtio_gpu_object *bo;
147
148 if (!new_state->fb)
149 return 0;
150
151 vgfb = to_virtio_gpu_framebuffer(new_state->fb);
152 bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
153 if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
154 vgfb->fence = virtio_gpu_fence_alloc(vgdev);
155 if (!vgfb->fence)
156 return -ENOMEM;
157 }
158
159 return 0;
160}
161
162static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
163 struct drm_plane_state *old_state)
164{
165 struct virtio_gpu_framebuffer *vgfb;
166
167 if (!plane->state->fb)
168 return;
169
170 vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
171 if (vgfb->fence)
172 virtio_gpu_fence_cleanup(vgfb->fence);
173}
174
140static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, 175static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
141 struct drm_plane_state *old_state) 176 struct drm_plane_state *old_state)
142{ 177{
@@ -144,7 +179,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
144 struct virtio_gpu_device *vgdev = dev->dev_private; 179 struct virtio_gpu_device *vgdev = dev->dev_private;
145 struct virtio_gpu_output *output = NULL; 180 struct virtio_gpu_output *output = NULL;
146 struct virtio_gpu_framebuffer *vgfb; 181 struct virtio_gpu_framebuffer *vgfb;
147 struct virtio_gpu_fence *fence = NULL;
148 struct virtio_gpu_object *bo = NULL; 182 struct virtio_gpu_object *bo = NULL;
149 uint32_t handle; 183 uint32_t handle;
150 int ret = 0; 184 int ret = 0;
@@ -170,13 +204,13 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
170 (vgdev, bo, 0, 204 (vgdev, bo, 0,
171 cpu_to_le32(plane->state->crtc_w), 205 cpu_to_le32(plane->state->crtc_w),
172 cpu_to_le32(plane->state->crtc_h), 206 cpu_to_le32(plane->state->crtc_h),
173 0, 0, &fence); 207 0, 0, &vgfb->fence);
174 ret = virtio_gpu_object_reserve(bo, false); 208 ret = virtio_gpu_object_reserve(bo, false);
175 if (!ret) { 209 if (!ret) {
176 reservation_object_add_excl_fence(bo->tbo.resv, 210 reservation_object_add_excl_fence(bo->tbo.resv,
177 &fence->f); 211 &vgfb->fence->f);
178 dma_fence_put(&fence->f); 212 dma_fence_put(&vgfb->fence->f);
179 fence = NULL; 213 vgfb->fence = NULL;
180 virtio_gpu_object_unreserve(bo); 214 virtio_gpu_object_unreserve(bo);
181 virtio_gpu_object_wait(bo, false); 215 virtio_gpu_object_wait(bo, false);
182 } 216 }
@@ -218,6 +252,8 @@ static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
218}; 252};
219 253
220static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = { 254static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
255 .prepare_fb = virtio_gpu_cursor_prepare_fb,
256 .cleanup_fb = virtio_gpu_cursor_cleanup_fb,
221 .atomic_check = virtio_gpu_plane_atomic_check, 257 .atomic_check = virtio_gpu_plane_atomic_check,
222 .atomic_update = virtio_gpu_cursor_plane_update, 258 .atomic_update = virtio_gpu_cursor_plane_update,
223}; 259};
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 51bef1775e47..93f2c3a51ee8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -896,9 +896,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
896 struct virtio_gpu_object *obj) 896 struct virtio_gpu_object *obj)
897{ 897{
898 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); 898 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
899 struct virtio_gpu_fence *fence;
900 899
901 if (use_dma_api && obj->mapped) { 900 if (use_dma_api && obj->mapped) {
901 struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
902 /* detach backing and wait for the host process it ... */ 902 /* detach backing and wait for the host process it ... */
903 virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence); 903 virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
904 dma_fence_wait(&fence->f, true); 904 dma_fence_wait(&fence->f, true);