author		Linus Torvalds <torvalds@linux-foundation.org>	2017-08-11 01:33:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-08-11 01:33:47 -0400
commit		b2dbdf2ca1d2803e9cdc46a94554c4a39ffb235a (patch)
tree		e4f0bc7c920b4bc983d237e36398b49362e548e6
parent		27df704d43274578ca097c8a60f265faaacee7fb (diff)
parent		46828dc77961d9286e55671c4dd3b6c9effadf1a (diff)
Merge tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Nothing too earth shattering here, it just seems like lots of little
  things all over the place.

  msm has probably the larger amount of changes, but they all seem
  fine, otherwise, some rockchip, i915, etnaviv and exynos fixes, along
  with one nouveau regression fix for some older GPUs"

* tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux: (35 commits)
  drm/nouveau/disp/nv04: avoid creation of output paths
  drm: make DRM_STM default n
  drm/exynos: forbid creating framebuffers from too small GEM buffers
  drm/etnaviv: Fix off-by-one error in reloc checking
  drm/i915: fix backlight invert for non-zero minimum brightness
  drm/i915/shrinker: Wrap need_resched() inside preempt-disable
  drm/i915/perf: fix flex eu registers programming
  drm/i915: Fix out-of-bounds array access in bdw_load_gamma_lut
  drm/i915/gvt: Change the max length of mmio_reg_rw from 4 to 8
  drm/i915/gvt: Initialize MMIO Block with HW state
  drm/rockchip: vop: report error when check resource error
  drm/rockchip: vop: round_up pitches to word align
  drm/rockchip: vop: fix NV12 video display error
  drm/rockchip: vop: fix iommu page fault when resume
  drm/i915/gvt: clean workload queue if error happened
  drm/i915/gvt: change resetting to resetting_eng
  drm/msm: gpu: don't abuse dma_alloc for non-DMA allocations
  drm/msm: gpu: call qcom_mdt interfaces only for ARCH_QCOM
  drm/msm/adreno: Prevent unclocked access when retrieving timestamps
  drm/msm: Remove __user from __u64 data types
  ...
 drivers/dma-buf/sync_file.c                     |   5
 drivers/gpu/drm/bridge/tc358767.c               |   2
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c    |   4
 drivers/gpu/drm/exynos/exynos_drm_fb.c          |  14
 drivers/gpu/drm/i915/gvt/execlist.c             |  27
 drivers/gpu/drm/i915/gvt/firmware.c             |  11
 drivers/gpu/drm/i915/gvt/gvt.h                  |  14
 drivers/gpu/drm/i915/gvt/handlers.c             |  38
 drivers/gpu/drm/i915/gvt/scheduler.c            |   3
 drivers/gpu/drm/i915/gvt/vgpu.c                 |   8
 drivers/gpu/drm/i915/i915_gem_shrinker.c        |  11
 drivers/gpu/drm/i915/i915_perf.c                |   4
 drivers/gpu/drm/i915/intel_color.c              |   1
 drivers/gpu/drm/i915/intel_panel.c              |   2
 drivers/gpu/drm/msm/Kconfig                     |   2
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c           | 181
 drivers/gpu/drm/msm/adreno/a5xx_gpu.h           |   3
 drivers/gpu/drm/msm/adreno/adreno_gpu.c         |  11
 drivers/gpu/drm/msm/dsi/dsi_host.c              |  14
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c        |  12
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c     |   2
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c         |  12
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c       |   4
 drivers/gpu/drm/msm/msm_gem.c                   |  12
 drivers/gpu/drm/msm/msm_gem_submit.c            |   6
 drivers/gpu/drm/msm/msm_gem_vma.c               |   2
 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c |   2
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c     |  41
 drivers/gpu/drm/rockchip/rockchip_drm_vop.h     |   3
 drivers/gpu/drm/stm/Kconfig                     |   1
 include/linux/sync_file.h                       |   3
 include/uapi/drm/msm_drm.h                      |   6
 32 files changed, 247 insertions, 214 deletions
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d7e219d2669d..66fb40d0ebdb 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
 	struct sync_file *sync_file = file->private_data;
 
-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
 	dma_fence_put(sync_file->fence);
 	kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
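
Why the flag moves: one fence can back many sync_files, so a one-shot POLL_ENABLED bit stored in the shared fence->flags lets only the first poller ever arm its callback. A minimal userspace sketch of the difference, assuming nothing beyond the C standard library (the struct layouts and the test_and_set_bit0() helper are stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct fence     { unsigned long flags; };
struct sync_file { struct fence *fence; unsigned long flags; };

/* crude stand-in for test_and_set_bit(POLL_ENABLED, addr) */
static bool test_and_set_bit0(unsigned long *addr)
{
	bool was_set = *addr & 1UL;

	*addr |= 1UL;
	return was_set;
}

int main(void)
{
	struct fence shared = { 0 };
	struct sync_file a = { &shared, 0 }, b = { &shared, 0 };

	/* old scheme: one-shot bit on the shared fence -- b never arms */
	printf("a arms callback: %d\n", !test_and_set_bit0(&a.fence->flags));
	printf("b arms callback: %d\n", !test_and_set_bit0(&b.fence->flags));

	/* new scheme: bit per sync_file -- both arm their callbacks */
	printf("a arms callback: %d\n", !test_and_set_bit0(&a.flags));
	printf("b arms callback: %d\n", !test_and_set_bit0(&b.flags));
	return 0;
}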
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 5c26488e7a2d..0529e500c534 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	/* port@2 is the output port */
 	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	/* Shut down GPIO is optional */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2..6463fc2c736f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
 	if (ret)
 		return ret;
 
-	if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-		DRM_ERROR("relocation %u outside object", i);
+	if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+		DRM_ERROR("relocation %u outside object\n", i);
 		return -EINVAL;
 	}
 
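
The off-by-one in one line: for an object of size S and a reloc entry of sizeof(*ptr) bytes, the last valid offset is exactly S - sizeof(*ptr), which the old `>=` comparison wrongly rejected. A standalone sketch of the boundary (the sizes below are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 64;			/* object size */
	uint64_t entry = sizeof(uint64_t);	/* reloc entry size */

	for (uint64_t off = 55; off <= 57; off++)
		printf("off=%llu  old(>=): %s  new(>): %s\n",
		       (unsigned long long)off,
		       off >= size - entry ? "reject" : "accept",
		       off > size - entry ? "reject" : "accept");
	return 0;
}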
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d48fd7c918f8..73217c281c9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
 	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
 	struct drm_framebuffer *fb;
 	int i;
 	int ret;
 
-	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int height = (i == 0) ? mode_cmd->height :
+				     DIV_ROUND_UP(mode_cmd->height, info->vsub);
+		unsigned long size = height * mode_cmd->pitches[i] +
+				     mode_cmd->offsets[i];
+
 		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		}
 
 		exynos_gem[i] = to_exynos_gem(obj);
+
+		if (size > exynos_gem[i]->size) {
+			i++;
+			ret = -EINVAL;
+			goto err;
+		}
 	}
 
 	fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
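
The added check computes, per plane, the smallest GEM buffer that can back it: full height for plane 0, height divided by the vertical subsampling factor for chroma planes, times the pitch, plus the plane offset. A standalone sketch of that formula (the NV12 geometry below is an assumed example, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long min_plane_size(unsigned int plane, unsigned int height,
				    unsigned int pitch, unsigned int offset,
				    unsigned int vsub)
{
	unsigned int h = (plane == 0) ? height : DIV_ROUND_UP(height, vsub);

	return (unsigned long)h * pitch + offset;
}

int main(void)
{
	/* 1920x1080 NV12: luma plane + interleaved CbCr plane, vsub == 2 */
	printf("plane 0 needs %lu bytes\n", min_plane_size(0, 1080, 1920, 0, 2));
	printf("plane 1 needs %lu bytes\n", min_plane_size(1, 1080, 1920, 0, 2));
	return 0;
}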
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242..1648887d3f55 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;
 
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* if workload->status is not successful means HW GPU
+		 * has occurred GPU hang or something wrong with i915/GVT,
+		 * and GVT won't inject context switch interrupt to guest.
+		 * So this error is a vGPU hang actually to the guest.
+		 * According to this we should emulate a vGPU hang. If
+		 * there are pending workloads which are already submitted
+		 * from guest, we should clean them up like HW GPU does.
+		 *
+		 * if it is in middle of engine resetting, the pending
+		 * workloads won't be submitted to HW GPU and will be
+		 * cleaned up during the resetting process later, so doing
+		 * the workload clean up here doesn't have any impact.
+		 **/
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
 		goto out;
+	}
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i;
+	int i, j;
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
 		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+	for (i = 0; i < num; i++, block++) {
+		for (j = 0; j < block->size; j += 4)
+			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+					block->offset) + j));
+	}
+
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
 	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..2964a4d01a66 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 
+	struct gvt_mmio_block *mmio_block;
+	unsigned int num_mmio_block;
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 	unsigned int num_tracked_mmio;
 };
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6..feed9921b3b3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-	unsigned int device;
-	i915_reg_t offset;
-	unsigned int size;
-	gvt_mmio_func read;
-	gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
 					      unsigned int offset)
 {
 	unsigned long device = intel_gvt_get_device_type(gvt);
-	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+	for (i = 0; i < num; i++, block++) {
 		if (!(device & block->device))
 			continue;
 		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 	gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 	}
 
+	gvt->mmio.mmio_block = mmio_blocks;
+	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
 	gvt_dbg_mmio("traced %u virtual mmio registers\n",
 		     gvt->mmio.num_tracked_mmio);
 	return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (WARN_ON(bytes > 8))
 		return -EINVAL;
 
 	/*
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88..22e08eb2d0b7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	i915_gem_request_put(fetch_and_zero(&workload->req));
 
-	if (!workload->status && !vgpu->resetting) {
+	if (!workload->status && !(vgpu->resetting_eng &
+				   ENGINE_MASK(ring_id))) {
 		update_guest_context(workload);
 
 		for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea0..3deadcbd5a24 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
 		     vgpu->id, dmlr, engine_mask);
-	vgpu->resetting = true;
+
+	vgpu->resetting_eng = resetting_eng;
 
 	intel_vgpu_stop_schedule(vgpu);
 	/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		mutex_lock(&gvt->lock);
 	}
 
-	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+	intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		}
 	}
 
-	vgpu->resetting = false;
+	vgpu->resetting_eng = 0;
 	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
 	gvt_dbg_core("------------------------------------------\n");
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add11..77fb39808131 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 		return true;
 
 	case MUTEX_TRYLOCK_FAILED:
+		*unlock = false;
+		preempt_disable();
 		do {
 			cpu_relax();
 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-	case MUTEX_TRYLOCK_SUCCESS:
 				*unlock = true;
-				return true;
+				break;
 			}
 		} while (!need_resched());
+		preempt_enable();
+		return *unlock;
 
-		return false;
+	case MUTEX_TRYLOCK_SUCCESS:
+		*unlock = true;
+		return true;
 	}
 
 	BUG();
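
The shape of the fix, as a userspace analogy: spin on a trylock only for a bounded budget rather than forever. In the kernel the budget is need_resched(), and preemption is disabled around the loop so that check stays meaningful on the spinning CPU; the pthread sketch below only mirrors the bounded-spin structure (spin_trylock_bounded() is a stand-in, not a kernel or pthreads API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool spin_trylock_bounded(pthread_mutex_t *lock, int budget)
{
	do {
		if (pthread_mutex_trylock(lock) == 0)
			return true;	/* acquired: caller must unlock */
	} while (budget-- > 0);

	return false;	/* give up instead of spinning forever */
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	printf("uncontended: %d\n", spin_trylock_bounded(&lock, 100));
	/* the lock is now held, so a second bounded spin must give up */
	printf("contended:   %d\n", spin_trylock_bounded(&lock, 100));
	return 0;
}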
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cf..f33d90226704 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
 	u32 *cs;
 	int i;
 
-	cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
 	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
 	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
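
Both the ring-space reservation and the MI_LOAD_REGISTER_IMM count are now derived from ARRAY_SIZE() of the same flex_mmio table, so the two cannot drift apart. A standalone sketch of the pattern (the register values are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int flex_regs[] = { 0x1111, 0x2222, 0x3333 }; /* made up */

int main(void)
{
	/* one dword per register offset + one per value, plus 4 fixed dwords */
	size_t reserve = ARRAY_SIZE(flex_regs) * 2 + 4;
	size_t writes  = ARRAY_SIZE(flex_regs) * 2 + 4;

	printf("reserved %zu dwords, writing %zu dwords: %s\n",
	       reserve, writes, reserve == writes ? "ok" : "overflow");
	return 0;
}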
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..17c4ae7e4e7c 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 	}
 
 	/* Program the max register to clamp values > 1.0. */
+	i = lut_size - 1;
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
 		   drm_color_lut_extract(lut[i].red, 16));
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
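
After `for (i = 0; i < lut_size; i++)` the counter is lut_size, one past the last entry, so reusing it to index the clamp value read out of bounds; pinning it to lut_size - 1 first is the fix. A standalone sketch (the LUT contents are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned short lut[] = { 0, 21845, 43690, 65535 };
	int lut_size = 4, i;

	for (i = 0; i < lut_size; i++)
		;			/* program entries 0..lut_size-1 */

	/* here i == lut_size: lut[i] would read one past the end */
	i = lut_size - 1;
	printf("clamp register gets lut[%d] = %u\n", i, lut[i]);
	return 0;
}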
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869..593349be8b9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
 	if (i915.invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		return panel->backlight.max - val;
+		return panel->backlight.max - val + panel->backlight.min;
 	}
 
 	return val;
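
The arithmetic behind the fix: plain `max - val` maps the valid range [min, max] onto [0, max - min], so fully-bright requests produced values below the panel's minimum; adding `min` back keeps the inverted value inside [min, max]. A standalone check (min and max are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int min = 10, max = 100;

	for (unsigned int val = min; val <= max; val += 45)
		printf("val=%3u  old=%3u  new=%3u\n",
		       val, max - val, max - val + min);
	return 0;
}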
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e..99d39b2aefa6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,7 +5,7 @@ config DRM_MSM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	depends on OF && COMMON_CLK
 	depends on MMU
-	select QCOM_MDT_LOADER
+	select QCOM_MDT_LOADER if ARCH_QCOM
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc..f9eae03aa1dc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
 static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 {
 	const struct firmware *fw;
+	struct device_node *np;
+	struct resource r;
 	phys_addr_t mem_phys;
 	ssize_t mem_size;
 	void *mem_region = NULL;
 	int ret;
 
+	if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+		return -EINVAL;
+
+	np = of_get_child_by_name(dev->of_node, "zap-shader");
+	if (!np)
+		return -ENODEV;
+
+	np = of_parse_phandle(np, "memory-region", 0);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_address_to_resource(np, 0, &r);
+	if (ret)
+		return ret;
+
+	mem_phys = r.start;
+	mem_size = resource_size(&r);
+
 	/* Request the MDT file for the firmware */
 	ret = request_firmware(&fw, fwname, dev);
 	if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 	}
 
 	/* Allocate memory for the firmware image */
-	mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
 	if (!mem_region) {
 		ret = -ENOMEM;
 		goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 
 out:
+	if (mem_region)
+		memunmap(mem_region);
+
 	release_firmware(fw);
 
 	return ret;
 }
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-	return -ENODEV;
-}
-#endif
 
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	gpu->funcs->flush(gpu);
 }
 
-struct a5xx_hwcg {
+static const struct {
 	u32 offset;
 	u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
 };
 
-static const struct {
-	int (*test)(struct adreno_gpu *gpu);
-	const struct a5xx_hwcg *regs;
-	unsigned int count;
-} a5xx_hwcg_regs[] = {
-	{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-		const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
 	unsigned int i;
 
-	for (i = 0; i < count; i++)
-		gpu_write(gpu, regs[i].offset, regs[i].value);
+	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+		gpu_write(gpu, a5xx_hwcg[i].offset,
+			state ? a5xx_hwcg[i].value : 0);
 
-	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-		if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-			_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-				a5xx_hwcg_regs[i].count);
-			return;
-		}
-	}
+	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
 }
 
 static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
 	return ret;
 }
 
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-	struct device_node *node;
-	int ret;
-
-	if (dev->parent)
-		return 0;
-
-	/* Find the sub-node for the zap shader */
-	node = of_get_child_by_name(parent->of_node, "zap-shader");
-	if (!node) {
-		DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-		return -ENODEV;
-	}
-
-	dev->parent = parent;
-	dev->of_node = node;
-	dev_set_name(dev, "adreno_zap_shader");
-
-	ret = device_register(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-		goto out;
-	}
-
-	ret = of_reserved_mem_device_init(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-		device_unregister(dev);
-	}
-
-out:
-	if (ret)
-		dev->parent = NULL;
-
-	return ret;
-}
-
 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 {
 	static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 		return -ENODEV;
 	}
 
-	ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
-	if (!ret)
-		ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-			adreno_gpu->info->zapfw);
+	ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
 
 	loaded = !ret;
 
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
 	/* Enable HWCG */
-	a5xx_enable_hwcg(gpu);
+	a5xx_set_hwcg(gpu, true);
 
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	if (a5xx_gpu->zap_dev.parent)
-		device_unregister(&a5xx_gpu->zap_dev);
-
 	if (a5xx_gpu->pm4_bo) {
 		if (a5xx_gpu->pm4_iova)
 			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
 	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
 	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
 	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-	~0
+	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+	0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+	0xB9A0, 0xB9BF, ~0
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
 	seq_printf(m, "status: %08x\n",
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+	/*
+	 * Temporarily disable hardware clock gating before going into
+	 * adreno_show to avoid issues while reading the registers
+	 */
+	a5xx_set_hwcg(gpu, false);
 	adreno_show(gpu, m);
+	a5xx_set_hwcg(gpu, true);
 }
 #endif
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6638bc85645d..1137092241d5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -36,8 +36,6 @@ struct a5xx_gpu {
 	uint32_t gpmu_dwords;
 
 	uint32_t lm_leakage;
-
-	struct device zap_dev;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
 }
 
 bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
 #endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1ab2703674a..7414c6bbd582 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 		*value = adreno_gpu->base.fast_rate;
 		return 0;
 	case MSM_PARAM_TIMESTAMP:
-		if (adreno_gpu->funcs->get_timestamp)
-			return adreno_gpu->funcs->get_timestamp(gpu, value);
+		if (adreno_gpu->funcs->get_timestamp) {
+			int ret;
+
+			pm_runtime_get_sync(&gpu->pdev->dev);
+			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+			pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+			return ret;
+		}
 		return -EINVAL;
 	default:
 		DBG("%s: invalid param: %u", gpu->name, param);
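
The pattern: bracket the register read with a runtime-PM get/put pair so the GPU is guaranteed to be clocked while the timestamp registers are touched. A userspace analogy with a simple power refcount (power_get()/power_put() are stand-ins for pm_runtime_get_sync()/pm_runtime_put_autosuspend(), and the device struct is invented for the sketch):

#include <stdio.h>

struct device { int power_refs; };

static void power_get(struct device *d)
{
	if (d->power_refs++ == 0)
		printf("clocks on\n");
}

static void power_put(struct device *d)
{
	if (--d->power_refs == 0)
		printf("clocks off\n");
}

static int read_timestamp(struct device *d, unsigned long long *value)
{
	power_get(d);		/* like pm_runtime_get_sync() */
	*value = 0x1234;	/* register read is safe while clocked */
	power_put(d);		/* like pm_runtime_put_autosuspend() */

	return 0;
}

int main(void)
{
	struct device gpu = { 0 };
	unsigned long long ts;
	int ret = read_timestamp(&gpu, &ts);

	printf("ret=%d ts=%#llx\n", ret, ts);
	return 0;
}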
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9e9c5696bc03..c7b612c3d771 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
 		struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	ret = dsi_calc_clk_rate(msm_host);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		return;
+	}
 
 	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
 	clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 		struct drm_display_mode *mode)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	int ret;
 
 	if (msm_host->mode) {
 		drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 		return -ENOMEM;
 	}
 
-	ret = dsi_calc_clk_rate(msm_host);
-	if (ret) {
-		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-		return ret;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index cb5415d6c04b..735a87a699fa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
 	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 	int i, plane_cnt = 0;
 	bool bg_alpha_enabled = false;
 	u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!handle) {
 		DBG("Cursor off");
 		cursor_enable = false;
+		mdp5_enable(mdp5_kms);
 		goto set_cursor;
 	}
 
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
 			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
 	crtc_flush(crtc, flush_mask);
 
 end:
+	mdp5_disable(mdp5_kms);
 	if (old_bo) {
 		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
 		/* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
 
 	crtc_flush(crtc, flush_mask);
 
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
 
852 860
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 97f3294fbfc6..70bef51245af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-		mdp5_cmd_encoder_disable(encoder);
+		mdp5_cmd_encoder_enable(encoder);
 	else
 		mdp5_vid_encoder_enable(encoder);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5d13fa5381ee..1c603aef3c59 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 		const char *name, bool mandatory)
 {
 	struct device *dev = &pdev->dev;
-	struct clk *clk = devm_clk_get(dev, name);
+	struct clk *clk = msm_clk_get(pdev, name);
 	if (IS_ERR(clk) && mandatory) {
 		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 		return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 	}
 
 	/* mandatory clocks: */
-	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
 	if (ret)
 		goto fail;
 
 	/* optional clocks: */
-	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
 	/* we need to set a default rate before enabling. Set a safe
 	 * rate first, then figure out hw revision, and then set a
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index fe3a4de1a433..61f39c86dd09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
890 struct mdp5_hw_pipe *right_hwpipe; 890 struct mdp5_hw_pipe *right_hwpipe;
891 const struct mdp_format *format; 891 const struct mdp_format *format;
892 uint32_t nplanes, config = 0; 892 uint32_t nplanes, config = 0;
893 struct phase_step step = { 0 }; 893 struct phase_step step = { { 0 } };
894 struct pixel_ext pe = { 0 }; 894 struct pixel_ext pe = { { 0 } };
895 uint32_t hdecm = 0, vdecm = 0; 895 uint32_t hdecm = 0, vdecm = 0;
896 uint32_t pix_format; 896 uint32_t pix_format;
897 unsigned int rotation; 897 unsigned int rotation;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f35544c1ec..a0c60e738db8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct page **pages;
 
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			goto unlock;
+		}
 
 		pages = get_pages(obj);
 		if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
 fail:
 	del_vma(vma);
-
+unlock:
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	if (use_vram) {
 		struct msm_gem_vma *vma;
 		struct page **pages;
+		struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+		mutex_lock(&msm_obj->lock);
 
 		vma = add_vma(obj, NULL);
+		mutex_unlock(&msm_obj->lock);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
 			goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6bfca7470141..8a75c0bd8a78 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 {
 	struct msm_gem_submit *submit;
-	uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-		(nr_cmds * sizeof(submit->cmd[0]));
+	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+		((u64)nr_cmds * sizeof(submit->cmd[0]));
 
 	if (sz > SIZE_MAX)
 		return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
 		ret = submit_fence_sync(submit);
 		if (ret)
 			goto out;
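
The overflow fixed in submit_create(): with 32-bit operands the multiplication wraps before the result is widened to 64 bits, so a huge user-supplied count can produce a tiny allocation that later indexing overruns; casting to u64 first makes the SIZE_MAX check effective. A standalone demonstration (the entry size is a stand-in for sizeof(submit->bos[0])):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nr_bos = 0x20000001;	/* hostile, userspace-controlled count */
	uint32_t entry = 8;		/* stand-in for sizeof(submit->bos[0]) */

	uint64_t bad = nr_bos * entry;			/* wraps in 32 bits first */
	uint64_t good = (uint64_t)nr_bos * entry;	/* widened before multiply */

	printf("wrapped size: %llu\n", (unsigned long long)bad);
	printf("real size:    %llu\n", (unsigned long long)good);
	return 0;
}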
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c36321bc8714..d34e331554f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -42,7 +42,7 @@ void
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-	if (!vma->iova)
+	if (!aspace || !vma->iova)
 		return;
 
 	if (aspace->mmu) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index c7c84d34d97e..88582af8bd89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
 	/* Create output path objects for each VBIOS display path. */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		if (ver < 0x40) /* No support for chipsets prior to NV50. */
+			break;
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d450332c2fd..2900f1410d95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
 static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int ret;
+	int ret, i;
 
 	ret = pm_runtime_get_sync(vop->dev);
 	if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
 	}
 
 	memcpy(vop->regs, vop->regsbak, vop->len);
+	/*
+	 * We need to make sure that all windows are disabled before we
+	 * enable the crtc. Otherwise we might try to scan from a destroyed
+	 * buffer later.
+	 */
+	for (i = 0; i < vop->data->win_size; i++) {
+		struct vop_win *vop_win = &vop->win[i];
+		const struct vop_win_data *win = vop_win->data;
+
+		spin_lock(&vop->reg_lock);
+		VOP_WIN_SET(vop, win, enable, 0);
+		spin_unlock(&vop->reg_lock);
+	}
+
 	vop_cfg_done(vop);
 
 	/*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int i;
 
 	WARN_ON(vop->event);
 
 	rockchip_drm_psr_deactivate(&vop->crtc);
 
-	/*
-	 * We need to make sure that all windows are disabled before we
-	 * disable that crtc. Otherwise we might try to scan from a destroyed
-	 * buffer later.
-	 */
-	for (i = 0; i < vop->data->win_size; i++) {
-		struct vop_win *vop_win = &vop->win[i];
-		const struct vop_win_data *win = vop_win->data;
-
-		spin_lock(&vop->reg_lock);
-		VOP_WIN_SET(vop, win, enable, 0);
-		spin_unlock(&vop->reg_lock);
-	}
-
-	vop_cfg_done(vop);
-
 	drm_crtc_vblank_off(crtc);
 
 	/*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 	 * Src.x1 can be odd when do clip, but yuv plane start point
 	 * need align with 2 pixel.
 	 */
-	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, format, format);
-	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
 	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
 	if (is_yuv_support(fb->format->format)) {
 		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
 		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
 		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
 	}
 
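
The VOP virtual-stride registers count 32-bit words, so a plain `pitch >> 2` silently truncates a pitch that is not a multiple of 4 and drops the tail of every scanline; DIV_ROUND_UP covers the whole line. A standalone comparison (the pitch values are arbitrary):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pitches[] = { 1440, 1442 };	/* bytes per scanline */

	for (int i = 0; i < 2; i++)
		printf("pitch=%u  shift=%u words  round_up=%u words\n",
		       pitches[i], pitches[i] >> 2,
		       DIV_ROUND_UP(pitches[i], 4));
	return 0;
}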
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 9979fd0c2282..27eefbfcf3d0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
 	act_height = (src_h + vskiplines - 1) / vskiplines;
 
+	if (act_height == dst_h)
+		return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
 	return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
 
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 2c4817fb0890..8fe5b184b4e8 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -7,7 +7,6 @@ config DRM_STM
 	select DRM_PANEL
 	select VIDEOMODE_HELPERS
 	select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-	default y
 
 	help
 	  Enable support for the on-chip display controller on
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 5726107963b2..0ad87c434ae6 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -43,12 +43,13 @@ struct sync_file {
 #endif
 
 	wait_queue_head_t wq;
+	unsigned long flags;
 
 	struct dma_fence *fence;
 	struct dma_fence_cb cb;
 };
 
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
 
 struct sync_file *sync_file_create(struct dma_fence *fence);
 struct dma_fence *sync_file_get_fence(int fd);
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 26c54f6d595d..ad4eb2863e70 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
 	__u32 size;           /* in, cmdstream size */
 	__u32 pad;
 	__u32 nr_relocs;      /* in, number of submit_reloc's */
-	__u64 __user relocs;  /* in, ptr to array of submit_reloc's */
+	__u64 relocs;         /* in, ptr to array of submit_reloc's */
 };
 
 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
 	__u32 fence;          /* out */
 	__u32 nr_bos;         /* in, number of submit_bo's */
 	__u32 nr_cmds;        /* in, number of submit_cmd's */
-	__u64 __user bos;     /* in, ptr to array of submit_bo's */
-	__u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+	__u64 bos;            /* in, ptr to array of submit_bo's */
+	__u64 cmds;           /* in, ptr to array of submit_cmd's */
 	__s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 };
 