author    Linus Torvalds <torvalds@linux-foundation.org>  2017-03-17 14:19:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-03-17 14:19:52 -0400
commit    102ed8c35ab27e74b29f3050af50600d605e186d (patch)
tree      f65acb1f5486e9c7826dfb0c05199baecc8efb60
parent    d528ae0d3dfedea553812c957a6ed1e87feeed8a (diff)
parent    27b713c2e08ef27d500a79166098d42b24977500 (diff)
Merge tag 'drm-fixes-for-v4.11-rc3' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Bunch of fixes across the drivers, in a St Patrick's day pull request
  (please turn terminal colors to green on black or black on green for
  full effect).

  On the arm side, tilcdc, omap and malidp got fixes, while amd has some
  power management fixes, and intel has a set of fixes across the driver.

  Nothing seems too bad or scary at this point"

* tag 'drm-fixes-for-v4.11-rc3' of git://people.freedesktop.org/~airlied/linux: (27 commits)
  drm/amd/amdgpu: Fix debugfs reg read/write address width
  drm/amdgpu/si: add dpm quirk for Oland
  drm/radeon/si: add dpm quirk for Oland
  drm: amd: remove broken include path
  drm/amd/powerplay: fix copy error in smu7_clockpoweragting.c
  drm/tilcdc: Set framebuffer DMA address to HW only if CRTC is enabled
  drm/tilcdc: Fix hardcoded fail-return value in tilcdc_crtc_create()
  drm/i915: Fix forcewake active domain tracking
  drm/i915: Nuke skl_update_plane debug message from the pipe update critical section
  drm/i915: use correct node for handling cache domain eviction
  uapi: fix drm/omap_drm.h userspace compilation errors
  drm/omap: fix dmabuf mmap for dma_alloc'ed buffers
  drm/amdgpu: fix parser init error path to avoid crash in parser fini
  drm/amd/amdgpu: Disable GFX_PG on Carrizo until compute issues solved
  drm: mali-dp: Fix smart layer not going to composition
  drm: mali-dp: Remove mclk rate management
  drm/i915: Drain the freed state from the tail of the next commit
  drm/i915: Nuke debug messages from the pipe update critical section
  drm/i915: Use pagecache write to prepopulate shmemfs from pwrite-ioctl
  drm/i915: Store a permanent error in obj->mm.pages
  ...
-rw-r--r--  drivers/gpu/drm/amd/acp/Makefile                             |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c                       |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c                   |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c                          |  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c                              |  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c  |  2
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c                            |  3
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c                              |  2
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c                          | 18
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h                            |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                              |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                              | 97
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c                        |  8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h                       |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c                              | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c                         | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c                           | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                              | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c                          |  3
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c                          | 13
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c                    |  3
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c                              |  6
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c                         | 37
-rw-r--r--  include/uapi/drm/omap_drm.h                                  | 38
24 files changed, 271 insertions(+), 123 deletions(-)
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb57915b..8a08e81ee90d 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
 
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d2d0f60ff36d..99424cb8020b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@ free_partial_kdata:
 	for (; i >= 0; i--)
 		drm_free_large(p->chunks[i].kdata);
 	kfree(p->chunks);
+	p->chunks = NULL;
+	p->nchunks = 0;
 put_ctx:
 	amdgpu_ctx_put(p->ctx);
 free_chunk:
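A note on the hunk above: per the commit subject, the parser teardown runs again in parser fini, so the error path must not leave a stale chunks pointer behind after kfree(). A minimal user-space sketch of the same defensive pattern (the struct and function names here are hypothetical, not the driver's):

    #include <stdlib.h>

    struct parser {
            void *chunks;
            int nchunks;
    };

    /* Free the chunk array and clear the bookkeeping, so a second
     * cleanup pass sees an empty parser instead of a dangling pointer. */
    static void parser_free_chunks(struct parser *p)
    {
            free(p->chunks);
            p->chunks = NULL;       /* a later free(NULL) is a harmless no-op */
            p->nchunks = 0;         /* loops bounded by nchunks do nothing */
    }

    int main(void)
    {
            struct parser p = { malloc(16), 1 };

            parser_free_chunks(&p);
            parser_free_chunks(&p); /* safe: no double-free on repeat cleanup */
            return 0;
    }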
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4120b351a8e5..a3a105ec99e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		use_bank = 0;
 	}
 
-	*pos &= 0x3FFFF;
+	*pos &= (1UL << 22) - 1;
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 		use_bank = 0;
 	}
 
-	*pos &= 0x3FFFF;
+	*pos &= (1UL << 22) - 1;
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
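The two identical one-liners above widen the debugfs register-offset mask: 0x3FFFF keeps 18 bits (a 256 KiB window) while (1UL << 22) - 1 = 0x3FFFFF keeps 22 bits (a 4 MiB window). Reading the masks as window sizes is my inference, not stated in the patch; the arithmetic itself is easy to check:

    #include <stdio.h>

    int main(void)
    {
            unsigned long old_mask = 0x3FFFF;               /* (1UL << 18) - 1 */
            unsigned long new_mask = (1UL << 22) - 1;       /* 0x3FFFFF */

            /* mask + 1 is the size of the addressable window in bytes */
            printf("old: %#lx -> %lu KiB\n", old_mask, (old_mask + 1) >> 10);
            printf("new: %#lx -> %lu KiB\n", new_mask, (new_mask + 1) >> 10);
            return 0;
    }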
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index f55e45b52fbc..33b504bafb88 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
 		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->device == 0x6604) &&
+		    (adev->pdev->subsystem_vendor == 0x1028) &&
+		    (adev->pdev->subsystem_device == 0x066F)) {
+			max_sclk = 75000;
+		}
 	}
 
 	if (rps->vce_active) {
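This hunk and the matching radeon hunk further down add the same quirk: one Oland variant (device 0x6604, subsystem 1028:066F, 0x1028 being Dell's PCI vendor ID) gets max_sclk capped at 75000, i.e. 750 MHz if the usual 10 kHz dpm clock units apply (an assumption on my part). A hedged sketch of how such board caps could be kept table-driven rather than as growing if/else chains; this is an illustration, not what the driver does:

    #include <stdint.h>
    #include <stddef.h>

    struct sclk_quirk {
            uint16_t device, sub_vendor, sub_device;
            uint32_t max_sclk;      /* assumed 10 kHz units: 75000 -> 750 MHz */
    };

    static const struct sclk_quirk quirks[] = {
            { 0x6604, 0x1028, 0x066F, 75000 },      /* the board from the diff */
    };

    static uint32_t apply_sclk_quirk(uint16_t dev, uint16_t sv, uint16_t sd,
                                     uint32_t max_sclk)
    {
            size_t i;

            for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                    if (quirks[i].device == dev && quirks[i].sub_vendor == sv &&
                        quirks[i].sub_device == sd)
                            return quirks[i].max_sclk;
            return max_sclk;
    }

    int main(void)
    {
            /* the quirked board is capped; everything else passes through */
            return apply_sclk_quirk(0x6604, 0x1028, 0x066F, 120000) == 75000 ? 0 : 1;
    }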
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 50bdb24ef8d6..4a785d6acfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
 		/* rev0 hardware requires workarounds to support PG */
 		adev->pg_flags = 0;
 		if (adev->rev_id != 0x00) {
-			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+			adev->pg_flags |=
 				AMD_PG_SUPPORT_GFX_SMG |
 				AMD_PG_SUPPORT_GFX_PIPELINE |
 				AMD_PG_SUPPORT_CP |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 8cf71f3c6d0e..261b828ad590 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	if (bgate) {
 		cgs_set_powergating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_VCE,
-						AMD_PG_STATE_UNGATE);
+						AMD_PG_STATE_GATE);
 		cgs_set_clockgating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_VCE,
 						AMD_CG_STATE_GATE);
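The copy-paste error fixed above had the gating path power-ungating VCE while clock-gating it. A tiny sketch of the intended pairing (hypothetical enums, not the powerplay API): gating a block gates both power and clocks, ungating releases both:

    #include <assert.h>

    enum pg_state { PG_STATE_GATE, PG_STATE_UNGATE };
    enum cg_state { CG_STATE_GATE, CG_STATE_UNGATE };

    /* bgate selects both states together; mixing them was the bug */
    static void vce_set_gating(int bgate, enum pg_state *pg, enum cg_state *cg)
    {
            *pg = bgate ? PG_STATE_GATE : PG_STATE_UNGATE;
            *cg = bgate ? CG_STATE_GATE : CG_STATE_UNGATE;
    }

    int main(void)
    {
            enum pg_state pg;
            enum cg_state cg;

            vce_set_gating(1, &pg, &cg);
            assert(pg == PG_STATE_GATE && cg == CG_STATE_GATE);
            return 0;
    }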
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 08e6a71f5d05..294b53697334 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
 
 	clk_prepare_enable(hwdev->pxlclk);
 
-	/* mclk needs to be set to the same or higher rate than pxlclk */
-	clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+	/* We rely on firmware to set mclk to a sensible level. */
 	clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
 	hwdev->modeset(hwdev, &vm);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 488aedf5b58d..9f5513006eee 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
 	{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
 	{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
 	{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
-	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
+	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
 };
 
 #define MALIDP_DE_DEFAULT_PREFETCH_START	5
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 414aada10fe5..d5aec082294c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -37,6 +37,8 @@
 #define   LAYER_V_VAL(x)		(((x) & 0x1fff) << 16)
 #define MALIDP_LAYER_COMP_SIZE		0x010
 #define MALIDP_LAYER_OFFSET		0x014
+#define MALIDP550_LS_ENABLE		0x01c
+#define MALIDP550_LS_R1_IN_SIZE		0x020
 
 /*
  * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
 			LAYER_V_VAL(plane->state->crtc_y),
 			mp->layer->base + MALIDP_LAYER_OFFSET);
 
+	if (mp->layer->id == DE_SMART)
+		malidp_hw_write(mp->hwdev,
+				LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+				mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+
 	/* first clear the rotation bits */
 	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
 	val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
 		plane->hwdev = malidp->dev;
 		plane->layer = &map->layers[i];
 
-		/* Skip the features which the SMART layer doesn't have */
-		if (id == DE_SMART)
+		if (id == DE_SMART) {
+			/*
+			 * Enable the first rectangle in the SMART layer to be
+			 * able to use it as a drm plane.
+			 */
+			malidp_hw_write(malidp->dev, 1,
+					plane->layer->base + MALIDP550_LS_ENABLE);
+			/* Skip the features which the SMART layer doesn't have. */
 			continue;
+		}
 
 		drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
 		malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index aff6d4a84e99..b816067a65c5 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -84,6 +84,7 @@
 /* Stride register offsets relative to Lx_BASE */
 #define MALIDP_DE_LG_STRIDE		0x18
 #define MALIDP_DE_LV_STRIDE0		0x18
+#define MALIDP550_DE_LS_R1_STRIDE	0x28
 
 /* macros to set values into registers */
 #define MALIDP_DE_H_FRONTPORCH(x)	(((x) & 0xfff) << 0)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4b42d31391..7febe6eecf72 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -293,6 +293,7 @@ enum plane_id {
 	PLANE_PRIMARY,
 	PLANE_SPRITE0,
 	PLANE_SPRITE1,
+	PLANE_SPRITE2,
 	PLANE_CURSOR,
 	I915_MAX_PLANES,
 };
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6908123162d1..10777da73039 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+	ret = -ENODEV;
+	if (obj->ops->pwrite)
+		ret = obj->ops->pwrite(obj, args);
+	if (ret != -ENODEV)
+		goto err;
+
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	 */
 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
 	obj->mm.madv = __I915_MADV_PURGED;
+	obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
 /* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
 	__i915_gem_object_reset_page_iter(obj);
 
-	obj->ops->put_pages(obj, pages);
+	if (!IS_ERR(pages))
+		obj->ops->put_pages(obj, pages);
+
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(!obj->mm.pages)) {
+	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
 			goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 	pinned = true;
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!obj->mm.pages)) {
+		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 			ret = ____i915_gem_object_get_pages(obj);
 			if (ret)
 				goto err_unlock;
@@ -2563,6 +2572,75 @@ err_unlock:
 	goto out_unlock;
 }
 
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+			   const struct drm_i915_gem_pwrite *arg)
+{
+	struct address_space *mapping = obj->base.filp->f_mapping;
+	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+	u64 remain, offset;
+	unsigned int pg;
+
+	/* Before we instantiate/pin the backing store for our use, we
+	 * can prepopulate the shmemfs filp efficiently using a write into
+	 * the pagecache. We avoid the penalty of instantiating all the
+	 * pages, important if the user is just writing to a few and never
+	 * uses the object on the GPU, and using a direct write into shmemfs
+	 * allows it to avoid the cost of retrieving a page (either swapin
+	 * or clearing-before-use) before it is overwritten.
+	 */
+	if (READ_ONCE(obj->mm.pages))
+		return -ENODEV;
+
+	/* Before the pages are instantiated the object is treated as being
+	 * in the CPU domain. The pages will be clflushed as required before
+	 * use, and we can freely write into the pages directly. If userspace
+	 * races pwrite with any other operation; corruption will ensue -
+	 * that is userspace's prerogative!
+	 */
+
+	remain = arg->size;
+	offset = arg->offset;
+	pg = offset_in_page(offset);
+
+	do {
+		unsigned int len, unwritten;
+		struct page *page;
+		void *data, *vaddr;
+		int err;
+
+		len = PAGE_SIZE - pg;
+		if (len > remain)
+			len = remain;
+
+		err = pagecache_write_begin(obj->base.filp, mapping,
+					    offset, len, 0,
+					    &page, &data);
+		if (err < 0)
+			return err;
+
+		vaddr = kmap(page);
+		unwritten = copy_from_user(vaddr + pg, user_data, len);
+		kunmap(page);
+
+		err = pagecache_write_end(obj->base.filp, mapping,
+					  offset, len, len - unwritten,
+					  page, data);
+		if (err < 0)
+			return err;
+
+		if (unwritten)
+			return -EFAULT;
+
+		remain -= len;
+		user_data += len;
+		offset += len;
+		pg = 0;
+	} while (remain);
+
+	return 0;
+}
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
 	return (i915_gem_context_is_bannable(ctx) &&
@@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
 		if (args->timeout_ns < 0)
 			args->timeout_ns = 0;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+			args->timeout_ns = 0;
 	}
 
 	i915_gem_object_put(obj);
@@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
+
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
+
+	.pwrite = i915_gem_object_pwrite_gtt,
 };
 
 struct drm_i915_gem_object *
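Two of the changes above work together: truncation now stores ERR_PTR(-EFAULT) in obj->mm.pages, and the readers switch from !obj->mm.pages to IS_ERR_OR_NULL(), so a purged object reports a permanent error instead of silently re-acquiring pages. A self-contained sketch of the ERR_PTR encoding; the helpers mirror the kernel's include/linux/err.h but are reimplemented here so the example stands alone:

    #include <errno.h>
    #include <stdio.h>

    /* Encode a small negative errno directly in a pointer value. */
    static void *err_ptr(long err)     { return (void *)err; }
    static long ptr_err(const void *p) { return (long)p; }
    static int is_err(const void *p)
    {
            /* the last page of the address space is reserved for errnos */
            return (unsigned long)p >= (unsigned long)-4095;
    }

    int main(void)
    {
            void *pages = err_ptr(-EFAULT); /* "purged, and stays purged" */

            if (is_err(pages))              /* readers see the stored error */
                    printf("pages unavailable: errno %ld\n", -ptr_err(pages));
            return 0;
    }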
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c181b1bb3d2c..3be2503aa042 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	 * those as well to make room for our guard pages.
 	 */
 	if (check_color) {
-		if (vma->node.start + vma->node.size == node->start) {
-			if (vma->node.color == node->color)
+		if (node->start + node->size == target->start) {
+			if (node->color == target->color)
 				continue;
 		}
-		if (vma->node.start == node->start + node->size) {
-			if (vma->node.color == node->color)
+		if (node->start == target->start + target->size) {
+			if (node->color == target->color)
 				continue;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index bf90b07163d1..76b80a0be797 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
 	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
+	int (*pwrite)(struct drm_i915_gem_object *,
+		      const struct drm_i915_gem_pwrite *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 155906e84812..df20e9bc1c0f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -512,10 +512,36 @@ err_unpin:
 	return ret;
 }
 
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+	drm_mm_remove_node(&vma->node);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist.
+	 */
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_link,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
+
+	/* And finally now the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound = vma->flags;
+	const unsigned int bound = vma->flags;
 	int ret;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
 		ret = -EBUSY;
-		goto err;
+		goto err_unpin;
 	}
 
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
-			goto err;
+			goto err_unpin;
 	}
 
 	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
 	if (ret)
-		goto err;
+		goto err_remove;
 
 	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 	return 0;
 
-err:
+err_remove:
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
+		GEM_BUG_ON(vma->pages);
+		i915_vma_remove(vma);
+	}
+err_unpin:
 	__i915_vma_unpin(vma);
 	return ret;
 }
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
 	if (vma->pages != obj->mm.pages) {
 		GEM_BUG_ON(!vma->pages);
 		sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->pages = NULL;
 
-	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist. */
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
-
-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	i915_vma_remove(vma);
 
 destroy:
 	if (unlikely(i915_vma_is_closed(vma)))
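The i915_vma refactor above pulls the node/list/pin teardown into i915_vma_remove() and splits the single err label in two, so a failed bind only removes a node that this very call inserted. A minimal sketch of the two-label unwind idiom (all names hypothetical; vma_bind() fails on purpose to exercise the path):

    struct vma { int inserted, pinned; };

    static int vma_insert(struct vma *v)  { v->inserted = 1; return 0; }
    static int vma_bind(struct vma *v)    { (void)v; return -1; /* simulated failure */ }
    static void vma_remove(struct vma *v) { v->inserted = 0; }
    static void vma_unpin(struct vma *v)  { v->pinned = 0; }

    static int do_pin(struct vma *vma, int was_bound)
    {
            int ret;

            vma->pinned = 1;
            if (!was_bound) {
                    ret = vma_insert(vma);
                    if (ret)
                            goto err_unpin;         /* nothing inserted yet */
            }

            ret = vma_bind(vma);
            if (ret)
                    goto err_remove;                /* undo only our own insert */

            return 0;

    err_remove:
            if (!was_bound)
                    vma_remove(vma);
    err_unpin:
            vma_unpin(vma);
            return ret;
    }

    int main(void)
    {
            struct vma v = { 0, 0 };

            /* failure must leave the vma neither inserted nor pinned */
            return (do_pin(&v, 0) != 0 && !v.inserted && !v.pinned) ? 0 : 1;
    }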
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 01341670738f..3282b0f4b134 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
 	crtc->base.mode = crtc->base.state->mode;
 
-	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
-		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
-		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
 	/*
 	 * Update pipe size and adjust fitter if needed: the reason for this is
 	 * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc->config->scaler_state;
 
-	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
 	if (crtc->config->pch_pfit.enabled) {
 		int id;
 
-		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
-			DRM_ERROR("Requesting pfit without getting a scaler first\n");
+		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
 			return;
-		}
 
 		id = scaler_state->scaler_id;
 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
-		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
 	}
 }
 
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 	} while (progress);
 }
 
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+	struct intel_atomic_state *state, *next;
+	struct llist_node *freed;
+
+	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+	llist_for_each_entry_safe(state, next, freed, freed)
+		drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+	intel_atomic_helper_free_state(dev_priv);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 * can happen also when the device is completely off.
 	 */
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+	intel_atomic_helper_free_state(dev_priv);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 		to_intel_atomic_state(old_crtc_state->state);
 	bool modeset = needs_modeset(crtc->state);
 
+	if (!modeset &&
+	    (intel_cstate->base.color_mgmt_changed ||
+	     intel_cstate->update_pipe)) {
+		intel_color_set_csc(crtc->state);
+		intel_color_load_luts(crtc->state);
+	}
+
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(intel_crtc);
 
 	if (modeset)
 		goto out;
 
-	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
-		intel_color_set_csc(crtc->state);
-		intel_color_load_luts(crtc->state);
-	}
-
 	if (intel_cstate->update_pipe)
 		intel_update_pipe_config(intel_crtc, old_intel_cstate);
 	else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ fail:
 	drm_modeset_acquire_fini(&ctx);
 }
 
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-	struct intel_atomic_state *state, *next;
-	struct llist_node *freed;
-
-	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-	llist_for_each_entry_safe(state, next, freed, freed)
-		drm_atomic_state_put(&state->base);
-}
-
 int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
 	dev->mode_config.funcs = &intel_mode_funcs;
 
 	INIT_WORK(&dev_priv->atomic_helper.free_work,
-		  intel_atomic_helper_free_state);
+		  intel_atomic_helper_free_state_worker);
 
 	intel_init_quirks(dev);
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 1b8ba2e77539..2d449fb5d1d2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 			      bool *enabled, int width, int height)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-	unsigned long conn_configured, mask;
+	unsigned long conn_configured, conn_seq, mask;
 	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
 	int i, j;
 	bool *save_enabled;
 	bool fallback = true;
 	int num_connectors_enabled = 0;
 	int num_connectors_detected = 0;
-	int pass = 0;
 
 	save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
 	if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 	mask = BIT(count) - 1;
 	conn_configured = 0;
 retry:
+	conn_seq = conn_configured;
 	for (i = 0; i < count; i++) {
 		struct drm_fb_helper_connector *fb_conn;
 		struct drm_connector *connector;
@@ -387,7 +387,7 @@ retry:
 		if (conn_configured & BIT(i))
 			continue;
 
-		if (pass == 0 && !connector->has_tile)
+		if (conn_seq == 0 && !connector->has_tile)
 			continue;
 
 		if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ retry:
 		conn_configured |= BIT(i);
 	}
 
-	if ((conn_configured & mask) != mask) {
-		pass++;
+	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
 		goto retry;
-	}
 
 	/*
 	 * If the BIOS didn't enable everything it could, fall back to have the
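The fbdev fix above swaps a pass counter for a progress check: snapshot the configured-connector bitmask at the top of each pass and retry only if the pass added a bit; conn_seq == 0 also doubles as the old "first pass" test for tiled connectors. The shape of the idiom, with an invented connector that never configures to show termination:

    #include <stdio.h>

    int main(void)
    {
            unsigned long mask = 0xFUL;     /* four hypothetical connectors */
            unsigned long configured = 0, seq;
            int i;

            do {
                    seq = configured;       /* snapshot before this pass */
                    for (i = 0; i < 4; i++) {
                            if (configured & (1UL << i))
                                    continue;
                            if (i != 2)     /* connector 2 never configures */
                                    configured |= 1UL << i;
                    }
                    /* retry only while a pass still makes progress */
            } while ((configured & mask) != mask && configured != seq);

            printf("configured %#lx without spinning forever\n", configured);
            return 0;
    }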
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 249623d45be0..940bab22d464 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		break;
 	}
 
+	/* When byt can survive without system hang with dynamic
+	 * sw freq adjustments, this restriction can be lifted.
+	 */
+	if (IS_VALLEYVIEW(dev_priv))
+		goto skip_hw_write;
+
 	I915_WRITE(GEN6_RP_UP_EI,
 		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
 
+skip_hw_write:
 	dev_priv->rps.power = new_power;
 	dev_priv->rps.up_threshold = threshold_up;
 	dev_priv->rps.down_threshold = threshold_down;
@@ -7916,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
  * @timeout_base_ms: timeout for polling with preemption enabled
  *
  * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
  * The request is acknowledged once the PCODE reply dword equals @reply after
  * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
  * preemption disabled.
  *
  * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 	 * worst case) _and_ PCODE was busy for some reason even after a
 	 * (queued) request and @timeout_base_ms delay. As a workaround retry
 	 * the poll with preemption disabled to maximize the number of
-	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
 	 * account for interrupts that could reduce the number of these
-	 * requests.
+	 * requests, and for any quirks of the PCODE firmware that delays
+	 * the request completion.
 	 */
 	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
 	WARN_ON_ONCE(timeout_base_ms > 3);
 	preempt_disable();
-	ret = wait_for_atomic(COND, 10);
+	ret = wait_for_atomic(COND, 50);
 	preempt_enable();
 
 out:
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9ef54688872a..9481ca9a3ae7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
 	int scaler_id = plane_state->scaler_id;
 	const struct intel_scaler *scaler;
 
-	DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
-		      plane_id, PS_PLANE_SEL(plane_id));
-
 	scaler = &crtc_state->scaler_state.scalers[scaler_id];
 
 	I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abe08885a5ba..b7ff592b14f5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 
 	for_each_fw_domain_masked(d, fw_domains, dev_priv)
 		fw_domain_wait_ack(d);
+
+	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
123 125
124static void 126static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 		fw_domain_put(d);
 		fw_domain_posting_read(d);
 	}
+
+	dev_priv->uncore.fw_domains_active &= ~fw_domains;
 }
 
 static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 	if (WARN_ON(domain->wake_count == 0))
 		domain->wake_count++;
 
-	if (--domain->wake_count == 0) {
+	if (--domain->wake_count == 0)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
-		dev_priv->uncore.fw_domains_active &= ~domain->mask;
-	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 		fw_domains &= ~domain->mask;
 	}
 
-	if (fw_domains) {
+	if (fw_domains)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-		dev_priv->uncore.fw_domains_active |= fw_domains;
-	}
 }
 
 /**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
 		fw_domain_arm_timer(domain);
 
 	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
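The uncore fix is a bookkeeping move: fw_domains_active is now maintained inside fw_domains_get()/fw_domains_put() themselves instead of at three separate call sites, so the software tracking cannot drift from what the hardware was actually asked to do. The co-location pattern in miniature (hypothetical names):

    #include <assert.h>

    struct uncore { unsigned int active; };

    static void fw_get(struct uncore *u, unsigned int domains)
    {
            /* ...wake the hardware domains here... */
            u->active |= domains;   /* bookkeeping lives with the action */
    }

    static void fw_put(struct uncore *u, unsigned int domains)
    {
            /* ...release the hardware domains here... */
            u->active &= ~domains;
    }

    int main(void)
    {
            struct uncore u = { 0 };

            fw_get(&u, 0x3);
            fw_put(&u, 0x1);
            assert(u.active == 0x2);        /* tracking follows get/put exactly */
            return 0;
    }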
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c35d813..ee5883f59be5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 	struct drm_gem_object *obj = buffer->priv;
 	int ret = 0;
 
-	if (WARN_ON(!obj->filp))
-		return -EINVAL;
-
 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d12b8978142f..72e1588580a1 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		    (rdev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
 		}
+	} else if (rdev->family == CHIP_OLAND) {
+		if ((rdev->pdev->device == 0x6604) &&
+		    (rdev->pdev->subsystem_vendor == 0x1028) &&
+		    (rdev->pdev->subsystem_device == 0x066F)) {
+			max_sclk = 75000;
+		}
 	}
 
 	if (rps->vce_active) {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index f80bf9385e41..d745e8b50fb8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	unsigned long flags;
 
 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 	mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
 			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
 			  LCDC_PALETTE_LOAD_MODE_MASK);
+
+	/* There is no real chance for a race here as the time stamp
+	 * is taken before the raster DMA is started. The spin-lock is
+	 * taken to have a memory barrier after taking the time-stamp
+	 * and to avoid a context switch between taking the stamp and
+	 * enabling the raster.
+	 */
+	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+	tilcdc_crtc->last_vblank = ktime_get();
 	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
 	drm_crtc_vblank_on(crtc);
 
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
 	}
 
 	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-	tilcdc_crtc->last_vblank = 0;
 
 	tilcdc_crtc->enabled = false;
 	mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	unsigned long flags;
 
 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 	drm_framebuffer_reference(fb);
 
 	crtc->primary->fb = fb;
+	tilcdc_crtc->event = event;
 
-	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+	mutex_lock(&tilcdc_crtc->enable_lock);
 
-	if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+	if (tilcdc_crtc->enabled) {
+		unsigned long flags;
 		ktime_t next_vblank;
 		s64 tdiff;
 
-		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
-			1000000 / crtc->hwmode.vrefresh);
+		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
 
+		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+					   1000000 / crtc->hwmode.vrefresh);
 		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
 
 		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
 			tilcdc_crtc->next_fb = fb;
-	}
-
-	if (tilcdc_crtc->next_fb != fb)
-		set_scanout(crtc, fb);
+		else
+			set_scanout(crtc, fb);
 
-	tilcdc_crtc->event = event;
+		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+	}
 
-	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+	mutex_unlock(&tilcdc_crtc->enable_lock);
 
 	return 0;
 }
@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
 
 fail:
 	tilcdc_crtc_destroy(crtc);
-	return -ENOMEM;
+	return ret;
 }
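The tilcdc enable path now takes the last_vblank stamp and sets RASTER_ENABLE inside one irq-saving critical section, so neither an interrupt nor a context switch can widen the gap between the stamp and the hardware start. A user-space analogue of the shape, with a pthread mutex standing in for the spinlock (build with -lpthread):

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct timespec last_vblank;

    static void crtc_enable(void)
    {
            pthread_mutex_lock(&lock);
            /* stamp first, start second, nothing runs in between */
            clock_gettime(CLOCK_MONOTONIC, &last_vblank);
            /* ...the RASTER_ENABLE register write would go here... */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            crtc_enable();
            return 0;
    }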
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 407cb55df6ac..7fb97863c945 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -33,8 +33,8 @@ extern "C" {
 #define OMAP_PARAM_CHIPSET_ID	1	/* ie. 0x3430, 0x4430, etc */
 
 struct drm_omap_param {
-	uint64_t param;			/* in */
-	uint64_t value;			/* in (set_param), out (get_param) */
+	__u64 param;			/* in */
+	__u64 value;			/* in (set_param), out (get_param) */
 };
 
 #define OMAP_BO_SCANOUT		0x00000001	/* scanout capable (phys contiguous) */
@@ -53,18 +53,18 @@ struct drm_omap_param {
 #define OMAP_BO_TILED		(OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
 
 union omap_gem_size {
-	uint32_t bytes;		/* (for non-tiled formats) */
+	__u32 bytes;		/* (for non-tiled formats) */
 	struct {
-		uint16_t width;
-		uint16_t height;
+		__u16 width;
+		__u16 height;
 	} tiled;		/* (for tiled formats) */
 };
 
 struct drm_omap_gem_new {
 	union omap_gem_size size;	/* in */
-	uint32_t flags;			/* in */
-	uint32_t handle;		/* out */
-	uint32_t __pad;
+	__u32 flags;			/* in */
+	__u32 handle;			/* out */
+	__u32 __pad;
 };
 
 /* mask of operations: */
@@ -74,33 +74,33 @@ enum omap_gem_op {
 };
 
 struct drm_omap_gem_cpu_prep {
-	uint32_t handle;		/* buffer handle (in) */
-	uint32_t op;			/* mask of omap_gem_op (in) */
+	__u32 handle;			/* buffer handle (in) */
+	__u32 op;			/* mask of omap_gem_op (in) */
 };
 
 struct drm_omap_gem_cpu_fini {
-	uint32_t handle;		/* buffer handle (in) */
-	uint32_t op;			/* mask of omap_gem_op (in) */
+	__u32 handle;			/* buffer handle (in) */
+	__u32 op;			/* mask of omap_gem_op (in) */
 	/* TODO maybe here we pass down info about what regions are touched
 	 * by sw so we can be clever about cache ops?  For now a placeholder,
 	 * set to zero and we just do full buffer flush..
 	 */
-	uint32_t nregions;
-	uint32_t __pad;
+	__u32 nregions;
+	__u32 __pad;
 };
 
 struct drm_omap_gem_info {
-	uint32_t handle;		/* buffer handle (in) */
-	uint32_t pad;
-	uint64_t offset;		/* mmap offset (out) */
+	__u32 handle;			/* buffer handle (in) */
+	__u32 pad;
+	__u64 offset;			/* mmap offset (out) */
 	/* note: in case of tiled buffers, the user virtual size can be
 	 * different from the physical size (ie. how many pages are needed
 	 * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
 	 * This size here is the one that should be used if you want to
 	 * mmap() the buffer:
 	 */
-	uint32_t size;			/* virtual size for mmap'ing (out) */
-	uint32_t __pad;
+	__u32 size;			/* virtual size for mmap'ing (out) */
+	__u32 __pad;
 };
 
 #define DRM_OMAP_GET_PARAM		0x00
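The whole omap_drm.h hunk is one mechanical change: uapi headers must spell fixed-width types as __u16/__u32/__u64 from <linux/types.h>, which the header can include on its own, instead of uint*_t, which only exists if the including program pulled in <stdint.h> first. A minimal userspace compile check (assumes kernel or libdrm headers providing drm/omap_drm.h are installed):

    /* Deliberately no <stdint.h> anywhere in this translation unit. */
    #include <drm/omap_drm.h>

    int main(void)
    {
            struct drm_omap_param p = { .param = OMAP_PARAM_CHIPSET_ID };

            return (int)p.value;    /* __u64 fields work with no extra includes */
    }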