author     Dave Airlie <airlied@redhat.com>   2017-08-09 20:17:31 -0400
committer  Dave Airlie <airlied@redhat.com>   2017-08-09 20:17:31 -0400
commit     1c52a78e71d1cb58149861f7b46827458c766be2 (patch)
tree       4e2041eb7351891235277b5db7dd86f2680999e1 /drivers/gpu
parent     9157822b9d7282611093886acd237168691de383 (diff)
parent     1e2ba788787c86f527eca6ffd9adb97d691a810e (diff)
Merge tag 'drm-intel-fixes-2017-08-09-1' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes
drm/i915 fixes for v4.13-rc5

* tag 'drm-intel-fixes-2017-08-09-1' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: fix backlight invert for non-zero minimum brightness
  drm/i915/shrinker: Wrap need_resched() inside preempt-disable
  drm/i915/perf: fix flex eu registers programming
  drm/i915: Fix out-of-bounds array access in bdw_load_gamma_lut
  drm/i915/gvt: Change the max length of mmio_reg_rw from 4 to 8
  drm/i915/gvt: Initialize MMIO Block with HW state
  drm/i915/gvt: clean workload queue if error happened
  drm/i915/gvt: change resetting to resetting_eng
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c        27
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c        11
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h             14
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c        38
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c        3
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c             8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c   11
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c            4
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c          1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c          2
10 files changed, 82 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242..1648887d3f55 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 			((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;
 
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* If workload->status is not successful, the HW GPU
+		 * hit a hang or something went wrong in i915/GVT, and
+		 * GVT won't inject a context switch interrupt to the
+		 * guest. So this error is effectively a vGPU hang for
+		 * the guest, and we should emulate a vGPU hang: if
+		 * there are pending workloads already submitted from
+		 * the guest, clean them up like the HW GPU does.
+		 *
+		 * If we are in the middle of an engine reset, pending
+		 * workloads won't be submitted to the HW GPU and will
+		 * be cleaned up later during the reset, so cleaning
+		 * them up here has no impact.
+		 */
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
 		goto out;
+	}
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
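
The key change in this hunk: vgpu->resetting was a single boolean, so any reset in flight looked the same for every ring; the new resetting_eng field is a per-engine bitmask, and the check above only fires for the workload's own ring. A minimal sketch of the pattern, with simplified stand-ins for the i915 engine IDs and ENGINE_MASK():

#include <stdio.h>

/* Simplified stand-ins for the i915 engine IDs and ENGINE_MASK(). */
enum { RCS, BCS, VCS, VECS, NUM_ENGINES };
#define ENGINE_MASK(id) (1u << (id))

int main(void)
{
	/* Mark only the blitter engine as resetting. */
	unsigned int resetting_eng = ENGINE_MASK(BCS);

	for (int ring_id = 0; ring_id < NUM_ENGINES; ring_id++)
		printf("ring %d resetting: %s\n", ring_id,
		       (resetting_eng & ENGINE_MASK(ring_id)) ? "yes" : "no");

	resetting_eng = 0;	/* reset done: clear the whole mask */
	return 0;
}

In the vgpu.c hunk further down, the mask is set to ALL_ENGINES for a device-model-level reset (dmlr) or to the caller's engine_mask otherwise, and cleared to 0 once the reset completes.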
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i;
+	int i, j;
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
 		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+	for (i = 0; i < num; i++, block++) {
+		for (j = 0; j < block->size; j += 4)
+			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+					block->offset) + j));
+	}
+
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
 	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
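
This loop snapshots each special MMIO block into the firmware image in 4-byte strides, so the saved HW state now covers the blocks as well as the individually tracked registers. A rough userspace analogue of the stride-4 copy; mmio_base here is a hypothetical mapped register window standing in for I915_READ_NOTRACE():

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for a mapped MMIO register window. */
static volatile uint32_t mmio_base[0x1000 / 4];

/* Copy one register block into a snapshot image, 4 bytes at a time,
 * mirroring the stride-4 loop added to expose_firmware_sysfs(). */
static void snapshot_block(uint8_t *image, uint32_t offset, uint32_t size)
{
	for (uint32_t j = 0; j < size; j += 4) {
		uint32_t val = mmio_base[(offset + j) / 4];
		memcpy(image + offset + j, &val, sizeof(val));
	}
}

int main(void)
{
	static uint8_t image[0x1000];

	snapshot_block(image, 0x100, 0x40);	/* one 64-byte block */
	return 0;
}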
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..2964a4d01a66 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 
+	struct gvt_mmio_block *mmio_block;
+	unsigned int num_mmio_block;
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 	unsigned int num_tracked_mmio;
 };
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6..feed9921b3b3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-	unsigned int device;
-	i915_reg_t offset;
-	unsigned int size;
-	gvt_mmio_func read;
-	gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
 					      unsigned int offset)
 {
 	unsigned long device = intel_gvt_get_device_type(gvt);
-	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+	for (i = 0; i < num; i++, block++) {
 		if (!(device & block->device))
 			continue;
 		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 	gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 	}
 
+	gvt->mmio.mmio_block = mmio_blocks;
+	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
 	gvt_dbg_mmio("traced %u virtual mmio registers\n",
 		     gvt->mmio.num_tracked_mmio);
 	return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (WARN_ON(bytes > 8))
 		return -EINVAL;
 
 	/*
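
With the table now published through gvt->mmio.mmio_block and num_mmio_block, find_mmio_block() scans device-owned state instead of a file-local array, and the firmware snapshot code above can reach the same table. The lookup itself is a linear scan filtered by a device-generation mask plus a half-open range check; a compact sketch with types simplified from the GVT definitions:

#include <stddef.h>

/* Types simplified from the GVT definitions. */
struct mmio_block {
	unsigned int device;	/* mask of device generations it applies to */
	unsigned int offset;	/* start of the register block */
	unsigned int size;	/* block length in bytes */
};

static struct mmio_block *
find_block(struct mmio_block *blocks, int num,
	   unsigned long device, unsigned int offset)
{
	for (int i = 0; i < num; i++) {
		if (!(device & blocks[i].device))
			continue;	/* wrong device generation */
		/* half-open range check: [offset, offset + size) */
		if (offset >= blocks[i].offset &&
		    offset < blocks[i].offset + blocks[i].size)
			return &blocks[i];
	}
	return NULL;
}

int main(void)
{
	struct mmio_block blocks[] = { { 0x1, 0x1000, 0x400 } };

	return find_block(blocks, 1, 0x1, 0x1200) ? 0 : 1;
}

The bytes > 8 relaxation above pairs with this: a whole-block access can now be up to two registers (8 bytes) wide instead of one.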
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88..22e08eb2d0b7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	i915_gem_request_put(fetch_and_zero(&workload->req));
 
-	if (!workload->status && !vgpu->resetting) {
+	if (!workload->status && !(vgpu->resetting_eng &
+				ENGINE_MASK(ring_id))) {
 		update_guest_context(workload);
 
 		for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea0..3deadcbd5a24 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
 		     vgpu->id, dmlr, engine_mask);
-	vgpu->resetting = true;
+
+	vgpu->resetting_eng = resetting_eng;
 
 	intel_vgpu_stop_schedule(vgpu);
 	/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		mutex_lock(&gvt->lock);
 	}
 
-	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+	intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		}
 	}
 
-	vgpu->resetting = false;
+	vgpu->resetting_eng = 0;
 	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
 	gvt_dbg_core("------------------------------------------\n");
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add11..77fb39808131 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 		return true;
 
 	case MUTEX_TRYLOCK_FAILED:
+		*unlock = false;
+		preempt_disable();
 		do {
 			cpu_relax();
 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-	case MUTEX_TRYLOCK_SUCCESS:
 				*unlock = true;
-				return true;
+				break;
 			}
 		} while (!need_resched());
+		preempt_enable();
+		return *unlock;
 
-		return false;
+	case MUTEX_TRYLOCK_SUCCESS:
+		*unlock = true;
+		return true;
 	}
 
 	BUG();
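
The need_resched() fix matters because on a preemptible kernel the spinning task can simply be preempted, so the loop may never observe need_resched() returning true as its stopping condition; with preemption disabled, a pending reschedule stays visible through the flag and bounds the spin. The restructuring also removes the case label that jumped into the middle of the do/while. A userspace analogue of the bounded trylock spin, where pthread_mutex_trylock() stands in for mutex_trylock() and a fixed retry budget stands in for need_resched(), which has no userspace equivalent:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Spin on trylock for a bounded number of attempts; the budget is
 * illustrative, playing the role of need_resched() in the kernel. */
static bool shrinker_lock_sketch(bool *unlock)
{
	*unlock = false;
	for (int budget = 1000; budget > 0; budget--) {
		if (pthread_mutex_trylock(&big_lock) == 0) {
			*unlock = true;
			break;
		}
	}
	return *unlock;	/* like the patch: report lock ownership */
}

int main(void)
{
	bool unlock;

	if (shrinker_lock_sketch(&unlock))
		pthread_mutex_unlock(&big_lock);
	return 0;
}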
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cf..f33d90226704 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
 	u32 *cs;
 	int i;
 
-	cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
 	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
 	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
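
The bug class fixed here is a count maintained separately from the array it describes: n_flex_regs could disagree with the number of registers actually emitted, so the ring reservation and the MI_LOAD_REGISTER_IMM count could drift apart. Deriving both from ARRAY_SIZE(flex_mmio) removes the second source of truth. The general pattern in miniature (flex_regs is an illustrative table, not the i915 one):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative register table, not the real i915 flex_mmio. */
static const unsigned int flex_regs[] = { 0x2000, 0x2004, 0x2008 };

int main(void)
{
	/* Buffer size and loop bound come from the same expression,
	 * so they cannot drift apart when entries are added. */
	unsigned int buf[ARRAY_SIZE(flex_regs)];

	for (size_t i = 0; i < ARRAY_SIZE(flex_regs); i++)
		buf[i] = flex_regs[i];

	printf("%zu registers programmed, last 0x%x\n",
	       ARRAY_SIZE(flex_regs), buf[ARRAY_SIZE(flex_regs) - 1]);
	return 0;
}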
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..17c4ae7e4e7c 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 	}
 
 	/* Program the max register to clamp values > 1.0. */
+	i = lut_size - 1;
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
 		   drm_color_lut_extract(lut[i].red, 16));
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
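
The out-of-bounds access came from reusing a loop index after its loop: once the preceding for loop that programs the LUT entries finishes, i equals lut_size, so lut[i] read one entry past the end of the table. Resetting i to lut_size - 1 points the max-register writes at the last valid entry. The bug shape in miniature:

#include <stdio.h>

int main(void)
{
	int lut[4] = { 10, 20, 30, 40 };
	int lut_size = 4;
	int i;

	for (i = 0; i < lut_size; i++)
		;	/* program lut[i] ... */

	/* Here i == lut_size, so lut[i] is one past the end. */
	i = lut_size - 1;	/* the fix: last valid entry */
	printf("max entry: %d\n", lut[i]);
	return 0;
}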
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869..593349be8b9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
 	if (i915.invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		return panel->backlight.max - val;
+		return panel->backlight.max - val + panel->backlight.min;
 	}
 
 	return val;
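
Worth spelling out the arithmetic of the backlight fix: with a non-zero minimum, the old max - val could drop below the panel's minimum brightness, and inverting min gave max - min instead of max. Adding min back keeps the result inside [min, max] and makes the mapping symmetric: for example, with min = 10 and max = 100, val = 10 inverts to 100 and val = 100 inverts to 10, where the old formula gave 90 and 0. A one-line demo:

#include <stdio.h>

/* Invert brightness while honoring a non-zero minimum. */
static unsigned int invert(unsigned int val,
			   unsigned int min, unsigned int max)
{
	return max - val + min;
}

int main(void)
{
	/* min = 10, max = 100: the endpoints swap cleanly. */
	printf("%u %u\n", invert(10, 10, 100), invert(100, 10, 100));
	/* prints: 100 10 */
	return 0;
}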