diff options
| author | Dave Airlie <airlied@redhat.com> | 2017-04-28 15:50:27 -0400 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2017-04-28 15:50:27 -0400 |
| commit | 73ba2d5c2bd4ecfec8fe37f20e962889b8a4c972 (patch) | |
| tree | 19b1974a07f2d5ad811b503306f572500e36be80 | |
| parent | 53cecf1b0e301a881d0096568b58982d7474a8ae (diff) | |
| parent | 88326ef05b262f681d837ecf65db10a7edb609f1 (diff) | |
Merge tag 'drm-intel-next-fixes-2017-04-27' of git://anongit.freedesktop.org/git/drm-intel into drm-next
drm/i915 and gvt fixes for drm-next/v4.12
* tag 'drm-intel-next-fixes-2017-04-27' of git://anongit.freedesktop.org/git/drm-intel:
drm/i915: Confirm the request is still active before adding it to the await
drm/i915: Avoid busy-spinning on VLV_GTLC_PW_STATUS mmio
drm/i915/selftests: Allocate inode/file dynamically
drm/i915: Fix system hang with EI UP masked on Haswell
drm/i915: checking for NULL instead of IS_ERR() in mock selftests
drm/i915: Perform link quality check unconditionally during long pulse
drm/i915: Fix use after free in lpe_audio_platdev_destroy()
drm/i915: Use the right mapping_gfp_mask for final shmem allocation
drm/i915: Make legacy cursor updates more unsynced
drm/i915: Apply a cond_resched() to the saturated signaler
drm/i915: Park the signaler before sleeping
drm/i915/gvt: fix a bounds check in ring_id_to_context_switch_event()
drm/i915/gvt: Fix PTE write flush for taking runtime pm properly
drm/i915/gvt: remove some debug messages in scheduler timer handler
drm/i915/gvt: add mmio init for virtual display
drm/i915/gvt: use directly assignment for structure copying
drm/i915/gvt: remove redundant ring id check which cause significant CPU misprediction
drm/i915/gvt: remove redundant platform check for mocs load/restore
drm/i915/gvt: Align render mmio list to cacheline
drm/i915/gvt: cleanup some too chatty scheduler message
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/cmd_parser.c | 8 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/display.c | 29 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/execlist.c | 8 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/gtt.c | 5 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/render.c | 10 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/sched_policy.c | 17 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/scheduler.c | 5 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c | 46 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 2 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem_request.c | 3 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 4 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_breadcrumbs.c | 21 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 31 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_dp.c | 15 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_lpe_audio.c | 9 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/selftests/mock_drm.c | 45 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/selftests/mock_request.c | 2 |
17 files changed, 163 insertions, 97 deletions
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 94f2e701e4d4..41b2c3aaa04a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
| @@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id) | |||
| 616 | { | 616 | { |
| 617 | struct decode_info *d_info; | 617 | struct decode_info *d_info; |
| 618 | 618 | ||
| 619 | if (ring_id >= I915_NUM_ENGINES) | ||
| 620 | return INVALID_OP; | ||
| 621 | |||
| 622 | d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; | 619 | d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; |
| 623 | if (d_info == NULL) | 620 | if (d_info == NULL) |
| 624 | return INVALID_OP; | 621 | return INVALID_OP; |
| @@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id) | |||
| 661 | struct decode_info *d_info; | 658 | struct decode_info *d_info; |
| 662 | int i; | 659 | int i; |
| 663 | 660 | ||
| 664 | if (ring_id >= I915_NUM_ENGINES) | ||
| 665 | return; | ||
| 666 | |||
| 667 | d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; | 661 | d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; |
| 668 | if (d_info == NULL) | 662 | if (d_info == NULL) |
| 669 | return; | 663 | return; |
| @@ -2483,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) | |||
| 2483 | 2477 | ||
| 2484 | t1 = get_cycles(); | 2478 | t1 = get_cycles(); |
| 2485 | 2479 | ||
| 2486 | memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state)); | 2480 | s_before_advance_custom = *s; |
| 2487 | 2481 | ||
| 2488 | if (info->handler) { | 2482 | if (info->handler) { |
| 2489 | ret = info->handler(s); | 2483 | ret = info->handler(s); |
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 4cf2b29fbaa1..e0261fcc5b50 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
| @@ -189,17 +189,44 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | |||
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { | 191 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { |
| 192 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; | ||
| 193 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; | 192 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; |
| 193 | vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= | ||
| 194 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | | ||
| 195 | TRANS_DDI_PORT_MASK); | ||
| 196 | vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= | ||
| 197 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | | ||
| 198 | (PORT_B << TRANS_DDI_PORT_SHIFT) | | ||
| 199 | TRANS_DDI_FUNC_ENABLE); | ||
| 200 | vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; | ||
| 201 | vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; | ||
| 202 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; | ||
| 194 | } | 203 | } |
| 195 | 204 | ||
| 196 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { | 205 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { |
| 197 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; | 206 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; |
| 207 | vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= | ||
| 208 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | | ||
| 209 | TRANS_DDI_PORT_MASK); | ||
| 210 | vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= | ||
| 211 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | | ||
| 212 | (PORT_C << TRANS_DDI_PORT_SHIFT) | | ||
| 213 | TRANS_DDI_FUNC_ENABLE); | ||
| 214 | vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; | ||
| 215 | vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; | ||
| 198 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; | 216 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; |
| 199 | } | 217 | } |
| 200 | 218 | ||
| 201 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { | 219 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { |
| 202 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; | 220 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; |
| 221 | vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= | ||
| 222 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | | ||
| 223 | TRANS_DDI_PORT_MASK); | ||
| 224 | vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= | ||
| 225 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | | ||
| 226 | (PORT_D << TRANS_DDI_PORT_SHIFT) | | ||
| 227 | TRANS_DDI_FUNC_ENABLE); | ||
| 228 | vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; | ||
| 229 | vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; | ||
| 203 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; | 230 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; |
| 204 | } | 231 | } |
| 205 | 232 | ||
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 536bde8638c8..dca989eb2d42 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
| @@ -56,8 +56,8 @@ static int context_switch_events[] = { | |||
| 56 | 56 | ||
| 57 | static int ring_id_to_context_switch_event(int ring_id) | 57 | static int ring_id_to_context_switch_event(int ring_id) |
| 58 | { | 58 | { |
| 59 | if (WARN_ON(ring_id < RCS && ring_id > | 59 | if (WARN_ON(ring_id < RCS || |
| 60 | ARRAY_SIZE(context_switch_events))) | 60 | ring_id >= ARRAY_SIZE(context_switch_events))) |
| 61 | return -EINVAL; | 61 | return -EINVAL; |
| 62 | 62 | ||
| 63 | return context_switch_events[ring_id]; | 63 | return context_switch_events[ring_id]; |
| @@ -687,9 +687,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, | |||
| 687 | } | 687 | } |
| 688 | 688 | ||
| 689 | if (emulate_schedule_in) | 689 | if (emulate_schedule_in) |
| 690 | memcpy(&workload->elsp_dwords, | 690 | workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords; |
| 691 | &vgpu->execlist[ring_id].elsp_dwords, | ||
| 692 | sizeof(workload->elsp_dwords)); | ||
| 693 | 691 | ||
| 694 | gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", | 692 | gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", |
| 695 | workload, ring_id, head, tail, start, ctl); | 693 | workload, ring_id, head, tail, start, ctl); |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6da4e444e572..c6f0077f590d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
| @@ -2294,12 +2294,15 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt) | |||
| 2294 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) | 2294 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) |
| 2295 | { | 2295 | { |
| 2296 | struct intel_gvt *gvt = vgpu->gvt; | 2296 | struct intel_gvt *gvt = vgpu->gvt; |
| 2297 | struct drm_i915_private *dev_priv = gvt->dev_priv; | ||
| 2297 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 2298 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
| 2298 | u32 index; | 2299 | u32 index; |
| 2299 | u32 offset; | 2300 | u32 offset; |
| 2300 | u32 num_entries; | 2301 | u32 num_entries; |
| 2301 | struct intel_gvt_gtt_entry e; | 2302 | struct intel_gvt_gtt_entry e; |
| 2302 | 2303 | ||
| 2304 | intel_runtime_pm_get(dev_priv); | ||
| 2305 | |||
| 2303 | memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); | 2306 | memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); |
| 2304 | e.type = GTT_TYPE_GGTT_PTE; | 2307 | e.type = GTT_TYPE_GGTT_PTE; |
| 2305 | ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); | 2308 | ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); |
| @@ -2314,6 +2317,8 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) | |||
| 2314 | num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; | 2317 | num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; |
| 2315 | for (offset = 0; offset < num_entries; offset++) | 2318 | for (offset = 0; offset < num_entries; offset++) |
| 2316 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); | 2319 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); |
| 2320 | |||
| 2321 | intel_runtime_pm_put(dev_priv); | ||
| 2317 | } | 2322 | } |
| 2318 | 2323 | ||
| 2319 | /** | 2324 | /** |
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index a7b665e9bbad..c6e7972ac21d 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c | |||
| @@ -44,7 +44,7 @@ struct render_mmio { | |||
| 44 | u32 value; | 44 | u32 value; |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | static struct render_mmio gen8_render_mmio_list[] = { | 47 | static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = { |
| 48 | {RCS, _MMIO(0x229c), 0xffff, false}, | 48 | {RCS, _MMIO(0x229c), 0xffff, false}, |
| 49 | {RCS, _MMIO(0x2248), 0x0, false}, | 49 | {RCS, _MMIO(0x2248), 0x0, false}, |
| 50 | {RCS, _MMIO(0x2098), 0x0, false}, | 50 | {RCS, _MMIO(0x2098), 0x0, false}, |
| @@ -75,7 +75,7 @@ static struct render_mmio gen8_render_mmio_list[] = { | |||
| 75 | {BCS, _MMIO(0x22028), 0x0, false}, | 75 | {BCS, _MMIO(0x22028), 0x0, false}, |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | static struct render_mmio gen9_render_mmio_list[] = { | 78 | static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = { |
| 79 | {RCS, _MMIO(0x229c), 0xffff, false}, | 79 | {RCS, _MMIO(0x229c), 0xffff, false}, |
| 80 | {RCS, _MMIO(0x2248), 0x0, false}, | 80 | {RCS, _MMIO(0x2248), 0x0, false}, |
| 81 | {RCS, _MMIO(0x2098), 0x0, false}, | 81 | {RCS, _MMIO(0x2098), 0x0, false}, |
| @@ -204,9 +204,6 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id) | |||
| 204 | if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) | 204 | if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) |
| 205 | return; | 205 | return; |
| 206 | 206 | ||
| 207 | if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) | ||
| 208 | return; | ||
| 209 | |||
| 210 | offset.reg = regs[ring_id]; | 207 | offset.reg = regs[ring_id]; |
| 211 | for (i = 0; i < 64; i++) { | 208 | for (i = 0; i < 64; i++) { |
| 212 | gen9_render_mocs[ring_id][i] = I915_READ(offset); | 209 | gen9_render_mocs[ring_id][i] = I915_READ(offset); |
| @@ -242,9 +239,6 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id) | |||
| 242 | if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) | 239 | if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) |
| 243 | return; | 240 | return; |
| 244 | 241 | ||
| 245 | if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) | ||
| 246 | return; | ||
| 247 | |||
| 248 | offset.reg = regs[ring_id]; | 242 | offset.reg = regs[ring_id]; |
| 249 | for (i = 0; i < 64; i++) { | 243 | for (i = 0; i < 64; i++) { |
| 250 | vgpu_vreg(vgpu, offset) = I915_READ(offset); | 244 | vgpu_vreg(vgpu, offset) = I915_READ(offset); |
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index f84959170674..79ba4b3440aa 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
| @@ -133,9 +133,6 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) | |||
| 133 | if (!scheduler->next_vgpu) | 133 | if (!scheduler->next_vgpu) |
| 134 | return; | 134 | return; |
| 135 | 135 | ||
| 136 | gvt_dbg_sched("try to schedule next vgpu %d\n", | ||
| 137 | scheduler->next_vgpu->id); | ||
| 138 | |||
| 139 | /* | 136 | /* |
| 140 | * after the flag is set, workload dispatch thread will | 137 | * after the flag is set, workload dispatch thread will |
| 141 | * stop dispatching workload for current vgpu | 138 | * stop dispatching workload for current vgpu |
| @@ -144,15 +141,10 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) | |||
| 144 | 141 | ||
| 145 | /* still have uncompleted workload? */ | 142 | /* still have uncompleted workload? */ |
| 146 | for_each_engine(engine, gvt->dev_priv, i) { | 143 | for_each_engine(engine, gvt->dev_priv, i) { |
| 147 | if (scheduler->current_workload[i]) { | 144 | if (scheduler->current_workload[i]) |
| 148 | gvt_dbg_sched("still have running workload\n"); | ||
| 149 | return; | 145 | return; |
| 150 | } | ||
| 151 | } | 146 | } |
| 152 | 147 | ||
| 153 | gvt_dbg_sched("switch to next vgpu %d\n", | ||
| 154 | scheduler->next_vgpu->id); | ||
| 155 | |||
| 156 | cur_time = ktime_get(); | 148 | cur_time = ktime_get(); |
| 157 | if (scheduler->current_vgpu) { | 149 | if (scheduler->current_vgpu) { |
| 158 | vgpu_data = scheduler->current_vgpu->sched_data; | 150 | vgpu_data = scheduler->current_vgpu->sched_data; |
| @@ -224,17 +216,12 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) | |||
| 224 | list_del_init(&vgpu_data->lru_list); | 216 | list_del_init(&vgpu_data->lru_list); |
| 225 | list_add_tail(&vgpu_data->lru_list, | 217 | list_add_tail(&vgpu_data->lru_list, |
| 226 | &sched_data->lru_runq_head); | 218 | &sched_data->lru_runq_head); |
| 227 | |||
| 228 | gvt_dbg_sched("pick next vgpu %d\n", vgpu->id); | ||
| 229 | } else { | 219 | } else { |
| 230 | scheduler->next_vgpu = gvt->idle_vgpu; | 220 | scheduler->next_vgpu = gvt->idle_vgpu; |
| 231 | } | 221 | } |
| 232 | out: | 222 | out: |
| 233 | if (scheduler->next_vgpu) { | 223 | if (scheduler->next_vgpu) |
| 234 | gvt_dbg_sched("try to schedule next vgpu %d\n", | ||
| 235 | scheduler->next_vgpu->id); | ||
| 236 | try_to_schedule_next_vgpu(gvt); | 224 | try_to_schedule_next_vgpu(gvt); |
| 237 | } | ||
| 238 | } | 225 | } |
| 239 | 226 | ||
| 240 | void intel_gvt_schedule(struct intel_gvt *gvt) | 227 | void intel_gvt_schedule(struct intel_gvt *gvt) |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index a77db2332e68..bada32b33237 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -279,11 +279,8 @@ static struct intel_vgpu_workload *pick_next_workload( | |||
| 279 | goto out; | 279 | goto out; |
| 280 | } | 280 | } |
| 281 | 281 | ||
| 282 | if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) { | 282 | if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) |
| 283 | gvt_dbg_sched("ring id %d stop - no available workload\n", | ||
| 284 | ring_id); | ||
| 285 | goto out; | 283 | goto out; |
| 286 | } | ||
| 287 | 284 | ||
| 288 | /* | 285 | /* |
| 289 | * still have current workload, maybe the workload dispatcher | 286 | * still have current workload, maybe the workload dispatcher |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7b4fa84cbc3c..3036d4835b0f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -2175,6 +2175,20 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |||
| 2175 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); | 2175 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
| 2176 | } | 2176 | } |
| 2177 | 2177 | ||
| 2178 | static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, | ||
| 2179 | u32 mask, u32 val) | ||
| 2180 | { | ||
| 2181 | /* The HW does not like us polling for PW_STATUS frequently, so | ||
| 2182 | * use the sleeping loop rather than risk the busy spin within | ||
| 2183 | * intel_wait_for_register(). | ||
| 2184 | * | ||
| 2185 | * Transitioning between RC6 states should be at most 2ms (see | ||
| 2186 | * valleyview_enable_rps) so use a 3ms timeout. | ||
| 2187 | */ | ||
| 2188 | return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, | ||
| 2189 | 3); | ||
| 2190 | } | ||
| 2191 | |||
| 2178 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) | 2192 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
| 2179 | { | 2193 | { |
| 2180 | u32 val; | 2194 | u32 val; |
| @@ -2203,8 +2217,9 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) | |||
| 2203 | 2217 | ||
| 2204 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) | 2218 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
| 2205 | { | 2219 | { |
| 2220 | u32 mask; | ||
| 2206 | u32 val; | 2221 | u32 val; |
| 2207 | int err = 0; | 2222 | int err; |
| 2208 | 2223 | ||
| 2209 | val = I915_READ(VLV_GTLC_WAKE_CTRL); | 2224 | val = I915_READ(VLV_GTLC_WAKE_CTRL); |
| 2210 | val &= ~VLV_GTLC_ALLOWWAKEREQ; | 2225 | val &= ~VLV_GTLC_ALLOWWAKEREQ; |
| @@ -2213,45 +2228,32 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) | |||
| 2213 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); | 2228 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); |
| 2214 | POSTING_READ(VLV_GTLC_WAKE_CTRL); | 2229 | POSTING_READ(VLV_GTLC_WAKE_CTRL); |
| 2215 | 2230 | ||
| 2216 | err = intel_wait_for_register(dev_priv, | 2231 | mask = VLV_GTLC_ALLOWWAKEACK; |
| 2217 | VLV_GTLC_PW_STATUS, | 2232 | val = allow ? mask : 0; |
| 2218 | VLV_GTLC_ALLOWWAKEACK, | 2233 | |
| 2219 | allow, | 2234 | err = vlv_wait_for_pw_status(dev_priv, mask, val); |
| 2220 | 1); | ||
| 2221 | if (err) | 2235 | if (err) |
| 2222 | DRM_ERROR("timeout disabling GT waking\n"); | 2236 | DRM_ERROR("timeout disabling GT waking\n"); |
| 2223 | 2237 | ||
| 2224 | return err; | 2238 | return err; |
| 2225 | } | 2239 | } |
| 2226 | 2240 | ||
| 2227 | static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, | 2241 | static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, |
| 2228 | bool wait_for_on) | 2242 | bool wait_for_on) |
| 2229 | { | 2243 | { |
| 2230 | u32 mask; | 2244 | u32 mask; |
| 2231 | u32 val; | 2245 | u32 val; |
| 2232 | int err; | ||
| 2233 | 2246 | ||
| 2234 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; | 2247 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; |
| 2235 | val = wait_for_on ? mask : 0; | 2248 | val = wait_for_on ? mask : 0; |
| 2236 | if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) | ||
| 2237 | return 0; | ||
| 2238 | |||
| 2239 | DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", | ||
| 2240 | onoff(wait_for_on), | ||
| 2241 | I915_READ(VLV_GTLC_PW_STATUS)); | ||
| 2242 | 2249 | ||
| 2243 | /* | 2250 | /* |
| 2244 | * RC6 transitioning can be delayed up to 2 msec (see | 2251 | * RC6 transitioning can be delayed up to 2 msec (see |
| 2245 | * valleyview_enable_rps), use 3 msec for safety. | 2252 | * valleyview_enable_rps), use 3 msec for safety. |
| 2246 | */ | 2253 | */ |
| 2247 | err = intel_wait_for_register(dev_priv, | 2254 | if (vlv_wait_for_pw_status(dev_priv, mask, val)) |
| 2248 | VLV_GTLC_PW_STATUS, mask, val, | ||
| 2249 | 3); | ||
| 2250 | if (err) | ||
| 2251 | DRM_ERROR("timeout waiting for GT wells to go %s\n", | 2255 | DRM_ERROR("timeout waiting for GT wells to go %s\n", |
| 2252 | onoff(wait_for_on)); | 2256 | onoff(wait_for_on)); |
| 2253 | |||
| 2254 | return err; | ||
| 2255 | } | 2257 | } |
| 2256 | 2258 | ||
| 2257 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) | 2259 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) |
| @@ -2272,7 +2274,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv) | |||
| 2272 | * Bspec defines the following GT well on flags as debug only, so | 2274 | * Bspec defines the following GT well on flags as debug only, so |
| 2273 | * don't treat them as hard failures. | 2275 | * don't treat them as hard failures. |
| 2274 | */ | 2276 | */ |
| 2275 | (void)vlv_wait_for_gt_wells(dev_priv, false); | 2277 | vlv_wait_for_gt_wells(dev_priv, false); |
| 2276 | 2278 | ||
| 2277 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; | 2279 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; |
| 2278 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); | 2280 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 92343343044f..532a577ff7a1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2340,7 +2340,7 @@ rebuild_st: | |||
| 2340 | * defer the oom here by reporting the ENOMEM back | 2340 | * defer the oom here by reporting the ENOMEM back |
| 2341 | * to userspace. | 2341 | * to userspace. |
| 2342 | */ | 2342 | */ |
| 2343 | reclaim = mapping_gfp_constraint(mapping, 0); | 2343 | reclaim = mapping_gfp_mask(mapping); |
| 2344 | reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ | 2344 | reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ |
| 2345 | 2345 | ||
| 2346 | page = shmem_read_mapping_page_gfp(mapping, i, reclaim); | 2346 | page = shmem_read_mapping_page_gfp(mapping, i, reclaim); |
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 6348353b91ec..5ddbc9499775 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
| @@ -652,6 +652,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to, | |||
| 652 | 652 | ||
| 653 | GEM_BUG_ON(to == from); | 653 | GEM_BUG_ON(to == from); |
| 654 | 654 | ||
| 655 | if (i915_gem_request_completed(from)) | ||
| 656 | return 0; | ||
| 657 | |||
| 655 | if (to->engine->schedule) { | 658 | if (to->engine->schedule) { |
| 656 | ret = i915_priotree_add_dependency(to->i915, | 659 | ret = i915_priotree_add_dependency(to->i915, |
| 657 | &to->priotree, | 660 | &to->priotree, |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d9d196977f4a..fd97fe00cd0d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -4252,12 +4252,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
| 4252 | dev_priv->rps.pm_intrmsk_mbz = 0; | 4252 | dev_priv->rps.pm_intrmsk_mbz = 0; |
| 4253 | 4253 | ||
| 4254 | /* | 4254 | /* |
| 4255 | * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer | 4255 | * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer |
| 4256 | * if GEN6_PM_UP_EI_EXPIRED is masked. | 4256 | * if GEN6_PM_UP_EI_EXPIRED is masked. |
| 4257 | * | 4257 | * |
| 4258 | * TODO: verify if this can be reproduced on VLV,CHV. | 4258 | * TODO: verify if this can be reproduced on VLV,CHV. |
| 4259 | */ | 4259 | */ |
| 4260 | if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) | 4260 | if (INTEL_INFO(dev_priv)->gen <= 7) |
| 4261 | dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; | 4261 | dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; |
| 4262 | 4262 | ||
| 4263 | if (INTEL_INFO(dev_priv)->gen >= 8) | 4263 | if (INTEL_INFO(dev_priv)->gen >= 8) |
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index b6ea192ad550..9ccbf26124c6 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c | |||
| @@ -580,6 +580,8 @@ static int intel_breadcrumbs_signaler(void *arg) | |||
| 580 | signaler_set_rtpriority(); | 580 | signaler_set_rtpriority(); |
| 581 | 581 | ||
| 582 | do { | 582 | do { |
| 583 | bool do_schedule = true; | ||
| 584 | |||
| 583 | set_current_state(TASK_INTERRUPTIBLE); | 585 | set_current_state(TASK_INTERRUPTIBLE); |
| 584 | 586 | ||
| 585 | /* We are either woken up by the interrupt bottom-half, | 587 | /* We are either woken up by the interrupt bottom-half, |
| @@ -626,9 +628,23 @@ static int intel_breadcrumbs_signaler(void *arg) | |||
| 626 | spin_unlock_irq(&b->rb_lock); | 628 | spin_unlock_irq(&b->rb_lock); |
| 627 | 629 | ||
| 628 | i915_gem_request_put(request); | 630 | i915_gem_request_put(request); |
| 629 | } else { | 631 | |
| 632 | /* If the engine is saturated we may be continually | ||
| 633 | * processing completed requests. This angers the | ||
| 634 | * NMI watchdog if we never let anything else | ||
| 635 | * have access to the CPU. Let's pretend to be nice | ||
| 636 | * and relinquish the CPU if we burn through the | ||
| 637 | * entire RT timeslice! | ||
| 638 | */ | ||
| 639 | do_schedule = need_resched(); | ||
| 640 | } | ||
| 641 | |||
| 642 | if (unlikely(do_schedule)) { | ||
| 630 | DEFINE_WAIT(exec); | 643 | DEFINE_WAIT(exec); |
| 631 | 644 | ||
| 645 | if (kthread_should_park()) | ||
| 646 | kthread_parkme(); | ||
| 647 | |||
| 632 | if (kthread_should_stop()) { | 648 | if (kthread_should_stop()) { |
| 633 | GEM_BUG_ON(request); | 649 | GEM_BUG_ON(request); |
| 634 | break; | 650 | break; |
| @@ -641,9 +657,6 @@ static int intel_breadcrumbs_signaler(void *arg) | |||
| 641 | 657 | ||
| 642 | if (request) | 658 | if (request) |
| 643 | remove_wait_queue(&request->execute, &exec); | 659 | remove_wait_queue(&request->execute, &exec); |
| 644 | |||
| 645 | if (kthread_should_park()) | ||
| 646 | kthread_parkme(); | ||
| 647 | } | 660 | } |
| 648 | i915_gem_request_put(request); | 661 | i915_gem_request_put(request); |
| 649 | } while (1); | 662 | } while (1); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 881dec88df6e..3617927af269 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -13007,17 +13007,6 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13007 | struct drm_i915_private *dev_priv = to_i915(dev); | 13007 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 13008 | int ret = 0; | 13008 | int ret = 0; |
| 13009 | 13009 | ||
| 13010 | /* | ||
| 13011 | * The intel_legacy_cursor_update() fast path takes care | ||
| 13012 | * of avoiding the vblank waits for simple cursor | ||
| 13013 | * movement and flips. For cursor on/off and size changes, | ||
| 13014 | * we want to perform the vblank waits so that watermark | ||
| 13015 | * updates happen during the correct frames. Gen9+ have | ||
| 13016 | * double buffered watermarks and so shouldn't need this. | ||
| 13017 | */ | ||
| 13018 | if (INTEL_GEN(dev_priv) < 9) | ||
| 13019 | state->legacy_cursor_update = false; | ||
| 13020 | |||
| 13021 | ret = drm_atomic_helper_setup_commit(state, nonblock); | 13010 | ret = drm_atomic_helper_setup_commit(state, nonblock); |
| 13022 | if (ret) | 13011 | if (ret) |
| 13023 | return ret; | 13012 | return ret; |
| @@ -13033,6 +13022,26 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13033 | return ret; | 13022 | return ret; |
| 13034 | } | 13023 | } |
| 13035 | 13024 | ||
| 13025 | /* | ||
| 13026 | * The intel_legacy_cursor_update() fast path takes care | ||
| 13027 | * of avoiding the vblank waits for simple cursor | ||
| 13028 | * movement and flips. For cursor on/off and size changes, | ||
| 13029 | * we want to perform the vblank waits so that watermark | ||
| 13030 | * updates happen during the correct frames. Gen9+ have | ||
| 13031 | * double buffered watermarks and so shouldn't need this. | ||
| 13032 | * | ||
| 13033 | * Do this after drm_atomic_helper_setup_commit() and | ||
| 13034 | * intel_atomic_prepare_commit() because we still want | ||
| 13035 | * to skip the flip and fb cleanup waits. Although that | ||
| 13036 | * does risk yanking the mapping from under the display | ||
| 13037 | * engine. | ||
| 13038 | * | ||
| 13039 | * FIXME doing watermarks and fb cleanup from a vblank worker | ||
| 13040 | * (assuming we had any) would solve these problems. | ||
| 13041 | */ | ||
| 13042 | if (INTEL_GEN(dev_priv) < 9) | ||
| 13043 | state->legacy_cursor_update = false; | ||
| 13044 | |||
| 13036 | drm_atomic_helper_swap_state(state, true); | 13045 | drm_atomic_helper_swap_state(state, true); |
| 13037 | dev_priv->wm.distrust_bios_wm = false; | 13046 | dev_priv->wm.distrust_bios_wm = false; |
| 13038 | intel_shared_dpll_swap_state(state); | 13047 | intel_shared_dpll_swap_state(state); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6e04cb54e3ff..ee77b519835c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -4636,9 +4636,20 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) | |||
| 4636 | */ | 4636 | */ |
| 4637 | status = connector_status_disconnected; | 4637 | status = connector_status_disconnected; |
| 4638 | goto out; | 4638 | goto out; |
| 4639 | } else if (connector->status == connector_status_connected) { | 4639 | } else { |
| 4640 | /* | ||
| 4641 | * If display is now connected check links status, | ||
| 4642 | * there have been known issues of link loss triggering | ||
| 4643 | * long pulse. | ||
| 4644 | * | ||
| 4645 | * Some sinks (eg. ASUS PB287Q) seem to perform some | ||
| 4646 | * weird HPD ping pong during modesets. So we can apparently | ||
| 4647 | * end up with HPD going low during a modeset, and then | ||
| 4648 | * going back up soon after. And once that happens we must | ||
| 4649 | * retrain the link to get a picture. That's in case no | ||
| 4650 | * userspace component reacted to intermittent HPD dip. | ||
| 4651 | */ | ||
| 4640 | intel_dp_check_link_status(intel_dp); | 4652 | intel_dp_check_link_status(intel_dp); |
| 4641 | goto out; | ||
| 4642 | } | 4653 | } |
| 4643 | 4654 | ||
| 4644 | /* | 4655 | /* |
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index d8ca187ae001..25d8e76489e4 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c | |||
| @@ -131,8 +131,15 @@ err: | |||
| 131 | 131 | ||
| 132 | static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) | 132 | static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) |
| 133 | { | 133 | { |
| 134 | /* XXX Note that platform_device_register_full() allocates a dma_mask | ||
| 135 | * and never frees it. We can't free it here as we cannot guarantee | ||
| 136 | * this is the last reference (i.e. that the dma_mask will not be | ||
| 137 | * used after our unregister). So ee choose to leak the sizeof(u64) | ||
| 138 | * allocation here - it should be fixed in the platform_device rather | ||
| 139 | * than us fiddle with its internals. | ||
| 140 | */ | ||
| 141 | |||
| 134 | platform_device_unregister(dev_priv->lpe_audio.platdev); | 142 | platform_device_unregister(dev_priv->lpe_audio.platdev); |
| 135 | kfree(dev_priv->lpe_audio.platdev->dev.dma_mask); | ||
| 136 | } | 143 | } |
| 137 | 144 | ||
| 138 | static void lpe_audio_irq_unmask(struct irq_data *d) | 145 | static void lpe_audio_irq_unmask(struct irq_data *d) |
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c index 113dec05c7dc..09c704153456 100644 --- a/drivers/gpu/drm/i915/selftests/mock_drm.c +++ b/drivers/gpu/drm/i915/selftests/mock_drm.c | |||
| @@ -24,31 +24,50 @@ | |||
| 24 | 24 | ||
| 25 | #include "mock_drm.h" | 25 | #include "mock_drm.h" |
| 26 | 26 | ||
| 27 | static inline struct inode fake_inode(struct drm_i915_private *i915) | ||
| 28 | { | ||
| 29 | return (struct inode){ .i_rdev = i915->drm.primary->index }; | ||
| 30 | } | ||
| 31 | |||
| 32 | struct drm_file *mock_file(struct drm_i915_private *i915) | 27 | struct drm_file *mock_file(struct drm_i915_private *i915) |
| 33 | { | 28 | { |
| 34 | struct inode inode = fake_inode(i915); | 29 | struct file *filp; |
| 35 | struct file filp = {}; | 30 | struct inode *inode; |
| 36 | struct drm_file *file; | 31 | struct drm_file *file; |
| 37 | int err; | 32 | int err; |
| 38 | 33 | ||
| 39 | err = drm_open(&inode, &filp); | 34 | inode = kzalloc(sizeof(*inode), GFP_KERNEL); |
| 40 | if (unlikely(err)) | 35 | if (!inode) { |
| 41 | return ERR_PTR(err); | 36 | err = -ENOMEM; |
| 37 | goto err; | ||
| 38 | } | ||
| 39 | |||
| 40 | inode->i_rdev = i915->drm.primary->index; | ||
| 42 | 41 | ||
| 43 | file = filp.private_data; | 42 | filp = kzalloc(sizeof(*filp), GFP_KERNEL); |
| 43 | if (!filp) { | ||
| 44 | err = -ENOMEM; | ||
| 45 | goto err_inode; | ||
| 46 | } | ||
| 47 | |||
| 48 | err = drm_open(inode, filp); | ||
| 49 | if (err) | ||
| 50 | goto err_filp; | ||
| 51 | |||
| 52 | file = filp->private_data; | ||
| 53 | memset(&file->filp, POISON_INUSE, sizeof(file->filp)); | ||
| 44 | file->authenticated = true; | 54 | file->authenticated = true; |
| 55 | |||
| 56 | kfree(filp); | ||
| 57 | kfree(inode); | ||
| 45 | return file; | 58 | return file; |
| 59 | |||
| 60 | err_filp: | ||
| 61 | kfree(filp); | ||
| 62 | err_inode: | ||
| 63 | kfree(inode); | ||
| 64 | err: | ||
| 65 | return ERR_PTR(err); | ||
| 46 | } | 66 | } |
| 47 | 67 | ||
| 48 | void mock_file_free(struct drm_i915_private *i915, struct drm_file *file) | 68 | void mock_file_free(struct drm_i915_private *i915, struct drm_file *file) |
| 49 | { | 69 | { |
| 50 | struct inode inode = fake_inode(i915); | ||
| 51 | struct file filp = { .private_data = file }; | 70 | struct file filp = { .private_data = file }; |
| 52 | 71 | ||
| 53 | drm_release(&inode, &filp); | 72 | drm_release(NULL, &filp); |
| 54 | } | 73 | } |
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index 0e8d2e7f8c70..8097e3693ec4 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c | |||
| @@ -35,7 +35,7 @@ mock_request(struct intel_engine_cs *engine, | |||
| 35 | 35 | ||
| 36 | /* NB the i915->requests slab cache is enlarged to fit mock_request */ | 36 | /* NB the i915->requests slab cache is enlarged to fit mock_request */ |
| 37 | request = i915_gem_request_alloc(engine, context); | 37 | request = i915_gem_request_alloc(engine, context); |
| 38 | if (!request) | 38 | if (IS_ERR(request)) |
| 39 | return NULL; | 39 | return NULL; |
| 40 | 40 | ||
| 41 | mock = container_of(request, typeof(*mock), base); | 41 | mock = container_of(request, typeof(*mock), base); |
