author	Dave Airlie <airlied@redhat.com>	2017-01-09 17:02:09 -0500
committer	Dave Airlie <airlied@redhat.com>	2017-01-09 17:02:09 -0500
commit	5c37daf5dd2e63090abba4ea200b56176f6e4781 (patch)
tree	77bf192b2d215f6086124188cf563f13da8fa46b
parent	3806a271bf4be375f304e492148edb2507181158 (diff)
parent	5d799acdd057e4f10fdd09ade22028c83f829f3e (diff)
Merge tag 'drm-intel-next-2017-01-09' of git://anongit.freedesktop.org/git/drm-intel into drm-next
More 4.11 stuff, holidays edition (i.e. not much):
- docs and cleanups for shared dpll code (Ander)
- some kerneldoc work (Chris)
- fbc by default on gen9+ too, yeah! (Paulo)
- fixes, polish and other small things all over gem code (Chris)
- and a few small things on top
Plus a backmerge, because Dave was enjoying time off too.
* tag 'drm-intel-next-2017-01-09' of git://anongit.freedesktop.org/git/drm-intel: (275 commits)
drm/i915: Update DRIVER_DATE to 20170109
drm/i915: Drain freed objects for mmap space exhaustion
drm/i915: Purge loose pages if we run out of DMA remap space
drm/i915: Fix phys pwrite for struct_mutex-less operation
drm/i915: Simplify testing for am-I-the-kernel-context?
drm/i915: Use range_overflows()
drm/i915: Use fixed-sized types for stolen
drm/i915: Use phys_addr_t for the address of stolen memory
drm/i915: Consolidate checks for memcpy-from-wc support
drm/i915: Only skip requests once a context is banned
drm/i915: Move a few more utility macros to i915_utils.h
drm/i915: Clear ret before unbinding in i915_gem_evict_something()
drm/i915/guc: Exclude the upper end of the Global GTT for the GuC
drm/i915: Move a few utility macros into a separate header
drm/i915/execlists: Reorder execlists register enabling
drm/i915: Assert that we do create the deferred context
drm/i915: Assert all timeline requests are gone before fini
drm/i915: Revoke fenced GTT mmapings across GPU reset
drm/i915: enable FBC on gen9+ too
drm/i915: actually drive the BDW reserved IDs
...
100 files changed, 9803 insertions(+), 4144 deletions(-)
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 117d2ab7a5f7..104296dffad1 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -213,6 +213,18 @@ Video BIOS Table (VBT)
 .. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
    :internal:
 
+Display PLLs
+------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.c
+   :doc: Display PLLs
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.h
+   :internal:
+
 Memory Management and Command Submission
 ========================================
 
@@ -356,4 +368,95 @@ switch_mm
 .. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
    :doc: switch_mm tracepoint
 
+Perf
+====
+
+Overview
+--------
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :doc: i915 Perf Overview
+
+Comparison with Core Perf
+-------------------------
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :doc: i915 Perf History and Comparison with Core Perf
+
+i915 Driver Entry Points
+------------------------
+
+This section covers the entrypoints exported outside of i915_perf.c to
+integrate with drm/i915 and to handle the `DRM_I915_PERF_OPEN` ioctl.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_init
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_fini
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_register
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_unregister
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_open_ioctl
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_release
+
+i915 Perf Stream
+----------------
+
+This section covers the stream-semantics-agnostic structures and functions
+for representing an i915 perf stream FD and associated file operations.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+   :functions: i915_perf_stream
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+   :functions: i915_perf_stream_ops
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: read_properties_unlocked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_open_ioctl_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_destroy_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_read
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_ioctl
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_enable_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_disable_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_poll
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_poll_locked
+
+i915 Perf Observation Architecture Stream
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+   :functions: i915_oa_ops
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_stream_init
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_read
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_stream_enable
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_stream_disable
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_wait_unlocked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_poll_wait
+
+All i915 Perf Internals
+-----------------------
+
+This section simply includes all currently documented i915 perf internals, in
+no particular order, but may include some more minor utilities or platform
+specific details than found in the more high-level sections.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :internal:
+
 .. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c
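
The `:doc:` directives added above contain no text themselves; at build time
the kernel-doc Sphinx extension pulls in the matching `DOC:` section from the
named C file. As a rough sketch of the convention these directives rely on
(the actual wording of the comment block in i915_perf.c will differ), such a
section looks like:

/**
 * DOC: i915 Perf Overview
 *
 * Free-form overview text lives here, inside an ordinary kernel-doc
 * comment in the C source.  The ".. kernel-doc::" directive with
 * ":doc: i915 Perf Overview" copies this body into the generated
 * documentation, while ":internal:" pulls in the file's documented
 * internal symbols and ":functions:" picks out individual ones.
 */
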
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 0f7d28a98b9a..9702c78f458d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1420,8 +1420,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, u64 *mappable_end)
+void intel_gtt_get(u64 *gtt_total,
+		   u32 *stolen_size,
+		   phys_addr_t *mappable_base,
+		   u64 *mappable_end)
 {
 	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
 	*stolen_size = intel_private.stolen_size;
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 51ba630a134b..597648c7a645 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -19,9 +19,12 @@ config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
 	select PREEMPT_COUNT
+	select I2C_CHARDEV
+	select DRM_DP_AUX_CHARDEV
 	select X86_MSR # used by igt/pm_rpm
 	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
 	select DRM_DEBUG_MM if DRM=y
+	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
 	default n
 	help
 	  Choose this option to turn on extra driver debugging that may affect
@@ -43,3 +46,15 @@ config DRM_I915_DEBUG_GEM
 
 	  If in doubt, say "N".
 
+config DRM_I915_SW_FENCE_DEBUG_OBJECTS
+	bool "Enable additional driver debugging for fence objects"
+	depends on DRM_I915
+	select DEBUG_OBJECTS
+	default n
+	help
+	  Choose this option to turn on extra driver debugging that may affect
+	  performance but will catch some internal issues.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
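
The new DRM_I915_SW_FENCE_DEBUG_OBJECTS option selects DEBUG_OBJECTS, the
kernel's generic object-lifetime tracker. The hooks such an option typically
turns on are sketched below; the real hooks live in the i915 sw_fence code
and the names here are illustrative only:

#include <linux/debugobjects.h>

static struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	/* register the fence with the tracker as it is initialised */
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	/* warn if the fence is torn down while still tracked as active */
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}
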
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3dea46af9fe6..5196509e71cf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -24,7 +24,7 @@ i915-y := i915_drv.o \
 	  intel_runtime_pm.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
 
 # GEM code
 i915-y += i915_cmd_parser.o \
@@ -55,7 +55,8 @@ i915-y += i915_cmd_parser.o \
 	  intel_uncore.o
 
 # general-purpose microcontroller (GuC) support
-i915-y += intel_guc_loader.o \
+i915-y += intel_uc.o \
+	  intel_guc_loader.o \
 	  i915_guc_submission.o
 
 # autogenerated null render state
@@ -117,6 +118,10 @@ i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 # virtual gpu code
 i915-y += i915_vgpu.o
 
+# perf code
+i915-y += i915_perf.o \
+	  i915_oa_hsw.o
+
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
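
For reference, the `i915-$(CONFIG_FOO) += bar.o` idiom used throughout this
Makefile appends to `i915-y` when the symbol is enabled and to the unused
variable `i915-` when it is not, so intel_pipe_crc.o above is only built on
CONFIG_DEBUG_FS kernels. A hypothetical fragment showing the same pattern
(symbol and file names are made up):

# built into i915.ko unconditionally
i915-y += i915_example_core.o
# built only when the config symbol is set to y
i915-$(CONFIG_EXAMPLE_FEATURE) += i915_example_feature.o
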
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc4aea6..7d33b607bc89 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -73,12 +73,15 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	mutex_lock(&dev_priv->drm.struct_mutex);
 search_again:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
-						  node, size, 4096, 0,
+						  node, size, 4096,
+						  I915_COLOR_UNEVICTABLE,
 						  start, end, search_flag,
 						  alloc_flag);
 	if (ret) {
 		ret = i915_gem_evict_something(&dev_priv->ggtt.base,
-					       size, 4096, 0, start, end, 0);
+					       size, 4096,
+					       I915_COLOR_UNEVICTABLE,
+					       start, end, 0);
 		if (ret == 0 && ++retried < 3)
 			goto search_again;
 
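
Both call sites now pass I915_COLOR_UNEVICTABLE as the node color, tagging
the vGPU reservations so that the eviction scan will not pick them. A
condensed sketch of the insert/evict/retry pattern used by alloc_gm(),
assuming the 4.10-era drm_mm API visible in the hunk (the real code picks
its search/allocation flags per GM half; error handling trimmed):

static int alloc_with_evict(struct i915_address_space *vm,
			    struct drm_mm_node *node,
			    u64 size, u64 start, u64 end)
{
	int retried = 0;
	int ret;

	do {
		ret = drm_mm_insert_node_in_range_generic(&vm->mm, node,
							  size, 4096,
							  I915_COLOR_UNEVICTABLE,
							  start, end,
							  DRM_MM_SEARCH_DEFAULT,
							  DRM_MM_CREATE_DEFAULT);
		if (ret == 0)
			return 0;

		/* make room in [start, end) and try the insertion again */
		ret = i915_gem_evict_something(vm, size, 4096,
					       I915_COLOR_UNEVICTABLE,
					       start, end, 0);
	} while (ret == 0 && ++retried < 3);

	return ret ? ret : -ENOSPC;
}
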
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d26a092c70e8..9a4b23c3ee97 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1602,7 +1602,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 		return -ENOMEM;
 
 	entry_obj->obj =
-		i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+		i915_gem_object_create(s->vgpu->gvt->dev_priv,
 				       roundup(bb_size, PAGE_SIZE));
 	if (IS_ERR(entry_obj->obj)) {
 		ret = PTR_ERR(entry_obj->obj);
@@ -2665,14 +2665,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
 	int ctx_size = wa_ctx->indirect_ctx.size;
 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 	void *map;
 
-	obj = i915_gem_object_create(dev,
+	obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
 				     roundup(ctx_size + CACHELINE_BYTES,
 					     PAGE_SIZE));
 	if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 522809710312..57fb8e3cbd1f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2200,7 +2200,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_D(OACONTROL, D_HSW);
+	MMIO_D(GEN7_OACONTROL, D_HSW);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
 	MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db242250235..fd2b026f7ecd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -549,18 +549,10 @@ err:
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-
 	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
 					 &vgpu->shadow_ctx_notifier_block);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
-	/* a little hacky to mark as ctx closed */
-	vgpu->shadow_ctx->closed = true;
-	i915_gem_context_put(vgpu->shadow_ctx);
-
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
 int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
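
i915_gem_context_put_unlocked() drops a context reference without the caller
holding struct_mutex, which is what lets the explicit locking and the
"closed" hack above disappear. One plausible shape for such a helper,
assuming the usual kref_put_mutex() pattern (the real helper and its release
callback live in the i915 context code; names here are a sketch):

static inline void
i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
{
	/* take struct_mutex only if this was the last reference */
	kref_put_mutex(&ctx->ref, i915_gem_context_free,
		       &ctx->i915->drm.struct_mutex);
}
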
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f5039f4f988f..21b1cd917d81 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -86,6 +86,102 @@
  * general bitmasking mechanism.
  */
 
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+	/*
+	 * Flags describing how the command parser processes the command.
+	 *
+	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
+	 *                 a length mask if not set
+	 * CMD_DESC_SKIP: The command is allowed but does not follow the
+	 *                standard length encoding for the opcode range in
+	 *                which it falls
+	 * CMD_DESC_REJECT: The command is never allowed
+	 * CMD_DESC_REGISTER: The command should be checked against the
+	 *                    register whitelist for the appropriate ring
+	 * CMD_DESC_MASTER: The command is allowed if the submitting process
+	 *                  is the DRM master
+	 */
+	u32 flags;
+#define CMD_DESC_FIXED    (1<<0)
+#define CMD_DESC_SKIP     (1<<1)
+#define CMD_DESC_REJECT   (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK  (1<<4)
+#define CMD_DESC_MASTER   (1<<5)
+
+	/*
+	 * The command's unique identification bits and the bitmask to get them.
+	 * This isn't strictly the opcode field as defined in the spec and may
+	 * also include type, subtype, and/or subop fields.
+	 */
+	struct {
+		u32 value;
+		u32 mask;
+	} cmd;
+
+	/*
+	 * The command's length. The command is either fixed length (i.e. does
+	 * not include a length field) or has a length field mask. The flag
+	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+	 * a length mask. All command entries in a command table must include
+	 * length information.
+	 */
+	union {
+		u32 fixed;
+		u32 mask;
+	} length;
+
+	/*
+	 * Describes where to find a register address in the command to check
+	 * against the ring's register whitelist. Only valid if flags has the
+	 * CMD_DESC_REGISTER bit set.
+	 *
+	 * A non-zero step value implies that the command may access multiple
+	 * registers in sequence (e.g. LRI), in that case step gives the
+	 * distance in dwords between individual offset fields.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 step;
+	} reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+	/*
+	 * Describes command checks where a particular dword is masked and
+	 * compared against an expected value. If the command does not match
+	 * the expected value, the parser rejects it. Only valid if flags has
+	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+	 * are valid.
+	 *
+	 * If the check specifies a non-zero condition_mask then the parser
+	 * only performs the check when the bits specified by condition_mask
+	 * are non-zero.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 expected;
+		u32 condition_offset;
+		u32 condition_mask;
+	} bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
+ */
+struct drm_i915_cmd_table {
+	const struct drm_i915_cmd_descriptor *table;
+	int count;
+};
+
 #define STD_MI_OPCODE_SHIFT  (32 - 9)
 #define STD_3D_OPCODE_SHIFT  (32 - 16)
 #define STD_2D_OPCODE_SHIFT  (32 - 10)
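
To make the descriptor fields concrete, here is a hypothetical table entry
of the kind the command tables hold; the values are invented for
illustration and not copied from the driver. It matches a command by its
opcode bits, takes the dword length from the low header bits, and asks for
the register operand in dword 1 to be checked against the engine whitelist:

static const struct drm_i915_cmd_descriptor example_desc = {
	.flags = CMD_DESC_REGISTER,
	.cmd = {
		.value = 0x22 << STD_MI_OPCODE_SHIFT,  /* opcode bits to match */
		.mask  = 0x1ff << STD_MI_OPCODE_SHIFT, /* bits that identify it */
	},
	.length = {
		.mask = 0x3f, /* low header bits encode the dword length */
	},
	.reg = {
		.offset = 1, /* register address lives in dword 1 */
		.mask   = 0x007ffffc,
		.step   = 0, /* a single register, not an LRI-style list */
	},
};
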
@@ -450,7 +546,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
 	REG64(PS_INVOCATION_COUNT),
 	REG64(PS_DEPTH_COUNT),
 	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
-	REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
 	REG64(MI_PREDICATE_SRC0),
 	REG64(MI_PREDICATE_SRC1),
 	REG32(GEN7_3DPRIM_END_OFFSET),
@@ -559,7 +654,7 @@ static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
 
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 {
-	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
 	u32 subclient =
 		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
 
@@ -578,7 +673,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 
 static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
 {
-	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
 	u32 subclient =
 		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
 	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
@@ -601,7 +696,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
 
 static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 {
-	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
 
 	if (client == INSTR_MI_CLIENT)
 		return 0x3F;
@@ -984,7 +1079,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 
 	src = ERR_PTR(-ENODEV);
 	if (src_needs_clflush &&
-	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
+	    i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
 		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
 		if (!IS_ERR(src)) {
 			i915_memcpy_from_wc(dst,
@@ -1036,32 +1131,10 @@ unpin_src:
 	return dst;
 }
 
-/**
- * intel_engine_needs_cmd_parser() - should a given engine use software
- * command parsing?
- * @engine: the engine in question
- *
- * Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module parameter.
- *
- * Return: true if the engine requires software command parsing
- */
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
-{
-	if (!engine->needs_cmd_parser)
-		return false;
-
-	if (!USES_PPGTT(engine->i915))
-		return false;
-
-	return (i915.enable_cmd_parser == 1);
-}
-
 static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
 		      const u32 *cmd, u32 length,
-		      const bool is_master,
-		      bool *oacontrol_set)
+		      const bool is_master)
 {
 	if (desc->flags & CMD_DESC_SKIP)
 		return true;
@@ -1099,31 +1172,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 		}
 
 		/*
-		 * OACONTROL requires some special handling for
-		 * writes. We want to make sure that any batch which
-		 * enables OA also disables it before the end of the
-		 * batch. The goal is to prevent one process from
-		 * snooping on the perf data from another process. To do
-		 * that, we need to check the value that will be written
-		 * to the register. Hence, limit OACONTROL writes to
-		 * only MI_LOAD_REGISTER_IMM commands.
-		 */
-		if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
-			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
-				DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
-				return false;
-			}
-
-			if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
-				DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
-				return false;
-			}
-
-			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
-				*oacontrol_set = (cmd[offset + 1] != 0);
-		}
-
-		/*
 		 * Check the value written to the register against the
 		 * allowed mask/value pair given in the whitelist entry.
 		 */
@@ -1214,7 +1262,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 	u32 *cmd, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = noop_desc;
 	const struct drm_i915_cmd_descriptor *desc = &default_desc;
-	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 	bool needs_clflush_after = false;
 	int ret = 0;
 
@@ -1270,20 +1317,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 			break;
 		}
 
-		if (!check_cmd(engine, desc, cmd, length, is_master,
-			       &oacontrol_set)) {
-			ret = -EINVAL;
+		if (!check_cmd(engine, desc, cmd, length, is_master)) {
+			ret = -EACCES;
 			break;
 		}
 
 		cmd += length;
 	}
 
-	if (oacontrol_set) {
-		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
-		ret = -EINVAL;
-	}
-
 	if (cmd >= batch_end) {
 		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 		ret = -EINVAL;
@@ -1313,7 +1354,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 
 	/* If the command parser is not enabled, report 0 - unsupported */
 	for_each_engine(engine, dev_priv, id) {
-		if (intel_engine_needs_cmd_parser(engine)) {
+		if (engine->needs_cmd_parser) {
 			active = true;
 			break;
 		}
@@ -1333,6 +1374,11 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 	 * 5. GPGPU dispatch compute indirect registers.
 	 * 6. TIMESTAMP register and Haswell CS GPR registers
 	 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
+	 * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
+	 *    rely on the HW to NOOP disallowed commands as it would without
+	 *    the parser enabled.
+	 * 9. Don't whitelist or handle oacontrol specially, as ownership
+	 *    for oacontrol state is moving to i915-perf.
 	 */
-	return 7;
+	return 9;
 }
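
Userspace discovers which of the numbered behaviours above it can rely on
via the I915_PARAM_CMD_PARSER_VERSION getparam, which lands in
i915_cmd_parser_get_version(). A minimal query following the standard DRM
getparam pattern:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* returns the parser version, 0 if unsupported, or -1 on ioctl error */
static int query_cmd_parser_version(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value; /* 9 once this patch is applied */
}
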
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b77b53b47acc..9d7b5a8c8dea 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,19 +26,9 @@
  *
  */
 
-#include <linux/seq_file.h>
-#include <linux/circ_buf.h>
-#include <linux/ctype.h>
 #include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/export.h>
 #include <linux/list_sort.h>
-#include <asm/msr-index.h>
-#include <drm/drmP.h>
 #include "intel_drv.h"
-#include "intel_ringbuffer.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
 
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 {
@@ -77,6 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
 
 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
@@ -549,10 +540,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 		if (work->flip_queued_req) {
 			struct intel_engine_cs *engine = work->flip_queued_req->engine;
 
-			seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+			seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
 				   engine->name,
 				   work->flip_queued_req->global_seqno,
-				   atomic_read(&dev_priv->gt.global_timeline.next_seqno),
+				   intel_engine_last_submit(engine),
 				   intel_engine_get_seqno(engine),
 				   i915_gem_request_completed(work->flip_queued_req));
 		} else
@@ -686,7 +677,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
 
 	spin_lock_irq(&b->lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-		struct intel_wait *w = container_of(rb, typeof(*w), node);
+		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
 
 		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
 			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
@@ -946,7 +937,7 @@ i915_error_state_write(struct file *filp,
 	struct i915_error_state_file_priv *error_priv = filp->private_data;
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
-	i915_destroy_error_state(error_priv->dev);
+	i915_destroy_error_state(error_priv->i915);
 
 	return cnt;
 }
@@ -960,7 +951,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
 	if (!error_priv)
 		return -ENOMEM;
 
-	error_priv->dev = &dev_priv->drm;
+	error_priv->i915 = dev_priv;
 
 	i915_error_state_get(&dev_priv->drm, error_priv);
 
@@ -988,8 +979,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
 	ssize_t ret_count = 0;
 	int ret;
 
-	ret = i915_error_state_buf_init(&error_str,
-					to_i915(error_priv->dev), count, *pos);
+	ret = i915_error_state_buf_init(&error_str, error_priv->i915,
+					count, *pos);
 	if (ret)
 		return ret;
 
@@ -1026,7 +1017,7 @@ i915_next_seqno_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
 
-	*val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno);
+	*val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
 	return 0;
 }
 
@@ -1108,7 +1099,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		int max_freq;
 
 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-		if (IS_BROXTON(dev_priv)) {
+		if (IS_GEN9_LP(dev_priv)) {
 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
 		} else {
@@ -1204,7 +1195,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Down threshold: %d%%\n",
 			   dev_priv->rps.down_threshold);
 
-		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
+		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
@@ -1217,7 +1208,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
-		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
+		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
@@ -1330,13 +1321,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
 			   engine->hangcheck.seqno, seqno[id],
 			   intel_engine_last_submit(engine));
-		seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
+		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
 			   yesno(intel_engine_has_waiter(engine)),
 			   yesno(test_bit(engine->id,
-					  &dev_priv->gpu_error.missed_irq_rings)));
+					  &dev_priv->gpu_error.missed_irq_rings)),
+			   yesno(engine->hangcheck.stalled));
+
 		spin_lock_irq(&b->lock);
 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-			struct intel_wait *w = container_of(rb, typeof(*w), node);
+			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
 
 			seq_printf(m, "\t%s [%d] waiting for %x\n",
 				   w->tsk->comm, w->tsk->pid, w->seqno);
@@ -1346,8 +1339,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
 			   (long long)engine->hangcheck.acthd,
 			   (long long)acthd[id]);
-		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
-		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
+		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
+			   hangcheck_action_to_str(engine->hangcheck.action),
+			   engine->hangcheck.action,
+			   jiffies_to_msecs(jiffies -
+					    engine->hangcheck.action_timestamp));
 
 		if (engine->id == RCS) {
 			seq_puts(m, "\tinstdone read =\n");
@@ -1728,7 +1724,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 	if (HAS_PCH_SPLIT(dev_priv))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-	else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 	else if (IS_I915GM(dev_priv))
@@ -2409,7 +2405,7 @@ static void i915_guc_client_info(struct seq_file *m,
 	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
 		client->priority, client->ctx_index, client->proc_desc_offset);
 	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
-		client->doorbell_id, client->doorbell_offset, client->cookie);
+		client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
 	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
 		client->wq_size, client->wq_offset, client->wq_tail);
 
@@ -2429,47 +2425,41 @@ static void i915_guc_client_info(struct seq_file *m,
 static int i915_guc_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_guc guc;
-	struct i915_guc_client client = {};
+	const struct intel_guc *guc = &dev_priv->guc;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	u64 total = 0;
-
-	if (!HAS_GUC_SCHED(dev_priv))
-		return 0;
+	u64 total;
 
-	if (mutex_lock_interruptible(&dev->struct_mutex))
+	if (!guc->execbuf_client) {
+		seq_printf(m, "GuC submission %s\n",
+			   HAS_GUC_SCHED(dev_priv) ?
+			   "disabled" :
+			   "not supported");
 		return 0;
-
-	/* Take a local copy of the GuC data, so we can dump it at leisure */
-	guc = dev_priv->guc;
-	if (guc.execbuf_client)
-		client = *guc.execbuf_client;
-
-	mutex_unlock(&dev->struct_mutex);
+	}
 
 	seq_printf(m, "Doorbell map:\n");
-	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
-	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
 
-	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
-	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
-	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
-	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
-	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+	seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
+	seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
+	seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
+	seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
+	seq_printf(m, "GuC last action error code: %d\n", guc->action_err);
 
+	total = 0;
 	seq_printf(m, "\nGuC submissions:\n");
 	for_each_engine(engine, dev_priv, id) {
-		u64 submissions = guc.submissions[id];
+		u64 submissions = guc->submissions[id];
 		total += submissions;
 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-			engine->name, submissions, guc.last_seqno[id]);
+			engine->name, submissions, guc->last_seqno[id]);
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
-	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
-	i915_guc_client_info(m, dev_priv, &client);
+	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
+	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
 
 	i915_guc_log_info(m, dev_priv);
 
@@ -2567,9 +2557,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	seq_printf(m, "Re-enable work scheduled: %s\n",
 		   yesno(work_busy(&dev_priv->psr.work.work)));
 
-	if (HAS_DDI(dev_priv))
-		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
-	else {
+	if (HAS_DDI(dev_priv)) {
+		if (dev_priv->psr.psr2_support)
+			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+		else
+			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+	} else {
 		for_each_pipe(dev_priv, pipe) {
 			enum transcoder cpu_transcoder =
 				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
@@ -2872,6 +2865,20 @@ static void intel_dp_info(struct seq_file *m,
 				&intel_dp->aux);
 }
 
+static void intel_dp_mst_info(struct seq_file *m,
+			      struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct intel_dp_mst_encoder *intel_mst =
+		enc_to_mst(&intel_encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+						   intel_connector->port);
+
+	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
 static void intel_hdmi_info(struct seq_file *m,
 			    struct intel_connector *intel_connector)
 {
@@ -2914,7 +2921,10 @@ static void intel_connector_info(struct seq_file *m,
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DisplayPort:
 	case DRM_MODE_CONNECTOR_eDP:
-		intel_dp_info(m, intel_connector);
+		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+			intel_dp_mst_info(m, intel_connector);
+		else
+			intel_dp_info(m, intel_connector);
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
@@ -2938,7 +2948,7 @@ static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
 {
 	u32 state;
 
-	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
+	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
 		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 	else
 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3060,7 +3070,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 			   pipe_config->scaler_state.scaler_users,
 			   pipe_config->scaler_state.scaler_id);
 
-		for (i = 0; i < SKL_NUM_SCALERS; i++) {
+		for (i = 0; i < num_scalers; i++) {
 			struct intel_scaler *sc =
 				&pipe_config->scaler_state.scalers[i];
 
@@ -3142,11 +3152,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 		u64 addr;
 
 		seq_printf(m, "%s\n", engine->name);
-		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
 			   intel_engine_get_seqno(engine),
 			   intel_engine_last_submit(engine),
 			   engine->hangcheck.seqno,
-			   engine->hangcheck.score);
+			   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
 
 		rcu_read_lock();
 
@@ -3252,7 +3262,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 
 		spin_lock_irq(&b->lock);
 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-			struct intel_wait *w = container_of(rb, typeof(*w), node);
+			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
 
 			seq_printf(m, "\t%s [%d] waiting for %x\n",
 				   w->tsk->comm, w->tsk->pid, w->seqno);
@@ -3342,14 +3352,14 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
-			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
+			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
 		seq_printf(m, " tracked hardware state:\n");
-		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
+		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
 		seq_printf(m, " dpll_md: 0x%08x\n",
-			   pll->config.hw_state.dpll_md);
-		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
-		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
-		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
+			   pll->state.hw_state.dpll_md);
+		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
+		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
+		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
 	}
 	drm_modeset_unlock_all(dev);
 
@@ -3527,12 +3537,6 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
-struct pipe_crc_info {
-	const char *name;
-	struct drm_i915_private *dev_priv;
-	enum pipe pipe;
-};
-
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3562,844 +3566,6 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) | |||
3562 | return 0; | 3566 | return 0; |
3563 | } | 3567 | } |
3564 | 3568 | ||
3565 | static int i915_pipe_crc_open(struct inode *inode, struct file *filep) | ||
3566 | { | ||
3567 | struct pipe_crc_info *info = inode->i_private; | ||
3568 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
3569 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
3570 | |||
3571 | if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes) | ||
3572 | return -ENODEV; | ||
3573 | |||
3574 | spin_lock_irq(&pipe_crc->lock); | ||
3575 | |||
3576 | if (pipe_crc->opened) { | ||
3577 | spin_unlock_irq(&pipe_crc->lock); | ||
3578 | return -EBUSY; /* already open */ | ||
3579 | } | ||
3580 | |||
3581 | pipe_crc->opened = true; | ||
3582 | filep->private_data = inode->i_private; | ||
3583 | |||
3584 | spin_unlock_irq(&pipe_crc->lock); | ||
3585 | |||
3586 | return 0; | ||
3587 | } | ||
3588 | |||
3589 | static int i915_pipe_crc_release(struct inode *inode, struct file *filep) | ||
3590 | { | ||
3591 | struct pipe_crc_info *info = inode->i_private; | ||
3592 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
3593 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
3594 | |||
3595 | spin_lock_irq(&pipe_crc->lock); | ||
3596 | pipe_crc->opened = false; | ||
3597 | spin_unlock_irq(&pipe_crc->lock); | ||
3598 | |||
3599 | return 0; | ||
3600 | } | ||
3601 | |||
3602 | /* (6 fields, 8 chars each, space separated (5) + '\n') */ | ||
3603 | #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) | ||
3604 | /* account for \'0' */ | ||
3605 | #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) | ||
3606 | |||
3607 | static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) | ||
3608 | { | ||
3609 | assert_spin_locked(&pipe_crc->lock); | ||
3610 | return CIRC_CNT(pipe_crc->head, pipe_crc->tail, | ||
3611 | INTEL_PIPE_CRC_ENTRIES_NR); | ||
3612 | } | ||
3613 | |||
3614 | static ssize_t | ||
3615 | i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, | ||
3616 | loff_t *pos) | ||
3617 | { | ||
3618 | struct pipe_crc_info *info = filep->private_data; | ||
3619 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
3620 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
3621 | char buf[PIPE_CRC_BUFFER_LEN]; | ||
3622 | int n_entries; | ||
3623 | ssize_t bytes_read; | ||
3624 | |||
3625 | /* | ||
3626 | * Don't allow user space to provide buffers not big enough to hold | ||
3627 | * a line of data. | ||
3628 | */ | ||
3629 | if (count < PIPE_CRC_LINE_LEN) | ||
3630 | return -EINVAL; | ||
3631 | |||
3632 | if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) | ||
3633 | return 0; | ||
3634 | |||
3635 | /* nothing to read */ | ||
3636 | spin_lock_irq(&pipe_crc->lock); | ||
3637 | while (pipe_crc_data_count(pipe_crc) == 0) { | ||
3638 | int ret; | ||
3639 | |||
3640 | if (filep->f_flags & O_NONBLOCK) { | ||
3641 | spin_unlock_irq(&pipe_crc->lock); | ||
3642 | return -EAGAIN; | ||
3643 | } | ||
3644 | |||
3645 | ret = wait_event_interruptible_lock_irq(pipe_crc->wq, | ||
3646 | pipe_crc_data_count(pipe_crc), pipe_crc->lock); | ||
3647 | if (ret) { | ||
3648 | spin_unlock_irq(&pipe_crc->lock); | ||
3649 | return ret; | ||
3650 | } | ||
3651 | } | ||
3652 | |||
3653 | /* We now have one or more entries to read */ | ||
3654 | n_entries = count / PIPE_CRC_LINE_LEN; | ||
3655 | |||
3656 | bytes_read = 0; | ||
3657 | while (n_entries > 0) { | ||
3658 | struct intel_pipe_crc_entry *entry = | ||
3659 | &pipe_crc->entries[pipe_crc->tail]; | ||
3660 | |||
3661 | if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, | ||
3662 | INTEL_PIPE_CRC_ENTRIES_NR) < 1) | ||
3663 | break; | ||
3664 | |||
3665 | BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); | ||
3666 | pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); | ||
3667 | |||
3668 | bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, | ||
3669 | "%8u %8x %8x %8x %8x %8x\n", | ||
3670 | entry->frame, entry->crc[0], | ||
3671 | entry->crc[1], entry->crc[2], | ||
3672 | entry->crc[3], entry->crc[4]); | ||
3673 | |||
3674 | spin_unlock_irq(&pipe_crc->lock); | ||
3675 | |||
3676 | if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) | ||
3677 | return -EFAULT; | ||
3678 | |||
3679 | user_buf += PIPE_CRC_LINE_LEN; | ||
3680 | n_entries--; | ||
3681 | |||
3682 | spin_lock_irq(&pipe_crc->lock); | ||
3683 | } | ||
3684 | |||
3685 | spin_unlock_irq(&pipe_crc->lock); | ||
3686 | |||
3687 | return bytes_read; | ||
3688 | } | ||
3689 | |||
3690 | static const struct file_operations i915_pipe_crc_fops = { | ||
3691 | .owner = THIS_MODULE, | ||
3692 | .open = i915_pipe_crc_open, | ||
3693 | .read = i915_pipe_crc_read, | ||
3694 | .release = i915_pipe_crc_release, | ||
3695 | }; | ||
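Taken together, the fops above give each pipe a read-only debugfs file that blocks until a CRC entry is available. A userspace consumer looks roughly like this (a sketch; the debugfs mount point and DRI minor number are assumptions about a typical setup):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char line[6 * 8 + 5 + 1 + 1];	/* one 54-byte line + NUL */
		ssize_t n;
		int fd;

		fd = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc", O_RDONLY);
		if (fd < 0)
			return 1;

		/* Requests smaller than one full line get -EINVAL; without
		 * O_NONBLOCK the read sleeps until a CRC entry arrives. */
		n = read(fd, line, sizeof(line) - 1);
		if (n > 0) {
			line[n] = '\0';
			fputs(line, stdout);	/* frame counter + five CRC words */
		}
		close(fd);
		return 0;
	}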
3696 | |||
3697 | static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { | ||
3698 | { | ||
3699 | .name = "i915_pipe_A_crc", | ||
3700 | .pipe = PIPE_A, | ||
3701 | }, | ||
3702 | { | ||
3703 | .name = "i915_pipe_B_crc", | ||
3704 | .pipe = PIPE_B, | ||
3705 | }, | ||
3706 | { | ||
3707 | .name = "i915_pipe_C_crc", | ||
3708 | .pipe = PIPE_C, | ||
3709 | }, | ||
3710 | }; | ||
3711 | |||
3712 | static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, | ||
3713 | enum pipe pipe) | ||
3714 | { | ||
3715 | struct drm_i915_private *dev_priv = to_i915(minor->dev); | ||
3716 | struct dentry *ent; | ||
3717 | struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; | ||
3718 | |||
3719 | info->dev_priv = dev_priv; | ||
3720 | ent = debugfs_create_file(info->name, S_IRUGO, root, info, | ||
3721 | &i915_pipe_crc_fops); | ||
3722 | if (!ent) | ||
3723 | return -ENOMEM; | ||
3724 | |||
3725 | return drm_add_fake_info_node(minor, ent, info); | ||
3726 | } | ||
3727 | |||
3728 | static const char * const pipe_crc_sources[] = { | ||
3729 | "none", | ||
3730 | "plane1", | ||
3731 | "plane2", | ||
3732 | "pf", | ||
3733 | "pipe", | ||
3734 | "TV", | ||
3735 | "DP-B", | ||
3736 | "DP-C", | ||
3737 | "DP-D", | ||
3738 | "auto", | ||
3739 | }; | ||
3740 | |||
3741 | static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) | ||
3742 | { | ||
3743 | BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); | ||
3744 | return pipe_crc_sources[source]; | ||
3745 | } | ||
3746 | |||
3747 | static int display_crc_ctl_show(struct seq_file *m, void *data) | ||
3748 | { | ||
3749 | struct drm_i915_private *dev_priv = m->private; | ||
3750 | int i; | ||
3751 | |||
3752 | for (i = 0; i < I915_MAX_PIPES; i++) | ||
3753 | seq_printf(m, "%c %s\n", pipe_name(i), | ||
3754 | pipe_crc_source_name(dev_priv->pipe_crc[i].source)); | ||
3755 | |||
3756 | return 0; | ||
3757 | } | ||
3758 | |||
3759 | static int display_crc_ctl_open(struct inode *inode, struct file *file) | ||
3760 | { | ||
3761 | return single_open(file, display_crc_ctl_show, inode->i_private); | ||
3762 | } | ||
3763 | |||
3764 | static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, | ||
3765 | uint32_t *val) | ||
3766 | { | ||
3767 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) | ||
3768 | *source = INTEL_PIPE_CRC_SOURCE_PIPE; | ||
3769 | |||
3770 | switch (*source) { | ||
3771 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
3772 | *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; | ||
3773 | break; | ||
3774 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
3775 | *val = 0; | ||
3776 | break; | ||
3777 | default: | ||
3778 | return -EINVAL; | ||
3779 | } | ||
3780 | |||
3781 | return 0; | ||
3782 | } | ||
3783 | |||
3784 | static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, | ||
3785 | enum pipe pipe, | ||
3786 | enum intel_pipe_crc_source *source) | ||
3787 | { | ||
3788 | struct drm_device *dev = &dev_priv->drm; | ||
3789 | struct intel_encoder *encoder; | ||
3790 | struct intel_crtc *crtc; | ||
3791 | struct intel_digital_port *dig_port; | ||
3792 | int ret = 0; | ||
3793 | |||
3794 | *source = INTEL_PIPE_CRC_SOURCE_PIPE; | ||
3795 | |||
3796 | drm_modeset_lock_all(dev); | ||
3797 | for_each_intel_encoder(dev, encoder) { | ||
3798 | if (!encoder->base.crtc) | ||
3799 | continue; | ||
3800 | |||
3801 | crtc = to_intel_crtc(encoder->base.crtc); | ||
3802 | |||
3803 | if (crtc->pipe != pipe) | ||
3804 | continue; | ||
3805 | |||
3806 | switch (encoder->type) { | ||
3807 | case INTEL_OUTPUT_TVOUT: | ||
3808 | *source = INTEL_PIPE_CRC_SOURCE_TV; | ||
3809 | break; | ||
3810 | case INTEL_OUTPUT_DP: | ||
3811 | case INTEL_OUTPUT_EDP: | ||
3812 | dig_port = enc_to_dig_port(&encoder->base); | ||
3813 | switch (dig_port->port) { | ||
3814 | case PORT_B: | ||
3815 | *source = INTEL_PIPE_CRC_SOURCE_DP_B; | ||
3816 | break; | ||
3817 | case PORT_C: | ||
3818 | *source = INTEL_PIPE_CRC_SOURCE_DP_C; | ||
3819 | break; | ||
3820 | case PORT_D: | ||
3821 | *source = INTEL_PIPE_CRC_SOURCE_DP_D; | ||
3822 | break; | ||
3823 | default: | ||
3824 | WARN(1, "nonexistent DP port %c\n", | ||
3825 | port_name(dig_port->port)); | ||
3826 | break; | ||
3827 | } | ||
3828 | break; | ||
3829 | default: | ||
3830 | break; | ||
3831 | } | ||
3832 | } | ||
3833 | drm_modeset_unlock_all(dev); | ||
3834 | |||
3835 | return ret; | ||
3836 | } | ||
3837 | |||
3838 | static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, | ||
3839 | enum pipe pipe, | ||
3840 | enum intel_pipe_crc_source *source, | ||
3841 | uint32_t *val) | ||
3842 | { | ||
3843 | bool need_stable_symbols = false; | ||
3844 | |||
3845 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { | ||
3846 | int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); | ||
3847 | if (ret) | ||
3848 | return ret; | ||
3849 | } | ||
3850 | |||
3851 | switch (*source) { | ||
3852 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
3853 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; | ||
3854 | break; | ||
3855 | case INTEL_PIPE_CRC_SOURCE_DP_B: | ||
3856 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; | ||
3857 | need_stable_symbols = true; | ||
3858 | break; | ||
3859 | case INTEL_PIPE_CRC_SOURCE_DP_C: | ||
3860 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; | ||
3861 | need_stable_symbols = true; | ||
3862 | break; | ||
3863 | case INTEL_PIPE_CRC_SOURCE_DP_D: | ||
3864 | if (!IS_CHERRYVIEW(dev_priv)) | ||
3865 | return -EINVAL; | ||
3866 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; | ||
3867 | need_stable_symbols = true; | ||
3868 | break; | ||
3869 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
3870 | *val = 0; | ||
3871 | break; | ||
3872 | default: | ||
3873 | return -EINVAL; | ||
3874 | } | ||
3875 | |||
3876 | /* | ||
3877 | * When the pipe CRC tap point is after the transcoders we need | ||
3878 | * to tweak symbol-level features to produce a deterministic series of | ||
3879 | * symbols for a given frame. We need to reset those features only once | ||
3880 | * a frame (instead of every nth symbol): | ||
3881 | * - DC-balance: used to ensure a better clock recovery from the data | ||
3882 | * link (SDVO) | ||
3883 | * - DisplayPort scrambling: used for EMI reduction | ||
3884 | */ | ||
3885 | if (need_stable_symbols) { | ||
3886 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
3887 | |||
3888 | tmp |= DC_BALANCE_RESET_VLV; | ||
3889 | switch (pipe) { | ||
3890 | case PIPE_A: | ||
3891 | tmp |= PIPE_A_SCRAMBLE_RESET; | ||
3892 | break; | ||
3893 | case PIPE_B: | ||
3894 | tmp |= PIPE_B_SCRAMBLE_RESET; | ||
3895 | break; | ||
3896 | case PIPE_C: | ||
3897 | tmp |= PIPE_C_SCRAMBLE_RESET; | ||
3898 | break; | ||
3899 | default: | ||
3900 | return -EINVAL; | ||
3901 | } | ||
3902 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
3903 | } | ||
3904 | |||
3905 | return 0; | ||
3906 | } | ||
3907 | |||
3908 | static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, | ||
3909 | enum pipe pipe, | ||
3910 | enum intel_pipe_crc_source *source, | ||
3911 | uint32_t *val) | ||
3912 | { | ||
3913 | bool need_stable_symbols = false; | ||
3914 | |||
3915 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { | ||
3916 | int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); | ||
3917 | if (ret) | ||
3918 | return ret; | ||
3919 | } | ||
3920 | |||
3921 | switch (*source) { | ||
3922 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
3923 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; | ||
3924 | break; | ||
3925 | case INTEL_PIPE_CRC_SOURCE_TV: | ||
3926 | if (!SUPPORTS_TV(dev_priv)) | ||
3927 | return -EINVAL; | ||
3928 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; | ||
3929 | break; | ||
3930 | case INTEL_PIPE_CRC_SOURCE_DP_B: | ||
3931 | if (!IS_G4X(dev_priv)) | ||
3932 | return -EINVAL; | ||
3933 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; | ||
3934 | need_stable_symbols = true; | ||
3935 | break; | ||
3936 | case INTEL_PIPE_CRC_SOURCE_DP_C: | ||
3937 | if (!IS_G4X(dev_priv)) | ||
3938 | return -EINVAL; | ||
3939 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; | ||
3940 | need_stable_symbols = true; | ||
3941 | break; | ||
3942 | case INTEL_PIPE_CRC_SOURCE_DP_D: | ||
3943 | if (!IS_G4X(dev_priv)) | ||
3944 | return -EINVAL; | ||
3945 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; | ||
3946 | need_stable_symbols = true; | ||
3947 | break; | ||
3948 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
3949 | *val = 0; | ||
3950 | break; | ||
3951 | default: | ||
3952 | return -EINVAL; | ||
3953 | } | ||
3954 | |||
3955 | /* | ||
3956 | * When the pipe CRC tap point is after the transcoders we need | ||
3957 | * to tweak symbol-level features to produce a deterministic series of | ||
3958 | * symbols for a given frame. We need to reset those features only once | ||
3959 | * a frame (instead of every nth symbol): | ||
3960 | * - DC-balance: used to ensure a better clock recovery from the data | ||
3961 | * link (SDVO) | ||
3962 | * - DisplayPort scrambling: used for EMI reduction | ||
3963 | */ | ||
3964 | if (need_stable_symbols) { | ||
3965 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
3966 | |||
3967 | WARN_ON(!IS_G4X(dev_priv)); | ||
3968 | |||
3969 | I915_WRITE(PORT_DFT_I9XX, | ||
3970 | I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); | ||
3971 | |||
3972 | if (pipe == PIPE_A) | ||
3973 | tmp |= PIPE_A_SCRAMBLE_RESET; | ||
3974 | else | ||
3975 | tmp |= PIPE_B_SCRAMBLE_RESET; | ||
3976 | |||
3977 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
3978 | } | ||
3979 | |||
3980 | return 0; | ||
3981 | } | ||
3982 | |||
3983 | static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, | ||
3984 | enum pipe pipe) | ||
3985 | { | ||
3986 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
3987 | |||
3988 | switch (pipe) { | ||
3989 | case PIPE_A: | ||
3990 | tmp &= ~PIPE_A_SCRAMBLE_RESET; | ||
3991 | break; | ||
3992 | case PIPE_B: | ||
3993 | tmp &= ~PIPE_B_SCRAMBLE_RESET; | ||
3994 | break; | ||
3995 | case PIPE_C: | ||
3996 | tmp &= ~PIPE_C_SCRAMBLE_RESET; | ||
3997 | break; | ||
3998 | default: | ||
3999 | return; | ||
4000 | } | ||
4001 | if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) | ||
4002 | tmp &= ~DC_BALANCE_RESET_VLV; | ||
4003 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
4004 | |||
4005 | } | ||
4006 | |||
4007 | static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, | ||
4008 | enum pipe pipe) | ||
4009 | { | ||
4010 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
4011 | |||
4012 | if (pipe == PIPE_A) | ||
4013 | tmp &= ~PIPE_A_SCRAMBLE_RESET; | ||
4014 | else | ||
4015 | tmp &= ~PIPE_B_SCRAMBLE_RESET; | ||
4016 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
4017 | |||
4018 | if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { | ||
4019 | I915_WRITE(PORT_DFT_I9XX, | ||
4020 | I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); | ||
4021 | } | ||
4022 | } | ||
4023 | |||
4024 | static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, | ||
4025 | uint32_t *val) | ||
4026 | { | ||
4027 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) | ||
4028 | *source = INTEL_PIPE_CRC_SOURCE_PIPE; | ||
4029 | |||
4030 | switch (*source) { | ||
4031 | case INTEL_PIPE_CRC_SOURCE_PLANE1: | ||
4032 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; | ||
4033 | break; | ||
4034 | case INTEL_PIPE_CRC_SOURCE_PLANE2: | ||
4035 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; | ||
4036 | break; | ||
4037 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
4038 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; | ||
4039 | break; | ||
4040 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
4041 | *val = 0; | ||
4042 | break; | ||
4043 | default: | ||
4044 | return -EINVAL; | ||
4045 | } | ||
4046 | |||
4047 | return 0; | ||
4048 | } | ||
4049 | |||
4050 | static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv, | ||
4051 | bool enable) | ||
4052 | { | ||
4053 | struct drm_device *dev = &dev_priv->drm; | ||
4054 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); | ||
4055 | struct intel_crtc_state *pipe_config; | ||
4056 | struct drm_atomic_state *state; | ||
4057 | int ret = 0; | ||
4058 | |||
4059 | drm_modeset_lock_all(dev); | ||
4060 | state = drm_atomic_state_alloc(dev); | ||
4061 | if (!state) { | ||
4062 | ret = -ENOMEM; | ||
4063 | goto out; | ||
4064 | } | ||
4065 | |||
4066 | state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); | ||
4067 | pipe_config = intel_atomic_get_crtc_state(state, crtc); | ||
4068 | if (IS_ERR(pipe_config)) { | ||
4069 | ret = PTR_ERR(pipe_config); | ||
4070 | goto out; | ||
4071 | } | ||
4072 | |||
4073 | pipe_config->pch_pfit.force_thru = enable; | ||
4074 | if (pipe_config->cpu_transcoder == TRANSCODER_EDP && | ||
4075 | pipe_config->pch_pfit.enabled != enable) | ||
4076 | pipe_config->base.connectors_changed = true; | ||
4077 | |||
4078 | ret = drm_atomic_commit(state); | ||
4079 | out: | ||
4080 | WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); | ||
4081 | drm_modeset_unlock_all(dev); | ||
4082 | drm_atomic_state_put(state); | ||
4083 | } | ||
4084 | |||
4085 | static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, | ||
4086 | enum pipe pipe, | ||
4087 | enum intel_pipe_crc_source *source, | ||
4088 | uint32_t *val) | ||
4089 | { | ||
4090 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) | ||
4091 | *source = INTEL_PIPE_CRC_SOURCE_PF; | ||
4092 | |||
4093 | switch (*source) { | ||
4094 | case INTEL_PIPE_CRC_SOURCE_PLANE1: | ||
4095 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; | ||
4096 | break; | ||
4097 | case INTEL_PIPE_CRC_SOURCE_PLANE2: | ||
4098 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; | ||
4099 | break; | ||
4100 | case INTEL_PIPE_CRC_SOURCE_PF: | ||
4101 | if (IS_HASWELL(dev_priv) && pipe == PIPE_A) | ||
4102 | hsw_trans_edp_pipe_A_crc_wa(dev_priv, true); | ||
4103 | |||
4104 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; | ||
4105 | break; | ||
4106 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
4107 | *val = 0; | ||
4108 | break; | ||
4109 | default: | ||
4110 | return -EINVAL; | ||
4111 | } | ||
4112 | |||
4113 | return 0; | ||
4114 | } | ||
4115 | |||
4116 | static int pipe_crc_set_source(struct drm_i915_private *dev_priv, | ||
4117 | enum pipe pipe, | ||
4118 | enum intel_pipe_crc_source source) | ||
4119 | { | ||
4120 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | ||
4121 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); | ||
4122 | enum intel_display_power_domain power_domain; | ||
4123 | u32 val = 0; /* shut up gcc */ | ||
4124 | int ret; | ||
4125 | |||
4126 | if (pipe_crc->source == source) | ||
4127 | return 0; | ||
4128 | |||
4129 | /* forbid changing the source without going back to 'none' */ | ||
4130 | if (pipe_crc->source && source) | ||
4131 | return -EINVAL; | ||
4132 | |||
4133 | power_domain = POWER_DOMAIN_PIPE(pipe); | ||
4134 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { | ||
4135 | DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); | ||
4136 | return -EIO; | ||
4137 | } | ||
4138 | |||
4139 | if (IS_GEN2(dev_priv)) | ||
4140 | ret = i8xx_pipe_crc_ctl_reg(&source, &val); | ||
4141 | else if (INTEL_GEN(dev_priv) < 5) | ||
4142 | ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); | ||
4143 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
4144 | ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); | ||
4145 | else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) | ||
4146 | ret = ilk_pipe_crc_ctl_reg(&source, &val); | ||
4147 | else | ||
4148 | ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); | ||
4149 | |||
4150 | if (ret != 0) | ||
4151 | goto out; | ||
4152 | |||
4153 | /* none -> real source transition */ | ||
4154 | if (source) { | ||
4155 | struct intel_pipe_crc_entry *entries; | ||
4156 | |||
4157 | DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", | ||
4158 | pipe_name(pipe), pipe_crc_source_name(source)); | ||
4159 | |||
4160 | entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, | ||
4161 | sizeof(pipe_crc->entries[0]), | ||
4162 | GFP_KERNEL); | ||
4163 | if (!entries) { | ||
4164 | ret = -ENOMEM; | ||
4165 | goto out; | ||
4166 | } | ||
4167 | |||
4168 | /* | ||
4169 | * When IPS gets enabled, the pipe CRC changes. Since IPS gets | ||
4170 | * enabled and disabled dynamically based on package C states, | ||
4171 | * user space can't make reliable use of the CRCs, so let's just | ||
4172 | * completely disable it. | ||
4173 | */ | ||
4174 | hsw_disable_ips(crtc); | ||
4175 | |||
4176 | spin_lock_irq(&pipe_crc->lock); | ||
4177 | kfree(pipe_crc->entries); | ||
4178 | pipe_crc->entries = entries; | ||
4179 | pipe_crc->head = 0; | ||
4180 | pipe_crc->tail = 0; | ||
4181 | spin_unlock_irq(&pipe_crc->lock); | ||
4182 | } | ||
4183 | |||
4184 | pipe_crc->source = source; | ||
4185 | |||
4186 | I915_WRITE(PIPE_CRC_CTL(pipe), val); | ||
4187 | POSTING_READ(PIPE_CRC_CTL(pipe)); | ||
4188 | |||
4189 | /* real source -> none transition */ | ||
4190 | if (source == INTEL_PIPE_CRC_SOURCE_NONE) { | ||
4191 | struct intel_pipe_crc_entry *entries; | ||
4192 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, | ||
4193 | pipe); | ||
4194 | |||
4195 | DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", | ||
4196 | pipe_name(pipe)); | ||
4197 | |||
4198 | drm_modeset_lock(&crtc->base.mutex, NULL); | ||
4199 | if (crtc->base.state->active) | ||
4200 | intel_wait_for_vblank(dev_priv, pipe); | ||
4201 | drm_modeset_unlock(&crtc->base.mutex); | ||
4202 | |||
4203 | spin_lock_irq(&pipe_crc->lock); | ||
4204 | entries = pipe_crc->entries; | ||
4205 | pipe_crc->entries = NULL; | ||
4206 | pipe_crc->head = 0; | ||
4207 | pipe_crc->tail = 0; | ||
4208 | spin_unlock_irq(&pipe_crc->lock); | ||
4209 | |||
4210 | kfree(entries); | ||
4211 | |||
4212 | if (IS_G4X(dev_priv)) | ||
4213 | g4x_undo_pipe_scramble_reset(dev_priv, pipe); | ||
4214 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
4215 | vlv_undo_pipe_scramble_reset(dev_priv, pipe); | ||
4216 | else if (IS_HASWELL(dev_priv) && pipe == PIPE_A) | ||
4217 | hsw_trans_edp_pipe_A_crc_wa(dev_priv, false); | ||
4218 | |||
4219 | hsw_enable_ips(crtc); | ||
4220 | } | ||
4221 | |||
4222 | ret = 0; | ||
4223 | |||
4224 | out: | ||
4225 | intel_display_power_put(dev_priv, power_domain); | ||
4226 | |||
4227 | return ret; | ||
4228 | } | ||
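The guard near the top of pipe_crc_set_source() is worth calling out: pipe_crc->source and source are tested as booleans, relying on INTEL_PIPE_CRC_SOURCE_NONE being enum value zero, so switching directly between two real sources is refused and userspace must pass through "none". The rule in miniature (hypothetical helper, for illustration only):

	static bool pipe_crc_transition_ok(enum intel_pipe_crc_source cur,
					   enum intel_pipe_crc_source next)
	{
		/* Allowed iff nothing changes or one endpoint is "none". */
		return cur == next ||
		       cur == INTEL_PIPE_CRC_SOURCE_NONE ||
		       next == INTEL_PIPE_CRC_SOURCE_NONE;
	}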
4229 | |||
4230 | /* | ||
4231 | * Parse pipe CRC command strings: | ||
4232 | * command: wsp* object wsp+ name wsp+ source wsp* | ||
4233 | * object: 'pipe' | ||
4234 | * name: (A | B | C) | ||
4235 | * source: (none | plane1 | plane2 | pf) | ||
4236 | * wsp: (#0x20 | #0x9 | #0xA)+ | ||
4237 | * | ||
4238 | * eg.: | ||
4239 | * "pipe A plane1" -> Start CRC computations on plane1 of pipe A | ||
4240 | * "pipe A none" -> Stop CRC | ||
4241 | */ | ||
4242 | static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) | ||
4243 | { | ||
4244 | int n_words = 0; | ||
4245 | |||
4246 | while (*buf) { | ||
4247 | char *end; | ||
4248 | |||
4249 | /* skip leading white space */ | ||
4250 | buf = skip_spaces(buf); | ||
4251 | if (!*buf) | ||
4252 | break; /* end of buffer */ | ||
4253 | |||
4254 | /* find end of word */ | ||
4255 | for (end = buf; *end && !isspace(*end); end++) | ||
4256 | ; | ||
4257 | |||
4258 | if (n_words == max_words) { | ||
4259 | DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", | ||
4260 | max_words); | ||
4261 | return -EINVAL; /* ran out of words[] before bytes */ | ||
4262 | } | ||
4263 | |||
4264 | if (*end) | ||
4265 | *end++ = '\0'; | ||
4266 | words[n_words++] = buf; | ||
4267 | buf = end; | ||
4268 | } | ||
4269 | |||
4270 | return n_words; | ||
4271 | } | ||
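The tokenizer splits the buffer in place, overwriting the first whitespace byte after each word with a NUL and recording a pointer to the word's start, so "pipe A none" becomes words[] = {"pipe", "A", "none"} with n_words == 3. The same splitting logic as a standalone userspace sketch:

	#include <ctype.h>
	#include <stdio.h>

	int main(void)
	{
		char cmd[] = "pipe A none";	/* must be writable: split in place */
		char *words[3], *p = cmd;
		int i, n = 0;

		while (*p && n < 3) {
			while (*p && isspace((unsigned char)*p))	/* skip leading whitespace */
				p++;
			if (!*p)
				break;
			words[n++] = p;
			while (*p && !isspace((unsigned char)*p))	/* find end of word */
				p++;
			if (*p)
				*p++ = '\0';		/* terminate the word */
		}

		for (i = 0; i < n; i++)
			printf("words[%d] = \"%s\"\n", i, words[i]);
		return 0;
	}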
4272 | |||
4273 | enum intel_pipe_crc_object { | ||
4274 | PIPE_CRC_OBJECT_PIPE, | ||
4275 | }; | ||
4276 | |||
4277 | static const char * const pipe_crc_objects[] = { | ||
4278 | "pipe", | ||
4279 | }; | ||
4280 | |||
4281 | static int | ||
4282 | display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) | ||
4283 | { | ||
4284 | int i; | ||
4285 | |||
4286 | for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) | ||
4287 | if (!strcmp(buf, pipe_crc_objects[i])) { | ||
4288 | *o = i; | ||
4289 | return 0; | ||
4290 | } | ||
4291 | |||
4292 | return -EINVAL; | ||
4293 | } | ||
4294 | |||
4295 | static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) | ||
4296 | { | ||
4297 | const char name = buf[0]; | ||
4298 | |||
4299 | if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) | ||
4300 | return -EINVAL; | ||
4301 | |||
4302 | *pipe = name - 'A'; | ||
4303 | |||
4304 | return 0; | ||
4305 | } | ||
4306 | |||
4307 | static int | ||
4308 | display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) | ||
4309 | { | ||
4310 | int i; | ||
4311 | |||
4312 | for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) | ||
4313 | if (!strcmp(buf, pipe_crc_sources[i])) { | ||
4314 | *s = i; | ||
4315 | return 0; | ||
4316 | } | ||
4317 | |||
4318 | return -EINVAL; | ||
4319 | } | ||
4320 | |||
4321 | static int display_crc_ctl_parse(struct drm_i915_private *dev_priv, | ||
4322 | char *buf, size_t len) | ||
4323 | { | ||
4324 | #define N_WORDS 3 | ||
4325 | int n_words; | ||
4326 | char *words[N_WORDS]; | ||
4327 | enum pipe pipe; | ||
4328 | enum intel_pipe_crc_object object; | ||
4329 | enum intel_pipe_crc_source source; | ||
4330 | |||
4331 | n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); | ||
4332 | if (n_words != N_WORDS) { | ||
4333 | DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", | ||
4334 | N_WORDS); | ||
4335 | return -EINVAL; | ||
4336 | } | ||
4337 | |||
4338 | if (display_crc_ctl_parse_object(words[0], &object) < 0) { | ||
4339 | DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); | ||
4340 | return -EINVAL; | ||
4341 | } | ||
4342 | |||
4343 | if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { | ||
4344 | DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); | ||
4345 | return -EINVAL; | ||
4346 | } | ||
4347 | |||
4348 | if (display_crc_ctl_parse_source(words[2], &source) < 0) { | ||
4349 | DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); | ||
4350 | return -EINVAL; | ||
4351 | } | ||
4352 | |||
4353 | return pipe_crc_set_source(dev_priv, pipe, source); | ||
4354 | } | ||
4355 | |||
4356 | static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, | ||
4357 | size_t len, loff_t *offp) | ||
4358 | { | ||
4359 | struct seq_file *m = file->private_data; | ||
4360 | struct drm_i915_private *dev_priv = m->private; | ||
4361 | char *tmpbuf; | ||
4362 | int ret; | ||
4363 | |||
4364 | if (len == 0) | ||
4365 | return 0; | ||
4366 | |||
4367 | if (len > PAGE_SIZE - 1) { | ||
4368 | DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", | ||
4369 | PAGE_SIZE); | ||
4370 | return -E2BIG; | ||
4371 | } | ||
4372 | |||
4373 | tmpbuf = kmalloc(len + 1, GFP_KERNEL); | ||
4374 | if (!tmpbuf) | ||
4375 | return -ENOMEM; | ||
4376 | |||
4377 | if (copy_from_user(tmpbuf, ubuf, len)) { | ||
4378 | ret = -EFAULT; | ||
4379 | goto out; | ||
4380 | } | ||
4381 | tmpbuf[len] = '\0'; | ||
4382 | |||
4383 | ret = display_crc_ctl_parse(dev_priv, tmpbuf, len); | ||
4384 | |||
4385 | out: | ||
4386 | kfree(tmpbuf); | ||
4387 | if (ret < 0) | ||
4388 | return ret; | ||
4389 | |||
4390 | *offp += len; | ||
4391 | return len; | ||
4392 | } | ||
4393 | |||
4394 | static const struct file_operations i915_display_crc_ctl_fops = { | ||
4395 | .owner = THIS_MODULE, | ||
4396 | .open = display_crc_ctl_open, | ||
4397 | .read = seq_read, | ||
4398 | .llseek = seq_lseek, | ||
4399 | .release = single_release, | ||
4400 | .write = display_crc_ctl_write | ||
4401 | }; | ||
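From userspace, arming and disarming capture is then a pair of writes to this control file, bracketing reads of the per-pipe CRC file shown earlier (paths again assume the usual debugfs mount):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int crc_ctl(const char *cmd)
	{
		int fd = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl",
			      O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, cmd, strlen(cmd));	/* e.g. "pipe A plane1" */
		close(fd);
		return n < 0 ? -1 : 0;
	}

	/* Typical sequence: crc_ctl("pipe A plane1"), read CRC lines,
	 * then crc_ctl("pipe A none") to stop capture. */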
4402 | |||
4403 | static ssize_t i915_displayport_test_active_write(struct file *file, | 3569 | static ssize_t i915_displayport_test_active_write(struct file *file, |
4404 | const char __user *ubuf, | 3570 | const char __user *ubuf, |
4405 | size_t len, loff_t *offp) | 3571 | size_t len, loff_t *offp) |
@@ -4447,9 +3613,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file, | |||
4447 | * testing code, only accept an actual value of 1 here | 3613 | * testing code, only accept an actual value of 1 here |
4448 | */ | 3614 | */ |
4449 | if (val == 1) | 3615 | if (val == 1) |
4450 | intel_dp->compliance_test_active = 1; | 3616 | intel_dp->compliance.test_active = 1; |
4451 | else | 3617 | else |
4452 | intel_dp->compliance_test_active = 0; | 3618 | intel_dp->compliance.test_active = 0; |
4453 | } | 3619 | } |
4454 | } | 3620 | } |
4455 | out: | 3621 | out: |
@@ -4476,7 +3642,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data) | |||
4476 | if (connector->status == connector_status_connected && | 3642 | if (connector->status == connector_status_connected && |
4477 | connector->encoder != NULL) { | 3643 | connector->encoder != NULL) { |
4478 | intel_dp = enc_to_intel_dp(connector->encoder); | 3644 | intel_dp = enc_to_intel_dp(connector->encoder); |
4479 | if (intel_dp->compliance_test_active) | 3645 | if (intel_dp->compliance.test_active) |
4480 | seq_puts(m, "1"); | 3646 | seq_puts(m, "1"); |
4481 | else | 3647 | else |
4482 | seq_puts(m, "0"); | 3648 | seq_puts(m, "0"); |
@@ -4520,7 +3686,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data) | |||
4520 | if (connector->status == connector_status_connected && | 3686 | if (connector->status == connector_status_connected && |
4521 | connector->encoder != NULL) { | 3687 | connector->encoder != NULL) { |
4522 | intel_dp = enc_to_intel_dp(connector->encoder); | 3688 | intel_dp = enc_to_intel_dp(connector->encoder); |
4523 | seq_printf(m, "%lx", intel_dp->compliance_test_data); | 3689 | seq_printf(m, "%lx", intel_dp->compliance.test_data.edid); |
4524 | } else | 3690 | } else |
4525 | seq_puts(m, "0"); | 3691 | seq_puts(m, "0"); |
4526 | } | 3692 | } |
@@ -4559,7 +3725,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) | |||
4559 | if (connector->status == connector_status_connected && | 3725 | if (connector->status == connector_status_connected && |
4560 | connector->encoder != NULL) { | 3726 | connector->encoder != NULL) { |
4561 | intel_dp = enc_to_intel_dp(connector->encoder); | 3727 | intel_dp = enc_to_intel_dp(connector->encoder); |
4562 | seq_printf(m, "%02lx", intel_dp->compliance_test_type); | 3728 | seq_printf(m, "%02lx", intel_dp->compliance.test_type); |
4563 | } else | 3729 | } else |
4564 | seq_puts(m, "0"); | 3730 | seq_puts(m, "0"); |
4565 | } | 3731 | } |
@@ -4958,7 +4124,7 @@ unlock: | |||
4958 | 4124 | ||
4959 | if (val & DROP_FREED) { | 4125 | if (val & DROP_FREED) { |
4960 | synchronize_rcu(); | 4126 | synchronize_rcu(); |
4961 | flush_work(&dev_priv->mm.free_work); | 4127 | i915_gem_drain_freed_objects(dev_priv); |
4962 | } | 4128 | } |
4963 | 4129 | ||
4964 | return ret; | 4130 | return ret; |
@@ -5165,7 +4331,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, | |||
5165 | u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; | 4331 | u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; |
5166 | 4332 | ||
5167 | /* BXT has a single slice and at most 3 subslices. */ | 4333 | /* BXT has a single slice and at most 3 subslices. */ |
5168 | if (IS_BROXTON(dev_priv)) { | 4334 | if (IS_GEN9_LP(dev_priv)) { |
5169 | s_max = 1; | 4335 | s_max = 1; |
5170 | ss_max = 3; | 4336 | ss_max = 3; |
5171 | } | 4337 | } |
@@ -5199,7 +4365,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, | |||
5199 | for (ss = 0; ss < ss_max; ss++) { | 4365 | for (ss = 0; ss < ss_max; ss++) { |
5200 | unsigned int eu_cnt; | 4366 | unsigned int eu_cnt; |
5201 | 4367 | ||
5202 | if (IS_BROXTON(dev_priv)) { | 4368 | if (IS_GEN9_LP(dev_priv)) { |
5203 | if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) | 4369 | if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) |
5204 | /* skip disabled subslice */ | 4370 | /* skip disabled subslice */ |
5205 | continue; | 4371 | continue; |
@@ -5450,19 +4616,6 @@ static const struct i915_debugfs_files { | |||
5450 | {"i915_guc_log_control", &i915_guc_log_control_fops} | 4616 | {"i915_guc_log_control", &i915_guc_log_control_fops} |
5451 | }; | 4617 | }; |
5452 | 4618 | ||
5453 | void intel_display_crc_init(struct drm_i915_private *dev_priv) | ||
5454 | { | ||
5455 | enum pipe pipe; | ||
5456 | |||
5457 | for_each_pipe(dev_priv, pipe) { | ||
5458 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | ||
5459 | |||
5460 | pipe_crc->opened = false; | ||
5461 | spin_lock_init(&pipe_crc->lock); | ||
5462 | init_waitqueue_head(&pipe_crc->wq); | ||
5463 | } | ||
5464 | } | ||
5465 | |||
5466 | int i915_debugfs_register(struct drm_i915_private *dev_priv) | 4619 | int i915_debugfs_register(struct drm_i915_private *dev_priv) |
5467 | { | 4620 | { |
5468 | struct drm_minor *minor = dev_priv->drm.primary; | 4621 | struct drm_minor *minor = dev_priv->drm.primary; |
@@ -5472,11 +4625,9 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) | |||
5472 | if (ret) | 4625 | if (ret) |
5473 | return ret; | 4626 | return ret; |
5474 | 4627 | ||
5475 | for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { | 4628 | ret = intel_pipe_crc_create(minor); |
5476 | ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); | 4629 | if (ret) |
5477 | if (ret) | 4630 | return ret; |
5478 | return ret; | ||
5479 | } | ||
5480 | 4631 | ||
5481 | for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { | 4632 | for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { |
5482 | ret = i915_debugfs_create(minor->debugfs_root, minor, | 4633 | ret = i915_debugfs_create(minor->debugfs_root, minor, |
@@ -5502,12 +4653,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv) | |||
5502 | drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops, | 4653 | drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops, |
5503 | 1, minor); | 4654 | 1, minor); |
5504 | 4655 | ||
5505 | for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { | 4656 | intel_pipe_crc_cleanup(minor); |
5506 | struct drm_info_list *info_list = | ||
5507 | (struct drm_info_list *)&i915_pipe_crc_data[i]; | ||
5508 | |||
5509 | drm_debugfs_remove_files(info_list, 1, minor); | ||
5510 | } | ||
5511 | 4657 | ||
5512 | for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { | 4658 | for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { |
5513 | struct drm_info_list *info_list = | 4659 | struct drm_info_list *info_list = |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 445fec9c2841..4d22b4b479b8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -142,9 +142,8 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) | |||
142 | return ret; | 142 | return ret; |
143 | } | 143 | } |
144 | 144 | ||
145 | static void intel_detect_pch(struct drm_device *dev) | 145 | static void intel_detect_pch(struct drm_i915_private *dev_priv) |
146 | { | 146 | { |
147 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
148 | struct pci_dev *pch = NULL; | 147 | struct pci_dev *pch = NULL; |
149 | 148 | ||
150 | /* In all current cases, num_pipes is equivalent to the PCH_NOP setting | 149 | /* In all current cases, num_pipes is equivalent to the PCH_NOP setting |
@@ -361,10 +360,8 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
361 | return 0; | 360 | return 0; |
362 | } | 361 | } |
363 | 362 | ||
364 | static int i915_get_bridge_dev(struct drm_device *dev) | 363 | static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) |
365 | { | 364 | { |
366 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
367 | |||
368 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | 365 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); |
369 | if (!dev_priv->bridge_dev) { | 366 | if (!dev_priv->bridge_dev) { |
370 | DRM_ERROR("bridge device not found\n"); | 367 | DRM_ERROR("bridge device not found\n"); |
@@ -375,9 +372,8 @@ static int i915_get_bridge_dev(struct drm_device *dev) | |||
375 | 372 | ||
376 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | 373 | /* Allocate space for the MCH regs if needed, return nonzero on error */ |
377 | static int | 374 | static int |
378 | intel_alloc_mchbar_resource(struct drm_device *dev) | 375 | intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv) |
379 | { | 376 | { |
380 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
381 | int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; | 377 | int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
382 | u32 temp_lo, temp_hi = 0; | 378 | u32 temp_lo, temp_hi = 0; |
383 | u64 mchbar_addr; | 379 | u64 mchbar_addr; |
@@ -421,9 +417,8 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
421 | 417 | ||
422 | /* Set up MCHBAR if possible; sets mchbar_need_disable if it must be disabled again */ | 418 | /* Set up MCHBAR if possible; sets mchbar_need_disable if it must be disabled again */ |
423 | static void | 419 | static void |
424 | intel_setup_mchbar(struct drm_device *dev) | 420 | intel_setup_mchbar(struct drm_i915_private *dev_priv) |
425 | { | 421 | { |
426 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
427 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; | 422 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
428 | u32 temp; | 423 | u32 temp; |
429 | bool enabled; | 424 | bool enabled; |
@@ -445,7 +440,7 @@ intel_setup_mchbar(struct drm_device *dev) | |||
445 | if (enabled) | 440 | if (enabled) |
446 | return; | 441 | return; |
447 | 442 | ||
448 | if (intel_alloc_mchbar_resource(dev)) | 443 | if (intel_alloc_mchbar_resource(dev_priv)) |
449 | return; | 444 | return; |
450 | 445 | ||
451 | dev_priv->mchbar_need_disable = true; | 446 | dev_priv->mchbar_need_disable = true; |
@@ -461,9 +456,8 @@ intel_setup_mchbar(struct drm_device *dev) | |||
461 | } | 456 | } |
462 | 457 | ||
463 | static void | 458 | static void |
464 | intel_teardown_mchbar(struct drm_device *dev) | 459 | intel_teardown_mchbar(struct drm_i915_private *dev_priv) |
465 | { | 460 | { |
466 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
467 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; | 461 | int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
468 | 462 | ||
469 | if (dev_priv->mchbar_need_disable) { | 463 | if (dev_priv->mchbar_need_disable) { |
@@ -493,9 +487,9 @@ intel_teardown_mchbar(struct drm_device *dev) | |||
493 | /* true = enable decode, false = disable decode */ | 487 | /* true = enable decode, false = disable decode */ |
494 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | 488 | static unsigned int i915_vga_set_decode(void *cookie, bool state) |
495 | { | 489 | { |
496 | struct drm_device *dev = cookie; | 490 | struct drm_i915_private *dev_priv = cookie; |
497 | 491 | ||
498 | intel_modeset_vga_set_state(to_i915(dev), state); | 492 | intel_modeset_vga_set_state(dev_priv, state); |
499 | if (state) | 493 | if (state) |
500 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 494 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
501 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 495 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
@@ -503,6 +497,9 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state) | |||
503 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 497 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
504 | } | 498 | } |
505 | 499 | ||
500 | static int i915_resume_switcheroo(struct drm_device *dev); | ||
501 | static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); | ||
502 | |||
506 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | 503 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) |
507 | { | 504 | { |
508 | struct drm_device *dev = pci_get_drvdata(pdev); | 505 | struct drm_device *dev = pci_get_drvdata(pdev); |
@@ -544,12 +541,11 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { | |||
544 | static void i915_gem_fini(struct drm_i915_private *dev_priv) | 541 | static void i915_gem_fini(struct drm_i915_private *dev_priv) |
545 | { | 542 | { |
546 | mutex_lock(&dev_priv->drm.struct_mutex); | 543 | mutex_lock(&dev_priv->drm.struct_mutex); |
547 | i915_gem_cleanup_engines(&dev_priv->drm); | 544 | i915_gem_cleanup_engines(dev_priv); |
548 | i915_gem_context_fini(&dev_priv->drm); | 545 | i915_gem_context_fini(dev_priv); |
549 | mutex_unlock(&dev_priv->drm.struct_mutex); | 546 | mutex_unlock(&dev_priv->drm.struct_mutex); |
550 | 547 | ||
551 | rcu_barrier(); | 548 | i915_gem_drain_freed_objects(dev_priv); |
552 | flush_work(&dev_priv->mm.free_work); | ||
553 | 549 | ||
554 | WARN_ON(!list_empty(&dev_priv->context_list)); | 550 | WARN_ON(!list_empty(&dev_priv->context_list)); |
555 | } | 551 | } |
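This hunk folds the open-coded rcu_barrier()/flush_work() pair into i915_gem_drain_freed_objects(), the same helper now used in the debugfs DROP_FREED path above. A plausible shape for the helper, inferred from the code it replaces rather than quoted from the tree:

	static inline void
	i915_gem_drain_freed_objects(struct drm_i915_private *i915)
	{
		/* Wait out in-flight RCU grace periods so deferred frees
		 * have been queued, then flush the worker that actually
		 * releases the objects. */
		rcu_barrier();
		flush_work(&i915->mm.free_work);
	}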
@@ -574,7 +570,7 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
574 | * then we do not take part in VGA arbitration and the | 570 | * then we do not take part in VGA arbitration and the |
575 | * vga_client_register() fails with -ENODEV. | 571 | * vga_client_register() fails with -ENODEV. |
576 | */ | 572 | */ |
577 | ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode); | 573 | ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode); |
578 | if (ret && ret != -ENODEV) | 574 | if (ret && ret != -ENODEV) |
579 | goto out; | 575 | goto out; |
580 | 576 | ||
@@ -595,7 +591,7 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
595 | if (ret) | 591 | if (ret) |
596 | goto cleanup_csr; | 592 | goto cleanup_csr; |
597 | 593 | ||
598 | intel_setup_gmbus(dev); | 594 | intel_setup_gmbus(dev_priv); |
599 | 595 | ||
600 | /* Important: The output setup functions called by modeset_init need | 596 | /* Important: The output setup functions called by modeset_init need |
601 | * working irqs for e.g. gmbus and dp aux transfers. */ | 597 | * working irqs for e.g. gmbus and dp aux transfers. */ |
@@ -603,9 +599,9 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
603 | if (ret) | 599 | if (ret) |
604 | goto cleanup_irq; | 600 | goto cleanup_irq; |
605 | 601 | ||
606 | intel_guc_init(dev); | 602 | intel_guc_init(dev_priv); |
607 | 603 | ||
608 | ret = i915_gem_init(dev); | 604 | ret = i915_gem_init(dev_priv); |
609 | if (ret) | 605 | if (ret) |
610 | goto cleanup_irq; | 606 | goto cleanup_irq; |
611 | 607 | ||
@@ -626,13 +622,13 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
626 | return 0; | 622 | return 0; |
627 | 623 | ||
628 | cleanup_gem: | 624 | cleanup_gem: |
629 | if (i915_gem_suspend(dev)) | 625 | if (i915_gem_suspend(dev_priv)) |
630 | DRM_ERROR("failed to idle hardware; continuing to unload!\n"); | 626 | DRM_ERROR("failed to idle hardware; continuing to unload!\n"); |
631 | i915_gem_fini(dev_priv); | 627 | i915_gem_fini(dev_priv); |
632 | cleanup_irq: | 628 | cleanup_irq: |
633 | intel_guc_fini(dev); | 629 | intel_guc_fini(dev_priv); |
634 | drm_irq_uninstall(dev); | 630 | drm_irq_uninstall(dev); |
635 | intel_teardown_gmbus(dev); | 631 | intel_teardown_gmbus(dev_priv); |
636 | cleanup_csr: | 632 | cleanup_csr: |
637 | intel_csr_ucode_fini(dev_priv); | 633 | intel_csr_ucode_fini(dev_priv); |
638 | intel_power_domains_fini(dev_priv); | 634 | intel_power_domains_fini(dev_priv); |
@@ -643,7 +639,6 @@ out: | |||
643 | return ret; | 639 | return ret; |
644 | } | 640 | } |
645 | 641 | ||
646 | #if IS_ENABLED(CONFIG_FB) | ||
647 | static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | 642 | static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) |
648 | { | 643 | { |
649 | struct apertures_struct *ap; | 644 | struct apertures_struct *ap; |
@@ -668,12 +663,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | |||
668 | 663 | ||
669 | return ret; | 664 | return ret; |
670 | } | 665 | } |
671 | #else | ||
672 | static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | ||
673 | { | ||
674 | return 0; | ||
675 | } | ||
676 | #endif | ||
677 | 666 | ||
678 | #if !defined(CONFIG_VGA_CONSOLE) | 667 | #if !defined(CONFIG_VGA_CONSOLE) |
679 | static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) | 668 | static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) |
@@ -811,12 +800,15 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
811 | spin_lock_init(&dev_priv->uncore.lock); | 800 | spin_lock_init(&dev_priv->uncore.lock); |
812 | spin_lock_init(&dev_priv->mm.object_stat_lock); | 801 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
813 | spin_lock_init(&dev_priv->mmio_flip_lock); | 802 | spin_lock_init(&dev_priv->mmio_flip_lock); |
803 | spin_lock_init(&dev_priv->wm.dsparb_lock); | ||
814 | mutex_init(&dev_priv->sb_lock); | 804 | mutex_init(&dev_priv->sb_lock); |
815 | mutex_init(&dev_priv->modeset_restore_lock); | 805 | mutex_init(&dev_priv->modeset_restore_lock); |
816 | mutex_init(&dev_priv->av_mutex); | 806 | mutex_init(&dev_priv->av_mutex); |
817 | mutex_init(&dev_priv->wm.wm_mutex); | 807 | mutex_init(&dev_priv->wm.wm_mutex); |
818 | mutex_init(&dev_priv->pps_mutex); | 808 | mutex_init(&dev_priv->pps_mutex); |
819 | 809 | ||
810 | intel_uc_init_early(dev_priv); | ||
811 | |||
820 | i915_memcpy_init_early(dev_priv); | 812 | i915_memcpy_init_early(dev_priv); |
821 | 813 | ||
822 | ret = i915_workqueues_init(dev_priv); | 814 | ret = i915_workqueues_init(dev_priv); |
@@ -828,9 +820,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
828 | goto err_workqueues; | 820 | goto err_workqueues; |
829 | 821 | ||
830 | /* This must be called before any calls to HAS_PCH_* */ | 822 | /* This must be called before any calls to HAS_PCH_* */ |
831 | intel_detect_pch(&dev_priv->drm); | 823 | intel_detect_pch(dev_priv); |
832 | 824 | ||
833 | intel_pm_setup(&dev_priv->drm); | 825 | intel_pm_setup(dev_priv); |
834 | intel_init_dpio(dev_priv); | 826 | intel_init_dpio(dev_priv); |
835 | intel_power_domains_init(dev_priv); | 827 | intel_power_domains_init(dev_priv); |
836 | intel_irq_init(dev_priv); | 828 | intel_irq_init(dev_priv); |
@@ -838,7 +830,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
838 | intel_init_display_hooks(dev_priv); | 830 | intel_init_display_hooks(dev_priv); |
839 | intel_init_clock_gating_hooks(dev_priv); | 831 | intel_init_clock_gating_hooks(dev_priv); |
840 | intel_init_audio_hooks(dev_priv); | 832 | intel_init_audio_hooks(dev_priv); |
841 | ret = i915_gem_load_init(&dev_priv->drm); | 833 | ret = i915_gem_load_init(dev_priv); |
842 | if (ret < 0) | 834 | if (ret < 0) |
843 | goto err_gvt; | 835 | goto err_gvt; |
844 | 836 | ||
@@ -848,6 +840,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
848 | 840 | ||
849 | intel_detect_preproduction_hw(dev_priv); | 841 | intel_detect_preproduction_hw(dev_priv); |
850 | 842 | ||
843 | i915_perf_init(dev_priv); | ||
844 | |||
851 | return 0; | 845 | return 0; |
852 | 846 | ||
853 | err_gvt: | 847 | err_gvt: |
@@ -863,13 +857,13 @@ err_workqueues: | |||
863 | */ | 857 | */ |
864 | static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) | 858 | static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) |
865 | { | 859 | { |
866 | i915_gem_load_cleanup(&dev_priv->drm); | 860 | i915_perf_fini(dev_priv); |
861 | i915_gem_load_cleanup(dev_priv); | ||
867 | i915_workqueues_cleanup(dev_priv); | 862 | i915_workqueues_cleanup(dev_priv); |
868 | } | 863 | } |
869 | 864 | ||
870 | static int i915_mmio_setup(struct drm_device *dev) | 865 | static int i915_mmio_setup(struct drm_i915_private *dev_priv) |
871 | { | 866 | { |
872 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
873 | struct pci_dev *pdev = dev_priv->drm.pdev; | 867 | struct pci_dev *pdev = dev_priv->drm.pdev; |
874 | int mmio_bar; | 868 | int mmio_bar; |
875 | int mmio_size; | 869 | int mmio_size; |
@@ -895,17 +889,16 @@ static int i915_mmio_setup(struct drm_device *dev) | |||
895 | } | 889 | } |
896 | 890 | ||
897 | /* Try to make sure MCHBAR is enabled before poking at it */ | 891 | /* Try to make sure MCHBAR is enabled before poking at it */ |
898 | intel_setup_mchbar(dev); | 892 | intel_setup_mchbar(dev_priv); |
899 | 893 | ||
900 | return 0; | 894 | return 0; |
901 | } | 895 | } |
902 | 896 | ||
903 | static void i915_mmio_cleanup(struct drm_device *dev) | 897 | static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) |
904 | { | 898 | { |
905 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
906 | struct pci_dev *pdev = dev_priv->drm.pdev; | 899 | struct pci_dev *pdev = dev_priv->drm.pdev; |
907 | 900 | ||
908 | intel_teardown_mchbar(dev); | 901 | intel_teardown_mchbar(dev_priv); |
909 | pci_iounmap(pdev, dev_priv->regs); | 902 | pci_iounmap(pdev, dev_priv->regs); |
910 | } | 903 | } |
911 | 904 | ||
@@ -920,16 +913,15 @@ static void i915_mmio_cleanup(struct drm_device *dev) | |||
920 | */ | 913 | */ |
921 | static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) | 914 | static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) |
922 | { | 915 | { |
923 | struct drm_device *dev = &dev_priv->drm; | ||
924 | int ret; | 916 | int ret; |
925 | 917 | ||
926 | if (i915_inject_load_failure()) | 918 | if (i915_inject_load_failure()) |
927 | return -ENODEV; | 919 | return -ENODEV; |
928 | 920 | ||
929 | if (i915_get_bridge_dev(dev)) | 921 | if (i915_get_bridge_dev(dev_priv)) |
930 | return -EIO; | 922 | return -EIO; |
931 | 923 | ||
932 | ret = i915_mmio_setup(dev); | 924 | ret = i915_mmio_setup(dev_priv); |
933 | if (ret < 0) | 925 | if (ret < 0) |
934 | goto put_bridge; | 926 | goto put_bridge; |
935 | 927 | ||
@@ -949,10 +941,8 @@ put_bridge: | |||
949 | */ | 941 | */ |
950 | static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) | 942 | static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) |
951 | { | 943 | { |
952 | struct drm_device *dev = &dev_priv->drm; | ||
953 | |||
954 | intel_uncore_fini(dev_priv); | 944 | intel_uncore_fini(dev_priv); |
955 | i915_mmio_cleanup(dev); | 945 | i915_mmio_cleanup(dev_priv); |
956 | pci_dev_put(dev_priv->bridge_dev); | 946 | pci_dev_put(dev_priv->bridge_dev); |
957 | } | 947 | } |
958 | 948 | ||
@@ -1043,7 +1033,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1043 | * behaviour if any general state is accessed within a page above 4GB, | 1033 | * behaviour if any general state is accessed within a page above 4GB, |
1044 | * which also needs to be handled carefully. | 1034 | * which also needs to be handled carefully. |
1045 | */ | 1035 | */ |
1046 | if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) { | 1036 | if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) { |
1047 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | 1037 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
1048 | 1038 | ||
1049 | if (ret) { | 1039 | if (ret) { |
@@ -1126,6 +1116,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) | |||
1126 | i915_debugfs_register(dev_priv); | 1116 | i915_debugfs_register(dev_priv); |
1127 | i915_guc_register(dev_priv); | 1117 | i915_guc_register(dev_priv); |
1128 | i915_setup_sysfs(dev_priv); | 1118 | i915_setup_sysfs(dev_priv); |
1119 | |||
1120 | /* Depends on sysfs having been initialized */ | ||
1121 | i915_perf_register(dev_priv); | ||
1129 | } else | 1122 | } else |
1130 | DRM_ERROR("Failed to register driver for userspace access!\n"); | 1123 | DRM_ERROR("Failed to register driver for userspace access!\n"); |
1131 | 1124 | ||
@@ -1162,6 +1155,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) | |||
1162 | acpi_video_unregister(); | 1155 | acpi_video_unregister(); |
1163 | intel_opregion_unregister(dev_priv); | 1156 | intel_opregion_unregister(dev_priv); |
1164 | 1157 | ||
1158 | i915_perf_unregister(dev_priv); | ||
1159 | |||
1165 | i915_teardown_sysfs(dev_priv); | 1160 | i915_teardown_sysfs(dev_priv); |
1166 | i915_guc_unregister(dev_priv); | 1161 | i915_guc_unregister(dev_priv); |
1167 | i915_debugfs_unregister(dev_priv); | 1162 | i915_debugfs_unregister(dev_priv); |
@@ -1194,8 +1189,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1194 | if (dev_priv) | 1189 | if (dev_priv) |
1195 | ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); | 1190 | ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); |
1196 | if (ret) { | 1191 | if (ret) { |
1197 | dev_printk(KERN_ERR, &pdev->dev, | 1192 | DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); |
1198 | "[" DRM_NAME ":%s] allocation failed\n", __func__); | ||
1199 | kfree(dev_priv); | 1193 | kfree(dev_priv); |
1200 | return ret; | 1194 | return ret; |
1201 | } | 1195 | } |
@@ -1243,6 +1237,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1243 | 1237 | ||
1244 | intel_runtime_pm_enable(dev_priv); | 1238 | intel_runtime_pm_enable(dev_priv); |
1245 | 1239 | ||
1240 | dev_priv->ipc_enabled = false; | ||
1241 | |||
1246 | /* Everything is in place, we can now relax! */ | 1242 | /* Everything is in place, we can now relax! */ |
1247 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", | 1243 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", |
1248 | driver.name, driver.major, driver.minor, driver.patchlevel, | 1244 | driver.name, driver.major, driver.minor, driver.patchlevel, |
@@ -1280,7 +1276,7 @@ void i915_driver_unload(struct drm_device *dev) | |||
1280 | 1276 | ||
1281 | intel_fbdev_fini(dev); | 1277 | intel_fbdev_fini(dev); |
1282 | 1278 | ||
1283 | if (i915_gem_suspend(dev)) | 1279 | if (i915_gem_suspend(dev_priv)) |
1284 | DRM_ERROR("failed to idle hardware; continuing to unload!\n"); | 1280 | DRM_ERROR("failed to idle hardware; continuing to unload!\n"); |
1285 | 1281 | ||
1286 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); | 1282 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); |
@@ -1312,12 +1308,12 @@ void i915_driver_unload(struct drm_device *dev) | |||
1312 | 1308 | ||
1313 | /* Free error state after interrupts are fully disabled. */ | 1309 | /* Free error state after interrupts are fully disabled. */ |
1314 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | 1310 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); |
1315 | i915_destroy_error_state(dev); | 1311 | i915_destroy_error_state(dev_priv); |
1316 | 1312 | ||
1317 | /* Flush any outstanding unpin_work. */ | 1313 | /* Flush any outstanding unpin_work. */ |
1318 | drain_workqueue(dev_priv->wq); | 1314 | drain_workqueue(dev_priv->wq); |
1319 | 1315 | ||
1320 | intel_guc_fini(dev); | 1316 | intel_guc_fini(dev_priv); |
1321 | i915_gem_fini(dev_priv); | 1317 | i915_gem_fini(dev_priv); |
1322 | intel_fbc_cleanup_cfb(dev_priv); | 1318 | intel_fbc_cleanup_cfb(dev_priv); |
1323 | 1319 | ||
@@ -1422,14 +1418,14 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
1422 | 1418 | ||
1423 | pci_save_state(pdev); | 1419 | pci_save_state(pdev); |
1424 | 1420 | ||
1425 | error = i915_gem_suspend(dev); | 1421 | error = i915_gem_suspend(dev_priv); |
1426 | if (error) { | 1422 | if (error) { |
1427 | dev_err(&pdev->dev, | 1423 | dev_err(&pdev->dev, |
1428 | "GEM idle failed, resume might fail\n"); | 1424 | "GEM idle failed, resume might fail\n"); |
1429 | goto out; | 1425 | goto out; |
1430 | } | 1426 | } |
1431 | 1427 | ||
1432 | intel_guc_suspend(dev); | 1428 | intel_guc_suspend(dev_priv); |
1433 | 1429 | ||
1434 | intel_display_suspend(dev); | 1430 | intel_display_suspend(dev); |
1435 | 1431 | ||
@@ -1444,7 +1440,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
1444 | 1440 | ||
1445 | i915_gem_suspend_gtt_mappings(dev_priv); | 1441 | i915_gem_suspend_gtt_mappings(dev_priv); |
1446 | 1442 | ||
1447 | i915_save_state(dev); | 1443 | i915_save_state(dev_priv); |
1448 | 1444 | ||
1449 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; | 1445 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
1450 | intel_opregion_notify_adapter(dev_priv, opregion_target_state); | 1446 | intel_opregion_notify_adapter(dev_priv, opregion_target_state); |
@@ -1527,7 +1523,7 @@ out: | |||
1527 | return ret; | 1523 | return ret; |
1528 | } | 1524 | } |
1529 | 1525 | ||
1530 | int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) | 1526 | static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) |
1531 | { | 1527 | { |
1532 | int error; | 1528 | int error; |
1533 | 1529 | ||
@@ -1565,33 +1561,36 @@ static int i915_drm_resume(struct drm_device *dev) | |||
1565 | 1561 | ||
1566 | intel_csr_ucode_resume(dev_priv); | 1562 | intel_csr_ucode_resume(dev_priv); |
1567 | 1563 | ||
1568 | i915_gem_resume(dev); | 1564 | i915_gem_resume(dev_priv); |
1569 | 1565 | ||
1570 | i915_restore_state(dev); | 1566 | i915_restore_state(dev_priv); |
1571 | intel_pps_unlock_regs_wa(dev_priv); | 1567 | intel_pps_unlock_regs_wa(dev_priv); |
1572 | intel_opregion_setup(dev_priv); | 1568 | intel_opregion_setup(dev_priv); |
1573 | 1569 | ||
1574 | intel_init_pch_refclk(dev); | 1570 | intel_init_pch_refclk(dev_priv); |
1575 | drm_mode_config_reset(dev); | ||
1576 | 1571 | ||
1577 | /* | 1572 | /* |
1578 | * Interrupts have to be enabled before any batches are run. If not the | 1573 | * Interrupts have to be enabled before any batches are run. If not the |
1579 | * GPU will hang. i915_gem_init_hw() will initiate batches to | 1574 | * GPU will hang. i915_gem_init_hw() will initiate batches to |
1580 | * update/restore the context. | 1575 | * update/restore the context. |
1581 | * | 1576 | * |
1577 | * drm_mode_config_reset() needs AUX interrupts. | ||
1578 | * | ||
1582 | * Modeset enabling in intel_modeset_init_hw() also needs working | 1579 | * Modeset enabling in intel_modeset_init_hw() also needs working |
1583 | * interrupts. | 1580 | * interrupts. |
1584 | */ | 1581 | */ |
1585 | intel_runtime_pm_enable_interrupts(dev_priv); | 1582 | intel_runtime_pm_enable_interrupts(dev_priv); |
1586 | 1583 | ||
1584 | drm_mode_config_reset(dev); | ||
1585 | |||
1587 | mutex_lock(&dev->struct_mutex); | 1586 | mutex_lock(&dev->struct_mutex); |
1588 | if (i915_gem_init_hw(dev)) { | 1587 | if (i915_gem_init_hw(dev_priv)) { |
1589 | DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); | 1588 | DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); |
1590 | i915_gem_set_wedged(dev_priv); | 1589 | i915_gem_set_wedged(dev_priv); |
1591 | } | 1590 | } |
1592 | mutex_unlock(&dev->struct_mutex); | 1591 | mutex_unlock(&dev->struct_mutex); |
1593 | 1592 | ||
1594 | intel_guc_resume(dev); | 1593 | intel_guc_resume(dev_priv); |
1595 | 1594 | ||
1596 | intel_modeset_init_hw(dev); | 1595 | intel_modeset_init_hw(dev); |
1597 | 1596 | ||
@@ -1715,7 +1714,7 @@ out: | |||
1715 | return ret; | 1714 | return ret; |
1716 | } | 1715 | } |
1717 | 1716 | ||
1718 | int i915_resume_switcheroo(struct drm_device *dev) | 1717 | static int i915_resume_switcheroo(struct drm_device *dev) |
1719 | { | 1718 | { |
1720 | int ret; | 1719 | int ret; |
1721 | 1720 | ||
@@ -1764,11 +1763,10 @@ static void enable_engines_irq(struct drm_i915_private *dev_priv) | |||
1764 | */ | 1763 | */ |
1765 | void i915_reset(struct drm_i915_private *dev_priv) | 1764 | void i915_reset(struct drm_i915_private *dev_priv) |
1766 | { | 1765 | { |
1767 | struct drm_device *dev = &dev_priv->drm; | ||
1768 | struct i915_gpu_error *error = &dev_priv->gpu_error; | 1766 | struct i915_gpu_error *error = &dev_priv->gpu_error; |
1769 | int ret; | 1767 | int ret; |
1770 | 1768 | ||
1771 | lockdep_assert_held(&dev->struct_mutex); | 1769 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
1772 | 1770 | ||
1773 | if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags)) | 1771 | if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags)) |
1774 | return; | 1772 | return; |
@@ -1778,6 +1776,7 @@ void i915_reset(struct drm_i915_private *dev_priv) | |||
1778 | error->reset_count++; | 1776 | error->reset_count++; |
1779 | 1777 | ||
1780 | pr_notice("drm/i915: Resetting chip after gpu hang\n"); | 1778 | pr_notice("drm/i915: Resetting chip after gpu hang\n"); |
1779 | i915_gem_reset_prepare(dev_priv); | ||
1781 | 1780 | ||
1782 | disable_engines_irq(dev_priv); | 1781 | disable_engines_irq(dev_priv); |
1783 | ret = intel_gpu_reset(dev_priv, ALL_ENGINES); | 1782 | ret = intel_gpu_reset(dev_priv, ALL_ENGINES); |
@@ -1791,7 +1790,7 @@ void i915_reset(struct drm_i915_private *dev_priv) | |||
1791 | goto error; | 1790 | goto error; |
1792 | } | 1791 | } |
1793 | 1792 | ||
1794 | i915_gem_reset(dev_priv); | 1793 | i915_gem_reset_finish(dev_priv); |
1795 | intel_overlay_reset(dev_priv); | 1794 | intel_overlay_reset(dev_priv); |
1796 | 1795 | ||
1797 | /* Ok, now get things going again... */ | 1796 | /* Ok, now get things going again... */ |
@@ -1808,12 +1807,14 @@ void i915_reset(struct drm_i915_private *dev_priv) | |||
1808 | * was running at the time of the reset (i.e. we weren't VT | 1807 | * was running at the time of the reset (i.e. we weren't VT |
1809 | * switched away). | 1808 | * switched away). |
1810 | */ | 1809 | */ |
1811 | ret = i915_gem_init_hw(dev); | 1810 | ret = i915_gem_init_hw(dev_priv); |
1812 | if (ret) { | 1811 | if (ret) { |
1813 | DRM_ERROR("Failed hw init on reset %d\n", ret); | 1812 | DRM_ERROR("Failed hw init on reset %d\n", ret); |
1814 | goto error; | 1813 | goto error; |
1815 | } | 1814 | } |
1816 | 1815 | ||
1816 | i915_queue_hangcheck(dev_priv); | ||
1817 | |||
1817 | wakeup: | 1818 | wakeup: |
1818 | wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); | 1819 | wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); |
1819 | return; | 1820 | return; |
@@ -2320,7 +2321,7 @@ static int intel_runtime_suspend(struct device *kdev) | |||
2320 | */ | 2321 | */ |
2321 | i915_gem_runtime_suspend(dev_priv); | 2322 | i915_gem_runtime_suspend(dev_priv); |
2322 | 2323 | ||
2323 | intel_guc_suspend(dev); | 2324 | intel_guc_suspend(dev_priv); |
2324 | 2325 | ||
2325 | intel_runtime_pm_disable_interrupts(dev_priv); | 2326 | intel_runtime_pm_disable_interrupts(dev_priv); |
2326 | 2327 | ||
@@ -2405,10 +2406,10 @@ static int intel_runtime_resume(struct device *kdev) | |||
2405 | if (intel_uncore_unclaimed_mmio(dev_priv)) | 2406 | if (intel_uncore_unclaimed_mmio(dev_priv)) |
2406 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); | 2407 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); |
2407 | 2408 | ||
2408 | intel_guc_resume(dev); | 2409 | intel_guc_resume(dev_priv); |
2409 | 2410 | ||
2410 | if (IS_GEN6(dev_priv)) | 2411 | if (IS_GEN6(dev_priv)) |
2411 | intel_init_pch_refclk(dev); | 2412 | intel_init_pch_refclk(dev_priv); |
2412 | 2413 | ||
2413 | if (IS_BROXTON(dev_priv)) { | 2414 | if (IS_BROXTON(dev_priv)) { |
2414 | bxt_disable_dc9(dev_priv); | 2415 | bxt_disable_dc9(dev_priv); |
@@ -2565,6 +2566,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { | |||
2565 | DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), | 2566 | DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), |
2566 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), | 2567 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), |
2567 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), | 2568 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), |
2569 | DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), | ||
2568 | }; | 2570 | }; |
2569 | 2571 | ||
2570 | static struct drm_driver driver = { | 2572 | static struct drm_driver driver = { |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c3c1d32b65a3..52d01be956cc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -52,14 +52,16 @@ | |||
52 | 52 | ||
53 | #include "i915_params.h" | 53 | #include "i915_params.h" |
54 | #include "i915_reg.h" | 54 | #include "i915_reg.h" |
55 | #include "i915_utils.h" | ||
55 | 56 | ||
56 | #include "intel_bios.h" | 57 | #include "intel_bios.h" |
57 | #include "intel_dpll_mgr.h" | 58 | #include "intel_dpll_mgr.h" |
58 | #include "intel_guc.h" | 59 | #include "intel_uc.h" |
59 | #include "intel_lrc.h" | 60 | #include "intel_lrc.h" |
60 | #include "intel_ringbuffer.h" | 61 | #include "intel_ringbuffer.h" |
61 | 62 | ||
62 | #include "i915_gem.h" | 63 | #include "i915_gem.h" |
64 | #include "i915_gem_context.h" | ||
63 | #include "i915_gem_fence_reg.h" | 65 | #include "i915_gem_fence_reg.h" |
64 | #include "i915_gem_object.h" | 66 | #include "i915_gem_object.h" |
65 | #include "i915_gem_gtt.h" | 67 | #include "i915_gem_gtt.h" |
@@ -76,8 +78,8 @@ | |||
76 | 78 | ||
77 | #define DRIVER_NAME "i915" | 79 | #define DRIVER_NAME "i915" |
78 | #define DRIVER_DESC "Intel Graphics" | 80 | #define DRIVER_DESC "Intel Graphics" |
79 | #define DRIVER_DATE "20161121" | 81 | #define DRIVER_DATE "20170109" |
80 | #define DRIVER_TIMESTAMP 1479717903 | 82 | #define DRIVER_TIMESTAMP 1483953121 |
81 | 83 | ||
82 | #undef WARN_ON | 84 | #undef WARN_ON |
83 | /* Many gcc versions seem not to see through this and fall over :( */ | 85 |
@@ -119,6 +121,90 @@ bool __i915_inject_load_failure(const char *func, int line); | |||
119 | #define i915_inject_load_failure() \ | 121 | #define i915_inject_load_failure() \ |
120 | __i915_inject_load_failure(__func__, __LINE__) | 122 | __i915_inject_load_failure(__func__, __LINE__) |
121 | 123 | ||
124 | typedef struct { | ||
125 | uint32_t val; | ||
126 | } uint_fixed_16_16_t; | ||
127 | |||
128 | #define FP_16_16_MAX ({ \ | ||
129 | uint_fixed_16_16_t fp; \ | ||
130 | fp.val = UINT_MAX; \ | ||
131 | fp; \ | ||
132 | }) | ||
133 | |||
134 | static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val) | ||
135 | { | ||
136 | uint_fixed_16_16_t fp; | ||
137 | |||
138 | WARN_ON(val >> 16); | ||
139 | |||
140 | fp.val = val << 16; | ||
141 | return fp; | ||
142 | } | ||
143 | |||
144 | static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp) | ||
145 | { | ||
146 | return DIV_ROUND_UP(fp.val, 1 << 16); | ||
147 | } | ||
148 | |||
149 | static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp) | ||
150 | { | ||
151 | return fp.val >> 16; | ||
152 | } | ||
153 | |||
154 | static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1, | ||
155 | uint_fixed_16_16_t min2) | ||
156 | { | ||
157 | uint_fixed_16_16_t min; | ||
158 | |||
159 | min.val = min(min1.val, min2.val); | ||
160 | return min; | ||
161 | } | ||
162 | |||
163 | static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1, | ||
164 | uint_fixed_16_16_t max2) | ||
165 | { | ||
166 | uint_fixed_16_16_t max; | ||
167 | |||
168 | max.val = max(max1.val, max2.val); | ||
169 | return max; | ||
170 | } | ||
171 | |||
172 | static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val, | ||
173 | uint32_t d) | ||
174 | { | ||
175 | uint_fixed_16_16_t fp, res; | ||
176 | |||
177 | fp = u32_to_fixed_16_16(val); | ||
178 | res.val = DIV_ROUND_UP(fp.val, d); | ||
179 | return res; | ||
180 | } | ||
181 | |||
182 | static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val, | ||
183 | uint32_t d) | ||
184 | { | ||
185 | uint_fixed_16_16_t res; | ||
186 | uint64_t interm_val; | ||
187 | |||
188 | interm_val = (uint64_t)val << 16; | ||
189 | interm_val = DIV_ROUND_UP_ULL(interm_val, d); | ||
190 | WARN_ON(interm_val >> 32); | ||
191 | res.val = (uint32_t) interm_val; | ||
192 | |||
193 | return res; | ||
194 | } | ||
195 | |||
196 | static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val, | ||
197 | uint_fixed_16_16_t mul) | ||
198 | { | ||
199 | uint64_t intermediate_val; | ||
200 | uint_fixed_16_16_t fp; | ||
201 | |||
202 | intermediate_val = (uint64_t) val * mul.val; | ||
203 | WARN_ON(intermediate_val >> 32); | ||
204 | fp.val = (uint32_t) intermediate_val; | ||
205 | return fp; | ||
206 | } | ||
207 | |||
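A minimal standalone sketch of how these 16.16 helpers compose (a plain-C reimplementation with illustrative values, not part of the patch): scale 3 to 16.16 fixed point, multiply by 2.5, then convert back with truncation and with round-up, as fixed_16_16_to_u32() and fixed_16_16_to_u32_round_up() would.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mul = 0x28000;	/* 2.5 in 16.16: 2.5 * 65536 */
	uint32_t val = 3;

	/* mul_u32_fixed_16_16(): plain u32 times 16.16 yields 16.16 */
	uint64_t wide = (uint64_t)val * mul;	/* 0x78000 == 7.5 */

	/* truncation vs DIV_ROUND_UP(fp.val, 1 << 16) */
	printf("trunc=%u round_up=%u\n",	/* prints 7 and 8 */
	       (uint32_t)(wide >> 16),
	       (uint32_t)((wide + 0xffff) >> 16));
	return 0;
}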
122 | static inline const char *yesno(bool v) | 208 | static inline const char *yesno(bool v) |
123 | { | 209 | { |
124 | return v ? "yes" : "no"; | 210 | return v ? "yes" : "no"; |
@@ -180,21 +266,39 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder) | |||
180 | } | 266 | } |
181 | 267 | ||
182 | /* | 268 | /* |
269 | * Global legacy plane identifier. Valid only for primary/sprite | ||
270 | * planes on pre-g4x, and only for primary planes on g4x+. | ||
271 | */ | ||
272 | enum plane { | ||
273 | PLANE_A, | ||
274 | PLANE_B, | ||
275 | PLANE_C, | ||
276 | }; | ||
277 | #define plane_name(p) ((p) + 'A') | ||
278 | |||
279 | #define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') | ||
280 | |||
281 | /* | ||
282 | * Per-pipe plane identifier. | ||
183 | * I915_MAX_PLANES in the enum below is the maximum (across all platforms) | 283 | * I915_MAX_PLANES in the enum below is the maximum (across all platforms) |
184 | * number of planes per CRTC. Not all platforms really have this many planes, | 284 | * number of planes per CRTC. Not all platforms really have this many planes, |
185 | * which means some arrays of size I915_MAX_PLANES may have unused entries | 285 | * which means some arrays of size I915_MAX_PLANES may have unused entries |
186 | * between the topmost sprite plane and the cursor plane. | 286 | * between the topmost sprite plane and the cursor plane. |
287 | * | ||
288 | * This is expected to be passed to various register macros | ||
289 | * (e.g. PLANE_CTL(), PS_PLANE_SEL(), etc.), so adjust with care. | ||
187 | */ | 290 | */ |
188 | enum plane { | 291 | enum plane_id { |
189 | PLANE_A = 0, | 292 | PLANE_PRIMARY, |
190 | PLANE_B, | 293 | PLANE_SPRITE0, |
191 | PLANE_C, | 294 | PLANE_SPRITE1, |
192 | PLANE_CURSOR, | 295 | PLANE_CURSOR, |
193 | I915_MAX_PLANES, | 296 | I915_MAX_PLANES, |
194 | }; | 297 | }; |
195 | #define plane_name(p) ((p) + 'A') | ||
196 | 298 | ||
197 | #define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') | 299 | #define for_each_plane_id_on_crtc(__crtc, __p) \ |
300 | for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ | ||
301 | for_each_if ((__crtc)->plane_ids_mask & BIT(__p)) | ||
198 | 302 | ||
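A standalone sketch of the iteration for_each_plane_id_on_crtc() performs (the crtc is reduced to a bare plane_ids_mask and the populated planes are invented for illustration): walk every plane id and skip the ones whose bit is clear.

#include <stdint.h>
#include <stdio.h>

enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1,
		PLANE_CURSOR, I915_MAX_PLANES };

int main(void)
{
	/* hypothetical crtc exposing primary, sprite0 and cursor */
	uint32_t plane_ids_mask = (1u << PLANE_PRIMARY) |
				  (1u << PLANE_SPRITE0) |
				  (1u << PLANE_CURSOR);
	enum plane_id p;

	for (p = PLANE_PRIMARY; p < I915_MAX_PLANES; p++) {
		if (!(plane_ids_mask & (1u << p)))
			continue;	/* for_each_if() filters like this */
		printf("plane id %d present\n", p);
	}
	return 0;
}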
199 | enum port { | 303 | enum port { |
200 | PORT_NONE = -1, | 304 | PORT_NONE = -1, |
@@ -216,7 +320,8 @@ enum dpio_channel { | |||
216 | 320 | ||
217 | enum dpio_phy { | 321 | enum dpio_phy { |
218 | DPIO_PHY0, | 322 | DPIO_PHY0, |
219 | DPIO_PHY1 | 323 | DPIO_PHY1, |
324 | DPIO_PHY2, | ||
220 | }; | 325 | }; |
221 | 326 | ||
222 | enum intel_display_power_domain { | 327 | enum intel_display_power_domain { |
@@ -416,6 +521,15 @@ struct drm_i915_file_private { | |||
416 | } rps; | 521 | } rps; |
417 | 522 | ||
418 | unsigned int bsd_engine; | 523 | unsigned int bsd_engine; |
524 | |||
525 | /* Client can have a maximum of 3 contexts banned before | ||
526 | * it is denied of creating new contexts. As one context | ||
527 | * ban needs 4 consecutive hangs, and more if there is | ||
528 | * progress in between, this is a last resort stop gap measure | ||
529 | * to limit the badly behaving clients access to gpu. | ||
530 | */ | ||
531 | #define I915_MAX_CLIENT_CONTEXT_BANS 3 | ||
532 | int context_bans; | ||
419 | }; | 533 | }; |
420 | 534 | ||
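A hypothetical sketch of how the new context_bans counter could gate context creation, consistent with the comment above; the helper name, the struct stand-in and the exact comparison are illustrative rather than taken from this patch.

#include <stdbool.h>

#define I915_MAX_CLIENT_CONTEXT_BANS 3

struct file_priv_sketch {
	int context_bans;	/* stands in for drm_i915_file_private */
};

/* deny new contexts once a client exceeds its ban budget */
static bool client_is_banned(const struct file_priv_sketch *fpriv)
{
	return fpriv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
}

int main(void)
{
	struct file_priv_sketch fpriv = { .context_bans = 4 };

	return client_is_banned(&fpriv) ? 1 : 0;	/* 1: denied */
}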
421 | /* Used by dp and fdi links */ | 535 | /* Used by dp and fdi links */ |
@@ -659,32 +773,20 @@ struct intel_csr { | |||
659 | }; | 773 | }; |
660 | 774 | ||
661 | #define DEV_INFO_FOR_EACH_FLAG(func) \ | 775 | #define DEV_INFO_FOR_EACH_FLAG(func) \ |
662 | /* Keep is_* in chronological order */ \ | ||
663 | func(is_mobile); \ | 776 | func(is_mobile); \ |
664 | func(is_i85x); \ | 777 | func(is_lp); \ |
665 | func(is_i915g); \ | ||
666 | func(is_i945gm); \ | ||
667 | func(is_g33); \ | ||
668 | func(is_g4x); \ | ||
669 | func(is_pineview); \ | ||
670 | func(is_broadwater); \ | ||
671 | func(is_crestline); \ | ||
672 | func(is_ivybridge); \ | ||
673 | func(is_valleyview); \ | ||
674 | func(is_cherryview); \ | ||
675 | func(is_haswell); \ | ||
676 | func(is_broadwell); \ | ||
677 | func(is_skylake); \ | ||
678 | func(is_broxton); \ | ||
679 | func(is_kabylake); \ | ||
680 | func(is_alpha_support); \ | 778 | func(is_alpha_support); \ |
681 | /* Keep has_* in alphabetical order */ \ | 779 | /* Keep has_* in alphabetical order */ \ |
682 | func(has_64bit_reloc); \ | 780 | func(has_64bit_reloc); \ |
781 | func(has_aliasing_ppgtt); \ | ||
683 | func(has_csr); \ | 782 | func(has_csr); \ |
684 | func(has_ddi); \ | 783 | func(has_ddi); \ |
784 | func(has_decoupled_mmio); \ | ||
685 | func(has_dp_mst); \ | 785 | func(has_dp_mst); \ |
686 | func(has_fbc); \ | 786 | func(has_fbc); \ |
687 | func(has_fpga_dbg); \ | 787 | func(has_fpga_dbg); \ |
788 | func(has_full_ppgtt); \ | ||
789 | func(has_full_48bit_ppgtt); \ | ||
688 | func(has_gmbus_irq); \ | 790 | func(has_gmbus_irq); \ |
689 | func(has_gmch_display); \ | 791 | func(has_gmch_display); \ |
690 | func(has_guc); \ | 792 | func(has_guc); \ |
@@ -705,8 +807,7 @@ struct intel_csr { | |||
705 | func(cursor_needs_physical); \ | 807 | func(cursor_needs_physical); \ |
706 | func(hws_needs_physical); \ | 808 | func(hws_needs_physical); \ |
707 | func(overlay_needs_physical); \ | 809 | func(overlay_needs_physical); \ |
708 | func(supports_tv); \ | 810 | func(supports_tv); |
709 | func(has_decoupled_mmio) | ||
710 | 811 | ||
711 | struct sseu_dev_info { | 812 | struct sseu_dev_info { |
712 | u8 slice_mask; | 813 | u8 slice_mask; |
@@ -726,13 +827,45 @@ static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) | |||
726 | return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); | 827 | return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); |
727 | } | 828 | } |
728 | 829 | ||
830 | /* Keep in gen based order, and chronological order within a gen */ | ||
831 | enum intel_platform { | ||
832 | INTEL_PLATFORM_UNINITIALIZED = 0, | ||
833 | INTEL_I830, | ||
834 | INTEL_I845G, | ||
835 | INTEL_I85X, | ||
836 | INTEL_I865G, | ||
837 | INTEL_I915G, | ||
838 | INTEL_I915GM, | ||
839 | INTEL_I945G, | ||
840 | INTEL_I945GM, | ||
841 | INTEL_G33, | ||
842 | INTEL_PINEVIEW, | ||
843 | INTEL_I965G, | ||
844 | INTEL_I965GM, | ||
845 | INTEL_G45, | ||
846 | INTEL_GM45, | ||
847 | INTEL_IRONLAKE, | ||
848 | INTEL_SANDYBRIDGE, | ||
849 | INTEL_IVYBRIDGE, | ||
850 | INTEL_VALLEYVIEW, | ||
851 | INTEL_HASWELL, | ||
852 | INTEL_BROADWELL, | ||
853 | INTEL_CHERRYVIEW, | ||
854 | INTEL_SKYLAKE, | ||
855 | INTEL_BROXTON, | ||
856 | INTEL_KABYLAKE, | ||
857 | INTEL_GEMINILAKE, | ||
858 | }; | ||
859 | |||
729 | struct intel_device_info { | 860 | struct intel_device_info { |
730 | u32 display_mmio_offset; | 861 | u32 display_mmio_offset; |
731 | u16 device_id; | 862 | u16 device_id; |
732 | u8 num_pipes; | 863 | u8 num_pipes; |
733 | u8 num_sprites[I915_MAX_PIPES]; | 864 | u8 num_sprites[I915_MAX_PIPES]; |
865 | u8 num_scalers[I915_MAX_PIPES]; | ||
734 | u8 gen; | 866 | u8 gen; |
735 | u16 gen_mask; | 867 | u16 gen_mask; |
868 | enum intel_platform platform; | ||
736 | u8 ring_mask; /* Rings supported by the HW */ | 869 | u8 ring_mask; /* Rings supported by the HW */ |
737 | u8 num_rings; | 870 | u8 num_rings; |
738 | #define DEFINE_FLAG(name) u8 name:1 | 871 | #define DEFINE_FLAG(name) u8 name:1 |
@@ -800,7 +933,8 @@ struct drm_i915_error_state { | |||
800 | /* Software tracked state */ | 933 | /* Software tracked state */ |
801 | bool waiting; | 934 | bool waiting; |
802 | int num_waiters; | 935 | int num_waiters; |
803 | int hangcheck_score; | 936 | unsigned long hangcheck_timestamp; |
937 | bool hangcheck_stalled; | ||
804 | enum intel_engine_hangcheck_action hangcheck_action; | 938 | enum intel_engine_hangcheck_action hangcheck_action; |
805 | struct i915_address_space *vm; | 939 | struct i915_address_space *vm; |
806 | int num_requests; | 940 | int num_requests; |
@@ -849,6 +983,7 @@ struct drm_i915_error_state { | |||
849 | long jiffies; | 983 | long jiffies; |
850 | pid_t pid; | 984 | pid_t pid; |
851 | u32 context; | 985 | u32 context; |
986 | int ban_score; | ||
852 | u32 seqno; | 987 | u32 seqno; |
853 | u32 head; | 988 | u32 head; |
854 | u32 tail; | 989 | u32 tail; |
@@ -870,6 +1005,7 @@ struct drm_i915_error_state { | |||
870 | 1005 | ||
871 | pid_t pid; | 1006 | pid_t pid; |
872 | char comm[TASK_COMM_LEN]; | 1007 | char comm[TASK_COMM_LEN]; |
1008 | int context_bans; | ||
873 | } engine[I915_NUM_ENGINES]; | 1009 | } engine[I915_NUM_ENGINES]; |
874 | 1010 | ||
875 | struct drm_i915_error_buffer { | 1011 | struct drm_i915_error_buffer { |
@@ -901,86 +1037,7 @@ enum i915_cache_level { | |||
901 | I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ | 1037 | I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ |
902 | }; | 1038 | }; |
903 | 1039 | ||
904 | struct i915_ctx_hang_stats { | 1040 | #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ |
905 | /* This context had batch pending when hang was declared */ | ||
906 | unsigned batch_pending; | ||
907 | |||
908 | /* This context had batch active when hang was declared */ | ||
909 | unsigned batch_active; | ||
910 | |||
911 | /* Time when this context was last blamed for a GPU reset */ | ||
912 | unsigned long guilty_ts; | ||
913 | |||
914 | /* If the context causes a second GPU hang within this time, | ||
915 | * it is permanently banned from submitting any more work. | ||
916 | */ | ||
917 | unsigned long ban_period_seconds; | ||
918 | |||
919 | /* This context is banned from submitting more work */ | ||
920 | bool banned; | ||
921 | }; | ||
922 | |||
923 | /* This must match up with the value previously used for execbuf2.rsvd1. */ | ||
924 | #define DEFAULT_CONTEXT_HANDLE 0 | ||
925 | |||
926 | /** | ||
927 | * struct i915_gem_context - as the name implies, represents a context. | ||
928 | * @ref: reference count. | ||
929 | * @user_handle: userspace tracking identity for this context. | ||
930 | * @remap_slice: l3 row remapping information. | ||
931 | * @flags: context specific flags: | ||
932 | * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0. | ||
933 | * @file_priv: filp associated with this context (NULL for global default | ||
934 | * context). | ||
935 | * @hang_stats: information about the role of this context in possible GPU | ||
936 | * hangs. | ||
937 | * @ppgtt: virtual memory space used by this context. | ||
938 | * @legacy_hw_ctx: render context backing object and whether it is correctly | ||
939 | * initialized (legacy ring submission mechanism only). | ||
940 | * @link: link in the global list of contexts. | ||
941 | * | ||
942 | * Contexts are memory images used by the hardware to store copies of their | ||
943 | * internal state. | ||
944 | */ | ||
945 | struct i915_gem_context { | ||
946 | struct kref ref; | ||
947 | struct drm_i915_private *i915; | ||
948 | struct drm_i915_file_private *file_priv; | ||
949 | struct i915_hw_ppgtt *ppgtt; | ||
950 | struct pid *pid; | ||
951 | const char *name; | ||
952 | |||
953 | struct i915_ctx_hang_stats hang_stats; | ||
954 | |||
955 | unsigned long flags; | ||
956 | #define CONTEXT_NO_ZEROMAP BIT(0) | ||
957 | #define CONTEXT_NO_ERROR_CAPTURE BIT(1) | ||
958 | |||
959 | /* Unique identifier for this context, used by the hw for tracking */ | ||
960 | unsigned int hw_id; | ||
961 | u32 user_handle; | ||
962 | int priority; /* greater priorities are serviced first */ | ||
963 | |||
964 | u32 ggtt_alignment; | ||
965 | |||
966 | struct intel_context { | ||
967 | struct i915_vma *state; | ||
968 | struct intel_ring *ring; | ||
969 | uint32_t *lrc_reg_state; | ||
970 | u64 lrc_desc; | ||
971 | int pin_count; | ||
972 | bool initialised; | ||
973 | } engine[I915_NUM_ENGINES]; | ||
974 | u32 ring_size; | ||
975 | u32 desc_template; | ||
976 | struct atomic_notifier_head status_notifier; | ||
977 | bool execlists_force_single_submission; | ||
978 | |||
979 | struct list_head link; | ||
980 | |||
981 | u8 remap_slice; | ||
982 | bool closed:1; | ||
983 | }; | ||
984 | 1041 | ||
985 | enum fb_op_origin { | 1042 | enum fb_op_origin { |
986 | ORIGIN_GTT, | 1043 | ORIGIN_GTT, |
@@ -1059,7 +1116,7 @@ struct intel_fbc { | |||
1059 | const char *no_fbc_reason; | 1116 | const char *no_fbc_reason; |
1060 | }; | 1117 | }; |
1061 | 1118 | ||
1062 | /** | 1119 | /* |
1063 | * HIGH_RR is the highest eDP panel refresh rate read from EDID | 1120 | * HIGH_RR is the highest eDP panel refresh rate read from EDID |
1064 | * LOW_RR is the lowest eDP panel refresh rate found from EDID | 1121 | * LOW_RR is the lowest eDP panel refresh rate found from EDID |
1065 | * parsing for same resolution. | 1122 | * parsing for same resolution. |
@@ -1396,7 +1453,7 @@ struct i915_gem_mm { | |||
1396 | struct work_struct free_work; | 1453 | struct work_struct free_work; |
1397 | 1454 | ||
1398 | /** Usable portion of the GTT for GEM */ | 1455 | /** Usable portion of the GTT for GEM */ |
1399 | unsigned long stolen_base; /* limited to low memory (32-bit) */ | 1456 | phys_addr_t stolen_base; /* limited to low memory (32-bit) */ |
1400 | 1457 | ||
1401 | /** PPGTT used for aliasing the PPGTT with the GTT */ | 1458 | /** PPGTT used for aliasing the PPGTT with the GTT */ |
1402 | struct i915_hw_ppgtt *aliasing_ppgtt; | 1459 | struct i915_hw_ppgtt *aliasing_ppgtt; |
@@ -1439,19 +1496,20 @@ struct drm_i915_error_state_buf { | |||
1439 | }; | 1496 | }; |
1440 | 1497 | ||
1441 | struct i915_error_state_file_priv { | 1498 | struct i915_error_state_file_priv { |
1442 | struct drm_device *dev; | 1499 | struct drm_i915_private *i915; |
1443 | struct drm_i915_error_state *error; | 1500 | struct drm_i915_error_state *error; |
1444 | }; | 1501 | }; |
1445 | 1502 | ||
1446 | #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ | 1503 | #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ |
1447 | #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ | 1504 | #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ |
1448 | 1505 | ||
1506 | #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */ | ||
1507 | #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ | ||
1508 | |||
1449 | struct i915_gpu_error { | 1509 | struct i915_gpu_error { |
1450 | /* For hangcheck timer */ | 1510 | /* For hangcheck timer */ |
1451 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ | 1511 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
1452 | #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) | 1512 | #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) |
1453 | /* Hang gpu twice in this window and your context gets banned */ | ||
1454 | #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) | ||
1455 | 1513 | ||
1456 | struct delayed_work hangcheck_work; | 1514 | struct delayed_work hangcheck_work; |
1457 | 1515 | ||
@@ -1533,6 +1591,7 @@ struct ddi_vbt_port_info { | |||
1533 | uint8_t supports_dvi:1; | 1591 | uint8_t supports_dvi:1; |
1534 | uint8_t supports_hdmi:1; | 1592 | uint8_t supports_hdmi:1; |
1535 | uint8_t supports_dp:1; | 1593 | uint8_t supports_dp:1; |
1594 | uint8_t supports_edp:1; | ||
1536 | 1595 | ||
1537 | uint8_t alternate_aux_channel; | 1596 | uint8_t alternate_aux_channel; |
1538 | uint8_t alternate_ddc_pin; | 1597 | uint8_t alternate_ddc_pin; |
@@ -1592,6 +1651,7 @@ struct intel_vbt_data { | |||
1592 | bool present; | 1651 | bool present; |
1593 | bool active_low_pwm; | 1652 | bool active_low_pwm; |
1594 | u8 min_brightness; /* min_brightness/255 of max */ | 1653 | u8 min_brightness; /* min_brightness/255 of max */ |
1654 | u8 controller; /* brightness controller number */ | ||
1595 | enum intel_backlight_type type; | 1655 | enum intel_backlight_type type; |
1596 | } backlight; | 1656 | } backlight; |
1597 | 1657 | ||
@@ -1638,24 +1698,22 @@ struct ilk_wm_values { | |||
1638 | }; | 1698 | }; |
1639 | 1699 | ||
1640 | struct vlv_pipe_wm { | 1700 | struct vlv_pipe_wm { |
1641 | uint16_t primary; | 1701 | uint16_t plane[I915_MAX_PLANES]; |
1642 | uint16_t sprite[2]; | ||
1643 | uint8_t cursor; | ||
1644 | }; | 1702 | }; |
1645 | 1703 | ||
1646 | struct vlv_sr_wm { | 1704 | struct vlv_sr_wm { |
1647 | uint16_t plane; | 1705 | uint16_t plane; |
1648 | uint8_t cursor; | 1706 | uint16_t cursor; |
1707 | }; | ||
1708 | |||
1709 | struct vlv_wm_ddl_values { | ||
1710 | uint8_t plane[I915_MAX_PLANES]; | ||
1649 | }; | 1711 | }; |
1650 | 1712 | ||
1651 | struct vlv_wm_values { | 1713 | struct vlv_wm_values { |
1652 | struct vlv_pipe_wm pipe[3]; | 1714 | struct vlv_pipe_wm pipe[3]; |
1653 | struct vlv_sr_wm sr; | 1715 | struct vlv_sr_wm sr; |
1654 | struct { | 1716 | struct vlv_wm_ddl_values ddl[3]; |
1655 | uint8_t cursor; | ||
1656 | uint8_t sprite[2]; | ||
1657 | uint8_t primary; | ||
1658 | } ddl[3]; | ||
1659 | uint8_t level; | 1717 | uint8_t level; |
1660 | bool cxsr; | 1718 | bool cxsr; |
1661 | }; | 1719 | }; |
@@ -1796,6 +1854,201 @@ struct intel_wm_config { | |||
1796 | bool sprites_scaled; | 1854 | bool sprites_scaled; |
1797 | }; | 1855 | }; |
1798 | 1856 | ||
1857 | struct i915_oa_format { | ||
1858 | u32 format; | ||
1859 | int size; | ||
1860 | }; | ||
1861 | |||
1862 | struct i915_oa_reg { | ||
1863 | i915_reg_t addr; | ||
1864 | u32 value; | ||
1865 | }; | ||
1866 | |||
1867 | struct i915_perf_stream; | ||
1868 | |||
1869 | /** | ||
1870 | * struct i915_perf_stream_ops - the OPs to support a specific stream type | ||
1871 | */ | ||
1872 | struct i915_perf_stream_ops { | ||
1873 | /** | ||
1874 | * @enable: Enables the collection of HW samples, either in response to | ||
1875 | * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened | ||
1876 | * without `I915_PERF_FLAG_DISABLED`. | ||
1877 | */ | ||
1878 | void (*enable)(struct i915_perf_stream *stream); | ||
1879 | |||
1880 | /** | ||
1881 | * @disable: Disables the collection of HW samples, either in response | ||
1882 | * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying | ||
1883 | * the stream. | ||
1884 | */ | ||
1885 | void (*disable)(struct i915_perf_stream *stream); | ||
1886 | |||
1887 | /** | ||
1888 | * @poll_wait: Call poll_wait, passing a wait queue that will be woken | ||
1889 | * once there is something ready to read() for the stream | ||
1890 | */ | ||
1891 | void (*poll_wait)(struct i915_perf_stream *stream, | ||
1892 | struct file *file, | ||
1893 | poll_table *wait); | ||
1894 | |||
1895 | /** | ||
1896 | * @wait_unlocked: For handling a blocking read, wait until there is | ||
1897 | * something ready to read() for the stream. E.g. wait on the same | ||
1898 | * wait queue that would be passed to poll_wait(). | ||
1899 | */ | ||
1900 | int (*wait_unlocked)(struct i915_perf_stream *stream); | ||
1901 | |||
1902 | /** | ||
1903 | * @read: Copy buffered metrics as records to userspace | ||
1904 | * **buf**: the userspace destination buffer | ||
1905 | * **count**: the number of bytes to copy, requested by userspace | ||
1906 | * **offset**: zero at the start of the read, updated as the read | ||
1907 | * proceeds; it represents how many bytes have been copied so far and | ||
1908 | * the buffer offset for copying the next record. | ||
1909 | * | ||
1910 | * Copy as many buffered i915 perf samples and records for this stream | ||
1911 | * to userspace as will fit in the given buffer. | ||
1912 | * | ||
1913 | * Only write complete records, returning -%ENOSPC if there isn't room | ||
1914 | * for a complete record. | ||
1915 | * | ||
1916 | * Return any error condition that results in a short read such as | ||
1917 | * -%ENOSPC or -%EFAULT, even though these may be squashed before | ||
1918 | * returning to userspace. | ||
1919 | */ | ||
1920 | int (*read)(struct i915_perf_stream *stream, | ||
1921 | char __user *buf, | ||
1922 | size_t count, | ||
1923 | size_t *offset); | ||
1924 | |||
1925 | /** | ||
1926 | * @destroy: Cleanup any stream specific resources. | ||
1927 | * | ||
1928 | * The stream will always be disabled before this is called. | ||
1929 | */ | ||
1930 | void (*destroy)(struct i915_perf_stream *stream); | ||
1931 | }; | ||
1932 | |||
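A standalone sketch of the @read contract documented above: copy only whole records, advance *offset as the copy proceeds, and return -ENOSPC for the first record that no longer fits. The record layout and names are invented, and a plain buffer stands in for the __user pointer the kernel code would copy_to_user() into.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct record { size_t size; const char *data; };

static int copy_records(char *buf, size_t count, size_t *offset,
			const struct record *recs, int nrecs)
{
	int i;

	for (i = 0; i < nrecs; i++) {
		if (*offset + recs[i].size > count)
			return -ENOSPC;		/* never split a record */
		memcpy(buf + *offset, recs[i].data, recs[i].size);
		*offset += recs[i].size;	/* next copy lands here */
	}
	return 0;
}

int main(void)
{
	struct record recs[] = { { 5, "aaaaa" }, { 7, "bbbbbbb" } };
	char buf[8];
	size_t offset = 0;
	int ret = copy_records(buf, sizeof(buf), &offset, recs, 2);

	printf("copied %zu bytes, ret=%d\n", offset, ret);	/* 5, -ENOSPC */
	return 0;
}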
1933 | /** | ||
1934 | * struct i915_perf_stream - state for a single open stream FD | ||
1935 | */ | ||
1936 | struct i915_perf_stream { | ||
1937 | /** | ||
1938 | * @dev_priv: i915 drm device | ||
1939 | */ | ||
1940 | struct drm_i915_private *dev_priv; | ||
1941 | |||
1942 | /** | ||
1943 | * @link: Links the stream into ``&drm_i915_private->streams`` | ||
1944 | */ | ||
1945 | struct list_head link; | ||
1946 | |||
1947 | /** | ||
1948 | * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` | ||
1949 | * properties given when opening a stream, representing the contents | ||
1950 | * of a single sample as read() by userspace. | ||
1951 | */ | ||
1952 | u32 sample_flags; | ||
1953 | |||
1954 | /** | ||
1955 | * @sample_size: Considering the configured contents of a sample | ||
1956 | * combined with the required header size, this is the total size | ||
1957 | * of a single sample record. | ||
1958 | */ | ||
1959 | int sample_size; | ||
1960 | |||
1961 | /** | ||
1962 | * @ctx: %NULL if measuring system-wide across all contexts or a | ||
1963 | * specific context that is being monitored. | ||
1964 | */ | ||
1965 | struct i915_gem_context *ctx; | ||
1966 | |||
1967 | /** | ||
1968 | * @enabled: Whether the stream is currently enabled, considering | ||
1969 | * whether the stream was opened in a disabled state and based | ||
1970 | * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls. | ||
1971 | */ | ||
1972 | bool enabled; | ||
1973 | |||
1974 | /** | ||
1975 | * @ops: The callbacks providing the implementation of this specific | ||
1976 | * type of configured stream. | ||
1977 | */ | ||
1978 | const struct i915_perf_stream_ops *ops; | ||
1979 | }; | ||
1980 | |||
1981 | /** | ||
1982 | * struct i915_oa_ops - Gen specific implementation of an OA unit stream | ||
1983 | */ | ||
1984 | struct i915_oa_ops { | ||
1985 | /** | ||
1986 | * @init_oa_buffer: Resets the head and tail pointers of the | ||
1987 | * circular buffer for periodic OA reports. | ||
1988 | * | ||
1989 | * Called when first opening a stream for OA metrics, but also may be | ||
1990 | * called in response to an OA buffer overflow or other error | ||
1991 | * condition. | ||
1992 | * | ||
1993 | * Note it may be necessary to clear the full OA buffer here as part of | ||
1994 | * maintaining the invariant that new reports must be written to | ||
1995 | * zeroed memory for us to be able to reliably detect if an expected | ||
1996 | * report has not yet landed in memory. (At least on Haswell the OA | ||
1997 | * buffer tail pointer is not synchronized with reports being visible | ||
1998 | * to the CPU) | ||
1999 | */ | ||
2000 | void (*init_oa_buffer)(struct drm_i915_private *dev_priv); | ||
2001 | |||
2002 | /** | ||
2003 | * @enable_metric_set: Applies any MUX configuration to set up the | ||
2004 | * Boolean and Custom (B/C) counters that are part of the counter | ||
2005 | * reports being sampled. May apply system constraints such as | ||
2006 | * disabling EU clock gating as required. | ||
2007 | */ | ||
2008 | int (*enable_metric_set)(struct drm_i915_private *dev_priv); | ||
2009 | |||
2010 | /** | ||
2011 | * @disable_metric_set: Remove system constraints associated with using | ||
2012 | * the OA unit. | ||
2013 | */ | ||
2014 | void (*disable_metric_set)(struct drm_i915_private *dev_priv); | ||
2015 | |||
2016 | /** | ||
2017 | * @oa_enable: Enable periodic sampling | ||
2018 | */ | ||
2019 | void (*oa_enable)(struct drm_i915_private *dev_priv); | ||
2020 | |||
2021 | /** | ||
2022 | * @oa_disable: Disable periodic sampling | ||
2023 | */ | ||
2024 | void (*oa_disable)(struct drm_i915_private *dev_priv); | ||
2025 | |||
2026 | /** | ||
2027 | * @read: Copy data from the circular OA buffer into a given userspace | ||
2028 | * buffer. | ||
2029 | */ | ||
2030 | int (*read)(struct i915_perf_stream *stream, | ||
2031 | char __user *buf, | ||
2032 | size_t count, | ||
2033 | size_t *offset); | ||
2034 | |||
2035 | /** | ||
2036 | * @oa_buffer_is_empty: Check if OA buffer empty (false positives OK) | ||
2037 | * | ||
2038 | * This is either called via fops or the poll check hrtimer (atomic | ||
2039 | * ctx) without any locks taken. | ||
2040 | * | ||
2041 | * It's safe to read OA config state here unlocked, assuming that this | ||
2042 | * is only called while the stream is enabled, during which the global OA | ||
2043 | * configuration can't be modified. | ||
2044 | * | ||
2045 | * Efficiency is more important than avoiding some false positives | ||
2046 | * here, which will be handled gracefully - likely resulting in an | ||
2047 | * %EAGAIN error for userspace. | ||
2048 | */ | ||
2049 | bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv); | ||
2050 | }; | ||
2051 | |||
1799 | struct drm_i915_private { | 2052 | struct drm_i915_private { |
1800 | struct drm_device drm; | 2053 | struct drm_device drm; |
1801 | 2054 | ||
@@ -1899,7 +2152,14 @@ struct drm_i915_private { | |||
1899 | 2152 | ||
1900 | unsigned int fsb_freq, mem_freq, is_ddr3; | 2153 | unsigned int fsb_freq, mem_freq, is_ddr3; |
1901 | unsigned int skl_preferred_vco_freq; | 2154 | unsigned int skl_preferred_vco_freq; |
1902 | unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; | 2155 | unsigned int cdclk_freq, max_cdclk_freq; |
2156 | |||
2157 | /* | ||
2158 | * For reading holding any crtc lock is sufficient, | ||
2159 | * for writing must hold all of them. | ||
2160 | */ | ||
2161 | unsigned int atomic_cdclk_freq; | ||
2162 | |||
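A standalone sketch of that locking rule, with pthread mutexes standing in for the per-crtc locks (names and values invented): any single lock is enough for a read, while a writer must hold every lock, so no reader can race with the update.

#include <pthread.h>
#include <stdio.h>

#define NUM_CRTCS 3

static pthread_mutex_t crtc_lock[NUM_CRTCS] = {
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
};
static unsigned int atomic_cdclk_freq;

static unsigned int read_cdclk(int crtc)	/* any one crtc lock */
{
	unsigned int v;

	pthread_mutex_lock(&crtc_lock[crtc]);
	v = atomic_cdclk_freq;
	pthread_mutex_unlock(&crtc_lock[crtc]);
	return v;
}

static void write_cdclk(unsigned int v)		/* all crtc locks */
{
	int i;

	for (i = 0; i < NUM_CRTCS; i++)
		pthread_mutex_lock(&crtc_lock[i]);
	atomic_cdclk_freq = v;
	for (i = NUM_CRTCS - 1; i >= 0; i--)
		pthread_mutex_unlock(&crtc_lock[i]);
}

int main(void)
{
	write_cdclk(337500);
	printf("cdclk=%u\n", read_cdclk(1));
	return 0;
}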
1903 | unsigned int max_dotclk_freq; | 2163 | unsigned int max_dotclk_freq; |
1904 | unsigned int rawclk_freq; | 2164 | unsigned int rawclk_freq; |
1905 | unsigned int hpll_freq; | 2165 | unsigned int hpll_freq; |
@@ -2047,6 +2307,9 @@ struct drm_i915_private { | |||
2047 | } sagv_status; | 2307 | } sagv_status; |
2048 | 2308 | ||
2049 | struct { | 2309 | struct { |
2310 | /* protects DSPARB registers on pre-g4x/vlv/chv */ | ||
2311 | spinlock_t dsparb_lock; | ||
2312 | |||
2050 | /* | 2313 | /* |
2051 | * Raw watermark latency values: | 2314 | * Raw watermark latency values: |
2052 | * in 0.1us units for WM0, | 2315 | * in 0.1us units for WM0, |
@@ -2091,6 +2354,54 @@ struct drm_i915_private { | |||
2091 | 2354 | ||
2092 | struct i915_runtime_pm pm; | 2355 | struct i915_runtime_pm pm; |
2093 | 2356 | ||
2357 | struct { | ||
2358 | bool initialized; | ||
2359 | |||
2360 | struct kobject *metrics_kobj; | ||
2361 | struct ctl_table_header *sysctl_header; | ||
2362 | |||
2363 | struct mutex lock; | ||
2364 | struct list_head streams; | ||
2365 | |||
2366 | spinlock_t hook_lock; | ||
2367 | |||
2368 | struct { | ||
2369 | struct i915_perf_stream *exclusive_stream; | ||
2370 | |||
2371 | u32 specific_ctx_id; | ||
2372 | |||
2373 | struct hrtimer poll_check_timer; | ||
2374 | wait_queue_head_t poll_wq; | ||
2375 | bool pollin; | ||
2376 | |||
2377 | bool periodic; | ||
2378 | int period_exponent; | ||
2379 | int timestamp_frequency; | ||
2380 | |||
2381 | int tail_margin; | ||
2382 | |||
2383 | int metrics_set; | ||
2384 | |||
2385 | const struct i915_oa_reg *mux_regs; | ||
2386 | int mux_regs_len; | ||
2387 | const struct i915_oa_reg *b_counter_regs; | ||
2388 | int b_counter_regs_len; | ||
2389 | |||
2390 | struct { | ||
2391 | struct i915_vma *vma; | ||
2392 | u8 *vaddr; | ||
2393 | int format; | ||
2394 | int format_size; | ||
2395 | } oa_buffer; | ||
2396 | |||
2397 | u32 gen7_latched_oastatus1; | ||
2398 | |||
2399 | struct i915_oa_ops ops; | ||
2400 | const struct i915_oa_format *oa_formats; | ||
2401 | int n_builtin_sets; | ||
2402 | } oa; | ||
2403 | } perf; | ||
2404 | |||
2094 | /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ | 2405 | /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ |
2095 | struct { | 2406 | struct { |
2096 | void (*resume)(struct drm_i915_private *); | 2407 | void (*resume)(struct drm_i915_private *); |
@@ -2133,6 +2444,8 @@ struct drm_i915_private { | |||
2133 | /* perform PHY state sanity checks? */ | 2444 | /* perform PHY state sanity checks? */ |
2134 | bool chv_phy_assert[2]; | 2445 | bool chv_phy_assert[2]; |
2135 | 2446 | ||
2447 | bool ipc_enabled; | ||
2448 | |||
2136 | /* Used to save the pipe-to-encoder mapping for audio */ | 2449 | /* Used to save the pipe-to-encoder mapping for audio */ |
2137 | struct intel_encoder *av_enc_map[I915_MAX_PIPES]; | 2450 | struct intel_encoder *av_enc_map[I915_MAX_PIPES]; |
2138 | 2451 | ||
@@ -2281,102 +2594,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) | |||
2281 | (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ | 2594 | (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ |
2282 | ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) | 2595 | ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) |
2283 | 2596 | ||
2284 | /* | ||
2285 | * A command that requires special handling by the command parser. | ||
2286 | */ | ||
2287 | struct drm_i915_cmd_descriptor { | ||
2288 | /* | ||
2289 | * Flags describing how the command parser processes the command. | ||
2290 | * | ||
2291 | * CMD_DESC_FIXED: The command has a fixed length if this is set, | ||
2292 | * a length mask if not set | ||
2293 | * CMD_DESC_SKIP: The command is allowed but does not follow the | ||
2294 | * standard length encoding for the opcode range in | ||
2295 | * which it falls | ||
2296 | * CMD_DESC_REJECT: The command is never allowed | ||
2297 | * CMD_DESC_REGISTER: The command should be checked against the | ||
2298 | * register whitelist for the appropriate ring | ||
2299 | * CMD_DESC_MASTER: The command is allowed if the submitting process | ||
2300 | * is the DRM master | ||
2301 | */ | ||
2302 | u32 flags; | ||
2303 | #define CMD_DESC_FIXED (1<<0) | ||
2304 | #define CMD_DESC_SKIP (1<<1) | ||
2305 | #define CMD_DESC_REJECT (1<<2) | ||
2306 | #define CMD_DESC_REGISTER (1<<3) | ||
2307 | #define CMD_DESC_BITMASK (1<<4) | ||
2308 | #define CMD_DESC_MASTER (1<<5) | ||
2309 | |||
2310 | /* | ||
2311 | * The command's unique identification bits and the bitmask to get them. | ||
2312 | * This isn't strictly the opcode field as defined in the spec and may | ||
2313 | * also include type, subtype, and/or subop fields. | ||
2314 | */ | ||
2315 | struct { | ||
2316 | u32 value; | ||
2317 | u32 mask; | ||
2318 | } cmd; | ||
2319 | |||
2320 | /* | ||
2321 | * The command's length. The command is either fixed length (i.e. does | ||
2322 | * not include a length field) or has a length field mask. The flag | ||
2323 | * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has | ||
2324 | * a length mask. All command entries in a command table must include | ||
2325 | * length information. | ||
2326 | */ | ||
2327 | union { | ||
2328 | u32 fixed; | ||
2329 | u32 mask; | ||
2330 | } length; | ||
2331 | |||
2332 | /* | ||
2333 | * Describes where to find a register address in the command to check | ||
2334 | * against the ring's register whitelist. Only valid if flags has the | ||
2335 | * CMD_DESC_REGISTER bit set. | ||
2336 | * | ||
2337 | * A non-zero step value implies that the command may access multiple | ||
2338 | * registers in sequence (e.g. LRI), in that case step gives the | ||
2339 | * distance in dwords between individual offset fields. | ||
2340 | */ | ||
2341 | struct { | ||
2342 | u32 offset; | ||
2343 | u32 mask; | ||
2344 | u32 step; | ||
2345 | } reg; | ||
2346 | |||
2347 | #define MAX_CMD_DESC_BITMASKS 3 | ||
2348 | /* | ||
2349 | * Describes command checks where a particular dword is masked and | ||
2350 | * compared against an expected value. If the command does not match | ||
2351 | * the expected value, the parser rejects it. Only valid if flags has | ||
2352 | * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero | ||
2353 | * are valid. | ||
2354 | * | ||
2355 | * If the check specifies a non-zero condition_mask then the parser | ||
2356 | * only performs the check when the bits specified by condition_mask | ||
2357 | * are non-zero. | ||
2358 | */ | ||
2359 | struct { | ||
2360 | u32 offset; | ||
2361 | u32 mask; | ||
2362 | u32 expected; | ||
2363 | u32 condition_offset; | ||
2364 | u32 condition_mask; | ||
2365 | } bits[MAX_CMD_DESC_BITMASKS]; | ||
2366 | }; | ||
2367 | |||
2368 | /* | ||
2369 | * A table of commands requiring special handling by the command parser. | ||
2370 | * | ||
2371 | * Each engine has an array of tables. Each table consists of an array of | ||
2372 | * command descriptors, which must be sorted with command opcodes in | ||
2373 | * ascending order. | ||
2374 | */ | ||
2375 | struct drm_i915_cmd_table { | ||
2376 | const struct drm_i915_cmd_descriptor *table; | ||
2377 | int count; | ||
2378 | }; | ||
2379 | |||
2380 | static inline const struct intel_device_info * | 2597 | static inline const struct intel_device_info * |
2381 | intel_info(const struct drm_i915_private *dev_priv) | 2598 | intel_info(const struct drm_i915_private *dev_priv) |
2382 | { | 2599 | { |
@@ -2418,34 +2635,36 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2418 | #define IS_REVID(p, since, until) \ | 2635 | #define IS_REVID(p, since, until) \ |
2419 | (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) | 2636 | (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) |
2420 | 2637 | ||
2421 | #define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577) | 2638 | #define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830) |
2422 | #define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) | 2639 | #define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G) |
2423 | #define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x) | 2640 | #define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X) |
2424 | #define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572) | 2641 | #define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G) |
2425 | #define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g) | 2642 | #define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G) |
2426 | #define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592) | 2643 | #define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM) |
2427 | #define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772) | 2644 | #define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G) |
2428 | #define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm) | 2645 | #define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM) |
2429 | #define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater) | 2646 | #define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G) |
2430 | #define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline) | 2647 | #define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM) |
2431 | #define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42) | 2648 | #define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45) |
2432 | #define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x) | 2649 | #define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45) |
2650 | #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) | ||
2433 | #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) | 2651 | #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) |
2434 | #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) | 2652 | #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) |
2435 | #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview) | 2653 | #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW) |
2436 | #define IS_G33(dev_priv) ((dev_priv)->info.is_g33) | 2654 | #define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33) |
2437 | #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) | 2655 | #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) |
2438 | #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge) | 2656 | #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE) |
2439 | #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \ | 2657 | #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \ |
2440 | INTEL_DEVID(dev_priv) == 0x0152 || \ | 2658 | INTEL_DEVID(dev_priv) == 0x0152 || \ |
2441 | INTEL_DEVID(dev_priv) == 0x015a) | 2659 | INTEL_DEVID(dev_priv) == 0x015a) |
2442 | #define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview) | 2660 | #define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW) |
2443 | #define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview) | 2661 | #define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW) |
2444 | #define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell) | 2662 | #define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL) |
2445 | #define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell) | 2663 | #define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL) |
2446 | #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake) | 2664 | #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE) |
2447 | #define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton) | 2665 | #define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON) |
2448 | #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake) | 2666 | #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE) |
2667 | #define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE) | ||
2449 | #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) | 2668 | #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) |
2450 | #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ | 2669 | #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ |
2451 | (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) | 2670 | (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) |
@@ -2502,6 +2721,7 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2502 | #define BXT_REVID_A0 0x0 | 2721 | #define BXT_REVID_A0 0x0 |
2503 | #define BXT_REVID_A1 0x1 | 2722 | #define BXT_REVID_A1 0x1 |
2504 | #define BXT_REVID_B0 0x3 | 2723 | #define BXT_REVID_B0 0x3 |
2724 | #define BXT_REVID_B_LAST 0x8 | ||
2505 | #define BXT_REVID_C0 0x9 | 2725 | #define BXT_REVID_C0 0x9 |
2506 | 2726 | ||
2507 | #define IS_BXT_REVID(dev_priv, since, until) \ | 2727 | #define IS_BXT_REVID(dev_priv, since, until) \ |
@@ -2531,6 +2751,9 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2531 | #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) | 2751 | #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) |
2532 | #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) | 2752 | #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) |
2533 | 2753 | ||
2754 | #define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp) | ||
2755 | #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) | ||
2756 | |||
2534 | #define ENGINE_MASK(id) BIT(id) | 2757 | #define ENGINE_MASK(id) BIT(id) |
2535 | #define RENDER_RING ENGINE_MASK(RCS) | 2758 | #define RENDER_RING ENGINE_MASK(RCS) |
2536 | #define BSD_RING ENGINE_MASK(VCS) | 2759 | #define BSD_RING ENGINE_MASK(VCS) |
@@ -2567,7 +2790,7 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2567 | ((dev_priv)->info.overlay_needs_physical) | 2790 | ((dev_priv)->info.overlay_needs_physical) |
2568 | 2791 | ||
2569 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ | 2792 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
2570 | #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv)) | 2793 | #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) |
2571 | 2794 | ||
2572 | /* WaRsDisableCoarsePowerGating:skl,bxt */ | 2795 | /* WaRsDisableCoarsePowerGating:skl,bxt */ |
2573 | #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ | 2796 | #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ |
@@ -2676,9 +2899,6 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) | |||
2676 | return false; | 2899 | return false; |
2677 | } | 2900 | } |
2678 | 2901 | ||
2679 | extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); | ||
2680 | extern int i915_resume_switcheroo(struct drm_device *dev); | ||
2681 | |||
2682 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, | 2902 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, |
2683 | int enable_ppgtt); | 2903 | int enable_ppgtt); |
2684 | 2904 | ||
@@ -2892,23 +3112,37 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
2892 | struct drm_file *file_priv); | 3112 | struct drm_file *file_priv); |
2893 | int i915_gem_wait_ioctl(struct drm_device *dev, void *data, | 3113 | int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
2894 | struct drm_file *file_priv); | 3114 | struct drm_file *file_priv); |
2895 | int i915_gem_load_init(struct drm_device *dev); | 3115 | int i915_gem_load_init(struct drm_i915_private *dev_priv); |
2896 | void i915_gem_load_cleanup(struct drm_device *dev); | 3116 | void i915_gem_load_cleanup(struct drm_i915_private *dev_priv); |
2897 | void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); | 3117 | void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); |
2898 | int i915_gem_freeze(struct drm_i915_private *dev_priv); | 3118 | int i915_gem_freeze(struct drm_i915_private *dev_priv); |
2899 | int i915_gem_freeze_late(struct drm_i915_private *dev_priv); | 3119 | int i915_gem_freeze_late(struct drm_i915_private *dev_priv); |
2900 | 3120 | ||
2901 | void *i915_gem_object_alloc(struct drm_device *dev); | 3121 | void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); |
2902 | void i915_gem_object_free(struct drm_i915_gem_object *obj); | 3122 | void i915_gem_object_free(struct drm_i915_gem_object *obj); |
2903 | void i915_gem_object_init(struct drm_i915_gem_object *obj, | 3123 | void i915_gem_object_init(struct drm_i915_gem_object *obj, |
2904 | const struct drm_i915_gem_object_ops *ops); | 3124 | const struct drm_i915_gem_object_ops *ops); |
2905 | struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, | 3125 | struct drm_i915_gem_object * |
2906 | u64 size); | 3126 | i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); |
2907 | struct drm_i915_gem_object *i915_gem_object_create_from_data( | 3127 | struct drm_i915_gem_object * |
2908 | struct drm_device *dev, const void *data, size_t size); | 3128 | i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, |
3129 | const void *data, size_t size); | ||
2909 | void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); | 3130 | void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); |
2910 | void i915_gem_free_object(struct drm_gem_object *obj); | 3131 | void i915_gem_free_object(struct drm_gem_object *obj); |
2911 | 3132 | ||
3133 | static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) | ||
3134 | { | ||
3135 | /* A single pass should suffice to release all the freed objects (on | ||
3136 | * most call paths), but be a little more paranoid in that freeing | ||
3137 | * the objects does take a small amount of time, during which the rcu | ||
3138 | * callbacks could have added new objects onto the freed list, and | ||
3139 | * armed the work again. | ||
3140 | */ | ||
3141 | do { | ||
3142 | rcu_barrier(); | ||
3143 | } while (flush_work(&i915->mm.free_work)); | ||
3144 | } | ||
3145 | |||
2912 | struct i915_vma * __must_check | 3146 | struct i915_vma * __must_check |
2913 | i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, | 3147 | i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, |
2914 | const struct i915_ggtt_view *view, | 3148 | const struct i915_ggtt_view *view, |
@@ -2978,7 +3212,6 @@ __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) | |||
2978 | GEM_BUG_ON(!obj->mm.pages); | 3212 | GEM_BUG_ON(!obj->mm.pages); |
2979 | 3213 | ||
2980 | atomic_dec(&obj->mm.pages_pin_count); | 3214 | atomic_dec(&obj->mm.pages_pin_count); |
2981 | GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); | ||
2982 | } | 3215 | } |
2983 | 3216 | ||
2984 | static inline void | 3217 | static inline void |
@@ -3003,8 +3236,8 @@ enum i915_map_type { | |||
3003 | 3236 | ||
3004 | /** | 3237 | /** |
3005 | * i915_gem_object_pin_map - return a contiguous mapping of the entire object | 3238 | * i915_gem_object_pin_map - return a contiguous mapping of the entire object |
3006 | * @obj - the object to map into kernel address space | 3239 | * @obj: the object to map into kernel address space |
3007 | * @type - the type of mapping, used to select pgprot_t | 3240 | * @type: the type of mapping, used to select pgprot_t |
3008 | * | 3241 | * |
3009 | * Calls i915_gem_object_pin_pages() to prevent reaping of the object's | 3242 | * Calls i915_gem_object_pin_pages() to prevent reaping of the object's |
3010 | * pages and then returns a contiguous mapping of the backing storage into | 3243 | * pages and then returns a contiguous mapping of the backing storage into |
@@ -3022,7 +3255,7 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, | |||
3022 | 3255 | ||
3023 | /** | 3256 | /** |
3024 | * i915_gem_object_unpin_map - releases an earlier mapping | 3257 | * i915_gem_object_unpin_map - releases an earlier mapping |
3025 | * @obj - the object to unmap | 3258 | * @obj: the object to unmap |
3026 | * | 3259 | * |
3027 | * After pinning the object and mapping its pages, once you are finished | 3260 | * After pinning the object and mapping its pages, once you are finished |
3028 | * with your access, call i915_gem_object_unpin_map() to release the pin | 3261 | * with your access, call i915_gem_object_unpin_map() to release the pin |
@@ -3090,17 +3323,18 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) | |||
3090 | return READ_ONCE(error->reset_count); | 3323 | return READ_ONCE(error->reset_count); |
3091 | } | 3324 | } |
3092 | 3325 | ||
3093 | void i915_gem_reset(struct drm_i915_private *dev_priv); | 3326 | void i915_gem_reset_prepare(struct drm_i915_private *dev_priv); |
3327 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv); | ||
3094 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv); | 3328 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv); |
3095 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); | 3329 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); |
3096 | int __must_check i915_gem_init(struct drm_device *dev); | 3330 | int __must_check i915_gem_init(struct drm_i915_private *dev_priv); |
3097 | int __must_check i915_gem_init_hw(struct drm_device *dev); | 3331 | int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); |
3098 | void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); | 3332 | void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); |
3099 | void i915_gem_cleanup_engines(struct drm_device *dev); | 3333 | void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); |
3100 | int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, | 3334 | int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, |
3101 | unsigned int flags); | 3335 | unsigned int flags); |
3102 | int __must_check i915_gem_suspend(struct drm_device *dev); | 3336 | int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); |
3103 | void i915_gem_resume(struct drm_device *dev); | 3337 | void i915_gem_resume(struct drm_i915_private *dev_priv); |
3104 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 3338 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
3105 | int i915_gem_object_wait(struct drm_i915_gem_object *obj, | 3339 | int i915_gem_object_wait(struct drm_i915_gem_object *obj, |
3106 | unsigned int flags, | 3340 | unsigned int flags, |
@@ -3174,6 +3408,7 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, | |||
3174 | int __must_check i915_vma_get_fence(struct i915_vma *vma); | 3408 | int __must_check i915_vma_get_fence(struct i915_vma *vma); |
3175 | int __must_check i915_vma_put_fence(struct i915_vma *vma); | 3409 | int __must_check i915_vma_put_fence(struct i915_vma *vma); |
3176 | 3410 | ||
3411 | void i915_gem_revoke_fences(struct drm_i915_private *dev_priv); | ||
3177 | void i915_gem_restore_fences(struct drm_i915_private *dev_priv); | 3412 | void i915_gem_restore_fences(struct drm_i915_private *dev_priv); |
3178 | 3413 | ||
3179 | void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); | 3414 | void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); |
@@ -3182,23 +3417,6 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, | |||
3182 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, | 3417 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, |
3183 | struct sg_table *pages); | 3418 | struct sg_table *pages); |
3184 | 3419 | ||
3185 | /* i915_gem_context.c */ | ||
3186 | int __must_check i915_gem_context_init(struct drm_device *dev); | ||
3187 | void i915_gem_context_lost(struct drm_i915_private *dev_priv); | ||
3188 | void i915_gem_context_fini(struct drm_device *dev); | ||
3189 | int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); | ||
3190 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); | ||
3191 | int i915_switch_context(struct drm_i915_gem_request *req); | ||
3192 | int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); | ||
3193 | struct i915_vma * | ||
3194 | i915_gem_context_pin_legacy(struct i915_gem_context *ctx, | ||
3195 | unsigned int flags); | ||
3196 | void i915_gem_context_free(struct kref *ctx_ref); | ||
3197 | struct drm_i915_gem_object * | ||
3198 | i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); | ||
3199 | struct i915_gem_context * | ||
3200 | i915_gem_context_create_gvt(struct drm_device *dev); | ||
3201 | |||
3202 | static inline struct i915_gem_context * | 3420 | static inline struct i915_gem_context * |
3203 | i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) | 3421 | i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) |
3204 | { | 3422 | { |
@@ -3226,6 +3444,14 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx) | |||
3226 | kref_put(&ctx->ref, i915_gem_context_free); | 3444 | kref_put(&ctx->ref, i915_gem_context_free); |
3227 | } | 3445 | } |
3228 | 3446 | ||
3447 | static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx) | ||
3448 | { | ||
3449 | struct mutex *lock = &ctx->i915->drm.struct_mutex; | ||
3450 | |||
3451 | if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock)) | ||
3452 | mutex_unlock(lock); | ||
3453 | } | ||
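The new put_unlocked helper leans on the kref_put_mutex() contract: the mutex is taken only if the refcount is about to hit zero, the release callback then runs with the mutex held, and a true return tells the caller to unlock. A minimal standalone sketch of that pattern (names here are illustrative, not from the driver):

        #include <linux/kref.h>
        #include <linux/mutex.h>
        #include <linux/slab.h>

        struct demo_obj {
                struct kref ref;
                struct mutex *lock;     /* serialises the free path */
        };

        static void demo_release(struct kref *ref)
        {
                /* called with *lock already held by kref_put_mutex() */
                kfree(container_of(ref, struct demo_obj, ref));
        }

        static void demo_put(struct demo_obj *obj)
        {
                struct mutex *lock = obj->lock;

                if (kref_put_mutex(&obj->ref, demo_release, lock))
                        mutex_unlock(lock);     /* we dropped the last ref */
        }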
3454 | |||
3229 | static inline struct intel_timeline * | 3455 | static inline struct intel_timeline * |
3230 | i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, | 3456 | i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, |
3231 | struct intel_engine_cs *engine) | 3457 | struct intel_engine_cs *engine) |
@@ -3236,21 +3462,8 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, | |||
3236 | return &vm->timeline.engine[engine->id]; | 3462 | return &vm->timeline.engine[engine->id]; |
3237 | } | 3463 | } |
3238 | 3464 | ||
3239 | static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) | 3465 | int i915_perf_open_ioctl(struct drm_device *dev, void *data, |
3240 | { | 3466 | struct drm_file *file); |
3241 | return c->user_handle == DEFAULT_CONTEXT_HANDLE; | ||
3242 | } | ||
3243 | |||
3244 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | ||
3245 | struct drm_file *file); | ||
3246 | int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
3247 | struct drm_file *file); | ||
3248 | int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, | ||
3249 | struct drm_file *file_priv); | ||
3250 | int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, | ||
3251 | struct drm_file *file_priv); | ||
3252 | int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, | ||
3253 | struct drm_file *file); | ||
3254 | 3467 | ||
3255 | /* i915_gem_evict.c */ | 3468 | /* i915_gem_evict.c */ |
3256 | int __must_check i915_gem_evict_something(struct i915_address_space *vm, | 3469 | int __must_check i915_gem_evict_something(struct i915_address_space *vm, |
@@ -3258,7 +3471,8 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm, | |||
3258 | unsigned cache_level, | 3471 | unsigned cache_level, |
3259 | u64 start, u64 end, | 3472 | u64 start, u64 end, |
3260 | unsigned flags); | 3473 | unsigned flags); |
3261 | int __must_check i915_gem_evict_for_vma(struct i915_vma *target); | 3474 | int __must_check i915_gem_evict_for_vma(struct i915_vma *vma, |
3475 | unsigned int flags); | ||
3262 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); | 3476 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); |
3263 | 3477 | ||
3264 | /* belongs in i915_gem_gtt.h */ | 3478 | /* belongs in i915_gem_gtt.h */ |
@@ -3282,9 +3496,9 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, | |||
3282 | int i915_gem_init_stolen(struct drm_i915_private *dev_priv); | 3496 | int i915_gem_init_stolen(struct drm_i915_private *dev_priv); |
3283 | void i915_gem_cleanup_stolen(struct drm_device *dev); | 3497 | void i915_gem_cleanup_stolen(struct drm_device *dev); |
3284 | struct drm_i915_gem_object * | 3498 | struct drm_i915_gem_object * |
3285 | i915_gem_object_create_stolen(struct drm_device *dev, u32 size); | 3499 | i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size); |
3286 | struct drm_i915_gem_object * | 3500 | struct drm_i915_gem_object * |
3287 | i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | 3501 | i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, |
3288 | u32 stolen_offset, | 3502 | u32 stolen_offset, |
3289 | u32 gtt_offset, | 3503 | u32 gtt_offset, |
3290 | u32 size); | 3504 | u32 size); |
@@ -3352,7 +3566,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, | |||
3352 | void i915_error_state_get(struct drm_device *dev, | 3566 | void i915_error_state_get(struct drm_device *dev, |
3353 | struct i915_error_state_file_priv *error_priv); | 3567 | struct i915_error_state_file_priv *error_priv); |
3354 | void i915_error_state_put(struct i915_error_state_file_priv *error_priv); | 3568 | void i915_error_state_put(struct i915_error_state_file_priv *error_priv); |
3355 | void i915_destroy_error_state(struct drm_device *dev); | 3569 | void i915_destroy_error_state(struct drm_i915_private *dev_priv); |
3356 | 3570 | ||
3357 | #else | 3571 | #else |
3358 | 3572 | ||
@@ -3362,7 +3576,7 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, | |||
3362 | { | 3576 | { |
3363 | } | 3577 | } |
3364 | 3578 | ||
3365 | static inline void i915_destroy_error_state(struct drm_device *dev) | 3579 | static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv) |
3366 | { | 3580 | { |
3367 | } | 3581 | } |
3368 | 3582 | ||
@@ -3374,7 +3588,6 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); | |||
3374 | int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); | 3588 | int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); |
3375 | void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); | 3589 | void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); |
3376 | void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); | 3590 | void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); |
3377 | bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine); | ||
3378 | int intel_engine_cmd_parser(struct intel_engine_cs *engine, | 3591 | int intel_engine_cmd_parser(struct intel_engine_cs *engine, |
3379 | struct drm_i915_gem_object *batch_obj, | 3592 | struct drm_i915_gem_object *batch_obj, |
3380 | struct drm_i915_gem_object *shadow_batch_obj, | 3593 | struct drm_i915_gem_object *shadow_batch_obj, |
@@ -3382,17 +3595,23 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, | |||
3382 | u32 batch_len, | 3595 | u32 batch_len, |
3383 | bool is_master); | 3596 | bool is_master); |
3384 | 3597 | ||
3598 | /* i915_perf.c */ | ||
3599 | extern void i915_perf_init(struct drm_i915_private *dev_priv); | ||
3600 | extern void i915_perf_fini(struct drm_i915_private *dev_priv); | ||
3601 | extern void i915_perf_register(struct drm_i915_private *dev_priv); | ||
3602 | extern void i915_perf_unregister(struct drm_i915_private *dev_priv); | ||
3603 | |||
3385 | /* i915_suspend.c */ | 3604 | /* i915_suspend.c */ |
3386 | extern int i915_save_state(struct drm_device *dev); | 3605 | extern int i915_save_state(struct drm_i915_private *dev_priv); |
3387 | extern int i915_restore_state(struct drm_device *dev); | 3606 | extern int i915_restore_state(struct drm_i915_private *dev_priv); |
3388 | 3607 | ||
3389 | /* i915_sysfs.c */ | 3608 | /* i915_sysfs.c */ |
3390 | void i915_setup_sysfs(struct drm_i915_private *dev_priv); | 3609 | void i915_setup_sysfs(struct drm_i915_private *dev_priv); |
3391 | void i915_teardown_sysfs(struct drm_i915_private *dev_priv); | 3610 | void i915_teardown_sysfs(struct drm_i915_private *dev_priv); |
3392 | 3611 | ||
3393 | /* intel_i2c.c */ | 3612 | /* intel_i2c.c */ |
3394 | extern int intel_setup_gmbus(struct drm_device *dev); | 3613 | extern int intel_setup_gmbus(struct drm_i915_private *dev_priv); |
3395 | extern void intel_teardown_gmbus(struct drm_device *dev); | 3614 | extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv); |
3396 | extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, | 3615 | extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, |
3397 | unsigned int pin); | 3616 | unsigned int pin); |
3398 | 3617 | ||
@@ -3404,7 +3623,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) | |||
3404 | { | 3623 | { |
3405 | return container_of(adapter, struct intel_gmbus, adapter)->force_bit; | 3624 | return container_of(adapter, struct intel_gmbus, adapter)->force_bit; |
3406 | } | 3625 | } |
3407 | extern void intel_i2c_reset(struct drm_device *dev); | 3626 | extern void intel_i2c_reset(struct drm_i915_private *dev_priv); |
3408 | 3627 | ||
3409 | /* intel_bios.c */ | 3628 | /* intel_bios.c */ |
3410 | int intel_bios_init(struct drm_i915_private *dev_priv); | 3629 | int intel_bios_init(struct drm_i915_private *dev_priv); |
@@ -3471,6 +3690,7 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) | |||
3471 | return (struct intel_device_info *)&dev_priv->info; | 3690 | return (struct intel_device_info *)&dev_priv->info; |
3472 | } | 3691 | } |
3473 | 3692 | ||
3693 | const char *intel_platform_name(enum intel_platform platform); | ||
3474 | void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); | 3694 | void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); |
3475 | void intel_device_info_dump(struct drm_i915_private *dev_priv); | 3695 | void intel_device_info_dump(struct drm_i915_private *dev_priv); |
3476 | 3696 | ||
@@ -3487,9 +3707,9 @@ extern void intel_display_resume(struct drm_device *dev); | |||
3487 | extern void i915_redisable_vga(struct drm_i915_private *dev_priv); | 3707 | extern void i915_redisable_vga(struct drm_i915_private *dev_priv); |
3488 | extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); | 3708 | extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); |
3489 | extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); | 3709 | extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); |
3490 | extern void intel_init_pch_refclk(struct drm_device *dev); | 3710 | extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); |
3491 | extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); | 3711 | extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); |
3492 | extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, | 3712 | extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, |
3493 | bool enable); | 3713 | bool enable); |
3494 | 3714 | ||
3495 | int i915_reg_read_ioctl(struct drm_device *dev, void *data, | 3715 | int i915_reg_read_ioctl(struct drm_device *dev, void *data, |
@@ -3534,7 +3754,7 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); | |||
3534 | void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); | 3754 | void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
3535 | 3755 | ||
3536 | /* intel_dpio_phy.c */ | 3756 | /* intel_dpio_phy.c */ |
3537 | void bxt_port_to_phy_channel(enum port port, | 3757 | void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, |
3538 | enum dpio_phy *phy, enum dpio_channel *ch); | 3758 | enum dpio_phy *phy, enum dpio_channel *ch); |
3539 | void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, | 3759 | void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, |
3540 | enum port port, u32 margin, u32 scale, | 3760 | enum port port, u32 margin, u32 scale, |
@@ -3801,29 +4021,25 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req) | |||
3801 | void i915_memcpy_init_early(struct drm_i915_private *dev_priv); | 4021 | void i915_memcpy_init_early(struct drm_i915_private *dev_priv); |
3802 | bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); | 4022 | bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); |
3803 | 4023 | ||
4024 | /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment, | ||
4025 | * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot | ||
4026 | * perform the operation. To check beforehand, pass in the parameters | ||
4027 | * to i915_can_memcpy_from_wc() - since we only care about the low 4 bits, | ||
4028 | * you only need to pass in the minor offsets; page-aligned pointers are | ||
4029 | * always valid. | ||
4030 | * | ||
4031 | * To check only for SSE4.1, in the foreknowledge that the future use | ||
4032 | * will be correctly aligned, use i915_has_memcpy_from_wc(). | ||
4033 | */ | ||
4034 | #define i915_can_memcpy_from_wc(dst, src, len) \ | ||
4035 | i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0) | ||
4036 | |||
4037 | #define i915_has_memcpy_from_wc() \ | ||
4038 | i915_memcpy_from_wc(NULL, NULL, 0) | ||
4039 | |||
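Taken together, these helpers make the capability probe almost free: since i915_memcpy_from_wc() only refuses when SSE4.1 is missing or any of the low four address/length bits are set, OR-ing dst, src and len into one dummy pointer checks all three alignments in a single call. A hypothetical caller, sketched under that reading:

        static void copy_wc_or_fallback(void *dst, const void *src,
                                        unsigned long len)
        {
                /* i915_memcpy_from_wc() reports whether it performed the
                 * accelerated copy, so the fallback can follow directly
                 */
                if (!i915_memcpy_from_wc(dst, src, len))
                        memcpy(dst, src, len);
        }

i915_can_memcpy_from_wc(dst, src, len) answers the same question up front, for callers that want to pick a strategy before any data exists to copy.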
3804 | /* i915_mm.c */ | 4040 | /* i915_mm.c */ |
3805 | int remap_io_mapping(struct vm_area_struct *vma, | 4041 | int remap_io_mapping(struct vm_area_struct *vma, |
3806 | unsigned long addr, unsigned long pfn, unsigned long size, | 4042 | unsigned long addr, unsigned long pfn, unsigned long size, |
3807 | struct io_mapping *iomap); | 4043 | struct io_mapping *iomap); |
3808 | 4044 | ||
3809 | #define ptr_mask_bits(ptr) ({ \ | ||
3810 | unsigned long __v = (unsigned long)(ptr); \ | ||
3811 | (typeof(ptr))(__v & PAGE_MASK); \ | ||
3812 | }) | ||
3813 | |||
3814 | #define ptr_unpack_bits(ptr, bits) ({ \ | ||
3815 | unsigned long __v = (unsigned long)(ptr); \ | ||
3816 | (bits) = __v & ~PAGE_MASK; \ | ||
3817 | (typeof(ptr))(__v & PAGE_MASK); \ | ||
3818 | }) | ||
3819 | |||
3820 | #define ptr_pack_bits(ptr, bits) \ | ||
3821 | ((typeof(ptr))((unsigned long)(ptr) | (bits))) | ||
3822 | |||
3823 | #define fetch_and_zero(ptr) ({ \ | ||
3824 | typeof(*ptr) __T = *(ptr); \ | ||
3825 | *(ptr) = (typeof(*ptr))0; \ | ||
3826 | __T; \ | ||
3827 | }) | ||
3828 | |||
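These utility macros are removed from i915_drv.h here; presumably they live on in a shared header rather than being dropped outright. For reference, a hypothetical round-trip through the pointer-packing trio, plus fetch_and_zero():

        struct foo;

        static void *demo_roundtrip(void *page_aligned)
        {
                unsigned long bits;
                /* flags must fit below PAGE_SIZE alignment, i.e. the low 12 bits */
                void *packed = ptr_pack_bits(page_aligned, 0x3);

                return ptr_unpack_bits(packed, bits);   /* bits == 0x3 again */
        }

        static struct foo *demo_take(struct foo **slot)
        {
                /* read and clear in a single expression */
                return fetch_and_zero(slot);            /* *slot is now NULL */
        }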
3829 | #endif | 4045 | #endif |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3dd7fc662859..dc00d9ae6d92 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/reservation.h> | 38 | #include <linux/reservation.h> |
39 | #include <linux/shmem_fs.h> | 39 | #include <linux/shmem_fs.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/stop_machine.h> | ||
41 | #include <linux/swap.h> | 42 | #include <linux/swap.h> |
42 | #include <linux/pci.h> | 43 | #include <linux/pci.h> |
43 | #include <linux/dma-buf.h> | 44 | #include <linux/dma-buf.h> |
@@ -69,7 +70,8 @@ insert_mappable_node(struct i915_ggtt *ggtt, | |||
69 | { | 70 | { |
70 | memset(node, 0, sizeof(*node)); | 71 | memset(node, 0, sizeof(*node)); |
71 | return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node, | 72 | return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node, |
72 | size, 0, -1, | 73 | size, 0, |
74 | I915_COLOR_UNEVICTABLE, | ||
73 | 0, ggtt->mappable_end, | 75 | 0, ggtt->mappable_end, |
74 | DRM_MM_SEARCH_DEFAULT, | 76 | DRM_MM_SEARCH_DEFAULT, |
75 | DRM_MM_CREATE_DEFAULT); | 77 | DRM_MM_CREATE_DEFAULT); |
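insert_mappable_node() now passes a named colour where a bare -1 used to be. The definition is not shown in this diff; presumably it is simply the same sentinel, named for clarity (assumed):

        /* assumed definition: the old magic -1, given a self-describing name */
        #define I915_COLOR_UNEVICTABLE (-1)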
@@ -595,52 +597,25 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
595 | struct drm_i915_gem_pwrite *args, | 597 | struct drm_i915_gem_pwrite *args, |
596 | struct drm_file *file) | 598 | struct drm_file *file) |
597 | { | 599 | { |
598 | struct drm_device *dev = obj->base.dev; | ||
599 | void *vaddr = obj->phys_handle->vaddr + args->offset; | 600 | void *vaddr = obj->phys_handle->vaddr + args->offset; |
600 | char __user *user_data = u64_to_user_ptr(args->data_ptr); | 601 | char __user *user_data = u64_to_user_ptr(args->data_ptr); |
601 | int ret; | ||
602 | 602 | ||
603 | /* We manually control the domain here and pretend that it | 603 | /* We manually control the domain here and pretend that it |
604 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. | 604 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. |
605 | */ | 605 | */ |
606 | lockdep_assert_held(&obj->base.dev->struct_mutex); | ||
607 | ret = i915_gem_object_wait(obj, | ||
608 | I915_WAIT_INTERRUPTIBLE | | ||
609 | I915_WAIT_LOCKED | | ||
610 | I915_WAIT_ALL, | ||
611 | MAX_SCHEDULE_TIMEOUT, | ||
612 | to_rps_client(file)); | ||
613 | if (ret) | ||
614 | return ret; | ||
615 | |||
616 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); | 606 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); |
617 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 607 | if (copy_from_user(vaddr, user_data, args->size)) |
618 | unsigned long unwritten; | 608 | return -EFAULT; |
619 | |||
620 | /* The physical object once assigned is fixed for the lifetime | ||
621 | * of the obj, so we can safely drop the lock and continue | ||
622 | * to access vaddr. | ||
623 | */ | ||
624 | mutex_unlock(&dev->struct_mutex); | ||
625 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
626 | mutex_lock(&dev->struct_mutex); | ||
627 | if (unwritten) { | ||
628 | ret = -EFAULT; | ||
629 | goto out; | ||
630 | } | ||
631 | } | ||
632 | 609 | ||
633 | drm_clflush_virt_range(vaddr, args->size); | 610 | drm_clflush_virt_range(vaddr, args->size); |
634 | i915_gem_chipset_flush(to_i915(dev)); | 611 | i915_gem_chipset_flush(to_i915(obj->base.dev)); |
635 | 612 | ||
636 | out: | ||
637 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); | 613 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); |
638 | return ret; | 614 | return 0; |
639 | } | 615 | } |
640 | 616 | ||
641 | void *i915_gem_object_alloc(struct drm_device *dev) | 617 | void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) |
642 | { | 618 | { |
643 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
644 | return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); | 619 | return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); |
645 | } | 620 | } |
646 | 621 | ||
@@ -652,7 +627,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) | |||
652 | 627 | ||
653 | static int | 628 | static int |
654 | i915_gem_create(struct drm_file *file, | 629 | i915_gem_create(struct drm_file *file, |
655 | struct drm_device *dev, | 630 | struct drm_i915_private *dev_priv, |
656 | uint64_t size, | 631 | uint64_t size, |
657 | uint32_t *handle_p) | 632 | uint32_t *handle_p) |
658 | { | 633 | { |
@@ -665,7 +640,7 @@ i915_gem_create(struct drm_file *file, | |||
665 | return -EINVAL; | 640 | return -EINVAL; |
666 | 641 | ||
667 | /* Allocate the new object */ | 642 | /* Allocate the new object */ |
668 | obj = i915_gem_object_create(dev, size); | 643 | obj = i915_gem_object_create(dev_priv, size); |
669 | if (IS_ERR(obj)) | 644 | if (IS_ERR(obj)) |
670 | return PTR_ERR(obj); | 645 | return PTR_ERR(obj); |
671 | 646 | ||
@@ -687,7 +662,7 @@ i915_gem_dumb_create(struct drm_file *file, | |||
687 | /* have to work out size/pitch and return them */ | 662 | /* have to work out size/pitch and return them */ |
688 | args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); | 663 | args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); |
689 | args->size = args->pitch * args->height; | 664 | args->size = args->pitch * args->height; |
690 | return i915_gem_create(file, dev, | 665 | return i915_gem_create(file, to_i915(dev), |
691 | args->size, &args->handle); | 666 | args->size, &args->handle); |
692 | } | 667 | } |
693 | 668 | ||
@@ -701,11 +676,12 @@ int | |||
701 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | 676 | i915_gem_create_ioctl(struct drm_device *dev, void *data, |
702 | struct drm_file *file) | 677 | struct drm_file *file) |
703 | { | 678 | { |
679 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
704 | struct drm_i915_gem_create *args = data; | 680 | struct drm_i915_gem_create *args = data; |
705 | 681 | ||
706 | i915_gem_flush_free_objects(to_i915(dev)); | 682 | i915_gem_flush_free_objects(dev_priv); |
707 | 683 | ||
708 | return i915_gem_create(file, dev, | 684 | return i915_gem_create(file, dev_priv, |
709 | args->size, &args->handle); | 685 | args->size, &args->handle); |
710 | } | 686 | } |
711 | 687 | ||
@@ -1140,8 +1116,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
1140 | return -ENOENT; | 1116 | return -ENOENT; |
1141 | 1117 | ||
1142 | /* Bounds check source. */ | 1118 | /* Bounds check source. */ |
1143 | if (args->offset > obj->base.size || | 1119 | if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { |
1144 | args->size > obj->base.size - args->offset) { | ||
1145 | ret = -EINVAL; | 1120 | ret = -EINVAL; |
1146 | goto out; | 1121 | goto out; |
1147 | } | 1122 | } |
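range_overflows_t() folds the two-comparison bounds check that was open-coded above (and in the pwrite path just below) into one helper. A simplified sketch of the test it performs, ignoring the type-cast plumbing the _t suffix implies:

        /* mirrors the removed open-coded check: [start, start + size) must
         * lie within an object of size max; the subtracted form cannot
         * itself overflow, unlike a naive start + size > max
         */
        static inline bool demo_range_overflows(u64 start, u64 size, u64 max)
        {
                return start > max || size > max - start;
        }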
@@ -1454,8 +1429,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1454 | return -ENOENT; | 1429 | return -ENOENT; |
1455 | 1430 | ||
1456 | /* Bounds check destination. */ | 1431 | /* Bounds check destination. */ |
1457 | if (args->offset > obj->base.size || | 1432 | if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { |
1458 | args->size > obj->base.size - args->offset) { | ||
1459 | ret = -EINVAL; | 1433 | ret = -EINVAL; |
1460 | goto err; | 1434 | goto err; |
1461 | } | 1435 | } |
@@ -1517,7 +1491,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) | |||
1517 | 1491 | ||
1518 | list_for_each_entry(vma, &obj->vma_list, obj_link) { | 1492 | list_for_each_entry(vma, &obj->vma_list, obj_link) { |
1519 | if (!i915_vma_is_ggtt(vma)) | 1493 | if (!i915_vma_is_ggtt(vma)) |
1520 | continue; | 1494 | break; |
1521 | 1495 | ||
1522 | if (i915_vma_is_active(vma)) | 1496 | if (i915_vma_is_active(vma)) |
1523 | continue; | 1497 | continue; |
@@ -2098,7 +2072,8 @@ u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, | |||
2098 | * Minimum alignment is 4k (GTT page size), but might be greater | 2072 | * Minimum alignment is 4k (GTT page size), but might be greater |
2099 | * if a fence register is needed for the object. | 2073 | * if a fence register is needed for the object. |
2100 | */ | 2074 | */ |
2101 | if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) || | 2075 | if (INTEL_GEN(dev_priv) >= 4 || |
2076 | (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) || | ||
2102 | tiling_mode == I915_TILING_NONE) | 2077 | tiling_mode == I915_TILING_NONE) |
2103 | return 4096; | 2078 | return 4096; |
2104 | 2079 | ||
@@ -2115,23 +2090,21 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | |||
2115 | int err; | 2090 | int err; |
2116 | 2091 | ||
2117 | err = drm_gem_create_mmap_offset(&obj->base); | 2092 | err = drm_gem_create_mmap_offset(&obj->base); |
2118 | if (!err) | 2093 | if (likely(!err)) |
2119 | return 0; | 2094 | return 0; |
2120 | 2095 | ||
2121 | /* We can idle the GPU locklessly to flush stale objects, but in order | 2096 | /* Attempt to reap some mmap space from dead objects */ |
2122 | * to claim that space for ourselves, we need to take the big | 2097 | do { |
2123 | * struct_mutex to free the requests+objects and allocate our slot. | 2098 | err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); |
2124 | */ | 2099 | if (err) |
2125 | err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); | 2100 | break; |
2126 | if (err) | ||
2127 | return err; | ||
2128 | 2101 | ||
2129 | err = i915_mutex_lock_interruptible(&dev_priv->drm); | 2102 | i915_gem_drain_freed_objects(dev_priv); |
2130 | if (!err) { | ||
2131 | i915_gem_retire_requests(dev_priv); | ||
2132 | err = drm_gem_create_mmap_offset(&obj->base); | 2103 | err = drm_gem_create_mmap_offset(&obj->base); |
2133 | mutex_unlock(&dev_priv->drm.struct_mutex); | 2104 | if (!err) |
2134 | } | 2105 | break; |
2106 | |||
2107 | } while (flush_delayed_work(&dev_priv->gt.retire_work)); | ||
2135 | 2108 | ||
2136 | return err; | 2109 | return err; |
2137 | } | 2110 | } |
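The rewritten fallback is a reclaim-and-retry loop: wait for the GPU to idle, drain freed objects (releasing their mmap offsets), retry the allocation, and keep going for as long as the retire worker still had queued work to flush. Reduced to its shape, with hypothetical stand-ins for the driver calls:

        static int wait_for_gpu_idle(void);      /* i915_gem_wait_for_idle() */
        static void reclaim_dead_objects(void);  /* i915_gem_drain_freed_objects() */
        static int try_allocate(void);           /* drm_gem_create_mmap_offset() */
        static bool worker_had_pending_work(void); /* flush_delayed_work() */

        static int demo_create_mmap_offset(void)
        {
                int err;

                do {
                        err = wait_for_gpu_idle();
                        if (err)
                                break;
                        reclaim_dead_objects();
                        err = try_allocate();
                } while (err && worker_had_pending_work());

                return err;
        }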
@@ -2324,6 +2297,7 @@ static void i915_sg_trim(struct sg_table *orig_st) | |||
2324 | /* called before being DMA mapped, no need to copy sg->dma_* */ | 2297 | /* called before being DMA mapped, no need to copy sg->dma_* */ |
2325 | new_sg = sg_next(new_sg); | 2298 | new_sg = sg_next(new_sg); |
2326 | } | 2299 | } |
2300 | GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ | ||
2327 | 2301 | ||
2328 | sg_free_table(orig_st); | 2302 | sg_free_table(orig_st); |
2329 | 2303 | ||
@@ -2645,35 +2619,34 @@ err_unlock: | |||
2645 | goto out_unlock; | 2619 | goto out_unlock; |
2646 | } | 2620 | } |
2647 | 2621 | ||
2648 | static bool i915_context_is_banned(const struct i915_gem_context *ctx) | 2622 | static bool ban_context(const struct i915_gem_context *ctx) |
2649 | { | 2623 | { |
2650 | unsigned long elapsed; | 2624 | return (i915_gem_context_is_bannable(ctx) && |
2625 | ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD); | ||
2626 | } | ||
2651 | 2627 | ||
2652 | if (ctx->hang_stats.banned) | 2628 | static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) |
2653 | return true; | 2629 | { |
2630 | ctx->guilty_count++; | ||
2631 | ctx->ban_score += CONTEXT_SCORE_GUILTY; | ||
2632 | if (ban_context(ctx)) | ||
2633 | i915_gem_context_set_banned(ctx); | ||
2654 | 2634 | ||
2655 | elapsed = get_seconds() - ctx->hang_stats.guilty_ts; | 2635 | DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", |
2656 | if (ctx->hang_stats.ban_period_seconds && | 2636 | ctx->name, ctx->ban_score, |
2657 | elapsed <= ctx->hang_stats.ban_period_seconds) { | 2637 | yesno(i915_gem_context_is_banned(ctx))); |
2658 | DRM_DEBUG("context hanging too fast, banning!\n"); | ||
2659 | return true; | ||
2660 | } | ||
2661 | 2638 | ||
2662 | return false; | 2639 | if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv)) |
2640 | return; | ||
2641 | |||
2642 | ctx->file_priv->context_bans++; | ||
2643 | DRM_DEBUG_DRIVER("client %s has had %d context banned\n", | ||
2644 | ctx->name, ctx->file_priv->context_bans); | ||
2663 | } | 2645 | } |
2664 | 2646 | ||
2665 | static void i915_set_reset_status(struct i915_gem_context *ctx, | 2647 | static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) |
2666 | const bool guilty) | ||
2667 | { | 2648 | { |
2668 | struct i915_ctx_hang_stats *hs = &ctx->hang_stats; | 2649 | ctx->active_count++; |
2669 | |||
2670 | if (guilty) { | ||
2671 | hs->banned = i915_context_is_banned(ctx); | ||
2672 | hs->batch_active++; | ||
2673 | hs->guilty_ts = get_seconds(); | ||
2674 | } else { | ||
2675 | hs->batch_pending++; | ||
2676 | } | ||
2677 | } | 2650 | } |
2678 | 2651 | ||
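The scoring amounts to a fixed number of strikes: each guilty hang adds CONTEXT_SCORE_GUILTY to ctx->ban_score, and a bannable context is banned once the score reaches CONTEXT_SCORE_BAN_THRESHOLD. A toy model with assumed constant values (the real ones are defined outside this diff):

        #include <linux/types.h>

        #define CONTEXT_SCORE_GUILTY            10      /* assumed value */
        #define CONTEXT_SCORE_BAN_THRESHOLD     40      /* assumed value */

        static bool demo_banned_after(unsigned int guilty_hangs)
        {
                /* with the values assumed above, the 4th guilty hang bans */
                return guilty_hangs * CONTEXT_SCORE_GUILTY >=
                       CONTEXT_SCORE_BAN_THRESHOLD;
        }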
2679 | struct drm_i915_gem_request * | 2652 | struct drm_i915_gem_request * |
@@ -2716,10 +2689,15 @@ static void reset_request(struct drm_i915_gem_request *request) | |||
2716 | memset(vaddr + head, 0, request->postfix - head); | 2689 | memset(vaddr + head, 0, request->postfix - head); |
2717 | } | 2690 | } |
2718 | 2691 | ||
2692 | void i915_gem_reset_prepare(struct drm_i915_private *dev_priv) | ||
2693 | { | ||
2694 | i915_gem_revoke_fences(dev_priv); | ||
2695 | } | ||
2696 | |||
2719 | static void i915_gem_reset_engine(struct intel_engine_cs *engine) | 2697 | static void i915_gem_reset_engine(struct intel_engine_cs *engine) |
2720 | { | 2698 | { |
2721 | struct drm_i915_gem_request *request; | 2699 | struct drm_i915_gem_request *request; |
2722 | struct i915_gem_context *incomplete_ctx; | 2700 | struct i915_gem_context *hung_ctx; |
2723 | struct intel_timeline *timeline; | 2701 | struct intel_timeline *timeline; |
2724 | unsigned long flags; | 2702 | unsigned long flags; |
2725 | bool ring_hung; | 2703 | bool ring_hung; |
@@ -2731,11 +2709,21 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
2731 | if (!request) | 2709 | if (!request) |
2732 | return; | 2710 | return; |
2733 | 2711 | ||
2734 | ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; | 2712 | hung_ctx = request->ctx; |
2735 | if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) | 2713 | |
2714 | ring_hung = engine->hangcheck.stalled; | ||
2715 | if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { | ||
2716 | DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n", | ||
2717 | engine->name, | ||
2718 | yesno(ring_hung)); | ||
2736 | ring_hung = false; | 2719 | ring_hung = false; |
2720 | } | ||
2721 | |||
2722 | if (ring_hung) | ||
2723 | i915_gem_context_mark_guilty(hung_ctx); | ||
2724 | else | ||
2725 | i915_gem_context_mark_innocent(hung_ctx); | ||
2737 | 2726 | ||
2738 | i915_set_reset_status(request->ctx, ring_hung); | ||
2739 | if (!ring_hung) | 2727 | if (!ring_hung) |
2740 | return; | 2728 | return; |
2741 | 2729 | ||
@@ -2745,6 +2733,10 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
2745 | /* Setup the CS to resume from the breadcrumb of the hung request */ | 2733 | /* Setup the CS to resume from the breadcrumb of the hung request */ |
2746 | engine->reset_hw(engine, request); | 2734 | engine->reset_hw(engine, request); |
2747 | 2735 | ||
2736 | /* If this context is now banned, skip all of its pending requests. */ | ||
2737 | if (!i915_gem_context_is_banned(hung_ctx)) | ||
2738 | return; | ||
2739 | |||
2748 | /* Users of the default context do not rely on logical state | 2740 | /* Users of the default context do not rely on logical state |
2749 | * preserved between batches. They have to emit full state on | 2741 | * preserved between batches. They have to emit full state on |
2750 | * every batch and so it is safe to execute queued requests following | 2742 | * every batch and so it is safe to execute queued requests following |
@@ -2753,17 +2745,16 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
2753 | * Other contexts preserve state, now corrupt. We want to skip all | 2745 | * Other contexts preserve state, now corrupt. We want to skip all |
2754 | * queued requests that reference the corrupt context. | 2746 | * queued requests that reference the corrupt context. |
2755 | */ | 2747 | */ |
2756 | incomplete_ctx = request->ctx; | 2748 | if (i915_gem_context_is_default(hung_ctx)) |
2757 | if (i915_gem_context_is_default(incomplete_ctx)) | ||
2758 | return; | 2749 | return; |
2759 | 2750 | ||
2760 | timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine); | 2751 | timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); |
2761 | 2752 | ||
2762 | spin_lock_irqsave(&engine->timeline->lock, flags); | 2753 | spin_lock_irqsave(&engine->timeline->lock, flags); |
2763 | spin_lock(&timeline->lock); | 2754 | spin_lock(&timeline->lock); |
2764 | 2755 | ||
2765 | list_for_each_entry_continue(request, &engine->timeline->requests, link) | 2756 | list_for_each_entry_continue(request, &engine->timeline->requests, link) |
2766 | if (request->ctx == incomplete_ctx) | 2757 | if (request->ctx == hung_ctx) |
2767 | reset_request(request); | 2758 | reset_request(request); |
2768 | 2759 | ||
2769 | list_for_each_entry(request, &timeline->requests, link) | 2760 | list_for_each_entry(request, &timeline->requests, link) |
@@ -2773,7 +2764,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
2773 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | 2764 | spin_unlock_irqrestore(&engine->timeline->lock, flags); |
2774 | } | 2765 | } |
2775 | 2766 | ||
2776 | void i915_gem_reset(struct drm_i915_private *dev_priv) | 2767 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) |
2777 | { | 2768 | { |
2778 | struct intel_engine_cs *engine; | 2769 | struct intel_engine_cs *engine; |
2779 | enum intel_engine_id id; | 2770 | enum intel_engine_id id; |
@@ -2803,6 +2794,12 @@ static void nop_submit_request(struct drm_i915_gem_request *request) | |||
2803 | 2794 | ||
2804 | static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) | 2795 | static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) |
2805 | { | 2796 | { |
2797 | /* We need to be sure that no thread is running the old callback as | ||
2798 | * we install the nop handler (otherwise we would submit a request | ||
2799 | * to hardware that will never complete). In order to prevent this | ||
2800 | * race, we wait until the machine is idle before making the swap | ||
2801 | * (using stop_machine()). | ||
2802 | */ | ||
2806 | engine->submit_request = nop_submit_request; | 2803 | engine->submit_request = nop_submit_request; |
2807 | 2804 | ||
2808 | /* Mark all pending requests as complete so that any concurrent | 2805 | /* Mark all pending requests as complete so that any concurrent |
@@ -2833,20 +2830,29 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) | |||
2833 | } | 2830 | } |
2834 | } | 2831 | } |
2835 | 2832 | ||
2836 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv) | 2833 | static int __i915_gem_set_wedged_BKL(void *data) |
2837 | { | 2834 | { |
2835 | struct drm_i915_private *i915 = data; | ||
2838 | struct intel_engine_cs *engine; | 2836 | struct intel_engine_cs *engine; |
2839 | enum intel_engine_id id; | 2837 | enum intel_engine_id id; |
2840 | 2838 | ||
2839 | for_each_engine(engine, i915, id) | ||
2840 | i915_gem_cleanup_engine(engine); | ||
2841 | |||
2842 | return 0; | ||
2843 | } | ||
2844 | |||
2845 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv) | ||
2846 | { | ||
2841 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | 2847 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
2842 | set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); | 2848 | set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); |
2843 | 2849 | ||
2844 | i915_gem_context_lost(dev_priv); | 2850 | stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); |
2845 | for_each_engine(engine, dev_priv, id) | ||
2846 | i915_gem_cleanup_engine(engine); | ||
2847 | mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); | ||
2848 | 2851 | ||
2852 | i915_gem_context_lost(dev_priv); | ||
2849 | i915_gem_retire_requests(dev_priv); | 2853 | i915_gem_retire_requests(dev_priv); |
2854 | |||
2855 | mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); | ||
2850 | } | 2856 | } |
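The _BKL suffix is a nod to the mechanism: stop_machine() runs its callback on one CPU while every other online CPU spins with interrupts disabled, so no thread can be halfway through the old submit_request when the nop handler is swapped in. The call shape, for reference:

        #include <linux/stop_machine.h>

        static int demo_swap(void *data)
        {
                /* runs while all other CPUs are held in a spin loop */
                return 0;
        }

        static void demo(void *state)
        {
                /* third argument NULL: any online CPU may run the callback */
                stop_machine(demo_swap, state, NULL);
        }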
2851 | 2857 | ||
2852 | static void | 2858 | static void |
@@ -3532,7 +3538,7 @@ err_unpin_display: | |||
3532 | void | 3538 | void |
3533 | i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) | 3539 | i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) |
3534 | { | 3540 | { |
3535 | lockdep_assert_held(&vma->vm->dev->struct_mutex); | 3541 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); |
3536 | 3542 | ||
3537 | if (WARN_ON(vma->obj->pin_display == 0)) | 3543 | if (WARN_ON(vma->obj->pin_display == 0)) |
3538 | return; | 3544 | return; |
@@ -3966,14 +3972,9 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = { | |||
3966 | .put_pages = i915_gem_object_put_pages_gtt, | 3972 | .put_pages = i915_gem_object_put_pages_gtt, |
3967 | }; | 3973 | }; |
3968 | 3974 | ||
3969 | /* Note we don't consider signbits :| */ | ||
3970 | #define overflows_type(x, T) \ | ||
3971 | (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) | ||
3972 | |||
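overflows_type() disappears from this file but is still called a few lines below on the new side, so it has evidently moved to a shared header. It checks whether a value survives truncation into the destination's type; as its comment warned, sign bits are ignored. A worked case for the guard in i915_gem_object_create(), assuming a 32-bit size_t:

        static bool demo_overflow(void)
        {
                u64 request = 8ull << 30;       /* an 8 GiB object request */
                size_t dst;                     /* 32 bits wide on i386 */

                /* sizeof(request) > sizeof(dst) and request >> 32 == 2, so
                 * the value cannot be represented and create returns -E2BIG
                 */
                return overflows_type(request, dst);    /* true on 32-bit */
        }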
3973 | struct drm_i915_gem_object * | 3975 | struct drm_i915_gem_object * |
3974 | i915_gem_object_create(struct drm_device *dev, u64 size) | 3976 | i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) |
3975 | { | 3977 | { |
3976 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
3977 | struct drm_i915_gem_object *obj; | 3978 | struct drm_i915_gem_object *obj; |
3978 | struct address_space *mapping; | 3979 | struct address_space *mapping; |
3979 | gfp_t mask; | 3980 | gfp_t mask; |
@@ -3990,16 +3991,16 @@ i915_gem_object_create(struct drm_device *dev, u64 size) | |||
3990 | if (overflows_type(size, obj->base.size)) | 3991 | if (overflows_type(size, obj->base.size)) |
3991 | return ERR_PTR(-E2BIG); | 3992 | return ERR_PTR(-E2BIG); |
3992 | 3993 | ||
3993 | obj = i915_gem_object_alloc(dev); | 3994 | obj = i915_gem_object_alloc(dev_priv); |
3994 | if (obj == NULL) | 3995 | if (obj == NULL) |
3995 | return ERR_PTR(-ENOMEM); | 3996 | return ERR_PTR(-ENOMEM); |
3996 | 3997 | ||
3997 | ret = drm_gem_object_init(dev, &obj->base, size); | 3998 | ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size); |
3998 | if (ret) | 3999 | if (ret) |
3999 | goto fail; | 4000 | goto fail; |
4000 | 4001 | ||
4001 | mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; | 4002 | mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; |
4002 | if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) { | 4003 | if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { |
4003 | /* 965gm cannot relocate objects above 4GiB. */ | 4004 | /* 965gm cannot relocate objects above 4GiB. */ |
4004 | mask &= ~__GFP_HIGHMEM; | 4005 | mask &= ~__GFP_HIGHMEM; |
4005 | mask |= __GFP_DMA32; | 4006 | mask |= __GFP_DMA32; |
@@ -4192,12 +4193,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) | |||
4192 | enum intel_engine_id id; | 4193 | enum intel_engine_id id; |
4193 | 4194 | ||
4194 | for_each_engine(engine, dev_priv, id) | 4195 | for_each_engine(engine, dev_priv, id) |
4195 | GEM_BUG_ON(engine->last_context != dev_priv->kernel_context); | 4196 | GEM_BUG_ON(!i915_gem_context_is_kernel(engine->last_retired_context)); |
4196 | } | 4197 | } |
4197 | 4198 | ||
4198 | int i915_gem_suspend(struct drm_device *dev) | 4199 | int i915_gem_suspend(struct drm_i915_private *dev_priv) |
4199 | { | 4200 | { |
4200 | struct drm_i915_private *dev_priv = to_i915(dev); | 4201 | struct drm_device *dev = &dev_priv->drm; |
4201 | int ret; | 4202 | int ret; |
4202 | 4203 | ||
4203 | intel_suspend_gt_powersave(dev_priv); | 4204 | intel_suspend_gt_powersave(dev_priv); |
@@ -4231,8 +4232,14 @@ int i915_gem_suspend(struct drm_device *dev) | |||
4231 | 4232 | ||
4232 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | 4233 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); |
4233 | cancel_delayed_work_sync(&dev_priv->gt.retire_work); | 4234 | cancel_delayed_work_sync(&dev_priv->gt.retire_work); |
4234 | flush_delayed_work(&dev_priv->gt.idle_work); | 4235 | |
4235 | flush_work(&dev_priv->mm.free_work); | 4236 | /* As the idle_work re-arms itself if it detects a race, play safe and |
4237 | * repeat the flush until it is definitely idle. | ||
4238 | */ | ||
4239 | while (flush_delayed_work(&dev_priv->gt.idle_work)) | ||
4240 | ; | ||
4241 | |||
4242 | i915_gem_drain_freed_objects(dev_priv); | ||
4236 | 4243 | ||
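The bare while relies on flush_delayed_work() returning true only when it actually had to wait for queued work, so a self-rearming worker keeps the loop going until a flush finds nothing pending. In isolation:

        #include <linux/workqueue.h>

        /* loop until a (possibly self-requeueing) delayed worker settles;
         * flush_delayed_work() returns false once nothing was pending
         */
        static void drain_rearming_work(struct delayed_work *w)
        {
                while (flush_delayed_work(w))
                        ;
        }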
4237 | /* Assert that we successfully flushed all the work and | 4244 | /* Assert that we successfully flushed all the work and |
4238 | * reset the GPU back to its idle, low power state. | 4245 | * reset the GPU back to its idle, low power state. |
@@ -4271,9 +4278,9 @@ err: | |||
4271 | return ret; | 4278 | return ret; |
4272 | } | 4279 | } |
4273 | 4280 | ||
4274 | void i915_gem_resume(struct drm_device *dev) | 4281 | void i915_gem_resume(struct drm_i915_private *dev_priv) |
4275 | { | 4282 | { |
4276 | struct drm_i915_private *dev_priv = to_i915(dev); | 4283 | struct drm_device *dev = &dev_priv->drm; |
4277 | 4284 | ||
4278 | WARN_ON(dev_priv->gt.awake); | 4285 | WARN_ON(dev_priv->gt.awake); |
4279 | 4286 | ||
@@ -4338,9 +4345,8 @@ static void init_unused_rings(struct drm_i915_private *dev_priv) | |||
4338 | } | 4345 | } |
4339 | 4346 | ||
4340 | int | 4347 | int |
4341 | i915_gem_init_hw(struct drm_device *dev) | 4348 | i915_gem_init_hw(struct drm_i915_private *dev_priv) |
4342 | { | 4349 | { |
4343 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4344 | struct intel_engine_cs *engine; | 4350 | struct intel_engine_cs *engine; |
4345 | enum intel_engine_id id; | 4351 | enum intel_engine_id id; |
4346 | int ret; | 4352 | int ret; |
@@ -4394,10 +4400,10 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4394 | goto out; | 4400 | goto out; |
4395 | } | 4401 | } |
4396 | 4402 | ||
4397 | intel_mocs_init_l3cc_table(dev); | 4403 | intel_mocs_init_l3cc_table(dev_priv); |
4398 | 4404 | ||
4399 | /* We can't enable contexts until all firmware is loaded */ | 4405 | /* We can't enable contexts until all firmware is loaded */ |
4400 | ret = intel_guc_setup(dev); | 4406 | ret = intel_guc_setup(dev_priv); |
4401 | if (ret) | 4407 | if (ret) |
4402 | goto out; | 4408 | goto out; |
4403 | 4409 | ||
@@ -4427,12 +4433,11 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) | |||
4427 | return true; | 4433 | return true; |
4428 | } | 4434 | } |
4429 | 4435 | ||
4430 | int i915_gem_init(struct drm_device *dev) | 4436 | int i915_gem_init(struct drm_i915_private *dev_priv) |
4431 | { | 4437 | { |
4432 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4433 | int ret; | 4438 | int ret; |
4434 | 4439 | ||
4435 | mutex_lock(&dev->struct_mutex); | 4440 | mutex_lock(&dev_priv->drm.struct_mutex); |
4436 | 4441 | ||
4437 | if (!i915.enable_execlists) { | 4442 | if (!i915.enable_execlists) { |
4438 | dev_priv->gt.resume = intel_legacy_submission_resume; | 4443 | dev_priv->gt.resume = intel_legacy_submission_resume; |
@@ -4456,15 +4461,15 @@ int i915_gem_init(struct drm_device *dev) | |||
4456 | if (ret) | 4461 | if (ret) |
4457 | goto out_unlock; | 4462 | goto out_unlock; |
4458 | 4463 | ||
4459 | ret = i915_gem_context_init(dev); | 4464 | ret = i915_gem_context_init(dev_priv); |
4460 | if (ret) | 4465 | if (ret) |
4461 | goto out_unlock; | 4466 | goto out_unlock; |
4462 | 4467 | ||
4463 | ret = intel_engines_init(dev); | 4468 | ret = intel_engines_init(dev_priv); |
4464 | if (ret) | 4469 | if (ret) |
4465 | goto out_unlock; | 4470 | goto out_unlock; |
4466 | 4471 | ||
4467 | ret = i915_gem_init_hw(dev); | 4472 | ret = i915_gem_init_hw(dev_priv); |
4468 | if (ret == -EIO) { | 4473 | if (ret == -EIO) { |
4469 | /* Allow engine initialisation to fail by marking the GPU as | 4474 | /* Allow engine initialisation to fail by marking the GPU as |
4470 | * wedged. But we only want to do this where the GPU is angry, | 4475 | * wedged. But we only want to do this where the GPU is angry, |
@@ -4477,15 +4482,14 @@ int i915_gem_init(struct drm_device *dev) | |||
4477 | 4482 | ||
4478 | out_unlock: | 4483 | out_unlock: |
4479 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 4484 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
4480 | mutex_unlock(&dev->struct_mutex); | 4485 | mutex_unlock(&dev_priv->drm.struct_mutex); |
4481 | 4486 | ||
4482 | return ret; | 4487 | return ret; |
4483 | } | 4488 | } |
4484 | 4489 | ||
4485 | void | 4490 | void |
4486 | i915_gem_cleanup_engines(struct drm_device *dev) | 4491 | i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) |
4487 | { | 4492 | { |
4488 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4489 | struct intel_engine_cs *engine; | 4493 | struct intel_engine_cs *engine; |
4490 | enum intel_engine_id id; | 4494 | enum intel_engine_id id; |
4491 | 4495 | ||
@@ -4501,8 +4505,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) | |||
4501 | if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && | 4505 | if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && |
4502 | !IS_CHERRYVIEW(dev_priv)) | 4506 | !IS_CHERRYVIEW(dev_priv)) |
4503 | dev_priv->num_fence_regs = 32; | 4507 | dev_priv->num_fence_regs = 32; |
4504 | else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) || | 4508 | else if (INTEL_INFO(dev_priv)->gen >= 4 || |
4505 | IS_I945GM(dev_priv) || IS_G33(dev_priv)) | 4509 | IS_I945G(dev_priv) || IS_I945GM(dev_priv) || |
4510 | IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) | ||
4506 | dev_priv->num_fence_regs = 16; | 4511 | dev_priv->num_fence_regs = 16; |
4507 | else | 4512 | else |
4508 | dev_priv->num_fence_regs = 8; | 4513 | dev_priv->num_fence_regs = 8; |
@@ -4525,9 +4530,8 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) | |||
4525 | } | 4530 | } |
4526 | 4531 | ||
4527 | int | 4532 | int |
4528 | i915_gem_load_init(struct drm_device *dev) | 4533 | i915_gem_load_init(struct drm_i915_private *dev_priv) |
4529 | { | 4534 | { |
4530 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4531 | int err = -ENOMEM; | 4535 | int err = -ENOMEM; |
4532 | 4536 | ||
4533 | dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); | 4537 | dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); |
@@ -4596,10 +4600,8 @@ err_out: | |||
4596 | return err; | 4600 | return err; |
4597 | } | 4601 | } |
4598 | 4602 | ||
4599 | void i915_gem_load_cleanup(struct drm_device *dev) | 4603 | void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) |
4600 | { | 4604 | { |
4601 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4602 | |||
4603 | WARN_ON(!llist_empty(&dev_priv->mm.free_list)); | 4605 | WARN_ON(!llist_empty(&dev_priv->mm.free_list)); |
4604 | 4606 | ||
4605 | mutex_lock(&dev_priv->drm.struct_mutex); | 4607 | mutex_lock(&dev_priv->drm.struct_mutex); |
@@ -4750,7 +4752,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, | |||
4750 | 4752 | ||
4751 | /* Allocate a new GEM object and fill it with the supplied data */ | 4753 | /* Allocate a new GEM object and fill it with the supplied data */ |
4752 | struct drm_i915_gem_object * | 4754 | struct drm_i915_gem_object * |
4753 | i915_gem_object_create_from_data(struct drm_device *dev, | 4755 | i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, |
4754 | const void *data, size_t size) | 4756 | const void *data, size_t size) |
4755 | { | 4757 | { |
4756 | struct drm_i915_gem_object *obj; | 4758 | struct drm_i915_gem_object *obj; |
@@ -4758,7 +4760,7 @@ i915_gem_object_create_from_data(struct drm_device *dev, | |||
4758 | size_t bytes; | 4760 | size_t bytes; |
4759 | int ret; | 4761 | int ret; |
4760 | 4762 | ||
4761 | obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE)); | 4763 | obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); |
4762 | if (IS_ERR(obj)) | 4764 | if (IS_ERR(obj)) |
4763 | return obj; | 4765 | return obj; |
4764 | 4766 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 51ec793f2e20..a585d47c420a 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h | |||
@@ -27,8 +27,10 @@ | |||
27 | 27 | ||
28 | #ifdef CONFIG_DRM_I915_DEBUG_GEM | 28 | #ifdef CONFIG_DRM_I915_DEBUG_GEM |
29 | #define GEM_BUG_ON(expr) BUG_ON(expr) | 29 | #define GEM_BUG_ON(expr) BUG_ON(expr) |
30 | #define GEM_WARN_ON(expr) WARN_ON(expr) | ||
30 | #else | 31 | #else |
31 | #define GEM_BUG_ON(expr) do { } while (0) | 32 | #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) |
33 | #define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0) | ||
32 | #endif | 34 | #endif |
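The gain over the old empty do/while is that the guarded expression is still parsed and type-checked in non-debug builds, so asserts that are normally compiled out cannot silently bitrot. In the kernel, BUILD_BUG_ON_INVALID() is essentially the following (quoted from memory, details may differ):

        /* generates no code, but forces the compiler to evaluate the
         * expression's type even when the assertion is disabled
         */
        #define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))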
33 | 35 | ||
34 | #define I915_NUM_ENGINES 5 | 36 | #define I915_NUM_ENGINES 5 |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1f94b8d6d83d..40a6939e3956 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -141,7 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref) | |||
141 | 141 | ||
142 | lockdep_assert_held(&ctx->i915->drm.struct_mutex); | 142 | lockdep_assert_held(&ctx->i915->drm.struct_mutex); |
143 | trace_i915_context_free(ctx); | 143 | trace_i915_context_free(ctx); |
144 | GEM_BUG_ON(!ctx->closed); | 144 | GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); |
145 | 145 | ||
146 | i915_ppgtt_put(ctx->ppgtt); | 146 | i915_ppgtt_put(ctx->ppgtt); |
147 | 147 | ||
@@ -166,15 +166,15 @@ void i915_gem_context_free(struct kref *ctx_ref) | |||
166 | kfree(ctx); | 166 | kfree(ctx); |
167 | } | 167 | } |
168 | 168 | ||
169 | struct drm_i915_gem_object * | 169 | static struct drm_i915_gem_object * |
170 | i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) | 170 | alloc_context_obj(struct drm_i915_private *dev_priv, u64 size) |
171 | { | 171 | { |
172 | struct drm_i915_gem_object *obj; | 172 | struct drm_i915_gem_object *obj; |
173 | int ret; | 173 | int ret; |
174 | 174 | ||
175 | lockdep_assert_held(&dev->struct_mutex); | 175 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
176 | 176 | ||
177 | obj = i915_gem_object_create(dev, size); | 177 | obj = i915_gem_object_create(dev_priv, size); |
178 | if (IS_ERR(obj)) | 178 | if (IS_ERR(obj)) |
179 | return obj; | 179 | return obj; |
180 | 180 | ||
@@ -193,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) | |||
193 | * This is only applicable for Ivy Bridge devices since | 193 | * This is only applicable for Ivy Bridge devices since |
194 | * later platforms don't have L3 control bits in the PTE. | 194 | * later platforms don't have L3 control bits in the PTE. |
195 | */ | 195 | */ |
196 | if (IS_IVYBRIDGE(to_i915(dev))) { | 196 | if (IS_IVYBRIDGE(dev_priv)) { |
197 | ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); | 197 | ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); |
198 | /* Failure shouldn't ever happen this early */ | 198 | /* Failure shouldn't ever happen this early */ |
199 | if (WARN_ON(ret)) { | 199 | if (WARN_ON(ret)) { |
@@ -228,8 +228,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm) | |||
228 | 228 | ||
229 | static void context_close(struct i915_gem_context *ctx) | 229 | static void context_close(struct i915_gem_context *ctx) |
230 | { | 230 | { |
231 | GEM_BUG_ON(ctx->closed); | 231 | i915_gem_context_set_closed(ctx); |
232 | ctx->closed = true; | ||
233 | if (ctx->ppgtt) | 232 | if (ctx->ppgtt) |
234 | i915_ppgtt_close(&ctx->ppgtt->base); | 233 | i915_ppgtt_close(&ctx->ppgtt->base); |
235 | ctx->file_priv = ERR_PTR(-EBADF); | 234 | ctx->file_priv = ERR_PTR(-EBADF); |
@@ -259,10 +258,9 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) | |||
259 | } | 258 | } |
260 | 259 | ||
261 | static struct i915_gem_context * | 260 | static struct i915_gem_context * |
262 | __create_hw_context(struct drm_device *dev, | 261 | __create_hw_context(struct drm_i915_private *dev_priv, |
263 | struct drm_i915_file_private *file_priv) | 262 | struct drm_i915_file_private *file_priv) |
264 | { | 263 | { |
265 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
266 | struct i915_gem_context *ctx; | 264 | struct i915_gem_context *ctx; |
267 | int ret; | 265 | int ret; |
268 | 266 | ||
@@ -286,8 +284,7 @@ __create_hw_context(struct drm_device *dev, | |||
286 | struct drm_i915_gem_object *obj; | 284 | struct drm_i915_gem_object *obj; |
287 | struct i915_vma *vma; | 285 | struct i915_vma *vma; |
288 | 286 | ||
289 | obj = i915_gem_alloc_context_obj(dev, | 287 | obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size); |
290 | dev_priv->hw_context_size); | ||
291 | if (IS_ERR(obj)) { | 288 | if (IS_ERR(obj)) { |
292 | ret = PTR_ERR(obj); | 289 | ret = PTR_ERR(obj); |
293 | goto err_out; | 290 | goto err_out; |
@@ -331,12 +328,21 @@ __create_hw_context(struct drm_device *dev, | |||
331 | * is no remap info, it will be a NOP. */ | 328 | * is no remap info, it will be a NOP. */ |
332 | ctx->remap_slice = ALL_L3_SLICES(dev_priv); | 329 | ctx->remap_slice = ALL_L3_SLICES(dev_priv); |
333 | 330 | ||
334 | ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; | 331 | i915_gem_context_set_bannable(ctx); |
335 | ctx->ring_size = 4 * PAGE_SIZE; | 332 | ctx->ring_size = 4 * PAGE_SIZE; |
336 | ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << | 333 | ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << |
337 | GEN8_CTX_ADDRESSING_MODE_SHIFT; | 334 | GEN8_CTX_ADDRESSING_MODE_SHIFT; |
338 | ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); | 335 | ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); |
339 | 336 | ||
337 | /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not | ||
338 | * present or not in use, we still need a small bias as ring wraparound | ||
339 | * at offset 0 sometimes hangs. No idea why. | ||
340 | */ | ||
341 | if (HAS_GUC(dev_priv) && i915.enable_guc_loading) | ||
342 | ctx->ggtt_offset_bias = GUC_WOPCM_TOP; | ||
343 | else | ||
344 | ctx->ggtt_offset_bias = 4096; | ||
345 | |||
340 | return ctx; | 346 | return ctx; |
341 | 347 | ||
342 | err_pid: | 348 | err_pid: |
@@ -353,21 +359,21 @@ err_out: | |||
353 | * well as an idle case. | 359 | * well as an idle case. |
354 | */ | 360 | */ |
355 | static struct i915_gem_context * | 361 | static struct i915_gem_context * |
356 | i915_gem_create_context(struct drm_device *dev, | 362 | i915_gem_create_context(struct drm_i915_private *dev_priv, |
357 | struct drm_i915_file_private *file_priv) | 363 | struct drm_i915_file_private *file_priv) |
358 | { | 364 | { |
359 | struct i915_gem_context *ctx; | 365 | struct i915_gem_context *ctx; |
360 | 366 | ||
361 | lockdep_assert_held(&dev->struct_mutex); | 367 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
362 | 368 | ||
363 | ctx = __create_hw_context(dev, file_priv); | 369 | ctx = __create_hw_context(dev_priv, file_priv); |
364 | if (IS_ERR(ctx)) | 370 | if (IS_ERR(ctx)) |
365 | return ctx; | 371 | return ctx; |
366 | 372 | ||
367 | if (USES_FULL_PPGTT(dev)) { | 373 | if (USES_FULL_PPGTT(dev_priv)) { |
368 | struct i915_hw_ppgtt *ppgtt; | 374 | struct i915_hw_ppgtt *ppgtt; |
369 | 375 | ||
370 | ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name); | 376 | ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name); |
371 | if (IS_ERR(ppgtt)) { | 377 | if (IS_ERR(ppgtt)) { |
372 | DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", | 378 | DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", |
373 | PTR_ERR(ppgtt)); | 379 | PTR_ERR(ppgtt)); |
@@ -407,35 +413,24 @@ i915_gem_context_create_gvt(struct drm_device *dev) | |||
407 | if (ret) | 413 | if (ret) |
408 | return ERR_PTR(ret); | 414 | return ERR_PTR(ret); |
409 | 415 | ||
410 | ctx = i915_gem_create_context(dev, NULL); | 416 | ctx = __create_hw_context(to_i915(dev), NULL); |
411 | if (IS_ERR(ctx)) | 417 | if (IS_ERR(ctx)) |
412 | goto out; | 418 | goto out; |
413 | 419 | ||
414 | ctx->execlists_force_single_submission = true; | 420 | ctx->file_priv = ERR_PTR(-EBADF); |
421 | i915_gem_context_set_closed(ctx); /* not user accessible */ | ||
422 | i915_gem_context_clear_bannable(ctx); | ||
423 | i915_gem_context_set_force_single_submission(ctx); | ||
415 | ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ | 424 | ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ |
425 | |||
426 | GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); | ||
416 | out: | 427 | out: |
417 | mutex_unlock(&dev->struct_mutex); | 428 | mutex_unlock(&dev->struct_mutex); |
418 | return ctx; | 429 | return ctx; |
419 | } | 430 | } |
420 | 431 | ||
421 | static void i915_gem_context_unpin(struct i915_gem_context *ctx, | 432 | int i915_gem_context_init(struct drm_i915_private *dev_priv) |
422 | struct intel_engine_cs *engine) | ||
423 | { | 433 | { |
424 | if (i915.enable_execlists) { | ||
425 | intel_lr_context_unpin(ctx, engine); | ||
426 | } else { | ||
427 | struct intel_context *ce = &ctx->engine[engine->id]; | ||
428 | |||
429 | if (ce->state) | ||
430 | i915_vma_unpin(ce->state); | ||
431 | |||
432 | i915_gem_context_put(ctx); | ||
433 | } | ||
434 | } | ||
435 | |||
436 | int i915_gem_context_init(struct drm_device *dev) | ||
437 | { | ||
438 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
439 | struct i915_gem_context *ctx; | 434 | struct i915_gem_context *ctx; |
440 | 435 | ||
441 | /* Init should only be called once per module load. Eventually the | 436 | /* Init should only be called once per module load. Eventually the |
@@ -469,16 +464,19 @@ int i915_gem_context_init(struct drm_device *dev) | |||
469 | } | 464 | } |
470 | } | 465 | } |
471 | 466 | ||
472 | ctx = i915_gem_create_context(dev, NULL); | 467 | ctx = i915_gem_create_context(dev_priv, NULL); |
473 | if (IS_ERR(ctx)) { | 468 | if (IS_ERR(ctx)) { |
474 | DRM_ERROR("Failed to create default global context (error %ld)\n", | 469 | DRM_ERROR("Failed to create default global context (error %ld)\n", |
475 | PTR_ERR(ctx)); | 470 | PTR_ERR(ctx)); |
476 | return PTR_ERR(ctx); | 471 | return PTR_ERR(ctx); |
477 | } | 472 | } |
478 | 473 | ||
474 | i915_gem_context_clear_bannable(ctx); | ||
479 | ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */ | 475 | ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */ |
480 | dev_priv->kernel_context = ctx; | 476 | dev_priv->kernel_context = ctx; |
481 | 477 | ||
478 | GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); | ||
479 | |||
482 | DRM_DEBUG_DRIVER("%s context support initialized\n", | 480 | DRM_DEBUG_DRIVER("%s context support initialized\n", |
483 | i915.enable_execlists ? "LR" : | 481 | i915.enable_execlists ? "LR" : |
484 | dev_priv->hw_context_size ? "HW" : "fake"); | 482 | dev_priv->hw_context_size ? "HW" : "fake"); |
@@ -493,10 +491,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv) | |||
493 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | 491 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
494 | 492 | ||
495 | for_each_engine(engine, dev_priv, id) { | 493 | for_each_engine(engine, dev_priv, id) { |
496 | if (engine->last_context) { | 494 | engine->legacy_active_context = NULL; |
497 | i915_gem_context_unpin(engine->last_context, engine); | 495 | |
498 | engine->last_context = NULL; | 496 | if (!engine->last_retired_context) |
499 | } | 497 | continue; |
498 | |||
499 | engine->context_unpin(engine, engine->last_retired_context); | ||
500 | engine->last_retired_context = NULL; | ||
500 | } | 501 | } |
501 | 502 | ||
502 | /* Force the GPU state to be restored on enabling */ | 503 | /* Force the GPU state to be restored on enabling */ |
@@ -522,12 +523,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv) | |||
522 | } | 523 | } |
523 | } | 524 | } |
524 | 525 | ||
525 | void i915_gem_context_fini(struct drm_device *dev) | 526 | void i915_gem_context_fini(struct drm_i915_private *dev_priv) |
526 | { | 527 | { |
527 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
528 | struct i915_gem_context *dctx = dev_priv->kernel_context; | 528 | struct i915_gem_context *dctx = dev_priv->kernel_context; |
529 | 529 | ||
530 | lockdep_assert_held(&dev->struct_mutex); | 530 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
531 | |||
532 | GEM_BUG_ON(!i915_gem_context_is_kernel(dctx)); | ||
531 | 533 | ||
532 | context_close(dctx); | 534 | context_close(dctx); |
533 | dev_priv->kernel_context = NULL; | 535 | dev_priv->kernel_context = NULL; |
@@ -551,9 +553,11 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) | |||
551 | idr_init(&file_priv->context_idr); | 553 | idr_init(&file_priv->context_idr); |
552 | 554 | ||
553 | mutex_lock(&dev->struct_mutex); | 555 | mutex_lock(&dev->struct_mutex); |
554 | ctx = i915_gem_create_context(dev, file_priv); | 556 | ctx = i915_gem_create_context(to_i915(dev), file_priv); |
555 | mutex_unlock(&dev->struct_mutex); | 557 | mutex_unlock(&dev->struct_mutex); |
556 | 558 | ||
559 | GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); | ||
560 | |||
557 | if (IS_ERR(ctx)) { | 561 | if (IS_ERR(ctx)) { |
558 | idr_destroy(&file_priv->context_idr); | 562 | idr_destroy(&file_priv->context_idr); |
559 | return PTR_ERR(ctx); | 563 | return PTR_ERR(ctx); |
@@ -719,7 +723,7 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, | |||
719 | if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) | 723 | if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) |
720 | return false; | 724 | return false; |
721 | 725 | ||
722 | return to == engine->last_context; | 726 | return to == engine->legacy_active_context; |
723 | } | 727 | } |
724 | 728 | ||
725 | static bool | 729 | static bool |
@@ -731,11 +735,11 @@ needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, | |||
731 | return false; | 735 | return false; |
732 | 736 | ||
733 | /* Always load the ppgtt on first use */ | 737 | /* Always load the ppgtt on first use */ |
734 | if (!engine->last_context) | 738 | if (!engine->legacy_active_context) |
735 | return true; | 739 | return true; |
736 | 740 | ||
737 | /* Same context without new entries, skip */ | 741 | /* Same context without new entries, skip */ |
738 | if (engine->last_context == to && | 742 | if (engine->legacy_active_context == to && |
739 | !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) | 743 | !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) |
740 | return false; | 744 | return false; |
741 | 745 | ||
@@ -765,57 +769,20 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt, | |||
765 | return false; | 769 | return false; |
766 | } | 770 | } |
767 | 771 | ||
768 | struct i915_vma * | ||
769 | i915_gem_context_pin_legacy(struct i915_gem_context *ctx, | ||
770 | unsigned int flags) | ||
771 | { | ||
772 | struct i915_vma *vma = ctx->engine[RCS].state; | ||
773 | int ret; | ||
774 | |||
775 | /* Clear this page out of any CPU caches for coherent swap-in/out. | ||
776 | * We only want to do this on the first bind so that we do not stall | ||
777 | * on an active context (which by nature is already on the GPU). | ||
778 | */ | ||
779 | if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { | ||
780 | ret = i915_gem_object_set_to_gtt_domain(vma->obj, false); | ||
781 | if (ret) | ||
782 | return ERR_PTR(ret); | ||
783 | } | ||
784 | |||
785 | ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags); | ||
786 | if (ret) | ||
787 | return ERR_PTR(ret); | ||
788 | |||
789 | return vma; | ||
790 | } | ||
791 | |||
792 | static int do_rcs_switch(struct drm_i915_gem_request *req) | 772 | static int do_rcs_switch(struct drm_i915_gem_request *req) |
793 | { | 773 | { |
794 | struct i915_gem_context *to = req->ctx; | 774 | struct i915_gem_context *to = req->ctx; |
795 | struct intel_engine_cs *engine = req->engine; | 775 | struct intel_engine_cs *engine = req->engine; |
796 | struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; | 776 | struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; |
797 | struct i915_vma *vma; | 777 | struct i915_gem_context *from = engine->legacy_active_context; |
798 | struct i915_gem_context *from; | ||
799 | u32 hw_flags; | 778 | u32 hw_flags; |
800 | int ret, i; | 779 | int ret, i; |
801 | 780 | ||
781 | GEM_BUG_ON(engine->id != RCS); | ||
782 | |||
802 | if (skip_rcs_switch(ppgtt, engine, to)) | 783 | if (skip_rcs_switch(ppgtt, engine, to)) |
803 | return 0; | 784 | return 0; |
804 | 785 | ||
805 | /* Trying to pin first makes error handling easier. */ | ||
806 | vma = i915_gem_context_pin_legacy(to, 0); | ||
807 | if (IS_ERR(vma)) | ||
808 | return PTR_ERR(vma); | ||
809 | |||
810 | /* | ||
811 | * Pin can switch back to the default context if we end up calling into | ||
812 | * evict_everything - as a last ditch gtt defrag effort that also | ||
813 | * switches to the default context. Hence we need to reload from here. | ||
814 | * | ||
815 | * XXX: Doing so is painfully broken! | ||
816 | */ | ||
817 | from = engine->last_context; | ||
818 | |||
819 | if (needs_pd_load_pre(ppgtt, engine, to)) { | 786 | if (needs_pd_load_pre(ppgtt, engine, to)) { |
820 | /* Older GENs and non render rings still want the load first, | 787 | /* Older GENs and non render rings still want the load first, |
821 | * "PP_DCLV followed by PP_DIR_BASE register through Load | 788 | * "PP_DCLV followed by PP_DIR_BASE register through Load |
@@ -824,7 +791,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) | |||
824 | trace_switch_mm(engine, to); | 791 | trace_switch_mm(engine, to); |
825 | ret = ppgtt->switch_mm(ppgtt, req); | 792 | ret = ppgtt->switch_mm(ppgtt, req); |
826 | if (ret) | 793 | if (ret) |
827 | goto err; | 794 | return ret; |
828 | } | 795 | } |
829 | 796 | ||
830 | if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) | 797 | if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) |
@@ -841,29 +808,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) | |||
841 | if (to != from || (hw_flags & MI_FORCE_RESTORE)) { | 808 | if (to != from || (hw_flags & MI_FORCE_RESTORE)) { |
842 | ret = mi_set_context(req, hw_flags); | 809 | ret = mi_set_context(req, hw_flags); |
843 | if (ret) | 810 | if (ret) |
844 | goto err; | 811 | return ret; |
845 | } | ||
846 | 812 | ||
847 | /* The backing object for the context is done after switching to the | 813 | engine->legacy_active_context = to; |
848 | * *next* context. Therefore we cannot retire the previous context until | ||
849 | * the next context has already started running. In fact, the below code | ||
850 | * is a bit suboptimal because the retiring can occur simply after the | ||
851 | * MI_SET_CONTEXT instead of when the next seqno has completed. | ||
852 | */ | ||
853 | if (from != NULL) { | ||
854 | /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the | ||
855 | * whole damn pipeline, we don't need to explicitly mark the | ||
856 | * object dirty. The only exception is that the context must be | ||
857 | * correct in case the object gets swapped out. Ideally we'd be | ||
858 | * able to defer doing this until we know the object would be | ||
859 | * swapped, but there is no way to do that yet. | ||
860 | */ | ||
861 | i915_vma_move_to_active(from->engine[RCS].state, req, 0); | ||
862 | /* state is kept alive until the next request */ | ||
863 | i915_vma_unpin(from->engine[RCS].state); | ||
864 | i915_gem_context_put(from); | ||
865 | } | 814 | } |
866 | engine->last_context = i915_gem_context_get(to); | ||
867 | 815 | ||
868 | /* GEN8 does *not* require an explicit reload if the PDPs have been | 816 | /* GEN8 does *not* require an explicit reload if the PDPs have been |
869 | * setup, and we do not wish to move them. | 817 | * setup, and we do not wish to move them. |
@@ -904,10 +852,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) | |||
904 | } | 852 | } |
905 | 853 | ||
906 | return 0; | 854 | return 0; |
907 | |||
908 | err: | ||
909 | i915_vma_unpin(vma); | ||
910 | return ret; | ||
911 | } | 855 | } |
912 | 856 | ||
913 | /** | 857 | /** |
@@ -947,12 +891,6 @@ int i915_switch_context(struct drm_i915_gem_request *req) | |||
947 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | 891 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); |
948 | } | 892 | } |
949 | 893 | ||
950 | if (to != engine->last_context) { | ||
951 | if (engine->last_context) | ||
952 | i915_gem_context_put(engine->last_context); | ||
953 | engine->last_context = i915_gem_context_get(to); | ||
954 | } | ||
955 | |||
956 | return 0; | 894 | return 0; |
957 | } | 895 | } |
958 | 896 | ||
@@ -1003,6 +941,11 @@ static bool contexts_enabled(struct drm_device *dev) | |||
1003 | return i915.enable_execlists || to_i915(dev)->hw_context_size; | 941 | return i915.enable_execlists || to_i915(dev)->hw_context_size; |
1004 | } | 942 | } |
1005 | 943 | ||
944 | static bool client_is_banned(struct drm_i915_file_private *file_priv) | ||
945 | { | ||
946 | return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS; | ||
947 | } | ||
948 | |||
1006 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | 949 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
1007 | struct drm_file *file) | 950 | struct drm_file *file) |
1008 | { | 951 | { |
@@ -1017,17 +960,27 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | |||
1017 | if (args->pad != 0) | 960 | if (args->pad != 0) |
1018 | return -EINVAL; | 961 | return -EINVAL; |
1019 | 962 | ||
963 | if (client_is_banned(file_priv)) { | ||
964 | DRM_DEBUG("client %s[%d] banned from creating ctx\n", | ||
965 | current->comm, | ||
966 | pid_nr(get_task_pid(current, PIDTYPE_PID))); | ||
967 | |||
968 | return -EIO; | ||
969 | } | ||
970 | |||
1020 | ret = i915_mutex_lock_interruptible(dev); | 971 | ret = i915_mutex_lock_interruptible(dev); |
1021 | if (ret) | 972 | if (ret) |
1022 | return ret; | 973 | return ret; |
1023 | 974 | ||
1024 | ctx = i915_gem_create_context(dev, file_priv); | 975 | ctx = i915_gem_create_context(to_i915(dev), file_priv); |
1025 | mutex_unlock(&dev->struct_mutex); | 976 | mutex_unlock(&dev->struct_mutex); |
1026 | if (IS_ERR(ctx)) | 977 | if (IS_ERR(ctx)) |
1027 | return PTR_ERR(ctx); | 978 | return PTR_ERR(ctx); |
1028 | 979 | ||
980 | GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); | ||
981 | |||
1029 | args->ctx_id = ctx->user_handle; | 982 | args->ctx_id = ctx->user_handle; |
1030 | DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); | 983 | DRM_DEBUG("HW context %d created\n", args->ctx_id); |
1031 | 984 | ||
1032 | return 0; | 985 | return 0; |
1033 | } | 986 | } |
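From userspace this path is a single ioctl. A minimal sketch against the stock i915 uapi, assuming fd is an open render node; note that the new client_is_banned() check above surfaces here as -EIO:

	#include <errno.h>
	#include <sys/ioctl.h>
	#include <i915_drm.h>	/* libdrm copy of the uapi header */

	static int create_ctx(int fd, __u32 *ctx_id)
	{
		struct drm_i915_gem_context_create create = { 0 };

		if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
			return -errno;	/* -EIO once the client is banned */

		*ctx_id = create.ctx_id;
		return 0;
	}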
@@ -1060,7 +1013,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, | |||
1060 | context_close(ctx); | 1013 | context_close(ctx); |
1061 | mutex_unlock(&dev->struct_mutex); | 1014 | mutex_unlock(&dev->struct_mutex); |
1062 | 1015 | ||
1063 | DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); | 1016 | DRM_DEBUG("HW context %d destroyed\n", args->ctx_id); |
1064 | return 0; | 1017 | return 0; |
1065 | } | 1018 | } |
1066 | 1019 | ||
@@ -1085,7 +1038,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, | |||
1085 | args->size = 0; | 1038 | args->size = 0; |
1086 | switch (args->param) { | 1039 | switch (args->param) { |
1087 | case I915_CONTEXT_PARAM_BAN_PERIOD: | 1040 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
1088 | args->value = ctx->hang_stats.ban_period_seconds; | 1041 | ret = -EINVAL; |
1089 | break; | 1042 | break; |
1090 | case I915_CONTEXT_PARAM_NO_ZEROMAP: | 1043 | case I915_CONTEXT_PARAM_NO_ZEROMAP: |
1091 | args->value = ctx->flags & CONTEXT_NO_ZEROMAP; | 1044 | args->value = ctx->flags & CONTEXT_NO_ZEROMAP; |
@@ -1099,7 +1052,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, | |||
1099 | args->value = to_i915(dev)->ggtt.base.total; | 1052 | args->value = to_i915(dev)->ggtt.base.total; |
1100 | break; | 1053 | break; |
1101 | case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: | 1054 | case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: |
1102 | args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE); | 1055 | args->value = i915_gem_context_no_error_capture(ctx); |
1056 | break; | ||
1057 | case I915_CONTEXT_PARAM_BANNABLE: | ||
1058 | args->value = i915_gem_context_is_bannable(ctx); | ||
1103 | break; | 1059 | break; |
1104 | default: | 1060 | default: |
1105 | ret = -EINVAL; | 1061 | ret = -EINVAL; |
@@ -1130,13 +1086,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, | |||
1130 | 1086 | ||
1131 | switch (args->param) { | 1087 | switch (args->param) { |
1132 | case I915_CONTEXT_PARAM_BAN_PERIOD: | 1088 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
1133 | if (args->size) | 1089 | ret = -EINVAL; |
1134 | ret = -EINVAL; | ||
1135 | else if (args->value < ctx->hang_stats.ban_period_seconds && | ||
1136 | !capable(CAP_SYS_ADMIN)) | ||
1137 | ret = -EPERM; | ||
1138 | else | ||
1139 | ctx->hang_stats.ban_period_seconds = args->value; | ||
1140 | break; | 1090 | break; |
1141 | case I915_CONTEXT_PARAM_NO_ZEROMAP: | 1091 | case I915_CONTEXT_PARAM_NO_ZEROMAP: |
1142 | if (args->size) { | 1092 | if (args->size) { |
@@ -1147,14 +1097,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, | |||
1147 | } | 1097 | } |
1148 | break; | 1098 | break; |
1149 | case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: | 1099 | case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: |
1150 | if (args->size) { | 1100 | if (args->size) |
1151 | ret = -EINVAL; | 1101 | ret = -EINVAL; |
1152 | } else { | 1102 | else if (args->value) |
1153 | if (args->value) | 1103 | i915_gem_context_set_no_error_capture(ctx); |
1154 | ctx->flags |= CONTEXT_NO_ERROR_CAPTURE; | 1104 | else |
1155 | else | 1105 | i915_gem_context_clear_no_error_capture(ctx); |
1156 | ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE; | 1106 | break; |
1157 | } | 1107 | case I915_CONTEXT_PARAM_BANNABLE: |
1108 | if (args->size) | ||
1109 | ret = -EINVAL; | ||
1110 | else if (!capable(CAP_SYS_ADMIN) && !args->value) | ||
1111 | ret = -EPERM; | ||
1112 | else if (args->value) | ||
1113 | i915_gem_context_set_bannable(ctx); | ||
1114 | else | ||
1115 | i915_gem_context_clear_bannable(ctx); | ||
1158 | break; | 1116 | break; |
1159 | default: | 1117 | default: |
1160 | ret = -EINVAL; | 1118 | ret = -EINVAL; |
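A userspace sketch of the new BANNABLE parameter, assuming fd and ctx_id as in the create example above; per the check in this hunk, clearing the flag (value 0) needs CAP_SYS_ADMIN:

	static int set_unbannable(int fd, __u32 ctx_id)
	{
		struct drm_i915_gem_context_param p = {
			.ctx_id = ctx_id,
			.param = I915_CONTEXT_PARAM_BANNABLE,
			.value = 0,	/* opt out; -EPERM without CAP_SYS_ADMIN */
		};

		return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p) ? -errno : 0;
	}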
@@ -1170,7 +1128,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, | |||
1170 | { | 1128 | { |
1171 | struct drm_i915_private *dev_priv = to_i915(dev); | 1129 | struct drm_i915_private *dev_priv = to_i915(dev); |
1172 | struct drm_i915_reset_stats *args = data; | 1130 | struct drm_i915_reset_stats *args = data; |
1173 | struct i915_ctx_hang_stats *hs; | ||
1174 | struct i915_gem_context *ctx; | 1131 | struct i915_gem_context *ctx; |
1175 | int ret; | 1132 | int ret; |
1176 | 1133 | ||
@@ -1189,15 +1146,14 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, | |||
1189 | mutex_unlock(&dev->struct_mutex); | 1146 | mutex_unlock(&dev->struct_mutex); |
1190 | return PTR_ERR(ctx); | 1147 | return PTR_ERR(ctx); |
1191 | } | 1148 | } |
1192 | hs = &ctx->hang_stats; | ||
1193 | 1149 | ||
1194 | if (capable(CAP_SYS_ADMIN)) | 1150 | if (capable(CAP_SYS_ADMIN)) |
1195 | args->reset_count = i915_reset_count(&dev_priv->gpu_error); | 1151 | args->reset_count = i915_reset_count(&dev_priv->gpu_error); |
1196 | else | 1152 | else |
1197 | args->reset_count = 0; | 1153 | args->reset_count = 0; |
1198 | 1154 | ||
1199 | args->batch_active = hs->batch_active; | 1155 | args->batch_active = ctx->guilty_count; |
1200 | args->batch_pending = hs->batch_pending; | 1156 | args->batch_pending = ctx->active_count; |
1201 | 1157 | ||
1202 | mutex_unlock(&dev->struct_mutex); | 1158 | mutex_unlock(&dev->struct_mutex); |
1203 | 1159 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h new file mode 100644 index 000000000000..0ac750b90f3d --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_context.h | |||
@@ -0,0 +1,277 @@ | |||
1 | /* | ||
2 | * Copyright © 2016 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef __I915_GEM_CONTEXT_H__ | ||
26 | #define __I915_GEM_CONTEXT_H__ | ||
27 | |||
28 | #include <linux/bitops.h> | ||
29 | #include <linux/list.h> | ||
30 | |||
31 | struct pid; | ||
32 | |||
33 | struct drm_device; | ||
34 | struct drm_file; | ||
35 | |||
36 | struct drm_i915_private; | ||
37 | struct drm_i915_file_private; | ||
38 | struct i915_hw_ppgtt; | ||
39 | struct i915_vma; | ||
40 | struct intel_ring; | ||
41 | |||
42 | #define DEFAULT_CONTEXT_HANDLE 0 | ||
43 | |||
44 | /** | ||
45 | * struct i915_gem_context - client state | ||
46 | * | ||
47 | * The struct i915_gem_context represents the combined view of the driver and | ||
48 | * logical hardware state for a particular client. | ||
49 | */ | ||
50 | struct i915_gem_context { | ||
51 | /** i915: i915 device backpointer */ | ||
52 | struct drm_i915_private *i915; | ||
53 | |||
54 | /** file_priv: owning file descriptor */ | ||
55 | struct drm_i915_file_private *file_priv; | ||
56 | |||
57 | /** | ||
58 | * @ppgtt: unique address space (GTT) | ||
59 | * | ||
60 | * In full-ppgtt mode, each context has its own address space ensuring | ||
61 | * complete separation of one client from all others. | ||
62 | * | ||
63 | * In other modes, this is a NULL pointer with the expectation that | ||
64 | * the caller uses the shared global GTT. | ||
65 | */ | ||
66 | struct i915_hw_ppgtt *ppgtt; | ||
67 | |||
68 | /** | ||
69 | * @pid: process id of creator | ||
70 | * | ||
71 | * Note that who created the context may not be the principal user, | ||
72 | * as the context may be shared across a local socket. However, | ||
73 | * that should only affect the default context, all contexts created | ||
74 | * explicitly by the client are expected to be isolated. | ||
75 | */ | ||
76 | struct pid *pid; | ||
77 | |||
78 | /** | ||
79 | * @name: arbitrary name | ||
80 | * | ||
81 | * A name is constructed for the context from the creator's process | ||
82 | * name, pid and user handle in order to uniquely identify the | ||
83 | * context in messages. | ||
84 | */ | ||
85 | const char *name; | ||
86 | |||
87 | /** link: place within &drm_i915_private.context_list */ | ||
88 | struct list_head link; | ||
89 | |||
90 | /** | ||
91 | * @ref: reference count | ||
92 | * | ||
93 | * A reference to a context is held by both the client who created it | ||
94 | * and on each request submitted to the hardware using the request | ||
95 | * (to ensure the hardware has access to the state until it has | ||
96 | * finished all pending writes). See i915_gem_context_get() and | ||
97 | * i915_gem_context_put() for access. | ||
98 | */ | ||
99 | struct kref ref; | ||
100 | |||
101 | /** | ||
102 | * @flags: small set of booleans | ||
103 | */ | ||
104 | unsigned long flags; | ||
105 | #define CONTEXT_NO_ZEROMAP BIT(0) | ||
106 | #define CONTEXT_NO_ERROR_CAPTURE 1 | ||
107 | #define CONTEXT_CLOSED 2 | ||
108 | #define CONTEXT_BANNABLE 3 | ||
109 | #define CONTEXT_BANNED 4 | ||
110 | #define CONTEXT_FORCE_SINGLE_SUBMISSION 5 | ||
111 | |||
112 | /** | ||
113 | * @hw_id: unique identifier for the context | ||
114 | * | ||
115 | * The hardware needs to uniquely identify the context for a few | ||
116 | * functions like fault reporting, PASID, scheduling. The | ||
117 | * &drm_i915_private.context_hw_ida is used to assign a unique | ||
118 | * id for the lifetime of the context. | ||
119 | */ | ||
120 | unsigned int hw_id; | ||
121 | |||
122 | /** | ||
123 | * @user_handle: userspace identifier | ||
124 | * | ||
125 | * A unique per-file identifier is generated from | ||
126 | * &drm_i915_file_private.context_idr. | ||
127 | */ | ||
128 | u32 user_handle; | ||
129 | |||
130 | /** | ||
131 | * @priority: execution and service priority | ||
132 | * | ||
133 | * All clients are equal, but some are more equal than others! | ||
134 | * | ||
135 | * Requests from a context with a greater (more positive) value of | ||
136 | * @priority will be executed before those with a lower @priority | ||
137 | * value, forming a simple QoS. | ||
138 | * | ||
139 | * The &drm_i915_private.kernel_context is assigned the lowest priority. | ||
140 | */ | ||
141 | int priority; | ||
142 | |||
143 | /** ggtt_alignment: alignment restriction for context objects */ | ||
144 | u32 ggtt_alignment; | ||
145 | /** ggtt_offset_bias: placement restriction for context objects */ | ||
146 | u32 ggtt_offset_bias; | ||
147 | |||
148 | /** engine: per-engine logical HW state */ | ||
149 | struct intel_context { | ||
150 | struct i915_vma *state; | ||
151 | struct intel_ring *ring; | ||
152 | u32 *lrc_reg_state; | ||
153 | u64 lrc_desc; | ||
154 | int pin_count; | ||
155 | bool initialised; | ||
156 | } engine[I915_NUM_ENGINES]; | ||
157 | |||
158 | /** ring_size: size for allocating the per-engine ring buffer */ | ||
159 | u32 ring_size; | ||
160 | /** desc_template: invariant fields for the HW context descriptor */ | ||
161 | u32 desc_template; | ||
162 | |||
163 | /** status_notifier: list of callbacks for context-switch changes */ | ||
164 | struct atomic_notifier_head status_notifier; | ||
165 | |||
166 | /** guilty_count: How many times this context has caused a GPU hang. */ | ||
167 | unsigned int guilty_count; | ||
168 | /** | ||
169 | * @active_count: How many times this context was active during a GPU | ||
170 | * hang, but did not cause it. | ||
171 | */ | ||
172 | unsigned int active_count; | ||
173 | |||
174 | #define CONTEXT_SCORE_GUILTY 10 | ||
175 | #define CONTEXT_SCORE_BAN_THRESHOLD 40 | ||
176 | /** ban_score: Accumulated score of all hangs caused by this context. */ | ||
177 | int ban_score; | ||
178 | |||
179 | /** remap_slice: Bitmask of cache lines that need remapping */ | ||
180 | u8 remap_slice; | ||
181 | }; | ||
182 | |||
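To make the scoring concrete: at 10 points per guilty hang against a threshold of 40, a context is banned after roughly four guilty hangs. A sketch of the decision, with the helper name and the exact comparison assumed (the in-tree logic lives in the hang-recovery path, not in this header):

	static bool ctx_over_ban_threshold(const struct i915_gem_context *ctx)
	{
		/* CONTEXT_SCORE_GUILTY per hang vs CONTEXT_SCORE_BAN_THRESHOLD */
		return ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD;
	}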
183 | static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx) | ||
184 | { | ||
185 | return test_bit(CONTEXT_CLOSED, &ctx->flags); | ||
186 | } | ||
187 | |||
188 | static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx) | ||
189 | { | ||
190 | GEM_BUG_ON(i915_gem_context_is_closed(ctx)); | ||
191 | __set_bit(CONTEXT_CLOSED, &ctx->flags); | ||
192 | } | ||
193 | |||
194 | static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx) | ||
195 | { | ||
196 | return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags); | ||
197 | } | ||
198 | |||
199 | static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx) | ||
200 | { | ||
201 | __set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags); | ||
202 | } | ||
203 | |||
204 | static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx) | ||
205 | { | ||
206 | __clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags); | ||
207 | } | ||
208 | |||
209 | static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx) | ||
210 | { | ||
211 | return test_bit(CONTEXT_BANNABLE, &ctx->flags); | ||
212 | } | ||
213 | |||
214 | static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx) | ||
215 | { | ||
216 | __set_bit(CONTEXT_BANNABLE, &ctx->flags); | ||
217 | } | ||
218 | |||
219 | static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx) | ||
220 | { | ||
221 | __clear_bit(CONTEXT_BANNABLE, &ctx->flags); | ||
222 | } | ||
223 | |||
224 | static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx) | ||
225 | { | ||
226 | return test_bit(CONTEXT_BANNED, &ctx->flags); | ||
227 | } | ||
228 | |||
229 | static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx) | ||
230 | { | ||
231 | __set_bit(CONTEXT_BANNED, &ctx->flags); | ||
232 | } | ||
233 | |||
234 | static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx) | ||
235 | { | ||
236 | return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); | ||
237 | } | ||
238 | |||
239 | static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx) | ||
240 | { | ||
241 | __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); | ||
242 | } | ||
243 | |||
244 | static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) | ||
245 | { | ||
246 | return c->user_handle == DEFAULT_CONTEXT_HANDLE; | ||
247 | } | ||
248 | |||
249 | static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) | ||
250 | { | ||
251 | return !ctx->file_priv; | ||
252 | } | ||
253 | |||
254 | /* i915_gem_context.c */ | ||
255 | int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv); | ||
256 | void i915_gem_context_lost(struct drm_i915_private *dev_priv); | ||
257 | void i915_gem_context_fini(struct drm_i915_private *dev_priv); | ||
258 | int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); | ||
259 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); | ||
260 | int i915_switch_context(struct drm_i915_gem_request *req); | ||
261 | int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); | ||
262 | void i915_gem_context_free(struct kref *ctx_ref); | ||
263 | struct i915_gem_context * | ||
264 | i915_gem_context_create_gvt(struct drm_device *dev); | ||
265 | |||
266 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | ||
267 | struct drm_file *file); | ||
268 | int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
269 | struct drm_file *file); | ||
270 | int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, | ||
271 | struct drm_file *file_priv); | ||
272 | int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, | ||
273 | struct drm_file *file_priv); | ||
274 | int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, | ||
275 | struct drm_file *file); | ||
276 | |||
277 | #endif /* !__I915_GEM_CONTEXT_H__ */ | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 5e38299b5df6..d037adcda6f2 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
@@ -278,7 +278,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, | |||
278 | 278 | ||
279 | get_dma_buf(dma_buf); | 279 | get_dma_buf(dma_buf); |
280 | 280 | ||
281 | obj = i915_gem_object_alloc(dev); | 281 | obj = i915_gem_object_alloc(to_i915(dev)); |
282 | if (obj == NULL) { | 282 | if (obj == NULL) { |
283 | ret = -ENOMEM; | 283 | ret = -ENOMEM; |
284 | goto fail_detach; | 284 | goto fail_detach; |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 85ceff1b74b6..026ebc5a452a 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -99,7 +99,7 @@ i915_gem_evict_something(struct i915_address_space *vm, | |||
99 | u64 start, u64 end, | 99 | u64 start, u64 end, |
100 | unsigned flags) | 100 | unsigned flags) |
101 | { | 101 | { |
102 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 102 | struct drm_i915_private *dev_priv = vm->i915; |
103 | struct drm_mm_scan scan; | 103 | struct drm_mm_scan scan; |
104 | struct list_head eviction_list; | 104 | struct list_head eviction_list; |
105 | struct list_head *phases[] = { | 105 | struct list_head *phases[] = { |
@@ -111,7 +111,7 @@ i915_gem_evict_something(struct i915_address_space *vm, | |||
111 | struct drm_mm_node *node; | 111 | struct drm_mm_node *node; |
112 | int ret; | 112 | int ret; |
113 | 113 | ||
114 | lockdep_assert_held(&vm->dev->struct_mutex); | 114 | lockdep_assert_held(&vm->i915->drm.struct_mutex); |
115 | trace_i915_gem_evict(vm, min_size, alignment, flags); | 115 | trace_i915_gem_evict(vm, min_size, alignment, flags); |
116 | 116 | ||
117 | /* | 117 | /* |
@@ -132,7 +132,14 @@ i915_gem_evict_something(struct i915_address_space *vm, | |||
132 | start, end, | 132 | start, end, |
133 | flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0); | 133 | flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0); |
134 | 134 | ||
135 | if (flags & PIN_NONBLOCK) | 135 | /* Retire before we search the active list. Although we have |
136 | * reasonable accuracy in our retirement lists, we may have | ||
137 | * a stray pin (preventing eviction) that can only be resolved by | ||
138 | * retiring. | ||
139 | */ | ||
140 | if (!(flags & PIN_NONBLOCK)) | ||
141 | i915_gem_retire_requests(dev_priv); | ||
142 | else | ||
136 | phases[1] = NULL; | 143 | phases[1] = NULL; |
137 | 144 | ||
138 | search_again: | 145 | search_again: |
@@ -165,7 +172,7 @@ search_again: | |||
165 | * back to userspace to give our workqueues time to | 172 | * back to userspace to give our workqueues time to |
166 | * acquire our locks and unpin the old scanouts. | 173 | * acquire our locks and unpin the old scanouts. |
167 | */ | 174 | */ |
168 | return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC; | 175 | return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; |
169 | } | 176 | } |
170 | 177 | ||
171 | /* Not everything in the GGTT is tracked via vma (otherwise we | 178 | /* Not everything in the GGTT is tracked via vma (otherwise we |
@@ -202,6 +209,7 @@ found: | |||
202 | } | 209 | } |
203 | 210 | ||
204 | /* Unbinding will emit any required flushes */ | 211 | /* Unbinding will emit any required flushes */ |
212 | ret = 0; | ||
205 | while (!list_empty(&eviction_list)) { | 213 | while (!list_empty(&eviction_list)) { |
206 | vma = list_first_entry(&eviction_list, | 214 | vma = list_first_entry(&eviction_list, |
207 | struct i915_vma, | 215 | struct i915_vma, |
@@ -221,45 +229,107 @@ found: | |||
221 | return ret; | 229 | return ret; |
222 | } | 230 | } |
223 | 231 | ||
224 | int | 232 | /** |
225 | i915_gem_evict_for_vma(struct i915_vma *target) | 233 | * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one |
234 | * @target: address space and range to evict for | ||
235 | * @flags: additional flags to control the eviction algorithm | ||
236 | * | ||
237 | * This function will try to evict vmas that overlap the target node. | ||
238 | * | ||
239 | * To clarify: This is for freeing up virtual address space, not for freeing | ||
240 | * memory in e.g. the shrinker. | ||
241 | */ | ||
242 | int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags) | ||
226 | { | 243 | { |
227 | struct drm_mm_node *node, *next; | 244 | LIST_HEAD(eviction_list); |
245 | struct drm_mm_node *node; | ||
246 | u64 start = target->node.start; | ||
247 | u64 end = start + target->node.size; | ||
248 | struct i915_vma *vma, *next; | ||
249 | bool check_color; | ||
250 | int ret = 0; | ||
228 | 251 | ||
229 | lockdep_assert_held(&target->vm->dev->struct_mutex); | 252 | lockdep_assert_held(&target->vm->i915->drm.struct_mutex); |
253 | trace_i915_gem_evict_vma(target, flags); | ||
230 | 254 | ||
231 | list_for_each_entry_safe(node, next, | 255 | /* Retire before we search the active list. Although we have |
232 | &target->vm->mm.head_node.node_list, | 256 | * reasonable accuracy in our retirement lists, we may have |
233 | node_list) { | 257 | * a stray pin (preventing eviction) that can only be resolved by |
234 | struct i915_vma *vma; | 258 | * retiring. |
235 | int ret; | 259 | */ |
260 | if (!(flags & PIN_NONBLOCK)) | ||
261 | i915_gem_retire_requests(target->vm->i915); | ||
262 | |||
263 | check_color = target->vm->mm.color_adjust; | ||
264 | if (check_color) { | ||
265 | /* Expand search to cover neighbouring guard pages (or lack!) */ | ||
266 | if (start > target->vm->start) | ||
267 | start -= 4096; | ||
268 | if (end < target->vm->start + target->vm->total) | ||
269 | end += 4096; | ||
270 | } | ||
236 | 271 | ||
237 | if (node->start + node->size <= target->node.start) | 272 | drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) { |
238 | continue; | 273 | /* If we find any non-objects (!vma), we cannot evict them */ |
239 | if (node->start >= target->node.start + target->node.size) | 274 | if (node->color == I915_COLOR_UNEVICTABLE) { |
275 | ret = -ENOSPC; | ||
240 | break; | 276 | break; |
277 | } | ||
241 | 278 | ||
242 | vma = container_of(node, typeof(*vma), node); | 279 | vma = container_of(node, typeof(*vma), node); |
243 | 280 | ||
244 | if (i915_vma_is_pinned(vma)) { | 281 | /* If we are using coloring to insert guard pages between |
245 | if (!vma->exec_entry || i915_vma_pin_count(vma) > 1) | 282 | * different cache domains within the address space, we have |
246 | /* Object is pinned for some other use */ | 283 | * to check whether the objects on either side of our range |
247 | return -EBUSY; | 284 | * abut and conflict. If they are in conflict, then we evict |
285 | * those as well to make room for our guard pages. | ||
286 | */ | ||
287 | if (check_color) { | ||
288 | if (vma->node.start + vma->node.size == target->node.start) { | ||
289 | if (vma->node.color == target->node.color) | ||
290 | continue; | ||
291 | } | ||
292 | if (vma->node.start == target->node.start + target->node.size) { | ||
293 | if (vma->node.color == target->node.color) | ||
294 | continue; | ||
295 | } | ||
296 | } | ||
248 | 297 | ||
249 | /* We need to evict a buffer in the same batch */ | 298 | if (flags & PIN_NONBLOCK && |
250 | if (vma->exec_entry->flags & EXEC_OBJECT_PINNED) | 299 | (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) { |
251 | /* Overlapping fixed objects in the same batch */ | 300 | ret = -ENOSPC; |
252 | return -EINVAL; | 301 | break; |
302 | } | ||
253 | 303 | ||
254 | return -ENOSPC; | 304 | /* Overlap of objects in the same batch? */ |
305 | if (i915_vma_is_pinned(vma)) { | ||
306 | ret = -ENOSPC; | ||
307 | if (vma->exec_entry && | ||
308 | vma->exec_entry->flags & EXEC_OBJECT_PINNED) | ||
309 | ret = -EINVAL; | ||
310 | break; | ||
255 | } | 311 | } |
256 | 312 | ||
257 | ret = i915_vma_unbind(vma); | 313 | /* Never show fear in the face of dragons! |
258 | if (ret) | 314 | * |
259 | return ret; | 315 | * We cannot directly remove this node from within this |
316 | * iterator and as with i915_gem_evict_something() we employ | ||
317 | * the vma pin_count in order to prevent the action of | ||
318 | * unbinding one vma from freeing (by dropping its active | ||
319 | * reference) another in our eviction list. | ||
320 | */ | ||
321 | __i915_vma_pin(vma); | ||
322 | list_add(&vma->exec_list, &eviction_list); | ||
260 | } | 323 | } |
261 | 324 | ||
262 | return 0; | 325 | list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { |
326 | list_del_init(&vma->exec_list); | ||
327 | __i915_vma_unpin(vma); | ||
328 | if (ret == 0) | ||
329 | ret = i915_vma_unbind(vma); | ||
330 | } | ||
331 | |||
332 | return ret; | ||
263 | } | 333 | } |
264 | 334 | ||
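A sketch of a hypothetical caller of the reworked i915_gem_evict_for_vma(), showing how the flags argument permits a cheap non-blocking pass before falling back to one that may retire and unbind active vmas (make_room_for_vma is illustrative, not in-tree):

	static int make_room_for_vma(struct i915_vma *vma)
	{
		int err;

		/* first pass: do not disturb pinned or active neighbours */
		err = i915_gem_evict_for_vma(vma, PIN_NONBLOCK);
		if (err != -ENOSPC)
			return err;

		/* second pass: allow retirement and unbinding */
		return i915_gem_evict_for_vma(vma, 0);
	}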
265 | /** | 335 | /** |
@@ -281,11 +351,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) | |||
281 | struct i915_vma *vma, *next; | 351 | struct i915_vma *vma, *next; |
282 | int ret; | 352 | int ret; |
283 | 353 | ||
284 | lockdep_assert_held(&vm->dev->struct_mutex); | 354 | lockdep_assert_held(&vm->i915->drm.struct_mutex); |
285 | trace_i915_gem_evict_vm(vm); | 355 | trace_i915_gem_evict_vm(vm); |
286 | 356 | ||
287 | if (do_idle) { | 357 | if (do_idle) { |
288 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 358 | struct drm_i915_private *dev_priv = vm->i915; |
289 | 359 | ||
290 | if (i915_is_ggtt(vm)) { | 360 | if (i915_is_ggtt(vm)) { |
291 | ret = i915_gem_switch_to_kernel_context(dev_priv); | 361 | ret = i915_gem_switch_to_kernel_context(dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 097d9d8c2315..a5fe299da1d3 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -274,6 +274,7 @@ static void eb_destroy(struct eb_vmas *eb) | |||
274 | exec_list); | 274 | exec_list); |
275 | list_del_init(&vma->exec_list); | 275 | list_del_init(&vma->exec_list); |
276 | i915_gem_execbuffer_unreserve_vma(vma); | 276 | i915_gem_execbuffer_unreserve_vma(vma); |
277 | vma->exec_entry = NULL; | ||
277 | i915_vma_put(vma); | 278 | i915_vma_put(vma); |
278 | } | 279 | } |
279 | kfree(eb); | 280 | kfree(eb); |
@@ -437,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, | |||
437 | memset(&cache->node, 0, sizeof(cache->node)); | 438 | memset(&cache->node, 0, sizeof(cache->node)); |
438 | ret = drm_mm_insert_node_in_range_generic | 439 | ret = drm_mm_insert_node_in_range_generic |
439 | (&ggtt->base.mm, &cache->node, | 440 | (&ggtt->base.mm, &cache->node, |
440 | 4096, 0, 0, | 441 | 4096, 0, I915_COLOR_UNEVICTABLE, |
441 | 0, ggtt->mappable_end, | 442 | 0, ggtt->mappable_end, |
442 | DRM_MM_SEARCH_DEFAULT, | 443 | DRM_MM_SEARCH_DEFAULT, |
443 | DRM_MM_CREATE_DEFAULT); | 444 | DRM_MM_CREATE_DEFAULT); |
@@ -1232,14 +1233,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, | |||
1232 | struct intel_engine_cs *engine, const u32 ctx_id) | 1233 | struct intel_engine_cs *engine, const u32 ctx_id) |
1233 | { | 1234 | { |
1234 | struct i915_gem_context *ctx; | 1235 | struct i915_gem_context *ctx; |
1235 | struct i915_ctx_hang_stats *hs; | ||
1236 | 1236 | ||
1237 | ctx = i915_gem_context_lookup(file->driver_priv, ctx_id); | 1237 | ctx = i915_gem_context_lookup(file->driver_priv, ctx_id); |
1238 | if (IS_ERR(ctx)) | 1238 | if (IS_ERR(ctx)) |
1239 | return ctx; | 1239 | return ctx; |
1240 | 1240 | ||
1241 | hs = &ctx->hang_stats; | 1241 | if (i915_gem_context_is_banned(ctx)) { |
1242 | if (hs->banned) { | ||
1243 | DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); | 1242 | DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); |
1244 | return ERR_PTR(-EIO); | 1243 | return ERR_PTR(-EIO); |
1245 | } | 1244 | } |
@@ -1260,6 +1259,7 @@ void i915_vma_move_to_active(struct i915_vma *vma, | |||
1260 | struct drm_i915_gem_object *obj = vma->obj; | 1259 | struct drm_i915_gem_object *obj = vma->obj; |
1261 | const unsigned int idx = req->engine->id; | 1260 | const unsigned int idx = req->engine->id; |
1262 | 1261 | ||
1262 | lockdep_assert_held(&req->i915->drm.struct_mutex); | ||
1263 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | 1263 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1264 | 1264 | ||
1265 | /* Add a reference if we're newly entering the active list. | 1265 | /* Add a reference if we're newly entering the active list. |
@@ -1715,7 +1715,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1715 | } | 1715 | } |
1716 | 1716 | ||
1717 | params->args_batch_start_offset = args->batch_start_offset; | 1717 | params->args_batch_start_offset = args->batch_start_offset; |
1718 | if (intel_engine_needs_cmd_parser(engine) && args->batch_len) { | 1718 | if (engine->needs_cmd_parser && args->batch_len) { |
1719 | struct i915_vma *vma; | 1719 | struct i915_vma *vma; |
1720 | 1720 | ||
1721 | vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry, | 1721 | vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry, |
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 0efa3571afc3..775059e19ab9 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c | |||
@@ -290,7 +290,7 @@ i915_vma_put_fence(struct i915_vma *vma) | |||
290 | { | 290 | { |
291 | struct drm_i915_fence_reg *fence = vma->fence; | 291 | struct drm_i915_fence_reg *fence = vma->fence; |
292 | 292 | ||
293 | assert_rpm_wakelock_held(to_i915(vma->vm->dev)); | 293 | assert_rpm_wakelock_held(vma->vm->i915); |
294 | 294 | ||
295 | if (!fence) | 295 | if (!fence) |
296 | return 0; | 296 | return 0; |
@@ -313,7 +313,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv) | |||
313 | } | 313 | } |
314 | 314 | ||
315 | /* Wait for completion of pending flips which consume fences */ | 315 | /* Wait for completion of pending flips which consume fences */ |
316 | if (intel_has_pending_fb_unpin(&dev_priv->drm)) | 316 | if (intel_has_pending_fb_unpin(dev_priv)) |
317 | return ERR_PTR(-EAGAIN); | 317 | return ERR_PTR(-EAGAIN); |
318 | 318 | ||
319 | return ERR_PTR(-EDEADLK); | 319 | return ERR_PTR(-EDEADLK); |
@@ -346,7 +346,7 @@ i915_vma_get_fence(struct i915_vma *vma) | |||
346 | /* Note that we revoke fences on runtime suspend. Therefore the user | 346 | /* Note that we revoke fences on runtime suspend. Therefore the user |
347 | * must keep the device awake whilst using the fence. | 347 | * must keep the device awake whilst using the fence. |
348 | */ | 348 | */ |
349 | assert_rpm_wakelock_held(to_i915(vma->vm->dev)); | 349 | assert_rpm_wakelock_held(vma->vm->i915); |
350 | 350 | ||
351 | /* Just update our place in the LRU if our fence is getting reused. */ | 351 | /* Just update our place in the LRU if our fence is getting reused. */ |
352 | if (vma->fence) { | 352 | if (vma->fence) { |
@@ -357,7 +357,7 @@ i915_vma_get_fence(struct i915_vma *vma) | |||
357 | return 0; | 357 | return 0; |
358 | } | 358 | } |
359 | } else if (set) { | 359 | } else if (set) { |
360 | fence = fence_find(to_i915(vma->vm->dev)); | 360 | fence = fence_find(vma->vm->i915); |
361 | if (IS_ERR(fence)) | 361 | if (IS_ERR(fence)) |
362 | return PTR_ERR(fence); | 362 | return PTR_ERR(fence); |
363 | } else | 363 | } else |
@@ -367,6 +367,30 @@ i915_vma_get_fence(struct i915_vma *vma) | |||
367 | } | 367 | } |
368 | 368 | ||
369 | /** | 369 | /** |
370 | * i915_gem_revoke_fences - revoke fence state | ||
371 | * @dev_priv: i915 device private | ||
372 | * | ||
373 | * Removes all GTT mmappings via the fence registers. This forces any user | ||
374 | * of the fence to reacquire that fence before continuing with their access. | ||
375 | * One use is during GPU reset where the fence register is lost and we need to | ||
376 | * revoke concurrent userspace access via GTT mmaps until the hardware has been | ||
377 | * reset and the fence registers have been restored. | ||
378 | */ | ||
379 | void i915_gem_revoke_fences(struct drm_i915_private *dev_priv) | ||
380 | { | ||
381 | int i; | ||
382 | |||
383 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | ||
384 | |||
385 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | ||
386 | struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; | ||
387 | |||
388 | if (fence->vma) | ||
389 | i915_gem_release_mmap(fence->vma->obj); | ||
390 | } | ||
391 | } | ||
392 | |||
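The kernel-doc above spells out the intended pairing with the existing restore path across a GPU reset; sketched here with the reset plumbing elided:

	static void sketch_reset_fences(struct drm_i915_private *dev_priv)
	{
		/* fence state is about to be lost: zap user GTT mmaps */
		i915_gem_revoke_fences(dev_priv);

		/* ... hardware reset runs here ... */

		/* rewrite fence registers; users refault and reacquire */
		i915_gem_restore_fences(dev_priv);
	}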
393 | /** | ||
370 | * i915_gem_restore_fences - restore fence state | 394 | * i915_gem_restore_fences - restore fence state |
371 | * @dev_priv: i915 device private | 395 | * @dev_priv: i915 device private |
372 | * | 396 | * |
@@ -512,8 +536,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) | |||
512 | */ | 536 | */ |
513 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | 537 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
514 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 538 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
515 | } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) && | 539 | } else if (IS_MOBILE(dev_priv) || |
516 | !IS_G33(dev_priv))) { | 540 | IS_I915G(dev_priv) || IS_I945G(dev_priv)) { |
517 | uint32_t dcc; | 541 | uint32_t dcc; |
518 | 542 | ||
519 | /* On 9xx chipsets, channel interleave by the CPU is | 543 | /* On 9xx chipsets, channel interleave by the CPU is |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d49a04eb584a..f698006fe883 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -113,10 +113,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, | |||
113 | bool has_full_ppgtt; | 113 | bool has_full_ppgtt; |
114 | bool has_full_48bit_ppgtt; | 114 | bool has_full_48bit_ppgtt; |
115 | 115 | ||
116 | has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6; | 116 | has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt; |
117 | has_full_ppgtt = INTEL_GEN(dev_priv) >= 7; | 117 | has_full_ppgtt = dev_priv->info.has_full_ppgtt; |
118 | has_full_48bit_ppgtt = | 118 | has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt; |
119 | IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9; | ||
120 | 119 | ||
121 | if (intel_vgpu_active(dev_priv)) { | 120 | if (intel_vgpu_active(dev_priv)) { |
122 | /* emulation is too hard */ | 121 | /* emulation is too hard */ |
@@ -372,7 +371,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr) | |||
372 | /* There are only few exceptions for gen >=6. chv and bxt. | 371 | /* There are only few exceptions for gen >=6. chv and bxt. |
373 | * And we are not sure about the latter so play safe for now. | 372 | * And we are not sure about the latter so play safe for now. |
374 | */ | 373 | */ |
375 | if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) | 374 | if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) |
376 | drm_clflush_virt_range(vaddr, PAGE_SIZE); | 375 | drm_clflush_virt_range(vaddr, PAGE_SIZE); |
377 | 376 | ||
378 | kunmap_atomic(vaddr); | 377 | kunmap_atomic(vaddr); |
@@ -380,7 +379,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr) | |||
380 | 379 | ||
381 | #define kmap_px(px) kmap_page_dma(px_base(px)) | 380 | #define kmap_px(px) kmap_page_dma(px_base(px)) |
382 | #define kunmap_px(ppgtt, vaddr) \ | 381 | #define kunmap_px(ppgtt, vaddr) \ |
383 | kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr)) | 382 | kunmap_page_dma((ppgtt)->base.i915, (vaddr)) |
384 | 383 | ||
385 | #define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px)) | 384 | #define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px)) |
386 | #define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px)) | 385 | #define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px)) |
@@ -470,7 +469,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm, | |||
470 | scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, | 469 | scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, |
471 | I915_CACHE_LLC); | 470 | I915_CACHE_LLC); |
472 | 471 | ||
473 | fill_px(to_i915(vm->dev), pt, scratch_pte); | 472 | fill_px(vm->i915, pt, scratch_pte); |
474 | } | 473 | } |
475 | 474 | ||
476 | static void gen6_initialize_pt(struct i915_address_space *vm, | 475 | static void gen6_initialize_pt(struct i915_address_space *vm, |
@@ -483,7 +482,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm, | |||
483 | scratch_pte = vm->pte_encode(vm->scratch_page.daddr, | 482 | scratch_pte = vm->pte_encode(vm->scratch_page.daddr, |
484 | I915_CACHE_LLC, 0); | 483 | I915_CACHE_LLC, 0); |
485 | 484 | ||
486 | fill32_px(to_i915(vm->dev), pt, scratch_pte); | 485 | fill32_px(vm->i915, pt, scratch_pte); |
487 | } | 486 | } |
488 | 487 | ||
489 | static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv) | 488 | static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv) |
@@ -531,7 +530,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm, | |||
531 | 530 | ||
532 | scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC); | 531 | scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC); |
533 | 532 | ||
534 | fill_px(to_i915(vm->dev), pd, scratch_pde); | 533 | fill_px(vm->i915, pd, scratch_pde); |
535 | } | 534 | } |
536 | 535 | ||
537 | static int __pdp_init(struct drm_i915_private *dev_priv, | 536 | static int __pdp_init(struct drm_i915_private *dev_priv, |
@@ -612,7 +611,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm, | |||
612 | 611 | ||
613 | scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); | 612 | scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); |
614 | 613 | ||
615 | fill_px(to_i915(vm->dev), pdp, scratch_pdpe); | 614 | fill_px(vm->i915, pdp, scratch_pdpe); |
616 | } | 615 | } |
617 | 616 | ||
618 | static void gen8_initialize_pml4(struct i915_address_space *vm, | 617 | static void gen8_initialize_pml4(struct i915_address_space *vm, |
@@ -623,7 +622,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, | |||
623 | scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp), | 622 | scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp), |
624 | I915_CACHE_LLC); | 623 | I915_CACHE_LLC); |
625 | 624 | ||
626 | fill_px(to_i915(vm->dev), pml4, scratch_pml4e); | 625 | fill_px(vm->i915, pml4, scratch_pml4e); |
627 | } | 626 | } |
628 | 627 | ||
629 | static void | 628 | static void |
@@ -710,7 +709,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt, | |||
710 | */ | 709 | */ |
711 | static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) | 710 | static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) |
712 | { | 711 | { |
713 | ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask; | 712 | ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask; |
714 | } | 713 | } |
715 | 714 | ||
716 | /* Removes entries from a single page table, releasing it if it's empty. | 715 | /* Removes entries from a single page table, releasing it if it's empty. |
@@ -736,10 +735,8 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, | |||
736 | 735 | ||
737 | bitmap_clear(pt->used_ptes, pte, num_entries); | 736 | bitmap_clear(pt->used_ptes, pte, num_entries); |
738 | 737 | ||
739 | if (bitmap_empty(pt->used_ptes, GEN8_PTES)) { | 738 | if (bitmap_empty(pt->used_ptes, GEN8_PTES)) |
740 | free_pt(to_i915(vm->dev), pt); | ||
741 | return true; | 739 | return true; |
742 | } | ||
743 | 740 | ||
744 | pt_vaddr = kmap_px(pt); | 741 | pt_vaddr = kmap_px(pt); |
745 | 742 | ||
@@ -775,13 +772,12 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, | |||
775 | pde_vaddr = kmap_px(pd); | 772 | pde_vaddr = kmap_px(pd); |
776 | pde_vaddr[pde] = scratch_pde; | 773 | pde_vaddr[pde] = scratch_pde; |
777 | kunmap_px(ppgtt, pde_vaddr); | 774 | kunmap_px(ppgtt, pde_vaddr); |
775 | free_pt(vm->i915, pt); | ||
778 | } | 776 | } |
779 | } | 777 | } |
780 | 778 | ||
781 | if (bitmap_empty(pd->used_pdes, I915_PDES)) { | 779 | if (bitmap_empty(pd->used_pdes, I915_PDES)) |
782 | free_pd(to_i915(vm->dev), pd); | ||
783 | return true; | 780 | return true; |
784 | } | ||
785 | 781 | ||
786 | return false; | 782 | return false; |
787 | } | 783 | } |
@@ -795,7 +791,6 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, | |||
795 | uint64_t length) | 791 | uint64_t length) |
796 | { | 792 | { |
797 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); | 793 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
798 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | ||
799 | struct i915_page_directory *pd; | 794 | struct i915_page_directory *pd; |
800 | uint64_t pdpe; | 795 | uint64_t pdpe; |
801 | gen8_ppgtt_pdpe_t *pdpe_vaddr; | 796 | gen8_ppgtt_pdpe_t *pdpe_vaddr; |
@@ -813,16 +808,14 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, | |||
813 | pdpe_vaddr[pdpe] = scratch_pdpe; | 808 | pdpe_vaddr[pdpe] = scratch_pdpe; |
814 | kunmap_px(ppgtt, pdpe_vaddr); | 809 | kunmap_px(ppgtt, pdpe_vaddr); |
815 | } | 810 | } |
811 | free_pd(vm->i915, pd); | ||
816 | } | 812 | } |
817 | } | 813 | } |
818 | 814 | ||
819 | mark_tlbs_dirty(ppgtt); | 815 | mark_tlbs_dirty(ppgtt); |
820 | 816 | ||
821 | if (USES_FULL_48BIT_PPGTT(dev_priv) && | 817 | if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) |
822 | bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) { | ||
823 | free_pdp(dev_priv, pdp); | ||
824 | return true; | 818 | return true; |
825 | } | ||
826 | 819 | ||
827 | return false; | 820 | return false; |
828 | } | 821 | } |
@@ -843,7 +836,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, | |||
843 | gen8_ppgtt_pml4e_t scratch_pml4e = | 836 | gen8_ppgtt_pml4e_t scratch_pml4e = |
844 | gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC); | 837 | gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC); |
845 | 838 | ||
846 | GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))); | 839 | GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915)); |
847 | 840 | ||
848 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { | 841 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
849 | if (WARN_ON(!pml4->pdps[pml4e])) | 842 | if (WARN_ON(!pml4->pdps[pml4e])) |
@@ -854,6 +847,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, | |||
854 | pml4e_vaddr = kmap_px(pml4); | 847 | pml4e_vaddr = kmap_px(pml4); |
855 | pml4e_vaddr[pml4e] = scratch_pml4e; | 848 | pml4e_vaddr[pml4e] = scratch_pml4e; |
856 | kunmap_px(ppgtt, pml4e_vaddr); | 849 | kunmap_px(ppgtt, pml4e_vaddr); |
850 | free_pdp(vm->i915, pdp); | ||
857 | } | 851 | } |
858 | } | 852 | } |
859 | } | 853 | } |
@@ -863,7 +857,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | |||
863 | { | 857 | { |
864 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); | 858 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
865 | 859 | ||
866 | if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) | 860 | if (USES_FULL_48BIT_PPGTT(vm->i915)) |
867 | gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length); | 861 | gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length); |
868 | else | 862 | else |
869 | gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length); | 863 | gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length); |
@@ -898,7 +892,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, | |||
898 | kunmap_px(ppgtt, pt_vaddr); | 892 | kunmap_px(ppgtt, pt_vaddr); |
899 | pt_vaddr = NULL; | 893 | pt_vaddr = NULL; |
900 | if (++pde == I915_PDES) { | 894 | if (++pde == I915_PDES) { |
901 | if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev))) | 895 | if (++pdpe == I915_PDPES_PER_PDP(vm->i915)) |
902 | break; | 896 | break; |
903 | pde = 0; | 897 | pde = 0; |
904 | } | 898 | } |
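
The insert path above walks the page-table tree with a manual carry: when the page-directory-entry index wraps at I915_PDES, it resets and the directory-pointer index advances, and the walk stops once that, too, is exhausted. A self-contained sketch of the same index arithmetic, with stand-in constants:

    #include <stdio.h>

    #define PDES_PER_PD   512  /* stand-in for I915_PDES */
    #define PDPES_PER_PDP   4  /* stand-in for I915_PDPES_PER_PDP() */

    int main(void)
    {
            unsigned int pdpe = 0, pde = 0, tables = 0;

            for (;;) {
                    tables++;               /* visit one page table */
                    if (++pde == PDES_PER_PD) {
                            if (++pdpe == PDPES_PER_PDP)
                                    break;  /* address space exhausted */
                            pde = 0;        /* carry into next level */
                    }
            }
            printf("walked %u page tables\n", tables);
            return 0;
    }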
@@ -921,7 +915,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | |||
921 | 915 | ||
922 | __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); | 916 | __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); |
923 | 917 | ||
924 | if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) { | 918 | if (!USES_FULL_48BIT_PPGTT(vm->i915)) { |
925 | gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, | 919 | gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, |
926 | cache_level); | 920 | cache_level); |
927 | } else { | 921 | } else { |
@@ -955,7 +949,7 @@ static void gen8_free_page_tables(struct drm_i915_private *dev_priv, | |||
955 | 949 | ||
956 | static int gen8_init_scratch(struct i915_address_space *vm) | 950 | static int gen8_init_scratch(struct i915_address_space *vm) |
957 | { | 951 | { |
958 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 952 | struct drm_i915_private *dev_priv = vm->i915; |
959 | int ret; | 953 | int ret; |
960 | 954 | ||
961 | ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); | 955 | ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); |
@@ -1002,7 +996,7 @@ free_scratch_page: | |||
1002 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) | 996 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) |
1003 | { | 997 | { |
1004 | enum vgt_g2v_type msg; | 998 | enum vgt_g2v_type msg; |
1005 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); | 999 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
1006 | int i; | 1000 | int i; |
1007 | 1001 | ||
1008 | if (USES_FULL_48BIT_PPGTT(dev_priv)) { | 1002 | if (USES_FULL_48BIT_PPGTT(dev_priv)) { |
@@ -1032,7 +1026,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) | |||
1032 | 1026 | ||
1033 | static void gen8_free_scratch(struct i915_address_space *vm) | 1027 | static void gen8_free_scratch(struct i915_address_space *vm) |
1034 | { | 1028 | { |
1035 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1029 | struct drm_i915_private *dev_priv = vm->i915; |
1036 | 1030 | ||
1037 | if (USES_FULL_48BIT_PPGTT(dev_priv)) | 1031 | if (USES_FULL_48BIT_PPGTT(dev_priv)) |
1038 | free_pdp(dev_priv, vm->scratch_pdp); | 1032 | free_pdp(dev_priv, vm->scratch_pdp); |
@@ -1059,7 +1053,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv, | |||
1059 | 1053 | ||
1060 | static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) | 1054 | static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) |
1061 | { | 1055 | { |
1062 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); | 1056 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
1063 | int i; | 1057 | int i; |
1064 | 1058 | ||
1065 | for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) { | 1059 | for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) { |
@@ -1074,7 +1068,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) | |||
1074 | 1068 | ||
1075 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | 1069 | static void gen8_ppgtt_cleanup(struct i915_address_space *vm) |
1076 | { | 1070 | { |
1077 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1071 | struct drm_i915_private *dev_priv = vm->i915; |
1078 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); | 1072 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
1079 | 1073 | ||
1080 | if (intel_vgpu_active(dev_priv)) | 1074 | if (intel_vgpu_active(dev_priv)) |
@@ -1112,7 +1106,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, | |||
1112 | uint64_t length, | 1106 | uint64_t length, |
1113 | unsigned long *new_pts) | 1107 | unsigned long *new_pts) |
1114 | { | 1108 | { |
1115 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1109 | struct drm_i915_private *dev_priv = vm->i915; |
1116 | struct i915_page_table *pt; | 1110 | struct i915_page_table *pt; |
1117 | uint32_t pde; | 1111 | uint32_t pde; |
1118 | 1112 | ||
@@ -1173,7 +1167,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, | |||
1173 | uint64_t length, | 1167 | uint64_t length, |
1174 | unsigned long *new_pds) | 1168 | unsigned long *new_pds) |
1175 | { | 1169 | { |
1176 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1170 | struct drm_i915_private *dev_priv = vm->i915; |
1177 | struct i915_page_directory *pd; | 1171 | struct i915_page_directory *pd; |
1178 | uint32_t pdpe; | 1172 | uint32_t pdpe; |
1179 | uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv); | 1173 | uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv); |
@@ -1226,7 +1220,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, | |||
1226 | uint64_t length, | 1220 | uint64_t length, |
1227 | unsigned long *new_pdps) | 1221 | unsigned long *new_pdps) |
1228 | { | 1222 | { |
1229 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1223 | struct drm_i915_private *dev_priv = vm->i915; |
1230 | struct i915_page_directory_pointer *pdp; | 1224 | struct i915_page_directory_pointer *pdp; |
1231 | uint32_t pml4e; | 1225 | uint32_t pml4e; |
1232 | 1226 | ||
@@ -1301,7 +1295,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, | |||
1301 | { | 1295 | { |
1302 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); | 1296 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
1303 | unsigned long *new_page_dirs, *new_page_tables; | 1297 | unsigned long *new_page_dirs, *new_page_tables; |
1304 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1298 | struct drm_i915_private *dev_priv = vm->i915; |
1305 | struct i915_page_directory *pd; | 1299 | struct i915_page_directory *pd; |
1306 | const uint64_t orig_start = start; | 1300 | const uint64_t orig_start = start; |
1307 | const uint64_t orig_length = length; | 1301 | const uint64_t orig_length = length; |
@@ -1309,15 +1303,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, | |||
1309 | uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv); | 1303 | uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv); |
1310 | int ret; | 1304 | int ret; |
1311 | 1305 | ||
1312 | /* Wrap is never okay since we can only represent 48b, and we don't | ||
1313 | * actually use the other side of the canonical address space. | ||
1314 | */ | ||
1315 | if (WARN_ON(start + length < start)) | ||
1316 | return -ENODEV; | ||
1317 | |||
1318 | if (WARN_ON(start + length > vm->total)) | ||
1319 | return -ENODEV; | ||
1320 | |||
1321 | ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes); | 1306 | ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes); |
1322 | if (ret) | 1307 | if (ret) |
1323 | return ret; | 1308 | return ret; |
@@ -1450,7 +1435,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, | |||
1450 | 1435 | ||
1451 | err_out: | 1436 | err_out: |
1452 | for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) | 1437 | for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) |
1453 | gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]); | 1438 | gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]); |
1454 | 1439 | ||
1455 | return ret; | 1440 | return ret; |
1456 | } | 1441 | } |
@@ -1460,7 +1445,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, | |||
1460 | { | 1445 | { |
1461 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); | 1446 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
1462 | 1447 | ||
1463 | if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) | 1448 | if (USES_FULL_48BIT_PPGTT(vm->i915)) |
1464 | return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length); | 1449 | return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length); |
1465 | else | 1450 | else |
1466 | return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length); | 1451 | return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length); |
@@ -1531,7 +1516,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | |||
1531 | gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, | 1516 | gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, |
1532 | I915_CACHE_LLC); | 1517 | I915_CACHE_LLC); |
1533 | 1518 | ||
1534 | if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) { | 1519 | if (!USES_FULL_48BIT_PPGTT(vm->i915)) { |
1535 | gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); | 1520 | gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); |
1536 | } else { | 1521 | } else { |
1537 | uint64_t pml4e; | 1522 | uint64_t pml4e; |
@@ -1584,7 +1569,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt) | |||
1584 | */ | 1569 | */ |
1585 | static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | 1570 | static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
1586 | { | 1571 | { |
1587 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); | 1572 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
1588 | int ret; | 1573 | int ret; |
1589 | 1574 | ||
1590 | ret = gen8_init_scratch(&ppgtt->base); | 1575 | ret = gen8_init_scratch(&ppgtt->base); |
@@ -1927,7 +1912,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
1927 | uint64_t start_in, uint64_t length_in) | 1912 | uint64_t start_in, uint64_t length_in) |
1928 | { | 1913 | { |
1929 | DECLARE_BITMAP(new_page_tables, I915_PDES); | 1914 | DECLARE_BITMAP(new_page_tables, I915_PDES); |
1930 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1915 | struct drm_i915_private *dev_priv = vm->i915; |
1931 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 1916 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
1932 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); | 1917 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
1933 | struct i915_page_table *pt; | 1918 | struct i915_page_table *pt; |
@@ -1935,9 +1920,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
1935 | uint32_t pde; | 1920 | uint32_t pde; |
1936 | int ret; | 1921 | int ret; |
1937 | 1922 | ||
1938 | if (WARN_ON(start_in + length_in > ppgtt->base.total)) | ||
1939 | return -ENODEV; | ||
1940 | |||
1941 | start = start_save = start_in; | 1923 | start = start_save = start_in; |
1942 | length = length_save = length_in; | 1924 | length = length_save = length_in; |
1943 | 1925 | ||
@@ -2014,7 +1996,7 @@ unwind_out: | |||
2014 | 1996 | ||
2015 | static int gen6_init_scratch(struct i915_address_space *vm) | 1997 | static int gen6_init_scratch(struct i915_address_space *vm) |
2016 | { | 1998 | { |
2017 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 1999 | struct drm_i915_private *dev_priv = vm->i915; |
2018 | int ret; | 2000 | int ret; |
2019 | 2001 | ||
2020 | ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); | 2002 | ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); |
@@ -2034,7 +2016,7 @@ static int gen6_init_scratch(struct i915_address_space *vm) | |||
2034 | 2016 | ||
2035 | static void gen6_free_scratch(struct i915_address_space *vm) | 2017 | static void gen6_free_scratch(struct i915_address_space *vm) |
2036 | { | 2018 | { |
2037 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 2019 | struct drm_i915_private *dev_priv = vm->i915; |
2038 | 2020 | ||
2039 | free_pt(dev_priv, vm->scratch_pt); | 2021 | free_pt(dev_priv, vm->scratch_pt); |
2040 | cleanup_scratch_page(dev_priv, &vm->scratch_page); | 2022 | cleanup_scratch_page(dev_priv, &vm->scratch_page); |
@@ -2044,7 +2026,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) | |||
2044 | { | 2026 | { |
2045 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); | 2027 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
2046 | struct i915_page_directory *pd = &ppgtt->pd; | 2028 | struct i915_page_directory *pd = &ppgtt->pd; |
2047 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 2029 | struct drm_i915_private *dev_priv = vm->i915; |
2048 | struct i915_page_table *pt; | 2030 | struct i915_page_table *pt; |
2049 | uint32_t pde; | 2031 | uint32_t pde; |
2050 | 2032 | ||
@@ -2060,7 +2042,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) | |||
2060 | static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) | 2042 | static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) |
2061 | { | 2043 | { |
2062 | struct i915_address_space *vm = &ppgtt->base; | 2044 | struct i915_address_space *vm = &ppgtt->base; |
2063 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); | 2045 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
2064 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 2046 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
2065 | bool retried = false; | 2047 | bool retried = false; |
2066 | int ret; | 2048 | int ret; |
@@ -2076,15 +2058,15 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) | |||
2076 | return ret; | 2058 | return ret; |
2077 | 2059 | ||
2078 | alloc: | 2060 | alloc: |
2079 | ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, | 2061 | ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ppgtt->node, |
2080 | &ppgtt->node, GEN6_PD_SIZE, | 2062 | GEN6_PD_SIZE, GEN6_PD_ALIGN, |
2081 | GEN6_PD_ALIGN, 0, | 2063 | I915_COLOR_UNEVICTABLE, |
2082 | 0, ggtt->base.total, | 2064 | 0, ggtt->base.total, |
2083 | DRM_MM_TOPDOWN); | 2065 | DRM_MM_TOPDOWN); |
2084 | if (ret == -ENOSPC && !retried) { | 2066 | if (ret == -ENOSPC && !retried) { |
2085 | ret = i915_gem_evict_something(&ggtt->base, | 2067 | ret = i915_gem_evict_something(&ggtt->base, |
2086 | GEN6_PD_SIZE, GEN6_PD_ALIGN, | 2068 | GEN6_PD_SIZE, GEN6_PD_ALIGN, |
2087 | I915_CACHE_NONE, | 2069 | I915_COLOR_UNEVICTABLE, |
2088 | 0, ggtt->base.total, | 2070 | 0, ggtt->base.total, |
2089 | 0); | 2071 | 0); |
2090 | if (ret) | 2072 | if (ret) |
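
The reworked allocation above keeps the existing try-once, evict, retry-once shape around drm_mm_insert_node_in_range_generic(); only the colour argument changes to I915_COLOR_UNEVICTABLE. A sketch of that retry pattern, with try_insert() and evict_something() as hypothetical stand-ins for the drm_mm and i915_gem_evict calls:

    #include <errno.h>
    #include <stdbool.h>

    int try_insert(void *mm, void *node);  /* assumed: 0 or -ENOSPC */
    int evict_something(void *mm);         /* assumed: makes room */

    int insert_with_eviction(void *mm, void *node)
    {
            bool retried = false;
            int ret;

    alloc:
            ret = try_insert(mm, node);
            if (ret == -ENOSPC && !retried) {
                    ret = evict_something(mm);
                    if (ret)
                            return ret;
                    retried = true;  /* retry exactly once */
                    goto alloc;
            }
            return ret;
    }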
@@ -2125,7 +2107,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, | |||
2125 | 2107 | ||
2126 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | 2108 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
2127 | { | 2109 | { |
2128 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); | 2110 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
2129 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 2111 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
2130 | int ret; | 2112 | int ret; |
2131 | 2113 | ||
@@ -2176,7 +2158,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
2176 | static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt, | 2158 | static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt, |
2177 | struct drm_i915_private *dev_priv) | 2159 | struct drm_i915_private *dev_priv) |
2178 | { | 2160 | { |
2179 | ppgtt->base.dev = &dev_priv->drm; | 2161 | ppgtt->base.i915 = dev_priv; |
2180 | 2162 | ||
2181 | if (INTEL_INFO(dev_priv)->gen < 8) | 2163 | if (INTEL_INFO(dev_priv)->gen < 8) |
2182 | return gen6_ppgtt_init(ppgtt); | 2164 | return gen6_ppgtt_init(ppgtt); |
@@ -2379,10 +2361,24 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) | |||
2379 | int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, | 2361 | int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, |
2380 | struct sg_table *pages) | 2362 | struct sg_table *pages) |
2381 | { | 2363 | { |
2382 | if (dma_map_sg(&obj->base.dev->pdev->dev, | 2364 | do { |
2383 | pages->sgl, pages->nents, | 2365 | if (dma_map_sg(&obj->base.dev->pdev->dev, |
2384 | PCI_DMA_BIDIRECTIONAL)) | 2366 | pages->sgl, pages->nents, |
2385 | return 0; | 2367 | PCI_DMA_BIDIRECTIONAL)) |
2368 | return 0; | ||
2369 | |||
2370 | /* If the DMA remap fails, one cause can be that we have | ||
2371 | * too many objects pinned in a small remapping table, | ||
2372 | * such as swiotlb. Incrementally purge all other objects and | ||
2373 | * try again - if there are no more pages to remove from | ||
2374 | * the DMA remapper, i915_gem_shrink will return 0. | ||
2375 | */ | ||
2376 | GEM_BUG_ON(obj->mm.pages == pages); | ||
2377 | } while (i915_gem_shrink(to_i915(obj->base.dev), | ||
2378 | obj->base.size >> PAGE_SHIFT, | ||
2379 | I915_SHRINK_BOUND | | ||
2380 | I915_SHRINK_UNBOUND | | ||
2381 | I915_SHRINK_ACTIVE)); | ||
2386 | 2382 | ||
2387 | return -ENOSPC; | 2383 | return -ENOSPC; |
2388 | } | 2384 | } |
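
The comment in the new loop explains the retry policy: if dma_map_sg() fails, the DMA remapping table (swiotlb, for instance) may simply be full of other pinned objects, so the driver shrinks and tries again until i915_gem_shrink() reports nothing left to reclaim. A hedged userspace model of that loop, with map_pages() and shrink_objects() as hypothetical stand-ins:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    bool map_pages(void *pages);           /* assumed: true on success */
    size_t shrink_objects(size_t target);  /* assumed: pages reclaimed */

    int prepare_pages(void *pages, size_t npages)
    {
            do {
                    if (map_pages(pages))
                            return 0;
                    /* Full remap table? Purge other objects and retry;
                     * a return of 0 below means nothing was left. */
            } while (shrink_objects(npages));

            return -ENOSPC;
    }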
@@ -2398,7 +2394,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, | |||
2398 | enum i915_cache_level level, | 2394 | enum i915_cache_level level, |
2399 | u32 unused) | 2395 | u32 unused) |
2400 | { | 2396 | { |
2401 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 2397 | struct drm_i915_private *dev_priv = vm->i915; |
2402 | gen8_pte_t __iomem *pte = | 2398 | gen8_pte_t __iomem *pte = |
2403 | (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + | 2399 | (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + |
2404 | (offset >> PAGE_SHIFT); | 2400 | (offset >> PAGE_SHIFT); |
@@ -2414,7 +2410,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | |||
2414 | uint64_t start, | 2410 | uint64_t start, |
2415 | enum i915_cache_level level, u32 unused) | 2411 | enum i915_cache_level level, u32 unused) |
2416 | { | 2412 | { |
2417 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 2413 | struct drm_i915_private *dev_priv = vm->i915; |
2418 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); | 2414 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
2419 | struct sgt_iter sgt_iter; | 2415 | struct sgt_iter sgt_iter; |
2420 | gen8_pte_t __iomem *gtt_entries; | 2416 | gen8_pte_t __iomem *gtt_entries; |
@@ -2479,7 +2475,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm, | |||
2479 | enum i915_cache_level level, | 2475 | enum i915_cache_level level, |
2480 | u32 flags) | 2476 | u32 flags) |
2481 | { | 2477 | { |
2482 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 2478 | struct drm_i915_private *dev_priv = vm->i915; |
2483 | gen6_pte_t __iomem *pte = | 2479 | gen6_pte_t __iomem *pte = |
2484 | (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + | 2480 | (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + |
2485 | (offset >> PAGE_SHIFT); | 2481 | (offset >> PAGE_SHIFT); |
@@ -2501,7 +2497,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, | |||
2501 | uint64_t start, | 2497 | uint64_t start, |
2502 | enum i915_cache_level level, u32 flags) | 2498 | enum i915_cache_level level, u32 flags) |
2503 | { | 2499 | { |
2504 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | 2500 | struct drm_i915_private *dev_priv = vm->i915; |
2505 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); | 2501 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
2506 | struct sgt_iter sgt_iter; | 2502 | struct sgt_iter sgt_iter; |
2507 | gen6_pte_t __iomem *gtt_entries; | 2503 | gen6_pte_t __iomem *gtt_entries; |
@@ -2621,7 +2617,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, | |||
2621 | enum i915_cache_level cache_level, | 2617 | enum i915_cache_level cache_level, |
2622 | u32 flags) | 2618 | u32 flags) |
2623 | { | 2619 | { |
2624 | struct drm_i915_private *i915 = to_i915(vma->vm->dev); | 2620 | struct drm_i915_private *i915 = vma->vm->i915; |
2625 | struct drm_i915_gem_object *obj = vma->obj; | 2621 | struct drm_i915_gem_object *obj = vma->obj; |
2626 | u32 pte_flags = 0; | 2622 | u32 pte_flags = 0; |
2627 | int ret; | 2623 | int ret; |
@@ -2653,7 +2649,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, | |||
2653 | enum i915_cache_level cache_level, | 2649 | enum i915_cache_level cache_level, |
2654 | u32 flags) | 2650 | u32 flags) |
2655 | { | 2651 | { |
2656 | struct drm_i915_private *i915 = to_i915(vma->vm->dev); | 2652 | struct drm_i915_private *i915 = vma->vm->i915; |
2657 | u32 pte_flags; | 2653 | u32 pte_flags; |
2658 | int ret; | 2654 | int ret; |
2659 | 2655 | ||
@@ -2687,7 +2683,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, | |||
2687 | 2683 | ||
2688 | static void ggtt_unbind_vma(struct i915_vma *vma) | 2684 | static void ggtt_unbind_vma(struct i915_vma *vma) |
2689 | { | 2685 | { |
2690 | struct drm_i915_private *i915 = to_i915(vma->vm->dev); | 2686 | struct drm_i915_private *i915 = vma->vm->i915; |
2691 | struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; | 2687 | struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; |
2692 | const u64 size = min(vma->size, vma->node.size); | 2688 | const u64 size = min(vma->size, vma->node.size); |
2693 | 2689 | ||
@@ -2758,7 +2754,8 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) | |||
2758 | /* Reserve a mappable slot for our lockless error capture */ | 2754 | /* Reserve a mappable slot for our lockless error capture */ |
2759 | ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, | 2755 | ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, |
2760 | &ggtt->error_capture, | 2756 | &ggtt->error_capture, |
2761 | 4096, 0, -1, | 2757 | 4096, 0, |
2758 | I915_COLOR_UNEVICTABLE, | ||
2762 | 0, ggtt->mappable_end, | 2759 | 0, ggtt->mappable_end, |
2763 | 0, 0); | 2760 | 0, 0); |
2764 | if (ret) | 2761 | if (ret) |
@@ -2927,8 +2924,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) | |||
2927 | 2924 | ||
2928 | static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) | 2925 | static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) |
2929 | { | 2926 | { |
2930 | struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); | 2927 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
2931 | struct pci_dev *pdev = ggtt->base.dev->pdev; | 2928 | struct pci_dev *pdev = dev_priv->drm.pdev; |
2932 | phys_addr_t phys_addr; | 2929 | phys_addr_t phys_addr; |
2933 | int ret; | 2930 | int ret; |
2934 | 2931 | ||
@@ -2942,7 +2939,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) | |||
2942 | * resort to an uncached mapping. The WC issue is easily caught by the | 2939 | * resort to an uncached mapping. The WC issue is easily caught by the |
2943 | * readback check when writing GTT PTE entries. | 2940 | * readback check when writing GTT PTE entries. |
2944 | */ | 2941 | */ |
2945 | if (IS_BROXTON(dev_priv)) | 2942 | if (IS_GEN9_LP(dev_priv)) |
2946 | ggtt->gsm = ioremap_nocache(phys_addr, size); | 2943 | ggtt->gsm = ioremap_nocache(phys_addr, size); |
2947 | else | 2944 | else |
2948 | ggtt->gsm = ioremap_wc(phys_addr, size); | 2945 | ggtt->gsm = ioremap_wc(phys_addr, size); |
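
The platform check above now keys off IS_GEN9_LP() rather than IS_BROXTON() when choosing between a write-combined and an uncached mapping for the GSM. The surrounding comment notes that a broken WC mapping shows up as a failed readback when writing PTEs; the driver picks the mapping per platform up front, but the detection idea can be sketched as a runtime probe (map_wc/map_uc/unmap are hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    volatile uint64_t *map_wc(uint64_t phys, size_t size);  /* assumed */
    volatile uint64_t *map_uc(uint64_t phys, size_t size);  /* assumed */
    void unmap(volatile uint64_t *p);                       /* assumed */

    volatile uint64_t *map_gsm(uint64_t phys, size_t size)
    {
            volatile uint64_t *gsm = map_wc(phys, size);

            if (gsm) {
                    gsm[0] = 0xdeadbeef;        /* scratch PTE write */
                    if (gsm[0] != 0xdeadbeef) { /* WC write was lost */
                            unmap(gsm);
                            gsm = map_uc(phys, size);
                    }
            }
            return gsm;
    }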
@@ -3040,12 +3037,12 @@ static void gen6_gmch_remove(struct i915_address_space *vm) | |||
3040 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); | 3037 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
3041 | 3038 | ||
3042 | iounmap(ggtt->gsm); | 3039 | iounmap(ggtt->gsm); |
3043 | cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page); | 3040 | cleanup_scratch_page(vm->i915, &vm->scratch_page); |
3044 | } | 3041 | } |
3045 | 3042 | ||
3046 | static int gen8_gmch_probe(struct i915_ggtt *ggtt) | 3043 | static int gen8_gmch_probe(struct i915_ggtt *ggtt) |
3047 | { | 3044 | { |
3048 | struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); | 3045 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
3049 | struct pci_dev *pdev = dev_priv->drm.pdev; | 3046 | struct pci_dev *pdev = dev_priv->drm.pdev; |
3050 | unsigned int size; | 3047 | unsigned int size; |
3051 | u16 snb_gmch_ctl; | 3048 | u16 snb_gmch_ctl; |
@@ -3072,7 +3069,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
3072 | 3069 | ||
3073 | ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; | 3070 | ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; |
3074 | 3071 | ||
3075 | if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) | 3072 | if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) |
3076 | chv_setup_private_ppat(dev_priv); | 3073 | chv_setup_private_ppat(dev_priv); |
3077 | else | 3074 | else |
3078 | bdw_setup_private_ppat(dev_priv); | 3075 | bdw_setup_private_ppat(dev_priv); |
@@ -3094,7 +3091,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
3094 | 3091 | ||
3095 | static int gen6_gmch_probe(struct i915_ggtt *ggtt) | 3092 | static int gen6_gmch_probe(struct i915_ggtt *ggtt) |
3096 | { | 3093 | { |
3097 | struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); | 3094 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
3098 | struct pci_dev *pdev = dev_priv->drm.pdev; | 3095 | struct pci_dev *pdev = dev_priv->drm.pdev; |
3099 | unsigned int size; | 3096 | unsigned int size; |
3100 | u16 snb_gmch_ctl; | 3097 | u16 snb_gmch_ctl; |
@@ -3147,7 +3144,7 @@ static void i915_gmch_remove(struct i915_address_space *vm) | |||
3147 | 3144 | ||
3148 | static int i915_gmch_probe(struct i915_ggtt *ggtt) | 3145 | static int i915_gmch_probe(struct i915_ggtt *ggtt) |
3149 | { | 3146 | { |
3150 | struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); | 3147 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
3151 | int ret; | 3148 | int ret; |
3152 | 3149 | ||
3153 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); | 3150 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); |
@@ -3156,8 +3153,10 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) | |||
3156 | return -EIO; | 3153 | return -EIO; |
3157 | } | 3154 | } |
3158 | 3155 | ||
3159 | intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, | 3156 | intel_gtt_get(&ggtt->base.total, |
3160 | &ggtt->mappable_base, &ggtt->mappable_end); | 3157 | &ggtt->stolen_size, |
3158 | &ggtt->mappable_base, | ||
3159 | &ggtt->mappable_end); | ||
3161 | 3160 | ||
3162 | ggtt->do_idle_maps = needs_idle_maps(dev_priv); | 3161 | ggtt->do_idle_maps = needs_idle_maps(dev_priv); |
3163 | ggtt->base.insert_page = i915_ggtt_insert_page; | 3162 | ggtt->base.insert_page = i915_ggtt_insert_page; |
@@ -3182,7 +3181,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) | |||
3182 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 3181 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
3183 | int ret; | 3182 | int ret; |
3184 | 3183 | ||
3185 | ggtt->base.dev = &dev_priv->drm; | 3184 | ggtt->base.i915 = dev_priv; |
3186 | 3185 | ||
3187 | if (INTEL_GEN(dev_priv) <= 5) | 3186 | if (INTEL_GEN(dev_priv) <= 5) |
3188 | ret = i915_gmch_probe(ggtt); | 3187 | ret = i915_gmch_probe(ggtt); |
@@ -3193,6 +3192,16 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) | |||
3193 | if (ret) | 3192 | if (ret) |
3194 | return ret; | 3193 | return ret; |
3195 | 3194 | ||
3195 | /* Trim the GGTT to fit the GuC mappable upper range (when enabled). | ||
3196 | * This is easier than doing range restriction on the fly, as we | ||
3197 | * currently don't have any bits spare to pass in this upper | ||
3198 | * restriction! | ||
3199 | */ | ||
3200 | if (HAS_GUC(dev_priv) && i915.enable_guc_loading) { | ||
3201 | ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP); | ||
3202 | ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); | ||
3203 | } | ||
3204 | |||
3196 | if ((ggtt->base.total - 1) >> 32) { | 3205 | if ((ggtt->base.total - 1) >> 32) { |
3197 | DRM_ERROR("We never expected a Global GTT with more than 32bits" | 3206 | DRM_ERROR("We never expected a Global GTT with more than 32bits" |
3198 | " of address space! Found %lldM!\n", | 3207 | " of address space! Found %lldM!\n", |
@@ -3212,7 +3221,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) | |||
3212 | DRM_INFO("Memory usable by graphics device = %lluM\n", | 3221 | DRM_INFO("Memory usable by graphics device = %lluM\n", |
3213 | ggtt->base.total >> 20); | 3222 | ggtt->base.total >> 20); |
3214 | DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20); | 3223 | DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20); |
3215 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20); | 3224 | DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20); |
3216 | #ifdef CONFIG_INTEL_IOMMU | 3225 | #ifdef CONFIG_INTEL_IOMMU |
3217 | if (intel_iommu_gfx_mapped) | 3226 | if (intel_iommu_gfx_mapped) |
3218 | DRM_INFO("VT-d active for gfx access\n"); | 3227 | DRM_INFO("VT-d active for gfx access\n"); |
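
The new block above trims the GGTT rather than range-restricting individual allocations: with GuC loading enabled, everything the driver could ever place in the GGTT must stay below GUC_GGTT_TOP, so the total (and, transitively, mappable_end) is clamped once at probe time. A minimal sketch of the clamp, with an illustrative limit:

    #include <stdint.h>

    #define GUC_TOP ((uint64_t)0xFEE00000)  /* illustrative limit */

    struct ggtt { uint64_t total, mappable_end; };

    void trim_for_guc(struct ggtt *ggtt)
    {
            if (ggtt->total > GUC_TOP)
                    ggtt->total = GUC_TOP;           /* min(total, top) */
            if (ggtt->mappable_end > ggtt->total)
                    ggtt->mappable_end = ggtt->total;
    }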
@@ -3312,7 +3321,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) | |||
3312 | ggtt->base.closed = false; | 3321 | ggtt->base.closed = false; |
3313 | 3322 | ||
3314 | if (INTEL_GEN(dev_priv) >= 8) { | 3323 | if (INTEL_GEN(dev_priv) >= 8) { |
3315 | if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) | 3324 | if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) |
3316 | chv_setup_private_ppat(dev_priv); | 3325 | chv_setup_private_ppat(dev_priv); |
3317 | else | 3326 | else |
3318 | bdw_setup_private_ppat(dev_priv); | 3327 | bdw_setup_private_ppat(dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 4f35be4c26c7..9e91d7e6149c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -220,7 +220,7 @@ struct i915_pml4 { | |||
220 | struct i915_address_space { | 220 | struct i915_address_space { |
221 | struct drm_mm mm; | 221 | struct drm_mm mm; |
222 | struct i915_gem_timeline timeline; | 222 | struct i915_gem_timeline timeline; |
223 | struct drm_device *dev; | 223 | struct drm_i915_private *i915; |
224 | /* Every address space belongs to a struct file - except for the global | 224 | /* Every address space belongs to a struct file - except for the global |
225 | * GTT that is owned by the driver (and so @file is set to NULL). In | 225 | * GTT that is owned by the driver (and so @file is set to NULL). In |
226 | * principle, no information should leak from one context to another | 226 | * principle, no information should leak from one context to another |
@@ -315,12 +315,21 @@ struct i915_ggtt { | |||
315 | struct i915_address_space base; | 315 | struct i915_address_space base; |
316 | struct io_mapping mappable; /* Mapping to our CPU mappable region */ | 316 | struct io_mapping mappable; /* Mapping to our CPU mappable region */ |
317 | 317 | ||
318 | size_t stolen_size; /* Total size of stolen memory */ | ||
319 | size_t stolen_usable_size; /* Total size minus BIOS reserved */ | ||
320 | size_t stolen_reserved_base; | ||
321 | size_t stolen_reserved_size; | ||
322 | u64 mappable_end; /* End offset that we can CPU map */ | ||
323 | phys_addr_t mappable_base; /* PA of our GMADR */ | 318 | phys_addr_t mappable_base; /* PA of our GMADR */ |
319 | u64 mappable_end; /* End offset that we can CPU map */ | ||
320 | |||
321 | /* Stolen memory is segmented in hardware with different portions | ||
322 | * offlimits to certain functions. | ||
323 | * | ||
324 | * The drm_mm is initialised to the total accessible range, as found | ||
325 | * from the PCI config. On Broadwell+, this is further restricted to | ||
326 | * avoid the first page! The upper end of stolen memory is reserved for | ||
327 | * hardware functions and similarly removed from the accessible range. | ||
328 | */ | ||
329 | u32 stolen_size; /* Total size of stolen memory */ | ||
330 | u32 stolen_usable_size; /* Total size minus reserved ranges */ | ||
331 | u32 stolen_reserved_base; | ||
332 | u32 stolen_reserved_size; | ||
324 | 333 | ||
325 | /** "Graphics Stolen Memory" holds the global PTEs */ | 334 | /** "Graphics Stolen Memory" holds the global PTEs */ |
326 | void __iomem *gsm; | 335 | void __iomem *gsm; |
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 4b3ff3e5b911..2222863e505f 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c | |||
@@ -71,7 +71,7 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | |||
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; | 73 | gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; |
74 | if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) { | 74 | if (IS_I965GM(i915) || IS_I965G(i915)) { |
75 | /* 965gm cannot relocate objects above 4GiB. */ | 75 | /* 965gm cannot relocate objects above 4GiB. */ |
76 | gfp &= ~__GFP_HIGHMEM; | 76 | gfp &= ~__GFP_HIGHMEM; |
77 | gfp |= __GFP_DMA32; | 77 | gfp |= __GFP_DMA32; |
@@ -155,7 +155,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, | |||
155 | { | 155 | { |
156 | struct drm_i915_gem_object *obj; | 156 | struct drm_i915_gem_object *obj; |
157 | 157 | ||
158 | obj = i915_gem_object_alloc(&i915->drm); | 158 | obj = i915_gem_object_alloc(i915); |
159 | if (!obj) | 159 | if (!obj) |
160 | return ERR_PTR(-ENOMEM); | 160 | return ERR_PTR(-ENOMEM); |
161 | 161 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index b8f403faadbb..99056b948eda 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
@@ -62,6 +62,15 @@ static void i915_fence_release(struct dma_fence *fence) | |||
62 | { | 62 | { |
63 | struct drm_i915_gem_request *req = to_request(fence); | 63 | struct drm_i915_gem_request *req = to_request(fence); |
64 | 64 | ||
65 | /* The request is put onto an RCU freelist (i.e. the address | ||
66 | * is immediately reused), mark the fences as being freed now. | ||
67 | * Otherwise the debugobjects for the fences are only marked as | ||
68 | * freed when the slab cache itself is freed, and so we would get | ||
69 | * caught trying to reuse dead objects. | ||
70 | */ | ||
71 | i915_sw_fence_fini(&req->submit); | ||
72 | i915_sw_fence_fini(&req->execute); | ||
73 | |||
65 | kmem_cache_free(req->i915->requests, req); | 74 | kmem_cache_free(req->i915->requests, req); |
66 | } | 75 | } |
67 | 76 | ||
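
The comment added to i915_fence_release() is the key: because requests are recycled through an RCU-style freelist, the slab slot can be reused the moment it is freed, so the embedded fences must have their debugobjects state finalised first. The ordering, as a sketch with hypothetical stand-ins for i915_sw_fence_fini() and kmem_cache_free():

    struct fence { int debug_state; };
    struct request { struct fence submit, execute; };

    void fence_fini(struct fence *f);     /* assumed: marks f as freed */
    void cache_free(struct request *rq);  /* assumed: recycles the slot */

    void request_release(struct request *rq)
    {
            fence_fini(&rq->submit);   /* finalise before freeing ... */
            fence_fini(&rq->execute);
            cache_free(rq);            /* ... the slot may be reused now */
    }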
@@ -197,6 +206,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active, | |||
197 | 206 | ||
198 | static void i915_gem_request_retire(struct drm_i915_gem_request *request) | 207 | static void i915_gem_request_retire(struct drm_i915_gem_request *request) |
199 | { | 208 | { |
209 | struct intel_engine_cs *engine = request->engine; | ||
200 | struct i915_gem_active *active, *next; | 210 | struct i915_gem_active *active, *next; |
201 | 211 | ||
202 | lockdep_assert_held(&request->i915->drm.struct_mutex); | 212 | lockdep_assert_held(&request->i915->drm.struct_mutex); |
@@ -207,9 +217,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) | |||
207 | 217 | ||
208 | trace_i915_gem_request_retire(request); | 218 | trace_i915_gem_request_retire(request); |
209 | 219 | ||
210 | spin_lock_irq(&request->engine->timeline->lock); | 220 | spin_lock_irq(&engine->timeline->lock); |
211 | list_del_init(&request->link); | 221 | list_del_init(&request->link); |
212 | spin_unlock_irq(&request->engine->timeline->lock); | 222 | spin_unlock_irq(&engine->timeline->lock); |
213 | 223 | ||
214 | /* We know the GPU must have read the request to have | 224 | /* We know the GPU must have read the request to have |
215 | * sent us the seqno + interrupt, so use the position | 225 | * sent us the seqno + interrupt, so use the position |
@@ -257,13 +267,20 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) | |||
257 | 267 | ||
258 | i915_gem_request_remove_from_client(request); | 268 | i915_gem_request_remove_from_client(request); |
259 | 269 | ||
260 | if (request->previous_context) { | 270 | /* Retirement decays the ban score as it is a sign of ctx progress */ |
261 | if (i915.enable_execlists) | 271 | if (request->ctx->ban_score > 0) |
262 | intel_lr_context_unpin(request->previous_context, | 272 | request->ctx->ban_score--; |
263 | request->engine); | ||
264 | } | ||
265 | 273 | ||
266 | i915_gem_context_put(request->ctx); | 274 | /* The backing object for the context is done after switching to the |
275 | * *next* context. Therefore we cannot retire the previous context until | ||
276 | * the next context has already started running. However, since we | ||
277 | * cannot take the required locks at i915_gem_request_submit() we | ||
278 | * defer the unpinning of the active context to now, retirement of | ||
279 | * the subsequent request. | ||
280 | */ | ||
281 | if (engine->last_retired_context) | ||
282 | engine->context_unpin(engine, engine->last_retired_context); | ||
283 | engine->last_retired_context = request->ctx; | ||
267 | 284 | ||
268 | dma_fence_signal(&request->fence); | 285 | dma_fence_signal(&request->fence); |
269 | 286 | ||
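
This hunk replaces the per-request previous_context bookkeeping with a single per-engine last_retired_context: the hardware can keep reading a context until the switch to the next one completes, so a context is only unpinned when a later request retires after it. A sketch of that one-retirement-late release, with ctx_unpin() as a hypothetical stand-in for the new engine->context_unpin() hook:

    struct ctx;
    struct engine { struct ctx *last_retired_context; };

    void ctx_unpin(struct engine *e, struct ctx *c);  /* assumed */

    void retire_request(struct engine *e, struct ctx *request_ctx)
    {
            /* Safe to release the previous context only now that a
             * subsequent request (and hence context switch) retired. */
            if (e->last_retired_context)
                    ctx_unpin(e, e->last_retired_context);
            e->last_retired_context = request_ctx;
    }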
@@ -277,6 +294,8 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req) | |||
277 | struct drm_i915_gem_request *tmp; | 294 | struct drm_i915_gem_request *tmp; |
278 | 295 | ||
279 | lockdep_assert_held(&req->i915->drm.struct_mutex); | 296 | lockdep_assert_held(&req->i915->drm.struct_mutex); |
297 | GEM_BUG_ON(!i915_gem_request_completed(req)); | ||
298 | |||
280 | if (list_empty(&req->link)) | 299 | if (list_empty(&req->link)) |
281 | return; | 300 | return; |
282 | 301 | ||
@@ -326,11 +345,11 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno) | |||
326 | GEM_BUG_ON(i915->gt.active_requests > 1); | 345 | GEM_BUG_ON(i915->gt.active_requests > 1); |
327 | 346 | ||
328 | /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ | 347 | /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ |
329 | if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) { | 348 | if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) { |
330 | while (intel_breadcrumbs_busy(i915)) | 349 | while (intel_breadcrumbs_busy(i915)) |
331 | cond_resched(); /* spin until threads are complete */ | 350 | cond_resched(); /* spin until threads are complete */ |
332 | } | 351 | } |
333 | atomic_set(&timeline->next_seqno, seqno); | 352 | atomic_set(&timeline->seqno, seqno); |
334 | 353 | ||
335 | /* Finally reset hw state */ | 354 | /* Finally reset hw state */ |
336 | for_each_engine(engine, i915, id) | 355 | for_each_engine(engine, i915, id) |
@@ -365,11 +384,11 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno) | |||
365 | static int reserve_global_seqno(struct drm_i915_private *i915) | 384 | static int reserve_global_seqno(struct drm_i915_private *i915) |
366 | { | 385 | { |
367 | u32 active_requests = ++i915->gt.active_requests; | 386 | u32 active_requests = ++i915->gt.active_requests; |
368 | u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno); | 387 | u32 seqno = atomic_read(&i915->gt.global_timeline.seqno); |
369 | int ret; | 388 | int ret; |
370 | 389 | ||
371 | /* Reservation is fine until we need to wrap around */ | 390 | /* Reservation is fine until we need to wrap around */ |
372 | if (likely(next_seqno + active_requests > next_seqno)) | 391 | if (likely(seqno + active_requests > seqno)) |
373 | return 0; | 392 | return 0; |
374 | 393 | ||
375 | ret = i915_gem_init_global_seqno(i915, 0); | 394 | ret = i915_gem_init_global_seqno(i915, 0); |
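
reserve_global_seqno() leans on unsigned wraparound for its fast path: for a u32, seqno + active_requests > seqno is false exactly when the addition wraps past 2^32 (active_requests is at least 1 here), which is when the expensive reset to zero is needed. A standalone check:

    #include <assert.h>
    #include <stdint.h>

    static int fits_without_wrap(uint32_t seqno, uint32_t active)
    {
            return seqno + active > seqno;  /* false once the sum wraps */
    }

    int main(void)
    {
            assert(fits_without_wrap(100, 5));
            assert(!fits_without_wrap(UINT32_MAX - 2, 5)); /* wraps */
            return 0;
    }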
@@ -383,13 +402,13 @@ static int reserve_global_seqno(struct drm_i915_private *i915) | |||
383 | 402 | ||
384 | static u32 __timeline_get_seqno(struct i915_gem_timeline *tl) | 403 | static u32 __timeline_get_seqno(struct i915_gem_timeline *tl) |
385 | { | 404 | { |
386 | /* next_seqno only incremented under a mutex */ | 405 | /* seqno only incremented under a mutex */ |
387 | return ++tl->next_seqno.counter; | 406 | return ++tl->seqno.counter; |
388 | } | 407 | } |
389 | 408 | ||
390 | static u32 timeline_get_seqno(struct i915_gem_timeline *tl) | 409 | static u32 timeline_get_seqno(struct i915_gem_timeline *tl) |
391 | { | 410 | { |
392 | return atomic_inc_return(&tl->next_seqno); | 411 | return atomic_inc_return(&tl->seqno); |
393 | } | 412 | } |
394 | 413 | ||
395 | void __i915_gem_request_submit(struct drm_i915_gem_request *request) | 414 | void __i915_gem_request_submit(struct drm_i915_gem_request *request) |
@@ -509,10 +528,18 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
509 | if (ret) | 528 | if (ret) |
510 | return ERR_PTR(ret); | 529 | return ERR_PTR(ret); |
511 | 530 | ||
512 | ret = reserve_global_seqno(dev_priv); | 531 | /* Pinning the contexts may generate requests in order to acquire |
532 | * GGTT space, so do this first before we reserve a seqno for | ||
533 | * ourselves. | ||
534 | */ | ||
535 | ret = engine->context_pin(engine, ctx); | ||
513 | if (ret) | 536 | if (ret) |
514 | return ERR_PTR(ret); | 537 | return ERR_PTR(ret); |
515 | 538 | ||
539 | ret = reserve_global_seqno(dev_priv); | ||
540 | if (ret) | ||
541 | goto err_unpin; | ||
542 | |||
516 | /* Move the oldest request to the slab-cache (if not in use!) */ | 543 | /* Move the oldest request to the slab-cache (if not in use!) */ |
517 | req = list_first_entry_or_null(&engine->timeline->requests, | 544 | req = list_first_entry_or_null(&engine->timeline->requests, |
518 | typeof(*req), link); | 545 | typeof(*req), link); |
@@ -578,11 +605,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
578 | INIT_LIST_HEAD(&req->active_list); | 605 | INIT_LIST_HEAD(&req->active_list); |
579 | req->i915 = dev_priv; | 606 | req->i915 = dev_priv; |
580 | req->engine = engine; | 607 | req->engine = engine; |
581 | req->ctx = i915_gem_context_get(ctx); | 608 | req->ctx = ctx; |
582 | 609 | ||
583 | /* No zalloc, must clear what we need by hand */ | 610 | /* No zalloc, must clear what we need by hand */ |
584 | req->global_seqno = 0; | 611 | req->global_seqno = 0; |
585 | req->previous_context = NULL; | ||
586 | req->file_priv = NULL; | 612 | req->file_priv = NULL; |
587 | req->batch = NULL; | 613 | req->batch = NULL; |
588 | 614 | ||
@@ -596,10 +622,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
596 | req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; | 622 | req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; |
597 | GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz); | 623 | GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz); |
598 | 624 | ||
599 | if (i915.enable_execlists) | 625 | ret = engine->request_alloc(req); |
600 | ret = intel_logical_ring_alloc_request_extras(req); | ||
601 | else | ||
602 | ret = intel_ring_alloc_request_extras(req); | ||
603 | if (ret) | 626 | if (ret) |
604 | goto err_ctx; | 627 | goto err_ctx; |
605 | 628 | ||
@@ -613,10 +636,16 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
613 | return req; | 636 | return req; |
614 | 637 | ||
615 | err_ctx: | 638 | err_ctx: |
616 | i915_gem_context_put(ctx); | 639 | /* Make sure we didn't add ourselves to external state before freeing */ |
640 | GEM_BUG_ON(!list_empty(&req->active_list)); | ||
641 | GEM_BUG_ON(!list_empty(&req->priotree.signalers_list)); | ||
642 | GEM_BUG_ON(!list_empty(&req->priotree.waiters_list)); | ||
643 | |||
617 | kmem_cache_free(dev_priv->requests, req); | 644 | kmem_cache_free(dev_priv->requests, req); |
618 | err_unreserve: | 645 | err_unreserve: |
619 | dev_priv->gt.active_requests--; | 646 | dev_priv->gt.active_requests--; |
647 | err_unpin: | ||
648 | engine->context_unpin(engine, ctx); | ||
620 | return ERR_PTR(ret); | 649 | return ERR_PTR(ret); |
621 | } | 650 | } |
622 | 651 | ||
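
The reshuffled i915_gem_request_alloc() above encodes two rules: acquire in dependency order (pin the context first, because pinning may itself have to submit requests to make GGTT room, then reserve a seqno), and unwind in exact reverse order through the err_unreserve/err_unpin labels. A compact sketch of that shape, all helpers hypothetical:

    int ctx_pin(void *ctx);        /* assumed */
    void ctx_unpin(void *ctx);     /* assumed */
    int seqno_reserve(void *gt);   /* assumed */
    void seqno_release(void *gt);  /* assumed */
    void *req_alloc(void);         /* assumed: NULL on failure */

    void *request_create(void *gt, void *ctx)
    {
            void *req;

            if (ctx_pin(ctx))          /* may generate requests itself */
                    return NULL;

            if (seqno_reserve(gt))
                    goto err_unpin;

            req = req_alloc();
            if (!req)
                    goto err_unreserve;

            return req;

    err_unreserve:
            seqno_release(gt);
    err_unpin:
            ctx_unpin(ctx);            /* reverse order of acquisition */
            return NULL;
    }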
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index d229f47d1028..ea511f06efaf 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h | |||
@@ -170,17 +170,6 @@ struct drm_i915_gem_request { | |||
170 | /** Preallocate space in the ring for the emitting the request */ | 170 | /** Preallocate space in the ring for the emitting the request */ |
171 | u32 reserved_space; | 171 | u32 reserved_space; |
172 | 172 | ||
173 | /** | ||
174 | * Context related to the previous request. | ||
175 | * As the contexts are accessed by the hardware until the switch is | ||
176 | * completed to a new context, the hardware may still be writing | ||
177 | * to the context object after the breadcrumb is visible. We must | ||
178 | * not unpin/unbind/prune that object whilst still active and so | ||
179 | * we keep the previous context pinned until the following (this) | ||
180 | * request is retired. | ||
181 | */ | ||
182 | struct i915_gem_context *previous_context; | ||
183 | |||
184 | /** Batch buffer related to this request if any (used for | 173 | /** Batch buffer related to this request if any (used for |
185 | * error state dump only). | 174 | * error state dump only). |
186 | */ | 175 | */ |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index abc78bbfc1dc..f1a1d33febcd 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -54,12 +54,6 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, | |||
54 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) | 54 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
55 | return -ENODEV; | 55 | return -ENODEV; |
56 | 56 | ||
57 | /* See the comment at the drm_mm_init() call for more about this check. | ||
58 | * WaSkipStolenMemoryFirstPage:bdw+ (incomplete) | ||
59 | */ | ||
60 | if (start < 4096 && INTEL_GEN(dev_priv) >= 8) | ||
61 | start = 4096; | ||
62 | |||
63 | mutex_lock(&dev_priv->mm.stolen_lock); | 57 | mutex_lock(&dev_priv->mm.stolen_lock); |
64 | ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size, | 58 | ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size, |
65 | alignment, start, end, | 59 | alignment, start, end, |
@@ -73,11 +67,8 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, | |||
73 | struct drm_mm_node *node, u64 size, | 67 | struct drm_mm_node *node, u64 size, |
74 | unsigned alignment) | 68 | unsigned alignment) |
75 | { | 69 | { |
76 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | ||
77 | |||
78 | return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, | 70 | return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, |
79 | alignment, 0, | 71 | alignment, 0, U64_MAX); |
80 | ggtt->stolen_usable_size); | ||
81 | } | 72 | } |
82 | 73 | ||
83 | void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, | 74 | void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, |
@@ -152,7 +143,7 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv) | |||
152 | tom = tmp * MB(32); | 143 | tom = tmp * MB(32); |
153 | 144 | ||
154 | base = tom - tseg_size - ggtt->stolen_size; | 145 | base = tom - tseg_size - ggtt->stolen_size; |
155 | } else if (IS_845G(dev_priv)) { | 146 | } else if (IS_I845G(dev_priv)) { |
156 | u32 tseg_size = 0; | 147 | u32 tseg_size = 0; |
157 | u32 tom; | 148 | u32 tom; |
158 | u8 tmp; | 149 | u8 tmp; |
@@ -202,8 +193,8 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv) | |||
202 | return 0; | 193 | return 0; |
203 | 194 | ||
204 | /* make sure we don't clobber the GTT if it's within stolen memory */ | 195 | /* make sure we don't clobber the GTT if it's within stolen memory */ |
205 | if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) && | 196 | if (INTEL_GEN(dev_priv) <= 4 && |
206 | !IS_G4X(dev_priv)) { | 197 | !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) { |
207 | struct { | 198 | struct { |
208 | u32 start, end; | 199 | u32 start, end; |
209 | } stolen[2] = { | 200 | } stolen[2] = { |
@@ -290,14 +281,13 @@ void i915_gem_cleanup_stolen(struct drm_device *dev) | |||
290 | } | 281 | } |
291 | 282 | ||
292 | static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, | 283 | static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, |
293 | unsigned long *base, unsigned long *size) | 284 | phys_addr_t *base, u32 *size) |
294 | { | 285 | { |
295 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 286 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
296 | uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ? | 287 | uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ? |
297 | CTG_STOLEN_RESERVED : | 288 | CTG_STOLEN_RESERVED : |
298 | ELK_STOLEN_RESERVED); | 289 | ELK_STOLEN_RESERVED); |
299 | unsigned long stolen_top = dev_priv->mm.stolen_base + | 290 | phys_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; |
300 | ggtt->stolen_size; | ||
301 | 291 | ||
302 | *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; | 292 | *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; |
303 | 293 | ||
@@ -314,7 +304,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, | |||
314 | } | 304 | } |
315 | 305 | ||
316 | static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, | 306 | static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, |
317 | unsigned long *base, unsigned long *size) | 307 | phys_addr_t *base, u32 *size) |
318 | { | 308 | { |
319 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); | 309 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); |
320 | 310 | ||
@@ -340,7 +330,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, | |||
340 | } | 330 | } |
341 | 331 | ||
342 | static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, | 332 | static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, |
343 | unsigned long *base, unsigned long *size) | 333 | phys_addr_t *base, u32 *size) |
344 | { | 334 | { |
345 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); | 335 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); |
346 | 336 | ||
@@ -359,8 +349,8 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, | |||
359 | } | 349 | } |
360 | } | 350 | } |
361 | 351 | ||
362 | static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv, | 352 | static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, |
363 | unsigned long *base, unsigned long *size) | 353 | phys_addr_t *base, u32 *size) |
364 | { | 354 | { |
365 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); | 355 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); |
366 | 356 | ||
@@ -386,11 +376,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv, | |||
386 | } | 376 | } |
387 | 377 | ||
388 | static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, | 378 | static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, |
389 | unsigned long *base, unsigned long *size) | 379 | phys_addr_t *base, u32 *size) |
390 | { | 380 | { |
391 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 381 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
392 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); | 382 | uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); |
393 | unsigned long stolen_top; | 383 | phys_addr_t stolen_top; |
394 | 384 | ||
395 | stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; | 385 | stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; |
396 | 386 | ||
@@ -409,8 +399,9 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, | |||
409 | int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | 399 | int i915_gem_init_stolen(struct drm_i915_private *dev_priv) |
410 | { | 400 | { |
411 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 401 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
412 | unsigned long reserved_total, reserved_base = 0, reserved_size; | 402 | phys_addr_t reserved_base, stolen_top; |
413 | unsigned long stolen_top; | 403 | u32 reserved_total, reserved_size; |
404 | u32 stolen_usable_start; | ||
414 | 405 | ||
415 | mutex_init(&dev_priv->mm.stolen_lock); | 406 | mutex_init(&dev_priv->mm.stolen_lock); |
416 | 407 | ||
@@ -429,6 +420,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | |||
429 | return 0; | 420 | return 0; |
430 | 421 | ||
431 | stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; | 422 | stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; |
423 | reserved_base = 0; | ||
424 | reserved_size = 0; | ||
432 | 425 | ||
433 | switch (INTEL_INFO(dev_priv)->gen) { | 426 | switch (INTEL_INFO(dev_priv)->gen) { |
434 | case 2: | 427 | case 2: |
@@ -436,8 +429,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | |||
436 | break; | 429 | break; |
437 | case 4: | 430 | case 4: |
438 | if (IS_G4X(dev_priv)) | 431 | if (IS_G4X(dev_priv)) |
439 | g4x_get_stolen_reserved(dev_priv, &reserved_base, | 432 | g4x_get_stolen_reserved(dev_priv, |
440 | &reserved_size); | 433 | &reserved_base, &reserved_size); |
441 | break; | 434 | break; |
442 | case 5: | 435 | case 5: |
443 | /* Assume the gen6 maximum for the older platforms. */ | 436 | /* Assume the gen6 maximum for the older platforms. */ |
@@ -445,21 +438,20 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | |||
445 | reserved_base = stolen_top - reserved_size; | 438 | reserved_base = stolen_top - reserved_size; |
446 | break; | 439 | break; |
447 | case 6: | 440 | case 6: |
448 | gen6_get_stolen_reserved(dev_priv, &reserved_base, | 441 | gen6_get_stolen_reserved(dev_priv, |
449 | &reserved_size); | 442 | &reserved_base, &reserved_size); |
450 | break; | 443 | break; |
451 | case 7: | 444 | case 7: |
452 | gen7_get_stolen_reserved(dev_priv, &reserved_base, | 445 | gen7_get_stolen_reserved(dev_priv, |
453 | &reserved_size); | 446 | &reserved_base, &reserved_size); |
454 | break; | 447 | break; |
455 | default: | 448 | default: |
456 | if (IS_BROADWELL(dev_priv) || | 449 | if (IS_LP(dev_priv)) |
457 | IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 450 | chv_get_stolen_reserved(dev_priv, |
458 | bdw_get_stolen_reserved(dev_priv, &reserved_base, | 451 | &reserved_base, &reserved_size); |
459 | &reserved_size); | ||
460 | else | 452 | else |
461 | gen8_get_stolen_reserved(dev_priv, &reserved_base, | 453 | bdw_get_stolen_reserved(dev_priv, |
462 | &reserved_size); | 454 | &reserved_base, &reserved_size); |
463 | break; | 455 | break; |
464 | } | 456 | } |
465 | 457 | ||
@@ -472,9 +464,10 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | |||
472 | 464 | ||
473 | if (reserved_base < dev_priv->mm.stolen_base || | 465 | if (reserved_base < dev_priv->mm.stolen_base || |
474 | reserved_base + reserved_size > stolen_top) { | 466 | reserved_base + reserved_size > stolen_top) { |
475 | DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n", | 467 | phys_addr_t reserved_top = reserved_base + reserved_size; |
476 | reserved_base, reserved_base + reserved_size, | 468 | DRM_DEBUG_KMS("Stolen reserved area [%pa - %pa] outside stolen memory [%pa - %pa]\n", |
477 | dev_priv->mm.stolen_base, stolen_top); | 469 | &reserved_base, &reserved_top, |
470 | &dev_priv->mm.stolen_base, &stolen_top); | ||
478 | return 0; | 471 | return 0; |
479 | } | 472 | } |
480 | 473 | ||
@@ -485,24 +478,21 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | |||
485 | * memory, so just consider the start. */ | 478 | * memory, so just consider the start. */ |
486 | reserved_total = stolen_top - reserved_base; | 479 | reserved_total = stolen_top - reserved_base; |
487 | 480 | ||
488 | DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n", | 481 | DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n", |
489 | ggtt->stolen_size >> 10, | 482 | ggtt->stolen_size >> 10, |
490 | (ggtt->stolen_size - reserved_total) >> 10); | 483 | (ggtt->stolen_size - reserved_total) >> 10); |
491 | 484 | ||
492 | ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total; | 485 | stolen_usable_start = 0; |
486 | /* WaSkipStolenMemoryFirstPage:bdw+ */ | ||
487 | if (INTEL_GEN(dev_priv) >= 8) | ||
488 | stolen_usable_start = 4096; | ||
493 | 489 | ||
494 | /* | 490 | ggtt->stolen_usable_size = |
495 | * Basic memrange allocator for stolen space. | 491 | ggtt->stolen_size - reserved_total - stolen_usable_start; |
496 | * | 492 | |
497 | * TODO: Notice that some platforms require us to not use the first page | 493 | /* Basic memrange allocator for stolen space. */ |
498 | * of the stolen memory but their BIOSes may still put the framebuffer | 494 | drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start, |
499 | * on the first page. So we don't reserve this page for now because of | 495 | ggtt->stolen_usable_size); |
500 | * that. Our current solution is to just prevent new nodes from being | ||
501 | * inserted on the first page - see the check we have at | ||
502 | * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon | ||
503 | * problem later. | ||
504 | */ | ||
505 | drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size); | ||
506 | 496 | ||
507 | return 0; | 497 | return 0; |
508 | } | 498 | } |
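The rewritten tail of i915_gem_init_stolen() replaces the old TODO with a real fix: on gen8+ the first 4 KiB page of stolen is skipped outright (WaSkipStolenMemoryFirstPage), so the drm_mm allocator now starts at a non-zero offset and the usable size shrinks accordingly. A small sketch of the arithmetic with hypothetical sizes (the drm_mm_init() call noted in the comment is the real DRM API):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t stolen_size    = 64u << 20; /* hypothetical 64 MiB */
            uint32_t reserved_total =  8u << 20; /* hypothetical */
            int gen = 9;

            uint32_t start  = gen >= 8 ? 4096 : 0; /* WaSkipStolenMemoryFirstPage:bdw+ */
            uint32_t usable = stolen_size - reserved_total - start;

            /* the driver then does: drm_mm_init(&mm.stolen, start, usable); */
            printf("allocator range: [%u, %u)\n", start, start + usable);
            return 0;
    }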
@@ -515,7 +505,7 @@ i915_pages_create_for_stolen(struct drm_device *dev, | |||
515 | struct sg_table *st; | 505 | struct sg_table *st; |
516 | struct scatterlist *sg; | 506 | struct scatterlist *sg; |
517 | 507 | ||
518 | GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size); | 508 | GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size)); |
519 | 509 | ||
520 | /* We hide that we have no struct page backing our stolen object | 510 | /* We hide that we have no struct page backing our stolen object |
521 | * by wrapping the contiguous physical allocation with a fake | 511 | * by wrapping the contiguous physical allocation with a fake |
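The open-coded bound check (offset > stolen_size - size) becomes range_overflows(), a helper not defined in this hunk. The sketch below therefore only captures its assumed semantics: true when [start, start + size) does not fit below max, including the wraparound case the naive subtraction misses:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed semantics of range_overflows(start, size, max);
     * not the verbatim i915 macro. */
    static bool range_overflows(uint64_t start, uint64_t size, uint64_t max)
    {
            return start >= max || size > max || start > max - size;
    }

    int main(void)
    {
            printf("%d\n", range_overflows(0x1000, 0x1000, 0x4000)); /* 0: fits            */
            printf("%d\n", range_overflows(0x3800, 0x1000, 0x4000)); /* 1: spills over max */
            printf("%d\n", range_overflows(UINT64_MAX, 2, 0x4000));  /* 1: addition wraps  */
            return 0;
    }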
@@ -578,22 +568,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { | |||
578 | }; | 568 | }; |
579 | 569 | ||
580 | static struct drm_i915_gem_object * | 570 | static struct drm_i915_gem_object * |
581 | _i915_gem_object_create_stolen(struct drm_device *dev, | 571 | _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, |
582 | struct drm_mm_node *stolen) | 572 | struct drm_mm_node *stolen) |
583 | { | 573 | { |
584 | struct drm_i915_gem_object *obj; | 574 | struct drm_i915_gem_object *obj; |
585 | 575 | ||
586 | obj = i915_gem_object_alloc(dev); | 576 | obj = i915_gem_object_alloc(dev_priv); |
587 | if (obj == NULL) | 577 | if (obj == NULL) |
588 | return NULL; | 578 | return NULL; |
589 | 579 | ||
590 | drm_gem_private_object_init(dev, &obj->base, stolen->size); | 580 | drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size); |
591 | i915_gem_object_init(obj, &i915_gem_object_stolen_ops); | 581 | i915_gem_object_init(obj, &i915_gem_object_stolen_ops); |
592 | 582 | ||
593 | obj->stolen = stolen; | 583 | obj->stolen = stolen; |
594 | obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; | 584 | obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; |
595 | obj->cache_level = HAS_LLC(to_i915(dev)) ? | 585 | obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; |
596 | I915_CACHE_LLC : I915_CACHE_NONE; | ||
597 | 586 | ||
598 | if (i915_gem_object_pin_pages(obj)) | 587 | if (i915_gem_object_pin_pages(obj)) |
599 | goto cleanup; | 588 | goto cleanup; |
@@ -606,9 +595,8 @@ cleanup: | |||
606 | } | 595 | } |
607 | 596 | ||
608 | struct drm_i915_gem_object * | 597 | struct drm_i915_gem_object * |
609 | i915_gem_object_create_stolen(struct drm_device *dev, u32 size) | 598 | i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size) |
610 | { | 599 | { |
611 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
612 | struct drm_i915_gem_object *obj; | 600 | struct drm_i915_gem_object *obj; |
613 | struct drm_mm_node *stolen; | 601 | struct drm_mm_node *stolen; |
614 | int ret; | 602 | int ret; |
@@ -629,7 +617,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) | |||
629 | return NULL; | 617 | return NULL; |
630 | } | 618 | } |
631 | 619 | ||
632 | obj = _i915_gem_object_create_stolen(dev, stolen); | 620 | obj = _i915_gem_object_create_stolen(dev_priv, stolen); |
633 | if (obj) | 621 | if (obj) |
634 | return obj; | 622 | return obj; |
635 | 623 | ||
@@ -639,12 +627,11 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) | |||
639 | } | 627 | } |
640 | 628 | ||
641 | struct drm_i915_gem_object * | 629 | struct drm_i915_gem_object * |
642 | i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | 630 | i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, |
643 | u32 stolen_offset, | 631 | u32 stolen_offset, |
644 | u32 gtt_offset, | 632 | u32 gtt_offset, |
645 | u32 size) | 633 | u32 size) |
646 | { | 634 | { |
647 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
648 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 635 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
649 | struct drm_i915_gem_object *obj; | 636 | struct drm_i915_gem_object *obj; |
650 | struct drm_mm_node *stolen; | 637 | struct drm_mm_node *stolen; |
@@ -654,7 +641,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
654 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) | 641 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
655 | return NULL; | 642 | return NULL; |
656 | 643 | ||
657 | lockdep_assert_held(&dev->struct_mutex); | 644 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
658 | 645 | ||
659 | DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", | 646 | DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", |
660 | stolen_offset, gtt_offset, size); | 647 | stolen_offset, gtt_offset, size); |
@@ -679,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
679 | return NULL; | 666 | return NULL; |
680 | } | 667 | } |
681 | 668 | ||
682 | obj = _i915_gem_object_create_stolen(dev, stolen); | 669 | obj = _i915_gem_object_create_stolen(dev_priv, stolen); |
683 | if (obj == NULL) { | 670 | if (obj == NULL) { |
684 | DRM_DEBUG_KMS("failed to allocate stolen object\n"); | 671 | DRM_DEBUG_KMS("failed to allocate stolen object\n"); |
685 | i915_gem_stolen_remove_node(dev_priv, stolen); | 672 | i915_gem_stolen_remove_node(dev_priv, stolen); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index c85e7b06bdba..62ad375de6ca 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -119,7 +119,7 @@ i915_tiling_ok(struct drm_i915_private *dev_priv, | |||
119 | 119 | ||
120 | static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode) | 120 | static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode) |
121 | { | 121 | { |
122 | struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); | 122 | struct drm_i915_private *dev_priv = vma->vm->i915; |
123 | u32 size; | 123 | u32 size; |
124 | 124 | ||
125 | if (!i915_vma_is_map_and_fenceable(vma)) | 125 | if (!i915_vma_is_map_and_fenceable(vma)) |
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c index bf8a471b61e6..b596ca7ee058 100644 --- a/drivers/gpu/drm/i915/i915_gem_timeline.c +++ b/drivers/gpu/drm/i915/i915_gem_timeline.c | |||
@@ -81,10 +81,18 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915) | |||
81 | &class, "&global_timeline->lock"); | 81 | &class, "&global_timeline->lock"); |
82 | } | 82 | } |
83 | 83 | ||
84 | void i915_gem_timeline_fini(struct i915_gem_timeline *tl) | 84 | void i915_gem_timeline_fini(struct i915_gem_timeline *timeline) |
85 | { | 85 | { |
86 | lockdep_assert_held(&tl->i915->drm.struct_mutex); | 86 | int i; |
87 | 87 | ||
88 | list_del(&tl->link); | 88 | lockdep_assert_held(&timeline->i915->drm.struct_mutex); |
89 | kfree(tl->name); | 89 | |
90 | for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) { | ||
91 | struct intel_timeline *tl = &timeline->engine[i]; | ||
92 | |||
93 | GEM_BUG_ON(!list_empty(&tl->requests)); | ||
94 | } | ||
95 | |||
96 | list_del(&timeline->link); | ||
97 | kfree(timeline->name); | ||
90 | } | 98 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h index 98d99a62b4ae..f2e51f42cc2f 100644 --- a/drivers/gpu/drm/i915/i915_gem_timeline.h +++ b/drivers/gpu/drm/i915/i915_gem_timeline.h | |||
@@ -56,7 +56,7 @@ struct intel_timeline { | |||
56 | 56 | ||
57 | struct i915_gem_timeline { | 57 | struct i915_gem_timeline { |
58 | struct list_head link; | 58 | struct list_head link; |
59 | atomic_t next_seqno; | 59 | atomic_t seqno; |
60 | 60 | ||
61 | struct drm_i915_private *i915; | 61 | struct drm_i915_private *i915; |
62 | const char *name; | 62 | const char *name; |
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index d068af2ec3a3..6a8fa085b74e 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
@@ -784,7 +784,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file | |||
784 | return -ENODEV; | 784 | return -ENODEV; |
785 | } | 785 | } |
786 | 786 | ||
787 | obj = i915_gem_object_alloc(dev); | 787 | obj = i915_gem_object_alloc(dev_priv); |
788 | if (obj == NULL) | 788 | if (obj == NULL) |
789 | return -ENOMEM; | 789 | return -ENOMEM; |
790 | 790 | ||
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index ae84aa4b1467..396c6f0fd033 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -176,9 +176,14 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e, | |||
176 | 176 | ||
177 | #ifdef CONFIG_DRM_I915_COMPRESS_ERROR | 177 | #ifdef CONFIG_DRM_I915_COMPRESS_ERROR |
178 | 178 | ||
179 | static bool compress_init(struct z_stream_s *zstream) | 179 | struct compress { |
180 | struct z_stream_s zstream; | ||
181 | void *tmp; | ||
182 | }; | ||
183 | |||
184 | static bool compress_init(struct compress *c) | ||
180 | { | 185 | { |
181 | memset(zstream, 0, sizeof(*zstream)); | 186 | struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream)); |
182 | 187 | ||
183 | zstream->workspace = | 188 | zstream->workspace = |
184 | kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), | 189 | kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), |
@@ -191,14 +196,22 @@ static bool compress_init(struct z_stream_s *zstream) | |||
191 | return false; | 196 | return false; |
192 | } | 197 | } |
193 | 198 | ||
199 | c->tmp = NULL; | ||
200 | if (i915_has_memcpy_from_wc()) | ||
201 | c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN); | ||
202 | |||
194 | return true; | 203 | return true; |
195 | } | 204 | } |
196 | 205 | ||
197 | static int compress_page(struct z_stream_s *zstream, | 206 | static int compress_page(struct compress *c, |
198 | void *src, | 207 | void *src, |
199 | struct drm_i915_error_object *dst) | 208 | struct drm_i915_error_object *dst) |
200 | { | 209 | { |
210 | struct z_stream_s *zstream = &c->zstream; | ||
211 | |||
201 | zstream->next_in = src; | 212 | zstream->next_in = src; |
213 | if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) | ||
214 | zstream->next_in = c->tmp; | ||
202 | zstream->avail_in = PAGE_SIZE; | 215 | zstream->avail_in = PAGE_SIZE; |
203 | 216 | ||
204 | do { | 217 | do { |
@@ -226,9 +239,11 @@ static int compress_page(struct z_stream_s *zstream, | |||
226 | return 0; | 239 | return 0; |
227 | } | 240 | } |
228 | 241 | ||
229 | static void compress_fini(struct z_stream_s *zstream, | 242 | static void compress_fini(struct compress *c, |
230 | struct drm_i915_error_object *dst) | 243 | struct drm_i915_error_object *dst) |
231 | { | 244 | { |
245 | struct z_stream_s *zstream = &c->zstream; | ||
246 | |||
232 | if (dst) { | 247 | if (dst) { |
233 | zlib_deflate(zstream, Z_FINISH); | 248 | zlib_deflate(zstream, Z_FINISH); |
234 | dst->unused = zstream->avail_out; | 249 | dst->unused = zstream->avail_out; |
@@ -236,6 +251,9 @@ static void compress_fini(struct z_stream_s *zstream, | |||
236 | 251 | ||
237 | zlib_deflateEnd(zstream); | 252 | zlib_deflateEnd(zstream); |
238 | kfree(zstream->workspace); | 253 | kfree(zstream->workspace); |
254 | |||
255 | if (c->tmp) | ||
256 | free_page((unsigned long)c->tmp); | ||
239 | } | 257 | } |
240 | 258 | ||
241 | static void err_compression_marker(struct drm_i915_error_state_buf *m) | 259 | static void err_compression_marker(struct drm_i915_error_state_buf *m) |
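Wrapping the z_stream in a struct compress lets compress_init() allocate a scratch page whenever i915_memcpy_from_wc() is usable: compress_page() then streams each page out of the write-combined mapping into cached memory before handing it to zlib, since uncached reads from WC memory are very slow, and falls back to the WC pointer when the copy is not possible. A minimal sketch of that staging decision (fast_wc_copy() and its 16-byte alignment rule are stand-ins for i915_memcpy_from_wc(), not the real helper):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Stand-in for i915_memcpy_from_wc(): may decline the copy. */
    static bool fast_wc_copy(void *dst, const void *src, size_t len)
    {
            if (((uintptr_t)src | len) & 15)  /* assumed alignment rule */
                    return false;
            memcpy(dst, src, len);
            return true;
    }

    /* Pick the buffer the compressor should read: the cached staging
     * page if the streaming copy worked, else the WC mapping itself. */
    static const void *stage_page(const void *wc_src, void *tmp)
    {
            if (tmp && fast_wc_copy(tmp, wc_src, PAGE_SIZE))
                    return tmp;
            return wc_src;
    }

    int main(void)
    {
            static char page[PAGE_SIZE] __attribute__((aligned(16)));
            void *tmp = malloc(PAGE_SIZE);
            const void *src = stage_page(page, tmp);

            (void)src; /* a compressor would consume src here */
            free(tmp);
            return 0;
    }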
@@ -245,28 +263,34 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) | |||
245 | 263 | ||
246 | #else | 264 | #else |
247 | 265 | ||
248 | static bool compress_init(struct z_stream_s *zstream) | 266 | struct compress { |
267 | }; | ||
268 | |||
269 | static bool compress_init(struct compress *c) | ||
249 | { | 270 | { |
250 | return true; | 271 | return true; |
251 | } | 272 | } |
252 | 273 | ||
253 | static int compress_page(struct z_stream_s *zstream, | 274 | static int compress_page(struct compress *c, |
254 | void *src, | 275 | void *src, |
255 | struct drm_i915_error_object *dst) | 276 | struct drm_i915_error_object *dst) |
256 | { | 277 | { |
257 | unsigned long page; | 278 | unsigned long page; |
279 | void *ptr; | ||
258 | 280 | ||
259 | page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); | 281 | page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); |
260 | if (!page) | 282 | if (!page) |
261 | return -ENOMEM; | 283 | return -ENOMEM; |
262 | 284 | ||
263 | dst->pages[dst->page_count++] = | 285 | ptr = (void *)page; |
264 | memcpy((void *)page, src, PAGE_SIZE); | 286 | if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE)) |
287 | memcpy(ptr, src, PAGE_SIZE); | ||
288 | dst->pages[dst->page_count++] = ptr; | ||
265 | 289 | ||
266 | return 0; | 290 | return 0; |
267 | } | 291 | } |
268 | 292 | ||
269 | static void compress_fini(struct z_stream_s *zstream, | 293 | static void compress_fini(struct compress *c, |
270 | struct drm_i915_error_object *dst) | 294 | struct drm_i915_error_object *dst) |
271 | { | 295 | { |
272 | } | 296 | } |
@@ -316,24 +340,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, | |||
316 | } | 340 | } |
317 | } | 341 | } |
318 | 342 | ||
319 | static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a) | ||
320 | { | ||
321 | switch (a) { | ||
322 | case HANGCHECK_IDLE: | ||
323 | return "idle"; | ||
324 | case HANGCHECK_WAIT: | ||
325 | return "wait"; | ||
326 | case HANGCHECK_ACTIVE: | ||
327 | return "active"; | ||
328 | case HANGCHECK_KICK: | ||
329 | return "kick"; | ||
330 | case HANGCHECK_HUNG: | ||
331 | return "hung"; | ||
332 | } | ||
333 | |||
334 | return "unknown"; | ||
335 | } | ||
336 | |||
337 | static void error_print_instdone(struct drm_i915_error_state_buf *m, | 343 | static void error_print_instdone(struct drm_i915_error_state_buf *m, |
338 | struct drm_i915_error_engine *ee) | 344 | struct drm_i915_error_engine *ee) |
339 | { | 345 | { |
@@ -370,8 +376,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m, | |||
370 | if (!erq->seqno) | 376 | if (!erq->seqno) |
371 | return; | 377 | return; |
372 | 378 | ||
373 | err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n", | 379 | err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n", |
374 | prefix, erq->pid, | 380 | prefix, erq->pid, erq->ban_score, |
375 | erq->context, erq->seqno, | 381 | erq->context, erq->seqno, |
376 | jiffies_to_msecs(jiffies - erq->jiffies), | 382 | jiffies_to_msecs(jiffies - erq->jiffies), |
377 | erq->head, erq->tail); | 383 | erq->head, erq->tail); |
@@ -441,9 +447,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, | |||
441 | err_printf(m, " waiting: %s\n", yesno(ee->waiting)); | 447 | err_printf(m, " waiting: %s\n", yesno(ee->waiting)); |
442 | err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); | 448 | err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); |
443 | err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); | 449 | err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); |
444 | err_printf(m, " hangcheck: %s [%d]\n", | 450 | err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled)); |
445 | hangcheck_action_to_str(ee->hangcheck_action), | 451 | err_printf(m, " hangcheck action: %s\n", |
446 | ee->hangcheck_score); | 452 | hangcheck_action_to_str(ee->hangcheck_action)); |
453 | err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n", | ||
454 | ee->hangcheck_timestamp, | ||
455 | jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); | ||
456 | |||
447 | error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); | 457 | error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); |
448 | error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); | 458 | error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); |
449 | } | 459 | } |
@@ -528,11 +538,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, | |||
528 | int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | 538 | int i915_error_state_to_str(struct drm_i915_error_state_buf *m, |
529 | const struct i915_error_state_file_priv *error_priv) | 539 | const struct i915_error_state_file_priv *error_priv) |
530 | { | 540 | { |
531 | struct drm_i915_private *dev_priv = to_i915(error_priv->dev); | 541 | struct drm_i915_private *dev_priv = error_priv->i915; |
532 | struct pci_dev *pdev = dev_priv->drm.pdev; | 542 | struct pci_dev *pdev = dev_priv->drm.pdev; |
533 | struct drm_i915_error_state *error = error_priv->error; | 543 | struct drm_i915_error_state *error = error_priv->error; |
534 | struct drm_i915_error_object *obj; | 544 | struct drm_i915_error_object *obj; |
535 | int max_hangcheck_score; | ||
536 | int i, j; | 545 | int i, j; |
537 | 546 | ||
538 | if (!error) { | 547 | if (!error) { |
@@ -549,22 +558,20 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
549 | err_printf(m, "Uptime: %ld s %ld us\n", | 558 | err_printf(m, "Uptime: %ld s %ld us\n", |
550 | error->uptime.tv_sec, error->uptime.tv_usec); | 559 | error->uptime.tv_sec, error->uptime.tv_usec); |
551 | err_print_capabilities(m, &error->device_info); | 560 | err_print_capabilities(m, &error->device_info); |
552 | max_hangcheck_score = 0; | 561 | |
553 | for (i = 0; i < ARRAY_SIZE(error->engine); i++) { | ||
554 | if (error->engine[i].hangcheck_score > max_hangcheck_score) | ||
555 | max_hangcheck_score = error->engine[i].hangcheck_score; | ||
556 | } | ||
557 | for (i = 0; i < ARRAY_SIZE(error->engine); i++) { | 562 | for (i = 0; i < ARRAY_SIZE(error->engine); i++) { |
558 | if (error->engine[i].hangcheck_score == max_hangcheck_score && | 563 | if (error->engine[i].hangcheck_stalled && |
559 | error->engine[i].pid != -1) { | 564 | error->engine[i].pid != -1) { |
560 | err_printf(m, "Active process (on ring %s): %s [%d]\n", | 565 | err_printf(m, "Active process (on ring %s): %s [%d], context bans %d\n", |
561 | engine_str(i), | 566 | engine_str(i), |
562 | error->engine[i].comm, | 567 | error->engine[i].comm, |
563 | error->engine[i].pid); | 568 | error->engine[i].pid, |
569 | error->engine[i].context_bans); | ||
564 | } | 570 | } |
565 | } | 571 | } |
566 | err_printf(m, "Reset count: %u\n", error->reset_count); | 572 | err_printf(m, "Reset count: %u\n", error->reset_count); |
567 | err_printf(m, "Suspend count: %u\n", error->suspend_count); | 573 | err_printf(m, "Suspend count: %u\n", error->suspend_count); |
574 | err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform)); | ||
568 | err_printf(m, "PCI ID: 0x%04x\n", pdev->device); | 575 | err_printf(m, "PCI ID: 0x%04x\n", pdev->device); |
569 | err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision); | 576 | err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision); |
570 | err_printf(m, "PCI Subsystem: %04x:%04x\n", | 577 | err_printf(m, "PCI Subsystem: %04x:%04x\n", |
@@ -651,9 +658,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
651 | if (obj) { | 658 | if (obj) { |
652 | err_puts(m, dev_priv->engine[i]->name); | 659 | err_puts(m, dev_priv->engine[i]->name); |
653 | if (ee->pid != -1) | 660 | if (ee->pid != -1) |
654 | err_printf(m, " (submitted by %s [%d])", | 661 | err_printf(m, " (submitted by %s [%d], bans %d)", |
655 | ee->comm, | 662 | ee->comm, |
656 | ee->pid); | 663 | ee->pid, |
664 | ee->context_bans); | ||
657 | err_printf(m, " --- gtt_offset = 0x%08x %08x\n", | 665 | err_printf(m, " --- gtt_offset = 0x%08x %08x\n", |
658 | upper_32_bits(obj->gtt_offset), | 666 | upper_32_bits(obj->gtt_offset), |
659 | lower_32_bits(obj->gtt_offset)); | 667 | lower_32_bits(obj->gtt_offset)); |
@@ -801,7 +809,7 @@ i915_error_object_create(struct drm_i915_private *i915, | |||
801 | struct i915_ggtt *ggtt = &i915->ggtt; | 809 | struct i915_ggtt *ggtt = &i915->ggtt; |
802 | const u64 slot = ggtt->error_capture.start; | 810 | const u64 slot = ggtt->error_capture.start; |
803 | struct drm_i915_error_object *dst; | 811 | struct drm_i915_error_object *dst; |
804 | struct z_stream_s zstream; | 812 | struct compress compress; |
805 | unsigned long num_pages; | 813 | unsigned long num_pages; |
806 | struct sgt_iter iter; | 814 | struct sgt_iter iter; |
807 | dma_addr_t dma; | 815 | dma_addr_t dma; |
@@ -821,7 +829,7 @@ i915_error_object_create(struct drm_i915_private *i915, | |||
821 | dst->page_count = 0; | 829 | dst->page_count = 0; |
822 | dst->unused = 0; | 830 | dst->unused = 0; |
823 | 831 | ||
824 | if (!compress_init(&zstream)) { | 832 | if (!compress_init(&compress)) { |
825 | kfree(dst); | 833 | kfree(dst); |
826 | return NULL; | 834 | return NULL; |
827 | } | 835 | } |
@@ -834,7 +842,7 @@ i915_error_object_create(struct drm_i915_private *i915, | |||
834 | I915_CACHE_NONE, 0); | 842 | I915_CACHE_NONE, 0); |
835 | 843 | ||
836 | s = io_mapping_map_atomic_wc(&ggtt->mappable, slot); | 844 | s = io_mapping_map_atomic_wc(&ggtt->mappable, slot); |
837 | ret = compress_page(&zstream, (void __force *)s, dst); | 845 | ret = compress_page(&compress, (void __force *)s, dst); |
838 | io_mapping_unmap_atomic(s); | 846 | io_mapping_unmap_atomic(s); |
839 | 847 | ||
840 | if (ret) | 848 | if (ret) |
@@ -849,7 +857,7 @@ unwind: | |||
849 | dst = NULL; | 857 | dst = NULL; |
850 | 858 | ||
851 | out: | 859 | out: |
852 | compress_fini(&zstream, dst); | 860 | compress_fini(&compress, dst); |
853 | ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE); | 861 | ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE); |
854 | return dst; | 862 | return dst; |
855 | } | 863 | } |
@@ -941,7 +949,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, | |||
941 | * strictly a client bug. Use instdone to differentiate those some. | 949 | * strictly a client bug. Use instdone to differentiate those some. |
942 | */ | 950 | */ |
943 | for (i = 0; i < I915_NUM_ENGINES; i++) { | 951 | for (i = 0; i < I915_NUM_ENGINES; i++) { |
944 | if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) { | 952 | if (error->engine[i].hangcheck_stalled) { |
945 | if (engine_id) | 953 | if (engine_id) |
946 | *engine_id = i; | 954 | *engine_id = i; |
947 | 955 | ||
@@ -1159,8 +1167,9 @@ static void error_record_engine_registers(struct drm_i915_error_state *error, | |||
1159 | ee->hws = I915_READ(mmio); | 1167 | ee->hws = I915_READ(mmio); |
1160 | } | 1168 | } |
1161 | 1169 | ||
1162 | ee->hangcheck_score = engine->hangcheck.score; | 1170 | ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; |
1163 | ee->hangcheck_action = engine->hangcheck.action; | 1171 | ee->hangcheck_action = engine->hangcheck.action; |
1172 | ee->hangcheck_stalled = engine->hangcheck.stalled; | ||
1164 | 1173 | ||
1165 | if (USES_PPGTT(dev_priv)) { | 1174 | if (USES_PPGTT(dev_priv)) { |
1166 | int i; | 1175 | int i; |
@@ -1188,6 +1197,7 @@ static void record_request(struct drm_i915_gem_request *request, | |||
1188 | struct drm_i915_error_request *erq) | 1197 | struct drm_i915_error_request *erq) |
1189 | { | 1198 | { |
1190 | erq->context = request->ctx->hw_id; | 1199 | erq->context = request->ctx->hw_id; |
1200 | erq->ban_score = request->ctx->ban_score; | ||
1191 | erq->seqno = request->global_seqno; | 1201 | erq->seqno = request->global_seqno; |
1192 | erq->jiffies = request->emitted_jiffies; | 1202 | erq->jiffies = request->emitted_jiffies; |
1193 | erq->head = request->head; | 1203 | erq->head = request->head; |
@@ -1321,7 +1331,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, | |||
1321 | } | 1331 | } |
1322 | 1332 | ||
1323 | error->simulated |= | 1333 | error->simulated |= |
1324 | request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE; | 1334 | i915_gem_context_no_error_capture(request->ctx); |
1325 | 1335 | ||
1326 | ee->rq_head = request->head; | 1336 | ee->rq_head = request->head; |
1327 | ee->rq_post = request->postfix; | 1337 | ee->rq_post = request->postfix; |
@@ -1659,9 +1669,8 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv) | |||
1659 | kref_put(&error_priv->error->ref, i915_error_state_free); | 1669 | kref_put(&error_priv->error->ref, i915_error_state_free); |
1660 | } | 1670 | } |
1661 | 1671 | ||
1662 | void i915_destroy_error_state(struct drm_device *dev) | 1672 | void i915_destroy_error_state(struct drm_i915_private *dev_priv) |
1663 | { | 1673 | { |
1664 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1665 | struct drm_i915_error_state *error; | 1674 | struct drm_i915_error_state *error; |
1666 | 1675 | ||
1667 | spin_lock_irq(&dev_priv->gpu_error.lock); | 1676 | spin_lock_irq(&dev_priv->gpu_error.lock); |
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index a47e1e4aec03..6a0adafe0523 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h | |||
@@ -73,6 +73,9 @@ | |||
73 | #define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */ | 73 | #define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */ |
74 | #define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */ | 74 | #define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */ |
75 | 75 | ||
76 | /* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */ | ||
77 | #define GUC_GGTT_TOP 0xFEE00000 | ||
78 | |||
76 | #define GEN8_GT_PM_CONFIG _MMIO(0x138140) | 79 | #define GEN8_GT_PM_CONFIG _MMIO(0x138140) |
77 | #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) | 80 | #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) |
78 | #define GEN9_GT_PM_CONFIG _MMIO(0x13816c) | 81 | #define GEN9_GT_PM_CONFIG _MMIO(0x13816c) |
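Per the new comment, GGTT addresses at or above GUC_GGTT_TOP (0xFEE00000) do not map through the GTT for the GuC, which is why every offset handed to the firmware now goes through guc_ggtt_offset() instead of plain i915_ggtt_offset() in the i915_guc_submission.c hunks below. Its definition is not in this hunk; a plausible sketch of the check it presumably performs:

    #include <assert.h>
    #include <stdint.h>

    #define GUC_GGTT_TOP 0xFEE00000u

    /* Assumed shape of guc_ggtt_offset(): pass the GGTT offset through,
     * asserting the GuC can actually address it. */
    static uint32_t guc_ggtt_offset_sketch(uint32_t ggtt_offset)
    {
            assert(ggtt_offset < GUC_GGTT_TOP);
            return ggtt_offset;
    }

    int main(void)
    {
            return guc_ggtt_offset_sketch(0x00100000u) == 0x00100000u ? 0 : 1;
    }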
@@ -100,8 +103,8 @@ | |||
100 | GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ | 103 | GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ |
101 | GUC_ENABLE_MIA_CLOCK_GATING) | 104 | GUC_ENABLE_MIA_CLOCK_GATING) |
102 | 105 | ||
103 | #define HOST2GUC_INTERRUPT _MMIO(0xc4c8) | 106 | #define GUC_SEND_INTERRUPT _MMIO(0xc4c8) |
104 | #define HOST2GUC_TRIGGER (1<<0) | 107 | #define GUC_SEND_TRIGGER (1<<0) |
105 | 108 | ||
106 | #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) | 109 | #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) |
107 | #define GEN8_DRB_VALID (1<<0) | 110 | #define GEN8_DRB_VALID (1<<0) |
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 4462112725ef..710fbb9fc63f 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c | |||
@@ -21,12 +21,11 @@ | |||
21 | * IN THE SOFTWARE. | 21 | * IN THE SOFTWARE. |
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | #include <linux/firmware.h> | ||
25 | #include <linux/circ_buf.h> | 24 | #include <linux/circ_buf.h> |
26 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
27 | #include <linux/relay.h> | 26 | #include <linux/relay.h> |
28 | #include "i915_drv.h" | 27 | #include "i915_drv.h" |
29 | #include "intel_guc.h" | 28 | #include "intel_uc.h" |
30 | 29 | ||
31 | /** | 30 | /** |
32 | * DOC: GuC-based command submission | 31 | * DOC: GuC-based command submission |
@@ -49,7 +48,7 @@ | |||
49 | * Firmware writes a success/fail code back to the action register after | 48 | * Firmware writes a success/fail code back to the action register after |
50 | * processing the request. The kernel driver polls waiting for this update and | 49 | * processing the request. The kernel driver polls waiting for this update and |
51 | * then proceeds. | 50 | * then proceeds. |
52 | * See host2guc_action() | 51 | * See intel_guc_send() |
53 | * | 52 | * |
54 | * Doorbells: | 53 | * Doorbells: |
55 | * Doorbells are interrupts to the uKernel. A doorbell is a single cache line (QW) | 54 | * Doorbells are interrupts to the uKernel. A doorbell is a single cache line (QW) |
@@ -66,141 +65,29 @@ | |||
66 | */ | 65 | */ |
67 | 66 | ||
68 | /* | 67 | /* |
69 | * Read GuC command/status register (SOFT_SCRATCH_0) | ||
70 | * Return true if it contains a response rather than a command | ||
71 | */ | ||
72 | static inline bool host2guc_action_response(struct drm_i915_private *dev_priv, | ||
73 | u32 *status) | ||
74 | { | ||
75 | u32 val = I915_READ(SOFT_SCRATCH(0)); | ||
76 | *status = val; | ||
77 | return GUC2HOST_IS_RESPONSE(val); | ||
78 | } | ||
79 | |||
80 | static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) | ||
81 | { | ||
82 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
83 | u32 status; | ||
84 | int i; | ||
85 | int ret; | ||
86 | |||
87 | if (WARN_ON(len < 1 || len > 15)) | ||
88 | return -EINVAL; | ||
89 | |||
90 | mutex_lock(&guc->action_lock); | ||
91 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
92 | |||
93 | dev_priv->guc.action_count += 1; | ||
94 | dev_priv->guc.action_cmd = data[0]; | ||
95 | |||
96 | for (i = 0; i < len; i++) | ||
97 | I915_WRITE(SOFT_SCRATCH(i), data[i]); | ||
98 | |||
99 | POSTING_READ(SOFT_SCRATCH(i - 1)); | ||
100 | |||
101 | I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER); | ||
102 | |||
103 | /* | ||
104 | * Fast commands should complete in less than 10us, so sample quickly | ||
105 | * up to that length of time, then switch to a slower sleep-wait loop. | ||
106 | * No HOST2GUC command should ever take longer than 10ms. | ||
107 | */ | ||
108 | ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10); | ||
109 | if (ret) | ||
110 | ret = wait_for(host2guc_action_response(dev_priv, &status), 10); | ||
111 | if (status != GUC2HOST_STATUS_SUCCESS) { | ||
112 | /* | ||
113 | * Either the GuC explicitly returned an error (which | ||
114 | * we convert to -EIO here) or no response at all was | ||
115 | * received within the timeout limit (-ETIMEDOUT) | ||
116 | */ | ||
117 | if (ret != -ETIMEDOUT) | ||
118 | ret = -EIO; | ||
119 | |||
120 | DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n", | ||
121 | data[0], ret, status, I915_READ(SOFT_SCRATCH(15))); | ||
122 | |||
123 | dev_priv->guc.action_fail += 1; | ||
124 | dev_priv->guc.action_err = ret; | ||
125 | } | ||
126 | dev_priv->guc.action_status = status; | ||
127 | |||
128 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
129 | mutex_unlock(&guc->action_lock); | ||
130 | |||
131 | return ret; | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * Tell the GuC to allocate or deallocate a specific doorbell | 68 | * Tell the GuC to allocate or deallocate a specific doorbell |
136 | */ | 69 | */ |
137 | 70 | ||
138 | static int host2guc_allocate_doorbell(struct intel_guc *guc, | 71 | static int guc_allocate_doorbell(struct intel_guc *guc, |
139 | struct i915_guc_client *client) | 72 | struct i915_guc_client *client) |
140 | { | ||
141 | u32 data[2]; | ||
142 | |||
143 | data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL; | ||
144 | data[1] = client->ctx_index; | ||
145 | |||
146 | return host2guc_action(guc, data, 2); | ||
147 | } | ||
148 | |||
149 | static int host2guc_release_doorbell(struct intel_guc *guc, | ||
150 | struct i915_guc_client *client) | ||
151 | { | ||
152 | u32 data[2]; | ||
153 | |||
154 | data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL; | ||
155 | data[1] = client->ctx_index; | ||
156 | |||
157 | return host2guc_action(guc, data, 2); | ||
158 | } | ||
159 | |||
160 | static int host2guc_sample_forcewake(struct intel_guc *guc, | ||
161 | struct i915_guc_client *client) | ||
162 | { | ||
163 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
164 | u32 data[2]; | ||
165 | |||
166 | data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; | ||
167 | /* WaRsDisableCoarsePowerGating:skl,bxt */ | ||
168 | if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) | ||
169 | data[1] = 0; | ||
170 | else | ||
171 | /* bit 0 and 1 are for Render and Media domain separately */ | ||
172 | data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; | ||
173 | |||
174 | return host2guc_action(guc, data, ARRAY_SIZE(data)); | ||
175 | } | ||
176 | |||
177 | static int host2guc_logbuffer_flush_complete(struct intel_guc *guc) | ||
178 | { | ||
179 | u32 data[1]; | ||
180 | |||
181 | data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE; | ||
182 | |||
183 | return host2guc_action(guc, data, 1); | ||
184 | } | ||
185 | |||
186 | static int host2guc_force_logbuffer_flush(struct intel_guc *guc) | ||
187 | { | 73 | { |
188 | u32 data[2]; | 74 | u32 action[] = { |
189 | 75 | INTEL_GUC_ACTION_ALLOCATE_DOORBELL, | |
190 | data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH; | 76 | client->ctx_index |
191 | data[1] = 0; | 77 | }; |
192 | 78 | ||
193 | return host2guc_action(guc, data, 2); | 79 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); |
194 | } | 80 | } |
195 | 81 | ||
196 | static int host2guc_logging_control(struct intel_guc *guc, u32 control_val) | 82 | static int guc_release_doorbell(struct intel_guc *guc, |
83 | struct i915_guc_client *client) | ||
197 | { | 84 | { |
198 | u32 data[2]; | 85 | u32 action[] = { |
199 | 86 | INTEL_GUC_ACTION_DEALLOCATE_DOORBELL, | |
200 | data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING; | 87 | client->ctx_index |
201 | data[1] = control_val; | 88 | }; |
202 | 89 | ||
203 | return host2guc_action(guc, data, 2); | 90 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); |
204 | } | 91 | } |
205 | 92 | ||
206 | /* | 93 | /* |
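The pattern replacing all the host2guc_*() wrappers is visible in the two survivors above: build a u32 action[] whose first element is the action opcode and whose remaining elements are parameters, then hand it to intel_guc_send(), which (per the deleted host2guc_action()) writes the words into the SOFT_SCRATCH registers, rings GUC_SEND_INTERRUPT and polls for a response. A sketch of the calling convention with a stub transport (the opcode value and intel_guc_send_stub() are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define ACTION_ALLOCATE_DOORBELL 0x10 /* hypothetical opcode value */

    /* Stand-in for intel_guc_send(): the real one writes action[] into
     * SOFT_SCRATCH, triggers GUC_SEND_INTERRUPT and polls for status. */
    static int intel_guc_send_stub(const uint32_t *action, unsigned int len)
    {
            printf("send action %#x, %u dwords\n", action[0], len);
            return 0;
    }

    int main(void)
    {
            uint32_t ctx_index = 7; /* hypothetical client context index */
            uint32_t action[] = {
                    ACTION_ALLOCATE_DOORBELL,
                    ctx_index,
            };

            return intel_guc_send_stub(action, sizeof(action) / sizeof(action[0]));
    }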
@@ -226,7 +113,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc, | |||
226 | test_bit(client->doorbell_id, doorbell_bitmap)) { | 113 | test_bit(client->doorbell_id, doorbell_bitmap)) { |
227 | /* Deactivate the old doorbell */ | 114 | /* Deactivate the old doorbell */ |
228 | doorbell->db_status = GUC_DOORBELL_DISABLED; | 115 | doorbell->db_status = GUC_DOORBELL_DISABLED; |
229 | (void)host2guc_release_doorbell(guc, client); | 116 | (void)guc_release_doorbell(guc, client); |
230 | __clear_bit(client->doorbell_id, doorbell_bitmap); | 117 | __clear_bit(client->doorbell_id, doorbell_bitmap); |
231 | } | 118 | } |
232 | 119 | ||
@@ -247,16 +134,9 @@ static int guc_update_doorbell_id(struct intel_guc *guc, | |||
247 | 134 | ||
248 | /* Activate the new doorbell */ | 135 | /* Activate the new doorbell */ |
249 | __set_bit(new_id, doorbell_bitmap); | 136 | __set_bit(new_id, doorbell_bitmap); |
250 | doorbell->cookie = 0; | ||
251 | doorbell->db_status = GUC_DOORBELL_ENABLED; | 137 | doorbell->db_status = GUC_DOORBELL_ENABLED; |
252 | return host2guc_allocate_doorbell(guc, client); | 138 | doorbell->cookie = client->doorbell_cookie; |
253 | } | 139 | return guc_allocate_doorbell(guc, client); |
254 | |||
255 | static int guc_init_doorbell(struct intel_guc *guc, | ||
256 | struct i915_guc_client *client, | ||
257 | uint16_t db_id) | ||
258 | { | ||
259 | return guc_update_doorbell_id(guc, client, db_id); | ||
260 | } | 140 | } |
261 | 141 | ||
262 | static void guc_disable_doorbell(struct intel_guc *guc, | 142 | static void guc_disable_doorbell(struct intel_guc *guc, |
@@ -298,7 +178,7 @@ select_doorbell_register(struct intel_guc *guc, uint32_t priority) | |||
298 | * Select, assign and release doorbell cachelines | 178 | * Select, assign and release doorbell cachelines |
299 | * | 179 | * |
300 | * These functions track which doorbell cachelines are in use. | 180 | * These functions track which doorbell cachelines are in use. |
301 | * The data they manipulate is protected by the host2guc lock. | 181 | * The data they manipulate is protected by the intel_guc_send lock. |
302 | */ | 182 | */ |
303 | 183 | ||
304 | static uint32_t select_doorbell_cacheline(struct intel_guc *guc) | 184 | static uint32_t select_doorbell_cacheline(struct intel_guc *guc) |
@@ -390,11 +270,11 @@ static void guc_ctx_desc_init(struct intel_guc *guc, | |||
390 | 270 | ||
391 | /* The state page is after PPHWSP */ | 271 | /* The state page is after PPHWSP */ |
392 | lrc->ring_lcra = | 272 | lrc->ring_lcra = |
393 | i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; | 273 | guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; |
394 | lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | | 274 | lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | |
395 | (guc_engine_id << GUC_ELC_ENGINE_OFFSET); | 275 | (guc_engine_id << GUC_ELC_ENGINE_OFFSET); |
396 | 276 | ||
397 | lrc->ring_begin = i915_ggtt_offset(ce->ring->vma); | 277 | lrc->ring_begin = guc_ggtt_offset(ce->ring->vma); |
398 | lrc->ring_end = lrc->ring_begin + ce->ring->size - 1; | 278 | lrc->ring_end = lrc->ring_begin + ce->ring->size - 1; |
399 | lrc->ring_next_free_location = lrc->ring_begin; | 279 | lrc->ring_next_free_location = lrc->ring_begin; |
400 | lrc->ring_current_tail_pointer_value = 0; | 280 | lrc->ring_current_tail_pointer_value = 0; |
@@ -410,7 +290,7 @@ static void guc_ctx_desc_init(struct intel_guc *guc, | |||
410 | * The doorbell, process descriptor, and workqueue are all parts | 290 | * The doorbell, process descriptor, and workqueue are all parts |
411 | * of the client object, which the GuC will reference via the GGTT | 291 | * of the client object, which the GuC will reference via the GGTT |
412 | */ | 292 | */ |
413 | gfx_addr = i915_ggtt_offset(client->vma); | 293 | gfx_addr = guc_ggtt_offset(client->vma); |
414 | desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + | 294 | desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + |
415 | client->doorbell_offset; | 295 | client->doorbell_offset; |
416 | desc.db_trigger_cpu = | 296 | desc.db_trigger_cpu = |
@@ -464,22 +344,23 @@ static void guc_ctx_desc_fini(struct intel_guc *guc, | |||
464 | int i915_guc_wq_reserve(struct drm_i915_gem_request *request) | 344 | int i915_guc_wq_reserve(struct drm_i915_gem_request *request) |
465 | { | 345 | { |
466 | const size_t wqi_size = sizeof(struct guc_wq_item); | 346 | const size_t wqi_size = sizeof(struct guc_wq_item); |
467 | struct i915_guc_client *gc = request->i915->guc.execbuf_client; | 347 | struct i915_guc_client *client = request->i915->guc.execbuf_client; |
468 | struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset; | 348 | struct guc_process_desc *desc = client->vaddr + |
349 | client->proc_desc_offset; | ||
469 | u32 freespace; | 350 | u32 freespace; |
470 | int ret; | 351 | int ret; |
471 | 352 | ||
472 | spin_lock(&gc->wq_lock); | 353 | spin_lock(&client->wq_lock); |
473 | freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); | 354 | freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); |
474 | freespace -= gc->wq_rsvd; | 355 | freespace -= client->wq_rsvd; |
475 | if (likely(freespace >= wqi_size)) { | 356 | if (likely(freespace >= wqi_size)) { |
476 | gc->wq_rsvd += wqi_size; | 357 | client->wq_rsvd += wqi_size; |
477 | ret = 0; | 358 | ret = 0; |
478 | } else { | 359 | } else { |
479 | gc->no_wq_space++; | 360 | client->no_wq_space++; |
480 | ret = -EAGAIN; | 361 | ret = -EAGAIN; |
481 | } | 362 | } |
482 | spin_unlock(&gc->wq_lock); | 363 | spin_unlock(&client->wq_lock); |
483 | 364 | ||
484 | return ret; | 365 | return ret; |
485 | } | 366 | } |
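i915_guc_wq_reserve() sizes the claim against the circular work queue with CIRC_SPACE(), then tracks it in wq_rsvd so concurrent reservations cannot oversubscribe the ring. The accounting, reduced to a sketch (CIRC_SPACE follows the <linux/circ_buf.h> semantics for power-of-two sizes; the values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Free space for a producer at 'head' given a consumer at 'tail',
     * power-of-two ring, one slot kept open so empty != full. */
    #define CIRC_SPACE(head, tail, size) \
            (((tail) - ((head) + 1)) & ((size) - 1))

    #define WQI_SIZE 16u /* sizeof(struct guc_wq_item), per the BUILD_BUG_ON below */

    int main(void)
    {
            uint32_t wq_size = 4096, wq_tail = 0, guc_head = 0, wq_rsvd = 0;

            /* i915_guc_wq_reserve(): claim space or report -EAGAIN */
            uint32_t freespace = CIRC_SPACE(wq_tail, guc_head, wq_size) - wq_rsvd;
            if (freespace < WQI_SIZE)
                    return 1; /* -EAGAIN in the driver */
            wq_rsvd += WQI_SIZE;

            printf("reserved %u of %u free bytes\n", wq_rsvd, freespace);
            return 0;
    }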
@@ -487,17 +368,17 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request) | |||
487 | void i915_guc_wq_unreserve(struct drm_i915_gem_request *request) | 368 | void i915_guc_wq_unreserve(struct drm_i915_gem_request *request) |
488 | { | 369 | { |
489 | const size_t wqi_size = sizeof(struct guc_wq_item); | 370 | const size_t wqi_size = sizeof(struct guc_wq_item); |
490 | struct i915_guc_client *gc = request->i915->guc.execbuf_client; | 371 | struct i915_guc_client *client = request->i915->guc.execbuf_client; |
491 | 372 | ||
492 | GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size); | 373 | GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size); |
493 | 374 | ||
494 | spin_lock(&gc->wq_lock); | 375 | spin_lock(&client->wq_lock); |
495 | gc->wq_rsvd -= wqi_size; | 376 | client->wq_rsvd -= wqi_size; |
496 | spin_unlock(&gc->wq_lock); | 377 | spin_unlock(&client->wq_lock); |
497 | } | 378 | } |
498 | 379 | ||
499 | /* Construct a Work Item and append it to the GuC's Work Queue */ | 380 | /* Construct a Work Item and append it to the GuC's Work Queue */ |
500 | static void guc_wq_item_append(struct i915_guc_client *gc, | 381 | static void guc_wq_item_append(struct i915_guc_client *client, |
501 | struct drm_i915_gem_request *rq) | 382 | struct drm_i915_gem_request *rq) |
502 | { | 383 | { |
503 | /* wqi_len is in DWords, and does not include the one-word header */ | 384 | /* wqi_len is in DWords, and does not include the one-word header */ |
@@ -508,10 +389,10 @@ static void guc_wq_item_append(struct i915_guc_client *gc, | |||
508 | struct guc_wq_item *wqi; | 389 | struct guc_wq_item *wqi; |
509 | u32 freespace, tail, wq_off; | 390 | u32 freespace, tail, wq_off; |
510 | 391 | ||
511 | desc = gc->vaddr + gc->proc_desc_offset; | 392 | desc = client->vaddr + client->proc_desc_offset; |
512 | 393 | ||
513 | /* Free space is guaranteed, see i915_guc_wq_reserve() above */ | 394 | /* Free space is guaranteed, see i915_guc_wq_reserve() above */ |
514 | freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); | 395 | freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); |
515 | GEM_BUG_ON(freespace < wqi_size); | 396 | GEM_BUG_ON(freespace < wqi_size); |
516 | 397 | ||
517 | /* The GuC firmware wants the tail index in QWords, not bytes */ | 398 | /* The GuC firmware wants the tail index in QWords, not bytes */ |
@@ -528,17 +409,17 @@ static void guc_wq_item_append(struct i915_guc_client *gc, | |||
528 | * workqueue buffer dw by dw. | 409 | * workqueue buffer dw by dw. |
529 | */ | 410 | */ |
530 | BUILD_BUG_ON(wqi_size != 16); | 411 | BUILD_BUG_ON(wqi_size != 16); |
531 | GEM_BUG_ON(gc->wq_rsvd < wqi_size); | 412 | GEM_BUG_ON(client->wq_rsvd < wqi_size); |
532 | 413 | ||
533 | /* postincrement WQ tail for next time */ | 414 | /* postincrement WQ tail for next time */ |
534 | wq_off = gc->wq_tail; | 415 | wq_off = client->wq_tail; |
535 | GEM_BUG_ON(wq_off & (wqi_size - 1)); | 416 | GEM_BUG_ON(wq_off & (wqi_size - 1)); |
536 | gc->wq_tail += wqi_size; | 417 | client->wq_tail += wqi_size; |
537 | gc->wq_tail &= gc->wq_size - 1; | 418 | client->wq_tail &= client->wq_size - 1; |
538 | gc->wq_rsvd -= wqi_size; | 419 | client->wq_rsvd -= wqi_size; |
539 | 420 | ||
540 | /* WQ starts from the page after doorbell / process_desc */ | 421 | /* WQ starts from the page after doorbell / process_desc */ |
541 | wqi = gc->vaddr + wq_off + GUC_DB_SIZE; | 422 | wqi = client->vaddr + wq_off + GUC_DB_SIZE; |
542 | 423 | ||
543 | /* Now fill in the 4-word work queue item */ | 424 | /* Now fill in the 4-word work queue item */ |
544 | wqi->header = WQ_TYPE_INORDER | | 425 | wqi->header = WQ_TYPE_INORDER | |
@@ -553,30 +434,30 @@ static void guc_wq_item_append(struct i915_guc_client *gc, | |||
553 | wqi->fence_id = rq->global_seqno; | 434 | wqi->fence_id = rq->global_seqno; |
554 | } | 435 | } |
555 | 436 | ||
556 | static int guc_ring_doorbell(struct i915_guc_client *gc) | 437 | static int guc_ring_doorbell(struct i915_guc_client *client) |
557 | { | 438 | { |
558 | struct guc_process_desc *desc; | 439 | struct guc_process_desc *desc; |
559 | union guc_doorbell_qw db_cmp, db_exc, db_ret; | 440 | union guc_doorbell_qw db_cmp, db_exc, db_ret; |
560 | union guc_doorbell_qw *db; | 441 | union guc_doorbell_qw *db; |
561 | int attempt = 2, ret = -EAGAIN; | 442 | int attempt = 2, ret = -EAGAIN; |
562 | 443 | ||
563 | desc = gc->vaddr + gc->proc_desc_offset; | 444 | desc = client->vaddr + client->proc_desc_offset; |
564 | 445 | ||
565 | /* Update the tail so it is visible to GuC */ | 446 | /* Update the tail so it is visible to GuC */ |
566 | desc->tail = gc->wq_tail; | 447 | desc->tail = client->wq_tail; |
567 | 448 | ||
568 | /* current cookie */ | 449 | /* current cookie */ |
569 | db_cmp.db_status = GUC_DOORBELL_ENABLED; | 450 | db_cmp.db_status = GUC_DOORBELL_ENABLED; |
570 | db_cmp.cookie = gc->cookie; | 451 | db_cmp.cookie = client->doorbell_cookie; |
571 | 452 | ||
572 | /* cookie to be updated */ | 453 | /* cookie to be updated */ |
573 | db_exc.db_status = GUC_DOORBELL_ENABLED; | 454 | db_exc.db_status = GUC_DOORBELL_ENABLED; |
574 | db_exc.cookie = gc->cookie + 1; | 455 | db_exc.cookie = client->doorbell_cookie + 1; |
575 | if (db_exc.cookie == 0) | 456 | if (db_exc.cookie == 0) |
576 | db_exc.cookie = 1; | 457 | db_exc.cookie = 1; |
577 | 458 | ||
578 | /* pointer of current doorbell cacheline */ | 459 | /* pointer of current doorbell cacheline */ |
579 | db = gc->vaddr + gc->doorbell_offset; | 460 | db = client->vaddr + client->doorbell_offset; |
580 | 461 | ||
581 | while (attempt--) { | 462 | while (attempt--) { |
582 | /* lets ring the doorbell */ | 463 | /* lets ring the doorbell */ |
@@ -586,7 +467,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) | |||
586 | /* if the exchange was successfully executed */ | 467 | /* if the exchange was successfully executed */ |
587 | if (db_ret.value_qw == db_cmp.value_qw) { | 468 | if (db_ret.value_qw == db_cmp.value_qw) { |
588 | /* db was successfully rung */ | 469 | /* db was successfully rung */ |
589 | gc->cookie = db_exc.cookie; | 470 | client->doorbell_cookie = db_exc.cookie; |
590 | ret = 0; | 471 | ret = 0; |
591 | break; | 472 | break; |
592 | } | 473 | } |
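The renamed doorbell_cookie keeps guc_ring_doorbell()'s handshake intact: the driver updates the shared doorbell cacheline with a compare-and-exchange over the whole qword (status plus cookie), so the write only lands if the GuC has not modified the line in the meantime, and each successful ring bumps the cookie, skipping the value 0. The same loop body in a standalone C11 sketch (the GUC_DOORBELL_ENABLED encoding is assumed):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GUC_DOORBELL_ENABLED 1u /* assumed encoding for this sketch */

    union doorbell_qw {
            struct {
                    uint32_t db_status;
                    uint32_t cookie;
            };
            uint64_t value_qw;
    };

    int main(void)
    {
            _Atomic uint64_t db_line; /* stands in for the shared cacheline */
            union doorbell_qw cmp, exc;

            cmp.db_status = GUC_DOORBELL_ENABLED;
            cmp.cookie = 42;                  /* last cookie we wrote */
            atomic_store(&db_line, cmp.value_qw);

            exc = cmp;
            exc.cookie = cmp.cookie + 1;
            if (exc.cookie == 0)              /* cookie value 0 is skipped */
                    exc.cookie = 1;

            /* Lands only if the line still holds our previous value,
             * mirroring the exchange in guc_ring_doorbell(). */
            uint64_t expected = cmp.value_qw;
            if (atomic_compare_exchange_strong(&db_line, &expected, exc.value_qw))
                    printf("doorbell rung, cookie now %u\n", exc.cookie);
            return 0;
    }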
@@ -609,12 +490,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) | |||
609 | } | 490 | } |
610 | 491 | ||
611 | /** | 492 | /** |
612 | * i915_guc_submit() - Submit commands through GuC | 493 | * __i915_guc_submit() - Submit commands through GuC |
613 | * @rq: request associated with the commands | 494 | * @rq: request associated with the commands |
614 | * | 495 | * |
615 | * Return: 0 on success, otherwise an errno. | ||
616 | * (Note: nonzero really shouldn't happen!) | ||
617 | * | ||
618 | * The caller must have already called i915_guc_wq_reserve() above with | 496 | * The caller must have already called i915_guc_wq_reserve() above with |
619 | * a result of 0 (success), guaranteeing that there is space in the work | 497 | * a result of 0 (success), guaranteeing that there is space in the work |
620 | * queue for the new request, so enqueuing the item cannot fail. | 498 | * queue for the new request, so enqueuing the item cannot fail. |
@@ -626,7 +504,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) | |||
626 | * The only error here arises if the doorbell hardware isn't functioning | 504 | * The only error here arises if the doorbell hardware isn't functioning |
627 | * as expected, which really shouldn't happen. | 505 | * as expected, which really shouldn't happen. |
628 | */ | 506 | */ |
629 | static void i915_guc_submit(struct drm_i915_gem_request *rq) | 507 | static void __i915_guc_submit(struct drm_i915_gem_request *rq) |
630 | { | 508 | { |
631 | struct drm_i915_private *dev_priv = rq->i915; | 509 | struct drm_i915_private *dev_priv = rq->i915; |
632 | struct intel_engine_cs *engine = rq->engine; | 510 | struct intel_engine_cs *engine = rq->engine; |
@@ -635,17 +513,6 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq) | |||
635 | struct i915_guc_client *client = guc->execbuf_client; | 513 | struct i915_guc_client *client = guc->execbuf_client; |
636 | int b_ret; | 514 | int b_ret; |
637 | 515 | ||
638 | /* We keep the previous context alive until we retire the following | ||
639 | * request. This ensures that any the context object is still pinned | ||
640 | * for any residual writes the HW makes into it on the context switch | ||
641 | * into the next object following the breadcrumb. Otherwise, we may | ||
642 | * retire the context too early. | ||
643 | */ | ||
644 | rq->previous_context = engine->last_context; | ||
645 | engine->last_context = rq->ctx; | ||
646 | |||
647 | i915_gem_request_submit(rq); | ||
648 | |||
649 | spin_lock(&client->wq_lock); | 516 | spin_lock(&client->wq_lock); |
650 | guc_wq_item_append(client, rq); | 517 | guc_wq_item_append(client, rq); |
651 | 518 | ||
@@ -665,6 +532,12 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq) | |||
665 | spin_unlock(&client->wq_lock); | 532 | spin_unlock(&client->wq_lock); |
666 | } | 533 | } |
667 | 534 | ||
535 | static void i915_guc_submit(struct drm_i915_gem_request *rq) | ||
536 | { | ||
537 | i915_gem_request_submit(rq); | ||
538 | __i915_guc_submit(rq); | ||
539 | } | ||
540 | |||
668 | /* | 541 | /* |
669 | * Everything below here is concerned with setup & teardown, and is | 542 | * Everything below here is concerned with setup & teardown, and is |
670 | * therefore not part of the somewhat time-critical batch-submission | 543 | * therefore not part of the somewhat time-critical batch-submission |
@@ -691,7 +564,7 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size) | |||
691 | struct i915_vma *vma; | 564 | struct i915_vma *vma; |
692 | int ret; | 565 | int ret; |
693 | 566 | ||
694 | obj = i915_gem_object_create(&dev_priv->drm, size); | 567 | obj = i915_gem_object_create(dev_priv, size); |
695 | if (IS_ERR(obj)) | 568 | if (IS_ERR(obj)) |
696 | return ERR_CAST(obj); | 569 | return ERR_CAST(obj); |
697 | 570 | ||
@@ -779,8 +652,7 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) | |||
779 | uint16_t db_id; | 652 | uint16_t db_id; |
780 | int i, err; | 653 | int i, err; |
781 | 654 | ||
782 | /* Save client's original doorbell selection */ | 655 | guc_disable_doorbell(guc, client); |
783 | db_id = client->doorbell_id; | ||
784 | 656 | ||
785 | for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { | 657 | for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { |
786 | /* Skip if doorbell is OK */ | 658 | /* Skip if doorbell is OK */ |
@@ -793,7 +665,9 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) | |||
793 | i, err); | 665 | i, err); |
794 | } | 666 | } |
795 | 667 | ||
796 | /* Restore to original value */ | 668 | db_id = select_doorbell_register(guc, client->priority); |
669 | WARN_ON(db_id == GUC_INVALID_DOORBELL_ID); | ||
670 | |||
797 | err = guc_update_doorbell_id(guc, client, db_id); | 671 | err = guc_update_doorbell_id(guc, client, db_id); |
798 | if (err) | 672 | if (err) |
799 | DRM_WARN("Failed to restore doorbell to %d, err %d\n", | 673 | DRM_WARN("Failed to restore doorbell to %d, err %d\n", |
@@ -883,8 +757,13 @@ guc_client_alloc(struct drm_i915_private *dev_priv, | |||
883 | 757 | ||
884 | guc_proc_desc_init(guc, client); | 758 | guc_proc_desc_init(guc, client); |
885 | guc_ctx_desc_init(guc, client); | 759 | guc_ctx_desc_init(guc, client); |
886 | if (guc_init_doorbell(guc, client, db_id)) | 760 | |
887 | goto err; | 761 | /* For runtime client allocation we need to enable the doorbell. Not |
762 | * required yet for the static execbuf_client as this special kernel | ||
763 | * client is enabled from i915_guc_submission_enable(). | ||
764 | * | ||
765 | * guc_update_doorbell_id(guc, client, db_id); | ||
766 | */ | ||
888 | 767 | ||
889 | DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n", | 768 | DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n", |
890 | priority, client, client->engines, client->ctx_index); | 769 | priority, client, client->engines, client->ctx_index); |
@@ -1318,7 +1197,7 @@ static void guc_log_create(struct intel_guc *guc) | |||
1318 | * it should be present on the chipsets supporting GuC-based | 1197 | * it should be present on the chipsets supporting GuC-based |
1319 | * submissions. | 1198 | * submissions. |
1320 | */ | 1199 | */ |
1321 | if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) { | 1200 | if (WARN_ON(!i915_has_memcpy_from_wc())) { |
1322 | /* logging will not be enabled */ | 1201 | /* logging will not be enabled */ |
1323 | i915.guc_log_level = -1; | 1202 | i915.guc_log_level = -1; |
1324 | return; | 1203 | return; |
@@ -1347,7 +1226,7 @@ static void guc_log_create(struct intel_guc *guc) | |||
1347 | (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | | 1226 | (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | |
1348 | (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); | 1227 | (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); |
1349 | 1228 | ||
1350 | offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ | 1229 | offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ |
1351 | guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; | 1230 | guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; |
1352 | } | 1231 | } |
1353 | 1232 | ||
@@ -1450,7 +1329,7 @@ static void guc_addon_create(struct intel_guc *guc) | |||
1450 | guc_policies_init(policies); | 1329 | guc_policies_init(policies); |
1451 | 1330 | ||
1452 | ads->scheduler_policies = | 1331 | ads->scheduler_policies = |
1453 | i915_ggtt_offset(vma) + sizeof(struct guc_ads); | 1332 | guc_ggtt_offset(vma) + sizeof(struct guc_ads); |
1454 | 1333 | ||
1455 | /* MMIO reg state */ | 1334 | /* MMIO reg state */ |
1456 | reg_state = (void *)policies + sizeof(struct guc_policies); | 1335 | reg_state = (void *)policies + sizeof(struct guc_policies); |
@@ -1484,6 +1363,9 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) | |||
1484 | struct intel_guc *guc = &dev_priv->guc; | 1363 | struct intel_guc *guc = &dev_priv->guc; |
1485 | struct i915_vma *vma; | 1364 | struct i915_vma *vma; |
1486 | 1365 | ||
1366 | if (!HAS_GUC_SCHED(dev_priv)) | ||
1367 | return 0; | ||
1368 | |||
1487 | /* Wipe bitmap & delete client in case of reinitialisation */ | 1369 | /* Wipe bitmap & delete client in case of reinitialisation */ |
1488 | bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); | 1370 | bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); |
1489 | i915_guc_submission_disable(dev_priv); | 1371 | i915_guc_submission_disable(dev_priv); |
@@ -1500,46 +1382,62 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) | |||
1500 | 1382 | ||
1501 | guc->ctx_pool_vma = vma; | 1383 | guc->ctx_pool_vma = vma; |
1502 | ida_init(&guc->ctx_ids); | 1384 | ida_init(&guc->ctx_ids); |
1503 | mutex_init(&guc->action_lock); | ||
1504 | guc_log_create(guc); | 1385 | guc_log_create(guc); |
1505 | guc_addon_create(guc); | 1386 | guc_addon_create(guc); |
1506 | 1387 | ||
1388 | guc->execbuf_client = guc_client_alloc(dev_priv, | ||
1389 | INTEL_INFO(dev_priv)->ring_mask, | ||
1390 | GUC_CTX_PRIORITY_KMD_NORMAL, | ||
1391 | dev_priv->kernel_context); | ||
1392 | if (!guc->execbuf_client) { | ||
1393 | DRM_ERROR("Failed to create GuC client for execbuf!\n"); | ||
1394 | goto err; | ||
1395 | } | ||
1396 | |||
1507 | return 0; | 1397 | return 0; |
1398 | |||
1399 | err: | ||
1400 | i915_guc_submission_fini(dev_priv); | ||
1401 | return -ENOMEM; | ||
1402 | } | ||
1403 | |||
1404 | static void guc_reset_wq(struct i915_guc_client *client) | ||
1405 | { | ||
1406 | struct guc_process_desc *desc = client->vaddr + | ||
1407 | client->proc_desc_offset; | ||
1408 | |||
1409 | desc->head = 0; | ||
1410 | desc->tail = 0; | ||
1411 | |||
1412 | client->wq_tail = 0; | ||
1508 | } | 1413 | } |
1509 | 1414 | ||
1510 | int i915_guc_submission_enable(struct drm_i915_private *dev_priv) | 1415 | int i915_guc_submission_enable(struct drm_i915_private *dev_priv) |
1511 | { | 1416 | { |
1512 | struct intel_guc *guc = &dev_priv->guc; | 1417 | struct intel_guc *guc = &dev_priv->guc; |
1513 | struct drm_i915_gem_request *request; | 1418 | struct i915_guc_client *client = guc->execbuf_client; |
1514 | struct i915_guc_client *client; | ||
1515 | struct intel_engine_cs *engine; | 1419 | struct intel_engine_cs *engine; |
1516 | enum intel_engine_id id; | 1420 | enum intel_engine_id id; |
1517 | 1421 | ||
1518 | /* client for execbuf submission */ | 1422 | if (!client) |
1519 | client = guc_client_alloc(dev_priv, | 1423 | return -ENODEV; |
1520 | INTEL_INFO(dev_priv)->ring_mask, | ||
1521 | GUC_CTX_PRIORITY_KMD_NORMAL, | ||
1522 | dev_priv->kernel_context); | ||
1523 | if (!client) { | ||
1524 | DRM_ERROR("Failed to create normal GuC client!\n"); | ||
1525 | return -ENOMEM; | ||
1526 | } | ||
1527 | 1424 | ||
1528 | guc->execbuf_client = client; | 1425 | intel_guc_sample_forcewake(guc); |
1529 | host2guc_sample_forcewake(guc, client); | 1426 | |
1427 | guc_reset_wq(client); | ||
1530 | guc_init_doorbell_hw(guc); | 1428 | guc_init_doorbell_hw(guc); |
1531 | 1429 | ||
1532 | /* Take over from manual control of ELSP (execlists) */ | 1430 | /* Take over from manual control of ELSP (execlists) */ |
1533 | for_each_engine(engine, dev_priv, id) { | 1431 | for_each_engine(engine, dev_priv, id) { |
1432 | struct drm_i915_gem_request *rq; | ||
1433 | |||
1534 | engine->submit_request = i915_guc_submit; | 1434 | engine->submit_request = i915_guc_submit; |
1535 | engine->schedule = NULL; | 1435 | engine->schedule = NULL; |
1536 | 1436 | ||
1537 | /* Replay the current set of previously submitted requests */ | 1437 | /* Replay the current set of previously submitted requests */ |
1538 | list_for_each_entry(request, | 1438 | list_for_each_entry(rq, &engine->timeline->requests, link) { |
1539 | &engine->timeline->requests, link) { | ||
1540 | client->wq_rsvd += sizeof(struct guc_wq_item); | 1439 | client->wq_rsvd += sizeof(struct guc_wq_item); |
1541 | if (i915_sw_fence_done(&request->submit)) | 1440 | __i915_guc_submit(rq); |
1542 | i915_guc_submit(request); | ||
1543 | } | 1441 | } |
1544 | } | 1442 | } |
1545 | 1443 | ||
@@ -1555,14 +1453,18 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv) | |||
1555 | 1453 | ||
1556 | /* Revert back to manual ELSP submission */ | 1454 | /* Revert back to manual ELSP submission */ |
1557 | intel_execlists_enable_submission(dev_priv); | 1455 | intel_execlists_enable_submission(dev_priv); |
1558 | |||
1559 | guc_client_free(dev_priv, guc->execbuf_client); | ||
1560 | guc->execbuf_client = NULL; | ||
1561 | } | 1456 | } |
1562 | 1457 | ||
1563 | void i915_guc_submission_fini(struct drm_i915_private *dev_priv) | 1458 | void i915_guc_submission_fini(struct drm_i915_private *dev_priv) |
1564 | { | 1459 | { |
1565 | struct intel_guc *guc = &dev_priv->guc; | 1460 | struct intel_guc *guc = &dev_priv->guc; |
1461 | struct i915_guc_client *client; | ||
1462 | |||
1463 | client = fetch_and_zero(&guc->execbuf_client); | ||
1464 | if (!client) | ||
1465 | return; | ||
1466 | |||
1467 | guc_client_free(dev_priv, client); | ||
1566 | 1468 | ||
1567 | i915_vma_unpin_and_release(&guc->ads_vma); | 1469 | i915_vma_unpin_and_release(&guc->ads_vma); |
1568 | i915_vma_unpin_and_release(&guc->log.vma); | 1470 | i915_vma_unpin_and_release(&guc->log.vma); |
@@ -1574,11 +1476,10 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv) | |||
1574 | 1476 | ||
1575 | /** | 1477 | /** |
1576 | * intel_guc_suspend() - notify GuC entering suspend state | 1478 | * intel_guc_suspend() - notify GuC entering suspend state |
1577 | * @dev: drm device | 1479 | * @dev_priv: i915 device private |
1578 | */ | 1480 | */ |
1579 | int intel_guc_suspend(struct drm_device *dev) | 1481 | int intel_guc_suspend(struct drm_i915_private *dev_priv) |
1580 | { | 1482 | { |
1581 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1582 | struct intel_guc *guc = &dev_priv->guc; | 1483 | struct intel_guc *guc = &dev_priv->guc; |
1583 | struct i915_gem_context *ctx; | 1484 | struct i915_gem_context *ctx; |
1584 | u32 data[3]; | 1485 | u32 data[3]; |
@@ -1590,23 +1491,22 @@ int intel_guc_suspend(struct drm_device *dev) | |||
1590 | 1491 | ||
1591 | ctx = dev_priv->kernel_context; | 1492 | ctx = dev_priv->kernel_context; |
1592 | 1493 | ||
1593 | data[0] = HOST2GUC_ACTION_ENTER_S_STATE; | 1494 | data[0] = INTEL_GUC_ACTION_ENTER_S_STATE; |
1594 | /* any value greater than GUC_POWER_D0 */ | 1495 | /* any value greater than GUC_POWER_D0 */ |
1595 | data[1] = GUC_POWER_D1; | 1496 | data[1] = GUC_POWER_D1; |
1596 | /* first page is shared data with GuC */ | 1497 | /* first page is shared data with GuC */ |
1597 | data[2] = i915_ggtt_offset(ctx->engine[RCS].state); | 1498 | data[2] = guc_ggtt_offset(ctx->engine[RCS].state); |
1598 | 1499 | ||
1599 | return host2guc_action(guc, data, ARRAY_SIZE(data)); | 1500 | return intel_guc_send(guc, data, ARRAY_SIZE(data)); |
1600 | } | 1501 | } |
1601 | 1502 | ||
1602 | 1503 | ||
1603 | /** | 1504 | /** |
1604 | * intel_guc_resume() - notify GuC resuming from suspend state | 1505 | * intel_guc_resume() - notify GuC resuming from suspend state |
1605 | * @dev: drm device | 1506 | * @dev_priv: i915 device private |
1606 | */ | 1507 | */ |
1607 | int intel_guc_resume(struct drm_device *dev) | 1508 | int intel_guc_resume(struct drm_i915_private *dev_priv) |
1608 | { | 1509 | { |
1609 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1610 | struct intel_guc *guc = &dev_priv->guc; | 1510 | struct intel_guc *guc = &dev_priv->guc; |
1611 | struct i915_gem_context *ctx; | 1511 | struct i915_gem_context *ctx; |
1612 | u32 data[3]; | 1512 | u32 data[3]; |
@@ -1619,12 +1519,12 @@ int intel_guc_resume(struct drm_device *dev) | |||
1619 | 1519 | ||
1620 | ctx = dev_priv->kernel_context; | 1520 | ctx = dev_priv->kernel_context; |
1621 | 1521 | ||
1622 | data[0] = HOST2GUC_ACTION_EXIT_S_STATE; | 1522 | data[0] = INTEL_GUC_ACTION_EXIT_S_STATE; |
1623 | data[1] = GUC_POWER_D0; | 1523 | data[1] = GUC_POWER_D0; |
1624 | /* first page is shared data with GuC */ | 1524 | /* first page is shared data with GuC */ |
1625 | data[2] = i915_ggtt_offset(ctx->engine[RCS].state); | 1525 | data[2] = guc_ggtt_offset(ctx->engine[RCS].state); |
1626 | 1526 | ||
1627 | return host2guc_action(guc, data, ARRAY_SIZE(data)); | 1527 | return intel_guc_send(guc, data, ARRAY_SIZE(data)); |
1628 | } | 1528 | } |
1629 | 1529 | ||
1630 | void i915_guc_capture_logs(struct drm_i915_private *dev_priv) | 1530 | void i915_guc_capture_logs(struct drm_i915_private *dev_priv) |
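The suspend/resume hunks above also show the new firmware-message convention: host2guc_action() becomes intel_guc_send(), taking a small u32 action array with the INTEL_GUC_ACTION_* code first. A minimal sketch of that shape, reusing the same ENTER_S_STATE action as the diff (the wrapper name guc_sleep_sketch is invented for illustration):

	/* Sketch only: mirrors the intel_guc_suspend() hunk above.
	 * intel_guc_send() marshals the array into GuC scratch
	 * registers and waits for the firmware's reply. */
	static int guc_sleep_sketch(struct intel_guc *guc, u32 shared_page)
	{
		u32 action[] = {
			INTEL_GUC_ACTION_ENTER_S_STATE,
			GUC_POWER_D1,	/* any value above GUC_POWER_D0 */
			shared_page,	/* from guc_ggtt_offset() */
		};

		return intel_guc_send(guc, action, ARRAY_SIZE(action));
	}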
@@ -1635,7 +1535,7 @@ void i915_guc_capture_logs(struct drm_i915_private *dev_priv) | |||
1635 | * time, so get/put should be really quick. | 1535 | * time, so get/put should be really quick. |
1636 | */ | 1536 | */ |
1637 | intel_runtime_pm_get(dev_priv); | 1537 | intel_runtime_pm_get(dev_priv); |
1638 | host2guc_logbuffer_flush_complete(&dev_priv->guc); | 1538 | intel_guc_log_flush_complete(&dev_priv->guc); |
1639 | intel_runtime_pm_put(dev_priv); | 1539 | intel_runtime_pm_put(dev_priv); |
1640 | } | 1540 | } |
1641 | 1541 | ||
@@ -1653,7 +1553,7 @@ void i915_guc_flush_logs(struct drm_i915_private *dev_priv) | |||
1653 | flush_work(&dev_priv->guc.log.flush_work); | 1553 | flush_work(&dev_priv->guc.log.flush_work); |
1654 | 1554 | ||
1655 | /* Ask GuC to update the log buffer state */ | 1555 | /* Ask GuC to update the log buffer state */ |
1656 | host2guc_force_logbuffer_flush(&dev_priv->guc); | 1556 | intel_guc_log_flush(&dev_priv->guc); |
1657 | 1557 | ||
1658 | /* GuC would have updated log buffer by now, so capture it */ | 1558 | /* GuC would have updated log buffer by now, so capture it */ |
1659 | i915_guc_capture_logs(dev_priv); | 1559 | i915_guc_capture_logs(dev_priv); |
@@ -1694,9 +1594,9 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) | |||
1694 | if (!log_param.logging_enabled && (i915.guc_log_level < 0)) | 1594 | if (!log_param.logging_enabled && (i915.guc_log_level < 0)) |
1695 | return 0; | 1595 | return 0; |
1696 | 1596 | ||
1697 | ret = host2guc_logging_control(&dev_priv->guc, log_param.value); | 1597 | ret = intel_guc_log_control(&dev_priv->guc, log_param.value); |
1698 | if (ret < 0) { | 1598 | if (ret < 0) { |
1699 | DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret); | 1599 | DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret); |
1700 | return ret; | 1600 | return ret; |
1701 | } | 1601 | } |
1702 | 1602 | ||
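Net effect of the i915_guc_submission.c changes: guc_client_alloc() moves from enable to init time, i915_guc_submission_enable() becomes an idempotent switch-over (reset the work queue, point engine->submit_request at i915_guc_submit()), and fini tears the client down via fetch_and_zero(). A hedged sketch of the resulting driver-load ordering (the wrapper name is invented; the entry points are the ones diffed above):

	/* Illustrative only; error handling trimmed to the essentials. */
	static int guc_submission_load_sketch(struct drm_i915_private *i915)
	{
		int ret;

		/* One-time allocation: ctx pool, log, ADS, execbuf client.
		 * Returns 0 straight away when !HAS_GUC_SCHED(). */
		ret = i915_guc_submission_init(i915);
		if (ret)
			return ret;

		/* Safe to repeat across reset/resume: only resets the
		 * client work queue and redirects engine submission. */
		ret = i915_guc_submission_enable(i915);
		if (ret) {
			i915_guc_submission_fini(i915);
			return ret;
		}

		return 0;
	}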
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 07ca71cabb2b..a0e70f5b3aad 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1683,8 +1683,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) | |||
1683 | u32 msg, flush; | 1683 | u32 msg, flush; |
1684 | 1684 | ||
1685 | msg = I915_READ(SOFT_SCRATCH(15)); | 1685 | msg = I915_READ(SOFT_SCRATCH(15)); |
1686 | flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED | | 1686 | flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | |
1687 | GUC2HOST_MSG_FLUSH_LOG_BUFFER); | 1687 | INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER); |
1688 | if (flush) { | 1688 | if (flush) { |
1689 | /* Clear the message bits that are handled */ | 1689 | /* Clear the message bits that are handled */ |
1690 | I915_WRITE(SOFT_SCRATCH(15), msg & ~flush); | 1690 | I915_WRITE(SOFT_SCRATCH(15), msg & ~flush); |
@@ -2435,7 +2435,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) | |||
2435 | found = true; | 2435 | found = true; |
2436 | } | 2436 | } |
2437 | 2437 | ||
2438 | if (IS_BROXTON(dev_priv)) { | 2438 | if (IS_GEN9_LP(dev_priv)) { |
2439 | tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; | 2439 | tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; |
2440 | if (tmp_mask) { | 2440 | if (tmp_mask) { |
2441 | bxt_hpd_irq_handler(dev_priv, tmp_mask, | 2441 | bxt_hpd_irq_handler(dev_priv, tmp_mask, |
@@ -2451,7 +2451,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) | |||
2451 | } | 2451 | } |
2452 | } | 2452 | } |
2453 | 2453 | ||
2454 | if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { | 2454 | if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { |
2455 | gmbus_irq_handler(dev_priv); | 2455 | gmbus_irq_handler(dev_priv); |
2456 | found = true; | 2456 | found = true; |
2457 | } | 2457 | } |
@@ -3375,7 +3375,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
3375 | GEN9_DE_PIPE_IRQ_FAULT_ERRORS; | 3375 | GEN9_DE_PIPE_IRQ_FAULT_ERRORS; |
3376 | de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | | 3376 | de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | |
3377 | GEN9_AUX_CHANNEL_D; | 3377 | GEN9_AUX_CHANNEL_D; |
3378 | if (IS_BROXTON(dev_priv)) | 3378 | if (IS_GEN9_LP(dev_priv)) |
3379 | de_port_masked |= BXT_DE_PORT_GMBUS; | 3379 | de_port_masked |= BXT_DE_PORT_GMBUS; |
3380 | } else { | 3380 | } else { |
3381 | de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | | 3381 | de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | |
@@ -3386,7 +3386,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
3386 | GEN8_PIPE_FIFO_UNDERRUN; | 3386 | GEN8_PIPE_FIFO_UNDERRUN; |
3387 | 3387 | ||
3388 | de_port_enables = de_port_masked; | 3388 | de_port_enables = de_port_masked; |
3389 | if (IS_BROXTON(dev_priv)) | 3389 | if (IS_GEN9_LP(dev_priv)) |
3390 | de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; | 3390 | de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; |
3391 | else if (IS_BROADWELL(dev_priv)) | 3391 | else if (IS_BROADWELL(dev_priv)) |
3392 | de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; | 3392 | de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; |
@@ -4211,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
4211 | dev->driver->irq_uninstall = gen8_irq_uninstall; | 4211 | dev->driver->irq_uninstall = gen8_irq_uninstall; |
4212 | dev->driver->enable_vblank = gen8_enable_vblank; | 4212 | dev->driver->enable_vblank = gen8_enable_vblank; |
4213 | dev->driver->disable_vblank = gen8_disable_vblank; | 4213 | dev->driver->disable_vblank = gen8_disable_vblank; |
4214 | if (IS_BROXTON(dev_priv)) | 4214 | if (IS_GEN9_LP(dev_priv)) |
4215 | dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; | 4215 | dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; |
4216 | else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) | 4216 | else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) |
4217 | dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; | 4217 | dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; |
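The IS_BROXTON() → IS_GEN9_LP() substitutions in i915_irq.c prepare these paths for Geminilake, which shares Broxton's low-power display block; the .is_lp = 1 flag added to the device info tables later in this diff feeds the same predicate. A sketch of its likely shape (the authoritative definition is in i915_drv.h):

	/* Sketch only; check i915_drv.h for the exact macros. */
	#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
	#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))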
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c new file mode 100644 index 000000000000..4ddf756add31 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_hsw.c | |||
@@ -0,0 +1,752 @@ | |||
1 | /* | ||
2 | * Autogenerated file, DO NOT EDIT manually! | ||
3 | * | ||
4 | * Copyright (c) 2015 Intel Corporation | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
23 | * IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/sysfs.h> | ||
28 | |||
29 | #include "i915_drv.h" | ||
30 | #include "i915_oa_hsw.h" | ||
31 | |||
32 | enum metric_set_id { | ||
33 | METRIC_SET_ID_RENDER_BASIC = 1, | ||
34 | METRIC_SET_ID_COMPUTE_BASIC, | ||
35 | METRIC_SET_ID_COMPUTE_EXTENDED, | ||
36 | METRIC_SET_ID_MEMORY_READS, | ||
37 | METRIC_SET_ID_MEMORY_WRITES, | ||
38 | METRIC_SET_ID_SAMPLER_BALANCE, | ||
39 | }; | ||
40 | |||
41 | int i915_oa_n_builtin_metric_sets_hsw = 6; | ||
42 | |||
43 | static const struct i915_oa_reg b_counter_config_render_basic[] = { | ||
44 | { _MMIO(0x2724), 0x00800000 }, | ||
45 | { _MMIO(0x2720), 0x00000000 }, | ||
46 | { _MMIO(0x2714), 0x00800000 }, | ||
47 | { _MMIO(0x2710), 0x00000000 }, | ||
48 | }; | ||
49 | |||
50 | static const struct i915_oa_reg mux_config_render_basic[] = { | ||
51 | { _MMIO(0x253a4), 0x01600000 }, | ||
52 | { _MMIO(0x25440), 0x00100000 }, | ||
53 | { _MMIO(0x25128), 0x00000000 }, | ||
54 | { _MMIO(0x2691c), 0x00000800 }, | ||
55 | { _MMIO(0x26aa0), 0x01500000 }, | ||
56 | { _MMIO(0x26b9c), 0x00006000 }, | ||
57 | { _MMIO(0x2791c), 0x00000800 }, | ||
58 | { _MMIO(0x27aa0), 0x01500000 }, | ||
59 | { _MMIO(0x27b9c), 0x00006000 }, | ||
60 | { _MMIO(0x2641c), 0x00000400 }, | ||
61 | { _MMIO(0x25380), 0x00000010 }, | ||
62 | { _MMIO(0x2538c), 0x00000000 }, | ||
63 | { _MMIO(0x25384), 0x0800aaaa }, | ||
64 | { _MMIO(0x25400), 0x00000004 }, | ||
65 | { _MMIO(0x2540c), 0x06029000 }, | ||
66 | { _MMIO(0x25410), 0x00000002 }, | ||
67 | { _MMIO(0x25404), 0x5c30ffff }, | ||
68 | { _MMIO(0x25100), 0x00000016 }, | ||
69 | { _MMIO(0x25110), 0x00000400 }, | ||
70 | { _MMIO(0x25104), 0x00000000 }, | ||
71 | { _MMIO(0x26804), 0x00001211 }, | ||
72 | { _MMIO(0x26884), 0x00000100 }, | ||
73 | { _MMIO(0x26900), 0x00000002 }, | ||
74 | { _MMIO(0x26908), 0x00700000 }, | ||
75 | { _MMIO(0x26904), 0x00000000 }, | ||
76 | { _MMIO(0x26984), 0x00001022 }, | ||
77 | { _MMIO(0x26a04), 0x00000011 }, | ||
78 | { _MMIO(0x26a80), 0x00000006 }, | ||
79 | { _MMIO(0x26a88), 0x00000c02 }, | ||
80 | { _MMIO(0x26a84), 0x00000000 }, | ||
81 | { _MMIO(0x26b04), 0x00001000 }, | ||
82 | { _MMIO(0x26b80), 0x00000002 }, | ||
83 | { _MMIO(0x26b8c), 0x00000007 }, | ||
84 | { _MMIO(0x26b84), 0x00000000 }, | ||
85 | { _MMIO(0x27804), 0x00004844 }, | ||
86 | { _MMIO(0x27884), 0x00000400 }, | ||
87 | { _MMIO(0x27900), 0x00000002 }, | ||
88 | { _MMIO(0x27908), 0x0e000000 }, | ||
89 | { _MMIO(0x27904), 0x00000000 }, | ||
90 | { _MMIO(0x27984), 0x00004088 }, | ||
91 | { _MMIO(0x27a04), 0x00000044 }, | ||
92 | { _MMIO(0x27a80), 0x00000006 }, | ||
93 | { _MMIO(0x27a88), 0x00018040 }, | ||
94 | { _MMIO(0x27a84), 0x00000000 }, | ||
95 | { _MMIO(0x27b04), 0x00004000 }, | ||
96 | { _MMIO(0x27b80), 0x00000002 }, | ||
97 | { _MMIO(0x27b8c), 0x000000e0 }, | ||
98 | { _MMIO(0x27b84), 0x00000000 }, | ||
99 | { _MMIO(0x26104), 0x00002222 }, | ||
100 | { _MMIO(0x26184), 0x0c006666 }, | ||
101 | { _MMIO(0x26284), 0x04000000 }, | ||
102 | { _MMIO(0x26304), 0x04000000 }, | ||
103 | { _MMIO(0x26400), 0x00000002 }, | ||
104 | { _MMIO(0x26410), 0x000000a0 }, | ||
105 | { _MMIO(0x26404), 0x00000000 }, | ||
106 | { _MMIO(0x25420), 0x04108020 }, | ||
107 | { _MMIO(0x25424), 0x1284a420 }, | ||
108 | { _MMIO(0x2541c), 0x00000000 }, | ||
109 | { _MMIO(0x25428), 0x00042049 }, | ||
110 | }; | ||
111 | |||
112 | static const struct i915_oa_reg * | ||
113 | get_render_basic_mux_config(struct drm_i915_private *dev_priv, | ||
114 | int *len) | ||
115 | { | ||
116 | *len = ARRAY_SIZE(mux_config_render_basic); | ||
117 | return mux_config_render_basic; | ||
118 | } | ||
119 | |||
120 | static const struct i915_oa_reg b_counter_config_compute_basic[] = { | ||
121 | { _MMIO(0x2710), 0x00000000 }, | ||
122 | { _MMIO(0x2714), 0x00800000 }, | ||
123 | { _MMIO(0x2718), 0xaaaaaaaa }, | ||
124 | { _MMIO(0x271c), 0xaaaaaaaa }, | ||
125 | { _MMIO(0x2720), 0x00000000 }, | ||
126 | { _MMIO(0x2724), 0x00800000 }, | ||
127 | { _MMIO(0x2728), 0xaaaaaaaa }, | ||
128 | { _MMIO(0x272c), 0xaaaaaaaa }, | ||
129 | { _MMIO(0x2740), 0x00000000 }, | ||
130 | { _MMIO(0x2744), 0x00000000 }, | ||
131 | { _MMIO(0x2748), 0x00000000 }, | ||
132 | { _MMIO(0x274c), 0x00000000 }, | ||
133 | { _MMIO(0x2750), 0x00000000 }, | ||
134 | { _MMIO(0x2754), 0x00000000 }, | ||
135 | { _MMIO(0x2758), 0x00000000 }, | ||
136 | { _MMIO(0x275c), 0x00000000 }, | ||
137 | { _MMIO(0x236c), 0x00000000 }, | ||
138 | }; | ||
139 | |||
140 | static const struct i915_oa_reg mux_config_compute_basic[] = { | ||
141 | { _MMIO(0x253a4), 0x00000000 }, | ||
142 | { _MMIO(0x2681c), 0x01f00800 }, | ||
143 | { _MMIO(0x26820), 0x00001000 }, | ||
144 | { _MMIO(0x2781c), 0x01f00800 }, | ||
145 | { _MMIO(0x26520), 0x00000007 }, | ||
146 | { _MMIO(0x265a0), 0x00000007 }, | ||
147 | { _MMIO(0x25380), 0x00000010 }, | ||
148 | { _MMIO(0x2538c), 0x00300000 }, | ||
149 | { _MMIO(0x25384), 0xaa8aaaaa }, | ||
150 | { _MMIO(0x25404), 0xffffffff }, | ||
151 | { _MMIO(0x26800), 0x00004202 }, | ||
152 | { _MMIO(0x26808), 0x00605817 }, | ||
153 | { _MMIO(0x2680c), 0x10001005 }, | ||
154 | { _MMIO(0x26804), 0x00000000 }, | ||
155 | { _MMIO(0x27800), 0x00000102 }, | ||
156 | { _MMIO(0x27808), 0x0c0701e0 }, | ||
157 | { _MMIO(0x2780c), 0x000200a0 }, | ||
158 | { _MMIO(0x27804), 0x00000000 }, | ||
159 | { _MMIO(0x26484), 0x44000000 }, | ||
160 | { _MMIO(0x26704), 0x44000000 }, | ||
161 | { _MMIO(0x26500), 0x00000006 }, | ||
162 | { _MMIO(0x26510), 0x00000001 }, | ||
163 | { _MMIO(0x26504), 0x88000000 }, | ||
164 | { _MMIO(0x26580), 0x00000006 }, | ||
165 | { _MMIO(0x26590), 0x00000020 }, | ||
166 | { _MMIO(0x26584), 0x00000000 }, | ||
167 | { _MMIO(0x26104), 0x55822222 }, | ||
168 | { _MMIO(0x26184), 0xaa866666 }, | ||
169 | { _MMIO(0x25420), 0x08320c83 }, | ||
170 | { _MMIO(0x25424), 0x06820c83 }, | ||
171 | { _MMIO(0x2541c), 0x00000000 }, | ||
172 | { _MMIO(0x25428), 0x00000c03 }, | ||
173 | }; | ||
174 | |||
175 | static const struct i915_oa_reg * | ||
176 | get_compute_basic_mux_config(struct drm_i915_private *dev_priv, | ||
177 | int *len) | ||
178 | { | ||
179 | *len = ARRAY_SIZE(mux_config_compute_basic); | ||
180 | return mux_config_compute_basic; | ||
181 | } | ||
182 | |||
183 | static const struct i915_oa_reg b_counter_config_compute_extended[] = { | ||
184 | { _MMIO(0x2724), 0xf0800000 }, | ||
185 | { _MMIO(0x2720), 0x00000000 }, | ||
186 | { _MMIO(0x2714), 0xf0800000 }, | ||
187 | { _MMIO(0x2710), 0x00000000 }, | ||
188 | { _MMIO(0x2770), 0x0007fe2a }, | ||
189 | { _MMIO(0x2774), 0x0000ff00 }, | ||
190 | { _MMIO(0x2778), 0x0007fe6a }, | ||
191 | { _MMIO(0x277c), 0x0000ff00 }, | ||
192 | { _MMIO(0x2780), 0x0007fe92 }, | ||
193 | { _MMIO(0x2784), 0x0000ff00 }, | ||
194 | { _MMIO(0x2788), 0x0007fea2 }, | ||
195 | { _MMIO(0x278c), 0x0000ff00 }, | ||
196 | { _MMIO(0x2790), 0x0007fe32 }, | ||
197 | { _MMIO(0x2794), 0x0000ff00 }, | ||
198 | { _MMIO(0x2798), 0x0007fe9a }, | ||
199 | { _MMIO(0x279c), 0x0000ff00 }, | ||
200 | { _MMIO(0x27a0), 0x0007ff23 }, | ||
201 | { _MMIO(0x27a4), 0x0000ff00 }, | ||
202 | { _MMIO(0x27a8), 0x0007fff3 }, | ||
203 | { _MMIO(0x27ac), 0x0000fffe }, | ||
204 | }; | ||
205 | |||
206 | static const struct i915_oa_reg mux_config_compute_extended[] = { | ||
207 | { _MMIO(0x2681c), 0x3eb00800 }, | ||
208 | { _MMIO(0x26820), 0x00900000 }, | ||
209 | { _MMIO(0x25384), 0x02aaaaaa }, | ||
210 | { _MMIO(0x25404), 0x03ffffff }, | ||
211 | { _MMIO(0x26800), 0x00142284 }, | ||
212 | { _MMIO(0x26808), 0x0e629062 }, | ||
213 | { _MMIO(0x2680c), 0x3f6f55cb }, | ||
214 | { _MMIO(0x26810), 0x00000014 }, | ||
215 | { _MMIO(0x26804), 0x00000000 }, | ||
216 | { _MMIO(0x26104), 0x02aaaaaa }, | ||
217 | { _MMIO(0x26184), 0x02aaaaaa }, | ||
218 | { _MMIO(0x25420), 0x00000000 }, | ||
219 | { _MMIO(0x25424), 0x00000000 }, | ||
220 | { _MMIO(0x2541c), 0x00000000 }, | ||
221 | { _MMIO(0x25428), 0x00000000 }, | ||
222 | }; | ||
223 | |||
224 | static const struct i915_oa_reg * | ||
225 | get_compute_extended_mux_config(struct drm_i915_private *dev_priv, | ||
226 | int *len) | ||
227 | { | ||
228 | *len = ARRAY_SIZE(mux_config_compute_extended); | ||
229 | return mux_config_compute_extended; | ||
230 | } | ||
231 | |||
232 | static const struct i915_oa_reg b_counter_config_memory_reads[] = { | ||
233 | { _MMIO(0x2724), 0xf0800000 }, | ||
234 | { _MMIO(0x2720), 0x00000000 }, | ||
235 | { _MMIO(0x2714), 0xf0800000 }, | ||
236 | { _MMIO(0x2710), 0x00000000 }, | ||
237 | { _MMIO(0x274c), 0x76543298 }, | ||
238 | { _MMIO(0x2748), 0x98989898 }, | ||
239 | { _MMIO(0x2744), 0x000000e4 }, | ||
240 | { _MMIO(0x2740), 0x00000000 }, | ||
241 | { _MMIO(0x275c), 0x98a98a98 }, | ||
242 | { _MMIO(0x2758), 0x88888888 }, | ||
243 | { _MMIO(0x2754), 0x000c5500 }, | ||
244 | { _MMIO(0x2750), 0x00000000 }, | ||
245 | { _MMIO(0x2770), 0x0007f81a }, | ||
246 | { _MMIO(0x2774), 0x0000fc00 }, | ||
247 | { _MMIO(0x2778), 0x0007f82a }, | ||
248 | { _MMIO(0x277c), 0x0000fc00 }, | ||
249 | { _MMIO(0x2780), 0x0007f872 }, | ||
250 | { _MMIO(0x2784), 0x0000fc00 }, | ||
251 | { _MMIO(0x2788), 0x0007f8ba }, | ||
252 | { _MMIO(0x278c), 0x0000fc00 }, | ||
253 | { _MMIO(0x2790), 0x0007f87a }, | ||
254 | { _MMIO(0x2794), 0x0000fc00 }, | ||
255 | { _MMIO(0x2798), 0x0007f8ea }, | ||
256 | { _MMIO(0x279c), 0x0000fc00 }, | ||
257 | { _MMIO(0x27a0), 0x0007f8e2 }, | ||
258 | { _MMIO(0x27a4), 0x0000fc00 }, | ||
259 | { _MMIO(0x27a8), 0x0007f8f2 }, | ||
260 | { _MMIO(0x27ac), 0x0000fc00 }, | ||
261 | }; | ||
262 | |||
263 | static const struct i915_oa_reg mux_config_memory_reads[] = { | ||
264 | { _MMIO(0x253a4), 0x34300000 }, | ||
265 | { _MMIO(0x25440), 0x2d800000 }, | ||
266 | { _MMIO(0x25444), 0x00000008 }, | ||
267 | { _MMIO(0x25128), 0x0e600000 }, | ||
268 | { _MMIO(0x25380), 0x00000450 }, | ||
269 | { _MMIO(0x25390), 0x00052c43 }, | ||
270 | { _MMIO(0x25384), 0x00000000 }, | ||
271 | { _MMIO(0x25400), 0x00006144 }, | ||
272 | { _MMIO(0x25408), 0x0a418820 }, | ||
273 | { _MMIO(0x2540c), 0x000820e6 }, | ||
274 | { _MMIO(0x25404), 0xff500000 }, | ||
275 | { _MMIO(0x25100), 0x000005d6 }, | ||
276 | { _MMIO(0x2510c), 0x0ef00000 }, | ||
277 | { _MMIO(0x25104), 0x00000000 }, | ||
278 | { _MMIO(0x25420), 0x02108421 }, | ||
279 | { _MMIO(0x25424), 0x00008421 }, | ||
280 | { _MMIO(0x2541c), 0x00000000 }, | ||
281 | { _MMIO(0x25428), 0x00000000 }, | ||
282 | }; | ||
283 | |||
284 | static const struct i915_oa_reg * | ||
285 | get_memory_reads_mux_config(struct drm_i915_private *dev_priv, | ||
286 | int *len) | ||
287 | { | ||
288 | *len = ARRAY_SIZE(mux_config_memory_reads); | ||
289 | return mux_config_memory_reads; | ||
290 | } | ||
291 | |||
292 | static const struct i915_oa_reg b_counter_config_memory_writes[] = { | ||
293 | { _MMIO(0x2724), 0xf0800000 }, | ||
294 | { _MMIO(0x2720), 0x00000000 }, | ||
295 | { _MMIO(0x2714), 0xf0800000 }, | ||
296 | { _MMIO(0x2710), 0x00000000 }, | ||
297 | { _MMIO(0x274c), 0x76543298 }, | ||
298 | { _MMIO(0x2748), 0x98989898 }, | ||
299 | { _MMIO(0x2744), 0x000000e4 }, | ||
300 | { _MMIO(0x2740), 0x00000000 }, | ||
301 | { _MMIO(0x275c), 0xbabababa }, | ||
302 | { _MMIO(0x2758), 0x88888888 }, | ||
303 | { _MMIO(0x2754), 0x000c5500 }, | ||
304 | { _MMIO(0x2750), 0x00000000 }, | ||
305 | { _MMIO(0x2770), 0x0007f81a }, | ||
306 | { _MMIO(0x2774), 0x0000fc00 }, | ||
307 | { _MMIO(0x2778), 0x0007f82a }, | ||
308 | { _MMIO(0x277c), 0x0000fc00 }, | ||
309 | { _MMIO(0x2780), 0x0007f822 }, | ||
310 | { _MMIO(0x2784), 0x0000fc00 }, | ||
311 | { _MMIO(0x2788), 0x0007f8ba }, | ||
312 | { _MMIO(0x278c), 0x0000fc00 }, | ||
313 | { _MMIO(0x2790), 0x0007f87a }, | ||
314 | { _MMIO(0x2794), 0x0000fc00 }, | ||
315 | { _MMIO(0x2798), 0x0007f8ea }, | ||
316 | { _MMIO(0x279c), 0x0000fc00 }, | ||
317 | { _MMIO(0x27a0), 0x0007f8e2 }, | ||
318 | { _MMIO(0x27a4), 0x0000fc00 }, | ||
319 | { _MMIO(0x27a8), 0x0007f8f2 }, | ||
320 | { _MMIO(0x27ac), 0x0000fc00 }, | ||
321 | }; | ||
322 | |||
323 | static const struct i915_oa_reg mux_config_memory_writes[] = { | ||
324 | { _MMIO(0x253a4), 0x34300000 }, | ||
325 | { _MMIO(0x25440), 0x01500000 }, | ||
326 | { _MMIO(0x25444), 0x00000120 }, | ||
327 | { _MMIO(0x25128), 0x0c200000 }, | ||
328 | { _MMIO(0x25380), 0x00000450 }, | ||
329 | { _MMIO(0x25390), 0x00052c43 }, | ||
330 | { _MMIO(0x25384), 0x00000000 }, | ||
331 | { _MMIO(0x25400), 0x00007184 }, | ||
332 | { _MMIO(0x25408), 0x0a418820 }, | ||
333 | { _MMIO(0x2540c), 0x000820e6 }, | ||
334 | { _MMIO(0x25404), 0xff500000 }, | ||
335 | { _MMIO(0x25100), 0x000005d6 }, | ||
336 | { _MMIO(0x2510c), 0x1e700000 }, | ||
337 | { _MMIO(0x25104), 0x00000000 }, | ||
338 | { _MMIO(0x25420), 0x02108421 }, | ||
339 | { _MMIO(0x25424), 0x00008421 }, | ||
340 | { _MMIO(0x2541c), 0x00000000 }, | ||
341 | { _MMIO(0x25428), 0x00000000 }, | ||
342 | }; | ||
343 | |||
344 | static const struct i915_oa_reg * | ||
345 | get_memory_writes_mux_config(struct drm_i915_private *dev_priv, | ||
346 | int *len) | ||
347 | { | ||
348 | *len = ARRAY_SIZE(mux_config_memory_writes); | ||
349 | return mux_config_memory_writes; | ||
350 | } | ||
351 | |||
352 | static const struct i915_oa_reg b_counter_config_sampler_balance[] = { | ||
353 | { _MMIO(0x2740), 0x00000000 }, | ||
354 | { _MMIO(0x2744), 0x00800000 }, | ||
355 | { _MMIO(0x2710), 0x00000000 }, | ||
356 | { _MMIO(0x2714), 0x00800000 }, | ||
357 | { _MMIO(0x2720), 0x00000000 }, | ||
358 | { _MMIO(0x2724), 0x00800000 }, | ||
359 | }; | ||
360 | |||
361 | static const struct i915_oa_reg mux_config_sampler_balance[] = { | ||
362 | { _MMIO(0x2eb9c), 0x01906400 }, | ||
363 | { _MMIO(0x2fb9c), 0x01906400 }, | ||
364 | { _MMIO(0x253a4), 0x00000000 }, | ||
365 | { _MMIO(0x26b9c), 0x01906400 }, | ||
366 | { _MMIO(0x27b9c), 0x01906400 }, | ||
367 | { _MMIO(0x27104), 0x00a00000 }, | ||
368 | { _MMIO(0x27184), 0x00a50000 }, | ||
369 | { _MMIO(0x2e804), 0x00500000 }, | ||
370 | { _MMIO(0x2e984), 0x00500000 }, | ||
371 | { _MMIO(0x2eb04), 0x00500000 }, | ||
372 | { _MMIO(0x2eb80), 0x00000084 }, | ||
373 | { _MMIO(0x2eb8c), 0x14200000 }, | ||
374 | { _MMIO(0x2eb84), 0x00000000 }, | ||
375 | { _MMIO(0x2f804), 0x00050000 }, | ||
376 | { _MMIO(0x2f984), 0x00050000 }, | ||
377 | { _MMIO(0x2fb04), 0x00050000 }, | ||
378 | { _MMIO(0x2fb80), 0x00000084 }, | ||
379 | { _MMIO(0x2fb8c), 0x00050800 }, | ||
380 | { _MMIO(0x2fb84), 0x00000000 }, | ||
381 | { _MMIO(0x25380), 0x00000010 }, | ||
382 | { _MMIO(0x2538c), 0x000000c0 }, | ||
383 | { _MMIO(0x25384), 0xaa550000 }, | ||
384 | { _MMIO(0x25404), 0xffffc000 }, | ||
385 | { _MMIO(0x26804), 0x50000000 }, | ||
386 | { _MMIO(0x26984), 0x50000000 }, | ||
387 | { _MMIO(0x26b04), 0x50000000 }, | ||
388 | { _MMIO(0x26b80), 0x00000084 }, | ||
389 | { _MMIO(0x26b90), 0x00050800 }, | ||
390 | { _MMIO(0x26b84), 0x00000000 }, | ||
391 | { _MMIO(0x27804), 0x05000000 }, | ||
392 | { _MMIO(0x27984), 0x05000000 }, | ||
393 | { _MMIO(0x27b04), 0x05000000 }, | ||
394 | { _MMIO(0x27b80), 0x00000084 }, | ||
395 | { _MMIO(0x27b90), 0x00000142 }, | ||
396 | { _MMIO(0x27b84), 0x00000000 }, | ||
397 | { _MMIO(0x26104), 0xa0000000 }, | ||
398 | { _MMIO(0x26184), 0xa5000000 }, | ||
399 | { _MMIO(0x25424), 0x00008620 }, | ||
400 | { _MMIO(0x2541c), 0x00000000 }, | ||
401 | { _MMIO(0x25428), 0x0004a54a }, | ||
402 | }; | ||
403 | |||
404 | static const struct i915_oa_reg * | ||
405 | get_sampler_balance_mux_config(struct drm_i915_private *dev_priv, | ||
406 | int *len) | ||
407 | { | ||
408 | *len = ARRAY_SIZE(mux_config_sampler_balance); | ||
409 | return mux_config_sampler_balance; | ||
410 | } | ||
411 | |||
412 | int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv) | ||
413 | { | ||
414 | dev_priv->perf.oa.mux_regs = NULL; | ||
415 | dev_priv->perf.oa.mux_regs_len = 0; | ||
416 | dev_priv->perf.oa.b_counter_regs = NULL; | ||
417 | dev_priv->perf.oa.b_counter_regs_len = 0; | ||
418 | |||
419 | switch (dev_priv->perf.oa.metrics_set) { | ||
420 | case METRIC_SET_ID_RENDER_BASIC: | ||
421 | dev_priv->perf.oa.mux_regs = | ||
422 | get_render_basic_mux_config(dev_priv, | ||
423 | &dev_priv->perf.oa.mux_regs_len); | ||
424 | if (!dev_priv->perf.oa.mux_regs) { | ||
425 | DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set"); | ||
426 | |||
427 | /* EINVAL because *_register_sysfs already checked this | ||
428 | * and so it wouldn't have been advertised to userspace and | ||
429 | * so shouldn't have been requested | ||
430 | */ | ||
431 | return -EINVAL; | ||
432 | } | ||
433 | |||
434 | dev_priv->perf.oa.b_counter_regs = | ||
435 | b_counter_config_render_basic; | ||
436 | dev_priv->perf.oa.b_counter_regs_len = | ||
437 | ARRAY_SIZE(b_counter_config_render_basic); | ||
438 | |||
439 | return 0; | ||
440 | case METRIC_SET_ID_COMPUTE_BASIC: | ||
441 | dev_priv->perf.oa.mux_regs = | ||
442 | get_compute_basic_mux_config(dev_priv, | ||
443 | &dev_priv->perf.oa.mux_regs_len); | ||
444 | if (!dev_priv->perf.oa.mux_regs) { | ||
445 | DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set"); | ||
446 | |||
447 | /* EINVAL because *_register_sysfs already checked this | ||
448 | * and so it wouldn't have been advertised to userspace and | ||
449 | * so shouldn't have been requested | ||
450 | */ | ||
451 | return -EINVAL; | ||
452 | } | ||
453 | |||
454 | dev_priv->perf.oa.b_counter_regs = | ||
455 | b_counter_config_compute_basic; | ||
456 | dev_priv->perf.oa.b_counter_regs_len = | ||
457 | ARRAY_SIZE(b_counter_config_compute_basic); | ||
458 | |||
459 | return 0; | ||
460 | case METRIC_SET_ID_COMPUTE_EXTENDED: | ||
461 | dev_priv->perf.oa.mux_regs = | ||
462 | get_compute_extended_mux_config(dev_priv, | ||
463 | &dev_priv->perf.oa.mux_regs_len); | ||
464 | if (!dev_priv->perf.oa.mux_regs) { | ||
465 | DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set"); | ||
466 | |||
467 | /* EINVAL because *_register_sysfs already checked this | ||
468 | * and so it wouldn't have been advertised to userspace and | ||
469 | * so shouldn't have been requested | ||
470 | */ | ||
471 | return -EINVAL; | ||
472 | } | ||
473 | |||
474 | dev_priv->perf.oa.b_counter_regs = | ||
475 | b_counter_config_compute_extended; | ||
476 | dev_priv->perf.oa.b_counter_regs_len = | ||
477 | ARRAY_SIZE(b_counter_config_compute_extended); | ||
478 | |||
479 | return 0; | ||
480 | case METRIC_SET_ID_MEMORY_READS: | ||
481 | dev_priv->perf.oa.mux_regs = | ||
482 | get_memory_reads_mux_config(dev_priv, | ||
483 | &dev_priv->perf.oa.mux_regs_len); | ||
484 | if (!dev_priv->perf.oa.mux_regs) { | ||
485 | DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set"); | ||
486 | |||
487 | /* EINVAL because *_register_sysfs already checked this | ||
488 | * and so it wouldn't have been advertised to userspace and | ||
489 | * so shouldn't have been requested | ||
490 | */ | ||
491 | return -EINVAL; | ||
492 | } | ||
493 | |||
494 | dev_priv->perf.oa.b_counter_regs = | ||
495 | b_counter_config_memory_reads; | ||
496 | dev_priv->perf.oa.b_counter_regs_len = | ||
497 | ARRAY_SIZE(b_counter_config_memory_reads); | ||
498 | |||
499 | return 0; | ||
500 | case METRIC_SET_ID_MEMORY_WRITES: | ||
501 | dev_priv->perf.oa.mux_regs = | ||
502 | get_memory_writes_mux_config(dev_priv, | ||
503 | &dev_priv->perf.oa.mux_regs_len); | ||
504 | if (!dev_priv->perf.oa.mux_regs) { | ||
505 | DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set"); | ||
506 | |||
507 | /* EINVAL because *_register_sysfs already checked this | ||
508 | * and so it wouldn't have been advertised to userspace and | ||
509 | * so shouldn't have been requested | ||
510 | */ | ||
511 | return -EINVAL; | ||
512 | } | ||
513 | |||
514 | dev_priv->perf.oa.b_counter_regs = | ||
515 | b_counter_config_memory_writes; | ||
516 | dev_priv->perf.oa.b_counter_regs_len = | ||
517 | ARRAY_SIZE(b_counter_config_memory_writes); | ||
518 | |||
519 | return 0; | ||
520 | case METRIC_SET_ID_SAMPLER_BALANCE: | ||
521 | dev_priv->perf.oa.mux_regs = | ||
522 | get_sampler_balance_mux_config(dev_priv, | ||
523 | &dev_priv->perf.oa.mux_regs_len); | ||
524 | if (!dev_priv->perf.oa.mux_regs) { | ||
525 | DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set"); | ||
526 | |||
527 | /* EINVAL because *_register_sysfs already checked this | ||
528 | * and so it wouldn't have been advertised to userspace and | ||
529 | * so shouldn't have been requested | ||
530 | */ | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | |||
534 | dev_priv->perf.oa.b_counter_regs = | ||
535 | b_counter_config_sampler_balance; | ||
536 | dev_priv->perf.oa.b_counter_regs_len = | ||
537 | ARRAY_SIZE(b_counter_config_sampler_balance); | ||
538 | |||
539 | return 0; | ||
540 | default: | ||
541 | return -ENODEV; | ||
542 | } | ||
543 | } | ||
544 | |||
545 | static ssize_t | ||
546 | show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf) | ||
547 | { | ||
548 | return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC); | ||
549 | } | ||
550 | |||
551 | static struct device_attribute dev_attr_render_basic_id = { | ||
552 | .attr = { .name = "id", .mode = 0444 }, | ||
553 | .show = show_render_basic_id, | ||
554 | .store = NULL, | ||
555 | }; | ||
556 | |||
557 | static struct attribute *attrs_render_basic[] = { | ||
558 | &dev_attr_render_basic_id.attr, | ||
559 | NULL, | ||
560 | }; | ||
561 | |||
562 | static struct attribute_group group_render_basic = { | ||
563 | .name = "403d8832-1a27-4aa6-a64e-f5389ce7b212", | ||
564 | .attrs = attrs_render_basic, | ||
565 | }; | ||
566 | |||
567 | static ssize_t | ||
568 | show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf) | ||
569 | { | ||
570 | return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC); | ||
571 | } | ||
572 | |||
573 | static struct device_attribute dev_attr_compute_basic_id = { | ||
574 | .attr = { .name = "id", .mode = 0444 }, | ||
575 | .show = show_compute_basic_id, | ||
576 | .store = NULL, | ||
577 | }; | ||
578 | |||
579 | static struct attribute *attrs_compute_basic[] = { | ||
580 | &dev_attr_compute_basic_id.attr, | ||
581 | NULL, | ||
582 | }; | ||
583 | |||
584 | static struct attribute_group group_compute_basic = { | ||
585 | .name = "39ad14bc-2380-45c4-91eb-fbcb3aa7ae7b", | ||
586 | .attrs = attrs_compute_basic, | ||
587 | }; | ||
588 | |||
589 | static ssize_t | ||
590 | show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf) | ||
591 | { | ||
592 | return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED); | ||
593 | } | ||
594 | |||
595 | static struct device_attribute dev_attr_compute_extended_id = { | ||
596 | .attr = { .name = "id", .mode = 0444 }, | ||
597 | .show = show_compute_extended_id, | ||
598 | .store = NULL, | ||
599 | }; | ||
600 | |||
601 | static struct attribute *attrs_compute_extended[] = { | ||
602 | &dev_attr_compute_extended_id.attr, | ||
603 | NULL, | ||
604 | }; | ||
605 | |||
606 | static struct attribute_group group_compute_extended = { | ||
607 | .name = "3865be28-6982-49fe-9494-e4d1b4795413", | ||
608 | .attrs = attrs_compute_extended, | ||
609 | }; | ||
610 | |||
611 | static ssize_t | ||
612 | show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf) | ||
613 | { | ||
614 | return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS); | ||
615 | } | ||
616 | |||
617 | static struct device_attribute dev_attr_memory_reads_id = { | ||
618 | .attr = { .name = "id", .mode = 0444 }, | ||
619 | .show = show_memory_reads_id, | ||
620 | .store = NULL, | ||
621 | }; | ||
622 | |||
623 | static struct attribute *attrs_memory_reads[] = { | ||
624 | &dev_attr_memory_reads_id.attr, | ||
625 | NULL, | ||
626 | }; | ||
627 | |||
628 | static struct attribute_group group_memory_reads = { | ||
629 | .name = "bb5ed49b-2497-4095-94f6-26ba294db88a", | ||
630 | .attrs = attrs_memory_reads, | ||
631 | }; | ||
632 | |||
633 | static ssize_t | ||
634 | show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf) | ||
635 | { | ||
636 | return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES); | ||
637 | } | ||
638 | |||
639 | static struct device_attribute dev_attr_memory_writes_id = { | ||
640 | .attr = { .name = "id", .mode = 0444 }, | ||
641 | .show = show_memory_writes_id, | ||
642 | .store = NULL, | ||
643 | }; | ||
644 | |||
645 | static struct attribute *attrs_memory_writes[] = { | ||
646 | &dev_attr_memory_writes_id.attr, | ||
647 | NULL, | ||
648 | }; | ||
649 | |||
650 | static struct attribute_group group_memory_writes = { | ||
651 | .name = "3358d639-9b5f-45ab-976d-9b08cbfc6240", | ||
652 | .attrs = attrs_memory_writes, | ||
653 | }; | ||
654 | |||
655 | static ssize_t | ||
656 | show_sampler_balance_id(struct device *kdev, struct device_attribute *attr, char *buf) | ||
657 | { | ||
658 | return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_BALANCE); | ||
659 | } | ||
660 | |||
661 | static struct device_attribute dev_attr_sampler_balance_id = { | ||
662 | .attr = { .name = "id", .mode = 0444 }, | ||
663 | .show = show_sampler_balance_id, | ||
664 | .store = NULL, | ||
665 | }; | ||
666 | |||
667 | static struct attribute *attrs_sampler_balance[] = { | ||
668 | &dev_attr_sampler_balance_id.attr, | ||
669 | NULL, | ||
670 | }; | ||
671 | |||
672 | static struct attribute_group group_sampler_balance = { | ||
673 | .name = "bc274488-b4b6-40c7-90da-b77d7ad16189", | ||
674 | .attrs = attrs_sampler_balance, | ||
675 | }; | ||
676 | |||
677 | int | ||
678 | i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv) | ||
679 | { | ||
680 | int mux_len; | ||
681 | int ret = 0; | ||
682 | |||
683 | if (get_render_basic_mux_config(dev_priv, &mux_len)) { | ||
684 | ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic); | ||
685 | if (ret) | ||
686 | goto error_render_basic; | ||
687 | } | ||
688 | if (get_compute_basic_mux_config(dev_priv, &mux_len)) { | ||
689 | ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic); | ||
690 | if (ret) | ||
691 | goto error_compute_basic; | ||
692 | } | ||
693 | if (get_compute_extended_mux_config(dev_priv, &mux_len)) { | ||
694 | ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended); | ||
695 | if (ret) | ||
696 | goto error_compute_extended; | ||
697 | } | ||
698 | if (get_memory_reads_mux_config(dev_priv, &mux_len)) { | ||
699 | ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads); | ||
700 | if (ret) | ||
701 | goto error_memory_reads; | ||
702 | } | ||
703 | if (get_memory_writes_mux_config(dev_priv, &mux_len)) { | ||
704 | ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes); | ||
705 | if (ret) | ||
706 | goto error_memory_writes; | ||
707 | } | ||
708 | if (get_sampler_balance_mux_config(dev_priv, &mux_len)) { | ||
709 | ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance); | ||
710 | if (ret) | ||
711 | goto error_sampler_balance; | ||
712 | } | ||
713 | |||
714 | return 0; | ||
715 | |||
716 | error_sampler_balance: | ||
717 | if (get_memory_writes_mux_config(dev_priv, &mux_len)) | ||
718 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes); | ||
719 | error_memory_writes: | ||
720 | if (get_memory_reads_mux_config(dev_priv, &mux_len)) | ||
721 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads); | ||
722 | error_memory_reads: | ||
723 | if (get_compute_extended_mux_config(dev_priv, &mux_len)) | ||
724 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended); | ||
725 | error_compute_extended: | ||
726 | if (get_compute_basic_mux_config(dev_priv, &mux_len)) | ||
727 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic); | ||
728 | error_compute_basic: | ||
729 | if (get_render_basic_mux_config(dev_priv, &mux_len)) | ||
730 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic); | ||
731 | error_render_basic: | ||
732 | return ret; | ||
733 | } | ||
734 | |||
735 | void | ||
736 | i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv) | ||
737 | { | ||
738 | int mux_len; | ||
739 | |||
740 | if (get_render_basic_mux_config(dev_priv, &mux_len)) | ||
741 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic); | ||
742 | if (get_compute_basic_mux_config(dev_priv, &mux_len)) | ||
743 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic); | ||
744 | if (get_compute_extended_mux_config(dev_priv, &mux_len)) | ||
745 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended); | ||
746 | if (get_memory_reads_mux_config(dev_priv, &mux_len)) | ||
747 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads); | ||
748 | if (get_memory_writes_mux_config(dev_priv, &mux_len)) | ||
749 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes); | ||
750 | if (get_sampler_balance_mux_config(dev_priv, &mux_len)) | ||
751 | sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance); | ||
752 | } | ||
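Every metric set above follows the same pattern: a sysfs attribute group named by the configuration's UUID, exposing a single read-only id file whose integer value userspace passes back when opening a perf stream. A self-contained userspace sketch of the discovery step (the metrics path under sysfs is an assumption; verify it against your kernel):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed layout: <card>/metrics/<uuid>/id holds the value
		 * printed by show_render_basic_id() above (expected: 1). */
		const char *path = "/sys/class/drm/card0/metrics/"
				   "403d8832-1a27-4aa6-a64e-f5389ce7b212/id";
		FILE *f = fopen(path, "r");
		int id;

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%d", &id) != 1) {
			fprintf(stderr, "unexpected contents in %s\n", path);
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("render_basic metric set id: %d\n", id);
		return 0;
	}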
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h new file mode 100644 index 000000000000..429a229b5158 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_hsw.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Autogenerated file, DO NOT EDIT manually! | ||
3 | * | ||
4 | * Copyright (c) 2015 Intel Corporation | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
23 | * IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef __I915_OA_HSW_H__ | ||
28 | #define __I915_OA_HSW_H__ | ||
29 | |||
30 | extern int i915_oa_n_builtin_metric_sets_hsw; | ||
31 | |||
32 | extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv); | ||
33 | |||
34 | extern int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv); | ||
35 | |||
36 | extern void i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv); | ||
37 | |||
38 | #endif | ||
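These four externs are the whole per-platform surface the new header exports: a metric-set count, a selector, and the sysfs register/unregister pair. A hedged sketch of how the core perf code would consume them (the field names under dev_priv->perf.oa are assumptions for illustration):

	/* Hypothetical wiring in i915_perf.c, not the verbatim code. */
	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.n_builtin_sets =
			i915_oa_n_builtin_metric_sets_hsw;
		/* i915_oa_select_metric_set_hsw() then runs when a stream
		 * is opened; i915_perf_register_sysfs_hsw() when the
		 * metrics kobject is created. */
	}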
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index d46ffe7086bc..0e280fbd52f1 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
@@ -50,7 +50,7 @@ struct i915_params i915 __read_mostly = { | |||
50 | .error_capture = true, | 50 | .error_capture = true, |
51 | .invert_brightness = 0, | 51 | .invert_brightness = 0, |
52 | .disable_display = 0, | 52 | .disable_display = 0, |
53 | .enable_cmd_parser = 1, | 53 | .enable_cmd_parser = true, |
54 | .use_mmio_flip = 0, | 54 | .use_mmio_flip = 0, |
55 | .mmio_debug = 0, | 55 | .mmio_debug = 0, |
56 | .verbose_state_checks = 1, | 56 | .verbose_state_checks = 1, |
@@ -188,9 +188,9 @@ MODULE_PARM_DESC(invert_brightness, | |||
188 | module_param_named(disable_display, i915.disable_display, bool, 0400); | 188 | module_param_named(disable_display, i915.disable_display, bool, 0400); |
189 | MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); | 189 | MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); |
190 | 190 | ||
191 | module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); | 191 | module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400); |
192 | MODULE_PARM_DESC(enable_cmd_parser, | 192 | MODULE_PARM_DESC(enable_cmd_parser, |
193 | "Enable command parsing (1=enabled [default], 0=disabled)"); | 193 | "Enable command parsing (true=enabled [default], false=disabled)"); |
194 | 194 | ||
195 | module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); | 195 | module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); |
196 | MODULE_PARM_DESC(use_mmio_flip, | 196 | MODULE_PARM_DESC(use_mmio_flip, |
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 817ad959941e..8e433de04679 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h | |||
@@ -44,7 +44,6 @@ struct i915_params { | |||
44 | int disable_power_well; | 44 | int disable_power_well; |
45 | int enable_ips; | 45 | int enable_ips; |
46 | int invert_brightness; | 46 | int invert_brightness; |
47 | int enable_cmd_parser; | ||
48 | int enable_guc_loading; | 47 | int enable_guc_loading; |
49 | int enable_guc_submission; | 48 | int enable_guc_submission; |
50 | int guc_log_level; | 49 | int guc_log_level; |
@@ -53,6 +52,7 @@ struct i915_params { | |||
53 | int edp_vswing; | 52 | int edp_vswing; |
54 | unsigned int inject_load_failure; | 53 | unsigned int inject_load_failure; |
55 | /* leave bools at the end to not create holes */ | 54 | /* leave bools at the end to not create holes */ |
55 | bool enable_cmd_parser; | ||
56 | bool enable_hangcheck; | 56 | bool enable_hangcheck; |
57 | bool fastboot; | 57 | bool fastboot; |
58 | bool prefault_disable; | 58 | bool prefault_disable; |
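Moving enable_cmd_parser to bool honours the "leave bools at the end to not create holes" comment: interleaving flags between ints forces alignment padding after every flag, while grouping them at the tail lets them pack. A standalone illustration (field names invented):

	#include <stdio.h>
	#include <stdbool.h>

	struct holey {		/* flags interleaved with ints */
		int a;
		bool f1;	/* 3 padding bytes follow */
		int b;
		bool f2;	/* 3 trailing padding bytes */
	};

	struct packed_tail {	/* ints first, flags at the end */
		int a;
		int b;
		bool f1;
		bool f2;	/* 2 trailing padding bytes */
	};

	int main(void)
	{
		/* Typically prints "16 vs 12" where int is 4-byte aligned. */
		printf("%zu vs %zu\n",
		       sizeof(struct holey), sizeof(struct packed_tail));
		return 0;
	}

The module parameter changes with it: with type bool and mode 0400, the option is set once at load time (e.g. i915.enable_cmd_parser=0 on the kernel command line) rather than toggled through sysfs at runtime.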
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index fce8e198bc76..ecb487b5356f 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #define CHV_COLORS \ | 54 | #define CHV_COLORS \ |
55 | .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } | 55 | .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } |
56 | 56 | ||
57 | /* Keep in gen-based order, and chronological order within a gen */ | ||
57 | #define GEN2_FEATURES \ | 58 | #define GEN2_FEATURES \ |
58 | .gen = 2, .num_pipes = 1, \ | 59 | .gen = 2, .num_pipes = 1, \ |
59 | .has_overlay = 1, .overlay_needs_physical = 1, \ | 60 | .has_overlay = 1, .overlay_needs_physical = 1, \ |
@@ -65,17 +66,19 @@ | |||
65 | 66 | ||
66 | static const struct intel_device_info intel_i830_info = { | 67 | static const struct intel_device_info intel_i830_info = { |
67 | GEN2_FEATURES, | 68 | GEN2_FEATURES, |
69 | .platform = INTEL_I830, | ||
68 | .is_mobile = 1, .cursor_needs_physical = 1, | 70 | .is_mobile = 1, .cursor_needs_physical = 1, |
69 | .num_pipes = 2, /* legal, last one wins */ | 71 | .num_pipes = 2, /* legal, last one wins */ |
70 | }; | 72 | }; |
71 | 73 | ||
72 | static const struct intel_device_info intel_845g_info = { | 74 | static const struct intel_device_info intel_i845g_info = { |
73 | GEN2_FEATURES, | 75 | GEN2_FEATURES, |
76 | .platform = INTEL_I845G, | ||
74 | }; | 77 | }; |
75 | 78 | ||
76 | static const struct intel_device_info intel_i85x_info = { | 79 | static const struct intel_device_info intel_i85x_info = { |
77 | GEN2_FEATURES, | 80 | GEN2_FEATURES, |
78 | .is_i85x = 1, .is_mobile = 1, | 81 | .platform = INTEL_I85X, .is_mobile = 1, |
79 | .num_pipes = 2, /* legal, last one wins */ | 82 | .num_pipes = 2, /* legal, last one wins */ |
80 | .cursor_needs_physical = 1, | 83 | .cursor_needs_physical = 1, |
81 | .has_fbc = 1, | 84 | .has_fbc = 1, |
@@ -83,6 +86,7 @@ static const struct intel_device_info intel_i85x_info = { | |||
83 | 86 | ||
84 | static const struct intel_device_info intel_i865g_info = { | 87 | static const struct intel_device_info intel_i865g_info = { |
85 | GEN2_FEATURES, | 88 | GEN2_FEATURES, |
89 | .platform = INTEL_I865G, | ||
86 | }; | 90 | }; |
87 | 91 | ||
88 | #define GEN3_FEATURES \ | 92 | #define GEN3_FEATURES \ |
@@ -94,12 +98,14 @@ static const struct intel_device_info intel_i865g_info = { | |||
94 | 98 | ||
95 | static const struct intel_device_info intel_i915g_info = { | 99 | static const struct intel_device_info intel_i915g_info = { |
96 | GEN3_FEATURES, | 100 | GEN3_FEATURES, |
97 | .is_i915g = 1, .cursor_needs_physical = 1, | 101 | .platform = INTEL_I915G, .cursor_needs_physical = 1, |
98 | .has_overlay = 1, .overlay_needs_physical = 1, | 102 | .has_overlay = 1, .overlay_needs_physical = 1, |
99 | .hws_needs_physical = 1, | 103 | .hws_needs_physical = 1, |
100 | }; | 104 | }; |
105 | |||
101 | static const struct intel_device_info intel_i915gm_info = { | 106 | static const struct intel_device_info intel_i915gm_info = { |
102 | GEN3_FEATURES, | 107 | GEN3_FEATURES, |
108 | .platform = INTEL_I915GM, | ||
103 | .is_mobile = 1, | 109 | .is_mobile = 1, |
104 | .cursor_needs_physical = 1, | 110 | .cursor_needs_physical = 1, |
105 | .has_overlay = 1, .overlay_needs_physical = 1, | 111 | .has_overlay = 1, .overlay_needs_physical = 1, |
@@ -107,15 +113,18 @@ static const struct intel_device_info intel_i915gm_info = { | |||
107 | .has_fbc = 1, | 113 | .has_fbc = 1, |
108 | .hws_needs_physical = 1, | 114 | .hws_needs_physical = 1, |
109 | }; | 115 | }; |
116 | |||
110 | static const struct intel_device_info intel_i945g_info = { | 117 | static const struct intel_device_info intel_i945g_info = { |
111 | GEN3_FEATURES, | 118 | GEN3_FEATURES, |
119 | .platform = INTEL_I945G, | ||
112 | .has_hotplug = 1, .cursor_needs_physical = 1, | 120 | .has_hotplug = 1, .cursor_needs_physical = 1, |
113 | .has_overlay = 1, .overlay_needs_physical = 1, | 121 | .has_overlay = 1, .overlay_needs_physical = 1, |
114 | .hws_needs_physical = 1, | 122 | .hws_needs_physical = 1, |
115 | }; | 123 | }; |
124 | |||
116 | static const struct intel_device_info intel_i945gm_info = { | 125 | static const struct intel_device_info intel_i945gm_info = { |
117 | GEN3_FEATURES, | 126 | GEN3_FEATURES, |
118 | .is_i945gm = 1, .is_mobile = 1, | 127 | .platform = INTEL_I945GM, .is_mobile = 1, |
119 | .has_hotplug = 1, .cursor_needs_physical = 1, | 128 | .has_hotplug = 1, .cursor_needs_physical = 1, |
120 | .has_overlay = 1, .overlay_needs_physical = 1, | 129 | .has_overlay = 1, .overlay_needs_physical = 1, |
121 | .supports_tv = 1, | 130 | .supports_tv = 1, |
@@ -123,6 +132,20 @@ static const struct intel_device_info intel_i945gm_info = { | |||
123 | .hws_needs_physical = 1, | 132 | .hws_needs_physical = 1, |
124 | }; | 133 | }; |
125 | 134 | ||
135 | static const struct intel_device_info intel_g33_info = { | ||
136 | GEN3_FEATURES, | ||
137 | .platform = INTEL_G33, | ||
138 | .has_hotplug = 1, | ||
139 | .has_overlay = 1, | ||
140 | }; | ||
141 | |||
142 | static const struct intel_device_info intel_pineview_info = { | ||
143 | GEN3_FEATURES, | ||
144 | .platform = INTEL_PINEVIEW, .is_mobile = 1, | ||
145 | .has_hotplug = 1, | ||
146 | .has_overlay = 1, | ||
147 | }; | ||
148 | |||
126 | #define GEN4_FEATURES \ | 149 | #define GEN4_FEATURES \ |
127 | .gen = 4, .num_pipes = 2, \ | 150 | .gen = 4, .num_pipes = 2, \ |
128 | .has_hotplug = 1, \ | 151 | .has_hotplug = 1, \ |
@@ -133,50 +156,36 @@ static const struct intel_device_info intel_i945gm_info = { | |||
133 | 156 | ||
134 | static const struct intel_device_info intel_i965g_info = { | 157 | static const struct intel_device_info intel_i965g_info = { |
135 | GEN4_FEATURES, | 158 | GEN4_FEATURES, |
136 | .is_broadwater = 1, | 159 | .platform = INTEL_I965G, |
137 | .has_overlay = 1, | 160 | .has_overlay = 1, |
138 | .hws_needs_physical = 1, | 161 | .hws_needs_physical = 1, |
139 | }; | 162 | }; |
140 | 163 | ||
141 | static const struct intel_device_info intel_i965gm_info = { | 164 | static const struct intel_device_info intel_i965gm_info = { |
142 | GEN4_FEATURES, | 165 | GEN4_FEATURES, |
143 | .is_crestline = 1, | 166 | .platform = INTEL_I965GM, |
144 | .is_mobile = 1, .has_fbc = 1, | 167 | .is_mobile = 1, .has_fbc = 1, |
145 | .has_overlay = 1, | 168 | .has_overlay = 1, |
146 | .supports_tv = 1, | 169 | .supports_tv = 1, |
147 | .hws_needs_physical = 1, | 170 | .hws_needs_physical = 1, |
148 | }; | 171 | }; |
149 | 172 | ||
150 | static const struct intel_device_info intel_g33_info = { | ||
151 | GEN3_FEATURES, | ||
152 | .is_g33 = 1, | ||
153 | .has_hotplug = 1, | ||
154 | .has_overlay = 1, | ||
155 | }; | ||
156 | |||
157 | static const struct intel_device_info intel_g45_info = { | 173 | static const struct intel_device_info intel_g45_info = { |
158 | GEN4_FEATURES, | 174 | GEN4_FEATURES, |
159 | .is_g4x = 1, | 175 | .platform = INTEL_G45, |
160 | .has_pipe_cxsr = 1, | 176 | .has_pipe_cxsr = 1, |
161 | .ring_mask = RENDER_RING | BSD_RING, | 177 | .ring_mask = RENDER_RING | BSD_RING, |
162 | }; | 178 | }; |
163 | 179 | ||
164 | static const struct intel_device_info intel_gm45_info = { | 180 | static const struct intel_device_info intel_gm45_info = { |
165 | GEN4_FEATURES, | 181 | GEN4_FEATURES, |
166 | .is_g4x = 1, | 182 | .platform = INTEL_GM45, |
167 | .is_mobile = 1, .has_fbc = 1, | 183 | .is_mobile = 1, .has_fbc = 1, |
168 | .has_pipe_cxsr = 1, | 184 | .has_pipe_cxsr = 1, |
169 | .supports_tv = 1, | 185 | .supports_tv = 1, |
170 | .ring_mask = RENDER_RING | BSD_RING, | 186 | .ring_mask = RENDER_RING | BSD_RING, |
171 | }; | 187 | }; |
172 | 188 | ||
173 | static const struct intel_device_info intel_pineview_info = { | ||
174 | GEN3_FEATURES, | ||
175 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, | ||
176 | .has_hotplug = 1, | ||
177 | .has_overlay = 1, | ||
178 | }; | ||
179 | |||
180 | #define GEN5_FEATURES \ | 189 | #define GEN5_FEATURES \ |
181 | .gen = 5, .num_pipes = 2, \ | 190 | .gen = 5, .num_pipes = 2, \ |
182 | .has_hotplug = 1, \ | 191 | .has_hotplug = 1, \ |
@@ -187,10 +196,12 @@ static const struct intel_device_info intel_pineview_info = { | |||
187 | 196 | ||
188 | static const struct intel_device_info intel_ironlake_d_info = { | 197 | static const struct intel_device_info intel_ironlake_d_info = { |
189 | GEN5_FEATURES, | 198 | GEN5_FEATURES, |
199 | .platform = INTEL_IRONLAKE, | ||
190 | }; | 200 | }; |
191 | 201 | ||
192 | static const struct intel_device_info intel_ironlake_m_info = { | 202 | static const struct intel_device_info intel_ironlake_m_info = { |
193 | GEN5_FEATURES, | 203 | GEN5_FEATURES, |
204 | .platform = INTEL_IRONLAKE, | ||
194 | .is_mobile = 1, | 205 | .is_mobile = 1, |
195 | }; | 206 | }; |
196 | 207 | ||
@@ -204,15 +215,18 @@ static const struct intel_device_info intel_ironlake_m_info = { | |||
204 | .has_rc6p = 1, \ | 215 | .has_rc6p = 1, \ |
205 | .has_gmbus_irq = 1, \ | 216 | .has_gmbus_irq = 1, \ |
206 | .has_hw_contexts = 1, \ | 217 | .has_hw_contexts = 1, \ |
218 | .has_aliasing_ppgtt = 1, \ | ||
207 | GEN_DEFAULT_PIPEOFFSETS, \ | 219 | GEN_DEFAULT_PIPEOFFSETS, \ |
208 | CURSOR_OFFSETS | 220 | CURSOR_OFFSETS |
209 | 221 | ||
210 | static const struct intel_device_info intel_sandybridge_d_info = { | 222 | static const struct intel_device_info intel_sandybridge_d_info = { |
211 | GEN6_FEATURES, | 223 | GEN6_FEATURES, |
224 | .platform = INTEL_SANDYBRIDGE, | ||
212 | }; | 225 | }; |
213 | 226 | ||
214 | static const struct intel_device_info intel_sandybridge_m_info = { | 227 | static const struct intel_device_info intel_sandybridge_m_info = { |
215 | GEN6_FEATURES, | 228 | GEN6_FEATURES, |
229 | .platform = INTEL_SANDYBRIDGE, | ||
216 | .is_mobile = 1, | 230 | .is_mobile = 1, |
217 | }; | 231 | }; |
218 | 232 | ||
@@ -226,46 +240,49 @@ static const struct intel_device_info intel_sandybridge_m_info = { | |||
226 | .has_rc6p = 1, \ | 240 | .has_rc6p = 1, \ |
227 | .has_gmbus_irq = 1, \ | 241 | .has_gmbus_irq = 1, \ |
228 | .has_hw_contexts = 1, \ | 242 | .has_hw_contexts = 1, \ |
243 | .has_aliasing_ppgtt = 1, \ | ||
244 | .has_full_ppgtt = 1, \ | ||
229 | GEN_DEFAULT_PIPEOFFSETS, \ | 245 | GEN_DEFAULT_PIPEOFFSETS, \ |
230 | IVB_CURSOR_OFFSETS | 246 | IVB_CURSOR_OFFSETS |
231 | 247 | ||
232 | static const struct intel_device_info intel_ivybridge_d_info = { | 248 | static const struct intel_device_info intel_ivybridge_d_info = { |
233 | GEN7_FEATURES, | 249 | GEN7_FEATURES, |
234 | .is_ivybridge = 1, | 250 | .platform = INTEL_IVYBRIDGE, |
235 | .has_l3_dpf = 1, | 251 | .has_l3_dpf = 1, |
236 | }; | 252 | }; |
237 | 253 | ||
238 | static const struct intel_device_info intel_ivybridge_m_info = { | 254 | static const struct intel_device_info intel_ivybridge_m_info = { |
239 | GEN7_FEATURES, | 255 | GEN7_FEATURES, |
240 | .is_ivybridge = 1, | 256 | .platform = INTEL_IVYBRIDGE, |
241 | .is_mobile = 1, | 257 | .is_mobile = 1, |
242 | .has_l3_dpf = 1, | 258 | .has_l3_dpf = 1, |
243 | }; | 259 | }; |
244 | 260 | ||
245 | static const struct intel_device_info intel_ivybridge_q_info = { | 261 | static const struct intel_device_info intel_ivybridge_q_info = { |
246 | GEN7_FEATURES, | 262 | GEN7_FEATURES, |
247 | .is_ivybridge = 1, | 263 | .platform = INTEL_IVYBRIDGE, |
248 | .num_pipes = 0, /* legal, last one wins */ | 264 | .num_pipes = 0, /* legal, last one wins */ |
249 | .has_l3_dpf = 1, | 265 | .has_l3_dpf = 1, |
250 | }; | 266 | }; |
251 | 267 | ||
252 | #define VLV_FEATURES \ | ||
253 | .gen = 7, .num_pipes = 2, \ | ||
254 | .has_psr = 1, \ | ||
255 | .has_runtime_pm = 1, \ | ||
256 | .has_rc6 = 1, \ | ||
257 | .has_gmbus_irq = 1, \ | ||
258 | .has_hw_contexts = 1, \ | ||
259 | .has_gmch_display = 1, \ | ||
260 | .has_hotplug = 1, \ | ||
261 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ | ||
262 | .display_mmio_offset = VLV_DISPLAY_BASE, \ | ||
263 | GEN_DEFAULT_PIPEOFFSETS, \ | ||
264 | CURSOR_OFFSETS | ||
265 | |||
266 | static const struct intel_device_info intel_valleyview_info = { | 268 | static const struct intel_device_info intel_valleyview_info = { |
267 | VLV_FEATURES, | 269 | .platform = INTEL_VALLEYVIEW, |
268 | .is_valleyview = 1, | 270 | .gen = 7, |
271 | .is_lp = 1, | ||
272 | .num_pipes = 2, | ||
273 | .has_psr = 1, | ||
274 | .has_runtime_pm = 1, | ||
275 | .has_rc6 = 1, | ||
276 | .has_gmbus_irq = 1, | ||
277 | .has_hw_contexts = 1, | ||
278 | .has_gmch_display = 1, | ||
279 | .has_hotplug = 1, | ||
280 | .has_aliasing_ppgtt = 1, | ||
281 | .has_full_ppgtt = 1, | ||
282 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, | ||
283 | .display_mmio_offset = VLV_DISPLAY_BASE, | ||
284 | GEN_DEFAULT_PIPEOFFSETS, | ||
285 | CURSOR_OFFSETS | ||
269 | }; | 286 | }; |
270 | 287 | ||
271 | #define HSW_FEATURES \ | 288 | #define HSW_FEATURES \ |
@@ -281,7 +298,7 @@ static const struct intel_device_info intel_valleyview_info = { | |||
281 | 298 | ||
282 | static const struct intel_device_info intel_haswell_info = { | 299 | static const struct intel_device_info intel_haswell_info = { |
283 | HSW_FEATURES, | 300 | HSW_FEATURES, |
284 | .is_haswell = 1, | 301 | .platform = INTEL_HASWELL, |
285 | .has_l3_dpf = 1, | 302 | .has_l3_dpf = 1, |
286 | }; | 303 | }; |
287 | 304 | ||
@@ -289,26 +306,28 @@ static const struct intel_device_info intel_haswell_info = { | |||
289 | HSW_FEATURES, \ | 306 | HSW_FEATURES, \ |
290 | BDW_COLORS, \ | 307 | BDW_COLORS, \ |
291 | .has_logical_ring_contexts = 1, \ | 308 | .has_logical_ring_contexts = 1, \ |
309 | .has_full_48bit_ppgtt = 1, \ | ||
292 | .has_64bit_reloc = 1 | 310 | .has_64bit_reloc = 1 |
293 | 311 | ||
294 | static const struct intel_device_info intel_broadwell_info = { | 312 | static const struct intel_device_info intel_broadwell_info = { |
295 | BDW_FEATURES, | 313 | BDW_FEATURES, |
296 | .gen = 8, | 314 | .gen = 8, |
297 | .is_broadwell = 1, | 315 | .platform = INTEL_BROADWELL, |
298 | }; | 316 | }; |
299 | 317 | ||
300 | static const struct intel_device_info intel_broadwell_gt3_info = { | 318 | static const struct intel_device_info intel_broadwell_gt3_info = { |
301 | BDW_FEATURES, | 319 | BDW_FEATURES, |
302 | .gen = 8, | 320 | .gen = 8, |
303 | .is_broadwell = 1, | 321 | .platform = INTEL_BROADWELL, |
304 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | 322 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, |
305 | }; | 323 | }; |
306 | 324 | ||
307 | static const struct intel_device_info intel_cherryview_info = { | 325 | static const struct intel_device_info intel_cherryview_info = { |
308 | .gen = 8, .num_pipes = 3, | 326 | .gen = 8, .num_pipes = 3, |
309 | .has_hotplug = 1, | 327 | .has_hotplug = 1, |
328 | .is_lp = 1, | ||
310 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | 329 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
311 | .is_cherryview = 1, | 330 | .platform = INTEL_CHERRYVIEW, |
312 | .has_64bit_reloc = 1, | 331 | .has_64bit_reloc = 1, |
313 | .has_psr = 1, | 332 | .has_psr = 1, |
314 | .has_runtime_pm = 1, | 333 | .has_runtime_pm = 1, |
@@ -318,6 +337,8 @@ static const struct intel_device_info intel_cherryview_info = { | |||
318 | .has_hw_contexts = 1, | 337 | .has_hw_contexts = 1, |
319 | .has_logical_ring_contexts = 1, | 338 | .has_logical_ring_contexts = 1, |
320 | .has_gmch_display = 1, | 339 | .has_gmch_display = 1, |
340 | .has_aliasing_ppgtt = 1, | ||
341 | .has_full_ppgtt = 1, | ||
321 | .display_mmio_offset = VLV_DISPLAY_BASE, | 342 | .display_mmio_offset = VLV_DISPLAY_BASE, |
322 | GEN_CHV_PIPEOFFSETS, | 343 | GEN_CHV_PIPEOFFSETS, |
323 | CURSOR_OFFSETS, | 344 | CURSOR_OFFSETS, |
@@ -326,7 +347,7 @@ static const struct intel_device_info intel_cherryview_info = { | |||
326 | 347 | ||
327 | static const struct intel_device_info intel_skylake_info = { | 348 | static const struct intel_device_info intel_skylake_info = { |
328 | BDW_FEATURES, | 349 | BDW_FEATURES, |
329 | .is_skylake = 1, | 350 | .platform = INTEL_SKYLAKE, |
330 | .gen = 9, | 351 | .gen = 9, |
331 | .has_csr = 1, | 352 | .has_csr = 1, |
332 | .has_guc = 1, | 353 | .has_guc = 1, |
@@ -335,7 +356,7 @@ static const struct intel_device_info intel_skylake_info = { | |||
335 | 356 | ||
336 | static const struct intel_device_info intel_skylake_gt3_info = { | 357 | static const struct intel_device_info intel_skylake_gt3_info = { |
337 | BDW_FEATURES, | 358 | BDW_FEATURES, |
338 | .is_skylake = 1, | 359 | .platform = INTEL_SKYLAKE, |
339 | .gen = 9, | 360 | .gen = 9, |
340 | .has_csr = 1, | 361 | .has_csr = 1, |
341 | .has_guc = 1, | 362 | .has_guc = 1, |
@@ -343,36 +364,50 @@ static const struct intel_device_info intel_skylake_gt3_info = { | |||
343 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, | 364 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, |
344 | }; | 365 | }; |
345 | 366 | ||
367 | #define GEN9_LP_FEATURES \ | ||
368 | .gen = 9, \ | ||
369 | .is_lp = 1, \ | ||
370 | .has_hotplug = 1, \ | ||
371 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ | ||
372 | .num_pipes = 3, \ | ||
373 | .has_64bit_reloc = 1, \ | ||
374 | .has_ddi = 1, \ | ||
375 | .has_fpga_dbg = 1, \ | ||
376 | .has_fbc = 1, \ | ||
377 | .has_runtime_pm = 1, \ | ||
378 | .has_pooled_eu = 0, \ | ||
379 | .has_csr = 1, \ | ||
380 | .has_resource_streamer = 1, \ | ||
381 | .has_rc6 = 1, \ | ||
382 | .has_dp_mst = 1, \ | ||
383 | .has_gmbus_irq = 1, \ | ||
384 | .has_hw_contexts = 1, \ | ||
385 | .has_logical_ring_contexts = 1, \ | ||
386 | .has_guc = 1, \ | ||
387 | .has_decoupled_mmio = 1, \ | ||
388 | .has_aliasing_ppgtt = 1, \ | ||
389 | .has_full_ppgtt = 1, \ | ||
390 | .has_full_48bit_ppgtt = 1, \ | ||
391 | GEN_DEFAULT_PIPEOFFSETS, \ | ||
392 | IVB_CURSOR_OFFSETS, \ | ||
393 | BDW_COLORS | ||
394 | |||
346 | static const struct intel_device_info intel_broxton_info = { | 395 | static const struct intel_device_info intel_broxton_info = { |
347 | .is_broxton = 1, | 396 | GEN9_LP_FEATURES, |
348 | .gen = 9, | 397 | .platform = INTEL_BROXTON, |
349 | .has_hotplug = 1, | ||
350 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | ||
351 | .num_pipes = 3, | ||
352 | .has_64bit_reloc = 1, | ||
353 | .has_ddi = 1, | ||
354 | .has_fpga_dbg = 1, | ||
355 | .has_fbc = 1, | ||
356 | .has_runtime_pm = 1, | ||
357 | .has_pooled_eu = 0, | ||
358 | .has_csr = 1, | ||
359 | .has_resource_streamer = 1, | ||
360 | .has_rc6 = 1, | ||
361 | .has_dp_mst = 1, | ||
362 | .has_gmbus_irq = 1, | ||
363 | .has_hw_contexts = 1, | ||
364 | .has_logical_ring_contexts = 1, | ||
365 | .has_guc = 1, | ||
366 | .has_decoupled_mmio = 1, | ||
367 | .ddb_size = 512, | 398 | .ddb_size = 512, |
368 | GEN_DEFAULT_PIPEOFFSETS, | 399 | }; |
369 | IVB_CURSOR_OFFSETS, | 400 | |
370 | BDW_COLORS, | 401 | static const struct intel_device_info intel_geminilake_info = { |
402 | GEN9_LP_FEATURES, | ||
403 | .platform = INTEL_GEMINILAKE, | ||
404 | .is_alpha_support = 1, | ||
405 | .ddb_size = 1024, | ||
371 | }; | 406 | }; |
372 | 407 | ||
373 | static const struct intel_device_info intel_kabylake_info = { | 408 | static const struct intel_device_info intel_kabylake_info = { |
374 | BDW_FEATURES, | 409 | BDW_FEATURES, |
375 | .is_kabylake = 1, | 410 | .platform = INTEL_KABYLAKE, |
376 | .gen = 9, | 411 | .gen = 9, |
377 | .has_csr = 1, | 412 | .has_csr = 1, |
378 | .has_guc = 1, | 413 | .has_guc = 1, |
@@ -381,7 +416,7 @@ static const struct intel_device_info intel_kabylake_info = { | |||
381 | 416 | ||
382 | static const struct intel_device_info intel_kabylake_gt3_info = { | 417 | static const struct intel_device_info intel_kabylake_gt3_info = { |
383 | BDW_FEATURES, | 418 | BDW_FEATURES, |
384 | .is_kabylake = 1, | 419 | .platform = INTEL_KABYLAKE, |
385 | .gen = 9, | 420 | .gen = 9, |
386 | .has_csr = 1, | 421 | .has_csr = 1, |
387 | .has_guc = 1, | 422 | .has_guc = 1, |
@@ -397,7 +432,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = { | |||
397 | */ | 432 | */ |
398 | static const struct pci_device_id pciidlist[] = { | 433 | static const struct pci_device_id pciidlist[] = { |
399 | INTEL_I830_IDS(&intel_i830_info), | 434 | INTEL_I830_IDS(&intel_i830_info), |
400 | INTEL_I845G_IDS(&intel_845g_info), | 435 | INTEL_I845G_IDS(&intel_i845g_info), |
401 | INTEL_I85X_IDS(&intel_i85x_info), | 436 | INTEL_I85X_IDS(&intel_i85x_info), |
402 | INTEL_I865G_IDS(&intel_i865g_info), | 437 | INTEL_I865G_IDS(&intel_i865g_info), |
403 | INTEL_I915G_IDS(&intel_i915g_info), | 438 | INTEL_I915G_IDS(&intel_i915g_info), |
@@ -421,12 +456,14 @@ static const struct pci_device_id pciidlist[] = { | |||
421 | INTEL_VLV_IDS(&intel_valleyview_info), | 456 | INTEL_VLV_IDS(&intel_valleyview_info), |
422 | INTEL_BDW_GT12_IDS(&intel_broadwell_info), | 457 | INTEL_BDW_GT12_IDS(&intel_broadwell_info), |
423 | INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), | 458 | INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), |
459 | INTEL_BDW_RSVD_IDS(&intel_broadwell_info), | ||
424 | INTEL_CHV_IDS(&intel_cherryview_info), | 460 | INTEL_CHV_IDS(&intel_cherryview_info), |
425 | INTEL_SKL_GT1_IDS(&intel_skylake_info), | 461 | INTEL_SKL_GT1_IDS(&intel_skylake_info), |
426 | INTEL_SKL_GT2_IDS(&intel_skylake_info), | 462 | INTEL_SKL_GT2_IDS(&intel_skylake_info), |
427 | INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), | 463 | INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), |
428 | INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), | 464 | INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), |
429 | INTEL_BXT_IDS(&intel_broxton_info), | 465 | INTEL_BXT_IDS(&intel_broxton_info), |
466 | INTEL_GLK_IDS(&intel_geminilake_info), | ||
430 | INTEL_KBL_GT1_IDS(&intel_kabylake_info), | 467 | INTEL_KBL_GT1_IDS(&intel_kabylake_info), |
431 | INTEL_KBL_GT2_IDS(&intel_kabylake_info), | 468 | INTEL_KBL_GT2_IDS(&intel_kabylake_info), |
432 | INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), | 469 | INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), |
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c new file mode 100644 index 000000000000..a1b7eec58be2 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_perf.c | |||
@@ -0,0 +1,2096 @@ | |||
1 | /* | ||
2 | * Copyright © 2015-2016 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Robert Bragg <robert@sixbynine.org> | ||
25 | */ | ||
26 | |||
27 | |||
28 | /** | ||
29 | * DOC: i915 Perf Overview | ||
30 | * | ||
31 | * Gen graphics supports a large number of performance counters that can help | ||
32 | * driver and application developers understand and optimize their use of the | ||
33 | * GPU. | ||
34 | * | ||
35 | * This i915 perf interface enables userspace to configure and open a file | ||
36 | * descriptor representing a stream of GPU metrics which can then be read() as | ||
37 | * a stream of sample records. | ||
38 | * | ||
39 | * The interface is particularly suited to exposing buffered metrics that are | ||
40 | * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU. | ||
41 | * | ||
42 | * Streams representing a single context are accessible to applications with a | ||
43 | * corresponding drm file descriptor, such that OpenGL can use the interface | ||
44 | * without special privileges. Access to system-wide metrics requires root | ||
45 | * privileges by default, unless changed via the dev.i915.perf_stream_paranoid | ||
46 | * sysctl option. | ||
47 | * | ||
48 | */ | ||
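For illustration only (not part of this patch), a minimal userspace sketch of opening such a stream could look as follows. The DRM_I915_PERF_PROP_* properties and DRM_IOCTL_I915_PERF_OPEN ioctl are the uAPI added alongside this file; the metrics set ID shown is a placeholder, since real IDs are advertised via sysfs:

#include <stdint.h>
#include <sys/ioctl.h>
#include <i915_drm.h>	/* libdrm uAPI headers */

/* Illustrative sketch: open an i915 perf OA stream. The metrics set
 * ID (1) is only a placeholder for a value discovered via sysfs.
 */
static int open_oa_stream(int drm_fd)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder ID */
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,	/* ~10ms period on HSW */
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	/* On success this returns a stream fd for read()/poll() */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}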
49 | |||
50 | /** | ||
51 | * DOC: i915 Perf History and Comparison with Core Perf | ||
52 | * | ||
53 | * The interface was initially inspired by the core Perf infrastructure but | ||
54 | * some notable differences are: | ||
55 | * | ||
56 | * i915 perf file descriptors represent a "stream" instead of an "event"; a | ||
57 | * perf event primarily corresponds to a single 64bit value, while a stream | ||
58 | * might sample sets of tightly-coupled counters, depending on the | ||
59 | * configuration. For example the Gen OA unit isn't designed to support | ||
60 | * orthogonal configurations of individual counters; it's configured for a set | ||
61 | * of related counters. Samples for an i915 perf stream capturing OA metrics | ||
62 | * will include a set of counter values packed in a compact HW specific format. | ||
63 | * The OA unit supports a number of different packing formats which can be | ||
64 | * selected by the user opening the stream. Perf has support for grouping | ||
65 | * events, but each event in the group is configured, validated and | ||
66 | * authenticated individually with separate system calls. | ||
67 | * | ||
68 | * i915 perf stream configurations are provided as an array of u64 (key,value) | ||
69 | * pairs, instead of a fixed struct with multiple miscellaneous config members, | ||
70 | * interleaved with event-type specific members. | ||
71 | * | ||
72 | * i915 perf doesn't support exposing metrics via an mmap'd circular buffer. | ||
73 | * The supported metrics are being written to memory by the GPU unsynchronized | ||
74 | * with the CPU, using HW specific packing formats for counter sets. Sometimes | ||
75 | * the constraints on HW configuration require reports to be filtered before | ||
76 | * they would be acceptable to expose to unprivileged applications - to hide | ||
77 | * the metrics of other processes/contexts. For these use cases a read() based | ||
78 | * interface is a good fit, and provides an opportunity to filter data as it | ||
79 | * gets copied from the GPU mapped buffers to userspace buffers. | ||
80 | * | ||
81 | * | ||
82 | * Issues hit with first prototype based on Core Perf | ||
83 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
84 | * | ||
85 | * The first prototype of this driver was based on the core perf | ||
86 | * infrastructure, and while we did make that mostly work, with some changes to | ||
87 | * perf, we found we were breaking or working around too many assumptions baked | ||
88 | * into perf's currently cpu centric design. | ||
89 | * | ||
90 | * In the end we didn't see a clear benefit to making perf's implementation and | ||
91 | * interface more complex by changing design assumptions while we knew we still | ||
92 | * wouldn't be able to use any existing perf based userspace tools. | ||
93 | * | ||
94 | * Also considering the Gen specific nature of the Observability hardware and | ||
95 | * how userspace will sometimes need to combine i915 perf OA metrics with | ||
96 | * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're | ||
97 | * expecting the interface to be used by a platform specific userspace such as | ||
98 | * OpenGL or tools. This is to say, we aren't inherently missing out on having | ||
99 | * a standard vendor/architecture agnostic interface by not using perf. | ||
100 | * | ||
101 | * | ||
102 | * For posterity, in case we might re-visit trying to adapt core perf to be | ||
103 | * better suited to exposing i915 metrics these were the main pain points we | ||
104 | * hit: | ||
105 | * | ||
106 | * - The perf based OA PMU driver broke some significant design assumptions: | ||
107 | * | ||
108 | * Existing perf pmus are used for profiling work on a cpu and we were | ||
109 | * introducing the idea of _IS_DEVICE pmus with different security | ||
110 | * implications, the need to fake cpu-related data (such as user/kernel | ||
111 | * registers) to fit with perf's current design, and adding _DEVICE records | ||
112 | * as a way to forward device-specific status records. | ||
113 | * | ||
114 | * The OA unit writes reports of counters into a circular buffer, without | ||
115 | * involvement from the CPU, making our PMU driver the first of a kind. | ||
116 | * | ||
117 | * Given the way we were periodically forwarding data from the GPU-mapped OA | ||
118 | * buffer to perf's buffer, those bursts of sample writes looked to perf like | ||
119 | * we were sampling too fast and so we had to subvert its throttling checks. | ||
120 | * | ||
121 | * Perf supports groups of counters and allows those to be read via | ||
122 | * transactions internally but transactions currently seem designed to be | ||
123 | * explicitly initiated from the cpu (say in response to a userspace read()) | ||
124 | * and while we could pull a report out of the OA buffer we can't | ||
125 | * trigger a report from the cpu on demand. | ||
126 | * | ||
127 | * Related to being report based, the OA counters are configured in HW as a | ||
128 | * set while perf generally expects counter configurations to be orthogonal. | ||
129 | * Although counters can be associated with a group leader as they are | ||
130 | * opened, there's no clear precedent for being able to provide group-wide | ||
131 | * configuration attributes (for example we want to let userspace choose the | ||
132 | * OA unit report format used to capture all counters in a set, or specify a | ||
133 | * GPU context to filter metrics on). We avoided using perf's grouping | ||
134 | * feature and forwarded OA reports to userspace via perf's 'raw' sample | ||
135 | * field. This suited our userspace well considering how coupled the counters | ||
136 | * are when dealing with normalizing. It would be inconvenient to split | ||
137 | * counters up into separate events, only to require userspace to recombine | ||
138 | * them. For Mesa it's also convenient to be forwarded raw, periodic reports | ||
139 | * for combining with the side-band raw reports it captures using | ||
140 | * MI_REPORT_PERF_COUNT commands. | ||
141 | * | ||
142 | * - As a side note on perf's grouping feature, there was also some concern | ||
143 | * that using PERF_FORMAT_GROUP as a way to pack together counter values | ||
144 | * would quite drastically inflate our sample sizes, which would likely | ||
145 | * lower the effective sampling resolutions we could use when the available | ||
146 | * memory bandwidth is limited. | ||
147 | * | ||
148 | * With the OA unit's report formats, counters are packed together as 32 | ||
149 | * or 40bit values, with the largest report size being 256 bytes. | ||
150 | * | ||
151 | * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a | ||
152 | * documented ordering to the values, implying PERF_FORMAT_ID must also be | ||
153 | * used to add a 64bit ID before each value; giving 16 bytes per counter. | ||
154 | * | ||
155 | * Related to counter orthogonality; we can't time share the OA unit, while | ||
156 | * event scheduling is a central design idea within perf for allowing | ||
157 | * userspace to open + enable more events than can be configured in HW at any | ||
158 | * one time. The OA unit is not designed to allow re-configuration while in | ||
159 | * use. We can't reconfigure the OA unit without losing internal OA unit | ||
160 | * state which we can't access explicitly to save and restore. Reconfiguring | ||
161 | * the OA unit is also relatively slow, involving ~100 register writes. From | ||
162 | * userspace Mesa also depends on a stable OA configuration when emitting | ||
163 | * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be | ||
164 | * disabled while there are outstanding MI_RPC commands lest we hang the | ||
165 | * command streamer. | ||
166 | * | ||
167 | * The contents of sample records aren't extensible by device drivers (i.e. | ||
168 | * the sample_type bits). As an example, Sourab Gupta had been looking to | ||
169 | * attach GPU timestamps to our OA samples. We were shoehorning OA reports | ||
170 | * into sample records by using the 'raw' field, but it's tricky to pack more | ||
171 | * than one thing into this field because events/core.c currently only lets a | ||
172 | * pmu give a single raw data pointer plus len which will be copied into the | ||
173 | * ring buffer. To include more than the OA report we'd have to copy the | ||
174 | * report into an intermediate larger buffer. I'd been considering allowing a | ||
175 | * vector of data+len values to be specified for copying the raw data, but | ||
176 | * it felt like a kludge to be using the raw field for this purpose. | ||
177 | * | ||
178 | * - It felt like our perf based PMU was making some technical compromises | ||
179 | * just for the sake of using perf: | ||
180 | * | ||
181 | * perf_event_open() requires events to either relate to a pid or a specific | ||
182 | * cpu core, while our device pmu related to neither. Events opened with a | ||
183 | * pid will be automatically enabled/disabled according to the scheduling of | ||
184 | * that process - so not appropriate for us. When an event is related to a | ||
185 | * cpu id, perf ensures pmu methods will be invoked via an inter-processor | ||
186 | * interrupt on that core. To avoid invasive changes our userspace opened OA | ||
187 | * perf events for a specific cpu. This was workable but it meant the | ||
188 | * majority of the OA driver ran in atomic context, including all OA report | ||
189 | * forwarding, which wasn't really necessary in our case and seems to make | ||
190 | * our locking requirements somewhat complex as we handled the interaction | ||
191 | * with the rest of the i915 driver. | ||
192 | */ | ||
193 | |||
194 | #include <linux/anon_inodes.h> | ||
195 | #include <linux/sizes.h> | ||
196 | |||
197 | #include "i915_drv.h" | ||
198 | #include "i915_oa_hsw.h" | ||
199 | |||
200 | /* HW requires this to be a power of two, between 128k and 16M, though the | ||
201 | * driver is currently generally designed assuming the largest 16M size is | ||
202 | * used such that the overflow cases are unlikely in normal operation. | ||
203 | */ | ||
204 | #define OA_BUFFER_SIZE SZ_16M | ||
205 | |||
206 | #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1)) | ||
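Since OA_BUFFER_SIZE is a power of two, the subtract-and-mask in OA_TAKEN() handles wraparound naturally; a worked example:

/* Worked example (illustrative, not part of the patch): head just
 * before the end of a 16M buffer, tail already wrapped:
 *
 *   head = 16M - 64, tail = 128
 *   OA_TAKEN(128, 16M - 64) = (128 - (16M - 64)) & (16M - 1) = 192
 *
 * i.e. three 64-byte reports are available, spanning the wrap point.
 */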
207 | |||
208 | /* There's a HW race condition between OA unit tail pointer register updates and | ||
209 | * writes to memory whereby the tail pointer can sometimes get ahead of what's | ||
210 | * been written out to the OA buffer so far. | ||
211 | * | ||
212 | * Although this can be observed explicitly by checking for a zeroed report-id | ||
213 | * field in tail reports, it seems preferable to account for this earlier e.g. | ||
214 | * as part of the _oa_buffer_is_empty checks to minimize -EAGAIN polling cycles | ||
215 | * in this situation. | ||
216 | * | ||
217 | * To give time for the most recent reports to land before they may be copied to | ||
218 | * userspace, the driver operates as if the tail pointer effectively lags behind | ||
219 | * the HW tail pointer by 'tail_margin' bytes. The margin in bytes is calculated | ||
220 | * based on this constant in nanoseconds, the current OA sampling exponent | ||
221 | * and current report size. | ||
222 | * | ||
223 | * There is also a fallback check while reading to simply skip over reports with | ||
224 | * a zeroed report-id. | ||
225 | */ | ||
226 | #define OA_TAIL_MARGIN_NSEC 100000ULL | ||
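The margin-in-bytes calculation appears later in the file; as a hedged sketch of the arithmetic described above (an assumed shape, not a verbatim quote of that code), it amounts to reserving room for every report that could land within the margin window:

/* Sketch only (assumed shape of the calculation described above):
 * the number of reports that can be produced within the margin
 * window at the current sampling period, times the report size.
 */
static u32 oa_tail_margin_bytes(u64 period_ns, u32 report_size)
{
	return ((OA_TAIL_MARGIN_NSEC / period_ns) + 1) * report_size;
}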
227 | |||
228 | /* frequency for checking whether the OA unit has written new reports to the | ||
229 | * circular OA buffer... | ||
230 | */ | ||
231 | #define POLL_FREQUENCY 200 | ||
232 | #define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) | ||
233 | |||
234 | /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ | ||
235 | static int zero; | ||
236 | static int one = 1; | ||
237 | static u32 i915_perf_stream_paranoid = true; | ||
238 | |||
239 | /* The maximum exponent the hardware accepts is 63 (essentially it selects one | ||
240 | * of the 64bit timestamp bits to trigger reports from) but there's currently | ||
241 | * no known use case for sampling as infrequently as once per 47 thousand years. | ||
242 | * | ||
243 | * Since the timestamps included in OA reports are only 32bits it seems | ||
244 | * reasonable to limit the OA exponent where it's still possible to account for | ||
245 | * overflow in OA report timestamps. | ||
246 | */ | ||
247 | #define OA_EXPONENT_MAX 31 | ||
248 | |||
249 | #define INVALID_CTX_ID 0xffffffff | ||
250 | |||
251 | |||
252 | /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate | ||
253 | * | ||
254 | * 160ns is the smallest sampling period we can theoretically program the OA | ||
255 | * unit with on Haswell, corresponding to 6.25MHz. | ||
256 | */ | ||
257 | static int oa_sample_rate_hard_limit = 6250000; | ||
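For a concrete sense of these limits (assuming the 12.5MHz Haswell timestamp frequency used elsewhere in this file):

/* Worked example (illustrative): the OA period is derived from a
 * timestamp bit, so period_ns = (2 << exponent) * 80ns on Haswell
 * with its 80ns (12.5MHz) tick:
 *
 *   exponent 0  -> 160ns (6.25MHz, the hard limit above)
 *   exponent 31 -> ~5.7 minutes between reports (OA_EXPONENT_MAX)
 *   exponent 63 -> the "once per 47 thousand years" case
 */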
258 | |||
259 | /* Theoretically we can program the OA unit to sample every 160ns but don't | ||
260 | * allow that by default unless root... | ||
261 | * | ||
262 | * The default threshold of 100000Hz is based on perf's similar | ||
263 | * kernel.perf_event_max_sample_rate sysctl parameter. | ||
264 | */ | ||
265 | static u32 i915_oa_max_sample_rate = 100000; | ||
266 | |||
267 | /* XXX: beware, if future OA HW adds new report formats, that the current | ||
268 | * code assumes all reports have a power-of-two size and ~(size - 1) can | ||
269 | * be used as a mask to align the OA tail pointer. | ||
270 | */ | ||
271 | static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { | ||
272 | [I915_OA_FORMAT_A13] = { 0, 64 }, | ||
273 | [I915_OA_FORMAT_A29] = { 1, 128 }, | ||
274 | [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, | ||
275 | /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */ | ||
276 | [I915_OA_FORMAT_B4_C8] = { 4, 64 }, | ||
277 | [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, | ||
278 | [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, | ||
279 | [I915_OA_FORMAT_C4_B8] = { 7, 64 }, | ||
280 | }; | ||
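The power-of-two constraint is what makes mask-based tail alignment cheap; for example:

/* Illustration (not part of the patch): with report_size = 64 the
 * tail can be aligned with a simple mask, e.g.
 *   0x12345 & ~(64 - 1) = 0x12340
 * A 192-byte format (A29_B8_C8) has no such mask, and 16M is not a
 * multiple of 192, which is why it's disallowed above.
 */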
281 | |||
282 | #define SAMPLE_OA_REPORT (1<<0) | ||
283 | |||
284 | /** | ||
285 | * struct perf_open_properties - for validated properties given to open a stream | ||
286 | * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags | ||
287 | * @single_context: Whether a single or all gpu contexts should be monitored | ||
288 | * @ctx_handle: A gem ctx handle for use with @single_context | ||
289 | * @metrics_set: An ID for an OA unit metric set advertised via sysfs | ||
290 | * @oa_format: An OA unit HW report format | ||
291 | * @oa_periodic: Whether to enable periodic OA unit sampling | ||
292 | * @oa_period_exponent: The OA unit sampling period is derived from this | ||
293 | * | ||
294 | * As read_properties_unlocked() enumerates and validates the properties given | ||
295 | * to open a stream of metrics the configuration is built up in the structure | ||
296 | * which starts out zero initialized. | ||
297 | */ | ||
298 | struct perf_open_properties { | ||
299 | u32 sample_flags; | ||
300 | |||
301 | u64 single_context:1; | ||
302 | u64 ctx_handle; | ||
303 | |||
304 | /* OA sampling state */ | ||
305 | int metrics_set; | ||
306 | int oa_format; | ||
307 | bool oa_periodic; | ||
308 | int oa_period_exponent; | ||
309 | }; | ||
310 | |||
311 | /* NB: This is either called via fops or the poll check hrtimer (atomic ctx) | ||
312 | * | ||
313 | * It's safe to read OA config state here unlocked, assuming that this is only | ||
314 | * called while the stream is enabled, while the global OA configuration can't | ||
315 | * be modified. | ||
316 | * | ||
317 | * Note: we don't lock around the head/tail reads even though there's the slim | ||
318 | * possibility of read() fop errors forcing a re-init of the OA buffer | ||
319 | * pointers. A race here could result in a false positive !empty status which | ||
320 | * is acceptable. | ||
321 | */ | ||
322 | static bool gen7_oa_buffer_is_empty_fop_unlocked(struct drm_i915_private *dev_priv) | ||
323 | { | ||
324 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | ||
325 | u32 oastatus2 = I915_READ(GEN7_OASTATUS2); | ||
326 | u32 oastatus1 = I915_READ(GEN7_OASTATUS1); | ||
327 | u32 head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK; | ||
328 | u32 tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK; | ||
329 | |||
330 | return OA_TAKEN(tail, head) < | ||
331 | dev_priv->perf.oa.tail_margin + report_size; | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * append_oa_status - Appends a status record to a userspace read() buffer. | ||
336 | * @stream: An i915-perf stream opened for OA metrics | ||
337 | * @buf: destination buffer given by userspace | ||
338 | * @count: the number of bytes userspace wants to read | ||
339 | * @offset: (inout): the current position for writing into @buf | ||
340 | * @type: The kind of status to report to userspace | ||
341 | * | ||
342 | * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`) | ||
343 | * into the userspace read() buffer. | ||
344 | * | ||
345 | * The @buf @offset will only be updated on success. | ||
346 | * | ||
347 | * Returns: 0 on success, negative error code on failure. | ||
348 | */ | ||
349 | static int append_oa_status(struct i915_perf_stream *stream, | ||
350 | char __user *buf, | ||
351 | size_t count, | ||
352 | size_t *offset, | ||
353 | enum drm_i915_perf_record_type type) | ||
354 | { | ||
355 | struct drm_i915_perf_record_header header = { type, 0, sizeof(header) }; | ||
356 | |||
357 | if ((count - *offset) < header.size) | ||
358 | return -ENOSPC; | ||
359 | |||
360 | if (copy_to_user(buf + *offset, &header, sizeof(header))) | ||
361 | return -EFAULT; | ||
362 | |||
363 | (*offset) += header.size; | ||
364 | |||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * append_oa_sample - Copies single OA report into userspace read() buffer. | ||
370 | * @stream: An i915-perf stream opened for OA metrics | ||
371 | * @buf: destination buffer given by userspace | ||
372 | * @count: the number of bytes userspace wants to read | ||
373 | * @offset: (inout): the current position for writing into @buf | ||
374 | * @report: A single OA report to (optionally) include as part of the sample | ||
375 | * | ||
376 | * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*` | ||
377 | * properties when opening a stream, tracked as `stream->sample_flags`. This | ||
378 | * function copies the requested components of a single sample to the given | ||
379 | * read() @buf. | ||
380 | * | ||
381 | * The @buf @offset will only be updated on success. | ||
382 | * | ||
383 | * Returns: 0 on success, negative error code on failure. | ||
384 | */ | ||
385 | static int append_oa_sample(struct i915_perf_stream *stream, | ||
386 | char __user *buf, | ||
387 | size_t count, | ||
388 | size_t *offset, | ||
389 | const u8 *report) | ||
390 | { | ||
391 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
392 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | ||
393 | struct drm_i915_perf_record_header header; | ||
394 | u32 sample_flags = stream->sample_flags; | ||
395 | |||
396 | header.type = DRM_I915_PERF_RECORD_SAMPLE; | ||
397 | header.pad = 0; | ||
398 | header.size = stream->sample_size; | ||
399 | |||
400 | if ((count - *offset) < header.size) | ||
401 | return -ENOSPC; | ||
402 | |||
403 | buf += *offset; | ||
404 | if (copy_to_user(buf, &header, sizeof(header))) | ||
405 | return -EFAULT; | ||
406 | buf += sizeof(header); | ||
407 | |||
408 | if (sample_flags & SAMPLE_OA_REPORT) { | ||
409 | if (copy_to_user(buf, report, report_size)) | ||
410 | return -EFAULT; | ||
411 | } | ||
412 | |||
413 | (*offset) += header.size; | ||
414 | |||
415 | return 0; | ||
416 | } | ||
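On the userspace side, the records produced by append_oa_status()/append_oa_sample() are self-describing; a hedged sketch (not part of this patch) of walking a read() buffer:

#include <stddef.h>
#include <stdint.h>
#include <i915_drm.h>	/* libdrm uAPI headers */

/* Illustrative sketch: iterate records returned by read() on a
 * stream fd. header->size includes the header itself, and a sample's
 * OA report (when requested) immediately follows the header.
 */
static void for_each_record(const uint8_t *buf, size_t len)
{
	size_t offset = 0;

	while (offset + sizeof(struct drm_i915_perf_record_header) <= len) {
		const struct drm_i915_perf_record_header *header =
			(const void *)(buf + offset);

		if (header->size == 0 || offset + header->size > len)
			break; /* malformed or truncated record */

		if (header->type == DRM_I915_PERF_RECORD_SAMPLE) {
			const uint8_t *oa_report =
				buf + offset + sizeof(*header);
			(void)oa_report; /* decode per the chosen OA format */
		}

		offset += header->size;
	}
}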
417 | |||
418 | /** | ||
419 | * gen7_append_oa_reports - Copies all buffered OA reports into userspace read() buffer. | ||
420 | * @stream: An i915-perf stream opened for OA metrics | ||
421 | * @buf: destination buffer given by userspace | ||
422 | * @count: the number of bytes userspace wants to read | ||
423 | * @offset: (inout): the current position for writing into @buf | ||
424 | * @head_ptr: (inout): the current oa buffer cpu read position | ||
425 | * @tail: the current oa buffer gpu write position | ||
426 | * | ||
427 | * Notably any error condition resulting in a short read (-%ENOSPC or | ||
428 | * -%EFAULT) will be returned even though one or more records may | ||
429 | * have been successfully copied. In this case it's up to the caller | ||
430 | * to decide if the error should be squashed before returning to | ||
431 | * userspace. | ||
432 | * | ||
433 | * Note: reports are consumed from the head, and appended to the | ||
434 | * tail, so the head chases the tail?... If you think that's mad | ||
435 | * and back-to-front you're not alone, but this follows the | ||
436 | * Gen PRM naming convention. | ||
437 | * | ||
438 | * Returns: 0 on success, negative error code on failure. | ||
439 | */ | ||
440 | static int gen7_append_oa_reports(struct i915_perf_stream *stream, | ||
441 | char __user *buf, | ||
442 | size_t count, | ||
443 | size_t *offset, | ||
444 | u32 *head_ptr, | ||
445 | u32 tail) | ||
446 | { | ||
447 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
448 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | ||
449 | u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; | ||
450 | int tail_margin = dev_priv->perf.oa.tail_margin; | ||
451 | u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); | ||
452 | u32 mask = (OA_BUFFER_SIZE - 1); | ||
453 | u32 head; | ||
454 | u32 taken; | ||
455 | int ret = 0; | ||
456 | |||
457 | if (WARN_ON(!stream->enabled)) | ||
458 | return -EIO; | ||
459 | |||
460 | head = *head_ptr - gtt_offset; | ||
461 | tail -= gtt_offset; | ||
462 | |||
463 | /* The OA unit is expected to wrap the tail pointer according to the OA | ||
464 | * buffer size and since we should never write a misaligned head | ||
465 | * pointer we don't expect to read one back either... | ||
466 | */ | ||
467 | if (tail > OA_BUFFER_SIZE || head > OA_BUFFER_SIZE || | ||
468 | head % report_size) { | ||
469 | DRM_ERROR("Inconsistent OA buffer pointer (head = %u, tail = %u): force restart\n", | ||
470 | head, tail); | ||
471 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | ||
472 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | ||
473 | *head_ptr = I915_READ(GEN7_OASTATUS2) & | ||
474 | GEN7_OASTATUS2_HEAD_MASK; | ||
475 | return -EIO; | ||
476 | } | ||
477 | |||
478 | |||
479 | /* The tail pointer increases in 64 byte increments, not in report_size | ||
480 | * steps... | ||
481 | */ | ||
482 | tail &= ~(report_size - 1); | ||
483 | |||
484 | /* Move the tail pointer back by the current tail_margin to account for | ||
485 | * the possibility that the latest reports may not have really landed | ||
486 | * in memory yet... | ||
487 | */ | ||
488 | |||
489 | if (OA_TAKEN(tail, head) < report_size + tail_margin) | ||
490 | return -EAGAIN; | ||
491 | |||
492 | tail -= tail_margin; | ||
493 | tail &= mask; | ||
494 | |||
495 | for (/* none */; | ||
496 | (taken = OA_TAKEN(tail, head)); | ||
497 | head = (head + report_size) & mask) { | ||
498 | u8 *report = oa_buf_base + head; | ||
499 | u32 *report32 = (void *)report; | ||
500 | |||
501 | /* All the report sizes factor neatly into the buffer | ||
502 | * size so we never expect to see a report split | ||
503 | * between the beginning and end of the buffer. | ||
504 | * | ||
505 | * Given the initial alignment check a misalignment | ||
506 | * here would imply a driver bug that would result | ||
507 | * in an overrun. | ||
508 | */ | ||
509 | if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { | ||
510 | DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); | ||
511 | break; | ||
512 | } | ||
513 | |||
514 | /* The report-ID field for periodic samples includes | ||
515 | * some undocumented flags related to what triggered | ||
516 | * the report and is never expected to be zero so we | ||
517 | * can check that the report isn't invalid before | ||
518 | * copying it to userspace... | ||
519 | */ | ||
520 | if (report32[0] == 0) { | ||
521 | DRM_NOTE("Skipping spurious, invalid OA report\n"); | ||
522 | continue; | ||
523 | } | ||
524 | |||
525 | ret = append_oa_sample(stream, buf, count, offset, report); | ||
526 | if (ret) | ||
527 | break; | ||
528 | |||
529 | /* The above report-id field sanity check is based on | ||
530 | * the assumption that the OA buffer is initially | ||
531 | * zeroed and we reset the field after copying so the | ||
532 | * check is still meaningful once old reports start | ||
533 | * being overwritten. | ||
534 | */ | ||
535 | report32[0] = 0; | ||
536 | } | ||
537 | |||
538 | *head_ptr = gtt_offset + head; | ||
539 | |||
540 | return ret; | ||
541 | } | ||
542 | |||
543 | /** | ||
544 | * gen7_oa_read - copy status records then buffered OA reports | ||
545 | * @stream: An i915-perf stream opened for OA metrics | ||
546 | * @buf: destination buffer given by userspace | ||
547 | * @count: the number of bytes userspace wants to read | ||
548 | * @offset: (inout): the current position for writing into @buf | ||
549 | * | ||
550 | * Checks Gen 7 specific OA unit status registers and if necessary appends | ||
551 | * corresponding status records for userspace (such as for a buffer full | ||
552 | * condition) and then initiate appending any buffered OA reports. | ||
553 | * | ||
554 | * Updates @offset according to the number of bytes successfully copied into | ||
555 | * the userspace buffer. | ||
556 | * | ||
557 | * Returns: zero on success or a negative error code | ||
558 | */ | ||
559 | static int gen7_oa_read(struct i915_perf_stream *stream, | ||
560 | char __user *buf, | ||
561 | size_t count, | ||
562 | size_t *offset) | ||
563 | { | ||
564 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
565 | int report_size = dev_priv->perf.oa.oa_buffer.format_size; | ||
566 | u32 oastatus2; | ||
567 | u32 oastatus1; | ||
568 | u32 head; | ||
569 | u32 tail; | ||
570 | int ret; | ||
571 | |||
572 | if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) | ||
573 | return -EIO; | ||
574 | |||
575 | oastatus2 = I915_READ(GEN7_OASTATUS2); | ||
576 | oastatus1 = I915_READ(GEN7_OASTATUS1); | ||
577 | |||
578 | head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK; | ||
579 | tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK; | ||
580 | |||
581 | /* XXX: On Haswell we don't have a safe way to clear oastatus1 | ||
582 | * bits while the OA unit is enabled (while the tail pointer | ||
583 | * may be updated asynchronously) so we ignore status bits | ||
584 | * that have already been reported to userspace. | ||
585 | */ | ||
586 | oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1; | ||
587 | |||
588 | /* We treat OABUFFER_OVERFLOW as a significant error: | ||
589 | * | ||
590 | * - The status can be interpreted to mean that the buffer is | ||
591 | * currently full (with a higher precedence than OA_TAKEN() | ||
592 | * which will start to report a near-empty buffer after an | ||
593 | * overflow) but it's awkward that we can't clear the status | ||
594 | * on Haswell, so without a reset we won't be able to catch | ||
595 | * the state again. | ||
596 | * | ||
597 | * - Since it also implies the HW has started overwriting old | ||
598 | * reports it may also affect our sanity checks for invalid | ||
599 | * reports when copying to userspace that assume new reports | ||
600 | * are being written to cleared memory. | ||
601 | * | ||
602 | * - In the future we may want to introduce a flight recorder | ||
603 | * mode where the driver will automatically maintain a safe | ||
604 | * guard band between head/tail, avoiding this overflow | ||
605 | * condition, but we avoid the added driver complexity for | ||
606 | * now. | ||
607 | */ | ||
608 | if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) { | ||
609 | ret = append_oa_status(stream, buf, count, offset, | ||
610 | DRM_I915_PERF_RECORD_OA_BUFFER_LOST); | ||
611 | if (ret) | ||
612 | return ret; | ||
613 | |||
614 | DRM_DEBUG("OA buffer overflow: force restart\n"); | ||
615 | |||
616 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | ||
617 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | ||
618 | |||
619 | oastatus2 = I915_READ(GEN7_OASTATUS2); | ||
620 | oastatus1 = I915_READ(GEN7_OASTATUS1); | ||
621 | |||
622 | head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK; | ||
623 | tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK; | ||
624 | } | ||
625 | |||
626 | if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) { | ||
627 | ret = append_oa_status(stream, buf, count, offset, | ||
628 | DRM_I915_PERF_RECORD_OA_REPORT_LOST); | ||
629 | if (ret) | ||
630 | return ret; | ||
631 | dev_priv->perf.oa.gen7_latched_oastatus1 |= | ||
632 | GEN7_OASTATUS1_REPORT_LOST; | ||
633 | } | ||
634 | |||
635 | ret = gen7_append_oa_reports(stream, buf, count, offset, | ||
636 | &head, tail); | ||
637 | |||
638 | /* All the report sizes are a power of two and the | ||
639 | * head should always be incremented by some multiple | ||
640 | * of the report size. | ||
641 | * | ||
642 | * A warning here, but notably if we later read back a | ||
643 | * misaligned pointer we will treat that as a bug since | ||
644 | * it could lead to a buffer overrun. | ||
645 | */ | ||
646 | WARN_ONCE(head & (report_size - 1), | ||
647 | "i915: Writing misaligned OA head pointer"); | ||
648 | |||
649 | /* Note: we update the head pointer here even if an error | ||
650 | * was returned since the error may represent a short read | ||
651 | * where some reports were successfully copied. | ||
652 | */ | ||
653 | I915_WRITE(GEN7_OASTATUS2, | ||
654 | ((head & GEN7_OASTATUS2_HEAD_MASK) | | ||
655 | OA_MEM_SELECT_GGTT)); | ||
656 | |||
657 | return ret; | ||
658 | } | ||
659 | |||
660 | /** | ||
661 | * i915_oa_wait_unlocked - handles blocking IO until OA data available | ||
662 | * @stream: An i915-perf stream opened for OA metrics | ||
663 | * | ||
664 | * Called when userspace tries to read() from a blocking stream FD opened | ||
665 | * for OA metrics. It waits until the hrtimer callback finds a non-empty | ||
666 | * OA buffer and wakes us. | ||
667 | * | ||
668 | * Note: it's acceptable to have this return with some false positives | ||
669 | * since any subsequent read handling will return -EAGAIN if there isn't | ||
670 | * really data ready for userspace yet. | ||
671 | * | ||
672 | * Returns: zero on success or a negative error code | ||
673 | */ | ||
674 | static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) | ||
675 | { | ||
676 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
677 | |||
678 | /* We would wait indefinitely if periodic sampling is not enabled */ | ||
679 | if (!dev_priv->perf.oa.periodic) | ||
680 | return -EIO; | ||
681 | |||
682 | /* Note: the oa_buffer_is_empty() condition is ok to run unlocked as it | ||
683 | * just performs mmio reads of the OA buffer head + tail pointers and | ||
684 | * it's assumed we're handling some operation (such as a read()) that | ||
685 | * implies the stream can't be destroyed until completion, which in | ||
686 | * turn ensures the device + OA buffer can't disappear. | ||
687 | */ | ||
688 | return wait_event_interruptible(dev_priv->perf.oa.poll_wq, | ||
689 | !dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)); | ||
690 | } | ||
691 | |||
692 | /** | ||
693 | * i915_oa_poll_wait - call poll_wait() for an OA stream poll() | ||
694 | * @stream: An i915-perf stream opened for OA metrics | ||
695 | * @file: An i915 perf stream file | ||
696 | * @wait: poll() state table | ||
697 | * | ||
698 | * For handling userspace polling on an i915 perf stream opened for OA metrics, | ||
699 | * this starts a poll_wait with the wait queue that our hrtimer callback wakes | ||
700 | * when it sees data ready to read in the circular OA buffer. | ||
701 | */ | ||
702 | static void i915_oa_poll_wait(struct i915_perf_stream *stream, | ||
703 | struct file *file, | ||
704 | poll_table *wait) | ||
705 | { | ||
706 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
707 | |||
708 | poll_wait(file, &dev_priv->perf.oa.poll_wq, wait); | ||
709 | } | ||
710 | |||
711 | /** | ||
712 | * i915_oa_read - just calls through to &i915_oa_ops->read | ||
713 | * @stream: An i915-perf stream opened for OA metrics | ||
714 | * @buf: destination buffer given by userspace | ||
715 | * @count: the number of bytes userspace wants to read | ||
716 | * @offset: (inout): the current position for writing into @buf | ||
717 | * | ||
718 | * Updates @offset according to the number of bytes successfully copied into | ||
719 | * the userspace buffer. | ||
720 | * | ||
721 | * Returns: zero on success or a negative error code | ||
722 | */ | ||
723 | static int i915_oa_read(struct i915_perf_stream *stream, | ||
724 | char __user *buf, | ||
725 | size_t count, | ||
726 | size_t *offset) | ||
727 | { | ||
728 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
729 | |||
730 | return dev_priv->perf.oa.ops.read(stream, buf, count, offset); | ||
731 | } | ||
732 | |||
733 | /** | ||
734 | * oa_get_render_ctx_id - determine and hold ctx hw id | ||
735 | * @stream: An i915-perf stream opened for OA metrics | ||
736 | * | ||
737 | * Determine the render context hw id, and ensure it remains fixed for the | ||
738 | * lifetime of the stream. This ensures that we don't have to worry about | ||
739 | * updating the context ID in OACONTROL on the fly. | ||
740 | * | ||
741 | * Returns: zero on success or a negative error code | ||
742 | */ | ||
743 | static int oa_get_render_ctx_id(struct i915_perf_stream *stream) | ||
744 | { | ||
745 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
746 | struct intel_engine_cs *engine = dev_priv->engine[RCS]; | ||
747 | int ret; | ||
748 | |||
749 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | ||
750 | if (ret) | ||
751 | return ret; | ||
752 | |||
753 | /* As the ID is the gtt offset of the context's vma we pin | ||
754 | * the vma to ensure the ID remains fixed. | ||
755 | * | ||
756 | * NB: implied RCS engine... | ||
757 | */ | ||
758 | ret = engine->context_pin(engine, stream->ctx); | ||
759 | if (ret) | ||
760 | goto unlock; | ||
761 | |||
762 | /* Explicitly track the ID (instead of calling i915_ggtt_offset() | ||
763 | * on the fly) considering the difference with gen8+ and | ||
764 | * execlists | ||
765 | */ | ||
766 | dev_priv->perf.oa.specific_ctx_id = | ||
767 | i915_ggtt_offset(stream->ctx->engine[engine->id].state); | ||
768 | |||
769 | unlock: | ||
770 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
771 | |||
772 | return ret; | ||
773 | } | ||
774 | |||
775 | /** | ||
776 | * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id that releases the hold | ||
777 | * @stream: An i915-perf stream opened for OA metrics | ||
778 | * | ||
779 | * In case anything needed doing to ensure the context HW ID would remain valid | ||
780 | * for the lifetime of the stream, then that can be undone here. | ||
781 | */ | ||
782 | static void oa_put_render_ctx_id(struct i915_perf_stream *stream) | ||
783 | { | ||
784 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
785 | struct intel_engine_cs *engine = dev_priv->engine[RCS]; | ||
786 | |||
787 | mutex_lock(&dev_priv->drm.struct_mutex); | ||
788 | |||
789 | dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; | ||
790 | engine->context_unpin(engine, stream->ctx); | ||
791 | |||
792 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
793 | } | ||
794 | |||
795 | static void | ||
796 | free_oa_buffer(struct drm_i915_private *i915) | ||
797 | { | ||
798 | mutex_lock(&i915->drm.struct_mutex); | ||
799 | |||
800 | i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj); | ||
801 | i915_vma_unpin(i915->perf.oa.oa_buffer.vma); | ||
802 | i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj); | ||
803 | |||
804 | i915->perf.oa.oa_buffer.vma = NULL; | ||
805 | i915->perf.oa.oa_buffer.vaddr = NULL; | ||
806 | |||
807 | mutex_unlock(&i915->drm.struct_mutex); | ||
808 | } | ||
809 | |||
810 | static void i915_oa_stream_destroy(struct i915_perf_stream *stream) | ||
811 | { | ||
812 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
813 | |||
814 | BUG_ON(stream != dev_priv->perf.oa.exclusive_stream); | ||
815 | |||
816 | dev_priv->perf.oa.ops.disable_metric_set(dev_priv); | ||
817 | |||
818 | free_oa_buffer(dev_priv); | ||
819 | |||
820 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
821 | intel_runtime_pm_put(dev_priv); | ||
822 | |||
823 | if (stream->ctx) | ||
824 | oa_put_render_ctx_id(stream); | ||
825 | |||
826 | dev_priv->perf.oa.exclusive_stream = NULL; | ||
827 | } | ||
828 | |||
829 | static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv) | ||
830 | { | ||
831 | u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); | ||
832 | |||
833 | /* Pre-DevBDW: OABUFFER must be set with counters off, | ||
834 | * before OASTATUS1, but after OASTATUS2 | ||
835 | */ | ||
836 | I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */ | ||
837 | I915_WRITE(GEN7_OABUFFER, gtt_offset); | ||
838 | I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */ | ||
839 | |||
840 | /* On Haswell we have to track which OASTATUS1 flags we've | ||
841 | * already seen since they can't be cleared while periodic | ||
842 | * sampling is enabled. | ||
843 | */ | ||
844 | dev_priv->perf.oa.gen7_latched_oastatus1 = 0; | ||
845 | |||
846 | /* NB: although the OA buffer will initially be allocated | ||
847 | * zeroed via shmfs (and so this memset is redundant when | ||
848 | * first allocating), we may re-init the OA buffer, either | ||
849 | * when re-enabling a stream or in error/reset paths. | ||
850 | * | ||
851 | * The reason we clear the buffer for each re-init is for the | ||
852 | * sanity check in gen7_append_oa_reports() that looks at the | ||
853 | * report-id field to make sure it's non-zero which relies on | ||
854 | * the assumption that new reports are being written to zeroed | ||
855 | * memory... | ||
856 | */ | ||
857 | memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); | ||
858 | |||
859 | /* Maybe make ->pollin per-stream state if we support multiple | ||
860 | * concurrent streams in the future. | ||
861 | */ | ||
862 | dev_priv->perf.oa.pollin = false; | ||
863 | } | ||
864 | |||
865 | static int alloc_oa_buffer(struct drm_i915_private *dev_priv) | ||
866 | { | ||
867 | struct drm_i915_gem_object *bo; | ||
868 | struct i915_vma *vma; | ||
869 | int ret; | ||
870 | |||
871 | if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma)) | ||
872 | return -ENODEV; | ||
873 | |||
874 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | ||
875 | if (ret) | ||
876 | return ret; | ||
877 | |||
878 | BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); | ||
879 | BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); | ||
880 | |||
881 | bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE); | ||
882 | if (IS_ERR(bo)) { | ||
883 | DRM_ERROR("Failed to allocate OA buffer\n"); | ||
884 | ret = PTR_ERR(bo); | ||
885 | goto unlock; | ||
886 | } | ||
887 | |||
888 | ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC); | ||
889 | if (ret) | ||
890 | goto err_unref; | ||
891 | |||
892 | /* PreHSW required 512K alignment, HSW requires 16M */ | ||
893 | vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); | ||
894 | if (IS_ERR(vma)) { | ||
895 | ret = PTR_ERR(vma); | ||
896 | goto err_unref; | ||
897 | } | ||
898 | dev_priv->perf.oa.oa_buffer.vma = vma; | ||
899 | |||
900 | dev_priv->perf.oa.oa_buffer.vaddr = | ||
901 | i915_gem_object_pin_map(bo, I915_MAP_WB); | ||
902 | if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) { | ||
903 | ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr); | ||
904 | goto err_unpin; | ||
905 | } | ||
906 | |||
907 | dev_priv->perf.oa.ops.init_oa_buffer(dev_priv); | ||
908 | |||
909 | DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", | ||
910 | i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), | ||
911 | dev_priv->perf.oa.oa_buffer.vaddr); | ||
912 | |||
913 | goto unlock; | ||
914 | |||
915 | err_unpin: | ||
916 | __i915_vma_unpin(vma); | ||
917 | |||
918 | err_unref: | ||
919 | i915_gem_object_put(bo); | ||
920 | |||
921 | dev_priv->perf.oa.oa_buffer.vaddr = NULL; | ||
922 | dev_priv->perf.oa.oa_buffer.vma = NULL; | ||
923 | |||
924 | unlock: | ||
925 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
926 | return ret; | ||
927 | } | ||
928 | |||
929 | static void config_oa_regs(struct drm_i915_private *dev_priv, | ||
930 | const struct i915_oa_reg *regs, | ||
931 | int n_regs) | ||
932 | { | ||
933 | int i; | ||
934 | |||
935 | for (i = 0; i < n_regs; i++) { | ||
936 | const struct i915_oa_reg *reg = regs + i; | ||
937 | |||
938 | I915_WRITE(reg->addr, reg->value); | ||
939 | } | ||
940 | } | ||
941 | |||
942 | static int hsw_enable_metric_set(struct drm_i915_private *dev_priv) | ||
943 | { | ||
944 | int ret = i915_oa_select_metric_set_hsw(dev_priv); | ||
945 | |||
946 | if (ret) | ||
947 | return ret; | ||
948 | |||
949 | I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) | | ||
950 | GT_NOA_ENABLE)); | ||
951 | |||
952 | /* PRM: | ||
953 | * | ||
954 | * OA unit is using “crclk” for its functionality. When trunk | ||
955 | * level clock gating takes place, OA clock would be gated, | ||
956 | * unable to count the events from non-render clock domain. | ||
957 | * Render clock gating must be disabled when OA is enabled to | ||
958 | * count the events from non-render domain. Unit level clock | ||
959 | * gating for RCS should also be disabled. | ||
960 | */ | ||
961 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & | ||
962 | ~GEN7_DOP_CLOCK_GATE_ENABLE)); | ||
963 | I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | | ||
964 | GEN6_CSUNIT_CLOCK_GATE_DISABLE)); | ||
965 | |||
966 | config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs, | ||
967 | dev_priv->perf.oa.mux_regs_len); | ||
968 | |||
969 | /* It apparently takes a fairly long time for a new MUX | ||
970 | * configuration to be applied after these register writes. | ||
971 | * This delay duration was derived empirically based on the | ||
972 | * render_basic config but hopefully it covers the maximum | ||
973 | * configuration latency. | ||
974 | * | ||
975 | * As a fallback, the checks in _append_oa_reports() to skip | ||
976 | * invalid OA reports do also seem to work to discard reports | ||
977 | * generated before this config has completed - albeit not | ||
978 | * silently. | ||
979 | * | ||
980 | * Unfortunately this is essentially a magic number, since we | ||
981 | * don't currently know of a reliable mechanism for predicting | ||
982 | * how long the MUX config will take to apply and besides | ||
983 | * seeing invalid reports we don't know of a reliable way to | ||
984 | * explicitly check that the MUX config has landed. | ||
985 | * | ||
986 | * It's even possible we've mischaracterized the underlying | ||
987 | * problem - it just seems like the simplest explanation for why | ||
988 | * a delay at this location would mitigate any invalid reports. | ||
989 | */ | ||
990 | usleep_range(15000, 20000); | ||
991 | |||
992 | config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs, | ||
993 | dev_priv->perf.oa.b_counter_regs_len); | ||
994 | |||
995 | return 0; | ||
996 | } | ||
997 | |||
998 | static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) | ||
999 | { | ||
1000 | I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) & | ||
1001 | ~GEN6_CSUNIT_CLOCK_GATE_DISABLE)); | ||
1002 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) | | ||
1003 | GEN7_DOP_CLOCK_GATE_ENABLE)); | ||
1004 | |||
1005 | I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & | ||
1006 | ~GT_NOA_ENABLE)); | ||
1007 | } | ||
1008 | |||
1009 | static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv) | ||
1010 | { | ||
1011 | assert_spin_locked(&dev_priv->perf.hook_lock); | ||
1012 | |||
1013 | if (dev_priv->perf.oa.exclusive_stream->enabled) { | ||
1014 | struct i915_gem_context *ctx = | ||
1015 | dev_priv->perf.oa.exclusive_stream->ctx; | ||
1016 | u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; | ||
1017 | |||
1018 | bool periodic = dev_priv->perf.oa.periodic; | ||
1019 | u32 period_exponent = dev_priv->perf.oa.period_exponent; | ||
1020 | u32 report_format = dev_priv->perf.oa.oa_buffer.format; | ||
1021 | |||
1022 | I915_WRITE(GEN7_OACONTROL, | ||
1023 | (ctx_id & GEN7_OACONTROL_CTX_MASK) | | ||
1024 | (period_exponent << | ||
1025 | GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | | ||
1026 | (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | | ||
1027 | (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | | ||
1028 | (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | | ||
1029 | GEN7_OACONTROL_ENABLE); | ||
1030 | } else | ||
1031 | I915_WRITE(GEN7_OACONTROL, 0); | ||
1032 | } | ||
1033 | |||
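As a worked example of this packing, using the GEN7_OACONTROL field definitions added by this patch: a periodic stream with period exponent 16 and the A45_B8_C8 report format (format code 5), not filtered to a single context, programs:

	GEN7_OACONTROL = (16 << GEN7_OACONTROL_TIMER_PERIOD_SHIFT) /* 0x400 */
		       | GEN7_OACONTROL_TIMER_ENABLE              /* 0x020 */
		       | (5 << GEN7_OACONTROL_FORMAT_SHIFT)        /* 0x014 */
		       | GEN7_OACONTROL_ENABLE                     /* 0x001 */
		       = 0x435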
1034 | static void gen7_oa_enable(struct drm_i915_private *dev_priv) | ||
1035 | { | ||
1036 | unsigned long flags; | ||
1037 | |||
1038 | /* Reset buf pointers so we don't forward reports from before now. | ||
1039 | * | ||
1040 | * Think carefully if considering trying to avoid this, since it | ||
1041 | * also ensures status flags and the buffer itself are cleared | ||
1042 | * in error paths, and we have checks for invalid reports based | ||
1043 | * on the assumption that certain fields are written to zeroed | ||
1044 | * memory, which this helps maintain. | ||
1045 | */ | ||
1046 | gen7_init_oa_buffer(dev_priv); | ||
1047 | |||
1048 | spin_lock_irqsave(&dev_priv->perf.hook_lock, flags); | ||
1049 | gen7_update_oacontrol_locked(dev_priv); | ||
1050 | spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags); | ||
1051 | } | ||
1052 | |||
1053 | /** | ||
1054 | * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream | ||
1055 | * @stream: An i915 perf stream opened for OA metrics | ||
1056 | * | ||
1057 | * [Re]enables hardware periodic sampling according to the period configured | ||
1058 | * when opening the stream. This also starts a hrtimer that will periodically | ||
1059 | * check for data in the circular OA buffer for notifying userspace (e.g. | ||
1060 | * during a read() or poll()). | ||
1061 | */ | ||
1062 | static void i915_oa_stream_enable(struct i915_perf_stream *stream) | ||
1063 | { | ||
1064 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1065 | |||
1066 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | ||
1067 | |||
1068 | if (dev_priv->perf.oa.periodic) | ||
1069 | hrtimer_start(&dev_priv->perf.oa.poll_check_timer, | ||
1070 | ns_to_ktime(POLL_PERIOD), | ||
1071 | HRTIMER_MODE_REL_PINNED); | ||
1072 | } | ||
1073 | |||
1074 | static void gen7_oa_disable(struct drm_i915_private *dev_priv) | ||
1075 | { | ||
1076 | I915_WRITE(GEN7_OACONTROL, 0); | ||
1077 | } | ||
1078 | |||
1079 | /** | ||
1080 | * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream | ||
1081 | * @stream: An i915 perf stream opened for OA metrics | ||
1082 | * | ||
1083 | * Stops the OA unit from periodically writing counter reports into the | ||
1084 | * circular OA buffer. This also stops the hrtimer that periodically checks for | ||
1085 | * data in the circular OA buffer, for notifying userspace. | ||
1086 | */ | ||
1087 | static void i915_oa_stream_disable(struct i915_perf_stream *stream) | ||
1088 | { | ||
1089 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1090 | |||
1091 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | ||
1092 | |||
1093 | if (dev_priv->perf.oa.periodic) | ||
1094 | hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); | ||
1095 | } | ||
1096 | |||
1097 | static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) | ||
1098 | { | ||
1099 | return div_u64(1000000000ULL * (2ULL << exponent), | ||
1100 | dev_priv->perf.oa.timestamp_frequency); | ||
1101 | } | ||
1102 | |||
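Given Haswell's 12.5MHz OA timestamp frequency (timestamp_frequency is set in i915_perf_init() below), this reduces to period = 80ns * 2^(exponent + 1), the same formula quoted in read_properties_unlocked(). Two worked values:

	exponent 0:  (2 << 0)  * 1000000000 / 12500000 = 160ns  (the 160ns minimum)
	exponent 16: (2 << 16) * 1000000000 / 12500000 ~= 10.5ms (~95Hz)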
1103 | static const struct i915_perf_stream_ops i915_oa_stream_ops = { | ||
1104 | .destroy = i915_oa_stream_destroy, | ||
1105 | .enable = i915_oa_stream_enable, | ||
1106 | .disable = i915_oa_stream_disable, | ||
1107 | .wait_unlocked = i915_oa_wait_unlocked, | ||
1108 | .poll_wait = i915_oa_poll_wait, | ||
1109 | .read = i915_oa_read, | ||
1110 | }; | ||
1111 | |||
1112 | /** | ||
1113 | * i915_oa_stream_init - validate combined props for OA stream and init | ||
1114 | * @stream: An i915 perf stream | ||
1115 | * @param: The open parameters passed to `DRM_I915_PERF_OPEN` | ||
1116 | * @props: The property state that configures stream (individually validated) | ||
1117 | * | ||
1118 | * While read_properties_unlocked() validates properties in isolation, it | ||
1119 | * doesn't ensure that the combination necessarily makes sense. | ||
1120 | * | ||
1121 | * At this point it has been determined that userspace wants a stream of | ||
1122 | * OA metrics, but we still need to validate that the combined | ||
1123 | * properties are OK. | ||
1124 | * | ||
1125 | * If the configuration makes sense then we can allocate memory for | ||
1126 | * a circular OA buffer and apply the requested metric set configuration. | ||
1127 | * | ||
1128 | * Returns: zero on success or a negative error code. | ||
1129 | */ | ||
1130 | static int i915_oa_stream_init(struct i915_perf_stream *stream, | ||
1131 | struct drm_i915_perf_open_param *param, | ||
1132 | struct perf_open_properties *props) | ||
1133 | { | ||
1134 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1135 | int format_size; | ||
1136 | int ret; | ||
1137 | |||
1138 | /* If the sysfs metrics/ directory wasn't registered for some | ||
1139 | * reason then don't let userspace try their luck with config | ||
1140 | * IDs | ||
1141 | */ | ||
1142 | if (!dev_priv->perf.metrics_kobj) { | ||
1143 | DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); | ||
1144 | return -EINVAL; | ||
1145 | } | ||
1146 | |||
1147 | if (!(props->sample_flags & SAMPLE_OA_REPORT)) { | ||
1148 | DRM_DEBUG("Only OA report sampling supported\n"); | ||
1149 | return -EINVAL; | ||
1150 | } | ||
1151 | |||
1152 | if (!dev_priv->perf.oa.ops.init_oa_buffer) { | ||
1153 | DRM_DEBUG("OA unit not supported\n"); | ||
1154 | return -ENODEV; | ||
1155 | } | ||
1156 | |||
1157 | /* To avoid the complexity of having to accurately filter | ||
1158 | * counter reports and marshal to the appropriate client | ||
1159 | * we currently only allow exclusive access | ||
1160 | */ | ||
1161 | if (dev_priv->perf.oa.exclusive_stream) { | ||
1162 | DRM_DEBUG("OA unit already in use\n"); | ||
1163 | return -EBUSY; | ||
1164 | } | ||
1165 | |||
1166 | if (!props->metrics_set) { | ||
1167 | DRM_DEBUG("OA metric set not specified\n"); | ||
1168 | return -EINVAL; | ||
1169 | } | ||
1170 | |||
1171 | if (!props->oa_format) { | ||
1172 | DRM_DEBUG("OA report format not specified\n"); | ||
1173 | return -EINVAL; | ||
1174 | } | ||
1175 | |||
1176 | stream->sample_size = sizeof(struct drm_i915_perf_record_header); | ||
1177 | |||
1178 | format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size; | ||
1179 | |||
1180 | stream->sample_flags |= SAMPLE_OA_REPORT; | ||
1181 | stream->sample_size += format_size; | ||
1182 | |||
1183 | dev_priv->perf.oa.oa_buffer.format_size = format_size; | ||
1184 | if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0)) | ||
1185 | return -EINVAL; | ||
1186 | |||
1187 | dev_priv->perf.oa.oa_buffer.format = | ||
1188 | dev_priv->perf.oa.oa_formats[props->oa_format].format; | ||
1189 | |||
1190 | dev_priv->perf.oa.metrics_set = props->metrics_set; | ||
1191 | |||
1192 | dev_priv->perf.oa.periodic = props->oa_periodic; | ||
1193 | if (dev_priv->perf.oa.periodic) { | ||
1194 | u32 tail; | ||
1195 | |||
1196 | dev_priv->perf.oa.period_exponent = props->oa_period_exponent; | ||
1197 | |||
1198 | /* See comment for OA_TAIL_MARGIN_NSEC for details | ||
1199 | * about this tail_margin... | ||
1200 | */ | ||
1201 | tail = div64_u64(OA_TAIL_MARGIN_NSEC, | ||
1202 | oa_exponent_to_ns(dev_priv, | ||
1203 | props->oa_period_exponent)); | ||
1204 | dev_priv->perf.oa.tail_margin = (tail + 1) * format_size; | ||
1205 | } | ||
1206 | |||
1207 | if (stream->ctx) { | ||
1208 | ret = oa_get_render_ctx_id(stream); | ||
1209 | if (ret) | ||
1210 | return ret; | ||
1211 | } | ||
1212 | |||
1213 | ret = alloc_oa_buffer(dev_priv); | ||
1214 | if (ret) | ||
1215 | goto err_oa_buf_alloc; | ||
1216 | |||
1217 | /* PRM - observability performance counters: | ||
1218 | * | ||
1219 | * OACONTROL, performance counter enable, note: | ||
1220 | * | ||
1221 | * "When this bit is set, in order to have coherent counts, | ||
1222 | * RC6 power state and trunk clock gating must be disabled. | ||
1223 | * This can be achieved by programming MMIO registers as | ||
1224 | * 0xA094=0 and 0xA090[31]=1" | ||
1225 | * | ||
1226 | * In our case we are expecting that taking pm + FORCEWAKE | ||
1227 | * references will effectively disable RC6. | ||
1228 | */ | ||
1229 | intel_runtime_pm_get(dev_priv); | ||
1230 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
1231 | |||
1232 | ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv); | ||
1233 | if (ret) | ||
1234 | goto err_enable; | ||
1235 | |||
1236 | stream->ops = &i915_oa_stream_ops; | ||
1237 | |||
1238 | dev_priv->perf.oa.exclusive_stream = stream; | ||
1239 | |||
1240 | return 0; | ||
1241 | |||
1242 | err_enable: | ||
1243 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
1244 | intel_runtime_pm_put(dev_priv); | ||
1245 | free_oa_buffer(dev_priv); | ||
1246 | |||
1247 | err_oa_buf_alloc: | ||
1248 | if (stream->ctx) | ||
1249 | oa_put_render_ctx_id(stream); | ||
1250 | |||
1251 | return ret; | ||
1252 | } | ||
1253 | |||
1254 | /** | ||
1255 | * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation | ||
1256 | * @stream: An i915 perf stream | ||
1257 | * @file: An i915 perf stream file | ||
1258 | * @buf: destination buffer given by userspace | ||
1259 | * @count: the number of bytes userspace wants to read | ||
1260 | * @ppos: (inout) file seek position (unused) | ||
1261 | * | ||
1262 | * Besides wrapping &i915_perf_stream_ops->read this provides a common place to | ||
1263 | * ensure that if we've successfully copied any data then reporting that takes | ||
1264 | * precedence over any internal error status, so the data isn't lost. | ||
1265 | * | ||
1266 | * For example ret will be -ENOSPC whenever there is more buffered data than | ||
1267 | * can be copied to userspace, but that's only interesting if we weren't able | ||
1268 | * to copy some data because it implies the userspace buffer is too small to | ||
1269 | * receive a single record (and we never split records). | ||
1270 | * | ||
1271 | * Another case with ret == -EFAULT is more of a grey area since it would seem | ||
1272 | * like bad form for userspace to ask us to overrun its buffer, but the user | ||
1273 | * knows best: | ||
1274 | * | ||
1275 | * http://yarchive.net/comp/linux/partial_reads_writes.html | ||
1276 | * | ||
1277 | * Returns: The number of bytes copied or a negative error code on failure. | ||
1278 | */ | ||
1279 | static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, | ||
1280 | struct file *file, | ||
1281 | char __user *buf, | ||
1282 | size_t count, | ||
1283 | loff_t *ppos) | ||
1284 | { | ||
1285 | /* Note we keep the offset (aka bytes read) separate from any | ||
1286 | * error status so that the final check for whether we return | ||
1287 | * the bytes read with a higher precedence than any error (see | ||
1288 | * comment below) doesn't need to be handled/duplicated in | ||
1289 | * stream->ops->read() implementations. | ||
1290 | */ | ||
1291 | size_t offset = 0; | ||
1292 | int ret = stream->ops->read(stream, buf, count, &offset); | ||
1293 | |||
1294 | return offset ?: (ret ?: -EAGAIN); | ||
1295 | } | ||
1296 | |||
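The final statement leans on the GNU `a ?: b` shorthand (a two-operand conditional that yields `a` when non-zero, else `b`). Expanded, the precedence rule it encodes reads:

	if (offset)		/* any copied bytes win... */
		return offset;
	if (ret)		/* ...then any error status... */
		return ret;
	return -EAGAIN;		/* ...else ask blocking callers to retry */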
1297 | /** | ||
1298 | * i915_perf_read - handles read() FOP for i915 perf stream FDs | ||
1299 | * @file: An i915 perf stream file | ||
1300 | * @buf: destination buffer given by userspace | ||
1301 | * @count: the number of bytes userspace wants to read | ||
1302 | * @ppos: (inout) file seek position (unused) | ||
1303 | * | ||
1304 | * The entry point for handling a read() on a stream file descriptor from | ||
1305 | * userspace. Most of the work is left to i915_perf_read_locked() and | ||
1306 | * &i915_perf_stream_ops->read, but to save stream implementations (of which | ||
1307 | * we might have multiple later) from handling blocking reads, we do it here. | ||
1308 | * | ||
1309 | * We can also consistently treat trying to read from a disabled stream | ||
1310 | * as an IO error so implementations can assume the stream is enabled | ||
1311 | * while reading. | ||
1312 | * | ||
1313 | * Returns: The number of bytes copied or a negative error code on failure. | ||
1314 | */ | ||
1315 | static ssize_t i915_perf_read(struct file *file, | ||
1316 | char __user *buf, | ||
1317 | size_t count, | ||
1318 | loff_t *ppos) | ||
1319 | { | ||
1320 | struct i915_perf_stream *stream = file->private_data; | ||
1321 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1322 | ssize_t ret; | ||
1323 | |||
1324 | /* To ensure it's handled consistently we simply treat all reads of a | ||
1325 | * disabled stream as an error. In particular it might otherwise lead | ||
1326 | * to a deadlock for blocking file descriptors... | ||
1327 | */ | ||
1328 | if (!stream->enabled) | ||
1329 | return -EIO; | ||
1330 | |||
1331 | if (!(file->f_flags & O_NONBLOCK)) { | ||
1332 | /* There's the small chance of false positives from | ||
1333 | * stream->ops->wait_unlocked. | ||
1334 | * | ||
1335 | * E.g. with single context filtering, since we only wait until | ||
1336 | * the oabuffer has >= 1 report we don't immediately know whether | ||
1337 | * any reports really belong to the current context. | ||
1338 | */ | ||
1339 | do { | ||
1340 | ret = stream->ops->wait_unlocked(stream); | ||
1341 | if (ret) | ||
1342 | return ret; | ||
1343 | |||
1344 | mutex_lock(&dev_priv->perf.lock); | ||
1345 | ret = i915_perf_read_locked(stream, file, | ||
1346 | buf, count, ppos); | ||
1347 | mutex_unlock(&dev_priv->perf.lock); | ||
1348 | } while (ret == -EAGAIN); | ||
1349 | } else { | ||
1350 | mutex_lock(&dev_priv->perf.lock); | ||
1351 | ret = i915_perf_read_locked(stream, file, buf, count, ppos); | ||
1352 | mutex_unlock(&dev_priv->perf.lock); | ||
1353 | } | ||
1354 | |||
1355 | if (ret >= 0) { | ||
1356 | /* Maybe make ->pollin per-stream state if we support multiple | ||
1357 | * concurrent streams in the future. | ||
1358 | */ | ||
1359 | dev_priv->perf.oa.pollin = false; | ||
1360 | } | ||
1361 | |||
1362 | return ret; | ||
1363 | } | ||
1364 | |||
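From userspace the stream fd then behaves like a record-oriented character stream. A minimal consumer sketch, assuming the uapi definitions from include/uapi/drm/i915_drm.h (drain_stream is a hypothetical helper and the buffer size an arbitrary choice):

	#include <stdint.h>
	#include <unistd.h>
	#include <drm/i915_drm.h>

	static void drain_stream(int stream_fd)
	{
		uint8_t buf[16 * 1024]; /* must hold at least one whole record */
		ssize_t len = read(stream_fd, buf, sizeof(buf));
		ssize_t off = 0;

		while (off < len) {
			const struct drm_i915_perf_record_header *hdr =
				(const void *)(buf + off);

			if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE) {
				/* hdr + 1 is the start of the raw OA report */
			}

			off += hdr->size; /* records are never split */
		}
	}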
1365 | static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) | ||
1366 | { | ||
1367 | struct drm_i915_private *dev_priv = | ||
1368 | container_of(hrtimer, typeof(*dev_priv), | ||
1369 | perf.oa.poll_check_timer); | ||
1370 | |||
1371 | if (!dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)) { | ||
1372 | dev_priv->perf.oa.pollin = true; | ||
1373 | wake_up(&dev_priv->perf.oa.poll_wq); | ||
1374 | } | ||
1375 | |||
1376 | hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); | ||
1377 | |||
1378 | return HRTIMER_RESTART; | ||
1379 | } | ||
1380 | |||
1381 | /** | ||
1382 | * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream | ||
1383 | * @dev_priv: i915 device instance | ||
1384 | * @stream: An i915 perf stream | ||
1385 | * @file: An i915 perf stream file | ||
1386 | * @wait: poll() state table | ||
1387 | * | ||
1388 | * For handling userspace polling on an i915 perf stream, this calls through to | ||
1389 | * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that | ||
1390 | * will be woken for new stream data. | ||
1391 | * | ||
1392 | * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize | ||
1393 | * with any non-file-operation driver hooks. | ||
1394 | * | ||
1395 | * Returns: any poll events that are ready without sleeping | ||
1396 | */ | ||
1397 | static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, | ||
1398 | struct i915_perf_stream *stream, | ||
1399 | struct file *file, | ||
1400 | poll_table *wait) | ||
1401 | { | ||
1402 | unsigned int events = 0; | ||
1403 | |||
1404 | stream->ops->poll_wait(stream, file, wait); | ||
1405 | |||
1406 | /* Note: we don't explicitly check whether there's something to read | ||
1407 | * here since this path may be very hot depending on what else | ||
1408 | * userspace is polling, or on the timeout in use. We rely solely on | ||
1409 | * the hrtimer/oa_poll_check_timer_cb to notify us when there are | ||
1410 | * samples to read. | ||
1411 | */ | ||
1412 | if (dev_priv->perf.oa.pollin) | ||
1413 | events |= POLLIN; | ||
1414 | |||
1415 | return events; | ||
1416 | } | ||
1417 | |||
1418 | /** | ||
1419 | * i915_perf_poll - call poll_wait() with a suitable wait queue for stream | ||
1420 | * @file: An i915 perf stream file | ||
1421 | * @wait: poll() state table | ||
1422 | * | ||
1423 | * For handling userspace polling on an i915 perf stream, this ensures | ||
1424 | * poll_wait() gets called with a wait queue that will be woken for new stream | ||
1425 | * data. | ||
1426 | * | ||
1427 | * Note: Implementation deferred to i915_perf_poll_locked() | ||
1428 | * | ||
1429 | * Returns: any poll events that are ready without sleeping | ||
1430 | */ | ||
1431 | static unsigned int i915_perf_poll(struct file *file, poll_table *wait) | ||
1432 | { | ||
1433 | struct i915_perf_stream *stream = file->private_data; | ||
1434 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1435 | int ret; | ||
1436 | |||
1437 | mutex_lock(&dev_priv->perf.lock); | ||
1438 | ret = i915_perf_poll_locked(dev_priv, stream, file, wait); | ||
1439 | mutex_unlock(&dev_priv->perf.lock); | ||
1440 | |||
1441 | return ret; | ||
1442 | } | ||
1443 | |||
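A non-blocking consumer would pair the read loop above with poll(), sleeping until the hrtimer latches pollin. A sketch (wait_for_oa_samples is a hypothetical helper):

	#include <poll.h>

	static int wait_for_oa_samples(int stream_fd)
	{
		struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };

		/* Returns 1 with POLLIN set once oa_poll_check_timer_cb()
		 * has observed a non-empty OA buffer.
		 */
		return poll(&pfd, 1, -1);
	}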
1444 | /** | ||
1445 | * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl | ||
1446 | * @stream: A disabled i915 perf stream | ||
1447 | * | ||
1448 | * [Re]enables the associated capture of data for this stream. | ||
1449 | * | ||
1450 | * If a stream was previously enabled then there's currently no intention | ||
1451 | * to provide userspace any guarantee about the preservation of previously | ||
1452 | * buffered data. | ||
1453 | */ | ||
1454 | static void i915_perf_enable_locked(struct i915_perf_stream *stream) | ||
1455 | { | ||
1456 | if (stream->enabled) | ||
1457 | return; | ||
1458 | |||
1459 | /* Allow stream->ops->enable() to refer to this */ | ||
1460 | stream->enabled = true; | ||
1461 | |||
1462 | if (stream->ops->enable) | ||
1463 | stream->ops->enable(stream); | ||
1464 | } | ||
1465 | |||
1466 | /** | ||
1467 | * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl | ||
1468 | * @stream: An enabled i915 perf stream | ||
1469 | * | ||
1470 | * Disables the associated capture of data for this stream. | ||
1471 | * | ||
1472 | * The intention is that disabling and re-enabling a stream will ideally be | ||
1473 | * cheaper than destroying and re-opening a stream with the same configuration, | ||
1474 | * though there are no formal guarantees about what state or buffered data | ||
1475 | * must be retained between disabling and re-enabling a stream. | ||
1476 | * | ||
1477 | * Note: while a stream is disabled it's considered an error for userspace | ||
1478 | * to attempt to read from the stream (-EIO). | ||
1479 | */ | ||
1480 | static void i915_perf_disable_locked(struct i915_perf_stream *stream) | ||
1481 | { | ||
1482 | if (!stream->enabled) | ||
1483 | return; | ||
1484 | |||
1485 | /* Allow stream->ops->disable() to refer to this */ | ||
1486 | stream->enabled = false; | ||
1487 | |||
1488 | if (stream->ops->disable) | ||
1489 | stream->ops->disable(stream); | ||
1490 | } | ||
1491 | |||
1492 | /** | ||
1493 | * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs | ||
1494 | * @stream: An i915 perf stream | ||
1495 | * @cmd: the ioctl request | ||
1496 | * @arg: the ioctl data | ||
1497 | * | ||
1498 | * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize | ||
1499 | * with any non-file-operation driver hooks. | ||
1500 | * | ||
1501 | * Returns: zero on success or a negative error code. Returns -EINVAL for | ||
1502 | * an unknown ioctl request. | ||
1503 | */ | ||
1504 | static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, | ||
1505 | unsigned int cmd, | ||
1506 | unsigned long arg) | ||
1507 | { | ||
1508 | switch (cmd) { | ||
1509 | case I915_PERF_IOCTL_ENABLE: | ||
1510 | i915_perf_enable_locked(stream); | ||
1511 | return 0; | ||
1512 | case I915_PERF_IOCTL_DISABLE: | ||
1513 | i915_perf_disable_locked(stream); | ||
1514 | return 0; | ||
1515 | } | ||
1516 | |||
1517 | return -EINVAL; | ||
1518 | } | ||
1519 | |||
1520 | /** | ||
1521 | * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs | ||
1522 | * @file: An i915 perf stream file | ||
1523 | * @cmd: the ioctl request | ||
1524 | * @arg: the ioctl data | ||
1525 | * | ||
1526 | * Implementation deferred to i915_perf_ioctl_locked(). | ||
1527 | * | ||
1528 | * Returns: zero on success or a negative error code. Returns -EINVAL for | ||
1529 | * an unknown ioctl request. | ||
1530 | */ | ||
1531 | static long i915_perf_ioctl(struct file *file, | ||
1532 | unsigned int cmd, | ||
1533 | unsigned long arg) | ||
1534 | { | ||
1535 | struct i915_perf_stream *stream = file->private_data; | ||
1536 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1537 | long ret; | ||
1538 | |||
1539 | mutex_lock(&dev_priv->perf.lock); | ||
1540 | ret = i915_perf_ioctl_locked(stream, cmd, arg); | ||
1541 | mutex_unlock(&dev_priv->perf.lock); | ||
1542 | |||
1543 | return ret; | ||
1544 | } | ||
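So, for a stream opened with I915_PERF_FLAG_DISABLED, capture is started and stopped from userspace with plain ioctl()s on the stream fd. A sketch with error handling elided (toggle_capture is a hypothetical helper; the ioctl names are from the uapi header):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static void toggle_capture(int stream_fd)
	{
		ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
		/* ... read()/poll() for OA samples ... */
		ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
	}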
1545 | |||
1546 | /** | ||
1547 | * i915_perf_destroy_locked - destroy an i915 perf stream | ||
1548 | * @stream: An i915 perf stream | ||
1549 | * | ||
1550 | * Frees all resources associated with the given i915 perf @stream, disabling | ||
1551 | * any associated data capture in the process. | ||
1552 | * | ||
1553 | * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize | ||
1554 | * with any non-file-operation driver hooks. | ||
1555 | */ | ||
1556 | static void i915_perf_destroy_locked(struct i915_perf_stream *stream) | ||
1557 | { | ||
1558 | if (stream->enabled) | ||
1559 | i915_perf_disable_locked(stream); | ||
1560 | |||
1561 | if (stream->ops->destroy) | ||
1562 | stream->ops->destroy(stream); | ||
1563 | |||
1564 | list_del(&stream->link); | ||
1565 | |||
1566 | if (stream->ctx) | ||
1567 | i915_gem_context_put_unlocked(stream->ctx); | ||
1568 | |||
1569 | kfree(stream); | ||
1570 | } | ||
1571 | |||
1572 | /** | ||
1573 | * i915_perf_release - handles userspace close() of a stream file | ||
1574 | * @inode: anonymous inode associated with file | ||
1575 | * @file: An i915 perf stream file | ||
1576 | * | ||
1577 | * Cleans up any resources associated with an open i915 perf stream file. | ||
1578 | * | ||
1579 | * NB: close() can't really fail from the userspace point of view. | ||
1580 | * | ||
1581 | * Returns: zero on success or a negative error code. | ||
1582 | */ | ||
1583 | static int i915_perf_release(struct inode *inode, struct file *file) | ||
1584 | { | ||
1585 | struct i915_perf_stream *stream = file->private_data; | ||
1586 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1587 | |||
1588 | mutex_lock(&dev_priv->perf.lock); | ||
1589 | i915_perf_destroy_locked(stream); | ||
1590 | mutex_unlock(&dev_priv->perf.lock); | ||
1591 | |||
1592 | return 0; | ||
1593 | } | ||
1594 | |||
1595 | |||
1596 | static const struct file_operations fops = { | ||
1597 | .owner = THIS_MODULE, | ||
1598 | .llseek = no_llseek, | ||
1599 | .release = i915_perf_release, | ||
1600 | .poll = i915_perf_poll, | ||
1601 | .read = i915_perf_read, | ||
1602 | .unlocked_ioctl = i915_perf_ioctl, | ||
1603 | }; | ||
1604 | |||
1605 | |||
1606 | static struct i915_gem_context * | ||
1607 | lookup_context(struct drm_i915_private *dev_priv, | ||
1608 | struct drm_i915_file_private *file_priv, | ||
1609 | u32 ctx_user_handle) | ||
1610 | { | ||
1611 | struct i915_gem_context *ctx; | ||
1612 | int ret; | ||
1613 | |||
1614 | ret = i915_mutex_lock_interruptible(&dev_priv->drm); | ||
1615 | if (ret) | ||
1616 | return ERR_PTR(ret); | ||
1617 | |||
1618 | ctx = i915_gem_context_lookup(file_priv, ctx_user_handle); | ||
1619 | if (!IS_ERR(ctx)) | ||
1620 | i915_gem_context_get(ctx); | ||
1621 | |||
1622 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
1623 | |||
1624 | return ctx; | ||
1625 | } | ||
1626 | |||
1627 | /** | ||
1628 | * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD | ||
1629 | * @dev_priv: i915 device instance | ||
1630 | * @param: The open parameters passed to `DRM_I915_PERF_OPEN` | ||
1631 | * @props: individually validated u64 property value pairs | ||
1632 | * @file: drm file | ||
1633 | * | ||
1634 | * See i915_perf_ioctl_open() for interface details. | ||
1635 | * | ||
1636 | * Implements further stream config validation and stream initialization on | ||
1637 | * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex | ||
1638 | * taken to serialize with any non-file-operation driver hooks. | ||
1639 | * | ||
1640 | * Note: at this point the @props have only been validated in isolation and | ||
1641 | * it's still necessary to validate that the combination of properties makes | ||
1642 | * sense. | ||
1643 | * | ||
1644 | * In the case where userspace is interested in OA unit metrics then further | ||
1645 | * config validation and stream initialization details will be handled by | ||
1646 | * i915_oa_stream_init(). The code here should only validate config state that | ||
1647 | * will be relevant to all stream types / backends. | ||
1648 | * | ||
1649 | * Returns: zero on success or a negative error code. | ||
1650 | */ | ||
1651 | static int | ||
1652 | i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, | ||
1653 | struct drm_i915_perf_open_param *param, | ||
1654 | struct perf_open_properties *props, | ||
1655 | struct drm_file *file) | ||
1656 | { | ||
1657 | struct i915_gem_context *specific_ctx = NULL; | ||
1658 | struct i915_perf_stream *stream = NULL; | ||
1659 | unsigned long f_flags = 0; | ||
1660 | int stream_fd; | ||
1661 | int ret; | ||
1662 | |||
1663 | if (props->single_context) { | ||
1664 | u32 ctx_handle = props->ctx_handle; | ||
1665 | struct drm_i915_file_private *file_priv = file->driver_priv; | ||
1666 | |||
1667 | specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle); | ||
1668 | if (IS_ERR(specific_ctx)) { | ||
1669 | ret = PTR_ERR(specific_ctx); | ||
1670 | if (ret != -EINTR) | ||
1671 | DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", | ||
1672 | ctx_handle); | ||
1673 | goto err; | ||
1674 | } | ||
1675 | } | ||
1676 | |||
1677 | /* Similar to perf's kernel.perf_event_paranoid sysctl option | ||
1678 | * we check a dev.i915.perf_stream_paranoid sysctl option | ||
1679 | * to determine if it's ok to access system wide OA counters | ||
1680 | * without CAP_SYS_ADMIN privileges. | ||
1681 | */ | ||
1682 | if (!specific_ctx && | ||
1683 | i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { | ||
1684 | DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); | ||
1685 | ret = -EACCES; | ||
1686 | goto err_ctx; | ||
1687 | } | ||
1688 | |||
1689 | stream = kzalloc(sizeof(*stream), GFP_KERNEL); | ||
1690 | if (!stream) { | ||
1691 | ret = -ENOMEM; | ||
1692 | goto err_ctx; | ||
1693 | } | ||
1694 | |||
1695 | stream->dev_priv = dev_priv; | ||
1696 | stream->ctx = specific_ctx; | ||
1697 | |||
1698 | ret = i915_oa_stream_init(stream, param, props); | ||
1699 | if (ret) | ||
1700 | goto err_alloc; | ||
1701 | |||
1702 | /* We avoid simply assigning stream->sample_flags = props->sample_flags | ||
1703 | * so that _stream_init can check the combination of sample flags more | ||
1704 | * thoroughly; still, this is the expected result at this point. | ||
1705 | */ | ||
1706 | if (WARN_ON(stream->sample_flags != props->sample_flags)) { | ||
1707 | ret = -ENODEV; | ||
1708 | goto err_alloc; | ||
1709 | } | ||
1710 | |||
1711 | list_add(&stream->link, &dev_priv->perf.streams); | ||
1712 | |||
1713 | if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) | ||
1714 | f_flags |= O_CLOEXEC; | ||
1715 | if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) | ||
1716 | f_flags |= O_NONBLOCK; | ||
1717 | |||
1718 | stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); | ||
1719 | if (stream_fd < 0) { | ||
1720 | ret = stream_fd; | ||
1721 | goto err_open; | ||
1722 | } | ||
1723 | |||
1724 | if (!(param->flags & I915_PERF_FLAG_DISABLED)) | ||
1725 | i915_perf_enable_locked(stream); | ||
1726 | |||
1727 | return stream_fd; | ||
1728 | |||
1729 | err_open: | ||
1730 | list_del(&stream->link); | ||
1731 | if (stream->ops->destroy) | ||
1732 | stream->ops->destroy(stream); | ||
1733 | err_alloc: | ||
1734 | kfree(stream); | ||
1735 | err_ctx: | ||
1736 | if (specific_ctx) | ||
1737 | i915_gem_context_put_unlocked(specific_ctx); | ||
1738 | err: | ||
1739 | return ret; | ||
1740 | } | ||
1741 | |||
1742 | /** | ||
1743 | * read_properties_unlocked - validate + copy userspace stream open properties | ||
1744 | * @dev_priv: i915 device instance | ||
1745 | * @uprops: The array of u64 key value pairs given by userspace | ||
1746 | * @n_props: The number of key value pairs expected in @uprops | ||
1747 | * @props: The stream configuration built up while validating properties | ||
1748 | * | ||
1749 | * Note this function only validates properties in isolation; it doesn't | ||
1750 | * validate that the combination of properties makes sense or that all | ||
1751 | * properties necessary for a particular kind of stream have been set. | ||
1752 | * | ||
1753 | * Note that there currently aren't any ordering requirements for properties so | ||
1754 | * we shouldn't validate or assume anything about ordering here. This doesn't | ||
1755 | * rule out defining new properties with ordering requirements in the future. | ||
1756 | */ | ||
1757 | static int read_properties_unlocked(struct drm_i915_private *dev_priv, | ||
1758 | u64 __user *uprops, | ||
1759 | u32 n_props, | ||
1760 | struct perf_open_properties *props) | ||
1761 | { | ||
1762 | u64 __user *uprop = uprops; | ||
1763 | int i; | ||
1764 | |||
1765 | memset(props, 0, sizeof(struct perf_open_properties)); | ||
1766 | |||
1767 | if (!n_props) { | ||
1768 | DRM_DEBUG("No i915 perf properties given\n"); | ||
1769 | return -EINVAL; | ||
1770 | } | ||
1771 | |||
1772 | /* Considering that ID = 0 is reserved, and assuming we don't | ||
1773 | * (currently) expect any configurations to ever specify duplicate | ||
1774 | * values for a particular property ID, the last _PROP_MAX value is | ||
1775 | * one greater than the maximum number of properties we expect to get | ||
1776 | * from userspace. | ||
1777 | */ | ||
1778 | if (n_props >= DRM_I915_PERF_PROP_MAX) { | ||
1779 | DRM_DEBUG("More i915 perf properties specified than exist\n"); | ||
1780 | return -EINVAL; | ||
1781 | } | ||
1782 | |||
1783 | for (i = 0; i < n_props; i++) { | ||
1784 | u64 oa_period, oa_freq_hz; | ||
1785 | u64 id, value; | ||
1786 | int ret; | ||
1787 | |||
1788 | ret = get_user(id, uprop); | ||
1789 | if (ret) | ||
1790 | return ret; | ||
1791 | |||
1792 | ret = get_user(value, uprop + 1); | ||
1793 | if (ret) | ||
1794 | return ret; | ||
1795 | |||
1796 | switch ((enum drm_i915_perf_property_id)id) { | ||
1797 | case DRM_I915_PERF_PROP_CTX_HANDLE: | ||
1798 | props->single_context = 1; | ||
1799 | props->ctx_handle = value; | ||
1800 | break; | ||
1801 | case DRM_I915_PERF_PROP_SAMPLE_OA: | ||
1802 | props->sample_flags |= SAMPLE_OA_REPORT; | ||
1803 | break; | ||
1804 | case DRM_I915_PERF_PROP_OA_METRICS_SET: | ||
1805 | if (value == 0 || | ||
1806 | value > dev_priv->perf.oa.n_builtin_sets) { | ||
1807 | DRM_DEBUG("Unknown OA metric set ID\n"); | ||
1808 | return -EINVAL; | ||
1809 | } | ||
1810 | props->metrics_set = value; | ||
1811 | break; | ||
1812 | case DRM_I915_PERF_PROP_OA_FORMAT: | ||
1813 | if (value == 0 || value >= I915_OA_FORMAT_MAX) { | ||
1814 | DRM_DEBUG("Invalid OA report format\n"); | ||
1815 | return -EINVAL; | ||
1816 | } | ||
1817 | if (!dev_priv->perf.oa.oa_formats[value].size) { | ||
1818 | DRM_DEBUG("Invalid OA report format\n"); | ||
1819 | return -EINVAL; | ||
1820 | } | ||
1821 | props->oa_format = value; | ||
1822 | break; | ||
1823 | case DRM_I915_PERF_PROP_OA_EXPONENT: | ||
1824 | if (value > OA_EXPONENT_MAX) { | ||
1825 | DRM_DEBUG("OA timer exponent too high (> %u)\n", | ||
1826 | OA_EXPONENT_MAX); | ||
1827 | return -EINVAL; | ||
1828 | } | ||
1829 | |||
1830 | /* Theoretically we can program the OA unit to sample | ||
1831 | * every 160ns but don't allow that by default unless | ||
1832 | * root. | ||
1833 | * | ||
1834 | * On Haswell the period is derived from the exponent | ||
1835 | * as: | ||
1836 | * | ||
1837 | * period = 80ns * 2^(exponent + 1) | ||
1838 | */ | ||
1839 | BUILD_BUG_ON(sizeof(oa_period) != 8); | ||
1840 | oa_period = 80ull * (2ull << value); | ||
1841 | |||
1842 | /* This check is primarily to ensure that oa_period <= | ||
1843 | * UINT32_MAX (before passing to do_div which only | ||
1844 | * accepts a u32 denominator), but we can also skip | ||
1845 | * checking anything < 1Hz which implicitly can't be | ||
1846 | * limited via an integer oa_max_sample_rate. | ||
1847 | */ | ||
1848 | if (oa_period <= NSEC_PER_SEC) { | ||
1849 | u64 tmp = NSEC_PER_SEC; | ||
1850 | do_div(tmp, oa_period); | ||
1851 | oa_freq_hz = tmp; | ||
1852 | } else | ||
1853 | oa_freq_hz = 0; | ||
1854 | |||
1855 | if (oa_freq_hz > i915_oa_max_sample_rate && | ||
1856 | !capable(CAP_SYS_ADMIN)) { | ||
1857 | DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", | ||
1858 | i915_oa_max_sample_rate); | ||
1859 | return -EACCES; | ||
1860 | } | ||
1861 | |||
1862 | props->oa_periodic = true; | ||
1863 | props->oa_period_exponent = value; | ||
1864 | break; | ||
1865 | default: | ||
1866 | MISSING_CASE(id); | ||
1867 | DRM_DEBUG("Unknown i915 perf property ID\n"); | ||
1868 | return -EINVAL; | ||
1869 | } | ||
1870 | |||
1871 | uprop += 2; | ||
1872 | } | ||
1873 | |||
1874 | return 0; | ||
1875 | } | ||
1876 | |||
1877 | /** | ||
1878 | * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD | ||
1879 | * @dev: drm device | ||
1880 | * @data: ioctl data copied from userspace (unvalidated) | ||
1881 | * @file: drm file | ||
1882 | * | ||
1883 | * Validates the stream open parameters given by userspace including flags | ||
1884 | * and an array of u64 key, value pair properties. | ||
1885 | * | ||
1886 | * Very little is assumed up front about the nature of the stream being | ||
1887 | * opened (for instance we don't assume it's for periodic OA unit metrics). An | ||
1888 | * i915-perf stream is expected to be a suitable interface for other forms of | ||
1889 | * buffered data written by the GPU besides periodic OA metrics. | ||
1890 | * | ||
1891 | * Note we copy the properties from userspace outside of the i915 perf | ||
1892 | * mutex to avoid an awkward lockdep dependency with mmap_sem. | ||
1893 | * | ||
1894 | * Most of the implementation details are handled by | ||
1895 | * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock | ||
1896 | * mutex for serializing with any non-file-operation driver hooks. | ||
1897 | * | ||
1898 | * Return: A newly opened i915 Perf stream file descriptor or negative | ||
1899 | * error code on failure. | ||
1900 | */ | ||
1901 | int i915_perf_open_ioctl(struct drm_device *dev, void *data, | ||
1902 | struct drm_file *file) | ||
1903 | { | ||
1904 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1905 | struct drm_i915_perf_open_param *param = data; | ||
1906 | struct perf_open_properties props; | ||
1907 | u32 known_open_flags; | ||
1908 | int ret; | ||
1909 | |||
1910 | if (!dev_priv->perf.initialized) { | ||
1911 | DRM_DEBUG("i915 perf interface not available for this system\n"); | ||
1912 | return -ENOTSUPP; | ||
1913 | } | ||
1914 | |||
1915 | known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | | ||
1916 | I915_PERF_FLAG_FD_NONBLOCK | | ||
1917 | I915_PERF_FLAG_DISABLED; | ||
1918 | if (param->flags & ~known_open_flags) { | ||
1919 | DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); | ||
1920 | return -EINVAL; | ||
1921 | } | ||
1922 | |||
1923 | ret = read_properties_unlocked(dev_priv, | ||
1924 | u64_to_user_ptr(param->properties_ptr), | ||
1925 | param->num_properties, | ||
1926 | &props); | ||
1927 | if (ret) | ||
1928 | return ret; | ||
1929 | |||
1930 | mutex_lock(&dev_priv->perf.lock); | ||
1931 | ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); | ||
1932 | mutex_unlock(&dev_priv->perf.lock); | ||
1933 | |||
1934 | return ret; | ||
1935 | } | ||
1936 | |||
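Putting the pieces together, a hedged sketch of opening a periodic OA stream from userspace (open_oa_stream is a hypothetical helper; the metric set ID and exponent are illustrative, and real set IDs must be enumerated from the sysfs metrics/ directory advertised below):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int open_oa_stream(int drm_fd)
	{
		uint64_t properties[] = {
			/* include raw OA reports in samples */
			DRM_I915_PERF_PROP_SAMPLE_OA, 1,
			/* metric set ID as enumerated under sysfs metrics/ */
			DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
			DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
			/* exponent 16 -> ~10.5ms sampling period on Haswell */
			DRM_I915_PERF_PROP_OA_EXPONENT, 16,
		};
		struct drm_i915_perf_open_param param = {
			.flags = I915_PERF_FLAG_FD_CLOEXEC |
				 I915_PERF_FLAG_FD_NONBLOCK,
			.num_properties = sizeof(properties) /
					  (2 * sizeof(uint64_t)),
			.properties_ptr = (uintptr_t)properties,
		};

		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
	}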
1937 | /** | ||
1938 | * i915_perf_register - exposes i915-perf to userspace | ||
1939 | * @dev_priv: i915 device instance | ||
1940 | * | ||
1941 | * In particular OA metric sets are advertised under a sysfs metrics/ | ||
1942 | * directory allowing userspace to enumerate valid IDs that can be | ||
1943 | * used to open an i915-perf stream. | ||
1944 | */ | ||
1945 | void i915_perf_register(struct drm_i915_private *dev_priv) | ||
1946 | { | ||
1947 | if (!IS_HASWELL(dev_priv)) | ||
1948 | return; | ||
1949 | |||
1950 | if (!dev_priv->perf.initialized) | ||
1951 | return; | ||
1952 | |||
1953 | /* To be sure we're synchronized with an attempted | ||
1954 | * i915_perf_open_ioctl(), considering that we register after | ||
1955 | * being exposed to userspace. | ||
1956 | */ | ||
1957 | mutex_lock(&dev_priv->perf.lock); | ||
1958 | |||
1959 | dev_priv->perf.metrics_kobj = | ||
1960 | kobject_create_and_add("metrics", | ||
1961 | &dev_priv->drm.primary->kdev->kobj); | ||
1962 | if (!dev_priv->perf.metrics_kobj) | ||
1963 | goto exit; | ||
1964 | |||
1965 | if (i915_perf_register_sysfs_hsw(dev_priv)) { | ||
1966 | kobject_put(dev_priv->perf.metrics_kobj); | ||
1967 | dev_priv->perf.metrics_kobj = NULL; | ||
1968 | } | ||
1969 | |||
1970 | exit: | ||
1971 | mutex_unlock(&dev_priv->perf.lock); | ||
1972 | } | ||
1973 | |||
1974 | /** | ||
1975 | * i915_perf_unregister - hide i915-perf from userspace | ||
1976 | * @dev_priv: i915 device instance | ||
1977 | * | ||
1978 | * i915-perf state cleanup is split up into an 'unregister' and | ||
1979 | * 'deinit' phase where the interface is first hidden from | ||
1980 | * userspace by i915_perf_unregister() before cleaning up | ||
1981 | * remaining state in i915_perf_fini(). | ||
1982 | */ | ||
1983 | void i915_perf_unregister(struct drm_i915_private *dev_priv) | ||
1984 | { | ||
1985 | if (!IS_HASWELL(dev_priv)) | ||
1986 | return; | ||
1987 | |||
1988 | if (!dev_priv->perf.metrics_kobj) | ||
1989 | return; | ||
1990 | |||
1991 | i915_perf_unregister_sysfs_hsw(dev_priv); | ||
1992 | |||
1993 | kobject_put(dev_priv->perf.metrics_kobj); | ||
1994 | dev_priv->perf.metrics_kobj = NULL; | ||
1995 | } | ||
1996 | |||
1997 | static struct ctl_table oa_table[] = { | ||
1998 | { | ||
1999 | .procname = "perf_stream_paranoid", | ||
2000 | .data = &i915_perf_stream_paranoid, | ||
2001 | .maxlen = sizeof(i915_perf_stream_paranoid), | ||
2002 | .mode = 0644, | ||
2003 | .proc_handler = proc_dointvec_minmax, | ||
2004 | .extra1 = &zero, | ||
2005 | .extra2 = &one, | ||
2006 | }, | ||
2007 | { | ||
2008 | .procname = "oa_max_sample_rate", | ||
2009 | .data = &i915_oa_max_sample_rate, | ||
2010 | .maxlen = sizeof(i915_oa_max_sample_rate), | ||
2011 | .mode = 0644, | ||
2012 | .proc_handler = proc_dointvec_minmax, | ||
2013 | .extra1 = &zero, | ||
2014 | .extra2 = &oa_sample_rate_hard_limit, | ||
2015 | }, | ||
2016 | {} | ||
2017 | }; | ||
2018 | |||
2019 | static struct ctl_table i915_root[] = { | ||
2020 | { | ||
2021 | .procname = "i915", | ||
2022 | .maxlen = 0, | ||
2023 | .mode = 0555, | ||
2024 | .child = oa_table, | ||
2025 | }, | ||
2026 | {} | ||
2027 | }; | ||
2028 | |||
2029 | static struct ctl_table dev_root[] = { | ||
2030 | { | ||
2031 | .procname = "dev", | ||
2032 | .maxlen = 0, | ||
2033 | .mode = 0555, | ||
2034 | .child = i915_root, | ||
2035 | }, | ||
2036 | {} | ||
2037 | }; | ||
2038 | |||
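Registered under the sysctl "dev" root, these nested tables surface the two knobs as /proc/sys/dev/i915/perf_stream_paranoid and /proc/sys/dev/i915/oa_max_sample_rate (the latter clamped to oa_sample_rate_hard_limit); with mode 0644 they remain writable by root only.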
2039 | /** | ||
2040 | * i915_perf_init - initialize i915-perf state on module load | ||
2041 | * @dev_priv: i915 device instance | ||
2042 | * | ||
2043 | * Initializes i915-perf state without exposing anything to userspace. | ||
2044 | * | ||
2045 | * Note: i915-perf initialization is split into an 'init' and 'register' | ||
2046 | * phase with the i915_perf_register() exposing state to userspace. | ||
2047 | */ | ||
2048 | void i915_perf_init(struct drm_i915_private *dev_priv) | ||
2049 | { | ||
2050 | if (!IS_HASWELL(dev_priv)) | ||
2051 | return; | ||
2052 | |||
2053 | hrtimer_init(&dev_priv->perf.oa.poll_check_timer, | ||
2054 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
2055 | dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb; | ||
2056 | init_waitqueue_head(&dev_priv->perf.oa.poll_wq); | ||
2057 | |||
2058 | INIT_LIST_HEAD(&dev_priv->perf.streams); | ||
2059 | mutex_init(&dev_priv->perf.lock); | ||
2060 | spin_lock_init(&dev_priv->perf.hook_lock); | ||
2061 | |||
2062 | dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer; | ||
2063 | dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; | ||
2064 | dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; | ||
2065 | dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; | ||
2066 | dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable; | ||
2067 | dev_priv->perf.oa.ops.read = gen7_oa_read; | ||
2068 | dev_priv->perf.oa.ops.oa_buffer_is_empty = | ||
2069 | gen7_oa_buffer_is_empty_fop_unlocked; | ||
2070 | |||
2071 | dev_priv->perf.oa.timestamp_frequency = 12500000; | ||
2072 | |||
2073 | dev_priv->perf.oa.oa_formats = hsw_oa_formats; | ||
2074 | |||
2075 | dev_priv->perf.oa.n_builtin_sets = | ||
2076 | i915_oa_n_builtin_metric_sets_hsw; | ||
2077 | |||
2078 | dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); | ||
2079 | |||
2080 | dev_priv->perf.initialized = true; | ||
2081 | } | ||
2082 | |||
2083 | /** | ||
2084 | * i915_perf_fini - Counter part to i915_perf_init() | ||
2085 | * @dev_priv: i915 device instance | ||
2086 | */ | ||
2087 | void i915_perf_fini(struct drm_i915_private *dev_priv) | ||
2088 | { | ||
2089 | if (!dev_priv->perf.initialized) | ||
2090 | return; | ||
2091 | |||
2092 | unregister_sysctl_table(dev_priv->perf.sysctl_header); | ||
2093 | |||
2094 | memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops)); | ||
2095 | dev_priv->perf.initialized = false; | ||
2096 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c70c07a7b586..00970aa77afa 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -62,6 +62,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
62 | #define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ | 62 | #define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ |
63 | (port) == PORT_B ? (b) : (c)) | 63 | (port) == PORT_B ? (b) : (c)) |
64 | #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c)) | 64 | #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c)) |
65 | #define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \ | ||
66 | (phy) == DPIO_PHY1 ? (b) : (c)) | ||
67 | #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) | ||
65 | 68 | ||
66 | #define _MASKED_FIELD(mask, value) ({ \ | 69 | #define _MASKED_FIELD(mask, value) ({ \ |
67 | if (__builtin_constant_p(mask)) \ | 70 | if (__builtin_constant_p(mask)) \ |
@@ -107,6 +110,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
107 | #define GRDOM_RESET_STATUS (1 << 1) | 110 | #define GRDOM_RESET_STATUS (1 << 1) |
108 | #define GRDOM_RESET_ENABLE (1 << 0) | 111 | #define GRDOM_RESET_ENABLE (1 << 0) |
109 | 112 | ||
113 | /* BSpec only has register offset, PCI device and bit found empirically */ | ||
114 | #define I830_CLOCK_GATE 0xc8 /* device 0 */ | ||
115 | #define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2) | ||
116 | |||
110 | #define GCDGMBUS 0xcc | 117 | #define GCDGMBUS 0xcc |
111 | 118 | ||
112 | #define GCFGC2 0xda | 119 | #define GCFGC2 0xda |
@@ -294,7 +301,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
294 | * Instruction field definitions used by the command parser | 301 | * Instruction field definitions used by the command parser |
295 | */ | 302 | */ |
296 | #define INSTR_CLIENT_SHIFT 29 | 303 | #define INSTR_CLIENT_SHIFT 29 |
297 | #define INSTR_CLIENT_MASK 0xE0000000 | ||
298 | #define INSTR_MI_CLIENT 0x0 | 304 | #define INSTR_MI_CLIENT 0x0 |
299 | #define INSTR_BC_CLIENT 0x2 | 305 | #define INSTR_BC_CLIENT 0x2 |
300 | #define INSTR_RC_CLIENT 0x3 | 306 | #define INSTR_RC_CLIENT 0x3 |
@@ -615,7 +621,344 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
615 | #define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8) | 621 | #define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8) |
616 | #define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) | 622 | #define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) |
617 | 623 | ||
618 | #define OACONTROL _MMIO(0x2360) | 624 | #define GEN7_OACONTROL _MMIO(0x2360) |
625 | #define GEN7_OACONTROL_CTX_MASK 0xFFFFF000 | ||
626 | #define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F | ||
627 | #define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6 | ||
628 | #define GEN7_OACONTROL_TIMER_ENABLE (1<<5) | ||
629 | #define GEN7_OACONTROL_FORMAT_A13 (0<<2) | ||
630 | #define GEN7_OACONTROL_FORMAT_A29 (1<<2) | ||
631 | #define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2) | ||
632 | #define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2) | ||
633 | #define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2) | ||
634 | #define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2) | ||
635 | #define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2) | ||
636 | #define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2) | ||
637 | #define GEN7_OACONTROL_FORMAT_SHIFT 2 | ||
638 | #define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1) | ||
639 | #define GEN7_OACONTROL_ENABLE (1<<0) | ||
640 | |||
641 | #define GEN8_OACTXID _MMIO(0x2364) | ||
642 | |||
643 | #define GEN8_OACONTROL _MMIO(0x2B00) | ||
644 | #define GEN8_OA_REPORT_FORMAT_A12 (0<<2) | ||
645 | #define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2) | ||
646 | #define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2) | ||
647 | #define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2) | ||
648 | #define GEN8_OA_REPORT_FORMAT_SHIFT 2 | ||
649 | #define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1) | ||
650 | #define GEN8_OA_COUNTER_ENABLE (1<<0) | ||
651 | |||
652 | #define GEN8_OACTXCONTROL _MMIO(0x2360) | ||
653 | #define GEN8_OA_TIMER_PERIOD_MASK 0x3F | ||
654 | #define GEN8_OA_TIMER_PERIOD_SHIFT 2 | ||
655 | #define GEN8_OA_TIMER_ENABLE (1<<1) | ||
656 | #define GEN8_OA_COUNTER_RESUME (1<<0) | ||
657 | |||
658 | #define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */ | ||
659 | #define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3) | ||
660 | #define GEN7_OABUFFER_EDGE_TRIGGER (1<<2) | ||
661 | #define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1) | ||
662 | #define GEN7_OABUFFER_RESUME (1<<0) | ||
663 | |||
664 | #define GEN8_OABUFFER _MMIO(0x2b14) | ||
665 | |||
666 | #define GEN7_OASTATUS1 _MMIO(0x2364) | ||
667 | #define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0 | ||
668 | #define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2) | ||
669 | #define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1) | ||
670 | #define GEN7_OASTATUS1_REPORT_LOST (1<<0) | ||
671 | |||
672 | #define GEN7_OASTATUS2 _MMIO(0x2368) | ||
673 | #define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0 | ||
674 | |||
675 | #define GEN8_OASTATUS _MMIO(0x2b08) | ||
676 | #define GEN8_OASTATUS_OVERRUN_STATUS (1<<3) | ||
677 | #define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2) | ||
678 | #define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1) | ||
679 | #define GEN8_OASTATUS_REPORT_LOST (1<<0) | ||
680 | |||
681 | #define GEN8_OAHEADPTR _MMIO(0x2B0C) | ||
682 | #define GEN8_OATAILPTR _MMIO(0x2B10) | ||
683 | |||
684 | #define OABUFFER_SIZE_128K (0<<3) | ||
685 | #define OABUFFER_SIZE_256K (1<<3) | ||
686 | #define OABUFFER_SIZE_512K (2<<3) | ||
687 | #define OABUFFER_SIZE_1M (3<<3) | ||
688 | #define OABUFFER_SIZE_2M (4<<3) | ||
689 | #define OABUFFER_SIZE_4M (5<<3) | ||
690 | #define OABUFFER_SIZE_8M (6<<3) | ||
691 | #define OABUFFER_SIZE_16M (7<<3) | ||
692 | |||
693 | #define OA_MEM_SELECT_GGTT (1<<0) | ||
694 | |||
695 | #define EU_PERF_CNTL0 _MMIO(0xe458) | ||
696 | |||
697 | #define GDT_CHICKEN_BITS _MMIO(0x9840) | ||
698 | #define GT_NOA_ENABLE 0x00000080 | ||
699 | |||
700 | /* | ||
701 | * OA Boolean state | ||
702 | */ | ||
703 | |||
704 | #define OAREPORTTRIG1 _MMIO(0x2740) | ||
705 | #define OAREPORTTRIG1_THRESHOLD_MASK 0xffff | ||
706 | #define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ | ||
707 | |||
708 | #define OAREPORTTRIG2 _MMIO(0x2744) | ||
709 | #define OAREPORTTRIG2_INVERT_A_0 (1<<0) | ||
710 | #define OAREPORTTRIG2_INVERT_A_1 (1<<1) | ||
711 | #define OAREPORTTRIG2_INVERT_A_2 (1<<2) | ||
712 | #define OAREPORTTRIG2_INVERT_A_3 (1<<3) | ||
713 | #define OAREPORTTRIG2_INVERT_A_4 (1<<4) | ||
714 | #define OAREPORTTRIG2_INVERT_A_5 (1<<5) | ||
715 | #define OAREPORTTRIG2_INVERT_A_6 (1<<6) | ||
716 | #define OAREPORTTRIG2_INVERT_A_7 (1<<7) | ||
717 | #define OAREPORTTRIG2_INVERT_A_8 (1<<8) | ||
718 | #define OAREPORTTRIG2_INVERT_A_9 (1<<9) | ||
719 | #define OAREPORTTRIG2_INVERT_A_10 (1<<10) | ||
720 | #define OAREPORTTRIG2_INVERT_A_11 (1<<11) | ||
721 | #define OAREPORTTRIG2_INVERT_A_12 (1<<12) | ||
722 | #define OAREPORTTRIG2_INVERT_A_13 (1<<13) | ||
723 | #define OAREPORTTRIG2_INVERT_A_14 (1<<14) | ||
724 | #define OAREPORTTRIG2_INVERT_A_15 (1<<15) | ||
725 | #define OAREPORTTRIG2_INVERT_B_0 (1<<16) | ||
726 | #define OAREPORTTRIG2_INVERT_B_1 (1<<17) | ||
727 | #define OAREPORTTRIG2_INVERT_B_2 (1<<18) | ||
728 | #define OAREPORTTRIG2_INVERT_B_3 (1<<19) | ||
729 | #define OAREPORTTRIG2_INVERT_C_0 (1<<20) | ||
730 | #define OAREPORTTRIG2_INVERT_C_1 (1<<21) | ||
731 | #define OAREPORTTRIG2_INVERT_D_0 (1<<22) | ||
732 | #define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23) | ||
733 | #define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31) | ||
734 | |||
735 | #define OAREPORTTRIG3 _MMIO(0x2748) | ||
736 | #define OAREPORTTRIG3_NOA_SELECT_MASK 0xf | ||
737 | #define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0 | ||
738 | #define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4 | ||
739 | #define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8 | ||
740 | #define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12 | ||
741 | #define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16 | ||
742 | #define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20 | ||
743 | #define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24 | ||
744 | #define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28 | ||
745 | |||
746 | #define OAREPORTTRIG4 _MMIO(0x274c) | ||
747 | #define OAREPORTTRIG4_NOA_SELECT_MASK 0xf | ||
748 | #define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0 | ||
749 | #define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4 | ||
750 | #define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8 | ||
751 | #define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12 | ||
752 | #define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16 | ||
753 | #define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20 | ||
754 | #define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24 | ||
755 | #define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28 | ||
756 | |||
757 | #define OAREPORTTRIG5 _MMIO(0x2750) | ||
758 | #define OAREPORTTRIG5_THRESHOLD_MASK 0xffff | ||
759 | #define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ | ||
760 | |||
761 | #define OAREPORTTRIG6 _MMIO(0x2754) | ||
762 | #define OAREPORTTRIG6_INVERT_A_0 (1<<0) | ||
763 | #define OAREPORTTRIG6_INVERT_A_1 (1<<1) | ||
764 | #define OAREPORTTRIG6_INVERT_A_2 (1<<2) | ||
765 | #define OAREPORTTRIG6_INVERT_A_3 (1<<3) | ||
766 | #define OAREPORTTRIG6_INVERT_A_4 (1<<4) | ||
767 | #define OAREPORTTRIG6_INVERT_A_5 (1<<5) | ||
768 | #define OAREPORTTRIG6_INVERT_A_6 (1<<6) | ||
769 | #define OAREPORTTRIG6_INVERT_A_7 (1<<7) | ||
770 | #define OAREPORTTRIG6_INVERT_A_8 (1<<8) | ||
771 | #define OAREPORTTRIG6_INVERT_A_9 (1<<9) | ||
772 | #define OAREPORTTRIG6_INVERT_A_10 (1<<10) | ||
773 | #define OAREPORTTRIG6_INVERT_A_11 (1<<11) | ||
774 | #define OAREPORTTRIG6_INVERT_A_12 (1<<12) | ||
775 | #define OAREPORTTRIG6_INVERT_A_13 (1<<13) | ||
776 | #define OAREPORTTRIG6_INVERT_A_14 (1<<14) | ||
777 | #define OAREPORTTRIG6_INVERT_A_15 (1<<15) | ||
778 | #define OAREPORTTRIG6_INVERT_B_0 (1<<16) | ||
779 | #define OAREPORTTRIG6_INVERT_B_1 (1<<17) | ||
780 | #define OAREPORTTRIG6_INVERT_B_2 (1<<18) | ||
781 | #define OAREPORTTRIG6_INVERT_B_3 (1<<19) | ||
782 | #define OAREPORTTRIG6_INVERT_C_0 (1<<20) | ||
783 | #define OAREPORTTRIG6_INVERT_C_1 (1<<21) | ||
784 | #define OAREPORTTRIG6_INVERT_D_0 (1<<22) | ||
785 | #define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23) | ||
786 | #define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31) | ||
787 | |||
788 | #define OAREPORTTRIG7 _MMIO(0x2758) | ||
789 | #define OAREPORTTRIG7_NOA_SELECT_MASK 0xf | ||
790 | #define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0 | ||
791 | #define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4 | ||
792 | #define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8 | ||
793 | #define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12 | ||
794 | #define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16 | ||
795 | #define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20 | ||
796 | #define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24 | ||
797 | #define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28 | ||
798 | |||
799 | #define OAREPORTTRIG8 _MMIO(0x275c) | ||
800 | #define OAREPORTTRIG8_NOA_SELECT_MASK 0xf | ||
801 | #define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0 | ||
802 | #define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4 | ||
803 | #define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8 | ||
804 | #define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12 | ||
805 | #define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16 | ||
806 | #define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20 | ||
807 | #define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24 | ||
808 | #define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28 | ||
809 | |||
810 | #define OASTARTTRIG1 _MMIO(0x2710) | ||
811 | #define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000 | ||
812 | #define OASTARTTRIG1_THRESHOLD_MASK 0xffff | ||
813 | |||
814 | #define OASTARTTRIG2 _MMIO(0x2714) | ||
815 | #define OASTARTTRIG2_INVERT_A_0 (1<<0) | ||
816 | #define OASTARTTRIG2_INVERT_A_1 (1<<1) | ||
817 | #define OASTARTTRIG2_INVERT_A_2 (1<<2) | ||
818 | #define OASTARTTRIG2_INVERT_A_3 (1<<3) | ||
819 | #define OASTARTTRIG2_INVERT_A_4 (1<<4) | ||
820 | #define OASTARTTRIG2_INVERT_A_5 (1<<5) | ||
821 | #define OASTARTTRIG2_INVERT_A_6 (1<<6) | ||
822 | #define OASTARTTRIG2_INVERT_A_7 (1<<7) | ||
823 | #define OASTARTTRIG2_INVERT_A_8 (1<<8) | ||
824 | #define OASTARTTRIG2_INVERT_A_9 (1<<9) | ||
825 | #define OASTARTTRIG2_INVERT_A_10 (1<<10) | ||
826 | #define OASTARTTRIG2_INVERT_A_11 (1<<11) | ||
827 | #define OASTARTTRIG2_INVERT_A_12 (1<<12) | ||
828 | #define OASTARTTRIG2_INVERT_A_13 (1<<13) | ||
829 | #define OASTARTTRIG2_INVERT_A_14 (1<<14) | ||
830 | #define OASTARTTRIG2_INVERT_A_15 (1<<15) | ||
831 | #define OASTARTTRIG2_INVERT_B_0 (1<<16) | ||
832 | #define OASTARTTRIG2_INVERT_B_1 (1<<17) | ||
833 | #define OASTARTTRIG2_INVERT_B_2 (1<<18) | ||
834 | #define OASTARTTRIG2_INVERT_B_3 (1<<19) | ||
835 | #define OASTARTTRIG2_INVERT_C_0 (1<<20) | ||
836 | #define OASTARTTRIG2_INVERT_C_1 (1<<21) | ||
837 | #define OASTARTTRIG2_INVERT_D_0 (1<<22) | ||
838 | #define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23) | ||
839 | #define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24) | ||
840 | #define OASTARTTRIG2_EVENT_SELECT_0 (1<<28) | ||
841 | #define OASTARTTRIG2_EVENT_SELECT_1 (1<<29) | ||
842 | #define OASTARTTRIG2_EVENT_SELECT_2 (1<<30) | ||
843 | #define OASTARTTRIG2_EVENT_SELECT_3 (1<<31) | ||
844 | |||
845 | #define OASTARTTRIG3 _MMIO(0x2718) | ||
846 | #define OASTARTTRIG3_NOA_SELECT_MASK 0xf | ||
847 | #define OASTARTTRIG3_NOA_SELECT_8_SHIFT 0 | ||
848 | #define OASTARTTRIG3_NOA_SELECT_9_SHIFT 4 | ||
849 | #define OASTARTTRIG3_NOA_SELECT_10_SHIFT 8 | ||
850 | #define OASTARTTRIG3_NOA_SELECT_11_SHIFT 12 | ||
851 | #define OASTARTTRIG3_NOA_SELECT_12_SHIFT 16 | ||
852 | #define OASTARTTRIG3_NOA_SELECT_13_SHIFT 20 | ||
853 | #define OASTARTTRIG3_NOA_SELECT_14_SHIFT 24 | ||
854 | #define OASTARTTRIG3_NOA_SELECT_15_SHIFT 28 | ||
855 | |||
856 | #define OASTARTTRIG4 _MMIO(0x271c) | ||
857 | #define OASTARTTRIG4_NOA_SELECT_MASK 0xf | ||
858 | #define OASTARTTRIG4_NOA_SELECT_0_SHIFT 0 | ||
859 | #define OASTARTTRIG4_NOA_SELECT_1_SHIFT 4 | ||
860 | #define OASTARTTRIG4_NOA_SELECT_2_SHIFT 8 | ||
861 | #define OASTARTTRIG4_NOA_SELECT_3_SHIFT 12 | ||
862 | #define OASTARTTRIG4_NOA_SELECT_4_SHIFT 16 | ||
863 | #define OASTARTTRIG4_NOA_SELECT_5_SHIFT 20 | ||
864 | #define OASTARTTRIG4_NOA_SELECT_6_SHIFT 24 | ||
865 | #define OASTARTTRIG4_NOA_SELECT_7_SHIFT 28 | ||
866 | |||
867 | #define OASTARTTRIG5 _MMIO(0x2720) | ||
868 | #define OASTARTTRIG5_THRESHOLD_COUNT_MASK_MBZ 0xffff0000 | ||
869 | #define OASTARTTRIG5_THRESHOLD_MASK 0xffff | ||
870 | |||
871 | #define OASTARTTRIG6 _MMIO(0x2724) | ||
872 | #define OASTARTTRIG6_INVERT_A_0 (1<<0) | ||
873 | #define OASTARTTRIG6_INVERT_A_1 (1<<1) | ||
874 | #define OASTARTTRIG6_INVERT_A_2 (1<<2) | ||
875 | #define OASTARTTRIG6_INVERT_A_3 (1<<3) | ||
876 | #define OASTARTTRIG6_INVERT_A_4 (1<<4) | ||
877 | #define OASTARTTRIG6_INVERT_A_5 (1<<5) | ||
878 | #define OASTARTTRIG6_INVERT_A_6 (1<<6) | ||
879 | #define OASTARTTRIG6_INVERT_A_7 (1<<7) | ||
880 | #define OASTARTTRIG6_INVERT_A_8 (1<<8) | ||
881 | #define OASTARTTRIG6_INVERT_A_9 (1<<9) | ||
882 | #define OASTARTTRIG6_INVERT_A_10 (1<<10) | ||
883 | #define OASTARTTRIG6_INVERT_A_11 (1<<11) | ||
884 | #define OASTARTTRIG6_INVERT_A_12 (1<<12) | ||
885 | #define OASTARTTRIG6_INVERT_A_13 (1<<13) | ||
886 | #define OASTARTTRIG6_INVERT_A_14 (1<<14) | ||
887 | #define OASTARTTRIG6_INVERT_A_15 (1<<15) | ||
888 | #define OASTARTTRIG6_INVERT_B_0 (1<<16) | ||
889 | #define OASTARTTRIG6_INVERT_B_1 (1<<17) | ||
890 | #define OASTARTTRIG6_INVERT_B_2 (1<<18) | ||
891 | #define OASTARTTRIG6_INVERT_B_3 (1<<19) | ||
892 | #define OASTARTTRIG6_INVERT_C_0 (1<<20) | ||
893 | #define OASTARTTRIG6_INVERT_C_1 (1<<21) | ||
894 | #define OASTARTTRIG6_INVERT_D_0 (1<<22) | ||
895 | #define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23) | ||
896 | #define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24) | ||
897 | #define OASTARTTRIG6_EVENT_SELECT_4 (1<<28) | ||
898 | #define OASTARTTRIG6_EVENT_SELECT_5 (1<<29) | ||
899 | #define OASTARTTRIG6_EVENT_SELECT_6 (1<<30) | ||
900 | #define OASTARTTRIG6_EVENT_SELECT_7 (1<<31) | ||
901 | |||
902 | #define OASTARTTRIG7 _MMIO(0x2728) | ||
903 | #define OASTARTTRIG7_NOA_SELECT_MASK 0xf | ||
904 | #define OASTARTTRIG7_NOA_SELECT_8_SHIFT 0 | ||
905 | #define OASTARTTRIG7_NOA_SELECT_9_SHIFT 4 | ||
906 | #define OASTARTTRIG7_NOA_SELECT_10_SHIFT 8 | ||
907 | #define OASTARTTRIG7_NOA_SELECT_11_SHIFT 12 | ||
908 | #define OASTARTTRIG7_NOA_SELECT_12_SHIFT 16 | ||
909 | #define OASTARTTRIG7_NOA_SELECT_13_SHIFT 20 | ||
910 | #define OASTARTTRIG7_NOA_SELECT_14_SHIFT 24 | ||
911 | #define OASTARTTRIG7_NOA_SELECT_15_SHIFT 28 | ||
912 | |||
913 | #define OASTARTTRIG8 _MMIO(0x272c) | ||
914 | #define OASTARTTRIG8_NOA_SELECT_MASK 0xf | ||
915 | #define OASTARTTRIG8_NOA_SELECT_0_SHIFT 0 | ||
916 | #define OASTARTTRIG8_NOA_SELECT_1_SHIFT 4 | ||
917 | #define OASTARTTRIG8_NOA_SELECT_2_SHIFT 8 | ||
918 | #define OASTARTTRIG8_NOA_SELECT_3_SHIFT 12 | ||
919 | #define OASTARTTRIG8_NOA_SELECT_4_SHIFT 16 | ||
920 | #define OASTARTTRIG8_NOA_SELECT_5_SHIFT 20 | ||
921 | #define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24 | ||
922 | #define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28 | ||
923 | |||
924 | /* CECX_0 */ | ||
925 | #define OACEC_COMPARE_LESS_OR_EQUAL 6 | ||
926 | #define OACEC_COMPARE_NOT_EQUAL 5 | ||
927 | #define OACEC_COMPARE_LESS_THAN 4 | ||
928 | #define OACEC_COMPARE_GREATER_OR_EQUAL 3 | ||
929 | #define OACEC_COMPARE_EQUAL 2 | ||
930 | #define OACEC_COMPARE_GREATER_THAN 1 | ||
931 | #define OACEC_COMPARE_ANY_EQUAL 0 | ||
932 | |||
933 | #define OACEC_COMPARE_VALUE_MASK 0xffff | ||
934 | #define OACEC_COMPARE_VALUE_SHIFT 3 | ||
935 | |||
936 | #define OACEC_SELECT_NOA (0<<19) | ||
937 | #define OACEC_SELECT_PREV (1<<19) | ||
938 | #define OACEC_SELECT_BOOLEAN (2<<19) | ||
939 | |||
940 | /* CECX_1 */ | ||
941 | #define OACEC_MASK_MASK 0xffff | ||
942 | #define OACEC_CONSIDERATIONS_MASK 0xffff | ||
943 | #define OACEC_CONSIDERATIONS_SHIFT 16 | ||
944 | |||
945 | #define OACEC0_0 _MMIO(0x2770) | ||
946 | #define OACEC0_1 _MMIO(0x2774) | ||
947 | #define OACEC1_0 _MMIO(0x2778) | ||
948 | #define OACEC1_1 _MMIO(0x277c) | ||
949 | #define OACEC2_0 _MMIO(0x2780) | ||
950 | #define OACEC2_1 _MMIO(0x2784) | ||
951 | #define OACEC3_0 _MMIO(0x2788) | ||
952 | #define OACEC3_1 _MMIO(0x278c) | ||
953 | #define OACEC4_0 _MMIO(0x2790) | ||
954 | #define OACEC4_1 _MMIO(0x2794) | ||
955 | #define OACEC5_0 _MMIO(0x2798) | ||
956 | #define OACEC5_1 _MMIO(0x279c) | ||
957 | #define OACEC6_0 _MMIO(0x27a0) | ||
958 | #define OACEC6_1 _MMIO(0x27a4) | ||
959 | #define OACEC7_0 _MMIO(0x27a8) | ||
960 | #define OACEC7_1 _MMIO(0x27ac) | ||
961 | |||
619 | 962 | ||
620 | #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 | 963 | #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 |
621 | #define _GEN7_PIPEB_DE_LOAD_SL 0x71068 | 964 | #define _GEN7_PIPEB_DE_LOAD_SL 0x71068 |
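The OA trigger banks above share one layout: TRIG1/TRIG5 carry a 16-bit threshold, TRIG2/TRIG6 carry per-counter invert bits plus the enable and event-select bits, and TRIG3/4/7/8 pack eight 4-bit NOA select fields per register, one nibble per signal (the OACEC pairs hold a compare op and value in CECX_0 and a signal mask in CECX_1). A minimal sketch of arming a report trigger from these macros; the signal and threshold values are invented, and I915_WRITE is the driver's usual MMIO helper:

    /* Illustrative only: route two invented NOA signals and arm a
     * threshold-based report trigger.
     */
    u32 noa = 0;

    noa |= (0x3 & OAREPORTTRIG4_NOA_SELECT_MASK) << OAREPORTTRIG4_NOA_SELECT_0_SHIFT;
    noa |= (0x5 & OAREPORTTRIG4_NOA_SELECT_MASK) << OAREPORTTRIG4_NOA_SELECT_1_SHIFT;
    I915_WRITE(OAREPORTTRIG4, noa);

    I915_WRITE(OAREPORTTRIG5, 0x40 & OAREPORTTRIG5_THRESHOLD_MASK);
    I915_WRITE(OAREPORTTRIG6, OAREPORTTRIG6_THRESHOLD_ENABLE |
                              OAREPORTTRIG6_REPORT_TRIGGER_ENABLE);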
@@ -708,9 +1051,15 @@ enum skl_disp_power_wells { | |||
708 | /* These numbers are fixed and must match the position of the pw bits */ | 1051 | /* These numbers are fixed and must match the position of the pw bits */ |
709 | SKL_DISP_PW_MISC_IO, | 1052 | SKL_DISP_PW_MISC_IO, |
710 | SKL_DISP_PW_DDI_A_E, | 1053 | SKL_DISP_PW_DDI_A_E, |
1054 | GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E, | ||
711 | SKL_DISP_PW_DDI_B, | 1055 | SKL_DISP_PW_DDI_B, |
712 | SKL_DISP_PW_DDI_C, | 1056 | SKL_DISP_PW_DDI_C, |
713 | SKL_DISP_PW_DDI_D, | 1057 | SKL_DISP_PW_DDI_D, |
1058 | |||
1059 | GLK_DISP_PW_AUX_A = 8, | ||
1060 | GLK_DISP_PW_AUX_B, | ||
1061 | GLK_DISP_PW_AUX_C, | ||
1062 | |||
714 | SKL_DISP_PW_1 = 14, | 1063 | SKL_DISP_PW_1 = 14, |
715 | SKL_DISP_PW_2, | 1064 | SKL_DISP_PW_2, |
716 | 1065 | ||
@@ -720,6 +1069,7 @@ enum skl_disp_power_wells { | |||
720 | 1069 | ||
721 | BXT_DPIO_CMN_A, | 1070 | BXT_DPIO_CMN_A, |
722 | BXT_DPIO_CMN_BC, | 1071 | BXT_DPIO_CMN_BC, |
1072 | GLK_DPIO_CMN_C, | ||
723 | }; | 1073 | }; |
724 | 1074 | ||
725 | #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) | 1075 | #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) |
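The GLK entries slot into the SKL numbering deliberately: GLK_DISP_PW_DDI_A aliases SKL_DISP_PW_DDI_A_E, and the AUX wells take fixed IDs, because SKL_POWER_WELL_STATE() derives each well's status-bit position straight from the enum value (two bits per well, state at bit pw*2). A hedged sketch of a status check, assuming the usual HSW_PWR_WELL_DRIVER register and I915_READ helper:

    /* Illustrative only: does power well 1 report enabled? */
    bool enabled = I915_READ(HSW_PWR_WELL_DRIVER) &
                   SKL_POWER_WELL_STATE(SKL_DISP_PW_1);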
@@ -1188,8 +1538,10 @@ enum skl_disp_power_wells { | |||
1188 | /* BXT PHY registers */ | 1538 | /* BXT PHY registers */ |
1189 | #define _BXT_PHY0_BASE 0x6C000 | 1539 | #define _BXT_PHY0_BASE 0x6C000 |
1190 | #define _BXT_PHY1_BASE 0x162000 | 1540 | #define _BXT_PHY1_BASE 0x162000 |
1191 | #define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \ | 1541 | #define _BXT_PHY2_BASE 0x163000 |
1192 | _BXT_PHY1_BASE) | 1542 | #define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \ |
1543 | _BXT_PHY1_BASE, \ | ||
1544 | _BXT_PHY2_BASE) | ||
1193 | 1545 | ||
1194 | #define _BXT_PHY(phy, reg) \ | 1546 | #define _BXT_PHY(phy, reg) \ |
1195 | _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) | 1547 | _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) |
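_BXT_PHY() rebases a register defined at its PHY0 address onto the requested PHY, so one definition serves all three PHYs. The arithmetic checks out against definitions later in this patch: _PORT_TX_DW5_LN0_B is 0x6C514 (a PHY0 address), and rebasing it onto PHY1 gives

    0x162000 - 0x6C000 + 0x6C514 = 0x162514

which is exactly _PORT_TX_DW5_LN0_A, the PHY1 instance of the same register.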
@@ -1201,7 +1553,6 @@ enum skl_disp_power_wells { | |||
1201 | _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1)) | 1553 | _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1)) |
1202 | 1554 | ||
1203 | #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) | 1555 | #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) |
1204 | #define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) | ||
1205 | 1556 | ||
1206 | #define _BXT_PHY_CTL_DDI_A 0x64C00 | 1557 | #define _BXT_PHY_CTL_DDI_A 0x64C00 |
1207 | #define _BXT_PHY_CTL_DDI_B 0x64C10 | 1558 | #define _BXT_PHY_CTL_DDI_B 0x64C10 |
@@ -1214,9 +1565,11 @@ enum skl_disp_power_wells { | |||
1214 | 1565 | ||
1215 | #define _PHY_CTL_FAMILY_EDP 0x64C80 | 1566 | #define _PHY_CTL_FAMILY_EDP 0x64C80 |
1216 | #define _PHY_CTL_FAMILY_DDI 0x64C90 | 1567 | #define _PHY_CTL_FAMILY_DDI 0x64C90 |
1568 | #define _PHY_CTL_FAMILY_DDI_C 0x64CA0 | ||
1217 | #define COMMON_RESET_DIS (1 << 31) | 1569 | #define COMMON_RESET_DIS (1 << 31) |
1218 | #define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \ | 1570 | #define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \ |
1219 | _PHY_CTL_FAMILY_EDP) | 1571 | _PHY_CTL_FAMILY_EDP, \ |
1572 | _PHY_CTL_FAMILY_DDI_C) | ||
1220 | 1573 | ||
1221 | /* BXT PHY PLL registers */ | 1574 | /* BXT PHY PLL registers */ |
1222 | #define _PORT_PLL_A 0x46074 | 1575 | #define _PORT_PLL_A 0x46074 |
@@ -1225,6 +1578,8 @@ enum skl_disp_power_wells { | |||
1225 | #define PORT_PLL_ENABLE (1 << 31) | 1578 | #define PORT_PLL_ENABLE (1 << 31) |
1226 | #define PORT_PLL_LOCK (1 << 30) | 1579 | #define PORT_PLL_LOCK (1 << 30) |
1227 | #define PORT_PLL_REF_SEL (1 << 27) | 1580 | #define PORT_PLL_REF_SEL (1 << 27) |
1581 | #define PORT_PLL_POWER_ENABLE (1 << 26) | ||
1582 | #define PORT_PLL_POWER_STATE (1 << 25) | ||
1228 | #define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B) | 1583 | #define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B) |
1229 | 1584 | ||
1230 | #define _PORT_PLL_EBB_0_A 0x162034 | 1585 | #define _PORT_PLL_EBB_0_A 0x162034 |
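The two new bits split PLL power control into a request (PORT_PLL_POWER_ENABLE) and a hardware acknowledgment (PORT_PLL_POWER_STATE). A minimal sketch of the request/ack handshake, assuming the driver's wait_for_us() polling helper; the 200 us timeout is invented:

    /* Illustrative only: request PLL power, then poll for the ack. */
    u32 tmp = I915_READ(BXT_PORT_PLL_ENABLE(port));
    tmp |= PORT_PLL_POWER_ENABLE;
    I915_WRITE(BXT_PORT_PLL_ENABLE(port), tmp);

    if (wait_for_us(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
                    PORT_PLL_POWER_STATE, 200))
        DRM_ERROR("PLL power ack timeout on port %c\n", port_name(port));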
@@ -1435,6 +1790,21 @@ enum skl_disp_power_wells { | |||
1435 | #define DEEMPH_SHIFT 24 | 1790 | #define DEEMPH_SHIFT 24 |
1436 | #define DE_EMPHASIS (0xFF << DEEMPH_SHIFT) | 1791 | #define DE_EMPHASIS (0xFF << DEEMPH_SHIFT) |
1437 | 1792 | ||
1793 | #define _PORT_TX_DW5_LN0_A 0x162514 | ||
1794 | #define _PORT_TX_DW5_LN0_B 0x6C514 | ||
1795 | #define _PORT_TX_DW5_LN0_C 0x6C914 | ||
1796 | #define _PORT_TX_DW5_GRP_A 0x162D14 | ||
1797 | #define _PORT_TX_DW5_GRP_B 0x6CD14 | ||
1798 | #define _PORT_TX_DW5_GRP_C 0x6CF14 | ||
1799 | #define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ | ||
1800 | _PORT_TX_DW5_LN0_B, \ | ||
1801 | _PORT_TX_DW5_LN0_C) | ||
1802 | #define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ | ||
1803 | _PORT_TX_DW5_GRP_B, \ | ||
1804 | _PORT_TX_DW5_GRP_C) | ||
1805 | #define DCC_DELAY_RANGE_1 (1 << 9) | ||
1806 | #define DCC_DELAY_RANGE_2 (1 << 8) | ||
1807 | |||
1438 | #define _PORT_TX_DW14_LN0_A 0x162538 | 1808 | #define _PORT_TX_DW14_LN0_A 0x162538 |
1439 | #define _PORT_TX_DW14_LN0_B 0x6C538 | 1809 | #define _PORT_TX_DW14_LN0_B 0x6C538 |
1440 | #define _PORT_TX_DW14_LN0_C 0x6C938 | 1810 | #define _PORT_TX_DW14_LN0_C 0x6C938 |
@@ -2920,7 +3290,7 @@ enum skl_disp_power_wells { | |||
2920 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) | 3290 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) |
2921 | #define INTERVAL_0_833_US(us) (((us) * 6) / 5) | 3291 | #define INTERVAL_0_833_US(us) (((us) * 6) / 5) |
2922 | #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ | 3292 | #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ |
2923 | (IS_BROXTON(dev_priv) ? \ | 3293 | (IS_GEN9_LP(dev_priv) ? \ |
2924 | INTERVAL_0_833_US(us) : \ | 3294 | INTERVAL_0_833_US(us) : \ |
2925 | INTERVAL_1_33_US(us)) : \ | 3295 | INTERVAL_1_33_US(us)) : \ |
2926 | INTERVAL_1_28_US(us)) | 3296 | INTERVAL_1_28_US(us)) |
@@ -2929,7 +3299,7 @@ enum skl_disp_power_wells { | |||
2929 | #define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3) | 3299 | #define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3) |
2930 | #define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6) | 3300 | #define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6) |
2931 | #define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \ | 3301 | #define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \ |
2932 | (IS_BROXTON(dev_priv) ? \ | 3302 | (IS_GEN9_LP(dev_priv) ? \ |
2933 | INTERVAL_0_833_TO_US(interval) : \ | 3303 | INTERVAL_0_833_TO_US(interval) : \ |
2934 | INTERVAL_1_33_TO_US(interval)) : \ | 3304 | INTERVAL_1_33_TO_US(interval)) : \ |
2935 | INTERVAL_1_28_TO_US(interval)) | 3305 | INTERVAL_1_28_TO_US(interval)) |
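Both conversion directions are integer approximations of the GT timestamp tick: 1.28 us outside gen9, 1.33 us on big-core gen9, and 0.833 us on the low-power gen9 parts that IS_GEN9_LP() now matches (Broxton, and with this series Geminilake). A worked round trip for a 100 us interval on a gen9 LP part:

    INTERVAL_0_833_US(100)    = (100 * 6) / 5 = 120 ticks
    INTERVAL_0_833_TO_US(120) = (120 * 5) / 6 = 100 us

The integer division means the round trip is only exact when the values divide evenly, the usual caveat with these helpers.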
@@ -5374,18 +5744,21 @@ enum { | |||
5374 | #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) | 5744 | #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) |
5375 | #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) | 5745 | #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) |
5376 | 5746 | ||
5377 | #define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR) | 5747 | #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ |
5378 | #define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF) | 5748 | _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) |
5379 | #define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE) | 5749 | |
5380 | #define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS) | 5750 | #define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR) |
5381 | #define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE) | 5751 | #define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF) |
5382 | #define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL) | 5752 | #define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE) |
5383 | #define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK) | 5753 | #define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS) |
5384 | #define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF) | 5754 | #define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE) |
5385 | #define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL) | 5755 | #define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL) |
5386 | #define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF) | 5756 | #define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK) |
5387 | #define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA) | 5757 | #define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF) |
5388 | #define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC) | 5758 | #define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL) |
5759 | #define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF) | ||
5760 | #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) | ||
5761 | #define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) | ||
5389 | 5762 | ||
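The rework replaces the raw 0/1 sprite index with the driver's enum plane_id: _MMIO_VLV_SPR() subtracts PLANE_SPRITE0 to recover the index, folds in pipe*2 as before, and _MMIO_PIPE() extrapolates linearly from the two register instances. A worked mapping, assuming PLANE_SPRITE1 == PLANE_SPRITE0 + 1:

    SPCNTR(PIPE_A, PLANE_SPRITE0)  ->  index 0  ->  _SPACNTR
    SPCNTR(PIPE_A, PLANE_SPRITE1)  ->  index 1  ->  _SPBCNTR
    SPCNTR(PIPE_B, PLANE_SPRITE0)  ->  index 2  ->  _SPACNTR + 2 * (_SPBCNTR - _SPACNTR)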
5390 | /* | 5763 | /* |
5391 | * CHV pipe B sprite CSC | 5764 | * CHV pipe B sprite CSC |
@@ -5394,29 +5767,32 @@ enum { | |||
5394 | * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff| | 5767 | * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff| |
5395 | * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff| | 5768 | * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff| |
5396 | */ | 5769 | */ |
5397 | #define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000) | 5770 | #define _MMIO_CHV_SPCSC(plane_id, reg) \ |
5398 | #define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000) | 5771 | _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg)) |
5399 | #define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000) | 5772 | |
5773 | #define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900) | ||
5774 | #define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904) | ||
5775 | #define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908) | ||
5400 | #define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */ | 5776 | #define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */ |
5401 | #define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */ | 5777 | #define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */ |
5402 | 5778 | ||
5403 | #define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000) | 5779 | #define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c) |
5404 | #define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000) | 5780 | #define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910) |
5405 | #define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000) | 5781 | #define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914) |
5406 | #define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000) | 5782 | #define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918) |
5407 | #define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000) | 5783 | #define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c) |
5408 | #define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */ | 5784 | #define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */ |
5409 | #define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */ | 5785 | #define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */ |
5410 | 5786 | ||
5411 | #define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000) | 5787 | #define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920) |
5412 | #define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000) | 5788 | #define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924) |
5413 | #define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000) | 5789 | #define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928) |
5414 | #define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */ | 5790 | #define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */ |
5415 | #define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */ | 5791 | #define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */ |
5416 | 5792 | ||
5417 | #define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000) | 5793 | #define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c) |
5418 | #define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000) | 5794 | #define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930) |
5419 | #define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000) | 5795 | #define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934) |
5420 | #define SPCSC_OMAX(x) ((x) << 16) /* u10 */ | 5796 | #define SPCSC_OMAX(x) ((x) << 16) /* u10 */ |
5421 | #define SPCSC_OMIN(x) ((x) << 0) /* u10 */ | 5797 | #define SPCSC_OMIN(x) ((x) << 0) /* u10 */ |
5422 | 5798 | ||
@@ -6914,6 +7290,7 @@ enum { | |||
6914 | # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) | 7290 | # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) |
6915 | 7291 | ||
6916 | #define GEN6_UCGCTL3 _MMIO(0x9408) | 7292 | #define GEN6_UCGCTL3 _MMIO(0x9408) |
7293 | # define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20) | ||
6917 | 7294 | ||
6918 | #define GEN7_UCGCTL4 _MMIO(0x940c) | 7295 | #define GEN7_UCGCTL4 _MMIO(0x940c) |
6919 | #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) | 7296 | #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) |
@@ -8299,6 +8676,21 @@ enum { | |||
8299 | #define BXT_PIPE_SELECT_SHIFT 7 | 8676 | #define BXT_PIPE_SELECT_SHIFT 7 |
8300 | #define BXT_PIPE_SELECT_MASK (7 << 7) | 8677 | #define BXT_PIPE_SELECT_MASK (7 << 7) |
8301 | #define BXT_PIPE_SELECT(pipe) ((pipe) << 7) | 8678 | #define BXT_PIPE_SELECT(pipe) ((pipe) << 7) |
8679 | #define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */ | ||
8680 | #define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */ | ||
8681 | #define GLK_MIPIIO_RESET_RELEASED (1 << 28) | ||
8682 | #define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */ | ||
8683 | #define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */ | ||
8684 | #define GLK_LP_WAKE (1 << 22) | ||
8685 | #define GLK_LP11_LOW_PWR_MODE (1 << 21) | ||
8686 | #define GLK_LP00_LOW_PWR_MODE (1 << 20) | ||
8687 | #define GLK_FIREWALL_ENABLE (1 << 16) | ||
8688 | #define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10) | ||
8689 | #define BXT_PIXEL_OVERLAP_CNT_SHIFT 10 | ||
8690 | #define BXT_DSC_ENABLE (1 << 3) | ||
8691 | #define BXT_RGB_FLIP (1 << 2) | ||
8692 | #define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */ | ||
8693 | #define GLK_MIPIIO_ENABLE (1 << 0) | ||
8302 | 8694 | ||
8303 | #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) | 8695 | #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) |
8304 | #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) | 8696 | #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index b0e1e7ca75da..5c86925a0294 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -56,13 +56,12 @@ static void i915_restore_display(struct drm_i915_private *dev_priv) | |||
56 | i915_redisable_vga(dev_priv); | 56 | i915_redisable_vga(dev_priv); |
57 | } | 57 | } |
58 | 58 | ||
59 | int i915_save_state(struct drm_device *dev) | 59 | int i915_save_state(struct drm_i915_private *dev_priv) |
60 | { | 60 | { |
61 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
62 | struct pci_dev *pdev = dev_priv->drm.pdev; | 61 | struct pci_dev *pdev = dev_priv->drm.pdev; |
63 | int i; | 62 | int i; |
64 | 63 | ||
65 | mutex_lock(&dev->struct_mutex); | 64 | mutex_lock(&dev_priv->drm.struct_mutex); |
66 | 65 | ||
67 | i915_save_display(dev_priv); | 66 | i915_save_display(dev_priv); |
68 | 67 | ||
@@ -97,18 +96,17 @@ int i915_save_state(struct drm_device *dev) | |||
97 | dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); | 96 | dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); |
98 | } | 97 | } |
99 | 98 | ||
100 | mutex_unlock(&dev->struct_mutex); | 99 | mutex_unlock(&dev_priv->drm.struct_mutex); |
101 | 100 | ||
102 | return 0; | 101 | return 0; |
103 | } | 102 | } |
104 | 103 | ||
105 | int i915_restore_state(struct drm_device *dev) | 104 | int i915_restore_state(struct drm_i915_private *dev_priv) |
106 | { | 105 | { |
107 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
108 | struct pci_dev *pdev = dev_priv->drm.pdev; | 106 | struct pci_dev *pdev = dev_priv->drm.pdev; |
109 | int i; | 107 | int i; |
110 | 108 | ||
111 | mutex_lock(&dev->struct_mutex); | 109 | mutex_lock(&dev_priv->drm.struct_mutex); |
112 | 110 | ||
113 | i915_gem_restore_fences(dev_priv); | 111 | i915_gem_restore_fences(dev_priv); |
114 | 112 | ||
@@ -145,9 +143,9 @@ int i915_restore_state(struct drm_device *dev) | |||
145 | I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); | 143 | I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); |
146 | } | 144 | } |
147 | 145 | ||
148 | mutex_unlock(&dev->struct_mutex); | 146 | mutex_unlock(&dev_priv->drm.struct_mutex); |
149 | 147 | ||
150 | intel_i2c_reset(dev); | 148 | intel_i2c_reset(dev_priv); |
151 | 149 | ||
152 | return 0; | 150 | return 0; |
153 | } | 151 | } |
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 147420ccf49c..f5a88092dacf 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c | |||
@@ -17,6 +17,92 @@ | |||
17 | 17 | ||
18 | static DEFINE_SPINLOCK(i915_sw_fence_lock); | 18 | static DEFINE_SPINLOCK(i915_sw_fence_lock); |
19 | 19 | ||
20 | enum { | ||
21 | DEBUG_FENCE_IDLE = 0, | ||
22 | DEBUG_FENCE_NOTIFY, | ||
23 | }; | ||
24 | |||
25 | #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS | ||
26 | |||
27 | static void *i915_sw_fence_debug_hint(void *addr) | ||
28 | { | ||
29 | return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK); | ||
30 | } | ||
31 | |||
32 | static struct debug_obj_descr i915_sw_fence_debug_descr = { | ||
33 | .name = "i915_sw_fence", | ||
34 | .debug_hint = i915_sw_fence_debug_hint, | ||
35 | }; | ||
36 | |||
37 | static inline void debug_fence_init(struct i915_sw_fence *fence) | ||
38 | { | ||
39 | debug_object_init(fence, &i915_sw_fence_debug_descr); | ||
40 | } | ||
41 | |||
42 | static inline void debug_fence_activate(struct i915_sw_fence *fence) | ||
43 | { | ||
44 | debug_object_activate(fence, &i915_sw_fence_debug_descr); | ||
45 | } | ||
46 | |||
47 | static inline void debug_fence_set_state(struct i915_sw_fence *fence, | ||
48 | int old, int new) | ||
49 | { | ||
50 | debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new); | ||
51 | } | ||
52 | |||
53 | static inline void debug_fence_deactivate(struct i915_sw_fence *fence) | ||
54 | { | ||
55 | debug_object_deactivate(fence, &i915_sw_fence_debug_descr); | ||
56 | } | ||
57 | |||
58 | static inline void debug_fence_destroy(struct i915_sw_fence *fence) | ||
59 | { | ||
60 | debug_object_destroy(fence, &i915_sw_fence_debug_descr); | ||
61 | } | ||
62 | |||
63 | static inline void debug_fence_free(struct i915_sw_fence *fence) | ||
64 | { | ||
65 | debug_object_free(fence, &i915_sw_fence_debug_descr); | ||
66 | } | ||
67 | |||
68 | static inline void debug_fence_assert(struct i915_sw_fence *fence) | ||
69 | { | ||
70 | debug_object_assert_init(fence, &i915_sw_fence_debug_descr); | ||
71 | } | ||
72 | |||
73 | #else | ||
74 | |||
75 | static inline void debug_fence_init(struct i915_sw_fence *fence) | ||
76 | { | ||
77 | } | ||
78 | |||
79 | static inline void debug_fence_activate(struct i915_sw_fence *fence) | ||
80 | { | ||
81 | } | ||
82 | |||
83 | static inline void debug_fence_set_state(struct i915_sw_fence *fence, | ||
84 | int old, int new) | ||
85 | { | ||
86 | } | ||
87 | |||
88 | static inline void debug_fence_deactivate(struct i915_sw_fence *fence) | ||
89 | { | ||
90 | } | ||
91 | |||
92 | static inline void debug_fence_destroy(struct i915_sw_fence *fence) | ||
93 | { | ||
94 | } | ||
95 | |||
96 | static inline void debug_fence_free(struct i915_sw_fence *fence) | ||
97 | { | ||
98 | } | ||
99 | |||
100 | static inline void debug_fence_assert(struct i915_sw_fence *fence) | ||
101 | { | ||
102 | } | ||
103 | |||
104 | #endif | ||
105 | |||
20 | static int __i915_sw_fence_notify(struct i915_sw_fence *fence, | 106 | static int __i915_sw_fence_notify(struct i915_sw_fence *fence, |
21 | enum i915_sw_fence_notify state) | 107 | enum i915_sw_fence_notify state) |
22 | { | 108 | { |
@@ -26,25 +112,37 @@ static int __i915_sw_fence_notify(struct i915_sw_fence *fence, | |||
26 | return fn(fence, state); | 112 | return fn(fence, state); |
27 | } | 113 | } |
28 | 114 | ||
29 | static void i915_sw_fence_free(struct kref *kref) | 115 | #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS |
116 | void i915_sw_fence_fini(struct i915_sw_fence *fence) | ||
117 | { | ||
118 | debug_fence_free(fence); | ||
119 | } | ||
120 | #endif | ||
121 | |||
122 | static void i915_sw_fence_release(struct kref *kref) | ||
30 | { | 123 | { |
31 | struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref); | 124 | struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref); |
32 | 125 | ||
33 | WARN_ON(atomic_read(&fence->pending) > 0); | 126 | WARN_ON(atomic_read(&fence->pending) > 0); |
127 | debug_fence_destroy(fence); | ||
34 | 128 | ||
35 | if (fence->flags & I915_SW_FENCE_MASK) | 129 | if (fence->flags & I915_SW_FENCE_MASK) { |
36 | __i915_sw_fence_notify(fence, FENCE_FREE); | 130 | __i915_sw_fence_notify(fence, FENCE_FREE); |
37 | else | 131 | } else { |
132 | i915_sw_fence_fini(fence); | ||
38 | kfree(fence); | 133 | kfree(fence); |
134 | } | ||
39 | } | 135 | } |
40 | 136 | ||
41 | static void i915_sw_fence_put(struct i915_sw_fence *fence) | 137 | static void i915_sw_fence_put(struct i915_sw_fence *fence) |
42 | { | 138 | { |
43 | kref_put(&fence->kref, i915_sw_fence_free); | 139 | debug_fence_assert(fence); |
140 | kref_put(&fence->kref, i915_sw_fence_release); | ||
44 | } | 141 | } |
45 | 142 | ||
46 | static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence) | 143 | static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence) |
47 | { | 144 | { |
145 | debug_fence_assert(fence); | ||
48 | kref_get(&fence->kref); | 146 | kref_get(&fence->kref); |
49 | return fence; | 147 | return fence; |
50 | } | 148 | } |
@@ -56,6 +154,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, | |||
56 | wait_queue_t *pos, *next; | 154 | wait_queue_t *pos, *next; |
57 | unsigned long flags; | 155 | unsigned long flags; |
58 | 156 | ||
157 | debug_fence_deactivate(fence); | ||
59 | atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */ | 158 | atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */ |
60 | 159 | ||
61 | /* | 160 | /* |
@@ -88,23 +187,33 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, | |||
88 | } while (1); | 187 | } while (1); |
89 | } | 188 | } |
90 | spin_unlock_irqrestore(&x->lock, flags); | 189 | spin_unlock_irqrestore(&x->lock, flags); |
190 | |||
191 | debug_fence_assert(fence); | ||
91 | } | 192 | } |
92 | 193 | ||
93 | static void __i915_sw_fence_complete(struct i915_sw_fence *fence, | 194 | static void __i915_sw_fence_complete(struct i915_sw_fence *fence, |
94 | struct list_head *continuation) | 195 | struct list_head *continuation) |
95 | { | 196 | { |
197 | debug_fence_assert(fence); | ||
198 | |||
96 | if (!atomic_dec_and_test(&fence->pending)) | 199 | if (!atomic_dec_and_test(&fence->pending)) |
97 | return; | 200 | return; |
98 | 201 | ||
202 | debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY); | ||
203 | |||
99 | if (fence->flags & I915_SW_FENCE_MASK && | 204 | if (fence->flags & I915_SW_FENCE_MASK && |
100 | __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE) | 205 | __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE) |
101 | return; | 206 | return; |
102 | 207 | ||
208 | debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE); | ||
209 | |||
103 | __i915_sw_fence_wake_up_all(fence, continuation); | 210 | __i915_sw_fence_wake_up_all(fence, continuation); |
104 | } | 211 | } |
105 | 212 | ||
106 | static void i915_sw_fence_complete(struct i915_sw_fence *fence) | 213 | static void i915_sw_fence_complete(struct i915_sw_fence *fence) |
107 | { | 214 | { |
215 | debug_fence_assert(fence); | ||
216 | |||
108 | if (WARN_ON(i915_sw_fence_done(fence))) | 217 | if (WARN_ON(i915_sw_fence_done(fence))) |
109 | return; | 218 | return; |
110 | 219 | ||
@@ -113,6 +222,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence) | |||
113 | 222 | ||
114 | static void i915_sw_fence_await(struct i915_sw_fence *fence) | 223 | static void i915_sw_fence_await(struct i915_sw_fence *fence) |
115 | { | 224 | { |
225 | debug_fence_assert(fence); | ||
116 | WARN_ON(atomic_inc_return(&fence->pending) <= 1); | 226 | WARN_ON(atomic_inc_return(&fence->pending) <= 1); |
117 | } | 227 | } |
118 | 228 | ||
@@ -123,18 +233,26 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, | |||
123 | { | 233 | { |
124 | BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK); | 234 | BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK); |
125 | 235 | ||
236 | debug_fence_init(fence); | ||
237 | |||
126 | __init_waitqueue_head(&fence->wait, name, key); | 238 | __init_waitqueue_head(&fence->wait, name, key); |
127 | kref_init(&fence->kref); | 239 | kref_init(&fence->kref); |
128 | atomic_set(&fence->pending, 1); | 240 | atomic_set(&fence->pending, 1); |
129 | fence->flags = (unsigned long)fn; | 241 | fence->flags = (unsigned long)fn; |
130 | } | 242 | } |
131 | 243 | ||
132 | void i915_sw_fence_commit(struct i915_sw_fence *fence) | 244 | static void __i915_sw_fence_commit(struct i915_sw_fence *fence) |
133 | { | 245 | { |
134 | i915_sw_fence_complete(fence); | 246 | i915_sw_fence_complete(fence); |
135 | i915_sw_fence_put(fence); | 247 | i915_sw_fence_put(fence); |
136 | } | 248 | } |
137 | 249 | ||
250 | void i915_sw_fence_commit(struct i915_sw_fence *fence) | ||
251 | { | ||
252 | debug_fence_activate(fence); | ||
253 | __i915_sw_fence_commit(fence); | ||
254 | } | ||
255 | |||
138 | static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key) | 256 | static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key) |
139 | { | 257 | { |
140 | list_del(&wq->task_list); | 258 | list_del(&wq->task_list); |
@@ -206,9 +324,13 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, | |||
206 | unsigned long flags; | 324 | unsigned long flags; |
207 | int pending; | 325 | int pending; |
208 | 326 | ||
327 | debug_fence_assert(fence); | ||
328 | |||
209 | if (i915_sw_fence_done(signaler)) | 329 | if (i915_sw_fence_done(signaler)) |
210 | return 0; | 330 | return 0; |
211 | 331 | ||
332 | debug_fence_assert(signaler); | ||
333 | |||
212 | /* The dependency graph must be acyclic. */ | 334 | /* The dependency graph must be acyclic. */ |
213 | if (unlikely(i915_sw_fence_check_if_after(fence, signaler))) | 335 | if (unlikely(i915_sw_fence_check_if_after(fence, signaler))) |
214 | return -EINVAL; | 336 | return -EINVAL; |
@@ -279,7 +401,7 @@ static void timer_i915_sw_fence_wake(unsigned long data) | |||
279 | dma_fence_put(cb->dma); | 401 | dma_fence_put(cb->dma); |
280 | cb->dma = NULL; | 402 | cb->dma = NULL; |
281 | 403 | ||
282 | i915_sw_fence_commit(cb->fence); | 404 | __i915_sw_fence_commit(cb->fence); |
283 | cb->timer.function = NULL; | 405 | cb->timer.function = NULL; |
284 | } | 406 | } |
285 | 407 | ||
@@ -290,7 +412,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma, | |||
290 | 412 | ||
291 | del_timer_sync(&cb->timer); | 413 | del_timer_sync(&cb->timer); |
292 | if (cb->timer.function) | 414 | if (cb->timer.function) |
293 | i915_sw_fence_commit(cb->fence); | 415 | __i915_sw_fence_commit(cb->fence); |
294 | dma_fence_put(cb->dma); | 416 | dma_fence_put(cb->dma); |
295 | 417 | ||
296 | kfree(cb); | 418 | kfree(cb); |
@@ -304,6 +426,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, | |||
304 | struct i915_sw_dma_fence_cb *cb; | 426 | struct i915_sw_dma_fence_cb *cb; |
305 | int ret; | 427 | int ret; |
306 | 428 | ||
429 | debug_fence_assert(fence); | ||
430 | |||
307 | if (dma_fence_is_signaled(dma)) | 431 | if (dma_fence_is_signaled(dma)) |
308 | return 0; | 432 | return 0; |
309 | 433 | ||
@@ -349,6 +473,8 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, | |||
349 | struct dma_fence *excl; | 473 | struct dma_fence *excl; |
350 | int ret = 0, pending; | 474 | int ret = 0, pending; |
351 | 475 | ||
476 | debug_fence_assert(fence); | ||
477 | |||
352 | if (write) { | 478 | if (write) { |
353 | struct dma_fence **shared; | 479 | struct dma_fence **shared; |
354 | unsigned int count, i; | 480 | unsigned int count, i; |
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 0f3185ef7f4e..d31cefbbcc04 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h | |||
@@ -56,6 +56,12 @@ do { \ | |||
56 | __i915_sw_fence_init((fence), (fn), NULL, NULL) | 56 | __i915_sw_fence_init((fence), (fn), NULL, NULL) |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS | ||
60 | void i915_sw_fence_fini(struct i915_sw_fence *fence); | ||
61 | #else | ||
62 | static inline void i915_sw_fence_fini(struct i915_sw_fence *fence) {} | ||
63 | #endif | ||
64 | |||
59 | void i915_sw_fence_commit(struct i915_sw_fence *fence); | 65 | void i915_sw_fence_commit(struct i915_sw_fence *fence); |
60 | 66 | ||
61 | int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, | 67 | int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, |
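For orientation, a hedged sketch of the lifecycle the new fini hook completes; the notify callback, signaler fence, and wait-queue entry are stand-ins, and i915_sw_fence_fini() compiles to nothing unless CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is set:

    static int example_notify(struct i915_sw_fence *fence,
                              enum i915_sw_fence_notify state)
    {
        return NOTIFY_DONE; /* stand-in: no completion work */
    }

    i915_sw_fence_init(&fence, example_notify);           /* debug: initialized */
    i915_sw_fence_await_sw_fence(&fence, &signaler, &wq); /* pending++ */
    i915_sw_fence_commit(&fence);                         /* debug: activated */
    /* ... fence signals once 'signaler' completes ... */
    i915_sw_fence_fini(&fence);                           /* debug: freed */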
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 3df8d3dd31cd..40c0ac70d79d 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -535,7 +535,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, | |||
535 | if (ret) | 535 | if (ret) |
536 | return ret; | 536 | return ret; |
537 | 537 | ||
538 | error_priv.dev = dev; | 538 | error_priv.i915 = dev_priv; |
539 | i915_error_state_get(dev, &error_priv); | 539 | i915_error_state_get(dev, &error_priv); |
540 | 540 | ||
541 | ret = i915_error_state_to_str(&error_str, &error_priv); | 541 | ret = i915_error_state_to_str(&error_str, &error_priv); |
@@ -560,7 +560,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj, | |||
560 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); | 560 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
561 | 561 | ||
562 | DRM_DEBUG_DRIVER("Resetting error state\n"); | 562 | DRM_DEBUG_DRIVER("Resetting error state\n"); |
563 | i915_destroy_error_state(&dev_priv->drm); | 563 | i915_destroy_error_state(dev_priv); |
564 | 564 | ||
565 | return count; | 565 | return count; |
566 | } | 566 | } |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index c5d210ebaa9a..18ae37c411fd 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -406,7 +406,7 @@ TRACE_EVENT(i915_gem_evict, | |||
406 | ), | 406 | ), |
407 | 407 | ||
408 | TP_fast_assign( | 408 | TP_fast_assign( |
409 | __entry->dev = vm->dev->primary->index; | 409 | __entry->dev = vm->i915->drm.primary->index; |
410 | __entry->vm = vm; | 410 | __entry->vm = vm; |
411 | __entry->size = size; | 411 | __entry->size = size; |
412 | __entry->align = align; | 412 | __entry->align = align; |
@@ -443,13 +443,41 @@ TRACE_EVENT(i915_gem_evict_vm, | |||
443 | ), | 443 | ), |
444 | 444 | ||
445 | TP_fast_assign( | 445 | TP_fast_assign( |
446 | __entry->dev = vm->dev->primary->index; | 446 | __entry->dev = vm->i915->drm.primary->index; |
447 | __entry->vm = vm; | 447 | __entry->vm = vm; |
448 | ), | 448 | ), |
449 | 449 | ||
450 | TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) | 450 | TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) |
451 | ); | 451 | ); |
452 | 452 | ||
453 | TRACE_EVENT(i915_gem_evict_vma, | ||
454 | TP_PROTO(struct i915_vma *vma, unsigned int flags), | ||
455 | TP_ARGS(vma, flags), | ||
456 | |||
457 | TP_STRUCT__entry( | ||
458 | __field(u32, dev) | ||
459 | __field(struct i915_address_space *, vm) | ||
460 | __field(u64, start) | ||
461 | __field(u64, size) | ||
462 | __field(unsigned long, color) | ||
463 | __field(unsigned int, flags) | ||
464 | ), | ||
465 | |||
466 | TP_fast_assign( | ||
467 | __entry->dev = vma->vm->i915->drm.primary->index; | ||
468 | __entry->vm = vma->vm; | ||
469 | __entry->start = vma->node.start; | ||
470 | __entry->size = vma->node.size; | ||
471 | __entry->color = vma->node.color; | ||
472 | __entry->flags = flags; | ||
473 | ), | ||
474 | |||
475 | TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x", | ||
476 | __entry->dev, __entry->vm, | ||
477 | __entry->start, __entry->size, | ||
478 | __entry->color, __entry->flags) | ||
479 | ); | ||
480 | |||
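With the tracepoint enabled, each eviction attempt for a vma emits one line in the TP_printk format above; a hypothetical capture (all values invented) would read:

    i915_gem_evict_vma: dev=0, vm=ffff88021a2b4000, start=100000 size=10000, color=0, flags=2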
453 | TRACE_EVENT(i915_gem_ring_sync_to, | 481 | TRACE_EVENT(i915_gem_ring_sync_to, |
454 | TP_PROTO(struct drm_i915_gem_request *to, | 482 | TP_PROTO(struct drm_i915_gem_request *to, |
455 | struct drm_i915_gem_request *from), | 483 | struct drm_i915_gem_request *from), |
@@ -711,7 +739,7 @@ DECLARE_EVENT_CLASS(i915_ppgtt, | |||
711 | 739 | ||
712 | TP_fast_assign( | 740 | TP_fast_assign( |
713 | __entry->vm = vm; | 741 | __entry->vm = vm; |
714 | __entry->dev = vm->dev->primary->index; | 742 | __entry->dev = vm->i915->drm.primary->index; |
715 | ), | 743 | ), |
716 | 744 | ||
717 | TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm) | 745 | TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm) |
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h new file mode 100644 index 000000000000..34020873e1f6 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_utils.h | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Copyright © 2016 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef __I915_UTILS_H | ||
26 | #define __I915_UTILS_H | ||
27 | |||
28 | #define range_overflows(start, size, max) ({ \ | ||
29 | typeof(start) start__ = (start); \ | ||
30 | typeof(size) size__ = (size); \ | ||
31 | typeof(max) max__ = (max); \ | ||
32 | (void)(&start__ == &size__); \ | ||
33 | (void)(&start__ == &max__); \ | ||
34 | start__ > max__ || size__ > max__ - start__; \ | ||
35 | }) | ||
36 | |||
37 | #define range_overflows_t(type, start, size, max) \ | ||
38 | range_overflows((type)(start), (type)(size), (type)(max)) | ||
39 | |||
40 | /* Note we don't consider signbits :| */ | ||
41 | #define overflows_type(x, T) \ | ||
42 | (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) | ||
43 | |||
44 | #define ptr_mask_bits(ptr) ({ \ | ||
45 | unsigned long __v = (unsigned long)(ptr); \ | ||
46 | (typeof(ptr))(__v & PAGE_MASK); \ | ||
47 | }) | ||
48 | |||
49 | #define ptr_unpack_bits(ptr, bits) ({ \ | ||
50 | unsigned long __v = (unsigned long)(ptr); \ | ||
51 | (bits) = __v & ~PAGE_MASK; \ | ||
52 | (typeof(ptr))(__v & PAGE_MASK); \ | ||
53 | }) | ||
54 | |||
55 | #define ptr_pack_bits(ptr, bits) \ | ||
56 | ((typeof(ptr))((unsigned long)(ptr) | (bits))) | ||
57 | |||
58 | #define fetch_and_zero(ptr) ({ \ | ||
59 | typeof(*ptr) __T = *(ptr); \ | ||
60 | *(ptr) = (typeof(*ptr))0; \ | ||
61 | __T; \ | ||
62 | }) | ||
63 | |||
64 | #endif /* !__I915_UTILS_H */ | ||
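A few hedged usage sketches for the relocated helpers; the values and the page_aligned pointer are invented. Note the (void)(&start__ == &size__) lines in range_overflows() exist only to force a compile-time type match between the arguments:

    range_overflows(0xf000u, 0x2000u, 0x10000u); /* true: 0xf000 + 0x2000 > 0x10000 */
    range_overflows(0xe000u, 0x2000u, 0x10000u); /* false: exactly reaches the limit */

    /* Stash two flag bits in the low bits of a page-aligned pointer. */
    unsigned long bits;
    void *packed = ptr_pack_bits(page_aligned, 0x3);
    void *clean  = ptr_unpack_bits(packed, bits); /* clean == page_aligned, bits == 0x3 */

    /* Read-and-clear in one expression (not atomic; callers provide locking). */
    void *old = fetch_and_zero(&slot);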
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 325b917c5ad7..58f2483362ad 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -95,8 +95,13 @@ __i915_vma_create(struct drm_i915_gem_object *obj, | |||
95 | if (view) { | 95 | if (view) { |
96 | vma->ggtt_view = *view; | 96 | vma->ggtt_view = *view; |
97 | if (view->type == I915_GGTT_VIEW_PARTIAL) { | 97 | if (view->type == I915_GGTT_VIEW_PARTIAL) { |
98 | GEM_BUG_ON(range_overflows_t(u64, | ||
99 | view->params.partial.offset, | ||
100 | view->params.partial.size, | ||
101 | obj->base.size >> PAGE_SHIFT)); | ||
98 | vma->size = view->params.partial.size; | 102 | vma->size = view->params.partial.size; |
99 | vma->size <<= PAGE_SHIFT; | 103 | vma->size <<= PAGE_SHIFT; |
104 | GEM_BUG_ON(vma->size >= obj->base.size); | ||
100 | } else if (view->type == I915_GGTT_VIEW_ROTATED) { | 105 | } else if (view->type == I915_GGTT_VIEW_ROTATED) { |
101 | vma->size = | 106 | vma->size = |
102 | intel_rotation_info_size(&view->params.rotated); | 107 | intel_rotation_info_size(&view->params.rotated); |
@@ -176,6 +181,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | |||
176 | if (bind_flags == 0) | 181 | if (bind_flags == 0) |
177 | return 0; | 182 | return 0; |
178 | 183 | ||
184 | if (GEM_WARN_ON(range_overflows(vma->node.start, | ||
185 | vma->node.size, | ||
186 | vma->vm->total))) | ||
187 | return -ENODEV; | ||
188 | |||
179 | if (vma_flags == 0 && vma->vm->allocate_va_range) { | 189 | if (vma_flags == 0 && vma->vm->allocate_va_range) { |
180 | trace_i915_va_alloc(vma); | 190 | trace_i915_va_alloc(vma); |
181 | ret = vma->vm->allocate_va_range(vma->vm, | 191 | ret = vma->vm->allocate_va_range(vma->vm, |
@@ -198,9 +208,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) | |||
198 | void __iomem *ptr; | 208 | void __iomem *ptr; |
199 | 209 | ||
200 | /* Access through the GTT requires the device to be awake. */ | 210 | /* Access through the GTT requires the device to be awake. */ |
201 | assert_rpm_wakelock_held(to_i915(vma->vm->dev)); | 211 | assert_rpm_wakelock_held(vma->vm->i915); |
202 | 212 | ||
203 | lockdep_assert_held(&vma->vm->dev->struct_mutex); | 213 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); |
204 | if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) | 214 | if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) |
205 | return IO_ERR_PTR(-ENODEV); | 215 | return IO_ERR_PTR(-ENODEV); |
206 | 216 | ||
@@ -297,10 +307,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) | |||
297 | vma->flags &= ~I915_VMA_CAN_FENCE; | 307 | vma->flags &= ~I915_VMA_CAN_FENCE; |
298 | } | 308 | } |
299 | 309 | ||
300 | bool i915_gem_valid_gtt_space(struct i915_vma *vma, | 310 | static bool color_differs(struct drm_mm_node *node, unsigned long color) |
301 | unsigned long cache_level) | 311 | { |
312 | return node->allocated && node->color != color; | ||
313 | } | ||
314 | |||
315 | bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) | ||
302 | { | 316 | { |
303 | struct drm_mm_node *gtt_space = &vma->node; | 317 | struct drm_mm_node *node = &vma->node; |
304 | struct drm_mm_node *other; | 318 | struct drm_mm_node *other; |
305 | 319 | ||
306 | /* | 320 | /* |
@@ -313,18 +327,16 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, | |||
313 | if (vma->vm->mm.color_adjust == NULL) | 327 | if (vma->vm->mm.color_adjust == NULL) |
314 | return true; | 328 | return true; |
315 | 329 | ||
316 | if (!drm_mm_node_allocated(gtt_space)) | 330 | /* Only valid to be called on an already inserted vma */ |
317 | return true; | 331 | GEM_BUG_ON(!drm_mm_node_allocated(node)); |
318 | 332 | GEM_BUG_ON(list_empty(&node->node_list)); | |
319 | if (list_empty(&gtt_space->node_list)) | ||
320 | return true; | ||
321 | 333 | ||
322 | other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); | 334 | other = list_prev_entry(node, node_list); |
323 | if (other->allocated && !drm_mm_hole_follows(other) && other->color != cache_level) | 335 | if (color_differs(other, cache_level) && !drm_mm_hole_follows(other)) |
324 | return false; | 336 | return false; |
325 | 337 | ||
326 | other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); | 338 | other = list_next_entry(node, node_list); |
327 | if (other->allocated && !drm_mm_hole_follows(gtt_space) && other->color != cache_level) | 339 | if (color_differs(other, cache_level) && !drm_mm_hole_follows(node)) |
328 | return false; | 340 | return false; |
329 | 341 | ||
330 | return true; | 342 | return true; |
@@ -347,7 +359,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, | |||
347 | static int | 359 | static int |
348 | i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) | 360 | i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) |
349 | { | 361 | { |
350 | struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); | 362 | struct drm_i915_private *dev_priv = vma->vm->i915; |
351 | struct drm_i915_gem_object *obj = vma->obj; | 363 | struct drm_i915_gem_object *obj = vma->obj; |
352 | u64 start, end; | 364 | u64 start, end; |
353 | int ret; | 365 | int ret; |
@@ -391,7 +403,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) | |||
391 | 403 | ||
392 | if (flags & PIN_OFFSET_FIXED) { | 404 | if (flags & PIN_OFFSET_FIXED) { |
393 | u64 offset = flags & PIN_OFFSET_MASK; | 405 | u64 offset = flags & PIN_OFFSET_MASK; |
394 | if (offset & (alignment - 1) || offset > end - size) { | 406 | if (offset & (alignment - 1) || |
407 | range_overflows(offset, size, end)) { | ||
395 | ret = -EINVAL; | 408 | ret = -EINVAL; |
396 | goto err_unpin; | 409 | goto err_unpin; |
397 | } | 410 | } |
@@ -401,7 +414,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) | |||
401 | vma->node.color = obj->cache_level; | 414 | vma->node.color = obj->cache_level; |
402 | ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); | 415 | ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); |
403 | if (ret) { | 416 | if (ret) { |
404 | ret = i915_gem_evict_for_vma(vma); | 417 | ret = i915_gem_evict_for_vma(vma, flags); |
405 | if (ret == 0) | 418 | if (ret == 0) |
406 | ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); | 419 | ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); |
407 | if (ret) | 420 | if (ret) |
@@ -469,7 +482,7 @@ int __i915_vma_do_pin(struct i915_vma *vma, | |||
469 | unsigned int bound = vma->flags; | 482 | unsigned int bound = vma->flags; |
470 | int ret; | 483 | int ret; |
471 | 484 | ||
472 | lockdep_assert_held(&vma->vm->dev->struct_mutex); | 485 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); |
473 | GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); | 486 | GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); |
474 | GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); | 487 | GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); |
475 | 488 | ||
@@ -567,7 +580,7 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
567 | 580 | ||
568 | for_each_active(active, idx) { | 581 | for_each_active(active, idx) { |
569 | ret = i915_gem_active_retire(&vma->last_read[idx], | 582 | ret = i915_gem_active_retire(&vma->last_read[idx], |
570 | &vma->vm->dev->struct_mutex); | 583 | &vma->vm->i915->drm.struct_mutex); |
571 | if (ret) | 584 | if (ret) |
572 | break; | 585 | break; |
573 | } | 586 | } |
@@ -628,6 +641,7 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
628 | * reaped by the shrinker. | 641 | * reaped by the shrinker. |
629 | */ | 642 | */ |
630 | i915_gem_object_unpin_pages(obj); | 643 | i915_gem_object_unpin_pages(obj); |
644 | GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); | ||
631 | 645 | ||
632 | destroy: | 646 | destroy: |
633 | if (unlikely(i915_vma_is_closed(vma))) | 647 | if (unlikely(i915_vma_is_closed(vma))) |
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 85446f0b0b3f..e3b2b3b1e056 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h | |||
@@ -178,15 +178,23 @@ static inline void i915_vma_put(struct i915_vma *vma) | |||
178 | i915_gem_object_put(vma->obj); | 178 | i915_gem_object_put(vma->obj); |
179 | } | 179 | } |
180 | 180 | ||
181 | static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b) | ||
182 | { | ||
183 | return a - b; | ||
184 | } | ||
185 | |||
181 | static inline long | 186 | static inline long |
182 | i915_vma_compare(struct i915_vma *vma, | 187 | i915_vma_compare(struct i915_vma *vma, |
183 | struct i915_address_space *vm, | 188 | struct i915_address_space *vm, |
184 | const struct i915_ggtt_view *view) | 189 | const struct i915_ggtt_view *view) |
185 | { | 190 | { |
191 | ptrdiff_t cmp; | ||
192 | |||
186 | GEM_BUG_ON(view && !i915_is_ggtt(vm)); | 193 | GEM_BUG_ON(view && !i915_is_ggtt(vm)); |
187 | 194 | ||
188 | if (vma->vm != vm) | 195 | cmp = ptrdiff(vma->vm, vm); |
189 | return vma->vm - vm; | 196 | if (cmp) |
197 | return cmp; | ||
190 | 198 | ||
191 | if (!view) | 199 | if (!view) |
192 | return vma->ggtt_view.type; | 200 | return vma->ggtt_view.type; |
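The new ptrdiff() helper replaces a direct vma->vm - vm subtraction: subtracting two struct pointers yields a difference scaled by the element size (and is formally undefined for pointers into unrelated objects), while diffing through const void * produces a plain byte distance that is a safe, stable sort key. A tiny illustration with invented addresses:

    /* Illustrative only. */
    struct i915_address_space *a = (void *)0x10000;
    struct i915_address_space *b = (void *)0x10040;

    ptrdiff(a, b); /* -0x40: a sorts before b */
    ptrdiff(b, b); /*     0: equal, fall through to compare the view */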
@@ -282,7 +290,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); | |||
282 | */ | 290 | */ |
283 | static inline void i915_vma_unpin_iomap(struct i915_vma *vma) | 291 | static inline void i915_vma_unpin_iomap(struct i915_vma *vma) |
284 | { | 292 | { |
285 | lockdep_assert_held(&vma->vm->dev->struct_mutex); | 293 | lockdep_assert_held(&vma->obj->base.dev->struct_mutex); |
286 | GEM_BUG_ON(vma->iomap == NULL); | 294 | GEM_BUG_ON(vma->iomap == NULL); |
287 | i915_vma_unpin(vma); | 295 | i915_vma_unpin(vma); |
288 | } | 296 | } |
@@ -311,7 +319,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma) | |||
311 | static inline bool | 319 | static inline bool |
312 | i915_vma_pin_fence(struct i915_vma *vma) | 320 | i915_vma_pin_fence(struct i915_vma *vma) |
313 | { | 321 | { |
314 | lockdep_assert_held(&vma->vm->dev->struct_mutex); | 322 | lockdep_assert_held(&vma->obj->base.dev->struct_mutex); |
315 | if (vma->fence) { | 323 | if (vma->fence) { |
316 | vma->fence->pin_count++; | 324 | vma->fence->pin_count++; |
317 | return true; | 325 | return true; |
@@ -330,7 +338,7 @@ i915_vma_pin_fence(struct i915_vma *vma) | |||
330 | static inline void | 338 | static inline void |
331 | i915_vma_unpin_fence(struct i915_vma *vma) | 339 | i915_vma_unpin_fence(struct i915_vma *vma) |
332 | { | 340 | { |
333 | lockdep_assert_held(&vma->vm->dev->struct_mutex); | 341 | lockdep_assert_held(&vma->obj->base.dev->struct_mutex); |
334 | if (vma->fence) { | 342 | if (vma->fence) { |
335 | GEM_BUG_ON(vma->fence->pin_count <= 0); | 343 | GEM_BUG_ON(vma->fence->pin_count <= 0); |
336 | vma->fence->pin_count--; | 344 | vma->fence->pin_count--; |
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index c5a166752eda..aa9160e7f1d8 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
@@ -265,37 +265,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev, | |||
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | 267 | ||
268 | static void | ||
269 | intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, | ||
270 | struct intel_shared_dpll_config *shared_dpll) | ||
271 | { | ||
272 | enum intel_dpll_id i; | ||
273 | |||
274 | /* Copy shared dpll state */ | ||
275 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | ||
276 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | ||
277 | |||
278 | shared_dpll[i] = pll->config; | ||
279 | } | ||
280 | } | ||
281 | |||
282 | struct intel_shared_dpll_config * | ||
283 | intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) | ||
284 | { | ||
285 | struct intel_atomic_state *state = to_intel_atomic_state(s); | ||
286 | |||
287 | WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); | ||
288 | |||
289 | if (!state->dpll_set) { | ||
290 | state->dpll_set = true; | ||
291 | |||
292 | intel_atomic_duplicate_dpll_state(to_i915(s->dev), | ||
293 | state->shared_dpll); | ||
294 | } | ||
295 | |||
296 | return state->shared_dpll; | ||
297 | } | ||
298 | |||
299 | struct drm_atomic_state * | 268 | struct drm_atomic_state * |
300 | intel_atomic_state_alloc(struct drm_device *dev) | 269 | intel_atomic_state_alloc(struct drm_device *dev) |
301 | { | 270 | { |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 49f10538d4aa..16c202781db0 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -737,25 +737,49 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) | |||
737 | return dev_priv->cdclk_freq; | 737 | return dev_priv->cdclk_freq; |
738 | } | 738 | } |
739 | 739 | ||
740 | /* | ||
741 | * Return the intel_encoder saved for the given port and pipe; | ||
742 | * encoders are cached in av_enc_map[], indexed by pipe. | ||
743 | * MST & (pipe >= 0): return av_enc_map[pipe] | ||
744 | * when the port matches | ||
745 | * MST & (pipe < 0): invalid | ||
746 | * Non-MST & (pipe >= 0): only pipe == 0 (the first device entry) | ||
747 | * returns the intel_encoder whose port matches | ||
748 | * Non-MST & (pipe < 0): return the intel_encoder whose port matches | ||
749 | */ | ||
740 | static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, | 750 | static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, |
741 | int port, int pipe) | 751 | int port, int pipe) |
742 | { | 752 | { |
753 | struct intel_encoder *encoder; | ||
743 | 754 | ||
744 | if (WARN_ON(pipe >= I915_MAX_PIPES)) | 755 | if (WARN_ON(pipe >= I915_MAX_PIPES)) |
745 | return NULL; | 756 | return NULL; |
746 | 757 | ||
747 | /* MST */ | 758 | /* MST */ |
748 | if (pipe >= 0) | 759 | if (pipe >= 0) { |
749 | return dev_priv->av_enc_map[pipe]; | 760 | encoder = dev_priv->av_enc_map[pipe]; |
761 | /* | ||
762 | * At boot the audio driver may not yet know whether | ||
763 | * the link is MST, so it polls all port & pipe | ||
764 | * combinations. | ||
765 | */ | ||
766 | if (encoder != NULL && encoder->port == port && | ||
767 | encoder->type == INTEL_OUTPUT_DP_MST) | ||
768 | return encoder; | ||
769 | } | ||
750 | 770 | ||
751 | /* Non-MST */ | 771 | /* Non-MST */ |
752 | for_each_pipe(dev_priv, pipe) { | 772 | if (pipe > 0) |
753 | struct intel_encoder *encoder; | 773 | return NULL; |
754 | 774 | ||
775 | for_each_pipe(dev_priv, pipe) { | ||
755 | encoder = dev_priv->av_enc_map[pipe]; | 776 | encoder = dev_priv->av_enc_map[pipe]; |
756 | if (encoder == NULL) | 777 | if (encoder == NULL) |
757 | continue; | 778 | continue; |
758 | 779 | ||
780 | if (encoder->type == INTEL_OUTPUT_DP_MST) | ||
781 | continue; | ||
782 | |||
759 | if (port == encoder->port) | 783 | if (port == encoder->port) |
760 | return encoder; | 784 | return encoder; |
761 | } | 785 | } |
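The four cases in the comment above reduce to a simple policy: an explicit pipe is trusted only for MST, and a port-only query falls back to scanning the map for a non-MST encoder. A standalone sketch of that policy (hypothetical stub types; the real code indexes dev_priv->av_enc_map[] by pipe):

	#include <stddef.h>
	#include <stdbool.h>

	#define MAX_PIPES 3

	struct enc { int port; bool is_mst; };
	static struct enc *enc_map[MAX_PIPES];	/* stands in for av_enc_map */

	static struct enc *lookup(int port, int pipe)
	{
		struct enc *e;
		int p;

		if (pipe >= MAX_PIPES)
			return NULL;

		/* MST: the caller knows the pipe; accept only a matching MST encoder */
		if (pipe >= 0) {
			e = enc_map[pipe];
			if (e && e->port == port && e->is_mst)
				return e;
		}

		/* non-MST: only pipe <= 0 is a meaningful query; match by port */
		if (pipe > 0)
			return NULL;

		for (p = 0; p < MAX_PIPES; p++) {
			e = enc_map[p];
			if (e && !e->is_mst && e->port == port)
				return e;
		}
		return NULL;
	}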
@@ -781,9 +805,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, | |||
781 | 805 | ||
782 | /* 1. get the pipe */ | 806 | /* 1. get the pipe */ |
783 | intel_encoder = get_saved_enc(dev_priv, port, pipe); | 807 | intel_encoder = get_saved_enc(dev_priv, port, pipe); |
784 | if (!intel_encoder || !intel_encoder->base.crtc || | 808 | if (!intel_encoder || !intel_encoder->base.crtc) { |
785 | (intel_encoder->type != INTEL_OUTPUT_HDMI && | ||
786 | intel_encoder->type != INTEL_OUTPUT_DP)) { | ||
787 | DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port)); | 809 | DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port)); |
788 | err = -ENODEV; | 810 | err = -ENODEV; |
789 | goto unlock; | 811 | goto unlock; |
@@ -906,6 +928,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) | |||
906 | { | 928 | { |
907 | int ret; | 929 | int ret; |
908 | 930 | ||
931 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | ||
932 | return; | ||
933 | |||
909 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); | 934 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); |
910 | if (ret < 0) { | 935 | if (ret < 0) { |
911 | DRM_ERROR("failed to add audio component (%d)\n", ret); | 936 | DRM_ERROR("failed to add audio component (%d)\n", ret); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 7ffab1abc518..e144f033f4b5 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -114,16 +114,18 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | |||
114 | panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + | 114 | panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + |
115 | ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); | 115 | ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); |
116 | panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + | 116 | panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + |
117 | dvo_timing->hsync_pulse_width; | 117 | ((dvo_timing->hsync_pulse_width_hi << 8) | |
118 | dvo_timing->hsync_pulse_width_lo); | ||
118 | panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + | 119 | panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + |
119 | ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); | 120 | ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); |
120 | 121 | ||
121 | panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | | 122 | panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | |
122 | dvo_timing->vactive_lo; | 123 | dvo_timing->vactive_lo; |
123 | panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + | 124 | panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + |
124 | dvo_timing->vsync_off; | 125 | ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo); |
125 | panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + | 126 | panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + |
126 | dvo_timing->vsync_pulse_width; | 127 | ((dvo_timing->vsync_pulse_width_hi << 4) | |
128 | dvo_timing->vsync_pulse_width_lo); | ||
127 | panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + | 129 | panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + |
128 | ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); | 130 | ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); |
129 | panel_fixed_mode->clock = dvo_timing->clock * 10; | 131 | panel_fixed_mode->clock = dvo_timing->clock * 10; |
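The old code truncated both sync pulse widths to a single field; the fix stitches the split hi/lo fields back together. Worked numbers, assuming the field layout implied by the shifts above (8-bit hi byte for horizontal, 4-bit hi nibble for vertical):

	hsync: hi = 0x01, lo = 0x2c  ->  (0x01 << 8) | 0x2c = 0x12c = 300 pixels
	vsync: hi = 0x1,  lo = 0x0a  ->  (0x1 << 4)  | 0x0a = 0x1a  =  26 lines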
@@ -330,17 +332,19 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, | |||
330 | 332 | ||
331 | method = &backlight_data->backlight_control[panel_type]; | 333 | method = &backlight_data->backlight_control[panel_type]; |
332 | dev_priv->vbt.backlight.type = method->type; | 334 | dev_priv->vbt.backlight.type = method->type; |
335 | dev_priv->vbt.backlight.controller = method->controller; | ||
333 | } | 336 | } |
334 | 337 | ||
335 | dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; | 338 | dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; |
336 | dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; | 339 | dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; |
337 | dev_priv->vbt.backlight.min_brightness = entry->min_brightness; | 340 | dev_priv->vbt.backlight.min_brightness = entry->min_brightness; |
338 | DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " | 341 | DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " |
339 | "active %s, min brightness %u, level %u\n", | 342 | "active %s, min brightness %u, level %u, controller %u\n", |
340 | dev_priv->vbt.backlight.pwm_freq_hz, | 343 | dev_priv->vbt.backlight.pwm_freq_hz, |
341 | dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", | 344 | dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", |
342 | dev_priv->vbt.backlight.min_brightness, | 345 | dev_priv->vbt.backlight.min_brightness, |
343 | backlight_data->level[panel_type]); | 346 | backlight_data->level[panel_type], |
347 | dev_priv->vbt.backlight.controller); | ||
344 | } | 348 | } |
345 | 349 | ||
346 | /* Try to find sdvo panel data */ | 350 | /* Try to find sdvo panel data */ |
@@ -1159,6 +1163,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
1159 | info->supports_dvi = is_dvi; | 1163 | info->supports_dvi = is_dvi; |
1160 | info->supports_hdmi = is_hdmi; | 1164 | info->supports_hdmi = is_hdmi; |
1161 | info->supports_dp = is_dp; | 1165 | info->supports_dp = is_dp; |
1166 | info->supports_edp = is_edp; | ||
1162 | 1167 | ||
1163 | DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n", | 1168 | DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n", |
1164 | port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt); | 1169 | port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt); |
@@ -1411,13 +1416,16 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) | |||
1411 | return false; | 1416 | return false; |
1412 | } | 1417 | } |
1413 | 1418 | ||
1414 | if (vbt->bdb_offset + sizeof(struct bdb_header) > size) { | 1419 | if (range_overflows_t(size_t, |
1420 | vbt->bdb_offset, | ||
1421 | sizeof(struct bdb_header), | ||
1422 | size)) { | ||
1415 | DRM_DEBUG_DRIVER("BDB header incomplete\n"); | 1423 | DRM_DEBUG_DRIVER("BDB header incomplete\n"); |
1416 | return false; | 1424 | return false; |
1417 | } | 1425 | } |
1418 | 1426 | ||
1419 | bdb = get_bdb_header(vbt); | 1427 | bdb = get_bdb_header(vbt); |
1420 | if (vbt->bdb_offset + bdb->bdb_size > size) { | 1428 | if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) { |
1421 | DRM_DEBUG_DRIVER("BDB incomplete\n"); | 1429 | DRM_DEBUG_DRIVER("BDB incomplete\n"); |
1422 | return false; | 1430 | return false; |
1423 | } | 1431 | } |
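range_overflows_t() is introduced elsewhere in this series; the point of using it here is that the open-coded "vbt->bdb_offset + bdb->bdb_size > size" can wrap on corrupt or malicious VBT fields and pass the check. A sketch of the assumed semantics, rearranged so no addition can overflow (the real helper lives in i915_utils.h):

	static bool range_overflows_sketch(size_t start, size_t size, size_t max)
	{
		/* "start + size > max", without the wrap */
		return start > max || size > max - start;
	}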
@@ -1662,6 +1670,9 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) | |||
1662 | }; | 1670 | }; |
1663 | int i; | 1671 | int i; |
1664 | 1672 | ||
1673 | if (HAS_DDI(dev_priv)) | ||
1674 | return dev_priv->vbt.ddi_port_info[port].supports_edp; | ||
1675 | |||
1665 | if (!dev_priv->vbt.child_dev_num) | 1676 | if (!dev_priv->vbt.child_dev_num) |
1666 | return false; | 1677 | return false; |
1667 | 1678 | ||
@@ -1779,7 +1790,7 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, | |||
1779 | { | 1790 | { |
1780 | int i; | 1791 | int i; |
1781 | 1792 | ||
1782 | if (WARN_ON_ONCE(!IS_BROXTON(dev_priv))) | 1793 | if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv))) |
1783 | return false; | 1794 | return false; |
1784 | 1795 | ||
1785 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | 1796 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { |
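This merge converts IS_BROXTON() call sites throughout the display code to IS_GEN9_LP(). The assumed intent is that the predicate covers both gen9 low-power platforms, so the existing Broxton paths are reused for the newly added Geminilake; roughly:

	/* assumed semantics, not necessarily the exact macro */
	#define IS_GEN9_LP(dev_priv) \
		(IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv))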
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index c9c46a538edb..fcfa423d08bd 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c | |||
@@ -154,7 +154,7 @@ static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b) | |||
154 | 154 | ||
155 | static inline struct intel_wait *to_wait(struct rb_node *node) | 155 | static inline struct intel_wait *to_wait(struct rb_node *node) |
156 | { | 156 | { |
157 | return container_of(node, struct intel_wait, node); | 157 | return rb_entry(node, struct intel_wait, node); |
158 | } | 158 | } |
159 | 159 | ||
160 | static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b, | 160 | static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b, |
@@ -427,7 +427,7 @@ static bool signal_complete(struct drm_i915_gem_request *request) | |||
427 | 427 | ||
428 | static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) | 428 | static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) |
429 | { | 429 | { |
430 | return container_of(rb, struct drm_i915_gem_request, signaling.node); | 430 | return rb_entry(rb, struct drm_i915_gem_request, signaling.node); |
431 | } | 431 | } |
432 | 432 | ||
433 | static void signaler_set_rtpriority(void) | 433 | static void signaler_set_rtpriority(void) |
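The container_of() -> rb_entry() substitutions here and above are behavior-neutral: rb_entry() is the rbtree-flavored alias from include/linux/rbtree.h, and using it documents that the node is embedded in an rbtree:

	#define rb_entry(ptr, type, member) container_of(ptr, type, member)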
@@ -623,6 +623,12 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) | |||
623 | { | 623 | { |
624 | struct intel_breadcrumbs *b = &engine->breadcrumbs; | 624 | struct intel_breadcrumbs *b = &engine->breadcrumbs; |
625 | 625 | ||
626 | /* The engines should be idle and all requests accounted for! */ | ||
627 | WARN_ON(READ_ONCE(b->first_wait)); | ||
628 | WARN_ON(!RB_EMPTY_ROOT(&b->waiters)); | ||
629 | WARN_ON(READ_ONCE(b->first_signal)); | ||
630 | WARN_ON(!RB_EMPTY_ROOT(&b->signals)); | ||
631 | |||
626 | if (!IS_ERR_OR_NULL(b->signaler)) | 632 | if (!IS_ERR_OR_NULL(b->signaler)) |
627 | kthread_stop(b->signaler); | 633 | kthread_stop(b->signaler); |
628 | 634 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 86ecec5601d4..385e29af8baa 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -836,12 +836,11 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
836 | .destroy = intel_encoder_destroy, | 836 | .destroy = intel_encoder_destroy, |
837 | }; | 837 | }; |
838 | 838 | ||
839 | void intel_crt_init(struct drm_device *dev) | 839 | void intel_crt_init(struct drm_i915_private *dev_priv) |
840 | { | 840 | { |
841 | struct drm_connector *connector; | 841 | struct drm_connector *connector; |
842 | struct intel_crt *crt; | 842 | struct intel_crt *crt; |
843 | struct intel_connector *intel_connector; | 843 | struct intel_connector *intel_connector; |
844 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
845 | i915_reg_t adpa_reg; | 844 | i915_reg_t adpa_reg; |
846 | u32 adpa; | 845 | u32 adpa; |
847 | 846 | ||
@@ -881,10 +880,10 @@ void intel_crt_init(struct drm_device *dev) | |||
881 | 880 | ||
882 | connector = &intel_connector->base; | 881 | connector = &intel_connector->base; |
883 | crt->connector = intel_connector; | 882 | crt->connector = intel_connector; |
884 | drm_connector_init(dev, &intel_connector->base, | 883 | drm_connector_init(&dev_priv->drm, &intel_connector->base, |
885 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 884 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
886 | 885 | ||
887 | drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, | 886 | drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs, |
888 | DRM_MODE_ENCODER_DAC, "CRT"); | 887 | DRM_MODE_ENCODER_DAC, "CRT"); |
889 | 888 | ||
890 | intel_connector_attach_encoder(intel_connector, &crt->base); | 889 | intel_connector_attach_encoder(intel_connector, &crt->base); |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index d7a04bca8c28..9cbb8d8363b4 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
@@ -389,7 +389,7 @@ static void csr_load_work_fn(struct work_struct *work) | |||
389 | { | 389 | { |
390 | struct drm_i915_private *dev_priv; | 390 | struct drm_i915_private *dev_priv; |
391 | struct intel_csr *csr; | 391 | struct intel_csr *csr; |
392 | const struct firmware *fw; | 392 | const struct firmware *fw = NULL; |
393 | int ret; | 393 | int ret; |
394 | 394 | ||
395 | dev_priv = container_of(work, typeof(*dev_priv), csr.work); | 395 | dev_priv = container_of(work, typeof(*dev_priv), csr.work); |
@@ -405,7 +405,7 @@ static void csr_load_work_fn(struct work_struct *work) | |||
405 | 405 | ||
406 | intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); | 406 | intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); |
407 | 407 | ||
408 | DRM_INFO("Finished loading %s (v%u.%u)\n", | 408 | DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n", |
409 | dev_priv->csr.fw_path, | 409 | dev_priv->csr.fw_path, |
410 | CSR_VERSION_MAJOR(csr->version), | 410 | CSR_VERSION_MAJOR(csr->version), |
411 | CSR_VERSION_MINOR(csr->version)); | 411 | CSR_VERSION_MINOR(csr->version)); |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 10ec9d4b7d45..66b367d0771a 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -442,7 +442,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por | |||
442 | 442 | ||
443 | hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; | 443 | hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; |
444 | 444 | ||
445 | if (IS_BROXTON(dev_priv)) | 445 | if (IS_GEN9_LP(dev_priv)) |
446 | return hdmi_level; | 446 | return hdmi_level; |
447 | 447 | ||
448 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | 448 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
@@ -484,7 +484,7 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder) | |||
484 | const struct ddi_buf_trans *ddi_translations_edp; | 484 | const struct ddi_buf_trans *ddi_translations_edp; |
485 | const struct ddi_buf_trans *ddi_translations; | 485 | const struct ddi_buf_trans *ddi_translations; |
486 | 486 | ||
487 | if (IS_BROXTON(dev_priv)) | 487 | if (IS_GEN9_LP(dev_priv)) |
488 | return; | 488 | return; |
489 | 489 | ||
490 | if (IS_KABYLAKE(dev_priv)) { | 490 | if (IS_KABYLAKE(dev_priv)) { |
@@ -567,7 +567,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder) | |||
567 | enum port port = intel_ddi_get_encoder_port(encoder); | 567 | enum port port = intel_ddi_get_encoder_port(encoder); |
568 | const struct ddi_buf_trans *ddi_translations_hdmi; | 568 | const struct ddi_buf_trans *ddi_translations_hdmi; |
569 | 569 | ||
570 | if (IS_BROXTON(dev_priv)) | 570 | if (IS_GEN9_LP(dev_priv)) |
571 | return; | 571 | return; |
572 | 572 | ||
573 | hdmi_level = intel_ddi_hdmi_level(dev_priv, port); | 573 | hdmi_level = intel_ddi_hdmi_level(dev_priv, port); |
@@ -1057,7 +1057,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, | |||
1057 | return 0; | 1057 | return 0; |
1058 | 1058 | ||
1059 | pll = &dev_priv->shared_dplls[dpll]; | 1059 | pll = &dev_priv->shared_dplls[dpll]; |
1060 | state = &pll->config.hw_state; | 1060 | state = &pll->state.hw_state; |
1061 | 1061 | ||
1062 | clock.m1 = 2; | 1062 | clock.m1 = 2; |
1063 | clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22; | 1063 | clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22; |
@@ -1091,7 +1091,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, | |||
1091 | hsw_ddi_clock_get(encoder, pipe_config); | 1091 | hsw_ddi_clock_get(encoder, pipe_config); |
1092 | else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 1092 | else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
1093 | skl_ddi_clock_get(encoder, pipe_config); | 1093 | skl_ddi_clock_get(encoder, pipe_config); |
1094 | else if (IS_BROXTON(dev_priv)) | 1094 | else if (IS_GEN9_LP(dev_priv)) |
1095 | bxt_ddi_clock_get(encoder, pipe_config); | 1095 | bxt_ddi_clock_get(encoder, pipe_config); |
1096 | } | 1096 | } |
1097 | 1097 | ||
@@ -1153,7 +1153,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
1153 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 1153 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
1154 | return skl_ddi_pll_select(intel_crtc, crtc_state, | 1154 | return skl_ddi_pll_select(intel_crtc, crtc_state, |
1155 | intel_encoder); | 1155 | intel_encoder); |
1156 | else if (IS_BROXTON(dev_priv)) | 1156 | else if (IS_GEN9_LP(dev_priv)) |
1157 | return bxt_ddi_pll_select(intel_crtc, crtc_state, | 1157 | return bxt_ddi_pll_select(intel_crtc, crtc_state, |
1158 | intel_encoder); | 1158 | intel_encoder); |
1159 | else | 1159 | else |
@@ -1429,7 +1429,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | |||
1429 | DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); | 1429 | DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); |
1430 | 1430 | ||
1431 | out: | 1431 | out: |
1432 | if (ret && IS_BROXTON(dev_priv)) { | 1432 | if (ret && IS_GEN9_LP(dev_priv)) { |
1433 | tmp = I915_READ(BXT_PHY_CTL(port)); | 1433 | tmp = I915_READ(BXT_PHY_CTL(port)); |
1434 | if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | | 1434 | if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | |
1435 | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) | 1435 | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) |
@@ -1643,7 +1643,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) | |||
1643 | 1643 | ||
1644 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 1644 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
1645 | skl_ddi_set_iboost(encoder, level); | 1645 | skl_ddi_set_iboost(encoder, level); |
1646 | else if (IS_BROXTON(dev_priv)) | 1646 | else if (IS_GEN9_LP(dev_priv)) |
1647 | bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); | 1647 | bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); |
1648 | 1648 | ||
1649 | return DDI_BUF_TRANS_SELECT(level); | 1649 | return DDI_BUF_TRANS_SELECT(level); |
@@ -1701,7 +1701,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
1701 | 1701 | ||
1702 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | 1702 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, |
1703 | bool has_hdmi_sink, | 1703 | bool has_hdmi_sink, |
1704 | struct drm_display_mode *adjusted_mode, | 1704 | const struct intel_crtc_state *crtc_state, |
1705 | const struct drm_connector_state *conn_state, | ||
1705 | struct intel_shared_dpll *pll) | 1706 | struct intel_shared_dpll *pll) |
1706 | { | 1707 | { |
1707 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1708 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
@@ -1715,13 +1716,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | |||
1715 | intel_prepare_hdmi_ddi_buffers(encoder); | 1716 | intel_prepare_hdmi_ddi_buffers(encoder); |
1716 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 1717 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
1717 | skl_ddi_set_iboost(encoder, level); | 1718 | skl_ddi_set_iboost(encoder, level); |
1718 | else if (IS_BROXTON(dev_priv)) | 1719 | else if (IS_GEN9_LP(dev_priv)) |
1719 | bxt_ddi_vswing_sequence(dev_priv, level, port, | 1720 | bxt_ddi_vswing_sequence(dev_priv, level, port, |
1720 | INTEL_OUTPUT_HDMI); | 1721 | INTEL_OUTPUT_HDMI); |
1721 | 1722 | ||
1722 | intel_hdmi->set_infoframes(drm_encoder, | 1723 | intel_hdmi->set_infoframes(drm_encoder, |
1723 | has_hdmi_sink, | 1724 | has_hdmi_sink, |
1724 | adjusted_mode); | 1725 | crtc_state, conn_state); |
1725 | } | 1726 | } |
1726 | 1727 | ||
1727 | static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder, | 1728 | static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder, |
@@ -1742,8 +1743,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder, | |||
1742 | } | 1743 | } |
1743 | if (type == INTEL_OUTPUT_HDMI) { | 1744 | if (type == INTEL_OUTPUT_HDMI) { |
1744 | intel_ddi_pre_enable_hdmi(intel_encoder, | 1745 | intel_ddi_pre_enable_hdmi(intel_encoder, |
1745 | crtc->config->has_hdmi_sink, | 1746 | pipe_config->has_hdmi_sink, |
1746 | &crtc->config->base.adjusted_mode, | 1747 | pipe_config, conn_state, |
1747 | crtc->config->shared_dpll); | 1748 | crtc->config->shared_dpll); |
1748 | } | 1749 | } |
1749 | } | 1750 | } |
@@ -1949,6 +1950,19 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) | |||
1949 | udelay(600); | 1950 | udelay(600); |
1950 | } | 1951 | } |
1951 | 1952 | ||
1953 | bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, | ||
1954 | struct intel_crtc *intel_crtc) | ||
1955 | { | ||
1956 | u32 temp; | ||
1957 | |||
1958 | if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { | ||
1959 | temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); | ||
1960 | if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) | ||
1961 | return true; | ||
1962 | } | ||
1963 | return false; | ||
1964 | } | ||
1965 | |||
1952 | void intel_ddi_get_config(struct intel_encoder *encoder, | 1966 | void intel_ddi_get_config(struct intel_encoder *encoder, |
1953 | struct intel_crtc_state *pipe_config) | 1967 | struct intel_crtc_state *pipe_config) |
1954 | { | 1968 | { |
@@ -2014,11 +2028,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
2014 | break; | 2028 | break; |
2015 | } | 2029 | } |
2016 | 2030 | ||
2017 | if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { | 2031 | pipe_config->has_audio = |
2018 | temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); | 2032 | intel_ddi_is_audio_enabled(dev_priv, intel_crtc); |
2019 | if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) | ||
2020 | pipe_config->has_audio = true; | ||
2021 | } | ||
2022 | 2033 | ||
2023 | if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp && | 2034 | if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp && |
2024 | pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { | 2035 | pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { |
@@ -2042,7 +2053,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
2042 | 2053 | ||
2043 | intel_ddi_clock_get(encoder, pipe_config); | 2054 | intel_ddi_clock_get(encoder, pipe_config); |
2044 | 2055 | ||
2045 | if (IS_BROXTON(dev_priv)) | 2056 | if (IS_GEN9_LP(dev_priv)) |
2046 | pipe_config->lane_lat_optim_mask = | 2057 | pipe_config->lane_lat_optim_mask = |
2047 | bxt_ddi_phy_get_lane_lat_optim_mask(encoder); | 2058 | bxt_ddi_phy_get_lane_lat_optim_mask(encoder); |
2048 | } | 2059 | } |
@@ -2066,7 +2077,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, | |||
2066 | else | 2077 | else |
2067 | ret = intel_dp_compute_config(encoder, pipe_config, conn_state); | 2078 | ret = intel_dp_compute_config(encoder, pipe_config, conn_state); |
2068 | 2079 | ||
2069 | if (IS_BROXTON(dev_priv) && ret) | 2080 | if (IS_GEN9_LP(dev_priv) && ret) |
2070 | pipe_config->lane_lat_optim_mask = | 2081 | pipe_config->lane_lat_optim_mask = |
2071 | bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, | 2082 | bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, |
2072 | pipe_config->lane_count); | 2083 | pipe_config->lane_count); |
@@ -2123,10 +2134,10 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock) | |||
2123 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2134 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2124 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 2135 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
2125 | struct intel_shared_dpll *pll = NULL; | 2136 | struct intel_shared_dpll *pll = NULL; |
2126 | struct intel_shared_dpll_config tmp_pll_config; | 2137 | struct intel_shared_dpll_state tmp_pll_state; |
2127 | enum intel_dpll_id dpll_id; | 2138 | enum intel_dpll_id dpll_id; |
2128 | 2139 | ||
2129 | if (IS_BROXTON(dev_priv)) { | 2140 | if (IS_GEN9_LP(dev_priv)) { |
2130 | dpll_id = (enum intel_dpll_id)dig_port->port; | 2141 | dpll_id = (enum intel_dpll_id)dig_port->port; |
2131 | /* | 2142 | /* |
2132 | * Select the required PLL. This works for platforms where | 2143 | * Select the required PLL. This works for platforms where |
@@ -2139,11 +2150,11 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock) | |||
2139 | pll->active_mask); | 2150 | pll->active_mask); |
2140 | return NULL; | 2151 | return NULL; |
2141 | } | 2152 | } |
2142 | tmp_pll_config = pll->config; | 2153 | tmp_pll_state = pll->state; |
2143 | if (!bxt_ddi_dp_set_dpll_hw_state(clock, | 2154 | if (!bxt_ddi_dp_set_dpll_hw_state(clock, |
2144 | &pll->config.hw_state)) { | 2155 | &pll->state.hw_state)) { |
2145 | DRM_ERROR("Could not setup DPLL\n"); | 2156 | DRM_ERROR("Could not setup DPLL\n"); |
2146 | pll->config = tmp_pll_config; | 2157 | pll->state = tmp_pll_state; |
2147 | return NULL; | 2158 | return NULL; |
2148 | } | 2159 | } |
2149 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | 2160 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
@@ -2154,9 +2165,8 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock) | |||
2154 | return pll; | 2165 | return pll; |
2155 | } | 2166 | } |
2156 | 2167 | ||
2157 | void intel_ddi_init(struct drm_device *dev, enum port port) | 2168 | void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) |
2158 | { | 2169 | { |
2159 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
2160 | struct intel_digital_port *intel_dig_port; | 2170 | struct intel_digital_port *intel_dig_port; |
2161 | struct intel_encoder *intel_encoder; | 2171 | struct intel_encoder *intel_encoder; |
2162 | struct drm_encoder *encoder; | 2172 | struct drm_encoder *encoder; |
@@ -2218,12 +2228,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
2218 | intel_encoder = &intel_dig_port->base; | 2228 | intel_encoder = &intel_dig_port->base; |
2219 | encoder = &intel_encoder->base; | 2229 | encoder = &intel_encoder->base; |
2220 | 2230 | ||
2221 | drm_encoder_init(dev, encoder, &intel_ddi_funcs, | 2231 | drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs, |
2222 | DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); | 2232 | DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); |
2223 | 2233 | ||
2224 | intel_encoder->compute_config = intel_ddi_compute_config; | 2234 | intel_encoder->compute_config = intel_ddi_compute_config; |
2225 | intel_encoder->enable = intel_enable_ddi; | 2235 | intel_encoder->enable = intel_enable_ddi; |
2226 | if (IS_BROXTON(dev_priv)) | 2236 | if (IS_GEN9_LP(dev_priv)) |
2227 | intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; | 2237 | intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; |
2228 | intel_encoder->pre_enable = intel_ddi_pre_enable; | 2238 | intel_encoder->pre_enable = intel_ddi_pre_enable; |
2229 | intel_encoder->disable = intel_disable_ddi; | 2239 | intel_encoder->disable = intel_disable_ddi; |
@@ -2244,7 +2254,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
2244 | * configuration so that we use the proper lane count for our | 2254 | * configuration so that we use the proper lane count for our |
2245 | * calculations. | 2255 | * calculations. |
2246 | */ | 2256 | */ |
2247 | if (IS_BROXTON(dev_priv) && port == PORT_A) { | 2257 | if (IS_GEN9_LP(dev_priv) && port == PORT_A) { |
2248 | if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) { | 2258 | if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) { |
2249 | DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n"); | 2259 | DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n"); |
2250 | intel_dig_port->saved_port_bits |= DDI_A_4_LANES; | 2260 | intel_dig_port->saved_port_bits |= DDI_A_4_LANES; |
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 185e3bbc9ec9..f642f6ded4ae 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c | |||
@@ -24,11 +24,51 @@ | |||
24 | 24 | ||
25 | #include "i915_drv.h" | 25 | #include "i915_drv.h" |
26 | 26 | ||
27 | #define PLATFORM_NAME(x) [INTEL_##x] = #x | ||
28 | static const char * const platform_names[] = { | ||
29 | PLATFORM_NAME(I830), | ||
30 | PLATFORM_NAME(I845G), | ||
31 | PLATFORM_NAME(I85X), | ||
32 | PLATFORM_NAME(I865G), | ||
33 | PLATFORM_NAME(I915G), | ||
34 | PLATFORM_NAME(I915GM), | ||
35 | PLATFORM_NAME(I945G), | ||
36 | PLATFORM_NAME(I945GM), | ||
37 | PLATFORM_NAME(G33), | ||
38 | PLATFORM_NAME(PINEVIEW), | ||
39 | PLATFORM_NAME(I965G), | ||
40 | PLATFORM_NAME(I965GM), | ||
41 | PLATFORM_NAME(G45), | ||
42 | PLATFORM_NAME(GM45), | ||
43 | PLATFORM_NAME(IRONLAKE), | ||
44 | PLATFORM_NAME(SANDYBRIDGE), | ||
45 | PLATFORM_NAME(IVYBRIDGE), | ||
46 | PLATFORM_NAME(VALLEYVIEW), | ||
47 | PLATFORM_NAME(HASWELL), | ||
48 | PLATFORM_NAME(BROADWELL), | ||
49 | PLATFORM_NAME(CHERRYVIEW), | ||
50 | PLATFORM_NAME(SKYLAKE), | ||
51 | PLATFORM_NAME(BROXTON), | ||
52 | PLATFORM_NAME(KABYLAKE), | ||
53 | PLATFORM_NAME(GEMINILAKE), | ||
54 | }; | ||
55 | #undef PLATFORM_NAME | ||
56 | |||
57 | const char *intel_platform_name(enum intel_platform platform) | ||
58 | { | ||
59 | if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) || | ||
60 | platform_names[platform] == NULL)) | ||
61 | return "<unknown>"; | ||
62 | |||
63 | return platform_names[platform]; | ||
64 | } | ||
65 | |||
27 | void intel_device_info_dump(struct drm_i915_private *dev_priv) | 66 | void intel_device_info_dump(struct drm_i915_private *dev_priv) |
28 | { | 67 | { |
29 | const struct intel_device_info *info = &dev_priv->info; | 68 | const struct intel_device_info *info = &dev_priv->info; |
30 | 69 | ||
31 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x", | 70 | DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x", |
71 | intel_platform_name(info->platform), | ||
32 | info->gen, | 72 | info->gen, |
33 | dev_priv->drm.pdev->device, | 73 | dev_priv->drm.pdev->device, |
34 | dev_priv->drm.pdev->revision); | 74 | dev_priv->drm.pdev->revision); |
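The PLATFORM_NAME() table combines designated array initializers with preprocessor stringification so that each enum value indexes its own name, and the WARN_ON_ONCE() above guards any holes such a table may contain. A minimal standalone illustration of the idiom:

	#include <stdio.h>

	enum color { RED, GREEN, BLUE };

	#define NAME(x) [x] = #x
	static const char * const color_names[] = {
		NAME(RED),
		NAME(GREEN),
		NAME(BLUE),
	};
	#undef NAME

	int main(void)
	{
		printf("%s\n", color_names[GREEN]);	/* prints "GREEN" */
		return 0;
	}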
@@ -270,6 +310,12 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) | |||
270 | struct intel_device_info *info = mkwrite_device_info(dev_priv); | 310 | struct intel_device_info *info = mkwrite_device_info(dev_priv); |
271 | enum pipe pipe; | 311 | enum pipe pipe; |
272 | 312 | ||
313 | if (INTEL_GEN(dev_priv) >= 9) { | ||
314 | info->num_scalers[PIPE_A] = 2; | ||
315 | info->num_scalers[PIPE_B] = 2; | ||
316 | info->num_scalers[PIPE_C] = 1; | ||
317 | } | ||
318 | |||
273 | /* | 319 | /* |
274 | * Skylake and Broxton currently don't expose the topmost plane as its | 320 | * Skylake and Broxton currently don't expose the topmost plane as its |
275 | * use is exclusive with the legacy cursor and we only want to expose | 321 | * use is exclusive with the legacy cursor and we only want to expose |
@@ -278,7 +324,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) | |||
278 | * we don't expose the topmost plane at all to prevent ABI breakage | 324 | * we don't expose the topmost plane at all to prevent ABI breakage |
279 | * down the line. | 325 | * down the line. |
280 | */ | 326 | */ |
281 | if (IS_BROXTON(dev_priv)) { | 327 | if (IS_GEMINILAKE(dev_priv)) |
328 | for_each_pipe(dev_priv, pipe) | ||
329 | info->num_sprites[pipe] = 3; | ||
330 | else if (IS_BROXTON(dev_priv)) { | ||
282 | info->num_sprites[PIPE_A] = 2; | 331 | info->num_sprites[PIPE_A] = 2; |
283 | info->num_sprites[PIPE_B] = 2; | 332 | info->num_sprites[PIPE_B] = 2; |
284 | info->num_sprites[PIPE_C] = 1; | 333 | info->num_sprites[PIPE_C] = 1; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 252aaabc7eef..e2150a64860c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -115,15 +115,15 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
115 | const struct intel_crtc_state *pipe_config); | 115 | const struct intel_crtc_state *pipe_config); |
116 | static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); | 116 | static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); |
117 | static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); | 117 | static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); |
118 | static void skl_init_scalers(struct drm_i915_private *dev_priv, | 118 | static void intel_crtc_init_scalers(struct intel_crtc *crtc, |
119 | struct intel_crtc *crtc, | 119 | struct intel_crtc_state *crtc_state); |
120 | struct intel_crtc_state *crtc_state); | ||
121 | static void skylake_pfit_enable(struct intel_crtc *crtc); | 120 | static void skylake_pfit_enable(struct intel_crtc *crtc); |
122 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); | 121 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); |
123 | static void ironlake_pfit_enable(struct intel_crtc *crtc); | 122 | static void ironlake_pfit_enable(struct intel_crtc *crtc); |
124 | static void intel_modeset_setup_hw_state(struct drm_device *dev); | 123 | static void intel_modeset_setup_hw_state(struct drm_device *dev); |
125 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); | 124 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); |
126 | static int ilk_max_pixel_rate(struct drm_atomic_state *state); | 125 | static int ilk_max_pixel_rate(struct drm_atomic_state *state); |
126 | static int glk_calc_cdclk(int max_pixclk); | ||
127 | static int bxt_calc_cdclk(int max_pixclk); | 127 | static int bxt_calc_cdclk(int max_pixclk); |
128 | 128 | ||
129 | struct intel_limit { | 129 | struct intel_limit { |
@@ -614,12 +614,12 @@ static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, | |||
614 | INTELPllInvalid("m1 out of range\n"); | 614 | INTELPllInvalid("m1 out of range\n"); |
615 | 615 | ||
616 | if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && | 616 | if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && |
617 | !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv)) | 617 | !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) |
618 | if (clock->m1 <= clock->m2) | 618 | if (clock->m1 <= clock->m2) |
619 | INTELPllInvalid("m1 <= m2\n"); | 619 | INTELPllInvalid("m1 <= m2\n"); |
620 | 620 | ||
621 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && | 621 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && |
622 | !IS_BROXTON(dev_priv)) { | 622 | !IS_GEN9_LP(dev_priv)) { |
623 | if (clock->p < limit->p.min || limit->p.max < clock->p) | 623 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
624 | INTELPllInvalid("p out of range\n"); | 624 | INTELPllInvalid("p out of range\n"); |
625 | if (clock->m < limit->m.min || limit->m.max < clock->m) | 625 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
@@ -1232,7 +1232,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv, | |||
1232 | { | 1232 | { |
1233 | bool cur_state; | 1233 | bool cur_state; |
1234 | 1234 | ||
1235 | if (IS_845G(dev_priv) || IS_I865G(dev_priv)) | 1235 | if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) |
1236 | cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; | 1236 | cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; |
1237 | else | 1237 | else |
1238 | cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; | 1238 | cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; |
@@ -1327,7 +1327,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1327 | } | 1327 | } |
1328 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 1328 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1329 | for_each_sprite(dev_priv, pipe, sprite) { | 1329 | for_each_sprite(dev_priv, pipe, sprite) { |
1330 | u32 val = I915_READ(SPCNTR(pipe, sprite)); | 1330 | u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite)); |
1331 | I915_STATE_WARN(val & SP_ENABLE, | 1331 | I915_STATE_WARN(val & SP_ENABLE, |
1332 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1332 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1333 | sprite_name(pipe, sprite), pipe_name(pipe)); | 1333 | sprite_name(pipe, sprite), pipe_name(pipe)); |
@@ -2149,7 +2149,7 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr | |||
2149 | { | 2149 | { |
2150 | if (INTEL_INFO(dev_priv)->gen >= 9) | 2150 | if (INTEL_INFO(dev_priv)->gen >= 9) |
2151 | return 256 * 1024; | 2151 | return 256 * 1024; |
2152 | else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || | 2152 | else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || |
2153 | IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 2153 | IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
2154 | return 128 * 1024; | 2154 | return 128 * 1024; |
2155 | else if (INTEL_INFO(dev_priv)->gen >= 4) | 2155 | else if (INTEL_INFO(dev_priv)->gen >= 4) |
@@ -2688,7 +2688,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, | |||
2688 | 2688 | ||
2689 | mutex_lock(&dev->struct_mutex); | 2689 | mutex_lock(&dev->struct_mutex); |
2690 | 2690 | ||
2691 | obj = i915_gem_object_create_stolen_for_preallocated(dev, | 2691 | obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, |
2692 | base_aligned, | 2692 | base_aligned, |
2693 | base_aligned, | 2693 | base_aligned, |
2694 | size_aligned); | 2694 | size_aligned); |
@@ -3377,7 +3377,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane, | |||
3377 | struct drm_i915_private *dev_priv = to_i915(dev); | 3377 | struct drm_i915_private *dev_priv = to_i915(dev); |
3378 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | 3378 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
3379 | struct drm_framebuffer *fb = plane_state->base.fb; | 3379 | struct drm_framebuffer *fb = plane_state->base.fb; |
3380 | int pipe = intel_crtc->pipe; | 3380 | enum plane_id plane_id = to_intel_plane(plane)->id; |
3381 | enum pipe pipe = to_intel_plane(plane)->pipe; | ||
3381 | u32 plane_ctl; | 3382 | u32 plane_ctl; |
3382 | unsigned int rotation = plane_state->base.rotation; | 3383 | unsigned int rotation = plane_state->base.rotation; |
3383 | u32 stride = skl_plane_stride(fb, 0, rotation); | 3384 | u32 stride = skl_plane_stride(fb, 0, rotation); |
@@ -3412,30 +3413,30 @@ static void skylake_update_primary_plane(struct drm_plane *plane, | |||
3412 | intel_crtc->adjusted_x = src_x; | 3413 | intel_crtc->adjusted_x = src_x; |
3413 | intel_crtc->adjusted_y = src_y; | 3414 | intel_crtc->adjusted_y = src_y; |
3414 | 3415 | ||
3415 | I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); | 3416 | I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl); |
3416 | I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x); | 3417 | I915_WRITE(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x); |
3417 | I915_WRITE(PLANE_STRIDE(pipe, 0), stride); | 3418 | I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride); |
3418 | I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w); | 3419 | I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); |
3419 | 3420 | ||
3420 | if (scaler_id >= 0) { | 3421 | if (scaler_id >= 0) { |
3421 | uint32_t ps_ctrl = 0; | 3422 | uint32_t ps_ctrl = 0; |
3422 | 3423 | ||
3423 | WARN_ON(!dst_w || !dst_h); | 3424 | WARN_ON(!dst_w || !dst_h); |
3424 | ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) | | 3425 | ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) | |
3425 | crtc_state->scaler_state.scalers[scaler_id].mode; | 3426 | crtc_state->scaler_state.scalers[scaler_id].mode; |
3426 | I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); | 3427 | I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); |
3427 | I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); | 3428 | I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); |
3428 | I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y); | 3429 | I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y); |
3429 | I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); | 3430 | I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); |
3430 | I915_WRITE(PLANE_POS(pipe, 0), 0); | 3431 | I915_WRITE(PLANE_POS(pipe, plane_id), 0); |
3431 | } else { | 3432 | } else { |
3432 | I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); | 3433 | I915_WRITE(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x); |
3433 | } | 3434 | } |
3434 | 3435 | ||
3435 | I915_WRITE(PLANE_SURF(pipe, 0), | 3436 | I915_WRITE(PLANE_SURF(pipe, plane_id), |
3436 | intel_fb_gtt_offset(fb, rotation) + surf_addr); | 3437 | intel_fb_gtt_offset(fb, rotation) + surf_addr); |
3437 | 3438 | ||
3438 | POSTING_READ(PLANE_SURF(pipe, 0)); | 3439 | POSTING_READ(PLANE_SURF(pipe, plane_id)); |
3439 | } | 3440 | } |
3440 | 3441 | ||
3441 | static void skylake_disable_primary_plane(struct drm_plane *primary, | 3442 | static void skylake_disable_primary_plane(struct drm_plane *primary, |
@@ -3443,12 +3444,12 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, | |||
3443 | { | 3444 | { |
3444 | struct drm_device *dev = crtc->dev; | 3445 | struct drm_device *dev = crtc->dev; |
3445 | struct drm_i915_private *dev_priv = to_i915(dev); | 3446 | struct drm_i915_private *dev_priv = to_i915(dev); |
3446 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3447 | enum plane_id plane_id = to_intel_plane(primary)->id; |
3447 | int pipe = intel_crtc->pipe; | 3448 | enum pipe pipe = to_intel_plane(primary)->pipe; |
3448 | 3449 | ||
3449 | I915_WRITE(PLANE_CTL(pipe, 0), 0); | 3450 | I915_WRITE(PLANE_CTL(pipe, plane_id), 0); |
3450 | I915_WRITE(PLANE_SURF(pipe, 0), 0); | 3451 | I915_WRITE(PLANE_SURF(pipe, plane_id), 0); |
3451 | POSTING_READ(PLANE_SURF(pipe, 0)); | 3452 | POSTING_READ(PLANE_SURF(pipe, plane_id)); |
3452 | } | 3453 | } |
3453 | 3454 | ||
3454 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ | 3455 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ |
@@ -4226,9 +4227,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) | |||
4226 | udelay(100); | 4227 | udelay(100); |
4227 | } | 4228 | } |
4228 | 4229 | ||
4229 | bool intel_has_pending_fb_unpin(struct drm_device *dev) | 4230 | bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) |
4230 | { | 4231 | { |
4231 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4232 | struct intel_crtc *crtc; | 4232 | struct intel_crtc *crtc; |
4233 | 4233 | ||
4234 | /* Note that we don't need to be called with mode_config.lock here | 4234 | /* Note that we don't need to be called with mode_config.lock here |
@@ -4238,7 +4238,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) | |||
4238 | * cannot claim and pin a new fb without at least acquiring the | 4238 | * cannot claim and pin a new fb without at least acquiring the |
4239 | * struct_mutex and so serialising with us. | 4239 | * struct_mutex and so serialising with us. |
4240 | */ | 4240 | */ |
4241 | for_each_intel_crtc(dev, crtc) { | 4241 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
4242 | if (atomic_read(&crtc->unpin_work_count) == 0) | 4242 | if (atomic_read(&crtc->unpin_work_count) == 0) |
4243 | continue; | 4243 | continue; |
4244 | 4244 | ||
@@ -5019,11 +5019,9 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) | |||
5019 | * event which is after the vblank start event, so we need to have a | 5019 | * event which is after the vblank start event, so we need to have a |
5020 | * wait-for-vblank between disabling the plane and the pipe. | 5020 | * wait-for-vblank between disabling the plane and the pipe. |
5021 | */ | 5021 | */ |
5022 | if (HAS_GMCH_DISPLAY(dev_priv)) { | 5022 | if (HAS_GMCH_DISPLAY(dev_priv) && |
5023 | intel_set_memory_cxsr(dev_priv, false); | 5023 | intel_set_memory_cxsr(dev_priv, false)) |
5024 | dev_priv->wm.vlv.cxsr = false; | ||
5025 | intel_wait_for_vblank(dev_priv, pipe); | 5024 | intel_wait_for_vblank(dev_priv, pipe); |
5026 | } | ||
5027 | } | 5025 | } |
5028 | 5026 | ||
5029 | static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) | 5027 | static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) |
@@ -5098,11 +5096,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) | |||
5098 | * event which is after the vblank start event, so we need to have a | 5096 | * event which is after the vblank start event, so we need to have a |
5099 | * wait-for-vblank between disabling the plane and the pipe. | 5097 | * wait-for-vblank between disabling the plane and the pipe. |
5100 | */ | 5098 | */ |
5101 | if (old_crtc_state->base.active) { | 5099 | if (old_crtc_state->base.active && |
5102 | intel_set_memory_cxsr(dev_priv, false); | 5100 | intel_set_memory_cxsr(dev_priv, false)) |
5103 | dev_priv->wm.vlv.cxsr = false; | ||
5104 | intel_wait_for_vblank(dev_priv, crtc->pipe); | 5101 | intel_wait_for_vblank(dev_priv, crtc->pipe); |
5105 | } | ||
5106 | } | 5102 | } |
5107 | 5103 | ||
5108 | /* | 5104 | /* |
@@ -5112,10 +5108,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) | |||
5112 | * | 5108 | * |
5113 | * WaCxSRDisabledForSpriteScaling:ivb | 5109 | * WaCxSRDisabledForSpriteScaling:ivb |
5114 | */ | 5110 | */ |
5115 | if (pipe_config->disable_lp_wm) { | 5111 | if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) |
5116 | ilk_disable_lp_wm(dev); | ||
5117 | intel_wait_for_vblank(dev_priv, crtc->pipe); | 5112 | intel_wait_for_vblank(dev_priv, crtc->pipe); |
5118 | } | ||
5119 | 5113 | ||
5120 | /* | 5114 | /* |
5121 | * If we're doing a modeset, we're done. No need to do any pre-vblank | 5115 | * If we're doing a modeset, we're done. No need to do any pre-vblank |
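All three hunks above fold the vblank wait under the helper's return value: judging by the call sites, intel_set_memory_cxsr() and ilk_disable_lp_wm() now report whether they actually changed anything, which also makes the open-coded dev_priv->wm.vlv.cxsr bookkeeping redundant. The assumed contract, as a sketch:

	/* assumed: returns true iff the hardware state was changed,
	 * i.e. a vblank wait is needed before touching the planes */
	bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				   bool enable);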
@@ -5463,10 +5457,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5463 | intel_ddi_enable_transcoder_func(crtc); | 5457 | intel_ddi_enable_transcoder_func(crtc); |
5464 | 5458 | ||
5465 | if (dev_priv->display.initial_watermarks != NULL) | 5459 | if (dev_priv->display.initial_watermarks != NULL) |
5466 | dev_priv->display.initial_watermarks(old_intel_state, | 5460 | dev_priv->display.initial_watermarks(old_intel_state, pipe_config); |
5467 | pipe_config); | ||
5468 | else | ||
5469 | intel_update_watermarks(intel_crtc); | ||
5470 | 5461 | ||
5471 | /* XXX: Do the pipe assertions at the right place for BXT DSI. */ | 5462 | /* XXX: Do the pipe assertions at the right place for BXT DSI. */ |
5472 | if (!transcoder_is_dsi(cpu_transcoder)) | 5463 | if (!transcoder_is_dsi(cpu_transcoder)) |
@@ -5803,8 +5794,10 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) | |||
5803 | { | 5794 | { |
5804 | int max_cdclk_freq = dev_priv->max_cdclk_freq; | 5795 | int max_cdclk_freq = dev_priv->max_cdclk_freq; |
5805 | 5796 | ||
5806 | if (INTEL_INFO(dev_priv)->gen >= 9 || | 5797 | if (IS_GEMINILAKE(dev_priv)) |
5807 | IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 5798 | return 2 * max_cdclk_freq; |
5799 | else if (INTEL_INFO(dev_priv)->gen >= 9 || | ||
5800 | IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | ||
5808 | return max_cdclk_freq; | 5801 | return max_cdclk_freq; |
5809 | else if (IS_CHERRYVIEW(dev_priv)) | 5802 | else if (IS_CHERRYVIEW(dev_priv)) |
5810 | return max_cdclk_freq*95/100; | 5803 | return max_cdclk_freq*95/100; |
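Geminilake alone is allowed a dotclock of twice cdclk, presumably because its pipes can process two pixels per cdclk cycle. A quick sanity check against the 316800 kHz maximum cdclk added below:

	max dotclock = 2 * 316800 kHz = 633600 kHz = 633.6 MHz
	(comfortably above the ~533 MHz needed for 3840x2160@60 CVT-RB)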
@@ -5840,6 +5833,8 @@ static void intel_update_max_cdclk(struct drm_i915_private *dev_priv) | |||
5840 | max_cdclk = 308571; | 5833 | max_cdclk = 308571; |
5841 | 5834 | ||
5842 | dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); | 5835 | dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); |
5836 | } else if (IS_GEMINILAKE(dev_priv)) { | ||
5837 | dev_priv->max_cdclk_freq = 316800; | ||
5843 | } else if (IS_BROXTON(dev_priv)) { | 5838 | } else if (IS_BROXTON(dev_priv)) { |
5844 | dev_priv->max_cdclk_freq = 624000; | 5839 | dev_priv->max_cdclk_freq = 624000; |
5845 | } else if (IS_BROADWELL(dev_priv)) { | 5840 | } else if (IS_BROADWELL(dev_priv)) { |
@@ -5927,6 +5922,26 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) | |||
5927 | return dev_priv->cdclk_pll.ref * ratio; | 5922 | return dev_priv->cdclk_pll.ref * ratio; |
5928 | } | 5923 | } |
5929 | 5924 | ||
5925 | static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) | ||
5926 | { | ||
5927 | int ratio; | ||
5928 | |||
5929 | if (cdclk == dev_priv->cdclk_pll.ref) | ||
5930 | return 0; | ||
5931 | |||
5932 | switch (cdclk) { | ||
5933 | default: | ||
5934 | MISSING_CASE(cdclk); | ||
5935 | case 79200: | ||
5936 | case 158400: | ||
5937 | case 316800: | ||
5938 | ratio = 33; | ||
5939 | break; | ||
5940 | } | ||
5941 | |||
5942 | return dev_priv->cdclk_pll.ref * ratio; | ||
5943 | } | ||
5944 | |||
5930 | static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) | 5945 | static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) |
5931 | { | 5946 | { |
5932 | I915_WRITE(BXT_DE_PLL_ENABLE, 0); | 5947 | I915_WRITE(BXT_DE_PLL_ENABLE, 0); |
@@ -5968,7 +5983,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) | |||
5968 | u32 val, divider; | 5983 | u32 val, divider; |
5969 | int vco, ret; | 5984 | int vco, ret; |
5970 | 5985 | ||
5971 | vco = bxt_de_pll_vco(dev_priv, cdclk); | 5986 | if (IS_GEMINILAKE(dev_priv)) |
5987 | vco = glk_de_pll_vco(dev_priv, cdclk); | ||
5988 | else | ||
5989 | vco = bxt_de_pll_vco(dev_priv, cdclk); | ||
5972 | 5990 | ||
5973 | DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); | 5991 | DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); |
5974 | 5992 | ||
@@ -5981,6 +5999,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) | |||
5981 | divider = BXT_CDCLK_CD2X_DIV_SEL_2; | 5999 | divider = BXT_CDCLK_CD2X_DIV_SEL_2; |
5982 | break; | 6000 | break; |
5983 | case 3: | 6001 | case 3: |
6002 | WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); | ||
5984 | divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; | 6003 | divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; |
5985 | break; | 6004 | break; |
5986 | case 2: | 6005 | case 2: |
@@ -6090,6 +6109,8 @@ sanitize: | |||
6090 | 6109 | ||
6091 | void bxt_init_cdclk(struct drm_i915_private *dev_priv) | 6110 | void bxt_init_cdclk(struct drm_i915_private *dev_priv) |
6092 | { | 6111 | { |
6112 | int cdclk; | ||
6113 | |||
6093 | bxt_sanitize_cdclk(dev_priv); | 6114 | bxt_sanitize_cdclk(dev_priv); |
6094 | 6115 | ||
6095 | if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) | 6116 | if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) |
@@ -6100,7 +6121,12 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv) | |||
6100 | * - The initial CDCLK needs to be read from VBT. | 6121 | * - The initial CDCLK needs to be read from VBT. |
6101 | * Make this change once the VBT gains the BXT fields. | 6122 | * Make this change once the VBT gains the BXT fields. |
6102 | */ | 6123 | */ |
6103 | bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0)); | 6124 | if (IS_GEMINILAKE(dev_priv)) |
6125 | cdclk = glk_calc_cdclk(0); | ||
6126 | else | ||
6127 | cdclk = bxt_calc_cdclk(0); | ||
6128 | |||
6129 | bxt_set_cdclk(dev_priv, cdclk); | ||
6104 | } | 6130 | } |
6105 | 6131 | ||
6106 | void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) | 6132 | void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) |
@@ -6515,6 +6541,16 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, | |||
6515 | return 200000; | 6541 | return 200000; |
6516 | } | 6542 | } |
6517 | 6543 | ||
6544 | static int glk_calc_cdclk(int max_pixclk) | ||
6545 | { | ||
6546 | if (max_pixclk > 2 * 158400) | ||
6547 | return 316800; | ||
6548 | else if (max_pixclk > 2 * 79200) | ||
6549 | return 158400; | ||
6550 | else | ||
6551 | return 79200; | ||
6552 | } | ||
6553 | |||
6518 | static int bxt_calc_cdclk(int max_pixclk) | 6554 | static int bxt_calc_cdclk(int max_pixclk) |
6519 | { | 6555 | { |
6520 | if (max_pixclk > 576000) | 6556 | if (max_pixclk > 576000) |
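The three Geminilake steps follow from the single DE PLL VCO chosen in glk_de_pll_vco() divided by the legal CD2X dividers. With the 19.2 MHz reference assumed here (as on Broxton):

	vco   = 19200 kHz * 33 = 633600 kHz
	cdclk = vco / (2 * cd2x divider):
		div 1   -> 316800 kHz
		div 2   -> 158400 kHz
		div 4   ->  79200 kHz
		div 1.5 -> 211200 kHz, not a valid step -- hence the
		           WARNs on BXT_CDCLK_CD2X_DIV_SEL_1_5 above

The "2 *" in the pixclk thresholds mirrors the two-pixels-per-clock rule from intel_compute_max_dotclk().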
@@ -6577,15 +6613,27 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) | |||
6577 | 6613 | ||
6578 | static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) | 6614 | static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) |
6579 | { | 6615 | { |
6616 | struct drm_i915_private *dev_priv = to_i915(state->dev); | ||
6580 | int max_pixclk = ilk_max_pixel_rate(state); | 6617 | int max_pixclk = ilk_max_pixel_rate(state); |
6581 | struct intel_atomic_state *intel_state = | 6618 | struct intel_atomic_state *intel_state = |
6582 | to_intel_atomic_state(state); | 6619 | to_intel_atomic_state(state); |
6620 | int cdclk; | ||
6583 | 6621 | ||
6584 | intel_state->cdclk = intel_state->dev_cdclk = | 6622 | if (IS_GEMINILAKE(dev_priv)) |
6585 | bxt_calc_cdclk(max_pixclk); | 6623 | cdclk = glk_calc_cdclk(max_pixclk); |
6624 | else | ||
6625 | cdclk = bxt_calc_cdclk(max_pixclk); | ||
6586 | 6626 | ||
6587 | if (!intel_state->active_crtcs) | 6627 | intel_state->cdclk = intel_state->dev_cdclk = cdclk; |
6588 | intel_state->dev_cdclk = bxt_calc_cdclk(0); | 6628 | |
6629 | if (!intel_state->active_crtcs) { | ||
6630 | if (IS_GEMINILAKE(dev_priv)) | ||
6631 | cdclk = glk_calc_cdclk(0); | ||
6632 | else | ||
6633 | cdclk = bxt_calc_cdclk(0); | ||
6634 | |||
6635 | intel_state->dev_cdclk = cdclk; | ||
6636 | } | ||
6589 | 6637 | ||
6590 | return 0; | 6638 | return 0; |
6591 | } | 6639 | } |
@@ -7287,6 +7335,7 @@ static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv) | |||
7287 | div = 2; | 7335 | div = 2; |
7288 | break; | 7336 | break; |
7289 | case BXT_CDCLK_CD2X_DIV_SEL_1_5: | 7337 | case BXT_CDCLK_CD2X_DIV_SEL_1_5: |
7338 | WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); | ||
7290 | div = 3; | 7339 | div = 3; |
7291 | break; | 7340 | break; |
7292 | case BXT_CDCLK_CD2X_DIV_SEL_2: | 7341 | case BXT_CDCLK_CD2X_DIV_SEL_2: |
@@ -7506,7 +7555,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) | |||
7506 | vco_table = ctg_vco; | 7555 | vco_table = ctg_vco; |
7507 | else if (IS_G4X(dev_priv)) | 7556 | else if (IS_G4X(dev_priv)) |
7508 | vco_table = elk_vco; | 7557 | vco_table = elk_vco; |
7509 | else if (IS_CRESTLINE(dev_priv)) | 7558 | else if (IS_I965GM(dev_priv)) |
7510 | vco_table = cl_vco; | 7559 | vco_table = cl_vco; |
7511 | else if (IS_PINEVIEW(dev_priv)) | 7560 | else if (IS_PINEVIEW(dev_priv)) |
7512 | vco_table = pnv_vco; | 7561 | vco_table = pnv_vco; |
@@ -8118,7 +8167,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, | |||
8118 | else | 8167 | else |
8119 | dpll |= DPLLB_MODE_DAC_SERIAL; | 8168 | dpll |= DPLLB_MODE_DAC_SERIAL; |
8120 | 8169 | ||
8121 | if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) { | 8170 | if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || |
8171 | IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { | ||
8122 | dpll |= (crtc_state->pixel_multiplier - 1) | 8172 | dpll |= (crtc_state->pixel_multiplier - 1) |
8123 | << SDVO_MULTIPLIER_SHIFT_HIRES; | 8173 | << SDVO_MULTIPLIER_SHIFT_HIRES; |
8124 | } | 8174 | } |
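
Note: the hunk above extends the pixel-multiplier encoding to Pineview; the encode here and the matching decode in i9xx_get_pipe_config() further down are exact inverses. A round-trip check (the shift/mask values are illustrative, the real ones live in i915_reg.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_MASK        (0xf << SDVO_MULTIPLIER_SHIFT_HIRES)

int main(void)
{
	for (int mult = 1; mult <= 4; mult++) {
		uint32_t dpll = 0;

		/* encode, as in i9xx_compute_dpll() */
		dpll |= (uint32_t)(mult - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;

		/* decode, as in i9xx_get_pipe_config() */
		int readback = ((dpll & SDVO_MULTIPLIER_MASK)
				>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;

		assert(readback == mult);
		printf("multiplier %d <-> field 0x%x\n", mult, (unsigned)dpll);
	}
	return 0;
}
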
@@ -8832,7 +8882,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | |||
8832 | >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; | 8882 | >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; |
8833 | pipe_config->dpll_hw_state.dpll_md = tmp; | 8883 | pipe_config->dpll_hw_state.dpll_md = tmp; |
8834 | } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || | 8884 | } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || |
8835 | IS_G33(dev_priv)) { | 8885 | IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { |
8836 | tmp = I915_READ(DPLL(crtc->pipe)); | 8886 | tmp = I915_READ(DPLL(crtc->pipe)); |
8837 | pipe_config->pixel_multiplier = | 8887 | pipe_config->pixel_multiplier = |
8838 | ((tmp & SDVO_MULTIPLIER_MASK) | 8888 | ((tmp & SDVO_MULTIPLIER_MASK) |
@@ -8885,9 +8935,8 @@ out: | |||
8885 | return ret; | 8935 | return ret; |
8886 | } | 8936 | } |
8887 | 8937 | ||
8888 | static void ironlake_init_pch_refclk(struct drm_device *dev) | 8938 | static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) |
8889 | { | 8939 | { |
8890 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
8891 | struct intel_encoder *encoder; | 8940 | struct intel_encoder *encoder; |
8892 | int i; | 8941 | int i; |
8893 | u32 val, final; | 8942 | u32 val, final; |
@@ -8899,7 +8948,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
8899 | bool using_ssc_source = false; | 8948 | bool using_ssc_source = false; |
8900 | 8949 | ||
8901 | /* We need to take the global config into account */ | 8950 | /* We need to take the global config into account */ |
8902 | for_each_intel_encoder(dev, encoder) { | 8951 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
8903 | switch (encoder->type) { | 8952 | switch (encoder->type) { |
8904 | case INTEL_OUTPUT_LVDS: | 8953 | case INTEL_OUTPUT_LVDS: |
8905 | has_panel = true; | 8954 | has_panel = true; |
@@ -9155,10 +9204,9 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) | |||
9155 | * - Sequence to enable CLKOUT_DP without spread | 9204 | * - Sequence to enable CLKOUT_DP without spread |
9156 | * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O | 9205 | * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O |
9157 | */ | 9206 | */ |
9158 | static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, | 9207 | static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, |
9159 | bool with_fdi) | 9208 | bool with_spread, bool with_fdi) |
9160 | { | 9209 | { |
9161 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
9162 | uint32_t reg, tmp; | 9210 | uint32_t reg, tmp; |
9163 | 9211 | ||
9164 | if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) | 9212 | if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) |
@@ -9196,9 +9244,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, | |||
9196 | } | 9244 | } |
9197 | 9245 | ||
9198 | /* Sequence to disable CLKOUT_DP */ | 9246 | /* Sequence to disable CLKOUT_DP */ |
9199 | static void lpt_disable_clkout_dp(struct drm_device *dev) | 9247 | static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) |
9200 | { | 9248 | { |
9201 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
9202 | uint32_t reg, tmp; | 9249 | uint32_t reg, tmp; |
9203 | 9250 | ||
9204 | mutex_lock(&dev_priv->sb_lock); | 9251 | mutex_lock(&dev_priv->sb_lock); |
@@ -9283,12 +9330,12 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) | |||
9283 | 9330 | ||
9284 | #undef BEND_IDX | 9331 | #undef BEND_IDX |
9285 | 9332 | ||
9286 | static void lpt_init_pch_refclk(struct drm_device *dev) | 9333 | static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) |
9287 | { | 9334 | { |
9288 | struct intel_encoder *encoder; | 9335 | struct intel_encoder *encoder; |
9289 | bool has_vga = false; | 9336 | bool has_vga = false; |
9290 | 9337 | ||
9291 | for_each_intel_encoder(dev, encoder) { | 9338 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
9292 | switch (encoder->type) { | 9339 | switch (encoder->type) { |
9293 | case INTEL_OUTPUT_ANALOG: | 9340 | case INTEL_OUTPUT_ANALOG: |
9294 | has_vga = true; | 9341 | has_vga = true; |
@@ -9299,24 +9346,22 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
9299 | } | 9346 | } |
9300 | 9347 | ||
9301 | if (has_vga) { | 9348 | if (has_vga) { |
9302 | lpt_bend_clkout_dp(to_i915(dev), 0); | 9349 | lpt_bend_clkout_dp(dev_priv, 0); |
9303 | lpt_enable_clkout_dp(dev, true, true); | 9350 | lpt_enable_clkout_dp(dev_priv, true, true); |
9304 | } else { | 9351 | } else { |
9305 | lpt_disable_clkout_dp(dev); | 9352 | lpt_disable_clkout_dp(dev_priv); |
9306 | } | 9353 | } |
9307 | } | 9354 | } |
9308 | 9355 | ||
9309 | /* | 9356 | /* |
9310 | * Initialize reference clocks when the driver loads | 9357 | * Initialize reference clocks when the driver loads |
9311 | */ | 9358 | */ |
9312 | void intel_init_pch_refclk(struct drm_device *dev) | 9359 | void intel_init_pch_refclk(struct drm_i915_private *dev_priv) |
9313 | { | 9360 | { |
9314 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
9315 | |||
9316 | if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) | 9361 | if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) |
9317 | ironlake_init_pch_refclk(dev); | 9362 | ironlake_init_pch_refclk(dev_priv); |
9318 | else if (HAS_PCH_LPT(dev_priv)) | 9363 | else if (HAS_PCH_LPT(dev_priv)) |
9319 | lpt_init_pch_refclk(dev); | 9364 | lpt_init_pch_refclk(dev_priv); |
9320 | } | 9365 | } |
9321 | 9366 | ||
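
Note: the refclk hunks above repeat one mechanical refactor. Helpers that used to take struct drm_device * and immediately call to_i915() now take struct drm_i915_private * directly, reaching the embedded drm_device via &dev_priv->drm only where a DRM core iterator needs it. A self-contained sketch of the before/after shape (stub types; the kernel derives the container with container_of(), not a cast):

#include <stdio.h>

struct drm_device { int dummy; };
struct drm_i915_private { struct drm_device drm; };

/* Old style: every callee re-derives the private pointer. */
static struct drm_i915_private *to_i915(struct drm_device *dev)
{
	/* valid here because drm is the first member */
	return (struct drm_i915_private *)dev;
}

static void helper_old(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	printf("old: i915 at %p\n", (void *)dev_priv);
}

/* New style: take dev_priv; use &dev_priv->drm only when needed. */
static void helper_new(struct drm_i915_private *dev_priv)
{
	printf("new: drm at %p\n", (void *)&dev_priv->drm);
}

int main(void)
{
	struct drm_i915_private i915 = { { 0 } };

	helper_old(&i915.drm);
	helper_new(&i915);
	return 0;
}
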
9322 | static void ironlake_set_pipeconf(struct drm_crtc *crtc) | 9367 | static void ironlake_set_pipeconf(struct drm_crtc *crtc) |
@@ -10165,7 +10210,6 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | |||
10165 | */ | 10210 | */ |
10166 | void hsw_enable_pc8(struct drm_i915_private *dev_priv) | 10211 | void hsw_enable_pc8(struct drm_i915_private *dev_priv) |
10167 | { | 10212 | { |
10168 | struct drm_device *dev = &dev_priv->drm; | ||
10169 | uint32_t val; | 10213 | uint32_t val; |
10170 | 10214 | ||
10171 | DRM_DEBUG_KMS("Enabling package C8+\n"); | 10215 | DRM_DEBUG_KMS("Enabling package C8+\n"); |
@@ -10176,19 +10220,18 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) | |||
10176 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); | 10220 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); |
10177 | } | 10221 | } |
10178 | 10222 | ||
10179 | lpt_disable_clkout_dp(dev); | 10223 | lpt_disable_clkout_dp(dev_priv); |
10180 | hsw_disable_lcpll(dev_priv, true, true); | 10224 | hsw_disable_lcpll(dev_priv, true, true); |
10181 | } | 10225 | } |
10182 | 10226 | ||
10183 | void hsw_disable_pc8(struct drm_i915_private *dev_priv) | 10227 | void hsw_disable_pc8(struct drm_i915_private *dev_priv) |
10184 | { | 10228 | { |
10185 | struct drm_device *dev = &dev_priv->drm; | ||
10186 | uint32_t val; | 10229 | uint32_t val; |
10187 | 10230 | ||
10188 | DRM_DEBUG_KMS("Disabling package C8+\n"); | 10231 | DRM_DEBUG_KMS("Disabling package C8+\n"); |
10189 | 10232 | ||
10190 | hsw_restore_lcpll(dev_priv); | 10233 | hsw_restore_lcpll(dev_priv); |
10191 | lpt_init_pch_refclk(dev); | 10234 | lpt_init_pch_refclk(dev_priv); |
10192 | 10235 | ||
10193 | if (HAS_PCH_LPT_LP(dev_priv)) { | 10236 | if (HAS_PCH_LPT_LP(dev_priv)) { |
10194 | val = I915_READ(SOUTH_DSPCLK_GATE_D); | 10237 | val = I915_READ(SOUTH_DSPCLK_GATE_D); |
@@ -10638,7 +10681,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, | |||
10638 | 10681 | ||
10639 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 10682 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
10640 | skylake_get_ddi_pll(dev_priv, port, pipe_config); | 10683 | skylake_get_ddi_pll(dev_priv, port, pipe_config); |
10641 | else if (IS_BROXTON(dev_priv)) | 10684 | else if (IS_GEN9_LP(dev_priv)) |
10642 | bxt_get_ddi_pll(dev_priv, port, pipe_config); | 10685 | bxt_get_ddi_pll(dev_priv, port, pipe_config); |
10643 | else | 10686 | else |
10644 | haswell_get_ddi_pll(dev_priv, port, pipe_config); | 10687 | haswell_get_ddi_pll(dev_priv, port, pipe_config); |
@@ -10683,7 +10726,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
10683 | 10726 | ||
10684 | active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); | 10727 | active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); |
10685 | 10728 | ||
10686 | if (IS_BROXTON(dev_priv) && | 10729 | if (IS_GEN9_LP(dev_priv) && |
10687 | bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { | 10730 | bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { |
10688 | WARN_ON(active); | 10731 | WARN_ON(active); |
10689 | active = true; | 10732 | active = true; |
@@ -10703,7 +10746,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
10703 | I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; | 10746 | I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; |
10704 | 10747 | ||
10705 | if (INTEL_GEN(dev_priv) >= 9) { | 10748 | if (INTEL_GEN(dev_priv) >= 9) { |
10706 | skl_init_scalers(dev_priv, crtc, pipe_config); | 10749 | intel_crtc_init_scalers(crtc, pipe_config); |
10707 | 10750 | ||
10708 | pipe_config->scaler_state.scaler_id = -1; | 10751 | pipe_config->scaler_state.scaler_id = -1; |
10709 | pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); | 10752 | pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); |
@@ -10884,7 +10927,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
10884 | 10927 | ||
10885 | I915_WRITE(CURPOS(pipe), pos); | 10928 | I915_WRITE(CURPOS(pipe), pos); |
10886 | 10929 | ||
10887 | if (IS_845G(dev_priv) || IS_I865G(dev_priv)) | 10930 | if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) |
10888 | i845_update_cursor(crtc, base, plane_state); | 10931 | i845_update_cursor(crtc, base, plane_state); |
10889 | else | 10932 | else |
10890 | i9xx_update_cursor(crtc, base, plane_state); | 10933 | i9xx_update_cursor(crtc, base, plane_state); |
@@ -10902,11 +10945,11 @@ static bool cursor_size_ok(struct drm_i915_private *dev_priv, | |||
10902 | * the precision of the register. Everything else requires | 10945 | * the precision of the register. Everything else requires |
10903 | * square cursors, limited to a few power-of-two sizes. | 10946 | * square cursors, limited to a few power-of-two sizes. |
10904 | */ | 10947 | */ |
10905 | if (IS_845G(dev_priv) || IS_I865G(dev_priv)) { | 10948 | if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { |
10906 | if ((width & 63) != 0) | 10949 | if ((width & 63) != 0) |
10907 | return false; | 10950 | return false; |
10908 | 10951 | ||
10909 | if (width > (IS_845G(dev_priv) ? 64 : 512)) | 10952 | if (width > (IS_I845G(dev_priv) ? 64 : 512)) |
10910 | return false; | 10953 | return false; |
10911 | 10954 | ||
10912 | if (height > 1023) | 10955 | if (height > 1023) |
@@ -10996,7 +11039,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, | |||
10996 | struct drm_i915_gem_object *obj; | 11039 | struct drm_i915_gem_object *obj; |
10997 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; | 11040 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; |
10998 | 11041 | ||
10999 | obj = i915_gem_object_create(dev, | 11042 | obj = i915_gem_object_create(to_i915(dev), |
11000 | intel_framebuffer_size_for_mode(mode, bpp)); | 11043 | intel_framebuffer_size_for_mode(mode, bpp)); |
11001 | if (IS_ERR(obj)) | 11044 | if (IS_ERR(obj)) |
11002 | return ERR_CAST(obj); | 11045 | return ERR_CAST(obj); |
@@ -12253,7 +12296,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
12253 | INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); | 12296 | INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); |
12254 | queue_work(system_unbound_wq, &work->mmio_work); | 12297 | queue_work(system_unbound_wq, &work->mmio_work); |
12255 | } else { | 12298 | } else { |
12256 | request = i915_gem_request_alloc(engine, engine->last_context); | 12299 | request = i915_gem_request_alloc(engine, |
12300 | dev_priv->kernel_context); | ||
12257 | if (IS_ERR(request)) { | 12301 | if (IS_ERR(request)) { |
12258 | ret = PTR_ERR(request); | 12302 | ret = PTR_ERR(request); |
12259 | goto cleanup_unpin; | 12303 | goto cleanup_unpin; |
@@ -12781,39 +12825,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
12781 | DRM_DEBUG_KMS("ips: %i, double wide: %i\n", | 12825 | DRM_DEBUG_KMS("ips: %i, double wide: %i\n", |
12782 | pipe_config->ips_enabled, pipe_config->double_wide); | 12826 | pipe_config->ips_enabled, pipe_config->double_wide); |
12783 | 12827 | ||
12784 | if (IS_BROXTON(dev_priv)) { | 12828 | intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); |
12785 | DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," | ||
12786 | "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " | ||
12787 | "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", | ||
12788 | pipe_config->dpll_hw_state.ebb0, | ||
12789 | pipe_config->dpll_hw_state.ebb4, | ||
12790 | pipe_config->dpll_hw_state.pll0, | ||
12791 | pipe_config->dpll_hw_state.pll1, | ||
12792 | pipe_config->dpll_hw_state.pll2, | ||
12793 | pipe_config->dpll_hw_state.pll3, | ||
12794 | pipe_config->dpll_hw_state.pll6, | ||
12795 | pipe_config->dpll_hw_state.pll8, | ||
12796 | pipe_config->dpll_hw_state.pll9, | ||
12797 | pipe_config->dpll_hw_state.pll10, | ||
12798 | pipe_config->dpll_hw_state.pcsdw12); | ||
12799 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | ||
12800 | DRM_DEBUG_KMS("dpll_hw_state: " | ||
12801 | "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", | ||
12802 | pipe_config->dpll_hw_state.ctrl1, | ||
12803 | pipe_config->dpll_hw_state.cfgcr1, | ||
12804 | pipe_config->dpll_hw_state.cfgcr2); | ||
12805 | } else if (HAS_DDI(dev_priv)) { | ||
12806 | DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", | ||
12807 | pipe_config->dpll_hw_state.wrpll, | ||
12808 | pipe_config->dpll_hw_state.spll); | ||
12809 | } else { | ||
12810 | DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " | ||
12811 | "fp0: 0x%x, fp1: 0x%x\n", | ||
12812 | pipe_config->dpll_hw_state.dpll, | ||
12813 | pipe_config->dpll_hw_state.dpll_md, | ||
12814 | pipe_config->dpll_hw_state.fp0, | ||
12815 | pipe_config->dpll_hw_state.fp1); | ||
12816 | } | ||
12817 | 12829 | ||
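
Note: the removed if/else ladder above is replaced by one intel_dpll_dump_hw_state() call, so the per-platform register formatting lives next to the DPLL code instead of being duplicated in the dump path. A toy model of that consolidation (the struct layout and platform enum here are invented for illustration):

#include <stdio.h>

struct dpll_hw_state {
	unsigned int wrpll, spll;           /* HSW/BDW style */
	unsigned int ctrl1, cfgcr1, cfgcr2; /* SKL/KBL style */
};

enum platform { PLAT_HSW, PLAT_SKL };

/* One helper owns the formatting, so every caller prints it alike. */
static void dpll_dump_hw_state(enum platform p,
			       const struct dpll_hw_state *hw)
{
	switch (p) {
	case PLAT_SKL:
		printf("dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		       hw->ctrl1, hw->cfgcr1, hw->cfgcr2);
		break;
	case PLAT_HSW:
		printf("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		       hw->wrpll, hw->spll);
		break;
	}
}

int main(void)
{
	struct dpll_hw_state hw = { .ctrl1 = 0x1, .cfgcr1 = 0x2, .cfgcr2 = 0x3 };

	dpll_dump_hw_state(PLAT_SKL, &hw);
	return 0;
}
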
12818 | DRM_DEBUG_KMS("planes on this crtc\n"); | 12830 | DRM_DEBUG_KMS("planes on this crtc\n"); |
12819 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | 12831 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { |
@@ -13157,6 +13169,31 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, | |||
13157 | return false; | 13169 | return false; |
13158 | } | 13170 | } |
13159 | 13171 | ||
13172 | static void __printf(3, 4) | ||
13173 | pipe_config_err(bool adjust, const char *name, const char *format, ...) | ||
13174 | { | ||
13175 | char *level; | ||
13176 | unsigned int category; | ||
13177 | struct va_format vaf; | ||
13178 | va_list args; | ||
13179 | |||
13180 | if (adjust) { | ||
13181 | level = KERN_DEBUG; | ||
13182 | category = DRM_UT_KMS; | ||
13183 | } else { | ||
13184 | level = KERN_ERR; | ||
13185 | category = DRM_UT_NONE; | ||
13186 | } | ||
13187 | |||
13188 | va_start(args, format); | ||
13189 | vaf.fmt = format; | ||
13190 | vaf.va = &args; | ||
13191 | |||
13192 | drm_printk(level, category, "mismatch in %s %pV", name, &vaf); | ||
13193 | |||
13194 | va_end(args); | ||
13195 | } | ||
13196 | |||
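
Note: pipe_config_err() above forwards the caller's varargs through struct va_format and printk's %pV extension, so the severity (debug vs. error) is decided in one place. A user-space analogue using vfprintf(), since %pV is kernel-only:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

static void __attribute__((format(printf, 3, 4)))
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	va_list args;

	/* severity chosen once, up front, as in the kernel helper */
	fprintf(stderr, "%s: mismatch in %s ",
		adjust ? "debug" : "error", name);

	va_start(args, format);
	vfprintf(stderr, format, args);
	va_end(args);
}

int main(void)
{
	pipe_config_err(false, "pixel_multiplier",
			"(expected %i, found %i)\n", 1, 2);
	return 0;
}
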
13160 | static bool | 13197 | static bool |
13161 | intel_pipe_config_compare(struct drm_i915_private *dev_priv, | 13198 | intel_pipe_config_compare(struct drm_i915_private *dev_priv, |
13162 | struct intel_crtc_state *current_config, | 13199 | struct intel_crtc_state *current_config, |
@@ -13165,17 +13202,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13165 | { | 13202 | { |
13166 | bool ret = true; | 13203 | bool ret = true; |
13167 | 13204 | ||
13168 | #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \ | ||
13169 | do { \ | ||
13170 | if (!adjust) \ | ||
13171 | DRM_ERROR(fmt, ##__VA_ARGS__); \ | ||
13172 | else \ | ||
13173 | DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ | ||
13174 | } while (0) | ||
13175 | |||
13176 | #define PIPE_CONF_CHECK_X(name) \ | 13205 | #define PIPE_CONF_CHECK_X(name) \ |
13177 | if (current_config->name != pipe_config->name) { \ | 13206 | if (current_config->name != pipe_config->name) { \ |
13178 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | 13207 | pipe_config_err(adjust, __stringify(name), \ |
13179 | "(expected 0x%08x, found 0x%08x)\n", \ | 13208 | "(expected 0x%08x, found 0x%08x)\n", \ |
13180 | current_config->name, \ | 13209 | current_config->name, \ |
13181 | pipe_config->name); \ | 13210 | pipe_config->name); \ |
@@ -13184,7 +13213,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13184 | 13213 | ||
13185 | #define PIPE_CONF_CHECK_I(name) \ | 13214 | #define PIPE_CONF_CHECK_I(name) \ |
13186 | if (current_config->name != pipe_config->name) { \ | 13215 | if (current_config->name != pipe_config->name) { \ |
13187 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | 13216 | pipe_config_err(adjust, __stringify(name), \ |
13188 | "(expected %i, found %i)\n", \ | 13217 | "(expected %i, found %i)\n", \ |
13189 | current_config->name, \ | 13218 | current_config->name, \ |
13190 | pipe_config->name); \ | 13219 | pipe_config->name); \ |
@@ -13193,7 +13222,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13193 | 13222 | ||
13194 | #define PIPE_CONF_CHECK_P(name) \ | 13223 | #define PIPE_CONF_CHECK_P(name) \ |
13195 | if (current_config->name != pipe_config->name) { \ | 13224 | if (current_config->name != pipe_config->name) { \ |
13196 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | 13225 | pipe_config_err(adjust, __stringify(name), \ |
13197 | "(expected %p, found %p)\n", \ | 13226 | "(expected %p, found %p)\n", \ |
13198 | current_config->name, \ | 13227 | current_config->name, \ |
13199 | pipe_config->name); \ | 13228 | pipe_config->name); \ |
@@ -13204,7 +13233,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13204 | if (!intel_compare_link_m_n(¤t_config->name, \ | 13233 | if (!intel_compare_link_m_n(¤t_config->name, \ |
13205 | &pipe_config->name,\ | 13234 | &pipe_config->name,\ |
13206 | adjust)) { \ | 13235 | adjust)) { \ |
13207 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | 13236 | pipe_config_err(adjust, __stringify(name), \ |
13208 | "(expected tu %i gmch %i/%i link %i/%i, " \ | 13237 | "(expected tu %i gmch %i/%i link %i/%i, " \ |
13209 | "found tu %i, gmch %i/%i link %i/%i)\n", \ | 13238 | "found tu %i, gmch %i/%i link %i/%i)\n", \ |
13210 | current_config->name.tu, \ | 13239 | current_config->name.tu, \ |
@@ -13230,7 +13259,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13230 | &pipe_config->name, adjust) && \ | 13259 | &pipe_config->name, adjust) && \ |
13231 | !intel_compare_link_m_n(¤t_config->alt_name, \ | 13260 | !intel_compare_link_m_n(¤t_config->alt_name, \ |
13232 | &pipe_config->name, adjust)) { \ | 13261 | &pipe_config->name, adjust)) { \ |
13233 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | 13262 | pipe_config_err(adjust, __stringify(name), \ |
13234 | "(expected tu %i gmch %i/%i link %i/%i, " \ | 13263 | "(expected tu %i gmch %i/%i link %i/%i, " \ |
13235 | "or tu %i gmch %i/%i link %i/%i, " \ | 13264 | "or tu %i gmch %i/%i link %i/%i, " \ |
13236 | "found tu %i, gmch %i/%i link %i/%i)\n", \ | 13265 | "found tu %i, gmch %i/%i link %i/%i)\n", \ |
@@ -13254,8 +13283,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13254 | 13283 | ||
13255 | #define PIPE_CONF_CHECK_FLAGS(name, mask) \ | 13284 | #define PIPE_CONF_CHECK_FLAGS(name, mask) \ |
13256 | if ((current_config->name ^ pipe_config->name) & (mask)) { \ | 13285 | if ((current_config->name ^ pipe_config->name) & (mask)) { \ |
13257 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ | 13286 | pipe_config_err(adjust, __stringify(name), \ |
13258 | "(expected %i, found %i)\n", \ | 13287 | "(%x) (expected %i, found %i)\n", \ |
13288 | (mask), \ | ||
13259 | current_config->name & (mask), \ | 13289 | current_config->name & (mask), \ |
13260 | pipe_config->name & (mask)); \ | 13290 | pipe_config->name & (mask)); \ |
13261 | ret = false; \ | 13291 | ret = false; \ |
@@ -13263,7 +13293,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13263 | 13293 | ||
13264 | #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ | 13294 | #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ |
13265 | if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ | 13295 | if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ |
13266 | INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ | 13296 | pipe_config_err(adjust, __stringify(name), \ |
13267 | "(expected %i, found %i)\n", \ | 13297 | "(expected %i, found %i)\n", \ |
13268 | current_config->name, \ | 13298 | current_config->name, \ |
13269 | pipe_config->name); \ | 13299 | pipe_config->name); \ |
@@ -13380,7 +13410,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
13380 | #undef PIPE_CONF_CHECK_FLAGS | 13410 | #undef PIPE_CONF_CHECK_FLAGS |
13381 | #undef PIPE_CONF_CHECK_CLOCK_FUZZY | 13411 | #undef PIPE_CONF_CHECK_CLOCK_FUZZY |
13382 | #undef PIPE_CONF_QUIRK | 13412 | #undef PIPE_CONF_QUIRK |
13383 | #undef INTEL_ERR_OR_DBG_KMS | ||
13384 | 13413 | ||
13385 | return ret; | 13414 | return ret; |
13386 | } | 13415 | } |
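
Note: the PIPE_CONF_CHECK_* macros above compare one field at a time, report mismatches via the new helper using the stringified field name, and latch failure into ret rather than returning early, so a single pass logs every mismatch. A compilable miniature of the same macro pattern:

#include <stdbool.h>
#include <stdio.h>

#define __stringify(x) #x

/* Same shape as PIPE_CONF_CHECK_I above. */
#define CHECK_I(name) \
	if (cur->name != found->name) { \
		printf("mismatch in %s (expected %i, found %i)\n", \
		       __stringify(name), cur->name, found->name); \
		ret = false; \
	}

struct config { int cpu_transcoder; int lane_count; };

static bool compare(const struct config *cur, const struct config *found)
{
	bool ret = true;

	CHECK_I(cpu_transcoder);
	CHECK_I(lane_count);

	return ret;
}

int main(void)
{
	struct config a = { 0, 2 }, b = { 0, 4 };

	printf("match: %d\n", compare(&a, &b));
	return 0;
}
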
@@ -13681,9 +13710,9 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, | |||
13681 | } | 13710 | } |
13682 | 13711 | ||
13683 | if (!crtc) { | 13712 | if (!crtc) { |
13684 | I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask, | 13713 | I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, |
13685 | "more active pll users than references: %x vs %x\n", | 13714 | "more active pll users than references: %x vs %x\n", |
13686 | pll->active_mask, pll->config.crtc_mask); | 13715 | pll->active_mask, pll->state.crtc_mask); |
13687 | 13716 | ||
13688 | return; | 13717 | return; |
13689 | } | 13718 | } |
@@ -13699,11 +13728,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, | |||
13699 | "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", | 13728 | "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", |
13700 | pipe_name(drm_crtc_index(crtc)), pll->active_mask); | 13729 | pipe_name(drm_crtc_index(crtc)), pll->active_mask); |
13701 | 13730 | ||
13702 | I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask), | 13731 | I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), |
13703 | "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", | 13732 | "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", |
13704 | crtc_mask, pll->config.crtc_mask); | 13733 | crtc_mask, pll->state.crtc_mask); |
13705 | 13734 | ||
13706 | I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, | 13735 | I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, |
13707 | &dpll_hw_state, | 13736 | &dpll_hw_state, |
13708 | sizeof(dpll_hw_state)), | 13737 | sizeof(dpll_hw_state)), |
13709 | "pll hw state mismatch\n"); | 13738 | "pll hw state mismatch\n"); |
@@ -13729,7 +13758,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, | |||
13729 | I915_STATE_WARN(pll->active_mask & crtc_mask, | 13758 | I915_STATE_WARN(pll->active_mask & crtc_mask, |
13730 | "pll active mismatch (didn't expect pipe %c in active mask)\n", | 13759 | "pll active mismatch (didn't expect pipe %c in active mask)\n", |
13731 | pipe_name(drm_crtc_index(crtc))); | 13760 | pipe_name(drm_crtc_index(crtc))); |
13732 | I915_STATE_WARN(pll->config.crtc_mask & crtc_mask, | 13761 | I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, |
13733 | "pll enabled crtcs mismatch (found %x in enabled mask)\n", | 13762 | "pll enabled crtcs mismatch (found %x in enabled mask)\n", |
13734 | pipe_name(drm_crtc_index(crtc))); | 13763 | pipe_name(drm_crtc_index(crtc))); |
13735 | } | 13764 | } |
@@ -13812,7 +13841,6 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state) | |||
13812 | { | 13841 | { |
13813 | struct drm_device *dev = state->dev; | 13842 | struct drm_device *dev = state->dev; |
13814 | struct drm_i915_private *dev_priv = to_i915(dev); | 13843 | struct drm_i915_private *dev_priv = to_i915(dev); |
13815 | struct intel_shared_dpll_config *shared_dpll = NULL; | ||
13816 | struct drm_crtc *crtc; | 13844 | struct drm_crtc *crtc; |
13817 | struct drm_crtc_state *crtc_state; | 13845 | struct drm_crtc_state *crtc_state; |
13818 | int i; | 13846 | int i; |
@@ -13833,10 +13861,7 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state) | |||
13833 | if (!old_dpll) | 13861 | if (!old_dpll) |
13834 | continue; | 13862 | continue; |
13835 | 13863 | ||
13836 | if (!shared_dpll) | 13864 | intel_release_shared_dpll(old_dpll, intel_crtc, state); |
13837 | shared_dpll = intel_atomic_get_shared_dpll_state(state); | ||
13838 | |||
13839 | intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc); | ||
13840 | } | 13865 | } |
13841 | } | 13866 | } |
13842 | 13867 | ||
@@ -13905,14 +13930,34 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) | |||
13905 | return 0; | 13930 | return 0; |
13906 | } | 13931 | } |
13907 | 13932 | ||
13933 | static int intel_lock_all_pipes(struct drm_atomic_state *state) | ||
13934 | { | ||
13935 | struct drm_crtc *crtc; | ||
13936 | |||
13937 | /* Add all pipes to the state */ | ||
13938 | for_each_crtc(state->dev, crtc) { | ||
13939 | struct drm_crtc_state *crtc_state; | ||
13940 | |||
13941 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
13942 | if (IS_ERR(crtc_state)) | ||
13943 | return PTR_ERR(crtc_state); | ||
13944 | } | ||
13945 | |||
13946 | return 0; | ||
13947 | } | ||
13948 | |||
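
Note: intel_lock_all_pipes() relies on drm_atomic_get_crtc_state() implicitly acquiring each CRTC's modeset lock; adding every pipe to the state therefore serializes against concurrent commits without forcing a modeset on any of them. A loose user-space analogue with one mutex per pipe (the mapping to DRM locking is illustrative only):

#include <pthread.h>
#include <stdio.h>

#define NUM_PIPES 3

static pthread_mutex_t pipe_lock[NUM_PIPES] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
};

static int lock_all_pipes(void)
{
	for (int i = 0; i < NUM_PIPES; i++) {
		/* in DRM, getting the crtc_state takes this lock for us */
		if (pthread_mutex_lock(&pipe_lock[i]) != 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	if (lock_all_pipes() == 0)
		printf("all pipe locks held; safe to touch shared cdclk state\n");
	return 0;
}
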
13908 | static int intel_modeset_all_pipes(struct drm_atomic_state *state) | 13949 | static int intel_modeset_all_pipes(struct drm_atomic_state *state) |
13909 | { | 13950 | { |
13910 | struct drm_crtc *crtc; | 13951 | struct drm_crtc *crtc; |
13911 | struct drm_crtc_state *crtc_state; | ||
13912 | int ret = 0; | ||
13913 | 13952 | ||
13914 | /* add all active pipes to the state */ | 13953 | /* |
13954 | * Add all pipes to the state, and force | ||
13955 | * a modeset on all the active ones. | ||
13956 | */ | ||
13915 | for_each_crtc(state->dev, crtc) { | 13957 | for_each_crtc(state->dev, crtc) { |
13958 | struct drm_crtc_state *crtc_state; | ||
13959 | int ret; | ||
13960 | |||
13916 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | 13961 | crtc_state = drm_atomic_get_crtc_state(state, crtc); |
13917 | if (IS_ERR(crtc_state)) | 13962 | if (IS_ERR(crtc_state)) |
13918 | return PTR_ERR(crtc_state); | 13963 | return PTR_ERR(crtc_state); |
@@ -13924,14 +13969,14 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) | |||
13924 | 13969 | ||
13925 | ret = drm_atomic_add_affected_connectors(state, crtc); | 13970 | ret = drm_atomic_add_affected_connectors(state, crtc); |
13926 | if (ret) | 13971 | if (ret) |
13927 | break; | 13972 | return ret; |
13928 | 13973 | ||
13929 | ret = drm_atomic_add_affected_planes(state, crtc); | 13974 | ret = drm_atomic_add_affected_planes(state, crtc); |
13930 | if (ret) | 13975 | if (ret) |
13931 | break; | 13976 | return ret; |
13932 | } | 13977 | } |
13933 | 13978 | ||
13934 | return ret; | 13979 | return 0; |
13935 | } | 13980 | } |
13936 | 13981 | ||
13937 | static int intel_modeset_checks(struct drm_atomic_state *state) | 13982 | static int intel_modeset_checks(struct drm_atomic_state *state) |
@@ -13977,12 +14022,24 @@ static int intel_modeset_checks(struct drm_atomic_state *state) | |||
13977 | if (ret < 0) | 14022 | if (ret < 0) |
13978 | return ret; | 14023 | return ret; |
13979 | 14024 | ||
14025 | /* | ||
14026 | * Writes to dev_priv->atomic_cdclk_freq must protected by | ||
14027 | * holding all the crtc locks, even if we don't end up | ||
14028 | * touching the hardware | ||
14029 | */ | ||
14030 | if (intel_state->cdclk != dev_priv->atomic_cdclk_freq) { | ||
14031 | ret = intel_lock_all_pipes(state); | ||
14032 | if (ret < 0) | ||
14033 | return ret; | ||
14034 | } | ||
14035 | |||
14036 | /* All pipes must be switched off while we change the cdclk. */ | ||
13980 | if (intel_state->dev_cdclk != dev_priv->cdclk_freq || | 14037 | if (intel_state->dev_cdclk != dev_priv->cdclk_freq || |
13981 | intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) | 14038 | intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) { |
13982 | ret = intel_modeset_all_pipes(state); | 14039 | ret = intel_modeset_all_pipes(state); |
13983 | 14040 | if (ret < 0) | |
13984 | if (ret < 0) | 14041 | return ret; |
13985 | return ret; | 14042 | } |
13986 | 14043 | ||
13987 | DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n", | 14044 | DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n", |
13988 | intel_state->cdclk, intel_state->dev_cdclk); | 14045 | intel_state->cdclk, intel_state->dev_cdclk); |
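
Note: the reworked intel_modeset_checks() logic above distinguishes two cases. If only the software bookkeeping (atomic_cdclk_freq) changes, holding every CRTC lock is enough; if the hardware CDCLK (or the PLL VCO, elided below) changes, every pipe must undergo a full modeset. A simplified decision sketch; the real code applies both checks independently rather than picking one:

#include <stdio.h>

static const char *cdclk_action(int new_cdclk, int atomic_cdclk_freq,
				int new_dev_cdclk, int hw_cdclk_freq)
{
	if (new_dev_cdclk != hw_cdclk_freq)
		return "modeset all pipes"; /* hardware clock changes */
	if (new_cdclk != atomic_cdclk_freq)
		return "lock all pipes";    /* only bookkeeping changes */
	return "nothing extra";
}

int main(void)
{
	printf("%s\n", cdclk_action(316800, 79200, 79200, 79200));
	printf("%s\n", cdclk_action(316800, 316800, 316800, 79200));
	return 0;
}
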
@@ -14570,7 +14627,7 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
14570 | 14627 | ||
14571 | drm_atomic_helper_swap_state(state, true); | 14628 | drm_atomic_helper_swap_state(state, true); |
14572 | dev_priv->wm.distrust_bios_wm = false; | 14629 | dev_priv->wm.distrust_bios_wm = false; |
14573 | intel_shared_dpll_commit(state); | 14630 | intel_shared_dpll_swap_state(state); |
14574 | intel_atomic_track_fbs(state); | 14631 | intel_atomic_track_fbs(state); |
14575 | 14632 | ||
14576 | if (intel_state->modeset) { | 14633 | if (intel_state->modeset) { |
@@ -15112,6 +15169,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
15112 | primary->plane = (enum plane) !pipe; | 15169 | primary->plane = (enum plane) !pipe; |
15113 | else | 15170 | else |
15114 | primary->plane = (enum plane) pipe; | 15171 | primary->plane = (enum plane) pipe; |
15172 | primary->id = PLANE_PRIMARY; | ||
15115 | primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); | 15173 | primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); |
15116 | primary->check_plane = intel_check_primary_plane; | 15174 | primary->check_plane = intel_check_primary_plane; |
15117 | 15175 | ||
@@ -15311,6 +15369,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
15311 | cursor->max_downscale = 1; | 15369 | cursor->max_downscale = 1; |
15312 | cursor->pipe = pipe; | 15370 | cursor->pipe = pipe; |
15313 | cursor->plane = pipe; | 15371 | cursor->plane = pipe; |
15372 | cursor->id = PLANE_CURSOR; | ||
15314 | cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); | 15373 | cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); |
15315 | cursor->check_plane = intel_check_cursor_plane; | 15374 | cursor->check_plane = intel_check_cursor_plane; |
15316 | cursor->update_plane = intel_update_cursor_plane; | 15375 | cursor->update_plane = intel_update_cursor_plane; |
@@ -15345,14 +15404,18 @@ fail: | |||
15345 | return ERR_PTR(ret); | 15404 | return ERR_PTR(ret); |
15346 | } | 15405 | } |
15347 | 15406 | ||
15348 | static void skl_init_scalers(struct drm_i915_private *dev_priv, | 15407 | static void intel_crtc_init_scalers(struct intel_crtc *crtc, |
15349 | struct intel_crtc *crtc, | 15408 | struct intel_crtc_state *crtc_state) |
15350 | struct intel_crtc_state *crtc_state) | ||
15351 | { | 15409 | { |
15352 | struct intel_crtc_scaler_state *scaler_state = | 15410 | struct intel_crtc_scaler_state *scaler_state = |
15353 | &crtc_state->scaler_state; | 15411 | &crtc_state->scaler_state; |
15412 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
15354 | int i; | 15413 | int i; |
15355 | 15414 | ||
15415 | crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe]; | ||
15416 | if (!crtc->num_scalers) | ||
15417 | return; | ||
15418 | |||
15356 | for (i = 0; i < crtc->num_scalers; i++) { | 15419 | for (i = 0; i < crtc->num_scalers; i++) { |
15357 | struct intel_scaler *scaler = &scaler_state->scalers[i]; | 15420 | struct intel_scaler *scaler = &scaler_state->scalers[i]; |
15358 | 15421 | ||
@@ -15384,21 +15447,12 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
15384 | intel_crtc->base.state = &crtc_state->base; | 15447 | intel_crtc->base.state = &crtc_state->base; |
15385 | crtc_state->base.crtc = &intel_crtc->base; | 15448 | crtc_state->base.crtc = &intel_crtc->base; |
15386 | 15449 | ||
15387 | /* initialize shared scalers */ | ||
15388 | if (INTEL_GEN(dev_priv) >= 9) { | ||
15389 | if (pipe == PIPE_C) | ||
15390 | intel_crtc->num_scalers = 1; | ||
15391 | else | ||
15392 | intel_crtc->num_scalers = SKL_NUM_SCALERS; | ||
15393 | |||
15394 | skl_init_scalers(dev_priv, intel_crtc, crtc_state); | ||
15395 | } | ||
15396 | |||
15397 | primary = intel_primary_plane_create(dev_priv, pipe); | 15450 | primary = intel_primary_plane_create(dev_priv, pipe); |
15398 | if (IS_ERR(primary)) { | 15451 | if (IS_ERR(primary)) { |
15399 | ret = PTR_ERR(primary); | 15452 | ret = PTR_ERR(primary); |
15400 | goto fail; | 15453 | goto fail; |
15401 | } | 15454 | } |
15455 | intel_crtc->plane_ids_mask |= BIT(primary->id); | ||
15402 | 15456 | ||
15403 | for_each_sprite(dev_priv, pipe, sprite) { | 15457 | for_each_sprite(dev_priv, pipe, sprite) { |
15404 | struct intel_plane *plane; | 15458 | struct intel_plane *plane; |
@@ -15408,6 +15462,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
15408 | ret = PTR_ERR(plane); | 15462 | ret = PTR_ERR(plane); |
15409 | goto fail; | 15463 | goto fail; |
15410 | } | 15464 | } |
15465 | intel_crtc->plane_ids_mask |= BIT(plane->id); | ||
15411 | } | 15466 | } |
15412 | 15467 | ||
15413 | cursor = intel_cursor_plane_create(dev_priv, pipe); | 15468 | cursor = intel_cursor_plane_create(dev_priv, pipe); |
@@ -15415,6 +15470,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
15415 | ret = PTR_ERR(cursor); | 15470 | ret = PTR_ERR(cursor); |
15416 | goto fail; | 15471 | goto fail; |
15417 | } | 15472 | } |
15473 | intel_crtc->plane_ids_mask |= BIT(cursor->id); | ||
15418 | 15474 | ||
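
Note: the new plane->id / plane_ids_mask pair above gives each CRTC a bitmask of the planes it owns, so later code can test membership or walk the planes with plain bit operations. A small stand-alone illustration (the enum values are placeholders for the driver's plane_id enum):

#include <stdio.h>

enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1, PLANE_CURSOR };
#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int plane_ids_mask = 0;

	/* Each successfully created plane contributes one bit, as in
	 * the crtc init path above. */
	plane_ids_mask |= BIT(PLANE_PRIMARY);
	plane_ids_mask |= BIT(PLANE_SPRITE0);
	plane_ids_mask |= BIT(PLANE_CURSOR);

	printf("has cursor: %d\n", !!(plane_ids_mask & BIT(PLANE_CURSOR)));
	printf("mask: 0x%x\n", plane_ids_mask);
	return 0;
}
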
15419 | ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, | 15475 | ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, |
15420 | &primary->base, &cursor->base, | 15476 | &primary->base, &cursor->base, |
@@ -15432,6 +15488,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
15432 | 15488 | ||
15433 | intel_crtc->wm.cxsr_allowed = true; | 15489 | intel_crtc->wm.cxsr_allowed = true; |
15434 | 15490 | ||
15491 | /* initialize shared scalers */ | ||
15492 | intel_crtc_init_scalers(intel_crtc, crtc_state); | ||
15493 | |||
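
Note: intel_crtc_init_scalers() now reads the scaler count from the per-platform device info instead of hardcoding the gen9 pipe-C special case, and returns early when a pipe has none. A sketch with a hypothetical info table; the real counts live in dev_priv->info.num_scalers[]:

#include <stdio.h>

#define MAX_PIPES 3

/* Hypothetical per-pipe counts (e.g. two for pipes A/B and one for
 * pipe C on SKL; pre-gen9 parts would report zero). */
static const int num_scalers[MAX_PIPES] = { 2, 2, 1 };

struct scaler { int in_use; };

static void crtc_init_scalers(int pipe, struct scaler *scalers)
{
	int n = num_scalers[pipe];

	if (!n) /* nothing to initialize on scaler-less pipes */
		return;

	for (int i = 0; i < n; i++)
		scalers[i].in_use = 0;

	printf("pipe %d: %d scaler(s) reset\n", pipe, n);
}

int main(void)
{
	struct scaler scalers[2];

	for (int pipe = 0; pipe < MAX_PIPES; pipe++)
		crtc_init_scalers(pipe, scalers);
	return 0;
}
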
15435 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || | 15494 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || |
15436 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); | 15495 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); |
15437 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; | 15496 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; |
@@ -15568,7 +15627,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) | |||
15568 | 15627 | ||
15569 | static void intel_pps_init(struct drm_i915_private *dev_priv) | 15628 | static void intel_pps_init(struct drm_i915_private *dev_priv) |
15570 | { | 15629 | { |
15571 | if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv)) | 15630 | if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) |
15572 | dev_priv->pps_mmio_base = PCH_PPS_BASE; | 15631 | dev_priv->pps_mmio_base = PCH_PPS_BASE; |
15573 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 15632 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
15574 | dev_priv->pps_mmio_base = VLV_PPS_BASE; | 15633 | dev_priv->pps_mmio_base = VLV_PPS_BASE; |
@@ -15578,9 +15637,8 @@ static void intel_pps_init(struct drm_i915_private *dev_priv) | |||
15578 | intel_pps_unlock_regs_wa(dev_priv); | 15637 | intel_pps_unlock_regs_wa(dev_priv); |
15579 | } | 15638 | } |
15580 | 15639 | ||
15581 | static void intel_setup_outputs(struct drm_device *dev) | 15640 | static void intel_setup_outputs(struct drm_i915_private *dev_priv) |
15582 | { | 15641 | { |
15583 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
15584 | struct intel_encoder *encoder; | 15642 | struct intel_encoder *encoder; |
15585 | bool dpd_is_edp = false; | 15643 | bool dpd_is_edp = false; |
15586 | 15644 | ||
@@ -15591,22 +15649,22 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
15591 | * prevent the registration of both eDP and LVDS and the incorrect | 15649 | * prevent the registration of both eDP and LVDS and the incorrect |
15592 | * sharing of the PPS. | 15650 | * sharing of the PPS. |
15593 | */ | 15651 | */ |
15594 | intel_lvds_init(dev); | 15652 | intel_lvds_init(dev_priv); |
15595 | 15653 | ||
15596 | if (intel_crt_present(dev_priv)) | 15654 | if (intel_crt_present(dev_priv)) |
15597 | intel_crt_init(dev); | 15655 | intel_crt_init(dev_priv); |
15598 | 15656 | ||
15599 | if (IS_BROXTON(dev_priv)) { | 15657 | if (IS_GEN9_LP(dev_priv)) { |
15600 | /* | 15658 | /* |
15601 | * FIXME: Broxton doesn't support port detection via the | 15659 | * FIXME: Broxton doesn't support port detection via the |
15602 | * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to | 15660 | * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to |
15603 | * detect the ports. | 15661 | * detect the ports. |
15604 | */ | 15662 | */ |
15605 | intel_ddi_init(dev, PORT_A); | 15663 | intel_ddi_init(dev_priv, PORT_A); |
15606 | intel_ddi_init(dev, PORT_B); | 15664 | intel_ddi_init(dev_priv, PORT_B); |
15607 | intel_ddi_init(dev, PORT_C); | 15665 | intel_ddi_init(dev_priv, PORT_C); |
15608 | 15666 | ||
15609 | intel_dsi_init(dev); | 15667 | intel_dsi_init(dev_priv); |
15610 | } else if (HAS_DDI(dev_priv)) { | 15668 | } else if (HAS_DDI(dev_priv)) { |
15611 | int found; | 15669 | int found; |
15612 | 15670 | ||
@@ -15618,18 +15676,18 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
15618 | found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; | 15676 | found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; |
15619 | /* WaIgnoreDDIAStrap: skl */ | 15677 | /* WaIgnoreDDIAStrap: skl */ |
15620 | if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 15678 | if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
15621 | intel_ddi_init(dev, PORT_A); | 15679 | intel_ddi_init(dev_priv, PORT_A); |
15622 | 15680 | ||
15623 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP | 15681 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP |
15624 | * register */ | 15682 | * register */ |
15625 | found = I915_READ(SFUSE_STRAP); | 15683 | found = I915_READ(SFUSE_STRAP); |
15626 | 15684 | ||
15627 | if (found & SFUSE_STRAP_DDIB_DETECTED) | 15685 | if (found & SFUSE_STRAP_DDIB_DETECTED) |
15628 | intel_ddi_init(dev, PORT_B); | 15686 | intel_ddi_init(dev_priv, PORT_B); |
15629 | if (found & SFUSE_STRAP_DDIC_DETECTED) | 15687 | if (found & SFUSE_STRAP_DDIC_DETECTED) |
15630 | intel_ddi_init(dev, PORT_C); | 15688 | intel_ddi_init(dev_priv, PORT_C); |
15631 | if (found & SFUSE_STRAP_DDID_DETECTED) | 15689 | if (found & SFUSE_STRAP_DDID_DETECTED) |
15632 | intel_ddi_init(dev, PORT_D); | 15690 | intel_ddi_init(dev_priv, PORT_D); |
15633 | /* | 15691 | /* |
15634 | * On SKL we don't have a way to detect DDI-E so we rely on VBT. | 15692 | * On SKL we don't have a way to detect DDI-E so we rely on VBT. |
15635 | */ | 15693 | */ |
@@ -15637,35 +15695,35 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
15637 | (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || | 15695 | (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || |
15638 | dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || | 15696 | dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || |
15639 | dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) | 15697 | dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) |
15640 | intel_ddi_init(dev, PORT_E); | 15698 | intel_ddi_init(dev_priv, PORT_E); |
15641 | 15699 | ||
15642 | } else if (HAS_PCH_SPLIT(dev_priv)) { | 15700 | } else if (HAS_PCH_SPLIT(dev_priv)) { |
15643 | int found; | 15701 | int found; |
15644 | dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); | 15702 | dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); |
15645 | 15703 | ||
15646 | if (has_edp_a(dev_priv)) | 15704 | if (has_edp_a(dev_priv)) |
15647 | intel_dp_init(dev, DP_A, PORT_A); | 15705 | intel_dp_init(dev_priv, DP_A, PORT_A); |
15648 | 15706 | ||
15649 | if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { | 15707 | if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { |
15650 | /* PCH SDVOB multiplex with HDMIB */ | 15708 | /* PCH SDVOB multiplex with HDMIB */ |
15651 | found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B); | 15709 | found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); |
15652 | if (!found) | 15710 | if (!found) |
15653 | intel_hdmi_init(dev, PCH_HDMIB, PORT_B); | 15711 | intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); |
15654 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) | 15712 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
15655 | intel_dp_init(dev, PCH_DP_B, PORT_B); | 15713 | intel_dp_init(dev_priv, PCH_DP_B, PORT_B); |
15656 | } | 15714 | } |
15657 | 15715 | ||
15658 | if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) | 15716 | if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) |
15659 | intel_hdmi_init(dev, PCH_HDMIC, PORT_C); | 15717 | intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); |
15660 | 15718 | ||
15661 | if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) | 15719 | if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) |
15662 | intel_hdmi_init(dev, PCH_HDMID, PORT_D); | 15720 | intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); |
15663 | 15721 | ||
15664 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | 15722 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
15665 | intel_dp_init(dev, PCH_DP_C, PORT_C); | 15723 | intel_dp_init(dev_priv, PCH_DP_C, PORT_C); |
15666 | 15724 | ||
15667 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 15725 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
15668 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 15726 | intel_dp_init(dev_priv, PCH_DP_D, PORT_D); |
15669 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 15727 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
15670 | bool has_edp, has_port; | 15728 | bool has_edp, has_port; |
15671 | 15729 | ||
@@ -15687,16 +15745,16 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
15687 | has_edp = intel_dp_is_edp(dev_priv, PORT_B); | 15745 | has_edp = intel_dp_is_edp(dev_priv, PORT_B); |
15688 | has_port = intel_bios_is_port_present(dev_priv, PORT_B); | 15746 | has_port = intel_bios_is_port_present(dev_priv, PORT_B); |
15689 | if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) | 15747 | if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) |
15690 | has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); | 15748 | has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); |
15691 | if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) | 15749 | if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) |
15692 | intel_hdmi_init(dev, VLV_HDMIB, PORT_B); | 15750 | intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); |
15693 | 15751 | ||
15694 | has_edp = intel_dp_is_edp(dev_priv, PORT_C); | 15752 | has_edp = intel_dp_is_edp(dev_priv, PORT_C); |
15695 | has_port = intel_bios_is_port_present(dev_priv, PORT_C); | 15753 | has_port = intel_bios_is_port_present(dev_priv, PORT_C); |
15696 | if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) | 15754 | if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) |
15697 | has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); | 15755 | has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); |
15698 | if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) | 15756 | if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) |
15699 | intel_hdmi_init(dev, VLV_HDMIC, PORT_C); | 15757 | intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); |
15700 | 15758 | ||
15701 | if (IS_CHERRYVIEW(dev_priv)) { | 15759 | if (IS_CHERRYVIEW(dev_priv)) { |
15702 | /* | 15760 | /* |
@@ -15705,63 +15763,63 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
15705 | */ | 15763 | */ |
15706 | has_port = intel_bios_is_port_present(dev_priv, PORT_D); | 15764 | has_port = intel_bios_is_port_present(dev_priv, PORT_D); |
15707 | if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) | 15765 | if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) |
15708 | intel_dp_init(dev, CHV_DP_D, PORT_D); | 15766 | intel_dp_init(dev_priv, CHV_DP_D, PORT_D); |
15709 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) | 15767 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) |
15710 | intel_hdmi_init(dev, CHV_HDMID, PORT_D); | 15768 | intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); |
15711 | } | 15769 | } |
15712 | 15770 | ||
15713 | intel_dsi_init(dev); | 15771 | intel_dsi_init(dev_priv); |
15714 | } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { | 15772 | } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { |
15715 | bool found = false; | 15773 | bool found = false; |
15716 | 15774 | ||
15717 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { | 15775 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { |
15718 | DRM_DEBUG_KMS("probing SDVOB\n"); | 15776 | DRM_DEBUG_KMS("probing SDVOB\n"); |
15719 | found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B); | 15777 | found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); |
15720 | if (!found && IS_G4X(dev_priv)) { | 15778 | if (!found && IS_G4X(dev_priv)) { |
15721 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | 15779 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
15722 | intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); | 15780 | intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); |
15723 | } | 15781 | } |
15724 | 15782 | ||
15725 | if (!found && IS_G4X(dev_priv)) | 15783 | if (!found && IS_G4X(dev_priv)) |
15726 | intel_dp_init(dev, DP_B, PORT_B); | 15784 | intel_dp_init(dev_priv, DP_B, PORT_B); |
15727 | } | 15785 | } |
15728 | 15786 | ||
15729 | /* Before G4X SDVOC doesn't have its own detect register */ | 15787 | /* Before G4X SDVOC doesn't have its own detect register */ |
15730 | 15788 | ||
15731 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { | 15789 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { |
15732 | DRM_DEBUG_KMS("probing SDVOC\n"); | 15790 | DRM_DEBUG_KMS("probing SDVOC\n"); |
15733 | found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C); | 15791 | found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); |
15734 | } | 15792 | } |
15735 | 15793 | ||
15736 | if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { | 15794 | if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { |
15737 | 15795 | ||
15738 | if (IS_G4X(dev_priv)) { | 15796 | if (IS_G4X(dev_priv)) { |
15739 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | 15797 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); |
15740 | intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); | 15798 | intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); |
15741 | } | 15799 | } |
15742 | if (IS_G4X(dev_priv)) | 15800 | if (IS_G4X(dev_priv)) |
15743 | intel_dp_init(dev, DP_C, PORT_C); | 15801 | intel_dp_init(dev_priv, DP_C, PORT_C); |
15744 | } | 15802 | } |
15745 | 15803 | ||
15746 | if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) | 15804 | if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) |
15747 | intel_dp_init(dev, DP_D, PORT_D); | 15805 | intel_dp_init(dev_priv, DP_D, PORT_D); |
15748 | } else if (IS_GEN2(dev_priv)) | 15806 | } else if (IS_GEN2(dev_priv)) |
15749 | intel_dvo_init(dev); | 15807 | intel_dvo_init(dev_priv); |
15750 | 15808 | ||
15751 | if (SUPPORTS_TV(dev_priv)) | 15809 | if (SUPPORTS_TV(dev_priv)) |
15752 | intel_tv_init(dev); | 15810 | intel_tv_init(dev_priv); |
15753 | 15811 | ||
15754 | intel_psr_init(dev); | 15812 | intel_psr_init(dev_priv); |
15755 | 15813 | ||
15756 | for_each_intel_encoder(dev, encoder) { | 15814 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
15757 | encoder->base.possible_crtcs = encoder->crtc_mask; | 15815 | encoder->base.possible_crtcs = encoder->crtc_mask; |
15758 | encoder->base.possible_clones = | 15816 | encoder->base.possible_clones = |
15759 | intel_encoder_clones(encoder); | 15817 | intel_encoder_clones(encoder); |
15760 | } | 15818 | } |
15761 | 15819 | ||
15762 | intel_init_pch_refclk(dev); | 15820 | intel_init_pch_refclk(dev_priv); |
15763 | 15821 | ||
15764 | drm_helper_move_panel_connectors_to_head(dev); | 15822 | drm_helper_move_panel_connectors_to_head(&dev_priv->drm); |
15765 | } | 15823 | } |
15766 | 15824 | ||
15767 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 15825 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -16036,6 +16094,17 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
16036 | return fb; | 16094 | return fb; |
16037 | } | 16095 | } |
16038 | 16096 | ||
16097 | static void intel_atomic_state_free(struct drm_atomic_state *state) | ||
16098 | { | ||
16099 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | ||
16100 | |||
16101 | drm_atomic_state_default_release(state); | ||
16102 | |||
16103 | i915_sw_fence_fini(&intel_state->commit_ready); | ||
16104 | |||
16105 | kfree(state); | ||
16106 | } | ||
16107 | |||
16039 | static const struct drm_mode_config_funcs intel_mode_funcs = { | 16108 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
16040 | .fb_create = intel_user_framebuffer_create, | 16109 | .fb_create = intel_user_framebuffer_create, |
16041 | .output_poll_changed = intel_fbdev_output_poll_changed, | 16110 | .output_poll_changed = intel_fbdev_output_poll_changed, |
@@ -16043,6 +16112,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
16043 | .atomic_commit = intel_atomic_commit, | 16112 | .atomic_commit = intel_atomic_commit, |
16044 | .atomic_state_alloc = intel_atomic_state_alloc, | 16113 | .atomic_state_alloc = intel_atomic_state_alloc, |
16045 | .atomic_state_clear = intel_atomic_state_clear, | 16114 | .atomic_state_clear = intel_atomic_state_clear, |
16115 | .atomic_state_free = intel_atomic_state_free, | ||
16046 | }; | 16116 | }; |
16047 | 16117 | ||
16048 | /** | 16118 | /** |
@@ -16123,7 +16193,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) | |||
16123 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 16193 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
16124 | dev_priv->display.get_display_clock_speed = | 16194 | dev_priv->display.get_display_clock_speed = |
16125 | skylake_get_display_clock_speed; | 16195 | skylake_get_display_clock_speed; |
16126 | else if (IS_BROXTON(dev_priv)) | 16196 | else if (IS_GEN9_LP(dev_priv)) |
16127 | dev_priv->display.get_display_clock_speed = | 16197 | dev_priv->display.get_display_clock_speed = |
16128 | broxton_get_display_clock_speed; | 16198 | broxton_get_display_clock_speed; |
16129 | else if (IS_BROADWELL(dev_priv)) | 16199 | else if (IS_BROADWELL(dev_priv)) |
@@ -16138,14 +16208,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) | |||
16138 | else if (IS_GEN5(dev_priv)) | 16208 | else if (IS_GEN5(dev_priv)) |
16139 | dev_priv->display.get_display_clock_speed = | 16209 | dev_priv->display.get_display_clock_speed = |
16140 | ilk_get_display_clock_speed; | 16210 | ilk_get_display_clock_speed; |
16141 | else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) || | 16211 | else if (IS_I945G(dev_priv) || IS_I965G(dev_priv) || |
16142 | IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) | 16212 | IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) |
16143 | dev_priv->display.get_display_clock_speed = | 16213 | dev_priv->display.get_display_clock_speed = |
16144 | i945_get_display_clock_speed; | 16214 | i945_get_display_clock_speed; |
16145 | else if (IS_GM45(dev_priv)) | 16215 | else if (IS_GM45(dev_priv)) |
16146 | dev_priv->display.get_display_clock_speed = | 16216 | dev_priv->display.get_display_clock_speed = |
16147 | gm45_get_display_clock_speed; | 16217 | gm45_get_display_clock_speed; |
16148 | else if (IS_CRESTLINE(dev_priv)) | 16218 | else if (IS_I965GM(dev_priv)) |
16149 | dev_priv->display.get_display_clock_speed = | 16219 | dev_priv->display.get_display_clock_speed = |
16150 | i965gm_get_display_clock_speed; | 16220 | i965gm_get_display_clock_speed; |
16151 | else if (IS_PINEVIEW(dev_priv)) | 16221 | else if (IS_PINEVIEW(dev_priv)) |
@@ -16157,7 +16227,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) | |||
16157 | else if (IS_I915G(dev_priv)) | 16227 | else if (IS_I915G(dev_priv)) |
16158 | dev_priv->display.get_display_clock_speed = | 16228 | dev_priv->display.get_display_clock_speed = |
16159 | i915_get_display_clock_speed; | 16229 | i915_get_display_clock_speed; |
16160 | else if (IS_I945GM(dev_priv) || IS_845G(dev_priv)) | 16230 | else if (IS_I945GM(dev_priv) || IS_I845G(dev_priv)) |
16161 | dev_priv->display.get_display_clock_speed = | 16231 | dev_priv->display.get_display_clock_speed = |
16162 | i9xx_misc_get_display_clock_speed; | 16232 | i9xx_misc_get_display_clock_speed; |
16163 | else if (IS_I915GM(dev_priv)) | 16233 | else if (IS_I915GM(dev_priv)) |
@@ -16196,7 +16266,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) | |||
16196 | valleyview_modeset_commit_cdclk; | 16266 | valleyview_modeset_commit_cdclk; |
16197 | dev_priv->display.modeset_calc_cdclk = | 16267 | dev_priv->display.modeset_calc_cdclk = |
16198 | valleyview_modeset_calc_cdclk; | 16268 | valleyview_modeset_calc_cdclk; |
16199 | } else if (IS_BROXTON(dev_priv)) { | 16269 | } else if (IS_GEN9_LP(dev_priv)) { |
16200 | dev_priv->display.modeset_commit_cdclk = | 16270 | dev_priv->display.modeset_commit_cdclk = |
16201 | bxt_modeset_commit_cdclk; | 16271 | bxt_modeset_commit_cdclk; |
16202 | dev_priv->display.modeset_calc_cdclk = | 16272 | dev_priv->display.modeset_calc_cdclk = |
@@ -16579,8 +16649,8 @@ int intel_modeset_init(struct drm_device *dev) | |||
16579 | dev->mode_config.max_height = 8192; | 16649 | dev->mode_config.max_height = 8192; |
16580 | } | 16650 | } |
16581 | 16651 | ||
16582 | if (IS_845G(dev_priv) || IS_I865G(dev_priv)) { | 16652 | if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { |
16583 | dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512; | 16653 | dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; |
16584 | dev->mode_config.cursor_height = 1023; | 16654 | dev->mode_config.cursor_height = 1023; |
16585 | } else if (IS_GEN2(dev_priv)) { | 16655 | } else if (IS_GEN2(dev_priv)) { |
16586 | dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; | 16656 | dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; |
@@ -16617,7 +16687,7 @@ int intel_modeset_init(struct drm_device *dev) | |||
16617 | 16687 | ||
16618 | /* Just disable it once at startup */ | 16688 | /* Just disable it once at startup */ |
16619 | i915_disable_vga(dev_priv); | 16689 | i915_disable_vga(dev_priv); |
16620 | intel_setup_outputs(dev); | 16690 | intel_setup_outputs(dev_priv); |
16621 | 16691 | ||
16622 | drm_modeset_lock_all(dev); | 16692 | drm_modeset_lock_all(dev); |
16623 | intel_modeset_setup_hw_state(dev); | 16693 | intel_modeset_setup_hw_state(dev); |
@@ -16948,16 +17018,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
16948 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | 17018 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; |
16949 | 17019 | ||
16950 | pll->on = pll->funcs.get_hw_state(dev_priv, pll, | 17020 | pll->on = pll->funcs.get_hw_state(dev_priv, pll, |
16951 | &pll->config.hw_state); | 17021 | &pll->state.hw_state); |
16952 | pll->config.crtc_mask = 0; | 17022 | pll->state.crtc_mask = 0; |
16953 | for_each_intel_crtc(dev, crtc) { | 17023 | for_each_intel_crtc(dev, crtc) { |
16954 | if (crtc->active && crtc->config->shared_dpll == pll) | 17024 | if (crtc->active && crtc->config->shared_dpll == pll) |
16955 | pll->config.crtc_mask |= 1 << crtc->pipe; | 17025 | pll->state.crtc_mask |= 1 << crtc->pipe; |
16956 | } | 17026 | } |
16957 | pll->active_mask = pll->config.crtc_mask; | 17027 | pll->active_mask = pll->state.crtc_mask; |
16958 | 17028 | ||
16959 | DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", | 17029 | DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", |
16960 | pll->name, pll->config.crtc_mask, pll->on); | 17030 | pll->name, pll->state.crtc_mask, pll->on); |
16961 | } | 17031 | } |
16962 | 17032 | ||
16963 | for_each_intel_encoder(dev, encoder) { | 17033 | for_each_intel_encoder(dev, encoder) { |
@@ -17024,17 +17094,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
17024 | * the atomic core happy. It wants a valid mode if the | 17094 | * the atomic core happy. It wants a valid mode if the |
17025 | * crtc's enabled, so we do the above call. | 17095 | * crtc's enabled, so we do the above call. |
17026 | * | 17096 | * |
17027 | * At this point some state updated by the connectors | 17097 | * But we don't set all the derived state fully, hence |
17028 | * in their ->detect() callback has not run yet, so | 17098 | * set a flag to indicate that a full recalculation is |
17029 | * no recalculation can be done yet. | 17099 | * needed on the next commit. |
17030 | * | ||
17031 | * Even if we could do a recalculation and modeset | ||
17032 | * right now it would cause a double modeset if | ||
17033 | * fbdev or userspace chooses a different initial mode. | ||
17034 | * | ||
17035 | * If that happens, someone indicated they wanted a | ||
17036 | * mode change, which means it's safe to do a full | ||
17037 | * recalculation. | ||
17038 | */ | 17100 | */ |
17039 | crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; | 17101 | crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; |
17040 | 17102 | ||
@@ -17254,7 +17316,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
17254 | 17316 | ||
17255 | intel_cleanup_gt_powersave(dev_priv); | 17317 | intel_cleanup_gt_powersave(dev_priv); |
17256 | 17318 | ||
17257 | intel_teardown_gmbus(dev); | 17319 | intel_teardown_gmbus(dev_priv); |
17258 | } | 17320 | } |
17259 | 17321 | ||
17260 | void intel_connector_attach_encoder(struct intel_connector *connector, | 17322 | void intel_connector_attach_encoder(struct intel_connector *connector, |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0b8e8eb85c19..fb12896bafee 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -156,38 +156,28 @@ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) | |||
156 | u8 source_max, sink_max; | 156 | u8 source_max, sink_max; |
157 | 157 | ||
158 | source_max = intel_dig_port->max_lanes; | 158 | source_max = intel_dig_port->max_lanes; |
159 | sink_max = drm_dp_max_lane_count(intel_dp->dpcd); | 159 | sink_max = intel_dp->max_sink_lane_count; |
160 | 160 | ||
161 | return min(source_max, sink_max); | 161 | return min(source_max, sink_max); |
162 | } | 162 | } |
163 | 163 | ||
164 | /* | 164 | int |
165 | * The units on the numbers in the next two are... bizarre. Examples will | ||
166 | * make it clearer; this one parallels an example in the eDP spec. | ||
167 | * | ||
168 | * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: | ||
169 | * | ||
170 | * 270000 * 1 * 8 / 10 == 216000 | ||
171 | * | ||
172 | * The actual data capacity of that configuration is 2.16Gbit/s, so the | ||
173 | * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - | ||
174 | * or equivalently, kilopixels per second - so for 1680x1050R it'd be | ||
175 | * 119000. At 18bpp that's 2142000 kilobits per second. | ||
176 | * | ||
177 | * Thus the strange-looking division by 10 in intel_dp_link_required, to | ||
178 | * get the result in decakilobits instead of kilobits. | ||
179 | */ | ||
180 | |||
181 | static int | ||
182 | intel_dp_link_required(int pixel_clock, int bpp) | 165 | intel_dp_link_required(int pixel_clock, int bpp) |
183 | { | 166 | { |
184 | return (pixel_clock * bpp + 9) / 10; | 167 | /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ |
168 | return DIV_ROUND_UP(pixel_clock * bpp, 8); | ||
185 | } | 169 | } |
186 | 170 | ||
187 | static int | 171 | int |
188 | intel_dp_max_data_rate(int max_link_clock, int max_lanes) | 172 | intel_dp_max_data_rate(int max_link_clock, int max_lanes) |
189 | { | 173 | { |
190 | return (max_link_clock * max_lanes * 8) / 10; | 174 | /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the |
175 | * link rate that is generally expressed in Gbps. Since, 8 bits of data | ||
176 | * is transmitted every LS_Clk per lane, there is no need to account for | ||
177 | * the channel encoding that is done in the PHY layer here. | ||
178 | */ | ||
179 | |||
180 | return max_link_clock * max_lanes; | ||
191 | } | 181 | } |
192 | 182 | ||
193 | static int | 183 | static int |
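The unit change above is easiest to sanity-check with numbers. A minimal standalone sketch (the mode and link figures are illustrative, and DIV_ROUND_UP is re-defined locally rather than taken from kernel headers): both helpers now return kilobytes per second, so they compare directly.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* assume 1920x1080@60: ~148500 kHz pixel clock at 24 bpp */
        int mode_rate = DIV_ROUND_UP(148500 * 24, 8); /* 445500 kB/s needed */
        /* assume HBR2 x4: LS_Clk = 540000 kHz, one byte per clock per lane */
        int max_rate = 540000 * 4;                    /* 2160000 kB/s available */

        printf("mode_rate=%d kB/s max_rate=%d kB/s fits=%d\n",
               mode_rate, max_rate, mode_rate <= max_rate);
        return 0;
    }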
@@ -223,7 +213,7 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) | |||
223 | 213 | ||
224 | *sink_rates = default_rates; | 214 | *sink_rates = default_rates; |
225 | 215 | ||
226 | return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; | 216 | return (intel_dp->max_sink_link_bw >> 3) + 1; |
227 | } | 217 | } |
228 | 218 | ||
229 | static int | 219 | static int |
@@ -233,7 +223,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates) | |||
233 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | 223 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); |
234 | int size; | 224 | int size; |
235 | 225 | ||
236 | if (IS_BROXTON(dev_priv)) { | 226 | if (IS_GEN9_LP(dev_priv)) { |
237 | *source_rates = bxt_rates; | 227 | *source_rates = bxt_rates; |
238 | size = ARRAY_SIZE(bxt_rates); | 228 | size = ARRAY_SIZE(bxt_rates); |
239 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | 229 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
@@ -288,6 +278,44 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp, | |||
288 | common_rates); | 278 | common_rates); |
289 | } | 279 | } |
290 | 280 | ||
281 | static int intel_dp_link_rate_index(struct intel_dp *intel_dp, | ||
282 | int *common_rates, int link_rate) | ||
283 | { | ||
284 | int common_len; | ||
285 | int index; | ||
286 | |||
287 | common_len = intel_dp_common_rates(intel_dp, common_rates); | ||
288 | for (index = 0; index < common_len; index++) { | ||
289 | if (link_rate == common_rates[common_len - index - 1]) | ||
290 | return common_len - index - 1; | ||
291 | } | ||
292 | |||
293 | return -1; | ||
294 | } | ||
295 | |||
296 | int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, | ||
297 | int link_rate, uint8_t lane_count) | ||
298 | { | ||
299 | int common_rates[DP_MAX_SUPPORTED_RATES]; | ||
300 | int link_rate_index; | ||
301 | |||
302 | link_rate_index = intel_dp_link_rate_index(intel_dp, | ||
303 | common_rates, | ||
304 | link_rate); | ||
305 | if (link_rate_index > 0) { | ||
306 | intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]); | ||
307 | intel_dp->max_sink_lane_count = lane_count; | ||
308 | } else if (lane_count > 1) { | ||
309 | intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp); | ||
310 | intel_dp->max_sink_lane_count = lane_count >> 1; | ||
311 | } else { | ||
312 | DRM_ERROR("Link Training Unsuccessful\n"); | ||
313 | return -1; | ||
314 | } | ||
315 | |||
316 | return 0; | ||
317 | } | ||
318 | |||
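The fallback helper added here steps down through the common link rates first, and only then resets to the top rate and halves the lane count. A sketch of that policy in isolation (the rate table is an illustrative stand-in, not the driver's actual tables):

    #include <stdio.h>

    static const int rates[] = { 162000, 270000, 540000 }; /* kHz */
    #define N_RATES 3

    static int fallback(int *idx, int *lanes)
    {
        if (*idx > 0) {
            (*idx)--;            /* next lower link rate, same lanes */
        } else if (*lanes > 1) {
            *idx = N_RATES - 1;  /* back to the max rate ... */
            *lanes >>= 1;        /* ... with half the lanes */
        } else {
            return -1;           /* nothing left to try */
        }
        return 0;
    }

    int main(void)
    {
        int idx = N_RATES - 1, lanes = 4;

        /* 540@4 -> 270@4 -> 162@4 -> 540@2 -> ... -> 162@1 -> fail */
        while (fallback(&idx, &lanes) == 0)
            printf("retry at %d kHz x%d\n", rates[idx], lanes);
        printf("link training failed for good\n");
        return 0;
    }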
291 | static enum drm_mode_status | 319 | static enum drm_mode_status |
292 | intel_dp_mode_valid(struct drm_connector *connector, | 320 | intel_dp_mode_valid(struct drm_connector *connector, |
293 | struct drm_display_mode *mode) | 321 | struct drm_display_mode *mode) |
@@ -465,14 +493,50 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) | |||
465 | } | 493 | } |
466 | } | 494 | } |
467 | 495 | ||
496 | static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) | ||
497 | { | ||
498 | struct intel_encoder *encoder; | ||
499 | unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); | ||
500 | |||
501 | /* | ||
502 | * We don't have power sequencer currently. | ||
503 | * Pick one that's not used by other ports. | ||
504 | */ | ||
505 | for_each_intel_encoder(&dev_priv->drm, encoder) { | ||
506 | struct intel_dp *intel_dp; | ||
507 | |||
508 | if (encoder->type != INTEL_OUTPUT_DP && | ||
509 | encoder->type != INTEL_OUTPUT_EDP) | ||
510 | continue; | ||
511 | |||
512 | intel_dp = enc_to_intel_dp(&encoder->base); | ||
513 | |||
514 | if (encoder->type == INTEL_OUTPUT_EDP) { | ||
515 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE && | ||
516 | intel_dp->active_pipe != intel_dp->pps_pipe); | ||
517 | |||
518 | if (intel_dp->pps_pipe != INVALID_PIPE) | ||
519 | pipes &= ~(1 << intel_dp->pps_pipe); | ||
520 | } else { | ||
521 | WARN_ON(intel_dp->pps_pipe != INVALID_PIPE); | ||
522 | |||
523 | if (intel_dp->active_pipe != INVALID_PIPE) | ||
524 | pipes &= ~(1 << intel_dp->active_pipe); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | if (pipes == 0) | ||
529 | return INVALID_PIPE; | ||
530 | |||
531 | return ffs(pipes) - 1; | ||
532 | } | ||
533 | |||
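The free-PPS search above boils down to a two-bit mask and ffs(). A minimal illustration, assuming PIPE_A = 0 and PIPE_B = 1 as in the driver (the "taken" pipe here is made up):

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    #define INVALID_PIPE (-1)

    int main(void)
    {
        unsigned int pipes = (1 << 0) | (1 << 1); /* both sequencers free */

        pipes &= ~(1 << 0);  /* pretend pipe A's PPS is already in use */

        /* ffs() is 1-based, hence the -1; an empty mask means none free */
        int pipe = pipes ? ffs(pipes) - 1 : INVALID_PIPE;

        printf("picked pps pipe %d\n", pipe); /* prints 1, i.e. PIPE_B */
        return 0;
    }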
468 | static enum pipe | 534 | static enum pipe |
469 | vlv_power_sequencer_pipe(struct intel_dp *intel_dp) | 535 | vlv_power_sequencer_pipe(struct intel_dp *intel_dp) |
470 | { | 536 | { |
471 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 537 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
472 | struct drm_device *dev = intel_dig_port->base.base.dev; | 538 | struct drm_device *dev = intel_dig_port->base.base.dev; |
473 | struct drm_i915_private *dev_priv = to_i915(dev); | 539 | struct drm_i915_private *dev_priv = to_i915(dev); |
474 | struct intel_encoder *encoder; | ||
475 | unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); | ||
476 | enum pipe pipe; | 540 | enum pipe pipe; |
477 | 541 | ||
478 | lockdep_assert_held(&dev_priv->pps_mutex); | 542 | lockdep_assert_held(&dev_priv->pps_mutex); |
@@ -480,33 +544,20 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) | |||
480 | /* We should never land here with regular DP ports */ | 544 | /* We should never land here with regular DP ports */ |
481 | WARN_ON(!is_edp(intel_dp)); | 545 | WARN_ON(!is_edp(intel_dp)); |
482 | 546 | ||
547 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE && | ||
548 | intel_dp->active_pipe != intel_dp->pps_pipe); | ||
549 | |||
483 | if (intel_dp->pps_pipe != INVALID_PIPE) | 550 | if (intel_dp->pps_pipe != INVALID_PIPE) |
484 | return intel_dp->pps_pipe; | 551 | return intel_dp->pps_pipe; |
485 | 552 | ||
486 | /* | 553 | pipe = vlv_find_free_pps(dev_priv); |
487 | * We don't have power sequencer currently. | ||
488 | * Pick one that's not used by other ports. | ||
489 | */ | ||
490 | for_each_intel_encoder(dev, encoder) { | ||
491 | struct intel_dp *tmp; | ||
492 | |||
493 | if (encoder->type != INTEL_OUTPUT_EDP) | ||
494 | continue; | ||
495 | |||
496 | tmp = enc_to_intel_dp(&encoder->base); | ||
497 | |||
498 | if (tmp->pps_pipe != INVALID_PIPE) | ||
499 | pipes &= ~(1 << tmp->pps_pipe); | ||
500 | } | ||
501 | 554 | ||
502 | /* | 555 | /* |
503 | * Didn't find one. This should not happen since there | 556 | * Didn't find one. This should not happen since there |
504 | * are two power sequencers and up to two eDP ports. | 557 | * are two power sequencers and up to two eDP ports. |
505 | */ | 558 | */ |
506 | if (WARN_ON(pipes == 0)) | 559 | if (WARN_ON(pipe == INVALID_PIPE)) |
507 | pipe = PIPE_A; | 560 | pipe = PIPE_A; |
508 | else | ||
509 | pipe = ffs(pipes) - 1; | ||
510 | 561 | ||
511 | vlv_steal_power_sequencer(dev, pipe); | 562 | vlv_steal_power_sequencer(dev, pipe); |
512 | intel_dp->pps_pipe = pipe; | 563 | intel_dp->pps_pipe = pipe; |
@@ -646,7 +697,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) | |||
646 | struct intel_encoder *encoder; | 697 | struct intel_encoder *encoder; |
647 | 698 | ||
648 | if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && | 699 | if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && |
649 | !IS_BROXTON(dev_priv))) | 700 | !IS_GEN9_LP(dev_priv))) |
650 | return; | 701 | return; |
651 | 702 | ||
652 | /* | 703 | /* |
@@ -662,11 +713,18 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) | |||
662 | for_each_intel_encoder(dev, encoder) { | 713 | for_each_intel_encoder(dev, encoder) { |
663 | struct intel_dp *intel_dp; | 714 | struct intel_dp *intel_dp; |
664 | 715 | ||
665 | if (encoder->type != INTEL_OUTPUT_EDP) | 716 | if (encoder->type != INTEL_OUTPUT_DP && |
717 | encoder->type != INTEL_OUTPUT_EDP) | ||
666 | continue; | 718 | continue; |
667 | 719 | ||
668 | intel_dp = enc_to_intel_dp(&encoder->base); | 720 | intel_dp = enc_to_intel_dp(&encoder->base); |
669 | if (IS_BROXTON(dev_priv)) | 721 | |
722 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE); | ||
723 | |||
724 | if (encoder->type != INTEL_OUTPUT_EDP) | ||
725 | continue; | ||
726 | |||
727 | if (IS_GEN9_LP(dev_priv)) | ||
670 | intel_dp->pps_reset = true; | 728 | intel_dp->pps_reset = true; |
671 | else | 729 | else |
672 | intel_dp->pps_pipe = INVALID_PIPE; | 730 | intel_dp->pps_pipe = INVALID_PIPE; |
@@ -689,7 +747,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv, | |||
689 | 747 | ||
690 | memset(regs, 0, sizeof(*regs)); | 748 | memset(regs, 0, sizeof(*regs)); |
691 | 749 | ||
692 | if (IS_BROXTON(dev_priv)) | 750 | if (IS_GEN9_LP(dev_priv)) |
693 | pps_idx = bxt_power_sequencer_idx(intel_dp); | 751 | pps_idx = bxt_power_sequencer_idx(intel_dp); |
694 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 752 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
695 | pps_idx = vlv_power_sequencer_pipe(intel_dp); | 753 | pps_idx = vlv_power_sequencer_pipe(intel_dp); |
@@ -698,7 +756,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv, | |||
698 | regs->pp_stat = PP_STATUS(pps_idx); | 756 | regs->pp_stat = PP_STATUS(pps_idx); |
699 | regs->pp_on = PP_ON_DELAYS(pps_idx); | 757 | regs->pp_on = PP_ON_DELAYS(pps_idx); |
700 | regs->pp_off = PP_OFF_DELAYS(pps_idx); | 758 | regs->pp_off = PP_OFF_DELAYS(pps_idx); |
701 | if (!IS_BROXTON(dev_priv)) | 759 | if (!IS_GEN9_LP(dev_priv)) |
702 | regs->pp_div = PP_DIVISOR(pps_idx); | 760 | regs->pp_div = PP_DIVISOR(pps_idx); |
703 | } | 761 | } |
704 | 762 | ||
@@ -2402,6 +2460,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
2402 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, | 2460 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
2403 | DP_SET_POWER_D3); | 2461 | DP_SET_POWER_D3); |
2404 | } else { | 2462 | } else { |
2463 | struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); | ||
2464 | |||
2405 | /* | 2465 | /* |
2406 | * When turning on, we need to retry for 1ms to give the sink | 2466 | * When turning on, we need to retry for 1ms to give the sink |
2407 | * time to wake up. | 2467 | * time to wake up. |
@@ -2413,6 +2473,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
2413 | break; | 2473 | break; |
2414 | msleep(1); | 2474 | msleep(1); |
2415 | } | 2475 | } |
2476 | |||
2477 | if (ret == 1 && lspcon->active) | ||
2478 | lspcon_wait_pcon_mode(lspcon); | ||
2416 | } | 2479 | } |
2417 | 2480 | ||
2418 | if (ret != 1) | 2481 | if (ret != 1) |
@@ -2820,6 +2883,8 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) | |||
2820 | enum pipe pipe = intel_dp->pps_pipe; | 2883 | enum pipe pipe = intel_dp->pps_pipe; |
2821 | i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); | 2884 | i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); |
2822 | 2885 | ||
2886 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE); | ||
2887 | |||
2823 | edp_panel_vdd_off_sync(intel_dp); | 2888 | edp_panel_vdd_off_sync(intel_dp); |
2824 | 2889 | ||
2825 | /* | 2890 | /* |
@@ -2854,22 +2919,23 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, | |||
2854 | struct intel_dp *intel_dp; | 2919 | struct intel_dp *intel_dp; |
2855 | enum port port; | 2920 | enum port port; |
2856 | 2921 | ||
2857 | if (encoder->type != INTEL_OUTPUT_EDP) | 2922 | if (encoder->type != INTEL_OUTPUT_DP && |
2923 | encoder->type != INTEL_OUTPUT_EDP) | ||
2858 | continue; | 2924 | continue; |
2859 | 2925 | ||
2860 | intel_dp = enc_to_intel_dp(&encoder->base); | 2926 | intel_dp = enc_to_intel_dp(&encoder->base); |
2861 | port = dp_to_dig_port(intel_dp)->port; | 2927 | port = dp_to_dig_port(intel_dp)->port; |
2862 | 2928 | ||
2929 | WARN(intel_dp->active_pipe == pipe, | ||
2930 | "stealing pipe %c power sequencer from active (e)DP port %c\n", | ||
2931 | pipe_name(pipe), port_name(port)); | ||
2932 | |||
2863 | if (intel_dp->pps_pipe != pipe) | 2933 | if (intel_dp->pps_pipe != pipe) |
2864 | continue; | 2934 | continue; |
2865 | 2935 | ||
2866 | DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n", | 2936 | DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n", |
2867 | pipe_name(pipe), port_name(port)); | 2937 | pipe_name(pipe), port_name(port)); |
2868 | 2938 | ||
2869 | WARN(encoder->base.crtc, | ||
2870 | "stealing pipe %c power sequencer from active eDP port %c\n", | ||
2871 | pipe_name(pipe), port_name(port)); | ||
2872 | |||
2873 | /* make sure vdd is off before we steal it */ | 2939 | /* make sure vdd is off before we steal it */ |
2874 | vlv_detach_power_sequencer(intel_dp); | 2940 | vlv_detach_power_sequencer(intel_dp); |
2875 | } | 2941 | } |
@@ -2885,19 +2951,17 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) | |||
2885 | 2951 | ||
2886 | lockdep_assert_held(&dev_priv->pps_mutex); | 2952 | lockdep_assert_held(&dev_priv->pps_mutex); |
2887 | 2953 | ||
2888 | if (!is_edp(intel_dp)) | 2954 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE); |
2889 | return; | ||
2890 | |||
2891 | if (intel_dp->pps_pipe == crtc->pipe) | ||
2892 | return; | ||
2893 | 2955 | ||
2894 | /* | 2956 | if (intel_dp->pps_pipe != INVALID_PIPE && |
2895 | * If another power sequencer was being used on this | 2957 | intel_dp->pps_pipe != crtc->pipe) { |
2896 | * port previously make sure to turn off vdd there while | 2958 | /* |
2897 | * we still have control of it. | 2959 | * If another power sequencer was being used on this |
2898 | */ | 2960 | * port previously make sure to turn off vdd there while |
2899 | if (intel_dp->pps_pipe != INVALID_PIPE) | 2961 | * we still have control of it. |
2962 | */ | ||
2900 | vlv_detach_power_sequencer(intel_dp); | 2963 | vlv_detach_power_sequencer(intel_dp); |
2964 | } | ||
2901 | 2965 | ||
2902 | /* | 2966 | /* |
2903 | * We may be stealing the power | 2967 | * We may be stealing the power |
@@ -2905,6 +2969,11 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) | |||
2905 | */ | 2969 | */ |
2906 | vlv_steal_power_sequencer(dev, crtc->pipe); | 2970 | vlv_steal_power_sequencer(dev, crtc->pipe); |
2907 | 2971 | ||
2972 | intel_dp->active_pipe = crtc->pipe; | ||
2973 | |||
2974 | if (!is_edp(intel_dp)) | ||
2975 | return; | ||
2976 | |||
2908 | /* now it's all ours */ | 2977 | /* now it's all ours */ |
2909 | intel_dp->pps_pipe = crtc->pipe; | 2978 | intel_dp->pps_pipe = crtc->pipe; |
2910 | 2979 | ||
@@ -2980,7 +3049,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) | |||
2980 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); | 3049 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); |
2981 | enum port port = dp_to_dig_port(intel_dp)->port; | 3050 | enum port port = dp_to_dig_port(intel_dp)->port; |
2982 | 3051 | ||
2983 | if (IS_BROXTON(dev_priv)) | 3052 | if (IS_GEN9_LP(dev_priv)) |
2984 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; | 3053 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; |
2985 | else if (INTEL_GEN(dev_priv) >= 9) { | 3054 | else if (INTEL_GEN(dev_priv) >= 9) { |
2986 | if (dev_priv->vbt.edp.low_vswing && port == PORT_A) | 3055 | if (dev_priv->vbt.edp.low_vswing && port == PORT_A) |
@@ -3491,6 +3560,12 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
3491 | msleep(intel_dp->panel_power_down_delay); | 3560 | msleep(intel_dp->panel_power_down_delay); |
3492 | 3561 | ||
3493 | intel_dp->DP = DP; | 3562 | intel_dp->DP = DP; |
3563 | |||
3564 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | ||
3565 | pps_lock(intel_dp); | ||
3566 | intel_dp->active_pipe = INVALID_PIPE; | ||
3567 | pps_unlock(intel_dp); | ||
3568 | } | ||
3494 | } | 3569 | } |
3495 | 3570 | ||
3496 | bool | 3571 | bool |
@@ -3569,7 +3644,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) | |||
3569 | if (val == 0) | 3644 | if (val == 0) |
3570 | break; | 3645 | break; |
3571 | 3646 | ||
3572 | /* Value read is in kHz while drm clock is saved in deca-kHz */ | 3647 | /* Value read multiplied by 200kHz gives the per-lane |
3648 | * link rate in kHz. The source rates are, however, | ||
3649 | * stored in terms of LS_Clk kHz. The full conversion | ||
3650 | * back to symbols is | ||
3651 | * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) | ||
3652 | */ | ||
3573 | intel_dp->sink_rates[i] = (val * 200) / 10; | 3653 | intel_dp->sink_rates[i] = (val * 200) / 10; |
3574 | } | 3654 | } |
3575 | intel_dp->num_sink_rates = i; | 3655 | intel_dp->num_sink_rates = i; |
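A worked number makes the expanded comment concrete. Taking a DPCD SUPPORTED_LINK_RATES entry of 13500 (illustrative; it corresponds to HBR), the stored value comes out in LS_Clk kHz:

    #include <stdio.h>

    int main(void)
    {
        int val = 13500;           /* DPCD entry, units of 200 kHz */
        int bit_rate = val * 200;  /* 2700000 kHz = 2.7 Gbps per lane */

        /* x(8/10 channel coding) x(1/8 bits->bytes) == /10 overall */
        int ls_clk_rate = bit_rate / 10;

        printf("stored sink rate: %d kHz\n", ls_clk_rate); /* 270000 */
        return 0;
    }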
@@ -3835,7 +3915,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) | |||
3835 | DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", | 3915 | DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", |
3836 | intel_dp->aux.i2c_nack_count, | 3916 | intel_dp->aux.i2c_nack_count, |
3837 | intel_dp->aux.i2c_defer_count); | 3917 | intel_dp->aux.i2c_defer_count); |
3838 | intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE; | 3918 | intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; |
3839 | } else { | 3919 | } else { |
3840 | struct edid *block = intel_connector->detect_edid; | 3920 | struct edid *block = intel_connector->detect_edid; |
3841 | 3921 | ||
@@ -3851,11 +3931,11 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) | |||
3851 | DRM_DEBUG_KMS("Failed to write EDID checksum\n"); | 3931 | DRM_DEBUG_KMS("Failed to write EDID checksum\n"); |
3852 | 3932 | ||
3853 | test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; | 3933 | test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; |
3854 | intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD; | 3934 | intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_STANDARD; |
3855 | } | 3935 | } |
3856 | 3936 | ||
3857 | /* Set test active flag here so userspace doesn't interrupt things */ | 3937 | /* Set test active flag here so userspace doesn't interrupt things */ |
3858 | intel_dp->compliance_test_active = 1; | 3938 | intel_dp->compliance.test_active = 1; |
3859 | 3939 | ||
3860 | return test_result; | 3940 | return test_result; |
3861 | } | 3941 | } |
@@ -3881,22 +3961,22 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp) | |||
3881 | switch (rxdata) { | 3961 | switch (rxdata) { |
3882 | case DP_TEST_LINK_TRAINING: | 3962 | case DP_TEST_LINK_TRAINING: |
3883 | DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); | 3963 | DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); |
3884 | intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING; | 3964 | intel_dp->compliance.test_type = DP_TEST_LINK_TRAINING; |
3885 | response = intel_dp_autotest_link_training(intel_dp); | 3965 | response = intel_dp_autotest_link_training(intel_dp); |
3886 | break; | 3966 | break; |
3887 | case DP_TEST_LINK_VIDEO_PATTERN: | 3967 | case DP_TEST_LINK_VIDEO_PATTERN: |
3888 | DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); | 3968 | DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); |
3889 | intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN; | 3969 | intel_dp->compliance.test_type = DP_TEST_LINK_VIDEO_PATTERN; |
3890 | response = intel_dp_autotest_video_pattern(intel_dp); | 3970 | response = intel_dp_autotest_video_pattern(intel_dp); |
3891 | break; | 3971 | break; |
3892 | case DP_TEST_LINK_EDID_READ: | 3972 | case DP_TEST_LINK_EDID_READ: |
3893 | DRM_DEBUG_KMS("EDID test requested\n"); | 3973 | DRM_DEBUG_KMS("EDID test requested\n"); |
3894 | intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ; | 3974 | intel_dp->compliance.test_type = DP_TEST_LINK_EDID_READ; |
3895 | response = intel_dp_autotest_edid(intel_dp); | 3975 | response = intel_dp_autotest_edid(intel_dp); |
3896 | break; | 3976 | break; |
3897 | case DP_TEST_LINK_PHY_TEST_PATTERN: | 3977 | case DP_TEST_LINK_PHY_TEST_PATTERN: |
3898 | DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); | 3978 | DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); |
3899 | intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN; | 3979 | intel_dp->compliance.test_type = DP_TEST_LINK_PHY_TEST_PATTERN; |
3900 | response = intel_dp_autotest_phy_pattern(intel_dp); | 3980 | response = intel_dp_autotest_phy_pattern(intel_dp); |
3901 | break; | 3981 | break; |
3902 | default: | 3982 | default: |
@@ -4020,7 +4100,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
4020 | return; | 4100 | return; |
4021 | 4101 | ||
4022 | /* if link training is requested we should perform it always */ | 4102 | /* if link training is requested we should perform it always */ |
4023 | if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) || | 4103 | if ((intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) || |
4024 | (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) { | 4104 | (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) { |
4025 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", | 4105 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", |
4026 | intel_encoder->base.name); | 4106 | intel_encoder->base.name); |
@@ -4054,9 +4134,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) | |||
4054 | * Clearing compliance test variables to allow capturing | 4134 | * Clearing compliance test variables to allow capturing |
4055 | * of values for next automated test request. | 4135 | * of values for next automated test request. |
4056 | */ | 4136 | */ |
4057 | intel_dp->compliance_test_active = 0; | 4137 | memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); |
4058 | intel_dp->compliance_test_type = 0; | ||
4059 | intel_dp->compliance_test_data = 0; | ||
4060 | 4138 | ||
4061 | /* | 4139 | /* |
4062 | * Now read the DPCD to see if it's actually running | 4140 | * Now read the DPCD to see if it's actually running |
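Folding the three compliance fields into one struct is what lets a single memset() replace the three zeroing assignments removed above. A stand-in illustration (field names mirror the diff; the actual struct layout is otherwise assumed):

    #include <stdio.h>
    #include <string.h>

    struct compliance {
        int test_active;
        int test_type;
        struct { int edid; } test_data;
    };

    int main(void)
    {
        struct compliance c = { 1, 8, { 2 } };

        memset(&c, 0, sizeof(c)); /* clears every field at once */
        printf("%d %d %d\n", c.test_active, c.test_type, c.test_data.edid);
        return 0;
    }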
@@ -4148,9 +4226,10 @@ static enum drm_connector_status | |||
4148 | edp_detect(struct intel_dp *intel_dp) | 4226 | edp_detect(struct intel_dp *intel_dp) |
4149 | { | 4227 | { |
4150 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 4228 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
4229 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4151 | enum drm_connector_status status; | 4230 | enum drm_connector_status status; |
4152 | 4231 | ||
4153 | status = intel_panel_detect(dev); | 4232 | status = intel_panel_detect(dev_priv); |
4154 | if (status == connector_status_unknown) | 4233 | if (status == connector_status_unknown) |
4155 | status = connector_status_connected; | 4234 | status = connector_status_connected; |
4156 | 4235 | ||
@@ -4296,7 +4375,7 @@ static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | |||
4296 | return ibx_digital_port_connected(dev_priv, port); | 4375 | return ibx_digital_port_connected(dev_priv, port); |
4297 | else if (HAS_PCH_SPLIT(dev_priv)) | 4376 | else if (HAS_PCH_SPLIT(dev_priv)) |
4298 | return cpt_digital_port_connected(dev_priv, port); | 4377 | return cpt_digital_port_connected(dev_priv, port); |
4299 | else if (IS_BROXTON(dev_priv)) | 4378 | else if (IS_GEN9_LP(dev_priv)) |
4300 | return bxt_digital_port_connected(dev_priv, port); | 4379 | return bxt_digital_port_connected(dev_priv, port); |
4301 | else if (IS_GM45(dev_priv)) | 4380 | else if (IS_GM45(dev_priv)) |
4302 | return gm45_digital_port_connected(dev_priv, port); | 4381 | return gm45_digital_port_connected(dev_priv, port); |
@@ -4373,9 +4452,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) | |||
4373 | status = connector_status_disconnected; | 4452 | status = connector_status_disconnected; |
4374 | 4453 | ||
4375 | if (status == connector_status_disconnected) { | 4454 | if (status == connector_status_disconnected) { |
4376 | intel_dp->compliance_test_active = 0; | 4455 | memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); |
4377 | intel_dp->compliance_test_type = 0; | ||
4378 | intel_dp->compliance_test_data = 0; | ||
4379 | 4456 | ||
4380 | if (intel_dp->is_mst) { | 4457 | if (intel_dp->is_mst) { |
4381 | DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", | 4458 | DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", |
@@ -4396,6 +4473,12 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) | |||
4396 | yesno(intel_dp_source_supports_hbr2(intel_dp)), | 4473 | yesno(intel_dp_source_supports_hbr2(intel_dp)), |
4397 | yesno(drm_dp_tps3_supported(intel_dp->dpcd))); | 4474 | yesno(drm_dp_tps3_supported(intel_dp->dpcd))); |
4398 | 4475 | ||
4476 | /* Set the max lane count for sink */ | ||
4477 | intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); | ||
4478 | |||
4479 | /* Set the max link BW for sink */ | ||
4480 | intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp); | ||
4481 | |||
4399 | intel_dp_print_rates(intel_dp); | 4482 | intel_dp_print_rates(intel_dp); |
4400 | 4483 | ||
4401 | intel_dp_read_desc(intel_dp); | 4484 | intel_dp_read_desc(intel_dp); |
@@ -4751,27 +4834,41 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) | |||
4751 | edp_panel_vdd_schedule_off(intel_dp); | 4834 | edp_panel_vdd_schedule_off(intel_dp); |
4752 | } | 4835 | } |
4753 | 4836 | ||
4837 | static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) | ||
4838 | { | ||
4839 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); | ||
4840 | |||
4841 | if ((intel_dp->DP & DP_PORT_EN) == 0) | ||
4842 | return INVALID_PIPE; | ||
4843 | |||
4844 | if (IS_CHERRYVIEW(dev_priv)) | ||
4845 | return DP_PORT_TO_PIPE_CHV(intel_dp->DP); | ||
4846 | else | ||
4847 | return PORT_TO_PIPE(intel_dp->DP); | ||
4848 | } | ||
4849 | |||
4754 | void intel_dp_encoder_reset(struct drm_encoder *encoder) | 4850 | void intel_dp_encoder_reset(struct drm_encoder *encoder) |
4755 | { | 4851 | { |
4756 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 4852 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); |
4757 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 4853 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
4758 | struct intel_lspcon *lspcon = &intel_dig_port->lspcon; | 4854 | struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); |
4759 | struct intel_dp *intel_dp = &intel_dig_port->dp; | ||
4760 | 4855 | ||
4761 | if (!HAS_DDI(dev_priv)) | 4856 | if (!HAS_DDI(dev_priv)) |
4762 | intel_dp->DP = I915_READ(intel_dp->output_reg); | 4857 | intel_dp->DP = I915_READ(intel_dp->output_reg); |
4763 | 4858 | ||
4764 | if (IS_GEN9(dev_priv) && lspcon->active) | 4859 | if (lspcon->active) |
4765 | lspcon_resume(lspcon); | 4860 | lspcon_resume(lspcon); |
4766 | 4861 | ||
4767 | if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) | ||
4768 | return; | ||
4769 | |||
4770 | pps_lock(intel_dp); | 4862 | pps_lock(intel_dp); |
4771 | 4863 | ||
4772 | /* Reinit the power sequencer, in case BIOS did something with it. */ | 4864 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
4773 | intel_dp_pps_init(encoder->dev, intel_dp); | 4865 | intel_dp->active_pipe = vlv_active_pipe(intel_dp); |
4774 | intel_edp_panel_vdd_sanitize(intel_dp); | 4866 | |
4867 | if (is_edp(intel_dp)) { | ||
4868 | /* Reinit the power sequencer, in case BIOS did something with it. */ | ||
4869 | intel_dp_pps_init(encoder->dev, intel_dp); | ||
4870 | intel_edp_panel_vdd_sanitize(intel_dp); | ||
4871 | } | ||
4775 | 4872 | ||
4776 | pps_unlock(intel_dp); | 4873 | pps_unlock(intel_dp); |
4777 | } | 4874 | } |
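The new vlv_active_pipe() helper follows a simple shape: bail out if the port is disabled, otherwise decode the pipe-select field from the port register. The bit positions below are invented for illustration only; the driver uses the real DP_PORT_EN, PORT_TO_PIPE and DP_PORT_TO_PIPE_CHV definitions.

    #include <stdio.h>

    #define FAKE_PORT_EN    (1u << 31)  /* hypothetical enable bit */
    #define FAKE_PIPE_SHIFT 30          /* hypothetical pipe-select bit */

    static int decode_active_pipe(unsigned int dp)
    {
        if (!(dp & FAKE_PORT_EN))
            return -1;                       /* INVALID_PIPE */
        return (dp >> FAKE_PIPE_SHIFT) & 1;  /* 0 = pipe A, 1 = pipe B */
    }

    int main(void)
    {
        printf("%d\n", decode_active_pipe(0));                           /* -1 */
        printf("%d\n", decode_active_pipe(FAKE_PORT_EN | (1u << 30)));   /* 1 */
        return 0;
    }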
@@ -4879,7 +4976,7 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port) | |||
4879 | if (INTEL_GEN(dev_priv) < 5) | 4976 | if (INTEL_GEN(dev_priv) < 5) |
4880 | return false; | 4977 | return false; |
4881 | 4978 | ||
4882 | if (port == PORT_A) | 4979 | if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) |
4883 | return true; | 4980 | return true; |
4884 | 4981 | ||
4885 | return intel_bios_is_port_edp(dev_priv, port); | 4982 | return intel_bios_is_port_edp(dev_priv, port); |
@@ -4926,7 +5023,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, | |||
4926 | 5023 | ||
4927 | pp_on = I915_READ(regs.pp_on); | 5024 | pp_on = I915_READ(regs.pp_on); |
4928 | pp_off = I915_READ(regs.pp_off); | 5025 | pp_off = I915_READ(regs.pp_off); |
4929 | if (!IS_BROXTON(dev_priv)) { | 5026 | if (!IS_GEN9_LP(dev_priv)) { |
4930 | I915_WRITE(regs.pp_ctrl, pp_ctl); | 5027 | I915_WRITE(regs.pp_ctrl, pp_ctl); |
4931 | pp_div = I915_READ(regs.pp_div); | 5028 | pp_div = I915_READ(regs.pp_div); |
4932 | } | 5029 | } |
@@ -4944,7 +5041,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, | |||
4944 | seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> | 5041 | seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> |
4945 | PANEL_POWER_DOWN_DELAY_SHIFT; | 5042 | PANEL_POWER_DOWN_DELAY_SHIFT; |
4946 | 5043 | ||
4947 | if (IS_BROXTON(dev_priv)) { | 5044 | if (IS_GEN9_LP(dev_priv)) { |
4948 | u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> | 5045 | u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> |
4949 | BXT_POWER_CYCLE_DELAY_SHIFT; | 5046 | BXT_POWER_CYCLE_DELAY_SHIFT; |
4950 | if (tmp > 0) | 5047 | if (tmp > 0) |
@@ -5101,7 +5198,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | |||
5101 | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); | 5198 | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); |
5102 | /* Compute the divisor for the pp clock, simply match the Bspec | 5199 | /* Compute the divisor for the pp clock, simply match the Bspec |
5103 | * formula. */ | 5200 | * formula. */ |
5104 | if (IS_BROXTON(dev_priv)) { | 5201 | if (IS_GEN9_LP(dev_priv)) { |
5105 | pp_div = I915_READ(regs.pp_ctrl); | 5202 | pp_div = I915_READ(regs.pp_ctrl); |
5106 | pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; | 5203 | pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; |
5107 | pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) | 5204 | pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) |
@@ -5127,7 +5224,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | |||
5127 | 5224 | ||
5128 | I915_WRITE(regs.pp_on, pp_on); | 5225 | I915_WRITE(regs.pp_on, pp_on); |
5129 | I915_WRITE(regs.pp_off, pp_off); | 5226 | I915_WRITE(regs.pp_off, pp_off); |
5130 | if (IS_BROXTON(dev_priv)) | 5227 | if (IS_GEN9_LP(dev_priv)) |
5131 | I915_WRITE(regs.pp_ctrl, pp_div); | 5228 | I915_WRITE(regs.pp_ctrl, pp_div); |
5132 | else | 5229 | else |
5133 | I915_WRITE(regs.pp_div, pp_div); | 5230 | I915_WRITE(regs.pp_div, pp_div); |
@@ -5135,7 +5232,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | |||
5135 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", | 5232 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", |
5136 | I915_READ(regs.pp_on), | 5233 | I915_READ(regs.pp_on), |
5137 | I915_READ(regs.pp_off), | 5234 | I915_READ(regs.pp_off), |
5138 | IS_BROXTON(dev_priv) ? | 5235 | IS_GEN9_LP(dev_priv) ? |
5139 | (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : | 5236 | (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : |
5140 | I915_READ(regs.pp_div)); | 5237 | I915_READ(regs.pp_div)); |
5141 | } | 5238 | } |
@@ -5515,7 +5612,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector, | |||
5515 | } | 5612 | } |
5516 | 5613 | ||
5517 | downclock_mode = intel_find_panel_downclock | 5614 | downclock_mode = intel_find_panel_downclock |
5518 | (dev, fixed_mode, connector); | 5615 | (dev_priv, fixed_mode, connector); |
5519 | 5616 | ||
5520 | if (!downclock_mode) { | 5617 | if (!downclock_mode) { |
5521 | DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); | 5618 | DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); |
@@ -5624,10 +5721,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
5624 | * If the current pipe isn't valid, try the PPS pipe, and if that | 5721 | * If the current pipe isn't valid, try the PPS pipe, and if that |
5625 | * fails just assume pipe A. | 5722 | * fails just assume pipe A. |
5626 | */ | 5723 | */ |
5627 | if (IS_CHERRYVIEW(dev_priv)) | 5724 | pipe = vlv_active_pipe(intel_dp); |
5628 | pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP); | ||
5629 | else | ||
5630 | pipe = PORT_TO_PIPE(intel_dp->DP); | ||
5631 | 5725 | ||
5632 | if (pipe != PIPE_A && pipe != PIPE_B) | 5726 | if (pipe != PIPE_A && pipe != PIPE_B) |
5633 | pipe = intel_dp->pps_pipe; | 5727 | pipe = intel_dp->pps_pipe; |
@@ -5676,6 +5770,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
5676 | return false; | 5770 | return false; |
5677 | 5771 | ||
5678 | intel_dp->pps_pipe = INVALID_PIPE; | 5772 | intel_dp->pps_pipe = INVALID_PIPE; |
5773 | intel_dp->active_pipe = INVALID_PIPE; | ||
5679 | 5774 | ||
5680 | /* intel_dp vfuncs */ | 5775 | /* intel_dp vfuncs */ |
5681 | if (INTEL_GEN(dev_priv) >= 9) | 5776 | if (INTEL_GEN(dev_priv) >= 9) |
@@ -5704,6 +5799,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
5704 | else | 5799 | else |
5705 | type = DRM_MODE_CONNECTOR_DisplayPort; | 5800 | type = DRM_MODE_CONNECTOR_DisplayPort; |
5706 | 5801 | ||
5802 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
5803 | intel_dp->active_pipe = vlv_active_pipe(intel_dp); | ||
5804 | |||
5707 | /* | 5805 | /* |
5708 | * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but | 5806 | * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but |
5709 | * for DP the encoder type can be set by the caller to | 5807 | * for DP the encoder type can be set by the caller to |
@@ -5793,11 +5891,10 @@ fail: | |||
5793 | return false; | 5891 | return false; |
5794 | } | 5892 | } |
5795 | 5893 | ||
5796 | bool intel_dp_init(struct drm_device *dev, | 5894 | bool intel_dp_init(struct drm_i915_private *dev_priv, |
5797 | i915_reg_t output_reg, | 5895 | i915_reg_t output_reg, |
5798 | enum port port) | 5896 | enum port port) |
5799 | { | 5897 | { |
5800 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
5801 | struct intel_digital_port *intel_dig_port; | 5898 | struct intel_digital_port *intel_dig_port; |
5802 | struct intel_encoder *intel_encoder; | 5899 | struct intel_encoder *intel_encoder; |
5803 | struct drm_encoder *encoder; | 5900 | struct drm_encoder *encoder; |
@@ -5814,8 +5911,9 @@ bool intel_dp_init(struct drm_device *dev, | |||
5814 | intel_encoder = &intel_dig_port->base; | 5911 | intel_encoder = &intel_dig_port->base; |
5815 | encoder = &intel_encoder->base; | 5912 | encoder = &intel_encoder->base; |
5816 | 5913 | ||
5817 | if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, | 5914 | if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, |
5818 | DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) | 5915 | &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, |
5916 | "DP %c", port_name(port))) | ||
5819 | goto err_encoder_init; | 5917 | goto err_encoder_init; |
5820 | 5918 | ||
5821 | intel_encoder->compute_config = intel_dp_compute_config; | 5919 | intel_encoder->compute_config = intel_dp_compute_config; |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index b029d1026a28..205fe4748ec5 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -37,6 +37,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
37 | struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); | 37 | struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); |
38 | struct intel_digital_port *intel_dig_port = intel_mst->primary; | 38 | struct intel_digital_port *intel_dig_port = intel_mst->primary; |
39 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 39 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
40 | struct intel_connector *connector = | ||
41 | to_intel_connector(conn_state->connector); | ||
40 | struct drm_atomic_state *state; | 42 | struct drm_atomic_state *state; |
41 | int bpp; | 43 | int bpp; |
42 | int lane_count, slots; | 44 | int lane_count, slots; |
@@ -58,6 +60,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
58 | 60 | ||
59 | state = pipe_config->base.state; | 61 | state = pipe_config->base.state; |
60 | 62 | ||
63 | if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port)) | ||
64 | pipe_config->has_audio = true; | ||
61 | mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); | 65 | mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); |
62 | 66 | ||
63 | pipe_config->pbn = mst_pbn; | 67 | pipe_config->pbn = mst_pbn; |
@@ -83,6 +87,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, | |||
83 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 87 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
84 | struct intel_connector *connector = | 88 | struct intel_connector *connector = |
85 | to_intel_connector(old_conn_state->connector); | 89 | to_intel_connector(old_conn_state->connector); |
90 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
86 | int ret; | 91 | int ret; |
87 | 92 | ||
88 | DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); | 93 | DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); |
@@ -93,6 +98,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, | |||
93 | if (ret) { | 98 | if (ret) { |
94 | DRM_ERROR("failed to update payload %d\n", ret); | 99 | DRM_ERROR("failed to update payload %d\n", ret); |
95 | } | 100 | } |
101 | if (old_crtc_state->has_audio) { | ||
102 | intel_audio_codec_disable(encoder); | ||
103 | intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); | ||
104 | } | ||
96 | } | 105 | } |
97 | 106 | ||
98 | static void intel_mst_post_disable_dp(struct intel_encoder *encoder, | 107 | static void intel_mst_post_disable_dp(struct intel_encoder *encoder, |
@@ -205,6 +214,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, | |||
205 | ret = drm_dp_check_act_status(&intel_dp->mst_mgr); | 214 | ret = drm_dp_check_act_status(&intel_dp->mst_mgr); |
206 | 215 | ||
207 | ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); | 216 | ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); |
217 | if (pipe_config->has_audio) { | ||
218 | intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); | ||
219 | intel_audio_codec_enable(encoder, pipe_config, conn_state); | ||
220 | } | ||
208 | } | 221 | } |
209 | 222 | ||
210 | static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, | 223 | static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, |
@@ -227,6 +240,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, | |||
227 | enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; | 240 | enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; |
228 | u32 temp, flags = 0; | 241 | u32 temp, flags = 0; |
229 | 242 | ||
243 | pipe_config->has_audio = | ||
244 | intel_ddi_is_audio_enabled(dev_priv, crtc); | ||
245 | |||
230 | temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); | 246 | temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); |
231 | if (temp & TRANS_DDI_PHSYNC) | 247 | if (temp & TRANS_DDI_PHSYNC) |
232 | flags |= DRM_MODE_FLAG_PHSYNC; | 248 | flags |= DRM_MODE_FLAG_PHSYNC; |
@@ -334,7 +350,17 @@ static enum drm_mode_status | |||
334 | intel_dp_mst_mode_valid(struct drm_connector *connector, | 350 | intel_dp_mst_mode_valid(struct drm_connector *connector, |
335 | struct drm_display_mode *mode) | 351 | struct drm_display_mode *mode) |
336 | { | 352 | { |
353 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
354 | struct intel_dp *intel_dp = intel_connector->mst_port; | ||
337 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | 355 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; |
356 | int bpp = 24; /* MST uses fixed bpp */ | ||
357 | int max_rate, mode_rate, max_lanes, max_link_clock; | ||
358 | |||
359 | max_link_clock = intel_dp_max_link_rate(intel_dp); | ||
360 | max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); | ||
361 | |||
362 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); | ||
363 | mode_rate = intel_dp_link_required(mode->clock, bpp); | ||
338 | 364 | ||
339 | /* TODO - validate mode against available PBN for link */ | 365 | /* TODO - validate mode against available PBN for link */ |
340 | if (mode->clock < 10000) | 366 | if (mode->clock < 10000) |
@@ -343,7 +369,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, | |||
343 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) | 369 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) |
344 | return MODE_H_ILLEGAL; | 370 | return MODE_H_ILLEGAL; |
345 | 371 | ||
346 | if (mode->clock > max_dotclk) | 372 | if (mode_rate > max_rate || mode->clock > max_dotclk) |
347 | return MODE_CLOCK_HIGH; | 373 | return MODE_CLOCK_HIGH; |
348 | 374 | ||
349 | return MODE_OK; | 375 | return MODE_OK; |
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 7a8e82dabbf2..09b670929786 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c | |||
@@ -131,6 +131,18 @@ struct bxt_ddi_phy_info { | |||
131 | enum dpio_phy rcomp_phy; | 131 | enum dpio_phy rcomp_phy; |
132 | 132 | ||
133 | /** | 133 | /** |
134 | * @reset_delay: delay in us to wait before setting the common reset | ||
135 | * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy. | ||
136 | */ | ||
137 | int reset_delay; | ||
138 | |||
139 | /** | ||
140 | * @pwron_mask: Mask with the appropriate bit set that would cause the | ||
141 | * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON. | ||
142 | */ | ||
143 | u32 pwron_mask; | ||
144 | |||
145 | /** | ||
134 | * @channel: struct containing per channel information. | 146 | * @channel: struct containing per channel information. |
135 | */ | 147 | */ |
136 | struct { | 148 | struct { |
@@ -145,6 +157,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { | |||
145 | [DPIO_PHY0] = { | 157 | [DPIO_PHY0] = { |
146 | .dual_channel = true, | 158 | .dual_channel = true, |
147 | .rcomp_phy = DPIO_PHY1, | 159 | .rcomp_phy = DPIO_PHY1, |
160 | .pwron_mask = BIT(0), | ||
148 | 161 | ||
149 | .channel = { | 162 | .channel = { |
150 | [DPIO_CH0] = { .port = PORT_B }, | 163 | [DPIO_CH0] = { .port = PORT_B }, |
@@ -154,6 +167,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { | |||
154 | [DPIO_PHY1] = { | 167 | [DPIO_PHY1] = { |
155 | .dual_channel = false, | 168 | .dual_channel = false, |
156 | .rcomp_phy = -1, | 169 | .rcomp_phy = -1, |
170 | .pwron_mask = BIT(1), | ||
157 | 171 | ||
158 | .channel = { | 172 | .channel = { |
159 | [DPIO_CH0] = { .port = PORT_A }, | 173 | [DPIO_CH0] = { .port = PORT_A }, |
@@ -161,20 +175,77 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { | |||
161 | }, | 175 | }, |
162 | }; | 176 | }; |
163 | 177 | ||
178 | static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { | ||
179 | [DPIO_PHY0] = { | ||
180 | .dual_channel = false, | ||
181 | .rcomp_phy = DPIO_PHY1, | ||
182 | .pwron_mask = BIT(0), | ||
183 | .reset_delay = 20, | ||
184 | |||
185 | .channel = { | ||
186 | [DPIO_CH0] = { .port = PORT_B }, | ||
187 | } | ||
188 | }, | ||
189 | [DPIO_PHY1] = { | ||
190 | .dual_channel = false, | ||
191 | .rcomp_phy = -1, | ||
192 | .pwron_mask = BIT(3), | ||
193 | .reset_delay = 20, | ||
194 | |||
195 | .channel = { | ||
196 | [DPIO_CH0] = { .port = PORT_A }, | ||
197 | } | ||
198 | }, | ||
199 | [DPIO_PHY2] = { | ||
200 | .dual_channel = false, | ||
201 | .rcomp_phy = DPIO_PHY1, | ||
202 | .pwron_mask = BIT(1), | ||
203 | .reset_delay = 20, | ||
204 | |||
205 | .channel = { | ||
206 | [DPIO_CH0] = { .port = PORT_C }, | ||
207 | } | ||
208 | }, | ||
209 | }; | ||
210 | |||
164 | static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info) | 211 | static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info) |
165 | { | 212 | { |
166 | return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) | | 213 | return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) | |
167 | BIT(phy_info->channel[DPIO_CH0].port); | 214 | BIT(phy_info->channel[DPIO_CH0].port); |
168 | } | 215 | } |
169 | 216 | ||
170 | void bxt_port_to_phy_channel(enum port port, | 217 | static const struct bxt_ddi_phy_info * |
218 | bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) | ||
219 | { | ||
220 | if (IS_GEMINILAKE(dev_priv)) { | ||
221 | *count = ARRAY_SIZE(glk_ddi_phy_info); | ||
222 | return glk_ddi_phy_info; | ||
223 | } else { | ||
224 | *count = ARRAY_SIZE(bxt_ddi_phy_info); | ||
225 | return bxt_ddi_phy_info; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | static const struct bxt_ddi_phy_info * | ||
230 | bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy) | ||
231 | { | ||
232 | int count; | ||
233 | const struct bxt_ddi_phy_info *phy_list = | ||
234 | bxt_get_phy_list(dev_priv, &count); | ||
235 | |||
236 | return &phy_list[phy]; | ||
237 | } | ||
238 | |||
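The BXT/GLK split introduced here is a table dispatch: pick the platform's phy table once, then index it everywhere. A reduced sketch (the struct fields are stand-ins for bxt_ddi_phy_info; the pwron bit numbers and the 20 us GLK reset delay are taken from the diff):

    #include <stdio.h>

    struct phy_info { int pwron_bit; int reset_delay_us; };

    static const struct phy_info bxt_phys[] = { { 0, 0 }, { 1, 0 } };
    static const struct phy_info glk_phys[] = { { 0, 20 }, { 3, 20 }, { 1, 20 } };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const struct phy_info *get_phy_list(int is_glk, int *count)
    {
        if (is_glk) {
            *count = ARRAY_SIZE(glk_phys);
            return glk_phys;
        }
        *count = ARRAY_SIZE(bxt_phys);
        return bxt_phys;
    }

    int main(void)
    {
        int count;
        const struct phy_info *phys = get_phy_list(1, &count);

        printf("glk: %d phys, phy1 pwron bit %d, reset delay %d us\n",
               count, phys[1].pwron_bit, phys[1].reset_delay_us);
        return 0;
    }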
239 | void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, | ||
171 | enum dpio_phy *phy, enum dpio_channel *ch) | 240 | enum dpio_phy *phy, enum dpio_channel *ch) |
172 | { | 241 | { |
173 | const struct bxt_ddi_phy_info *phy_info; | 242 | const struct bxt_ddi_phy_info *phy_info, *phys; |
174 | int i; | 243 | int i, count; |
175 | 244 | ||
176 | for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) { | 245 | phys = bxt_get_phy_list(dev_priv, &count); |
177 | phy_info = &bxt_ddi_phy_info[i]; | 246 | |
247 | for (i = 0; i < count; i++) { | ||
248 | phy_info = &phys[i]; | ||
178 | 249 | ||
179 | if (port == phy_info->channel[DPIO_CH0].port) { | 250 | if (port == phy_info->channel[DPIO_CH0].port) { |
180 | *phy = i; | 251 | *phy = i; |
@@ -203,7 +274,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, | |||
203 | enum dpio_phy phy; | 274 | enum dpio_phy phy; |
204 | enum dpio_channel ch; | 275 | enum dpio_channel ch; |
205 | 276 | ||
206 | bxt_port_to_phy_channel(port, &phy, &ch); | 277 | bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); |
207 | 278 | ||
208 | /* | 279 | /* |
209 | * While we write to the group register to program all lanes at once we | 280 | * While we write to the group register to program all lanes at once we |
@@ -241,10 +312,12 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, | |||
241 | bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, | 312 | bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, |
242 | enum dpio_phy phy) | 313 | enum dpio_phy phy) |
243 | { | 314 | { |
244 | const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; | 315 | const struct bxt_ddi_phy_info *phy_info; |
245 | enum port port; | 316 | enum port port; |
246 | 317 | ||
247 | if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) | 318 | phy_info = bxt_get_phy_info(dev_priv, phy); |
319 | |||
320 | if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) | ||
248 | return false; | 321 | return false; |
249 | 322 | ||
250 | if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) & | 323 | if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) & |
@@ -255,14 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, | |||
255 | return false; | 328 | return false; |
256 | } | 329 | } |
257 | 330 | ||
258 | if (phy_info->rcomp_phy == -1 && | ||
259 | !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) { | ||
260 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n", | ||
261 | phy); | ||
262 | |||
263 | return false; | ||
264 | } | ||
265 | |||
266 | if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { | 331 | if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { |
267 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n", | 332 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n", |
268 | phy); | 333 | phy); |
@@ -306,9 +371,11 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, | |||
306 | static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, | 371 | static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, |
307 | enum dpio_phy phy) | 372 | enum dpio_phy phy) |
308 | { | 373 | { |
309 | const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; | 374 | const struct bxt_ddi_phy_info *phy_info; |
310 | u32 val; | 375 | u32 val; |
311 | 376 | ||
377 | phy_info = bxt_get_phy_info(dev_priv, phy); | ||
378 | |||
312 | if (bxt_ddi_phy_is_enabled(dev_priv, phy)) { | 379 | if (bxt_ddi_phy_is_enabled(dev_priv, phy)) { |
313 | /* Still read out the GRC value for state verification */ | 380 | /* Still read out the GRC value for state verification */ |
314 | if (phy_info->rcomp_phy != -1) | 381 | if (phy_info->rcomp_phy != -1) |
@@ -317,7 +384,6 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, | |||
317 | if (bxt_ddi_phy_verify_state(dev_priv, phy)) { | 384 | if (bxt_ddi_phy_verify_state(dev_priv, phy)) { |
318 | DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " | 385 | DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " |
319 | "won't reprogram it\n", phy); | 386 | "won't reprogram it\n", phy); |
320 | |||
321 | return; | 387 | return; |
322 | } | 388 | } |
323 | 389 | ||
@@ -326,7 +392,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, | |||
326 | } | 392 | } |
327 | 393 | ||
328 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); | 394 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); |
329 | val |= GT_DISPLAY_POWER_ON(phy); | 395 | val |= phy_info->pwron_mask; |
330 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); | 396 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); |
331 | 397 | ||
332 | /* | 398 | /* |
@@ -367,6 +433,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, | |||
367 | 433 | ||
368 | if (phy_info->rcomp_phy != -1) { | 434 | if (phy_info->rcomp_phy != -1) { |
369 | uint32_t grc_code; | 435 | uint32_t grc_code; |
436 | |||
437 | bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy); | ||
438 | |||
370 | /* | 439 | /* |
371 | * PHY0 isn't connected to an RCOMP resistor so copy over | 440 | * PHY0 isn't connected to an RCOMP resistor so copy over |
372 | * the corresponding calibrated value from PHY1, and disable | 441 | * the corresponding calibrated value from PHY1, and disable |
@@ -384,31 +453,34 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, | |||
384 | I915_WRITE(BXT_PORT_REF_DW8(phy), val); | 453 | I915_WRITE(BXT_PORT_REF_DW8(phy), val); |
385 | } | 454 | } |
386 | 455 | ||
456 | if (phy_info->reset_delay) | ||
457 | udelay(phy_info->reset_delay); | ||
458 | |||
387 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); | 459 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); |
388 | val |= COMMON_RESET_DIS; | 460 | val |= COMMON_RESET_DIS; |
389 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); | 461 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); |
390 | |||
391 | if (phy_info->rcomp_phy == -1) | ||
392 | bxt_phy_wait_grc_done(dev_priv, phy); | ||
393 | |||
394 | } | 462 | } |
395 | 463 | ||
396 | void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) | 464 | void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) |
397 | { | 465 | { |
466 | const struct bxt_ddi_phy_info *phy_info; | ||
398 | uint32_t val; | 467 | uint32_t val; |
399 | 468 | ||
469 | phy_info = bxt_get_phy_info(dev_priv, phy); | ||
470 | |||
400 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); | 471 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); |
401 | val &= ~COMMON_RESET_DIS; | 472 | val &= ~COMMON_RESET_DIS; |
402 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); | 473 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); |
403 | 474 | ||
404 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); | 475 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); |
405 | val &= ~GT_DISPLAY_POWER_ON(phy); | 476 | val &= ~phy_info->pwron_mask; |
406 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); | 477 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); |
407 | } | 478 | } |
408 | 479 | ||
409 | void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) | 480 | void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) |
410 | { | 481 | { |
411 | const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; | 482 | const struct bxt_ddi_phy_info *phy_info = |
483 | bxt_get_phy_info(dev_priv, phy); | ||
412 | enum dpio_phy rcomp_phy = phy_info->rcomp_phy; | 484 | enum dpio_phy rcomp_phy = phy_info->rcomp_phy; |
413 | bool was_enabled; | 485 | bool was_enabled; |
414 | 486 | ||
@@ -461,10 +533,12 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, | |||
461 | bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, | 533 | bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, |
462 | enum dpio_phy phy) | 534 | enum dpio_phy phy) |
463 | { | 535 | { |
464 | const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; | 536 | const struct bxt_ddi_phy_info *phy_info; |
465 | uint32_t mask; | 537 | uint32_t mask; |
466 | bool ok; | 538 | bool ok; |
467 | 539 | ||
540 | phy_info = bxt_get_phy_info(dev_priv, phy); | ||
541 | |||
468 | #define _CHK(reg, mask, exp, fmt, ...) \ | 542 | #define _CHK(reg, mask, exp, fmt, ...) \ |
469 | __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ | 543 | __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ |
470 | ## __VA_ARGS__) | 544 | ## __VA_ARGS__) |
@@ -540,7 +614,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, | |||
540 | enum dpio_channel ch; | 614 | enum dpio_channel ch; |
541 | int lane; | 615 | int lane; |
542 | 616 | ||
543 | bxt_port_to_phy_channel(port, &phy, &ch); | 617 | bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); |
544 | 618 | ||
545 | for (lane = 0; lane < 4; lane++) { | 619 | for (lane = 0; lane < 4; lane++) { |
546 | u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane)); | 620 | u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane)); |
@@ -568,7 +642,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) | |||
568 | int lane; | 642 | int lane; |
569 | uint8_t mask; | 643 | uint8_t mask; |
570 | 644 | ||
571 | bxt_port_to_phy_channel(port, &phy, &ch); | 645 | bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); |
572 | 646 | ||
573 | mask = 0; | 647 | mask = 0; |
574 | for (lane = 0; lane < 4; lane++) { | 648 | for (lane = 0; lane < 4; lane++) { |
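Throughout intel_dpio_phy.c the per-PHY data is now obtained via bxt_get_phy_info(dev_priv, phy) rather than by indexing the static bxt_ddi_phy_info[] array, and bxt_port_to_phy_channel() grows a dev_priv argument, presumably because Geminilake (see the IS_GEMINILAKE hunks below) lays out its PHYs differently. The helper itself is introduced in an earlier hunk not shown on this page; a plausible sketch of its shape, assuming a separate GLK table exists:

    static const struct bxt_ddi_phy_info *
    bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
    {
            /* hypothetical: pick the platform's table, then index by phy */
            if (IS_GEMINILAKE(dev_priv))
                    return &glk_ddi_phy_info[phy];  /* assumed GLK table */

            return &bxt_ddi_phy_info[phy];
    }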
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 58a756f2f224..c92a2558beb4 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -23,6 +23,25 @@ | |||
23 | 23 | ||
24 | #include "intel_drv.h" | 24 | #include "intel_drv.h" |
25 | 25 | ||
26 | /** | ||
27 | * DOC: Display PLLs | ||
28 | * | ||
29 | * Display PLLs used for driving outputs vary by platform. While some have | ||
30 | * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL | ||
31 | * from a pool. In the latter scenario, it is possible that multiple pipes | ||
32 | * share a PLL if their configurations match. | ||
33 | * | ||
34 | * This file provides an abstraction over display PLLs. The function | ||
35 | * intel_shared_dpll_init() initializes the PLLs for the given platform. The | ||
36 | * users of a PLL are tracked and that tracking is integrated with the atomic | ||
37 | * modeset interface. During an atomic operation, a PLL can be requested for a | ||
38 | * given CRTC and encoder configuration by calling intel_get_shared_dpll() and | ||
39 | * a previously used PLL can be released with intel_release_shared_dpll(). | ||
40 | * Changes to the users are first staged in the atomic state, and then made | ||
41 | * effective by calling intel_shared_dpll_swap_state() during the atomic | ||
42 | * commit phase. | ||
43 | */ | ||
44 | |||
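A condensed, hypothetical view of the lifecycle this comment describes, using only the function names from this patch (crtc, crtc_state, encoder and state are assumed to be in scope; error handling and locking are elided):

    /* atomic check: stage a reference from the CRTC to a suitable PLL */
    pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
    if (!pll)
            return -EINVAL;         /* no PLL fits this configuration */

    /* atomic commit: make the staged configuration effective */
    intel_shared_dpll_swap_state(state);

    /* when the CRTC stops using the PLL: drop the staged reference */
    intel_release_shared_dpll(pll, crtc, state);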
26 | struct intel_shared_dpll * | 45 | struct intel_shared_dpll * |
27 | skl_find_link_pll(struct drm_i915_private *dev_priv, int clock) | 46 | skl_find_link_pll(struct drm_i915_private *dev_priv, int clock) |
28 | { | 47 | { |
@@ -38,11 +57,11 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock) | |||
38 | pll = &dev_priv->shared_dplls[i]; | 57 | pll = &dev_priv->shared_dplls[i]; |
39 | 58 | ||
40 | /* Only want to check enabled timings first */ | 59 | /* Only want to check enabled timings first */ |
41 | if (pll->config.crtc_mask == 0) | 60 | if (pll->state.crtc_mask == 0) |
42 | continue; | 61 | continue; |
43 | 62 | ||
44 | if (memcmp(&dpll_hw_state, &pll->config.hw_state, | 63 | if (memcmp(&dpll_hw_state, &pll->state.hw_state, |
45 | sizeof(pll->config.hw_state)) == 0) { | 64 | sizeof(pll->state.hw_state)) == 0) { |
46 | found = true; | 65 | found = true; |
47 | break; | 66 | break; |
48 | } | 67 | } |
@@ -52,8 +71,8 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock) | |||
52 | for (i = DPLL_ID_SKL_DPLL1; | 71 | for (i = DPLL_ID_SKL_DPLL1; |
53 | ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) { | 72 | ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) { |
54 | pll = &dev_priv->shared_dplls[i]; | 73 | pll = &dev_priv->shared_dplls[i]; |
55 | if (pll->config.crtc_mask == 0) { | 74 | if (pll->state.crtc_mask == 0) { |
56 | pll->config.hw_state = dpll_hw_state; | 75 | pll->state.hw_state = dpll_hw_state; |
57 | break; | 76 | break; |
58 | } | 77 | } |
59 | } | 78 | } |
@@ -61,6 +80,45 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock) | |||
61 | return pll; | 80 | return pll; |
62 | } | 81 | } |
63 | 82 | ||
83 | static void | ||
84 | intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, | ||
85 | struct intel_shared_dpll_state *shared_dpll) | ||
86 | { | ||
87 | enum intel_dpll_id i; | ||
88 | |||
89 | /* Copy shared dpll state */ | ||
90 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | ||
91 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | ||
92 | |||
93 | shared_dpll[i] = pll->state; | ||
94 | } | ||
95 | } | ||
96 | |||
97 | static struct intel_shared_dpll_state * | ||
98 | intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) | ||
99 | { | ||
100 | struct intel_atomic_state *state = to_intel_atomic_state(s); | ||
101 | |||
102 | WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); | ||
103 | |||
104 | if (!state->dpll_set) { | ||
105 | state->dpll_set = true; | ||
106 | |||
107 | intel_atomic_duplicate_dpll_state(to_i915(s->dev), | ||
108 | state->shared_dpll); | ||
109 | } | ||
110 | |||
111 | return state->shared_dpll; | ||
112 | } | ||
113 | |||
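intel_atomic_get_shared_dpll_state() duplicates the live PLL state into the atomic state only on first access, guarded by the dpll_set flag, so subsequent hunks can mutate the staged copy without touching pll->state. For example, as intel_reference_shared_dpll() below does:

    struct intel_shared_dpll_state *shared_dpll =
            intel_atomic_get_shared_dpll_state(crtc_state->base.state);

    /* only the staged copy changes; pll->state is untouched until swap */
    shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;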
114 | /** | ||
115 | * intel_get_shared_dpll_by_id - get a DPLL given its id | ||
116 | * @dev_priv: i915 device instance | ||
117 | * @id: pll id | ||
118 | * | ||
119 | * Returns: | ||
120 | * A pointer to the DPLL with @id | ||
121 | */ | ||
64 | struct intel_shared_dpll * | 122 | struct intel_shared_dpll * |
65 | intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, | 123 | intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, |
66 | enum intel_dpll_id id) | 124 | enum intel_dpll_id id) |
@@ -68,6 +126,14 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, | |||
68 | return &dev_priv->shared_dplls[id]; | 126 | return &dev_priv->shared_dplls[id]; |
69 | } | 127 | } |
70 | 128 | ||
129 | /** | ||
130 | * intel_get_shared_dpll_id - get the id of a DPLL | ||
131 | * @dev_priv: i915 device instance | ||
132 | * @pll: the DPLL | ||
133 | * | ||
134 | * Returns: | ||
135 | * The id of @pll | ||
136 | */ | ||
71 | enum intel_dpll_id | 137 | enum intel_dpll_id |
72 | intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, | 138 | intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, |
73 | struct intel_shared_dpll *pll) | 139 | struct intel_shared_dpll *pll) |
@@ -79,28 +145,6 @@ intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, | |||
79 | return (enum intel_dpll_id) (pll - dev_priv->shared_dplls); | 145 | return (enum intel_dpll_id) (pll - dev_priv->shared_dplls); |
80 | } | 146 | } |
81 | 147 | ||
82 | void | ||
83 | intel_shared_dpll_config_get(struct intel_shared_dpll_config *config, | ||
84 | struct intel_shared_dpll *pll, | ||
85 | struct intel_crtc *crtc) | ||
86 | { | ||
87 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
88 | enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll); | ||
89 | |||
90 | config[id].crtc_mask |= 1 << crtc->pipe; | ||
91 | } | ||
92 | |||
93 | void | ||
94 | intel_shared_dpll_config_put(struct intel_shared_dpll_config *config, | ||
95 | struct intel_shared_dpll *pll, | ||
96 | struct intel_crtc *crtc) | ||
97 | { | ||
98 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
99 | enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll); | ||
100 | |||
101 | config[id].crtc_mask &= ~(1 << crtc->pipe); | ||
102 | } | ||
103 | |||
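The two removed helpers are now open-coded at their only call sites; each reduces to a single bit operation on the staged state, as the later hunks in this file show:

    /* was intel_shared_dpll_config_get(), in intel_reference_shared_dpll(): */
    shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;

    /* was intel_shared_dpll_config_put(), in intel_release_shared_dpll(): */
    shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);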
104 | /* For ILK+ */ | 148 | /* For ILK+ */ |
105 | void assert_shared_dpll(struct drm_i915_private *dev_priv, | 149 | void assert_shared_dpll(struct drm_i915_private *dev_priv, |
106 | struct intel_shared_dpll *pll, | 150 | struct intel_shared_dpll *pll, |
@@ -118,6 +162,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, | |||
118 | pll->name, onoff(state), onoff(cur_state)); | 162 | pll->name, onoff(state), onoff(cur_state)); |
119 | } | 163 | } |
120 | 164 | ||
165 | /** | ||
166 | * intel_prepare_shared_dpll - call a dpll's prepare hook | ||
167 | * @crtc: CRTC which has a shared dpll | ||
168 | * | ||
169 | * This calls the PLL's prepare hook if it has one and if the PLL is not | ||
170 | * already enabled. The prepare hook is platform specific. | ||
171 | */ | ||
121 | void intel_prepare_shared_dpll(struct intel_crtc *crtc) | 172 | void intel_prepare_shared_dpll(struct intel_crtc *crtc) |
122 | { | 173 | { |
123 | struct drm_device *dev = crtc->base.dev; | 174 | struct drm_device *dev = crtc->base.dev; |
@@ -128,24 +179,22 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc) | |||
128 | return; | 179 | return; |
129 | 180 | ||
130 | mutex_lock(&dev_priv->dpll_lock); | 181 | mutex_lock(&dev_priv->dpll_lock); |
131 | WARN_ON(!pll->config.crtc_mask); | 182 | WARN_ON(!pll->state.crtc_mask); |
132 | if (!pll->active_mask) { | 183 | if (!pll->active_mask) { |
133 | DRM_DEBUG_DRIVER("setting up %s\n", pll->name); | 184 | DRM_DEBUG_DRIVER("setting up %s\n", pll->name); |
134 | WARN_ON(pll->on); | 185 | WARN_ON(pll->on); |
135 | assert_shared_dpll_disabled(dev_priv, pll); | 186 | assert_shared_dpll_disabled(dev_priv, pll); |
136 | 187 | ||
137 | pll->funcs.mode_set(dev_priv, pll); | 188 | pll->funcs.prepare(dev_priv, pll); |
138 | } | 189 | } |
139 | mutex_unlock(&dev_priv->dpll_lock); | 190 | mutex_unlock(&dev_priv->dpll_lock); |
140 | } | 191 | } |
141 | 192 | ||
142 | /** | 193 | /** |
143 | * intel_enable_shared_dpll - enable PCH PLL | 194 | * intel_enable_shared_dpll - enable a CRTC's shared DPLL |
144 | * @dev_priv: i915 private structure | 195 | * @crtc: CRTC which has a shared DPLL |
145 | * @pipe: pipe PLL to enable | ||
146 | * | 196 | * |
147 | * The PCH PLL needs to be enabled before the PCH transcoder, since it | 197 | * Enable the shared DPLL used by @crtc. |
148 | * drives the transcoder clock. | ||
149 | */ | 198 | */ |
150 | void intel_enable_shared_dpll(struct intel_crtc *crtc) | 199 | void intel_enable_shared_dpll(struct intel_crtc *crtc) |
151 | { | 200 | { |
@@ -161,7 +210,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc) | |||
161 | mutex_lock(&dev_priv->dpll_lock); | 210 | mutex_lock(&dev_priv->dpll_lock); |
162 | old_mask = pll->active_mask; | 211 | old_mask = pll->active_mask; |
163 | 212 | ||
164 | if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) || | 213 | if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) || |
165 | WARN_ON(pll->active_mask & crtc_mask)) | 214 | WARN_ON(pll->active_mask & crtc_mask)) |
166 | goto out; | 215 | goto out; |
167 | 216 | ||
@@ -186,6 +235,12 @@ out: | |||
186 | mutex_unlock(&dev_priv->dpll_lock); | 235 | mutex_unlock(&dev_priv->dpll_lock); |
187 | } | 236 | } |
188 | 237 | ||
238 | /** | ||
239 | * intel_disable_shared_dpll - disable a CRTC's shared DPLL | ||
240 | * @crtc: CRTC which has a shared DPLL | ||
241 | * | ||
242 | * Disable the shared DPLL used by @crtc. | ||
243 | */ | ||
189 | void intel_disable_shared_dpll(struct intel_crtc *crtc) | 244 | void intel_disable_shared_dpll(struct intel_crtc *crtc) |
190 | { | 245 | { |
191 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 246 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
@@ -230,7 +285,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc, | |||
230 | { | 285 | { |
231 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 286 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
232 | struct intel_shared_dpll *pll; | 287 | struct intel_shared_dpll *pll; |
233 | struct intel_shared_dpll_config *shared_dpll; | 288 | struct intel_shared_dpll_state *shared_dpll; |
234 | enum intel_dpll_id i; | 289 | enum intel_dpll_id i; |
235 | 290 | ||
236 | shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); | 291 | shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); |
@@ -270,7 +325,7 @@ static void | |||
270 | intel_reference_shared_dpll(struct intel_shared_dpll *pll, | 325 | intel_reference_shared_dpll(struct intel_shared_dpll *pll, |
271 | struct intel_crtc_state *crtc_state) | 326 | struct intel_crtc_state *crtc_state) |
272 | { | 327 | { |
273 | struct intel_shared_dpll_config *shared_dpll; | 328 | struct intel_shared_dpll_state *shared_dpll; |
274 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | 329 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
275 | enum intel_dpll_id i = pll->id; | 330 | enum intel_dpll_id i = pll->id; |
276 | 331 | ||
@@ -284,13 +339,24 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll, | |||
284 | DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, | 339 | DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, |
285 | pipe_name(crtc->pipe)); | 340 | pipe_name(crtc->pipe)); |
286 | 341 | ||
287 | intel_shared_dpll_config_get(shared_dpll, pll, crtc); | 342 | shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe; |
288 | } | 343 | } |
289 | 344 | ||
290 | void intel_shared_dpll_commit(struct drm_atomic_state *state) | 345 | /** |
346 | * intel_shared_dpll_swap_state - make atomic DPLL configuration effective | ||
347 | * @state: atomic state | ||
348 | * | ||
349 | * This is the dpll version of drm_atomic_helper_swap_state() since the | ||
350 | * helper does not handle driver-specific global state. | ||
351 | * | ||
352 | * For consistency with atomic helpers this function does a complete swap, | ||
353 | * i.e. it also puts the current state into @state, even though there is no | ||
354 | * need for that at this moment. | ||
355 | */ | ||
356 | void intel_shared_dpll_swap_state(struct drm_atomic_state *state) | ||
291 | { | 357 | { |
292 | struct drm_i915_private *dev_priv = to_i915(state->dev); | 358 | struct drm_i915_private *dev_priv = to_i915(state->dev); |
293 | struct intel_shared_dpll_config *shared_dpll; | 359 | struct intel_shared_dpll_state *shared_dpll; |
294 | struct intel_shared_dpll *pll; | 360 | struct intel_shared_dpll *pll; |
295 | enum intel_dpll_id i; | 361 | enum intel_dpll_id i; |
296 | 362 | ||
@@ -299,8 +365,13 @@ void intel_shared_dpll_commit(struct drm_atomic_state *state) | |||
299 | 365 | ||
300 | shared_dpll = to_intel_atomic_state(state)->shared_dpll; | 366 | shared_dpll = to_intel_atomic_state(state)->shared_dpll; |
301 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 367 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
368 | struct intel_shared_dpll_state tmp; | ||
369 | |||
302 | pll = &dev_priv->shared_dplls[i]; | 370 | pll = &dev_priv->shared_dplls[i]; |
303 | pll->config = shared_dpll[i]; | 371 | |
372 | tmp = pll->state; | ||
373 | pll->state = shared_dpll[i]; | ||
374 | shared_dpll[i] = tmp; | ||
304 | } | 375 | } |
305 | } | 376 | } |
306 | 377 | ||
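Unlike the old intel_shared_dpll_commit(), which only copied the staged state over the live one, the per-PLL body above is a true exchange:

    tmp = pll->state;            /* save the live state                */
    pll->state = shared_dpll[i]; /* the staged state becomes live      */
    shared_dpll[i] = tmp;        /* the old live state lands in @state */

so after commit the atomic state holds the previous configuration, matching drm_atomic_helper_swap_state() semantics as the kernel-doc notes.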
@@ -323,11 +394,11 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, | |||
323 | return val & DPLL_VCO_ENABLE; | 394 | return val & DPLL_VCO_ENABLE; |
324 | } | 395 | } |
325 | 396 | ||
326 | static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, | 397 | static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv, |
327 | struct intel_shared_dpll *pll) | 398 | struct intel_shared_dpll *pll) |
328 | { | 399 | { |
329 | I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0); | 400 | I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0); |
330 | I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1); | 401 | I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1); |
331 | } | 402 | } |
332 | 403 | ||
333 | static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) | 404 | static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) |
@@ -349,7 +420,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, | |||
349 | /* PCH refclock must be enabled first */ | 420 | /* PCH refclock must be enabled first */ |
350 | ibx_assert_pch_refclk_enabled(dev_priv); | 421 | ibx_assert_pch_refclk_enabled(dev_priv); |
351 | 422 | ||
352 | I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); | 423 | I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll); |
353 | 424 | ||
354 | /* Wait for the clocks to stabilize. */ | 425 | /* Wait for the clocks to stabilize. */ |
355 | POSTING_READ(PCH_DPLL(pll->id)); | 426 | POSTING_READ(PCH_DPLL(pll->id)); |
@@ -360,7 +431,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, | |||
360 | * | 431 | * |
361 | * So write it again. | 432 | * So write it again. |
362 | */ | 433 | */ |
363 | I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); | 434 | I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll); |
364 | POSTING_READ(PCH_DPLL(pll->id)); | 435 | POSTING_READ(PCH_DPLL(pll->id)); |
365 | udelay(200); | 436 | udelay(200); |
366 | } | 437 | } |
@@ -412,8 +483,19 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
412 | return pll; | 483 | return pll; |
413 | } | 484 | } |
414 | 485 | ||
486 | static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, | ||
487 | struct intel_dpll_hw_state *hw_state) | ||
488 | { | ||
489 | DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " | ||
490 | "fp0: 0x%x, fp1: 0x%x\n", | ||
491 | hw_state->dpll, | ||
492 | hw_state->dpll_md, | ||
493 | hw_state->fp0, | ||
494 | hw_state->fp1); | ||
495 | } | ||
496 | |||
415 | static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { | 497 | static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { |
416 | .mode_set = ibx_pch_dpll_mode_set, | 498 | .prepare = ibx_pch_dpll_prepare, |
417 | .enable = ibx_pch_dpll_enable, | 499 | .enable = ibx_pch_dpll_enable, |
418 | .disable = ibx_pch_dpll_disable, | 500 | .disable = ibx_pch_dpll_disable, |
419 | .get_hw_state = ibx_pch_dpll_get_hw_state, | 501 | .get_hw_state = ibx_pch_dpll_get_hw_state, |
@@ -422,7 +504,7 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { | |||
422 | static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, | 504 | static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, |
423 | struct intel_shared_dpll *pll) | 505 | struct intel_shared_dpll *pll) |
424 | { | 506 | { |
425 | I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); | 507 | I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll); |
426 | POSTING_READ(WRPLL_CTL(pll->id)); | 508 | POSTING_READ(WRPLL_CTL(pll->id)); |
427 | udelay(20); | 509 | udelay(20); |
428 | } | 510 | } |
@@ -430,7 +512,7 @@ static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, | |||
430 | static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, | 512 | static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, |
431 | struct intel_shared_dpll *pll) | 513 | struct intel_shared_dpll *pll) |
432 | { | 514 | { |
433 | I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); | 515 | I915_WRITE(SPLL_CTL, pll->state.hw_state.spll); |
434 | POSTING_READ(SPLL_CTL); | 516 | POSTING_READ(SPLL_CTL); |
435 | udelay(20); | 517 | udelay(20); |
436 | } | 518 | } |
@@ -798,6 +880,13 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
798 | return pll; | 880 | return pll; |
799 | } | 881 | } |
800 | 882 | ||
883 | static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, | ||
884 | struct intel_dpll_hw_state *hw_state) | ||
885 | { | ||
886 | DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", | ||
887 | hw_state->wrpll, hw_state->spll); | ||
888 | } | ||
889 | |||
801 | static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { | 890 | static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { |
802 | .enable = hsw_ddi_wrpll_enable, | 891 | .enable = hsw_ddi_wrpll_enable, |
803 | .disable = hsw_ddi_wrpll_disable, | 892 | .disable = hsw_ddi_wrpll_disable, |
@@ -873,7 +962,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, | |||
873 | 962 | ||
874 | val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) | | 963 | val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) | |
875 | DPLL_CTRL1_LINK_RATE_MASK(pll->id)); | 964 | DPLL_CTRL1_LINK_RATE_MASK(pll->id)); |
876 | val |= pll->config.hw_state.ctrl1 << (pll->id * 6); | 965 | val |= pll->state.hw_state.ctrl1 << (pll->id * 6); |
877 | 966 | ||
878 | I915_WRITE(DPLL_CTRL1, val); | 967 | I915_WRITE(DPLL_CTRL1, val); |
879 | POSTING_READ(DPLL_CTRL1); | 968 | POSTING_READ(DPLL_CTRL1); |
@@ -886,8 +975,8 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
886 | 975 | ||
887 | skl_ddi_pll_write_ctrl1(dev_priv, pll); | 976 | skl_ddi_pll_write_ctrl1(dev_priv, pll); |
888 | 977 | ||
889 | I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1); | 978 | I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1); |
890 | I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2); | 979 | I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2); |
891 | POSTING_READ(regs[pll->id].cfgcr1); | 980 | POSTING_READ(regs[pll->id].cfgcr1); |
892 | POSTING_READ(regs[pll->id].cfgcr2); | 981 | POSTING_READ(regs[pll->id].cfgcr2); |
893 | 982 | ||
@@ -1353,6 +1442,16 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
1353 | return pll; | 1442 | return pll; |
1354 | } | 1443 | } |
1355 | 1444 | ||
1445 | static void skl_dump_hw_state(struct drm_i915_private *dev_priv, | ||
1446 | struct intel_dpll_hw_state *hw_state) | ||
1447 | { | ||
1448 | DRM_DEBUG_KMS("dpll_hw_state: " | ||
1449 | "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", | ||
1450 | hw_state->ctrl1, | ||
1451 | hw_state->cfgcr1, | ||
1452 | hw_state->cfgcr2); | ||
1453 | } | ||
1454 | |||
1356 | static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { | 1455 | static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { |
1357 | .enable = skl_ddi_pll_enable, | 1456 | .enable = skl_ddi_pll_enable, |
1358 | .disable = skl_ddi_pll_disable, | 1457 | .disable = skl_ddi_pll_disable, |
@@ -1373,13 +1472,23 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1373 | enum dpio_phy phy; | 1472 | enum dpio_phy phy; |
1374 | enum dpio_channel ch; | 1473 | enum dpio_channel ch; |
1375 | 1474 | ||
1376 | bxt_port_to_phy_channel(port, &phy, &ch); | 1475 | bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); |
1377 | 1476 | ||
1378 | /* Non-SSC reference */ | 1477 | /* Non-SSC reference */ |
1379 | temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); | 1478 | temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); |
1380 | temp |= PORT_PLL_REF_SEL; | 1479 | temp |= PORT_PLL_REF_SEL; |
1381 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); | 1480 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); |
1382 | 1481 | ||
1482 | if (IS_GEMINILAKE(dev_priv)) { | ||
1483 | temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); | ||
1484 | temp |= PORT_PLL_POWER_ENABLE; | ||
1485 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); | ||
1486 | |||
1487 | if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & | ||
1488 | PORT_PLL_POWER_STATE), 200)) | ||
1489 | DRM_ERROR("Power state not set for PLL:%d\n", port); | ||
1490 | } | ||
1491 | |||
1383 | /* Disable 10 bit clock */ | 1492 | /* Disable 10 bit clock */ |
1384 | temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); | 1493 | temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); |
1385 | temp &= ~PORT_PLL_10BIT_CLK_ENABLE; | 1494 | temp &= ~PORT_PLL_10BIT_CLK_ENABLE; |
@@ -1388,31 +1497,31 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1388 | /* Write P1 & P2 */ | 1497 | /* Write P1 & P2 */ |
1389 | temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch)); | 1498 | temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch)); |
1390 | temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); | 1499 | temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); |
1391 | temp |= pll->config.hw_state.ebb0; | 1500 | temp |= pll->state.hw_state.ebb0; |
1392 | I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp); | 1501 | I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp); |
1393 | 1502 | ||
1394 | /* Write M2 integer */ | 1503 | /* Write M2 integer */ |
1395 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 0)); | 1504 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 0)); |
1396 | temp &= ~PORT_PLL_M2_MASK; | 1505 | temp &= ~PORT_PLL_M2_MASK; |
1397 | temp |= pll->config.hw_state.pll0; | 1506 | temp |= pll->state.hw_state.pll0; |
1398 | I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp); | 1507 | I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp); |
1399 | 1508 | ||
1400 | /* Write N */ | 1509 | /* Write N */ |
1401 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 1)); | 1510 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 1)); |
1402 | temp &= ~PORT_PLL_N_MASK; | 1511 | temp &= ~PORT_PLL_N_MASK; |
1403 | temp |= pll->config.hw_state.pll1; | 1512 | temp |= pll->state.hw_state.pll1; |
1404 | I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp); | 1513 | I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp); |
1405 | 1514 | ||
1406 | /* Write M2 fraction */ | 1515 | /* Write M2 fraction */ |
1407 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 2)); | 1516 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 2)); |
1408 | temp &= ~PORT_PLL_M2_FRAC_MASK; | 1517 | temp &= ~PORT_PLL_M2_FRAC_MASK; |
1409 | temp |= pll->config.hw_state.pll2; | 1518 | temp |= pll->state.hw_state.pll2; |
1410 | I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp); | 1519 | I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp); |
1411 | 1520 | ||
1412 | /* Write M2 fraction enable */ | 1521 | /* Write M2 fraction enable */ |
1413 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 3)); | 1522 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 3)); |
1414 | temp &= ~PORT_PLL_M2_FRAC_ENABLE; | 1523 | temp &= ~PORT_PLL_M2_FRAC_ENABLE; |
1415 | temp |= pll->config.hw_state.pll3; | 1524 | temp |= pll->state.hw_state.pll3; |
1416 | I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp); | 1525 | I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp); |
1417 | 1526 | ||
1418 | /* Write coeff */ | 1527 | /* Write coeff */ |
@@ -1420,24 +1529,24 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1420 | temp &= ~PORT_PLL_PROP_COEFF_MASK; | 1529 | temp &= ~PORT_PLL_PROP_COEFF_MASK; |
1421 | temp &= ~PORT_PLL_INT_COEFF_MASK; | 1530 | temp &= ~PORT_PLL_INT_COEFF_MASK; |
1422 | temp &= ~PORT_PLL_GAIN_CTL_MASK; | 1531 | temp &= ~PORT_PLL_GAIN_CTL_MASK; |
1423 | temp |= pll->config.hw_state.pll6; | 1532 | temp |= pll->state.hw_state.pll6; |
1424 | I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp); | 1533 | I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp); |
1425 | 1534 | ||
1426 | /* Write calibration val */ | 1535 | /* Write calibration val */ |
1427 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 8)); | 1536 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 8)); |
1428 | temp &= ~PORT_PLL_TARGET_CNT_MASK; | 1537 | temp &= ~PORT_PLL_TARGET_CNT_MASK; |
1429 | temp |= pll->config.hw_state.pll8; | 1538 | temp |= pll->state.hw_state.pll8; |
1430 | I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp); | 1539 | I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp); |
1431 | 1540 | ||
1432 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 9)); | 1541 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 9)); |
1433 | temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; | 1542 | temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; |
1434 | temp |= pll->config.hw_state.pll9; | 1543 | temp |= pll->state.hw_state.pll9; |
1435 | I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp); | 1544 | I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp); |
1436 | 1545 | ||
1437 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 10)); | 1546 | temp = I915_READ(BXT_PORT_PLL(phy, ch, 10)); |
1438 | temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; | 1547 | temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; |
1439 | temp &= ~PORT_PLL_DCO_AMP_MASK; | 1548 | temp &= ~PORT_PLL_DCO_AMP_MASK; |
1440 | temp |= pll->config.hw_state.pll10; | 1549 | temp |= pll->state.hw_state.pll10; |
1441 | I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp); | 1550 | I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp); |
1442 | 1551 | ||
1443 | /* Recalibrate with new settings */ | 1552 | /* Recalibrate with new settings */ |
@@ -1445,7 +1554,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1445 | temp |= PORT_PLL_RECALIBRATE; | 1554 | temp |= PORT_PLL_RECALIBRATE; |
1446 | I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); | 1555 | I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); |
1447 | temp &= ~PORT_PLL_10BIT_CLK_ENABLE; | 1556 | temp &= ~PORT_PLL_10BIT_CLK_ENABLE; |
1448 | temp |= pll->config.hw_state.ebb4; | 1557 | temp |= pll->state.hw_state.ebb4; |
1449 | I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); | 1558 | I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); |
1450 | 1559 | ||
1451 | /* Enable PLL */ | 1560 | /* Enable PLL */ |
@@ -1458,6 +1567,12 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1458 | 200)) | 1567 | 200)) |
1459 | DRM_ERROR("PLL %d not locked\n", port); | 1568 | DRM_ERROR("PLL %d not locked\n", port); |
1460 | 1569 | ||
1570 | if (IS_GEMINILAKE(dev_priv)) { | ||
1571 | temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch)); | ||
1572 | temp |= DCC_DELAY_RANGE_2; | ||
1573 | I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp); | ||
1574 | } | ||
1575 | |||
1461 | /* | 1576 | /* |
1462 | * While we write to the group register to program all lanes at once we | 1577 | * While we write to the group register to program all lanes at once we |
1463 | * can read only lane registers and we pick lanes 0/1 for that. | 1578 | * can read only lane registers and we pick lanes 0/1 for that. |
@@ -1465,7 +1580,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1465 | temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch)); | 1580 | temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch)); |
1466 | temp &= ~LANE_STAGGER_MASK; | 1581 | temp &= ~LANE_STAGGER_MASK; |
1467 | temp &= ~LANESTAGGER_STRAP_OVRD; | 1582 | temp &= ~LANESTAGGER_STRAP_OVRD; |
1468 | temp |= pll->config.hw_state.pcsdw12; | 1583 | temp |= pll->state.hw_state.pcsdw12; |
1469 | I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp); | 1584 | I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp); |
1470 | } | 1585 | } |
1471 | 1586 | ||
@@ -1479,6 +1594,16 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, | |||
1479 | temp &= ~PORT_PLL_ENABLE; | 1594 | temp &= ~PORT_PLL_ENABLE; |
1480 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); | 1595 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); |
1481 | POSTING_READ(BXT_PORT_PLL_ENABLE(port)); | 1596 | POSTING_READ(BXT_PORT_PLL_ENABLE(port)); |
1597 | |||
1598 | if (IS_GEMINILAKE(dev_priv)) { | ||
1599 | temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); | ||
1600 | temp &= ~PORT_PLL_POWER_ENABLE; | ||
1601 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); | ||
1602 | |||
1603 | if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) & | ||
1604 | PORT_PLL_POWER_STATE), 200)) | ||
1605 | DRM_ERROR("Power state not reset for PLL:%d\n", port); | ||
1606 | } | ||
1482 | } | 1607 | } |
1483 | 1608 | ||
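On Geminilake the PLL enable and disable paths now bracket the PLL with an explicit power request, and the two hunks above are mirror images. Condensed, with the register and bit names taken from the hunks:

    /* enable: request power, then poll for it */
    temp |= PORT_PLL_POWER_ENABLE;
    /* ... wait_for_us(val & PORT_PLL_POWER_STATE, 200) ... */

    /* disable: drop the request, then poll for power-down */
    temp &= ~PORT_PLL_POWER_ENABLE;
    /* ... wait_for_us(!(val & PORT_PLL_POWER_STATE), 200) ... */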
1484 | static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | 1609 | static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, |
@@ -1491,7 +1616,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
1491 | enum dpio_phy phy; | 1616 | enum dpio_phy phy; |
1492 | enum dpio_channel ch; | 1617 | enum dpio_channel ch; |
1493 | 1618 | ||
1494 | bxt_port_to_phy_channel(port, &phy, &ch); | 1619 | bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); |
1495 | 1620 | ||
1496 | if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) | 1621 | if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) |
1497 | return false; | 1622 | return false; |
@@ -1758,6 +1883,25 @@ bxt_get_dpll(struct intel_crtc *crtc, | |||
1758 | return pll; | 1883 | return pll; |
1759 | } | 1884 | } |
1760 | 1885 | ||
1886 | static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, | ||
1887 | struct intel_dpll_hw_state *hw_state) | ||
1888 | { | ||
1889 | DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, " | ||
1890 | "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " | ||
1891 | "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", | ||
1892 | hw_state->ebb0, | ||
1893 | hw_state->ebb4, | ||
1894 | hw_state->pll0, | ||
1895 | hw_state->pll1, | ||
1896 | hw_state->pll2, | ||
1897 | hw_state->pll3, | ||
1898 | hw_state->pll6, | ||
1899 | hw_state->pll8, | ||
1900 | hw_state->pll9, | ||
1901 | hw_state->pll10, | ||
1902 | hw_state->pcsdw12); | ||
1903 | } | ||
1904 | |||
1761 | static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { | 1905 | static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { |
1762 | .enable = bxt_ddi_pll_enable, | 1906 | .enable = bxt_ddi_pll_enable, |
1763 | .disable = bxt_ddi_pll_disable, | 1907 | .disable = bxt_ddi_pll_disable, |
@@ -1798,6 +1942,9 @@ struct intel_dpll_mgr { | |||
1798 | struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc, | 1942 | struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc, |
1799 | struct intel_crtc_state *crtc_state, | 1943 | struct intel_crtc_state *crtc_state, |
1800 | struct intel_encoder *encoder); | 1944 | struct intel_encoder *encoder); |
1945 | |||
1946 | void (*dump_hw_state)(struct drm_i915_private *dev_priv, | ||
1947 | struct intel_dpll_hw_state *hw_state); | ||
1801 | }; | 1948 | }; |
1802 | 1949 | ||
1803 | static const struct dpll_info pch_plls[] = { | 1950 | static const struct dpll_info pch_plls[] = { |
@@ -1809,6 +1956,7 @@ static const struct dpll_info pch_plls[] = { | |||
1809 | static const struct intel_dpll_mgr pch_pll_mgr = { | 1956 | static const struct intel_dpll_mgr pch_pll_mgr = { |
1810 | .dpll_info = pch_plls, | 1957 | .dpll_info = pch_plls, |
1811 | .get_dpll = ibx_get_dpll, | 1958 | .get_dpll = ibx_get_dpll, |
1959 | .dump_hw_state = ibx_dump_hw_state, | ||
1812 | }; | 1960 | }; |
1813 | 1961 | ||
1814 | static const struct dpll_info hsw_plls[] = { | 1962 | static const struct dpll_info hsw_plls[] = { |
@@ -1824,6 +1972,7 @@ static const struct dpll_info hsw_plls[] = { | |||
1824 | static const struct intel_dpll_mgr hsw_pll_mgr = { | 1972 | static const struct intel_dpll_mgr hsw_pll_mgr = { |
1825 | .dpll_info = hsw_plls, | 1973 | .dpll_info = hsw_plls, |
1826 | .get_dpll = hsw_get_dpll, | 1974 | .get_dpll = hsw_get_dpll, |
1975 | .dump_hw_state = hsw_dump_hw_state, | ||
1827 | }; | 1976 | }; |
1828 | 1977 | ||
1829 | static const struct dpll_info skl_plls[] = { | 1978 | static const struct dpll_info skl_plls[] = { |
@@ -1837,6 +1986,7 @@ static const struct dpll_info skl_plls[] = { | |||
1837 | static const struct intel_dpll_mgr skl_pll_mgr = { | 1986 | static const struct intel_dpll_mgr skl_pll_mgr = { |
1838 | .dpll_info = skl_plls, | 1987 | .dpll_info = skl_plls, |
1839 | .get_dpll = skl_get_dpll, | 1988 | .get_dpll = skl_get_dpll, |
1989 | .dump_hw_state = skl_dump_hw_state, | ||
1840 | }; | 1990 | }; |
1841 | 1991 | ||
1842 | static const struct dpll_info bxt_plls[] = { | 1992 | static const struct dpll_info bxt_plls[] = { |
@@ -1849,8 +1999,15 @@ static const struct dpll_info bxt_plls[] = { | |||
1849 | static const struct intel_dpll_mgr bxt_pll_mgr = { | 1999 | static const struct intel_dpll_mgr bxt_pll_mgr = { |
1850 | .dpll_info = bxt_plls, | 2000 | .dpll_info = bxt_plls, |
1851 | .get_dpll = bxt_get_dpll, | 2001 | .get_dpll = bxt_get_dpll, |
2002 | .dump_hw_state = bxt_dump_hw_state, | ||
1852 | }; | 2003 | }; |
1853 | 2004 | ||
2005 | /** | ||
2006 | * intel_shared_dpll_init - Initialize shared DPLLs | ||
2007 | * @dev: drm device | ||
2008 | * | ||
2009 | * Initialize shared DPLLs for @dev. | ||
2010 | */ | ||
1854 | void intel_shared_dpll_init(struct drm_device *dev) | 2011 | void intel_shared_dpll_init(struct drm_device *dev) |
1855 | { | 2012 | { |
1856 | struct drm_i915_private *dev_priv = to_i915(dev); | 2013 | struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -1860,7 +2017,7 @@ void intel_shared_dpll_init(struct drm_device *dev) | |||
1860 | 2017 | ||
1861 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 2018 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
1862 | dpll_mgr = &skl_pll_mgr; | 2019 | dpll_mgr = &skl_pll_mgr; |
1863 | else if (IS_BROXTON(dev_priv)) | 2020 | else if (IS_GEN9_LP(dev_priv)) |
1864 | dpll_mgr = &bxt_pll_mgr; | 2021 | dpll_mgr = &bxt_pll_mgr; |
1865 | else if (HAS_DDI(dev_priv)) | 2022 | else if (HAS_DDI(dev_priv)) |
1866 | dpll_mgr = &hsw_pll_mgr; | 2023 | dpll_mgr = &hsw_pll_mgr; |
@@ -1894,6 +2051,21 @@ void intel_shared_dpll_init(struct drm_device *dev) | |||
1894 | intel_ddi_pll_init(dev); | 2051 | intel_ddi_pll_init(dev); |
1895 | } | 2052 | } |
1896 | 2053 | ||
2054 | /** | ||
2055 | * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination | ||
2056 | * @crtc: CRTC | ||
2057 | * @crtc_state: atomic state for @crtc | ||
2058 | * @encoder: encoder | ||
2059 | * | ||
2060 | * Find an appropriate DPLL for the given CRTC and encoder combination. A | ||
2061 | * reference from the @crtc to the returned pll is registered in the atomic | ||
2062 | * state. That configuration is made effective by calling | ||
2063 | * intel_shared_dpll_swap_state(). The reference should be released by calling | ||
2064 | * intel_release_shared_dpll(). | ||
2065 | * | ||
2066 | * Returns: | ||
2067 | * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state. | ||
2068 | */ | ||
1897 | struct intel_shared_dpll * | 2069 | struct intel_shared_dpll * |
1898 | intel_get_shared_dpll(struct intel_crtc *crtc, | 2070 | intel_get_shared_dpll(struct intel_crtc *crtc, |
1899 | struct intel_crtc_state *crtc_state, | 2071 | struct intel_crtc_state *crtc_state, |
@@ -1907,3 +2079,48 @@ intel_get_shared_dpll(struct intel_crtc *crtc, | |||
1907 | 2079 | ||
1908 | return dpll_mgr->get_dpll(crtc, crtc_state, encoder); | 2080 | return dpll_mgr->get_dpll(crtc, crtc_state, encoder); |
1909 | } | 2081 | } |
2082 | |||
2083 | /** | ||
2084 | * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state | ||
2085 | * @dpll: dpll in use by @crtc | ||
2086 | * @crtc: crtc | ||
2087 | * @state: atomic state | ||
2088 | * | ||
2089 | * This function releases the reference from @crtc to @dpll from the | ||
2090 | * atomic @state. The new configuration is made effective by calling | ||
2091 | * intel_shared_dpll_swap_state(). | ||
2092 | */ | ||
2093 | void intel_release_shared_dpll(struct intel_shared_dpll *dpll, | ||
2094 | struct intel_crtc *crtc, | ||
2095 | struct drm_atomic_state *state) | ||
2096 | { | ||
2097 | struct intel_shared_dpll_state *shared_dpll_state; | ||
2098 | |||
2099 | shared_dpll_state = intel_atomic_get_shared_dpll_state(state); | ||
2100 | shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe); | ||
2101 | } | ||
2102 | |||
2103 | /** | ||
2104 | * intel_dpll_dump_hw_state - write hw_state to dmesg | ||
2105 | * @dev_priv: i915 drm device | ||
2106 | * @hw_state: hw state to be written to the log | ||
2107 | * | ||
2108 | * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS. | ||
2109 | */ | ||
2110 | void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, | ||
2111 | struct intel_dpll_hw_state *hw_state) | ||
2112 | { | ||
2113 | if (dev_priv->dpll_mgr) { | ||
2114 | dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state); | ||
2115 | } else { | ||
2116 | /* fallback for platforms that don't use the shared dpll | ||
2117 | * infrastructure | ||
2118 | */ | ||
2119 | DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " | ||
2120 | "fp0: 0x%x, fp1: 0x%x\n", | ||
2121 | hw_state->dpll, | ||
2122 | hw_state->dpll_md, | ||
2123 | hw_state->fp0, | ||
2124 | hw_state->fp1); | ||
2125 | } | ||
2126 | } | ||
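The dump helper dispatches through the platform's dpll_mgr when one is registered and falls back to the IBX-style format otherwise. A hypothetical call site (the real callers are outside the hunks shown here):

    /* log the hw state tracked for a PLL */
    intel_dpll_dump_hw_state(dev_priv, &pll->state.hw_state);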
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index f4385353bc11..af1497eb4f9c 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h | |||
@@ -40,32 +40,72 @@ struct intel_encoder; | |||
40 | struct intel_shared_dpll; | 40 | struct intel_shared_dpll; |
41 | struct intel_dpll_mgr; | 41 | struct intel_dpll_mgr; |
42 | 42 | ||
43 | /** | ||
44 | * enum intel_dpll_id - possible DPLL ids | ||
45 | * | ||
46 | * Enumeration of possible IDs for a DPLL. Real shared dpll ids must be >= 0. | ||
47 | */ | ||
43 | enum intel_dpll_id { | 48 | enum intel_dpll_id { |
44 | DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ | 49 | /** |
45 | /* real shared dpll ids must be >= 0 */ | 50 | * @DPLL_ID_PRIVATE: non-shared dpll in use |
51 | */ | ||
52 | DPLL_ID_PRIVATE = -1, | ||
53 | |||
54 | /** | ||
55 | * @DPLL_ID_PCH_PLL_A: DPLL A in ILK, SNB and IVB | ||
56 | */ | ||
46 | DPLL_ID_PCH_PLL_A = 0, | 57 | DPLL_ID_PCH_PLL_A = 0, |
58 | /** | ||
59 | * @DPLL_ID_PCH_PLL_B: DPLL B in ILK, SNB and IVB | ||
60 | */ | ||
47 | DPLL_ID_PCH_PLL_B = 1, | 61 | DPLL_ID_PCH_PLL_B = 1, |
48 | /* hsw/bdw */ | 62 | |
63 | |||
64 | /** | ||
65 | * @DPLL_ID_WRPLL1: HSW and BDW WRPLL1 | ||
66 | */ | ||
49 | DPLL_ID_WRPLL1 = 0, | 67 | DPLL_ID_WRPLL1 = 0, |
68 | /** | ||
69 | * @DPLL_ID_WRPLL2: HSW and BDW WRPLL2 | ||
70 | */ | ||
50 | DPLL_ID_WRPLL2 = 1, | 71 | DPLL_ID_WRPLL2 = 1, |
72 | /** | ||
73 | * @DPLL_ID_SPLL: HSW and BDW SPLL | ||
74 | */ | ||
51 | DPLL_ID_SPLL = 2, | 75 | DPLL_ID_SPLL = 2, |
76 | /** | ||
77 | * @DPLL_ID_LCPLL_810: HSW and BDW 0.81 GHz LCPLL | ||
78 | */ | ||
52 | DPLL_ID_LCPLL_810 = 3, | 79 | DPLL_ID_LCPLL_810 = 3, |
80 | /** | ||
81 | * @DPLL_ID_LCPLL_1350: HSW and BDW 1.35 GHz LCPLL | ||
82 | */ | ||
53 | DPLL_ID_LCPLL_1350 = 4, | 83 | DPLL_ID_LCPLL_1350 = 4, |
84 | /** | ||
85 | * @DPLL_ID_LCPLL_2700: HSW and BDW 2.7 GHz LCPLL | ||
86 | */ | ||
54 | DPLL_ID_LCPLL_2700 = 5, | 87 | DPLL_ID_LCPLL_2700 = 5, |
55 | 88 | ||
56 | /* skl */ | 89 | |
90 | /** | ||
91 | * @DPLL_ID_SKL_DPLL0: SKL and later DPLL0 | ||
92 | */ | ||
57 | DPLL_ID_SKL_DPLL0 = 0, | 93 | DPLL_ID_SKL_DPLL0 = 0, |
94 | /** | ||
95 | * @DPLL_ID_SKL_DPLL1: SKL and later DPLL1 | ||
96 | */ | ||
58 | DPLL_ID_SKL_DPLL1 = 1, | 97 | DPLL_ID_SKL_DPLL1 = 1, |
98 | /** | ||
99 | * @DPLL_ID_SKL_DPLL2: SKL and later DPLL2 | ||
100 | */ | ||
59 | DPLL_ID_SKL_DPLL2 = 2, | 101 | DPLL_ID_SKL_DPLL2 = 2, |
102 | /** | ||
103 | * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3 | ||
104 | */ | ||
60 | DPLL_ID_SKL_DPLL3 = 3, | 105 | DPLL_ID_SKL_DPLL3 = 3, |
61 | }; | 106 | }; |
62 | #define I915_NUM_PLLS 6 | 107 | #define I915_NUM_PLLS 6 |
63 | 108 | ||
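Note that the per-platform names deliberately overlap in value: each platform registers only its own group, and the value is simply the index into dev_priv->shared_dplls[] (see intel_get_shared_dpll_id() in the .c file). Illustrative assertions that would hold by construction, e.g. inside an init function:

    BUILD_BUG_ON(DPLL_ID_PCH_PLL_A != DPLL_ID_WRPLL1);
    BUILD_BUG_ON(DPLL_ID_WRPLL1 != DPLL_ID_SKL_DPLL0);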
64 | /** Inform the state checker that the DPLL is kept enabled even if not | ||
65 | * in use by any crtc. | ||
66 | */ | ||
67 | #define INTEL_DPLL_ALWAYS_ON (1 << 0) | ||
68 | |||
69 | struct intel_dpll_hw_state { | 109 | struct intel_dpll_hw_state { |
70 | /* i9xx, pch plls */ | 110 | /* i9xx, pch plls */ |
71 | uint32_t dpll; | 111 | uint32_t dpll; |
@@ -93,36 +133,120 @@ struct intel_dpll_hw_state { | |||
93 | pcsdw12; | 133 | pcsdw12; |
94 | }; | 134 | }; |
95 | 135 | ||
96 | struct intel_shared_dpll_config { | 136 | /** |
97 | unsigned crtc_mask; /* mask of CRTCs sharing this PLL */ | 137 | * struct intel_shared_dpll_state - hold the DPLL atomic state |
138 | * | ||
139 | * This structure holds an atomic state for the DPLL, that can represent | ||
140 | * either its current state (in &struct intel_shared_dpll) or a desired | ||
141 | * future state which would be applied by an atomic mode set (stored in | ||
142 | * a struct &intel_atomic_state). | ||
143 | * | ||
144 | * See also intel_get_shared_dpll() and intel_release_shared_dpll(). | ||
145 | */ | ||
146 | struct intel_shared_dpll_state { | ||
147 | /** | ||
148 | * @crtc_mask: mask of CRTCs using this DPLL, active or not | ||
149 | */ | ||
150 | unsigned crtc_mask; | ||
151 | |||
152 | /** | ||
153 | * @hw_state: hardware configuration for the DPLL stored in | ||
154 | * &struct intel_dpll_hw_state. | ||
155 | */ | ||
98 | struct intel_dpll_hw_state hw_state; | 156 | struct intel_dpll_hw_state hw_state; |
99 | }; | 157 | }; |
100 | 158 | ||
159 | /** | ||
160 | * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs | ||
161 | */ | ||
101 | struct intel_shared_dpll_funcs { | 162 | struct intel_shared_dpll_funcs { |
102 | /* The mode_set hook is optional and should be used together with the | 163 | /** |
103 | * intel_prepare_shared_dpll function. */ | 164 | * @prepare: |
104 | void (*mode_set)(struct drm_i915_private *dev_priv, | 165 | * |
105 | struct intel_shared_dpll *pll); | 166 | * Optional hook to perform operations prior to enabling the PLL. |
167 | * Called from intel_prepare_shared_dpll() function unless the PLL | ||
168 | * is already enabled. | ||
169 | */ | ||
170 | void (*prepare)(struct drm_i915_private *dev_priv, | ||
171 | struct intel_shared_dpll *pll); | ||
172 | |||
173 | /** | ||
174 | * @enable: | ||
175 | * | ||
176 | * Hook for enabling the pll, called from intel_enable_shared_dpll() | ||
177 | * if the pll is not already enabled. | ||
178 | */ | ||
106 | void (*enable)(struct drm_i915_private *dev_priv, | 179 | void (*enable)(struct drm_i915_private *dev_priv, |
107 | struct intel_shared_dpll *pll); | 180 | struct intel_shared_dpll *pll); |
181 | |||
182 | /** | ||
183 | * @disable: | ||
184 | * | ||
185 | * Hook for disabling the pll, called from intel_disable_shared_dpll() | ||
186 | * only when it is safe to disable the pll, i.e., there are no more | ||
187 | * tracked users for it. | ||
188 | */ | ||
108 | void (*disable)(struct drm_i915_private *dev_priv, | 189 | void (*disable)(struct drm_i915_private *dev_priv, |
109 | struct intel_shared_dpll *pll); | 190 | struct intel_shared_dpll *pll); |
191 | |||
192 | /** | ||
193 | * @get_hw_state: | ||
194 | * | ||
195 | * Hook for reading the values currently programmed to the DPLL | ||
196 | * registers. This is used for initial hw state readout and state | ||
197 | * verification after a mode set. | ||
198 | */ | ||
110 | bool (*get_hw_state)(struct drm_i915_private *dev_priv, | 199 | bool (*get_hw_state)(struct drm_i915_private *dev_priv, |
111 | struct intel_shared_dpll *pll, | 200 | struct intel_shared_dpll *pll, |
112 | struct intel_dpll_hw_state *hw_state); | 201 | struct intel_dpll_hw_state *hw_state); |
113 | }; | 202 | }; |
114 | 203 | ||
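The IBX table updated earlier in this patch is a concrete instance of this vtable, and the only one in this diff that provides a @prepare hook:

    static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
            .prepare = ibx_pch_dpll_prepare,
            .enable = ibx_pch_dpll_enable,
            .disable = ibx_pch_dpll_disable,
            .get_hw_state = ibx_pch_dpll_get_hw_state,
    };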
204 | /** | ||
205 | * struct intel_shared_dpll - display PLL with tracked state and users | ||
206 | */ | ||
115 | struct intel_shared_dpll { | 207 | struct intel_shared_dpll { |
116 | struct intel_shared_dpll_config config; | 208 | /** |
209 | * @state: | ||
210 | * | ||
211 | * Store the state for the pll, including its hw state | ||
212 | * and CRTCs using it. | ||
213 | */ | ||
214 | struct intel_shared_dpll_state state; | ||
117 | 215 | ||
118 | unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */ | 216 | /** |
119 | bool on; /* is the PLL actually active? Disabled during modeset */ | 217 | * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL |
218 | */ | ||
219 | unsigned active_mask; | ||
220 | |||
221 | /** | ||
222 | * @on: is the PLL actually active? Disabled during modeset | ||
223 | */ | ||
224 | bool on; | ||
225 | |||
226 | /** | ||
227 | * @name: DPLL name; used for logging | ||
228 | */ | ||
120 | const char *name; | 229 | const char *name; |
121 | /* should match the index in the dev_priv->shared_dplls array */ | 230 | |
231 | /** | ||
232 | * @id: unique identifier for this DPLL; should match the index in the | ||
233 | * dev_priv->shared_dplls array | ||
234 | */ | ||
122 | enum intel_dpll_id id; | 235 | enum intel_dpll_id id; |
123 | 236 | ||
237 | /** | ||
238 | * @funcs: platform specific hooks | ||
239 | */ | ||
124 | struct intel_shared_dpll_funcs funcs; | 240 | struct intel_shared_dpll_funcs funcs; |
125 | 241 | ||
242 | #define INTEL_DPLL_ALWAYS_ON (1 << 0) | ||
243 | /** | ||
244 | * @flags: | ||
245 | * | ||
246 | * INTEL_DPLL_ALWAYS_ON | ||
247 | * Inform the state checker that the DPLL is kept enabled even if | ||
248 | * not in use by any CRTC. | ||
249 | */ | ||
126 | uint32_t flags; | 250 | uint32_t flags; |
127 | }; | 251 | }; |
128 | 252 | ||
@@ -138,14 +262,6 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, | |||
138 | enum intel_dpll_id | 262 | enum intel_dpll_id |
139 | intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, | 263 | intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, |
140 | struct intel_shared_dpll *pll); | 264 | struct intel_shared_dpll *pll); |
141 | void | ||
142 | intel_shared_dpll_config_get(struct intel_shared_dpll_config *config, | ||
143 | struct intel_shared_dpll *pll, | ||
144 | struct intel_crtc *crtc); | ||
145 | void | ||
146 | intel_shared_dpll_config_put(struct intel_shared_dpll_config *config, | ||
147 | struct intel_shared_dpll *pll, | ||
148 | struct intel_crtc *crtc); | ||
149 | void assert_shared_dpll(struct drm_i915_private *dev_priv, | 265 | void assert_shared_dpll(struct drm_i915_private *dev_priv, |
150 | struct intel_shared_dpll *pll, | 266 | struct intel_shared_dpll *pll, |
151 | bool state); | 267 | bool state); |
@@ -154,12 +270,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, | |||
154 | struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | 270 | struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, |
155 | struct intel_crtc_state *state, | 271 | struct intel_crtc_state *state, |
156 | struct intel_encoder *encoder); | 272 | struct intel_encoder *encoder); |
273 | void intel_release_shared_dpll(struct intel_shared_dpll *dpll, | ||
274 | struct intel_crtc *crtc, | ||
275 | struct drm_atomic_state *state); | ||
157 | void intel_prepare_shared_dpll(struct intel_crtc *crtc); | 276 | void intel_prepare_shared_dpll(struct intel_crtc *crtc); |
158 | void intel_enable_shared_dpll(struct intel_crtc *crtc); | 277 | void intel_enable_shared_dpll(struct intel_crtc *crtc); |
159 | void intel_disable_shared_dpll(struct intel_crtc *crtc); | 278 | void intel_disable_shared_dpll(struct intel_crtc *crtc); |
160 | void intel_shared_dpll_commit(struct drm_atomic_state *state); | 279 | void intel_shared_dpll_swap_state(struct drm_atomic_state *state); |
161 | void intel_shared_dpll_init(struct drm_device *dev); | 280 | void intel_shared_dpll_init(struct drm_device *dev); |
162 | 281 | ||
282 | void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, | ||
283 | struct intel_dpll_hw_state *hw_state); | ||
284 | |||
163 | /* BXT dpll related functions */ | 285 | /* BXT dpll related functions */ |
164 | bool bxt_ddi_dp_set_dpll_hw_state(int clock, | 286 | bool bxt_ddi_dp_set_dpll_hw_state(int clock, |
165 | struct intel_dpll_hw_state *dpll_hw_state); | 287 | struct intel_dpll_hw_state *dpll_hw_state); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 007a8258ce6b..6b02dac6ea26 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -359,7 +359,7 @@ struct intel_atomic_state { | |||
359 | /* SKL/KBL Only */ | 359 | /* SKL/KBL Only */ |
360 | unsigned int cdclk_pll_vco; | 360 | unsigned int cdclk_pll_vco; |
361 | 361 | ||
362 | struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; | 362 | struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; |
363 | 363 | ||
364 | /* | 364 | /* |
365 | * Current watermarks can't be trusted during hardware readout, so | 365 | * Current watermarks can't be trusted during hardware readout, so |
@@ -692,8 +692,9 @@ struct intel_crtc { | |||
692 | * some outputs connected to this crtc. | 692 | * some outputs connected to this crtc. |
693 | */ | 693 | */ |
694 | bool active; | 694 | bool active; |
695 | unsigned long enabled_power_domains; | ||
696 | bool lowfreq_avail; | 695 | bool lowfreq_avail; |
696 | u8 plane_ids_mask; | ||
697 | unsigned long enabled_power_domains; | ||
697 | struct intel_overlay *overlay; | 698 | struct intel_overlay *overlay; |
698 | struct intel_flip_work *flip_work; | 699 | struct intel_flip_work *flip_work; |
699 | 700 | ||
@@ -767,7 +768,8 @@ struct intel_plane_wm_parameters { | |||
767 | 768 | ||
768 | struct intel_plane { | 769 | struct intel_plane { |
769 | struct drm_plane base; | 770 | struct drm_plane base; |
770 | int plane; | 771 | u8 plane; |
772 | enum plane_id id; | ||
771 | enum pipe pipe; | 773 | enum pipe pipe; |
772 | bool can_scale; | 774 | bool can_scale; |
773 | int max_downscale; | 775 | int max_downscale; |
@@ -841,11 +843,13 @@ struct intel_hdmi { | |||
841 | enum hdmi_picture_aspect aspect_ratio; | 843 | enum hdmi_picture_aspect aspect_ratio; |
842 | struct intel_connector *attached_connector; | 844 | struct intel_connector *attached_connector; |
843 | void (*write_infoframe)(struct drm_encoder *encoder, | 845 | void (*write_infoframe)(struct drm_encoder *encoder, |
846 | const struct intel_crtc_state *crtc_state, | ||
844 | enum hdmi_infoframe_type type, | 847 | enum hdmi_infoframe_type type, |
845 | const void *frame, ssize_t len); | 848 | const void *frame, ssize_t len); |
846 | void (*set_infoframes)(struct drm_encoder *encoder, | 849 | void (*set_infoframes)(struct drm_encoder *encoder, |
847 | bool enable, | 850 | bool enable, |
848 | const struct drm_display_mode *adjusted_mode); | 851 | const struct intel_crtc_state *crtc_state, |
852 | const struct drm_connector_state *conn_state); | ||
849 | bool (*infoframe_enabled)(struct drm_encoder *encoder, | 853 | bool (*infoframe_enabled)(struct drm_encoder *encoder, |
850 | const struct intel_crtc_state *pipe_config); | 854 | const struct intel_crtc_state *pipe_config); |
851 | }; | 855 | }; |
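Both infoframe hooks now receive the atomic states explicitly rather than a bare adjusted mode. A sketch of the new call shape (frame and len are assumed to come from infoframe packing elsewhere):

    intel_hdmi->write_infoframe(encoder, crtc_state,
                                HDMI_INFOFRAME_TYPE_AVI, frame, len);
    intel_hdmi->set_infoframes(encoder, true, crtc_state, conn_state);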
@@ -881,6 +885,16 @@ struct intel_dp_desc { | |||
881 | u8 sw_minor_rev; | 885 | u8 sw_minor_rev; |
882 | } __packed; | 886 | } __packed; |
883 | 887 | ||
888 | struct intel_dp_compliance_data { | ||
889 | unsigned long edid; | ||
890 | }; | ||
891 | |||
892 | struct intel_dp_compliance { | ||
893 | unsigned long test_type; | ||
894 | struct intel_dp_compliance_data test_data; | ||
895 | bool test_active; | ||
896 | }; | ||
897 | |||
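The flat compliance fields removed further down in this file's diff fold into the new struct; call sites change along these lines (test_data.edid being the only payload so far):

    intel_dp->compliance_test_type    ->  intel_dp->compliance.test_type
    intel_dp->compliance_test_data    ->  intel_dp->compliance.test_data.edid
    intel_dp->compliance_test_active  ->  intel_dp->compliance.test_active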
884 | struct intel_dp { | 898 | struct intel_dp { |
885 | i915_reg_t output_reg; | 899 | i915_reg_t output_reg; |
886 | i915_reg_t aux_ch_ctl_reg; | 900 | i915_reg_t aux_ch_ctl_reg; |
@@ -903,6 +917,10 @@ struct intel_dp { | |||
903 | /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ | 917 | /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ |
904 | uint8_t num_sink_rates; | 918 | uint8_t num_sink_rates; |
905 | int sink_rates[DP_MAX_SUPPORTED_RATES]; | 919 | int sink_rates[DP_MAX_SUPPORTED_RATES]; |
920 | /* Max lane count for the sink as per DPCD registers */ | ||
921 | uint8_t max_sink_lane_count; | ||
922 | /* Max link BW for the sink as per DPCD registers */ | ||
923 | int max_sink_link_bw; | ||
906 | /* sink or branch descriptor */ | 924 | /* sink or branch descriptor */ |
907 | struct intel_dp_desc desc; | 925 | struct intel_dp_desc desc; |
908 | struct drm_dp_aux aux; | 926 | struct drm_dp_aux aux; |
@@ -926,6 +944,12 @@ struct intel_dp { | |||
926 | */ | 944 | */ |
927 | enum pipe pps_pipe; | 945 | enum pipe pps_pipe; |
928 | /* | 946 | /* |
947 | * Pipe currently driving the port. Used for preventing | ||
948 | * the use of the PPS for any pipe currently driving | ||
949 | * external DP as that will mess things up on VLV. | ||
950 | */ | ||
951 | enum pipe active_pipe; | ||
952 | /* | ||
929 | * Set if the sequencer may be reset due to a power transition, | 953 | * Set if the sequencer may be reset due to a power transition, |
930 | * requiring a reinitialization. Only relevant on BXT. | 954 | * requiring a reinitialization. Only relevant on BXT. |
931 | */ | 955 | */ |
@@ -956,9 +980,7 @@ struct intel_dp { | |||
956 | void (*prepare_link_retrain)(struct intel_dp *intel_dp); | 980 | void (*prepare_link_retrain)(struct intel_dp *intel_dp); |
957 | 981 | ||
958 | /* Displayport compliance testing */ | 982 | /* Displayport compliance testing */ |
959 | unsigned long compliance_test_type; | 983 | struct intel_dp_compliance compliance; |
960 | unsigned long compliance_test_data; | ||
961 | bool compliance_test_active; | ||
962 | }; | 984 | }; |
963 | 985 | ||
964 | struct intel_lspcon { | 986 | struct intel_lspcon { |
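The three loose compliance_* fields collapse into the single struct intel_dp_compliance introduced earlier in this header, so test state can be reset in one step. Illustrative only, not a call site from the patch:

static void example_clear_compliance(struct intel_dp *intel_dp)
{
	/* one memset now clears type, data and the active flag together */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
}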
@@ -1090,6 +1112,12 @@ dp_to_dig_port(struct intel_dp *intel_dp) | |||
1090 | return container_of(intel_dp, struct intel_digital_port, dp); | 1112 | return container_of(intel_dp, struct intel_digital_port, dp); |
1091 | } | 1113 | } |
1092 | 1114 | ||
1115 | static inline struct intel_lspcon * | ||
1116 | dp_to_lspcon(struct intel_dp *intel_dp) | ||
1117 | { | ||
1118 | return &dp_to_dig_port(intel_dp)->lspcon; | ||
1119 | } | ||
1120 | |||
1093 | static inline struct intel_digital_port * | 1121 | static inline struct intel_digital_port * |
1094 | hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) | 1122 | hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) |
1095 | { | 1123 | { |
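dp_to_lspcon() follows the same container_of() idiom as dp_to_dig_port(): both intel_dp and intel_lspcon are embedded members of the same intel_digital_port, so either is reachable from the other. A self-contained sketch of the idiom, with example_* types standing in for the real ones:

#include <stddef.h>

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_dp { int dummy; };
struct example_lspcon { int active; };

struct example_digital_port {
	struct example_dp dp;		/* embedded, not a pointer */
	struct example_lspcon lspcon;	/* sibling member */
};

static inline struct example_lspcon *
example_dp_to_lspcon(struct example_dp *dp)
{
	/* recover the outer struct from the embedded member, then
	 * hand back its sibling */
	return &example_container_of(dp, struct example_digital_port, dp)->lspcon;
}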
@@ -1142,7 +1170,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv); | |||
1142 | void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); | 1170 | void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); |
1143 | 1171 | ||
1144 | /* intel_crt.c */ | 1172 | /* intel_crt.c */ |
1145 | void intel_crt_init(struct drm_device *dev); | 1173 | void intel_crt_init(struct drm_i915_private *dev_priv); |
1146 | void intel_crt_reset(struct drm_encoder *encoder); | 1174 | void intel_crt_reset(struct drm_encoder *encoder); |
1147 | 1175 | ||
1148 | /* intel_ddi.c */ | 1176 | /* intel_ddi.c */ |
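From here on the header repeats the same mechanical conversion many times: init entry points take struct drm_i915_private * instead of struct drm_device *. Both sides of the pattern, sketched with a hypothetical example_init():

/* Callee: take dev_priv, derive the drm_device only where a drm
 * core API still wants it. */
void example_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	drm_mode_config_reset(dev);	/* any drm_* call needing drm_device */
}

/* Caller that still holds a drm_device: */
void example_caller(struct drm_device *dev)
{
	example_init(to_i915(dev));
}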
@@ -1153,7 +1181,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, | |||
1153 | struct drm_connector_state *old_conn_state); | 1181 | struct drm_connector_state *old_conn_state); |
1154 | void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder); | 1182 | void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder); |
1155 | void hsw_fdi_link_train(struct drm_crtc *crtc); | 1183 | void hsw_fdi_link_train(struct drm_crtc *crtc); |
1156 | void intel_ddi_init(struct drm_device *dev, enum port port); | 1184 | void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); |
1157 | enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); | 1185 | enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); |
1158 | bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); | 1186 | bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); |
1159 | void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc); | 1187 | void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc); |
@@ -1166,6 +1194,8 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc, | |||
1166 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); | 1194 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); |
1167 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); | 1195 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); |
1168 | bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); | 1196 | bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); |
1197 | bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, | ||
1198 | struct intel_crtc *intel_crtc); | ||
1169 | void intel_ddi_get_config(struct intel_encoder *encoder, | 1199 | void intel_ddi_get_config(struct intel_encoder *encoder, |
1170 | struct intel_crtc_state *pipe_config); | 1200 | struct intel_crtc_state *pipe_config); |
1171 | struct intel_encoder * | 1201 | struct intel_encoder * |
@@ -1210,7 +1240,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y, | |||
1210 | void intel_add_fb_offsets(int *x, int *y, | 1240 | void intel_add_fb_offsets(int *x, int *y, |
1211 | const struct intel_plane_state *state, int plane); | 1241 | const struct intel_plane_state *state, int plane); |
1212 | unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); | 1242 | unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); |
1213 | bool intel_has_pending_fb_unpin(struct drm_device *dev); | 1243 | bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv); |
1214 | void intel_mark_busy(struct drm_i915_private *dev_priv); | 1244 | void intel_mark_busy(struct drm_i915_private *dev_priv); |
1215 | void intel_mark_idle(struct drm_i915_private *dev_priv); | 1245 | void intel_mark_idle(struct drm_i915_private *dev_priv); |
1216 | void intel_crtc_restore_mode(struct drm_crtc *crtc); | 1246 | void intel_crtc_restore_mode(struct drm_crtc *crtc); |
@@ -1378,12 +1408,15 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); | |||
1378 | void intel_csr_ucode_resume(struct drm_i915_private *); | 1408 | void intel_csr_ucode_resume(struct drm_i915_private *); |
1379 | 1409 | ||
1380 | /* intel_dp.c */ | 1410 | /* intel_dp.c */ |
1381 | bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); | 1411 | bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, |
1412 | enum port port); | ||
1382 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | 1413 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
1383 | struct intel_connector *intel_connector); | 1414 | struct intel_connector *intel_connector); |
1384 | void intel_dp_set_link_params(struct intel_dp *intel_dp, | 1415 | void intel_dp_set_link_params(struct intel_dp *intel_dp, |
1385 | int link_rate, uint8_t lane_count, | 1416 | int link_rate, uint8_t lane_count, |
1386 | bool link_mst); | 1417 | bool link_mst); |
1418 | int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, | ||
1419 | int link_rate, uint8_t lane_count); | ||
1387 | void intel_dp_start_link_train(struct intel_dp *intel_dp); | 1420 | void intel_dp_start_link_train(struct intel_dp *intel_dp); |
1388 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); | 1421 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); |
1389 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); | 1422 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); |
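intel_dp_get_link_train_fallback_values() is new here; its actual policy lives in intel_dp.c. The general idea of link-training fallback is to retry with reduced parameters, which the following loose, assumption-laden sketch illustrates (not the patch's real algorithm):

/* Hypothetical fallback: step down the link rate, then shed lanes. */
static int example_fallback(int *link_rate, uint8_t *lane_count)
{
	if (*link_rate > 162000) {
		*link_rate = (*link_rate > 270000) ? 270000 : 162000;
		return 0;
	}
	if (*lane_count > 1) {
		*lane_count >>= 1;
		return 0;
	}
	return -1;	/* nothing left to try; training has failed */
}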
@@ -1445,6 +1478,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp); | |||
1445 | bool __intel_dp_read_desc(struct intel_dp *intel_dp, | 1478 | bool __intel_dp_read_desc(struct intel_dp *intel_dp, |
1446 | struct intel_dp_desc *desc); | 1479 | struct intel_dp_desc *desc); |
1447 | bool intel_dp_read_desc(struct intel_dp *intel_dp); | 1480 | bool intel_dp_read_desc(struct intel_dp *intel_dp); |
1481 | int intel_dp_link_required(int pixel_clock, int bpp); | ||
1482 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); | ||
1448 | 1483 | ||
1449 | /* intel_dp_aux_backlight.c */ | 1484 | /* intel_dp_aux_backlight.c */ |
1450 | int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); | 1485 | int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); |
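Exporting intel_dp_link_required() and intel_dp_max_data_rate() lets other files perform the DP bandwidth feasibility check. A hedged reconstruction of the arithmetic (the real bodies are in intel_dp.c and may differ in rounding details):

/* Bandwidth bookkeeping in the driver's kHz-based, per-byte units. */
static int example_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 7) / 8;	/* round up to whole bytes */
}

static int example_max_data_rate(int max_link_clock, int max_lanes)
{
	return max_link_clock * max_lanes;
}

/* A mode fits iff:
 *	example_link_required(clock, bpp) <=
 *	example_max_data_rate(rate, lanes)
 */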
@@ -1453,13 +1488,13 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); | |||
1453 | int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); | 1488 | int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); |
1454 | void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); | 1489 | void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); |
1455 | /* intel_dsi.c */ | 1490 | /* intel_dsi.c */ |
1456 | void intel_dsi_init(struct drm_device *dev); | 1491 | void intel_dsi_init(struct drm_i915_private *dev_priv); |
1457 | 1492 | ||
1458 | /* intel_dsi_dcs_backlight.c */ | 1493 | /* intel_dsi_dcs_backlight.c */ |
1459 | int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); | 1494 | int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); |
1460 | 1495 | ||
1461 | /* intel_dvo.c */ | 1496 | /* intel_dvo.c */ |
1462 | void intel_dvo_init(struct drm_device *dev); | 1497 | void intel_dvo_init(struct drm_i915_private *dev_priv); |
1463 | /* intel_hotplug.c */ | 1498 | /* intel_hotplug.c */ |
1464 | void intel_hpd_poll_init(struct drm_i915_private *dev_priv); | 1499 | void intel_hpd_poll_init(struct drm_i915_private *dev_priv); |
1465 | 1500 | ||
@@ -1523,7 +1558,8 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); | |||
1523 | void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv); | 1558 | void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv); |
1524 | 1559 | ||
1525 | /* intel_hdmi.c */ | 1560 | /* intel_hdmi.c */ |
1526 | void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port); | 1561 | void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, |
1562 | enum port port); | ||
1527 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | 1563 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
1528 | struct intel_connector *intel_connector); | 1564 | struct intel_connector *intel_connector); |
1529 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); | 1565 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); |
@@ -1534,7 +1570,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); | |||
1534 | 1570 | ||
1535 | 1571 | ||
1536 | /* intel_lvds.c */ | 1572 | /* intel_lvds.c */ |
1537 | void intel_lvds_init(struct drm_device *dev); | 1573 | void intel_lvds_init(struct drm_i915_private *dev_priv); |
1538 | struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); | 1574 | struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); |
1539 | bool intel_is_dual_link_lvds(struct drm_device *dev); | 1575 | bool intel_is_dual_link_lvds(struct drm_device *dev); |
1540 | 1576 | ||
@@ -1579,9 +1615,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector, | |||
1579 | void intel_panel_enable_backlight(struct intel_connector *connector); | 1615 | void intel_panel_enable_backlight(struct intel_connector *connector); |
1580 | void intel_panel_disable_backlight(struct intel_connector *connector); | 1616 | void intel_panel_disable_backlight(struct intel_connector *connector); |
1581 | void intel_panel_destroy_backlight(struct drm_connector *connector); | 1617 | void intel_panel_destroy_backlight(struct drm_connector *connector); |
1582 | enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 1618 | enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv); |
1583 | extern struct drm_display_mode *intel_find_panel_downclock( | 1619 | extern struct drm_display_mode *intel_find_panel_downclock( |
1584 | struct drm_device *dev, | 1620 | struct drm_i915_private *dev_priv, |
1585 | struct drm_display_mode *fixed_mode, | 1621 | struct drm_display_mode *fixed_mode, |
1586 | struct drm_connector *connector); | 1622 | struct drm_connector *connector); |
1587 | 1623 | ||
@@ -1607,7 +1643,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, | |||
1607 | void intel_psr_flush(struct drm_i915_private *dev_priv, | 1643 | void intel_psr_flush(struct drm_i915_private *dev_priv, |
1608 | unsigned frontbuffer_bits, | 1644 | unsigned frontbuffer_bits, |
1609 | enum fb_op_origin origin); | 1645 | enum fb_op_origin origin); |
1610 | void intel_psr_init(struct drm_device *dev); | 1646 | void intel_psr_init(struct drm_i915_private *dev_priv); |
1611 | void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, | 1647 | void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, |
1612 | unsigned frontbuffer_bits); | 1648 | unsigned frontbuffer_bits); |
1613 | 1649 | ||
@@ -1711,7 +1747,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv); | |||
1711 | void intel_update_watermarks(struct intel_crtc *crtc); | 1747 | void intel_update_watermarks(struct intel_crtc *crtc); |
1712 | void intel_init_pm(struct drm_i915_private *dev_priv); | 1748 | void intel_init_pm(struct drm_i915_private *dev_priv); |
1713 | void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); | 1749 | void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); |
1714 | void intel_pm_setup(struct drm_device *dev); | 1750 | void intel_pm_setup(struct drm_i915_private *dev_priv); |
1715 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 1751 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
1716 | void intel_gpu_ips_teardown(void); | 1752 | void intel_gpu_ips_teardown(void); |
1717 | void intel_init_gt_powersave(struct drm_i915_private *dev_priv); | 1753 | void intel_init_gt_powersave(struct drm_i915_private *dev_priv); |
@@ -1752,7 +1788,7 @@ static inline int intel_enable_rc6(void) | |||
1752 | } | 1788 | } |
1753 | 1789 | ||
1754 | /* intel_sdvo.c */ | 1790 | /* intel_sdvo.c */ |
1755 | bool intel_sdvo_init(struct drm_device *dev, | 1791 | bool intel_sdvo_init(struct drm_i915_private *dev_priv, |
1756 | i915_reg_t reg, enum port port); | 1792 | i915_reg_t reg, enum port port); |
1757 | 1793 | ||
1758 | 1794 | ||
@@ -1767,7 +1803,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc); | |||
1767 | void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work); | 1803 | void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work); |
1768 | 1804 | ||
1769 | /* intel_tv.c */ | 1805 | /* intel_tv.c */ |
1770 | void intel_tv_init(struct drm_device *dev); | 1806 | void intel_tv_init(struct drm_i915_private *dev_priv); |
1771 | 1807 | ||
1772 | /* intel_atomic.c */ | 1808 | /* intel_atomic.c */ |
1773 | int intel_connector_atomic_get_property(struct drm_connector *connector, | 1809 | int intel_connector_atomic_get_property(struct drm_connector *connector, |
@@ -1779,8 +1815,6 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc, | |||
1779 | struct drm_crtc_state *state); | 1815 | struct drm_crtc_state *state); |
1780 | struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev); | 1816 | struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev); |
1781 | void intel_atomic_state_clear(struct drm_atomic_state *); | 1817 | void intel_atomic_state_clear(struct drm_atomic_state *); |
1782 | struct intel_shared_dpll_config * | ||
1783 | intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s); | ||
1784 | 1818 | ||
1785 | static inline struct intel_crtc_state * | 1819 | static inline struct intel_crtc_state * |
1786 | intel_atomic_get_crtc_state(struct drm_atomic_state *state, | 1820 | intel_atomic_get_crtc_state(struct drm_atomic_state *state, |
@@ -1794,6 +1828,20 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, | |||
1794 | return to_intel_crtc_state(crtc_state); | 1828 | return to_intel_crtc_state(crtc_state); |
1795 | } | 1829 | } |
1796 | 1830 | ||
1831 | static inline struct intel_crtc_state * | ||
1832 | intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state, | ||
1833 | struct intel_crtc *crtc) | ||
1834 | { | ||
1835 | struct drm_crtc_state *crtc_state; | ||
1836 | |||
1837 | crtc_state = drm_atomic_get_existing_crtc_state(state, &crtc->base); | ||
1838 | |||
1839 | if (crtc_state) | ||
1840 | return to_intel_crtc_state(crtc_state); | ||
1841 | else | ||
1842 | return NULL; | ||
1843 | } | ||
1844 | |||
1797 | static inline struct intel_plane_state * | 1845 | static inline struct intel_plane_state * |
1798 | intel_atomic_get_existing_plane_state(struct drm_atomic_state *state, | 1846 | intel_atomic_get_existing_plane_state(struct drm_atomic_state *state, |
1799 | struct intel_plane *plane) | 1847 | struct intel_plane *plane) |
@@ -1827,4 +1875,10 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state); | |||
1827 | /* intel_lspcon.c */ | 1875 | /* intel_lspcon.c */ |
1828 | bool lspcon_init(struct intel_digital_port *intel_dig_port); | 1876 | bool lspcon_init(struct intel_digital_port *intel_dig_port); |
1829 | void lspcon_resume(struct intel_lspcon *lspcon); | 1877 | void lspcon_resume(struct intel_lspcon *lspcon); |
1878 | void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); | ||
1879 | |||
1880 | /* intel_pipe_crc.c */ | ||
1881 | int intel_pipe_crc_create(struct drm_minor *minor); | ||
1882 | void intel_pipe_crc_cleanup(struct drm_minor *minor); | ||
1883 | extern const struct file_operations i915_display_crc_ctl_fops; | ||
1830 | #endif /* __INTEL_DRV_H__ */ | 1884 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 5b72c50d6f76..16732e7bc08e 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -340,7 +340,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, | |||
340 | /* DSI uses short packets for sync events, so clear mode flags for DSI */ | 340 | /* DSI uses short packets for sync events, so clear mode flags for DSI */ |
341 | adjusted_mode->flags = 0; | 341 | adjusted_mode->flags = 0; |
342 | 342 | ||
343 | if (IS_BROXTON(dev_priv)) { | 343 | if (IS_GEN9_LP(dev_priv)) { |
344 | /* Dual link goes to DSI transcoder A. */ | 344 | /* Dual link goes to DSI transcoder A. */ |
345 | if (intel_dsi->ports == BIT(PORT_C)) | 345 | if (intel_dsi->ports == BIT(PORT_C)) |
346 | pipe_config->cpu_transcoder = TRANSCODER_DSI_C; | 346 | pipe_config->cpu_transcoder = TRANSCODER_DSI_C; |
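Most of this file's changes swap IS_BROXTON() for the broader IS_GEN9_LP() predicate, so the low-power DSI paths cover Broxton's gen9 LP successors without another sweep. The assumed shape of the macro, hedged; see i915_drv.h for the authoritative definition:

/* Assumption, not copied from the patch: gen9 and a low-power SKU,
 * i.e. BXT today plus later gen9 LP parts. */
#define EXAMPLE_IS_GEN9_LP(dev_priv) \
	(IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)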
@@ -379,7 +379,8 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) | |||
379 | val &= ~ULPS_STATE_MASK; | 379 | val &= ~ULPS_STATE_MASK; |
380 | val |= (ULPS_STATE_ENTER | DEVICE_READY); | 380 | val |= (ULPS_STATE_ENTER | DEVICE_READY); |
381 | I915_WRITE(MIPI_DEVICE_READY(port), val); | 381 | I915_WRITE(MIPI_DEVICE_READY(port), val); |
382 | usleep_range(2, 3); | 382 | /* at least 2us - relaxed for hrtimer subsystem optimization */ |
383 | usleep_range(10, 50); | ||
383 | 384 | ||
384 | /* 3. Exit ULPS */ | 385 | /* 3. Exit ULPS */ |
385 | val = I915_READ(MIPI_DEVICE_READY(port)); | 386 | val = I915_READ(MIPI_DEVICE_READY(port)); |
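The same relaxation appears again in intel_dsi_pll.c below: keep the minimum safely above the hardware requirement and widen the maximum, giving the hrtimer subsystem room to coalesce wakeups. The pattern, annotated:

/* hw needs >= 2 us; a wide [10, 50] window lets hrtimers batch this
 * wakeup with others instead of arming a dedicated precise timer */
usleep_range(10, 50);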
@@ -441,7 +442,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) | |||
441 | 442 | ||
442 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 443 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
443 | vlv_dsi_device_ready(encoder); | 444 | vlv_dsi_device_ready(encoder); |
444 | else if (IS_BROXTON(dev_priv)) | 445 | else if (IS_GEN9_LP(dev_priv)) |
445 | bxt_dsi_device_ready(encoder); | 446 | bxt_dsi_device_ready(encoder); |
446 | } | 447 | } |
447 | 448 | ||
@@ -464,7 +465,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) | |||
464 | } | 465 | } |
465 | 466 | ||
466 | for_each_dsi_port(port, intel_dsi->ports) { | 467 | for_each_dsi_port(port, intel_dsi->ports) { |
467 | i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? | 468 | i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? |
468 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); | 469 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); |
469 | u32 temp; | 470 | u32 temp; |
470 | 471 | ||
@@ -476,7 +477,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) | |||
476 | if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) { | 477 | if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) { |
477 | temp |= (intel_dsi->dual_link - 1) | 478 | temp |= (intel_dsi->dual_link - 1) |
478 | << DUAL_LINK_MODE_SHIFT; | 479 | << DUAL_LINK_MODE_SHIFT; |
479 | temp |= intel_crtc->pipe ? | 480 | if (IS_BROXTON(dev_priv)) |
481 | temp |= LANE_CONFIGURATION_DUAL_LINK_A; | ||
482 | else | ||
483 | temp |= intel_crtc->pipe ? | ||
480 | LANE_CONFIGURATION_DUAL_LINK_B : | 484 | LANE_CONFIGURATION_DUAL_LINK_B : |
481 | LANE_CONFIGURATION_DUAL_LINK_A; | 485 | LANE_CONFIGURATION_DUAL_LINK_A; |
482 | } | 486 | } |
@@ -494,7 +498,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) | |||
494 | enum port port; | 498 | enum port port; |
495 | 499 | ||
496 | for_each_dsi_port(port, intel_dsi->ports) { | 500 | for_each_dsi_port(port, intel_dsi->ports) { |
497 | i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? | 501 | i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? |
498 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); | 502 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); |
499 | u32 temp; | 503 | u32 temp; |
500 | 504 | ||
@@ -663,7 +667,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) | |||
663 | DRM_DEBUG_KMS("\n"); | 667 | DRM_DEBUG_KMS("\n"); |
664 | for_each_dsi_port(port, intel_dsi->ports) { | 668 | for_each_dsi_port(port, intel_dsi->ports) { |
665 | /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ | 669 | /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ |
666 | i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? | 670 | i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? |
667 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); | 671 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); |
668 | u32 val; | 672 | u32 val; |
669 | 673 | ||
@@ -695,8 +699,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) | |||
695 | I915_WRITE(MIPI_DEVICE_READY(port), 0x00); | 699 | I915_WRITE(MIPI_DEVICE_READY(port), 0x00); |
696 | usleep_range(2000, 2500); | 700 | usleep_range(2000, 2500); |
697 | } | 701 | } |
698 | |||
699 | intel_disable_dsi_pll(encoder); | ||
700 | } | 702 | } |
701 | 703 | ||
702 | static void intel_dsi_post_disable(struct intel_encoder *encoder, | 704 | static void intel_dsi_post_disable(struct intel_encoder *encoder, |
@@ -712,6 +714,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, | |||
712 | 714 | ||
713 | intel_dsi_clear_device_ready(encoder); | 715 | intel_dsi_clear_device_ready(encoder); |
714 | 716 | ||
717 | intel_disable_dsi_pll(encoder); | ||
718 | |||
715 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 719 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
716 | u32 val; | 720 | u32 val; |
717 | 721 | ||
@@ -755,12 +759,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | |||
755 | * configuration, otherwise accessing DSI registers will hang the | 759 | * configuration, otherwise accessing DSI registers will hang the |
756 | * machine. See BSpec North Display Engine registers/MIPI[BXT]. | 760 | * machine. See BSpec North Display Engine registers/MIPI[BXT]. |
757 | */ | 761 | */ |
758 | if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) | 762 | if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) |
759 | goto out_put_power; | 763 | goto out_put_power; |
760 | 764 | ||
761 | /* XXX: this only works for one DSI output */ | 765 | /* XXX: this only works for one DSI output */ |
762 | for_each_dsi_port(port, intel_dsi->ports) { | 766 | for_each_dsi_port(port, intel_dsi->ports) { |
763 | i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ? | 767 | i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ? |
764 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); | 768 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); |
765 | bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE; | 769 | bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE; |
766 | 770 | ||
@@ -785,7 +789,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | |||
785 | if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) | 789 | if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) |
786 | continue; | 790 | continue; |
787 | 791 | ||
788 | if (IS_BROXTON(dev_priv)) { | 792 | if (IS_GEN9_LP(dev_priv)) { |
789 | u32 tmp = I915_READ(MIPI_CTRL(port)); | 793 | u32 tmp = I915_READ(MIPI_CTRL(port)); |
790 | tmp &= BXT_PIPE_SELECT_MASK; | 794 | tmp &= BXT_PIPE_SELECT_MASK; |
791 | tmp >>= BXT_PIPE_SELECT_SHIFT; | 795 | tmp >>= BXT_PIPE_SELECT_SHIFT; |
@@ -973,7 +977,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, | |||
973 | u32 pclk; | 977 | u32 pclk; |
974 | DRM_DEBUG_KMS("\n"); | 978 | DRM_DEBUG_KMS("\n"); |
975 | 979 | ||
976 | if (IS_BROXTON(dev_priv)) | 980 | if (IS_GEN9_LP(dev_priv)) |
977 | bxt_dsi_get_pipe_config(encoder, pipe_config); | 981 | bxt_dsi_get_pipe_config(encoder, pipe_config); |
978 | 982 | ||
979 | pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp, | 983 | pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp, |
@@ -1065,7 +1069,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, | |||
1065 | hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); | 1069 | hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); |
1066 | 1070 | ||
1067 | for_each_dsi_port(port, intel_dsi->ports) { | 1071 | for_each_dsi_port(port, intel_dsi->ports) { |
1068 | if (IS_BROXTON(dev_priv)) { | 1072 | if (IS_GEN9_LP(dev_priv)) { |
1069 | /* | 1073 | /* |
1070 | * Program hdisplay and vdisplay on MIPI transcoder. | 1074 | * Program hdisplay and vdisplay on MIPI transcoder. |
1071 | * This is different from calculated hactive and | 1075 | * This is different from calculated hactive and |
@@ -1152,7 +1156,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, | |||
1152 | tmp &= ~READ_REQUEST_PRIORITY_MASK; | 1156 | tmp &= ~READ_REQUEST_PRIORITY_MASK; |
1153 | I915_WRITE(MIPI_CTRL(port), tmp | | 1157 | I915_WRITE(MIPI_CTRL(port), tmp | |
1154 | READ_REQUEST_PRIORITY_HIGH); | 1158 | READ_REQUEST_PRIORITY_HIGH); |
1155 | } else if (IS_BROXTON(dev_priv)) { | 1159 | } else if (IS_GEN9_LP(dev_priv)) { |
1156 | enum pipe pipe = intel_crtc->pipe; | 1160 | enum pipe pipe = intel_crtc->pipe; |
1157 | 1161 | ||
1158 | tmp = I915_READ(MIPI_CTRL(port)); | 1162 | tmp = I915_READ(MIPI_CTRL(port)); |
@@ -1190,7 +1194,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, | |||
1190 | if (intel_dsi->clock_stop) | 1194 | if (intel_dsi->clock_stop) |
1191 | tmp |= CLOCKSTOP; | 1195 | tmp |= CLOCKSTOP; |
1192 | 1196 | ||
1193 | if (IS_BROXTON(dev_priv)) { | 1197 | if (IS_GEN9_LP(dev_priv)) { |
1194 | tmp |= BXT_DPHY_DEFEATURE_EN; | 1198 | tmp |= BXT_DPHY_DEFEATURE_EN; |
1195 | if (!is_cmd_mode(intel_dsi)) | 1199 | if (!is_cmd_mode(intel_dsi)) |
1196 | tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; | 1200 | tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; |
@@ -1241,7 +1245,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, | |||
1241 | I915_WRITE(MIPI_INIT_COUNT(port), | 1245 | I915_WRITE(MIPI_INIT_COUNT(port), |
1242 | txclkesc(intel_dsi->escape_clk_div, 100)); | 1246 | txclkesc(intel_dsi->escape_clk_div, 100)); |
1243 | 1247 | ||
1244 | if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) { | 1248 | if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) { |
1245 | /* | 1249 | /* |
1246 | * BXT spec says write MIPI_INIT_COUNT for | 1250 | * BXT spec says write MIPI_INIT_COUNT for |
1247 | * both the ports, even if only one is | 1251 | * both the ports, even if only one is |
@@ -1424,15 +1428,15 @@ static void intel_dsi_add_properties(struct intel_connector *connector) | |||
1424 | } | 1428 | } |
1425 | } | 1429 | } |
1426 | 1430 | ||
1427 | void intel_dsi_init(struct drm_device *dev) | 1431 | void intel_dsi_init(struct drm_i915_private *dev_priv) |
1428 | { | 1432 | { |
1433 | struct drm_device *dev = &dev_priv->drm; | ||
1429 | struct intel_dsi *intel_dsi; | 1434 | struct intel_dsi *intel_dsi; |
1430 | struct intel_encoder *intel_encoder; | 1435 | struct intel_encoder *intel_encoder; |
1431 | struct drm_encoder *encoder; | 1436 | struct drm_encoder *encoder; |
1432 | struct intel_connector *intel_connector; | 1437 | struct intel_connector *intel_connector; |
1433 | struct drm_connector *connector; | 1438 | struct drm_connector *connector; |
1434 | struct drm_display_mode *scan, *fixed_mode = NULL; | 1439 | struct drm_display_mode *scan, *fixed_mode = NULL; |
1435 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1436 | enum port port; | 1440 | enum port port; |
1437 | unsigned int i; | 1441 | unsigned int i; |
1438 | 1442 | ||
@@ -1444,7 +1448,7 @@ void intel_dsi_init(struct drm_device *dev) | |||
1444 | 1448 | ||
1445 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 1449 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1446 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; | 1450 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; |
1447 | } else if (IS_BROXTON(dev_priv)) { | 1451 | } else if (IS_GEN9_LP(dev_priv)) { |
1448 | dev_priv->mipi_mmio_base = BXT_MIPI_BASE; | 1452 | dev_priv->mipi_mmio_base = BXT_MIPI_BASE; |
1449 | } else { | 1453 | } else { |
1450 | DRM_ERROR("Unsupported Mipi device to reg base"); | 1454 | DRM_ERROR("Unsupported Mipi device to reg base"); |
@@ -1485,7 +1489,7 @@ void intel_dsi_init(struct drm_device *dev) | |||
1485 | * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI | 1489 | * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI |
1486 | * port C. BXT isn't limited like this. | 1490 | * port C. BXT isn't limited like this. |
1487 | */ | 1491 | */ |
1488 | if (IS_BROXTON(dev_priv)) | 1492 | if (IS_GEN9_LP(dev_priv)) |
1489 | intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); | 1493 | intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); |
1490 | else if (port == PORT_A) | 1494 | else if (port == PORT_A) |
1491 | intel_encoder->crtc_mask = BIT(PIPE_A); | 1495 | intel_encoder->crtc_mask = BIT(PIPE_A); |
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 47cd1b20fb3e..8f683b8b1816 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <drm/drm_edid.h> | 29 | #include <drm/drm_edid.h> |
30 | #include <drm/i915_drm.h> | 30 | #include <drm/i915_drm.h> |
31 | #include <drm/drm_panel.h> | 31 | #include <drm/drm_panel.h> |
32 | #include <linux/gpio/consumer.h> | ||
32 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
33 | #include <video/mipi_display.h> | 34 | #include <video/mipi_display.h> |
34 | #include <asm/intel-mid.h> | 35 | #include <asm/intel-mid.h> |
@@ -305,19 +306,44 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv, | |||
305 | mutex_unlock(&dev_priv->sb_lock); | 306 | mutex_unlock(&dev_priv->sb_lock); |
306 | } | 307 | } |
307 | 308 | ||
309 | static void bxt_exec_gpio(struct drm_i915_private *dev_priv, | ||
310 | u8 gpio_source, u8 gpio_index, bool value) | ||
311 | { | ||
312 | /* XXX: this table is a quick ugly hack. */ | ||
313 | static struct gpio_desc *bxt_gpio_table[U8_MAX + 1]; | ||
314 | struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index]; | ||
315 | |||
316 | if (!gpio_desc) { | ||
317 | gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, | ||
318 | "panel", gpio_index, | ||
319 | value ? GPIOD_OUT_LOW : | ||
320 | GPIOD_OUT_HIGH); | ||
321 | |||
322 | if (IS_ERR_OR_NULL(gpio_desc)) { | ||
323 | DRM_ERROR("GPIO index %u request failed (%ld)\n", | ||
324 | gpio_index, PTR_ERR(gpio_desc)); | ||
325 | return; | ||
326 | } | ||
327 | |||
328 | bxt_gpio_table[gpio_index] = gpio_desc; | ||
329 | } | ||
330 | |||
331 | gpiod_set_value(gpio_desc, value); | ||
332 | } | ||
333 | |||
308 | static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) | 334 | static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) |
309 | { | 335 | { |
310 | struct drm_device *dev = intel_dsi->base.base.dev; | 336 | struct drm_device *dev = intel_dsi->base.base.dev; |
311 | struct drm_i915_private *dev_priv = to_i915(dev); | 337 | struct drm_i915_private *dev_priv = to_i915(dev); |
312 | u8 gpio_source, gpio_index; | 338 | u8 gpio_source, gpio_index = 0, gpio_number; |
313 | bool value; | 339 | bool value; |
314 | 340 | ||
315 | DRM_DEBUG_KMS("\n"); | 341 | DRM_DEBUG_KMS("\n"); |
316 | 342 | ||
317 | if (dev_priv->vbt.dsi.seq_version >= 3) | 343 | if (dev_priv->vbt.dsi.seq_version >= 3) |
318 | data++; | 344 | gpio_index = *data++; |
319 | 345 | ||
320 | gpio_index = *data++; | 346 | gpio_number = *data++; |
321 | 347 | ||
322 | /* gpio source in sequence v2 only */ | 348 | /* gpio source in sequence v2 only */ |
323 | if (dev_priv->vbt.dsi.seq_version == 2) | 349 | if (dev_priv->vbt.dsi.seq_version == 2) |
@@ -329,11 +355,11 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) | |||
329 | value = *data++ & 1; | 355 | value = *data++ & 1; |
330 | 356 | ||
331 | if (IS_VALLEYVIEW(dev_priv)) | 357 | if (IS_VALLEYVIEW(dev_priv)) |
332 | vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value); | 358 | vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value); |
333 | else if (IS_CHERRYVIEW(dev_priv)) | 359 | else if (IS_CHERRYVIEW(dev_priv)) |
334 | chv_exec_gpio(dev_priv, gpio_source, gpio_index, value); | 360 | chv_exec_gpio(dev_priv, gpio_source, gpio_number, value); |
335 | else | 361 | else |
336 | DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); | 362 | bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value); |
337 | 363 | ||
338 | return data; | 364 | return data; |
339 | } | 365 | } |
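The reworked parser distinguishes the driver-side GPIO index (carried by v3+ sequences only) from the platform GPIO number that every version carries. A self-contained sketch of the byte layout it walks, with example_* names and the v2 source bits elided:

#include <stdbool.h>

/* v3+: [index][number][flags]; v2: [number][flags], with the gpio
 * source packed into the flags byte alongside the value bit */
static const unsigned char *
example_parse_gpio(const unsigned char *data, int seq_version,
		   unsigned char *index, unsigned char *number, bool *value)
{
	*index = 0;
	if (seq_version >= 3)
		*index = *data++;	/* logical index, v3+ only */

	*number = *data++;		/* platform GPIO number */

	*value = *data++ & 1;		/* drive level, bit 0 of flags */
	return data;
}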
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 56eff6004bc0..61440e5c2563 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c | |||
@@ -156,8 +156,10 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder, | |||
156 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, | 156 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, |
157 | config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN); | 157 | config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN); |
158 | 158 | ||
159 | /* wait at least 0.5 us after ungating before enabling VCO */ | 159 | /* wait at least 0.5 us after ungating before enabling VCO, |
160 | usleep_range(1, 10); | 160 | * allow hrtimer subsystem optimization by relaxing timing |
161 | */ | ||
162 | usleep_range(10, 50); | ||
161 | 163 | ||
162 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl); | 164 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl); |
163 | 165 | ||
@@ -351,7 +353,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | |||
351 | u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | 353 | u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, |
352 | struct intel_crtc_state *config) | 354 | struct intel_crtc_state *config) |
353 | { | 355 | { |
354 | if (IS_BROXTON(to_i915(encoder->base.dev))) | 356 | if (IS_GEN9_LP(to_i915(encoder->base.dev))) |
355 | return bxt_dsi_get_pclk(encoder, pipe_bpp, config); | 357 | return bxt_dsi_get_pclk(encoder, pipe_bpp, config); |
356 | else | 358 | else |
357 | return vlv_dsi_get_pclk(encoder, pipe_bpp, config); | 359 | return vlv_dsi_get_pclk(encoder, pipe_bpp, config); |
@@ -504,7 +506,7 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder, | |||
504 | 506 | ||
505 | bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) | 507 | bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) |
506 | { | 508 | { |
507 | if (IS_BROXTON(dev_priv)) | 509 | if (IS_GEN9_LP(dev_priv)) |
508 | return bxt_dsi_pll_is_enabled(dev_priv); | 510 | return bxt_dsi_pll_is_enabled(dev_priv); |
509 | 511 | ||
510 | MISSING_CASE(INTEL_DEVID(dev_priv)); | 512 | MISSING_CASE(INTEL_DEVID(dev_priv)); |
@@ -519,7 +521,7 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder, | |||
519 | 521 | ||
520 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 522 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
521 | return vlv_compute_dsi_pll(encoder, config); | 523 | return vlv_compute_dsi_pll(encoder, config); |
522 | else if (IS_BROXTON(dev_priv)) | 524 | else if (IS_GEN9_LP(dev_priv)) |
523 | return bxt_compute_dsi_pll(encoder, config); | 525 | return bxt_compute_dsi_pll(encoder, config); |
524 | 526 | ||
525 | return -ENODEV; | 527 | return -ENODEV; |
@@ -532,7 +534,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder, | |||
532 | 534 | ||
533 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 535 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
534 | vlv_enable_dsi_pll(encoder, config); | 536 | vlv_enable_dsi_pll(encoder, config); |
535 | else if (IS_BROXTON(dev_priv)) | 537 | else if (IS_GEN9_LP(dev_priv)) |
536 | bxt_enable_dsi_pll(encoder, config); | 538 | bxt_enable_dsi_pll(encoder, config); |
537 | } | 539 | } |
538 | 540 | ||
@@ -542,7 +544,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder) | |||
542 | 544 | ||
543 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 545 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
544 | vlv_disable_dsi_pll(encoder); | 546 | vlv_disable_dsi_pll(encoder); |
545 | else if (IS_BROXTON(dev_priv)) | 547 | else if (IS_GEN9_LP(dev_priv)) |
546 | bxt_disable_dsi_pll(encoder); | 548 | bxt_disable_dsi_pll(encoder); |
547 | } | 549 | } |
548 | 550 | ||
@@ -566,7 +568,7 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) | |||
566 | { | 568 | { |
567 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 569 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
568 | 570 | ||
569 | if (IS_BROXTON(dev_priv)) | 571 | if (IS_GEN9_LP(dev_priv)) |
570 | bxt_dsi_reset_clocks(encoder, port); | 572 | bxt_dsi_reset_clocks(encoder, port); |
571 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 573 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
572 | vlv_dsi_reset_clocks(encoder, port); | 574 | vlv_dsi_reset_clocks(encoder, port); |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 708645443046..50da89dcb92b 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -422,9 +422,8 @@ static enum port intel_dvo_port(i915_reg_t dvo_reg) | |||
422 | return PORT_C; | 422 | return PORT_C; |
423 | } | 423 | } |
424 | 424 | ||
425 | void intel_dvo_init(struct drm_device *dev) | 425 | void intel_dvo_init(struct drm_i915_private *dev_priv) |
426 | { | 426 | { |
427 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
428 | struct intel_encoder *intel_encoder; | 427 | struct intel_encoder *intel_encoder; |
429 | struct intel_dvo *intel_dvo; | 428 | struct intel_dvo *intel_dvo; |
430 | struct intel_connector *intel_connector; | 429 | struct intel_connector *intel_connector; |
@@ -511,7 +510,7 @@ void intel_dvo_init(struct drm_device *dev) | |||
511 | continue; | 510 | continue; |
512 | 511 | ||
513 | port = intel_dvo_port(dvo->dvo_reg); | 512 | port = intel_dvo_port(dvo->dvo_reg); |
514 | drm_encoder_init(dev, &intel_encoder->base, | 513 | drm_encoder_init(&dev_priv->drm, &intel_encoder->base, |
515 | &intel_dvo_enc_funcs, encoder_type, | 514 | &intel_dvo_enc_funcs, encoder_type, |
516 | "DVO %c", port_name(port)); | 515 | "DVO %c", port_name(port)); |
517 | 516 | ||
@@ -523,14 +522,14 @@ void intel_dvo_init(struct drm_device *dev) | |||
523 | case INTEL_DVO_CHIP_TMDS: | 522 | case INTEL_DVO_CHIP_TMDS: |
524 | intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) | | 523 | intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) | |
525 | (1 << INTEL_OUTPUT_DVO); | 524 | (1 << INTEL_OUTPUT_DVO); |
526 | drm_connector_init(dev, connector, | 525 | drm_connector_init(&dev_priv->drm, connector, |
527 | &intel_dvo_connector_funcs, | 526 | &intel_dvo_connector_funcs, |
528 | DRM_MODE_CONNECTOR_DVII); | 527 | DRM_MODE_CONNECTOR_DVII); |
529 | encoder_type = DRM_MODE_ENCODER_TMDS; | 528 | encoder_type = DRM_MODE_ENCODER_TMDS; |
530 | break; | 529 | break; |
531 | case INTEL_DVO_CHIP_LVDS: | 530 | case INTEL_DVO_CHIP_LVDS: |
532 | intel_encoder->cloneable = 0; | 531 | intel_encoder->cloneable = 0; |
533 | drm_connector_init(dev, connector, | 532 | drm_connector_init(&dev_priv->drm, connector, |
534 | &intel_dvo_connector_funcs, | 533 | &intel_dvo_connector_funcs, |
535 | DRM_MODE_CONNECTOR_LVDS); | 534 | DRM_MODE_CONNECTOR_LVDS); |
536 | encoder_type = DRM_MODE_ENCODER_LVDS; | 535 | encoder_type = DRM_MODE_ENCODER_LVDS; |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 3da4d466e332..97bbbc3d6aa8 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -111,13 +111,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv, | |||
111 | 111 | ||
112 | /** | 112 | /** |
113 | * intel_engines_init() - allocate, populate and init the Engine Command Streamers | 113 | * intel_engines_init() - allocate, populate and init the Engine Command Streamers |
114 | * @dev: DRM device. | 114 | * @dev_priv: i915 device private |
115 | * | 115 | * |
116 | * Return: non-zero if the initialization failed. | 116 | * Return: non-zero if the initialization failed. |
117 | */ | 117 | */ |
118 | int intel_engines_init(struct drm_device *dev) | 118 | int intel_engines_init(struct drm_i915_private *dev_priv) |
119 | { | 119 | { |
120 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
121 | struct intel_device_info *device_info = mkwrite_device_info(dev_priv); | 120 | struct intel_device_info *device_info = mkwrite_device_info(dev_priv); |
122 | unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask; | 121 | unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask; |
123 | unsigned int mask = 0; | 122 | unsigned int mask = 0; |
@@ -257,7 +256,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) | |||
257 | 256 | ||
258 | WARN_ON(engine->scratch); | 257 | WARN_ON(engine->scratch); |
259 | 258 | ||
260 | obj = i915_gem_object_create_stolen(&engine->i915->drm, size); | 259 | obj = i915_gem_object_create_stolen(engine->i915, size); |
261 | if (!obj) | 260 | if (!obj) |
262 | obj = i915_gem_object_create_internal(engine->i915, size); | 261 | obj = i915_gem_object_create_internal(engine->i915, size); |
263 | if (IS_ERR(obj)) { | 262 | if (IS_ERR(obj)) { |
@@ -305,15 +304,30 @@ int intel_engine_init_common(struct intel_engine_cs *engine) | |||
305 | { | 304 | { |
306 | int ret; | 305 | int ret; |
307 | 306 | ||
308 | ret = intel_engine_init_breadcrumbs(engine); | 307 | /* We may need to do things with the shrinker which |
308 | * require us to immediately switch back to the default | ||
309 | * context. This can cause a problem as pinning the | ||
310 | * default context also requires GTT space which may not | ||
311 | * be available. To avoid this we always pin the default | ||
312 | * context. | ||
313 | */ | ||
314 | ret = engine->context_pin(engine, engine->i915->kernel_context); | ||
309 | if (ret) | 315 | if (ret) |
310 | return ret; | 316 | return ret; |
311 | 317 | ||
318 | ret = intel_engine_init_breadcrumbs(engine); | ||
319 | if (ret) | ||
320 | goto err_unpin; | ||
321 | |||
312 | ret = i915_gem_render_state_init(engine); | 322 | ret = i915_gem_render_state_init(engine); |
313 | if (ret) | 323 | if (ret) |
314 | return ret; | 324 | goto err_unpin; |
315 | 325 | ||
316 | return 0; | 326 | return 0; |
327 | |||
328 | err_unpin: | ||
329 | engine->context_unpin(engine, engine->i915->kernel_context); | ||
330 | return ret; | ||
317 | } | 331 | } |
318 | 332 | ||
319 | /** | 333 | /** |
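Pinning the kernel context becomes the first step of engine init, so the error path (err_unpin) and intel_engine_cleanup_common() release it last, keeping acquisition and teardown symmetric. The invariant in miniature; every name here is a stand-in, only the ordering discipline matters:

struct example_engine { int id; };

static int example_pin_kernel_context(struct example_engine *e) { (void)e; return 0; }
static void example_unpin_kernel_context(struct example_engine *e) { (void)e; }
static int example_init_breadcrumbs(struct example_engine *e) { (void)e; return 0; }

static int example_engine_init(struct example_engine *engine)
{
	int ret;

	ret = example_pin_kernel_context(engine);	/* first in */
	if (ret)
		return ret;

	ret = example_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin;

	return 0;

err_unpin:
	example_unpin_kernel_context(engine);		/* last out */
	return ret;
}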
@@ -331,6 +345,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) | |||
331 | intel_engine_fini_breadcrumbs(engine); | 345 | intel_engine_fini_breadcrumbs(engine); |
332 | intel_engine_cleanup_cmd_parser(engine); | 346 | intel_engine_cleanup_cmd_parser(engine); |
333 | i915_gem_batch_pool_fini(&engine->batch_pool); | 347 | i915_gem_batch_pool_fini(&engine->batch_pool); |
348 | |||
349 | engine->context_unpin(engine, engine->i915->kernel_context); | ||
334 | } | 350 | } |
335 | 351 | ||
336 | u64 intel_engine_get_active_head(struct intel_engine_cs *engine) | 352 | u64 intel_engine_get_active_head(struct intel_engine_cs *engine) |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 659cebc3bfd2..26a81a9e9c1d 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -538,7 +538,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv, | |||
538 | IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 538 | IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
539 | end = ggtt->stolen_size - 8 * 1024 * 1024; | 539 | end = ggtt->stolen_size - 8 * 1024 * 1024; |
540 | else | 540 | else |
541 | end = ggtt->stolen_usable_size; | 541 | end = U64_MAX; |
542 | 542 | ||
543 | /* HACK: This code depends on what we will do in *_enable_fbc. If that | 543 | /* HACK: This code depends on what we will do in *_enable_fbc. If that |
544 | * code changes, this code needs to change as well. | 544 | * code changes, this code needs to change as well. |
@@ -1317,7 +1317,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) | |||
1317 | if (!HAS_FBC(dev_priv)) | 1317 | if (!HAS_FBC(dev_priv)) |
1318 | return 0; | 1318 | return 0; |
1319 | 1319 | ||
1320 | if (IS_BROADWELL(dev_priv)) | 1320 | if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) |
1321 | return 1; | 1321 | return 1; |
1322 | 1322 | ||
1323 | return 0; | 1323 | return 0; |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 570c07d59d1a..73d02d21c768 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -145,9 +145,9 @@ static int intelfb_alloc(struct drm_fb_helper *helper, | |||
145 | * important and we should probably use that space with FBC or other | 145 | * important and we should probably use that space with FBC or other |
146 | * features. */ | 146 | * features. */ |
147 | if (size * 2 < ggtt->stolen_usable_size) | 147 | if (size * 2 < ggtt->stolen_usable_size) |
148 | obj = i915_gem_object_create_stolen(dev, size); | 148 | obj = i915_gem_object_create_stolen(dev_priv, size); |
149 | if (obj == NULL) | 149 | if (obj == NULL) |
150 | obj = i915_gem_object_create(dev, size); | 150 | obj = i915_gem_object_create(dev_priv, size); |
151 | if (IS_ERR(obj)) { | 151 | if (IS_ERR(obj)) { |
152 | DRM_ERROR("failed to allocate framebuffer\n"); | 152 | DRM_ERROR("failed to allocate framebuffer\n"); |
153 | ret = PTR_ERR(obj); | 153 | ret = PTR_ERR(obj); |
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 324ea902558b..3202b32b5638 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h | |||
@@ -23,15 +23,6 @@ | |||
23 | #ifndef _INTEL_GUC_FWIF_H | 23 | #ifndef _INTEL_GUC_FWIF_H |
24 | #define _INTEL_GUC_FWIF_H | 24 | #define _INTEL_GUC_FWIF_H |
25 | 25 | ||
26 | /* | ||
27 | * This file is partially autogenerated, although currently with some manual | ||
28 | * fixups afterwards. In future, it should be entirely autogenerated, in order | ||
29 | * to ensure that the definitions herein remain in sync with those used by the | ||
30 | * GuC's own firmware. | ||
31 | * | ||
32 | * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST. | ||
33 | */ | ||
34 | |||
35 | #define GFXCORE_FAMILY_GEN9 12 | 26 | #define GFXCORE_FAMILY_GEN9 12 |
36 | #define GFXCORE_FAMILY_UNKNOWN 0x7fffffff | 27 | #define GFXCORE_FAMILY_UNKNOWN 0x7fffffff |
37 | 28 | ||
@@ -489,18 +480,18 @@ union guc_log_control { | |||
489 | } __packed; | 480 | } __packed; |
490 | 481 | ||
491 | /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */ | 482 | /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */ |
492 | enum host2guc_action { | 483 | enum intel_guc_action { |
493 | HOST2GUC_ACTION_DEFAULT = 0x0, | 484 | INTEL_GUC_ACTION_DEFAULT = 0x0, |
494 | HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6, | 485 | INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6, |
495 | HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10, | 486 | INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, |
496 | HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, | 487 | INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, |
497 | HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, | 488 | INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, |
498 | HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, | 489 | INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, |
499 | HOST2GUC_ACTION_ENTER_S_STATE = 0x501, | 490 | INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, |
500 | HOST2GUC_ACTION_EXIT_S_STATE = 0x502, | 491 | INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, |
501 | HOST2GUC_ACTION_SLPC_REQUEST = 0x3003, | 492 | INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, |
502 | HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, | 493 | INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, |
503 | HOST2GUC_ACTION_LIMIT | 494 | INTEL_GUC_ACTION_LIMIT |
504 | }; | 495 | }; |
505 | 496 | ||
506 | /* | 497 | /* |
@@ -509,22 +500,22 @@ enum host2guc_action { | |||
509 | * by the fact that all the MASK bits are set. The remaining bits | 500 | * by the fact that all the MASK bits are set. The remaining bits |
510 | * give more detail. | 501 | * give more detail. |
511 | */ | 502 | */ |
512 | #define GUC2HOST_RESPONSE_MASK ((u32)0xF0000000) | 503 | #define INTEL_GUC_RECV_MASK ((u32)0xF0000000) |
513 | #define GUC2HOST_IS_RESPONSE(x) ((u32)(x) >= GUC2HOST_RESPONSE_MASK) | 504 | #define INTEL_GUC_RECV_IS_RESPONSE(x) ((u32)(x) >= INTEL_GUC_RECV_MASK) |
514 | #define GUC2HOST_STATUS(x) (GUC2HOST_RESPONSE_MASK | (x)) | 505 | #define INTEL_GUC_RECV_STATUS(x) (INTEL_GUC_RECV_MASK | (x)) |
515 | 506 | ||
516 | /* GUC will return status back to SOFT_SCRATCH_O_REG */ | 507 | /* GUC will return status back to SOFT_SCRATCH_O_REG */ |
517 | enum guc2host_status { | 508 | enum intel_guc_status { |
518 | GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0), | 509 | INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0), |
519 | GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10), | 510 | INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10), |
520 | GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20), | 511 | INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20), |
521 | GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000) | 512 | INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000) |
522 | }; | 513 | }; |
523 | 514 | ||
524 | /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */ | 515 | /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */ |
525 | enum guc2host_message { | 516 | enum intel_guc_recv_message { |
526 | GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1), | 517 | INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1), |
527 | GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3) | 518 | INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3) |
528 | }; | 519 | }; |
529 | 520 | ||
530 | #endif | 521 | #endif |
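The host2guc/guc2host rename keeps the underlying trick intact: a scratch word is a response iff its top nibble is all ones, and since 0xF0000000 is the smallest such value, a plain >= comparison doubles as the mask test. A tiny standalone check of that equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t mask = 0xF0000000u;

	/* top nibble all ones: both formulations agree it's a response */
	assert((0xF0000005u >= mask) && ((0xF0000005u & mask) == mask));

	/* top nibble not all ones: not a response either way */
	assert(!(0xEFFFFFFFu >= mask) && ((0xEFFFFFFFu & mask) != mask));
	return 0;
}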
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 34d6ad2cf7c1..35d5690f47a2 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c | |||
@@ -28,7 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | #include <linux/firmware.h> | 29 | #include <linux/firmware.h> |
30 | #include "i915_drv.h" | 30 | #include "i915_drv.h" |
31 | #include "intel_guc.h" | 31 | #include "intel_uc.h" |
32 | 32 | ||
33 | /** | 33 | /** |
34 | * DOC: GuC-specific firmware loader | 34 | * DOC: GuC-specific firmware loader |
@@ -220,14 +220,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv) | |||
220 | params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; | 220 | params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; |
221 | 221 | ||
222 | if (guc->ads_vma) { | 222 | if (guc->ads_vma) { |
223 | u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; | 223 | u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; |
224 | params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; | 224 | params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; |
225 | params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; | 225 | params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; |
226 | } | 226 | } |
227 | 227 | ||
228 | /* If GuC submission is enabled, set up additional parameters here */ | 228 | /* If GuC submission is enabled, set up additional parameters here */ |
229 | if (i915.enable_guc_submission) { | 229 | if (i915.enable_guc_submission) { |
230 | u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma); | 230 | u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma); |
231 | u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16; | 231 | u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16; |
232 | 232 | ||
233 | pgs >>= PAGE_SHIFT; | 233 | pgs >>= PAGE_SHIFT; |
@@ -297,7 +297,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv, | |||
297 | I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); | 297 | I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); |
298 | 298 | ||
299 | /* Set the source address for the new blob */ | 299 | /* Set the source address for the new blob */ |
300 | offset = i915_ggtt_offset(vma) + guc_fw->header_offset; | 300 | offset = guc_ggtt_offset(vma) + guc_fw->header_offset; |
301 | I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); | 301 | I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); |
302 | I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); | 302 | I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); |
303 | 303 | ||
@@ -437,7 +437,7 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv) | |||
437 | 437 | ||
438 | /** | 438 | /** |
439 | * intel_guc_setup() - finish preparing the GuC for activity | 439 | * intel_guc_setup() - finish preparing the GuC for activity |
440 | * @dev: drm device | 440 | * @dev_priv: i915 device private |
441 | * | 441 | * |
442 | * Called from gem_init_hw() during driver loading and also after a GPU reset. | 442 | * Called from gem_init_hw() during driver loading and also after a GPU reset. |
443 | * | 443 | * |
@@ -448,9 +448,8 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv) | |||
448 | * | 448 | * |
449 | * Return: non-zero code on error | 449 | * Return: non-zero code on error |
450 | */ | 450 | */ |
451 | int intel_guc_setup(struct drm_device *dev) | 451 | int intel_guc_setup(struct drm_i915_private *dev_priv) |
452 | { | 452 | { |
453 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
454 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | 453 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; |
455 | const char *fw_path = guc_fw->guc_fw_path; | 454 | const char *fw_path = guc_fw->guc_fw_path; |
456 | int retries, ret, err; | 455 | int retries, ret, err; |
@@ -588,11 +587,12 @@ fail: | |||
588 | return ret; | 587 | return ret; |
589 | } | 588 | } |
590 | 589 | ||
591 | static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) | 590 | static void guc_fw_fetch(struct drm_i915_private *dev_priv, |
591 | struct intel_guc_fw *guc_fw) | ||
592 | { | 592 | { |
593 | struct pci_dev *pdev = dev->pdev; | 593 | struct pci_dev *pdev = dev_priv->drm.pdev; |
594 | struct drm_i915_gem_object *obj; | 594 | struct drm_i915_gem_object *obj; |
595 | const struct firmware *fw; | 595 | const struct firmware *fw = NULL; |
596 | struct guc_css_header *css; | 596 | struct guc_css_header *css; |
597 | size_t size; | 597 | size_t size; |
598 | int err; | 598 | int err; |
@@ -648,7 +648,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) | |||
648 | 648 | ||
649 | /* Header and uCode will be loaded to WOPCM. Size of the two. */ | 649 | /* Header and uCode will be loaded to WOPCM. Size of the two. */ |
650 | size = guc_fw->header_size + guc_fw->ucode_size; | 650 | size = guc_fw->header_size + guc_fw->ucode_size; |
651 | if (size > guc_wopcm_size(to_i915(dev))) { | 651 | if (size > guc_wopcm_size(dev_priv)) { |
652 | DRM_NOTE("Firmware is too large to fit in WOPCM\n"); | 652 | DRM_NOTE("Firmware is too large to fit in WOPCM\n"); |
653 | goto fail; | 653 | goto fail; |
654 | } | 654 | } |
@@ -675,9 +675,9 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) | |||
675 | guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found, | 675 | guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found, |
676 | guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); | 676 | guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); |
677 | 677 | ||
678 | mutex_lock(&dev->struct_mutex); | 678 | mutex_lock(&dev_priv->drm.struct_mutex); |
679 | obj = i915_gem_object_create_from_data(dev, fw->data, fw->size); | 679 | obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size); |
680 | mutex_unlock(&dev->struct_mutex); | 680 | mutex_unlock(&dev_priv->drm.struct_mutex); |
681 | if (IS_ERR_OR_NULL(obj)) { | 681 | if (IS_ERR_OR_NULL(obj)) { |
682 | err = obj ? PTR_ERR(obj) : -ENOMEM; | 682 | err = obj ? PTR_ERR(obj) : -ENOMEM; |
683 | goto fail; | 683 | goto fail; |
@@ -699,12 +699,12 @@ fail: | |||
699 | DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n", | 699 | DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n", |
700 | err, fw, guc_fw->guc_fw_obj); | 700 | err, fw, guc_fw->guc_fw_obj); |
701 | 701 | ||
702 | mutex_lock(&dev->struct_mutex); | 702 | mutex_lock(&dev_priv->drm.struct_mutex); |
703 | obj = guc_fw->guc_fw_obj; | 703 | obj = guc_fw->guc_fw_obj; |
704 | if (obj) | 704 | if (obj) |
705 | i915_gem_object_put(obj); | 705 | i915_gem_object_put(obj); |
706 | guc_fw->guc_fw_obj = NULL; | 706 | guc_fw->guc_fw_obj = NULL; |
707 | mutex_unlock(&dev->struct_mutex); | 707 | mutex_unlock(&dev_priv->drm.struct_mutex); |
708 | 708 | ||
709 | release_firmware(fw); /* OK even if fw is NULL */ | 709 | release_firmware(fw); /* OK even if fw is NULL */ |
710 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; | 710 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; |
@@ -712,16 +712,15 @@ fail: | |||
712 | 712 | ||
713 | /** | 713 | /** |
714 | * intel_guc_init() - define parameters and fetch firmware | 714 | * intel_guc_init() - define parameters and fetch firmware |
715 | * @dev: drm device | 715 | * @dev_priv: i915 device private |
716 | * | 716 | * |
717 | * Called early during driver load, but after GEM is initialised. | 717 | * Called early during driver load, but after GEM is initialised. |
718 | * | 718 | * |
719 | * The firmware will be transferred to the GuC's memory later, | 719 | * The firmware will be transferred to the GuC's memory later, |
720 | * when intel_guc_setup() is called. | 720 | * when intel_guc_setup() is called. |
721 | */ | 721 | */ |
722 | void intel_guc_init(struct drm_device *dev) | 722 | void intel_guc_init(struct drm_i915_private *dev_priv) |
723 | { | 723 | { |
724 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
725 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | 724 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; |
726 | const char *fw_path; | 725 | const char *fw_path; |
727 | 726 | ||
@@ -754,7 +753,6 @@ void intel_guc_init(struct drm_device *dev) | |||
754 | fw_path = ""; /* unknown device */ | 753 | fw_path = ""; /* unknown device */ |
755 | } | 754 | } |
756 | 755 | ||
757 | guc_fw->guc_dev = dev; | ||
758 | guc_fw->guc_fw_path = fw_path; | 756 | guc_fw->guc_fw_path = fw_path; |
759 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; | 757 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; |
760 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; | 758 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; |
@@ -769,20 +767,19 @@ void intel_guc_init(struct drm_device *dev) | |||
769 | 767 | ||
770 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; | 768 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; |
771 | DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); | 769 | DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); |
772 | guc_fw_fetch(dev, guc_fw); | 770 | guc_fw_fetch(dev_priv, guc_fw); |
773 | /* status must now be FAIL or SUCCESS */ | 771 | /* status must now be FAIL or SUCCESS */ |
774 | } | 772 | } |
775 | 773 | ||
776 | /** | 774 | /** |
777 | * intel_guc_fini() - clean up all allocated resources | 775 | * intel_guc_fini() - clean up all allocated resources |
778 | * @dev: drm device | 776 | * @dev_priv: i915 device private |
779 | */ | 777 | */ |
780 | void intel_guc_fini(struct drm_device *dev) | 778 | void intel_guc_fini(struct drm_i915_private *dev_priv) |
781 | { | 779 | { |
782 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
783 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; | 780 | struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; |
784 | 781 | ||
785 | mutex_lock(&dev->struct_mutex); | 782 | mutex_lock(&dev_priv->drm.struct_mutex); |
786 | guc_interrupts_release(dev_priv); | 783 | guc_interrupts_release(dev_priv); |
787 | i915_guc_submission_disable(dev_priv); | 784 | i915_guc_submission_disable(dev_priv); |
788 | i915_guc_submission_fini(dev_priv); | 785 | i915_guc_submission_fini(dev_priv); |
@@ -790,7 +787,7 @@ void intel_guc_fini(struct drm_device *dev) | |||
790 | if (guc_fw->guc_fw_obj) | 787 | if (guc_fw->guc_fw_obj) |
791 | i915_gem_object_put(guc_fw->guc_fw_obj); | 788 | i915_gem_object_put(guc_fw->guc_fw_obj); |
792 | guc_fw->guc_fw_obj = NULL; | 789 | guc_fw->guc_fw_obj = NULL; |
793 | mutex_unlock(&dev->struct_mutex); | 790 | mutex_unlock(&dev_priv->drm.struct_mutex); |
794 | 791 | ||
795 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; | 792 | guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; |
796 | } | 793 | } |
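The GuC hunks above are one mechanical conversion: functions that took a struct drm_device now take struct drm_i915_private directly. A minimal sketch of why this is cheap, assuming the usual embedding of the drm_device inside the i915 private structure (as in i915_drv.h of this period):

	/* dev_priv embeds its drm_device, so one direction is a
	 * zero-cost container_of() ...
	 */
	static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
	{
		return container_of(dev, struct drm_i915_private, drm);
	}

	/* ... and the other is a plain member access, which is why the
	 * converted code above locks &dev_priv->drm.struct_mutex directly:
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
	mutex_unlock(&dev_priv->drm.struct_mutex);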
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 53df5b11bff4..f05971f5586f 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c | |||
@@ -236,13 +236,13 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) | |||
236 | memset(&engine->hangcheck.instdone, 0, | 236 | memset(&engine->hangcheck.instdone, 0, |
237 | sizeof(engine->hangcheck.instdone)); | 237 | sizeof(engine->hangcheck.instdone)); |
238 | 238 | ||
239 | return HANGCHECK_ACTIVE; | 239 | return ENGINE_ACTIVE_HEAD; |
240 | } | 240 | } |
241 | 241 | ||
242 | if (!subunits_stuck(engine)) | 242 | if (!subunits_stuck(engine)) |
243 | return HANGCHECK_ACTIVE; | 243 | return ENGINE_ACTIVE_SUBUNITS; |
244 | 244 | ||
245 | return HANGCHECK_HUNG; | 245 | return ENGINE_DEAD; |
246 | } | 246 | } |
247 | 247 | ||
248 | static enum intel_engine_hangcheck_action | 248 | static enum intel_engine_hangcheck_action |
@@ -253,11 +253,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) | |||
253 | u32 tmp; | 253 | u32 tmp; |
254 | 254 | ||
255 | ha = head_stuck(engine, acthd); | 255 | ha = head_stuck(engine, acthd); |
256 | if (ha != HANGCHECK_HUNG) | 256 | if (ha != ENGINE_DEAD) |
257 | return ha; | 257 | return ha; |
258 | 258 | ||
259 | if (IS_GEN2(dev_priv)) | 259 | if (IS_GEN2(dev_priv)) |
260 | return HANGCHECK_HUNG; | 260 | return ENGINE_DEAD; |
261 | 261 | ||
262 | /* Is the chip hanging on a WAIT_FOR_EVENT? | 262 | /* Is the chip hanging on a WAIT_FOR_EVENT? |
263 | * If so we can simply poke the RB_WAIT bit | 263 | * If so we can simply poke the RB_WAIT bit |
@@ -270,25 +270,144 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) | |||
270 | "Kicking stuck wait on %s", | 270 | "Kicking stuck wait on %s", |
271 | engine->name); | 271 | engine->name); |
272 | I915_WRITE_CTL(engine, tmp); | 272 | I915_WRITE_CTL(engine, tmp); |
273 | return HANGCHECK_KICK; | 273 | return ENGINE_WAIT_KICK; |
274 | } | 274 | } |
275 | 275 | ||
276 | if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { | 276 | if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { |
277 | switch (semaphore_passed(engine)) { | 277 | switch (semaphore_passed(engine)) { |
278 | default: | 278 | default: |
279 | return HANGCHECK_HUNG; | 279 | return ENGINE_DEAD; |
280 | case 1: | 280 | case 1: |
281 | i915_handle_error(dev_priv, 0, | 281 | i915_handle_error(dev_priv, 0, |
282 | "Kicking stuck semaphore on %s", | 282 | "Kicking stuck semaphore on %s", |
283 | engine->name); | 283 | engine->name); |
284 | I915_WRITE_CTL(engine, tmp); | 284 | I915_WRITE_CTL(engine, tmp); |
285 | return HANGCHECK_KICK; | 285 | return ENGINE_WAIT_KICK; |
286 | case 0: | 286 | case 0: |
287 | return HANGCHECK_WAIT; | 287 | return ENGINE_WAIT; |
288 | } | 288 | } |
289 | } | 289 | } |
290 | 290 | ||
291 | return HANGCHECK_HUNG; | 291 | return ENGINE_DEAD; |
292 | } | ||
293 | |||
294 | static void hangcheck_load_sample(struct intel_engine_cs *engine, | ||
295 | struct intel_engine_hangcheck *hc) | ||
296 | { | ||
297 | /* We don't strictly need an irq-barrier here, as we are not | ||
298 | * serving an interrupt request; but be paranoid in case the | ||
299 | * barrier has side-effects (such as preventing a broken | ||
300 | * cacheline snoop) and make sure that we can see the seqno | ||
301 | * advance. If the seqno were to stick, due to a stale | ||
302 | * cacheline, we would erroneously declare the GPU hung. | ||
303 | */ | ||
304 | if (engine->irq_seqno_barrier) | ||
305 | engine->irq_seqno_barrier(engine); | ||
306 | |||
307 | hc->acthd = intel_engine_get_active_head(engine); | ||
308 | hc->seqno = intel_engine_get_seqno(engine); | ||
309 | } | ||
310 | |||
311 | static void hangcheck_store_sample(struct intel_engine_cs *engine, | ||
312 | const struct intel_engine_hangcheck *hc) | ||
313 | { | ||
314 | engine->hangcheck.acthd = hc->acthd; | ||
315 | engine->hangcheck.seqno = hc->seqno; | ||
316 | engine->hangcheck.action = hc->action; | ||
317 | engine->hangcheck.stalled = hc->stalled; | ||
318 | } | ||
319 | |||
320 | static enum intel_engine_hangcheck_action | ||
321 | hangcheck_get_action(struct intel_engine_cs *engine, | ||
322 | const struct intel_engine_hangcheck *hc) | ||
323 | { | ||
324 | if (engine->hangcheck.seqno != hc->seqno) | ||
325 | return ENGINE_ACTIVE_SEQNO; | ||
326 | |||
327 | if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine))) | ||
328 | return ENGINE_IDLE; | ||
329 | |||
330 | return engine_stuck(engine, hc->acthd); | ||
331 | } | ||
332 | |||
333 | static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, | ||
334 | struct intel_engine_hangcheck *hc) | ||
335 | { | ||
336 | unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT; | ||
337 | |||
338 | hc->action = hangcheck_get_action(engine, hc); | ||
339 | |||
340 | /* We always record forward progress | ||
341 | * while the engine is busy and still | ||
342 | * processing the same request, so that no | ||
343 | * single request can run indefinitely | ||
344 | * (such as a chain of batches). The only | ||
345 | * time we do not penalise an engine is | ||
346 | * when it is in a legitimate wait for | ||
347 | * another engine; in that case the waiting | ||
348 | * engine is a victim, and we want to be | ||
349 | * sure we catch the right culprit. Each | ||
350 | * time we do kick the ring, we count it | ||
351 | * as progress: the kick should make the | ||
352 | * seqno advance, and if it does not, the | ||
353 | * next sample will catch the hanging engine. | ||
354 | |||
355 | switch (hc->action) { | ||
356 | case ENGINE_IDLE: | ||
357 | case ENGINE_ACTIVE_SEQNO: | ||
358 | /* Clear head and subunit states on seqno movement */ | ||
359 | hc->acthd = 0; | ||
360 | |||
361 | memset(&engine->hangcheck.instdone, 0, | ||
362 | sizeof(engine->hangcheck.instdone)); | ||
363 | |||
364 | /* Intentional fall through */ | ||
365 | case ENGINE_WAIT_KICK: | ||
366 | case ENGINE_WAIT: | ||
367 | engine->hangcheck.action_timestamp = jiffies; | ||
368 | break; | ||
369 | |||
370 | case ENGINE_ACTIVE_HEAD: | ||
371 | case ENGINE_ACTIVE_SUBUNITS: | ||
372 | /* A stuck seqno with a still-active engine gets leeway, | ||
373 | * in the hope that it is just a long-running shader. | ||
374 | */ | ||
375 | timeout = I915_SEQNO_DEAD_TIMEOUT; | ||
376 | break; | ||
377 | |||
378 | case ENGINE_DEAD: | ||
379 | break; | ||
380 | |||
381 | default: | ||
382 | MISSING_CASE(hc->action); | ||
383 | } | ||
384 | |||
385 | hc->stalled = time_after(jiffies, | ||
386 | engine->hangcheck.action_timestamp + timeout); | ||
387 | } | ||
388 | |||
389 | static void hangcheck_declare_hang(struct drm_i915_private *i915, | ||
390 | unsigned int hung, | ||
391 | unsigned int stuck) | ||
392 | { | ||
393 | struct intel_engine_cs *engine; | ||
394 | char msg[80]; | ||
395 | unsigned int tmp; | ||
396 | int len; | ||
397 | |||
398 | /* If some rings hung but others were still busy, only | ||
399 | * blame the hanging rings in the synopsis. | ||
400 | */ | ||
401 | if (stuck != hung) | ||
402 | hung &= ~stuck; | ||
403 | len = scnprintf(msg, sizeof(msg), | ||
404 | "%s on ", stuck == hung ? "No progress" : "Hang"); | ||
405 | for_each_engine_masked(engine, i915, hung, tmp) | ||
406 | len += scnprintf(msg + len, sizeof(msg) - len, | ||
407 | "%s, ", engine->name); | ||
408 | msg[len-2] = '\0'; | ||
409 | |||
410 | i915_handle_error(i915, hung, msg); | ||
292 | } | 411 | } |
293 | 412 | ||
294 | /* | 413 | /* |
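The helpers added here split one hangcheck tick into three phases: sample the engine, classify the sample, publish the result. Condensed from the loop rewritten further below, the per-engine flow is a sketch like:

	for_each_engine(engine, dev_priv, id) {
		struct intel_engine_hangcheck hc;

		hangcheck_load_sample(engine, &hc);       /* snapshot ACTHD + seqno */
		hangcheck_accumulate_sample(engine, &hc); /* classify, arm the deadline */
		hangcheck_store_sample(engine, &hc);      /* publish for the next tick */

		if (engine->hangcheck.stalled)
			hung |= intel_engine_flag(engine);
	}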
@@ -308,10 +427,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) | |||
308 | enum intel_engine_id id; | 427 | enum intel_engine_id id; |
309 | unsigned int hung = 0, stuck = 0; | 428 | unsigned int hung = 0, stuck = 0; |
310 | int busy_count = 0; | 429 | int busy_count = 0; |
311 | #define BUSY 1 | ||
312 | #define KICK 5 | ||
313 | #define HUNG 20 | ||
314 | #define ACTIVE_DECAY 15 | ||
315 | 430 | ||
316 | if (!i915.enable_hangcheck) | 431 | if (!i915.enable_hangcheck) |
317 | return; | 432 | return; |
@@ -319,6 +434,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) | |||
319 | if (!READ_ONCE(dev_priv->gt.awake)) | 434 | if (!READ_ONCE(dev_priv->gt.awake)) |
320 | return; | 435 | return; |
321 | 436 | ||
437 | if (i915_terminally_wedged(&dev_priv->gpu_error)) | ||
438 | return; | ||
439 | |||
322 | /* As enabling the GPU requires fairly extensive mmio access, | 440 | /* As enabling the GPU requires fairly extensive mmio access, |
323 | * periodically arm the mmio checker to see if we are triggering | 441 | * periodically arm the mmio checker to see if we are triggering |
324 | * any invalid access. | 442 | * any invalid access. |
@@ -326,112 +444,26 @@ static void i915_hangcheck_elapsed(struct work_struct *work) | |||
326 | intel_uncore_arm_unclaimed_mmio_detection(dev_priv); | 444 | intel_uncore_arm_unclaimed_mmio_detection(dev_priv); |
327 | 445 | ||
328 | for_each_engine(engine, dev_priv, id) { | 446 | for_each_engine(engine, dev_priv, id) { |
329 | bool busy = intel_engine_has_waiter(engine); | 447 | struct intel_engine_hangcheck cur_state, *hc = &cur_state; |
330 | u64 acthd; | 448 | const bool busy = intel_engine_has_waiter(engine); |
331 | u32 seqno; | ||
332 | u32 submit; | ||
333 | 449 | ||
334 | semaphore_clear_deadlocks(dev_priv); | 450 | semaphore_clear_deadlocks(dev_priv); |
335 | 451 | ||
336 | /* We don't strictly need an irq-barrier here, as we are not | 452 | hangcheck_load_sample(engine, hc); |
337 | * serving an interrupt request, be paranoid in case the | 453 | hangcheck_accumulate_sample(engine, hc); |
338 | * barrier has side-effects (such as preventing a broken | 454 | hangcheck_store_sample(engine, hc); |
339 | * cacheline snoop) and so be sure that we can see the seqno | 455 | |
340 | * advance. If the seqno should stick, due to a stale | 456 | if (engine->hangcheck.stalled) { |
341 | * cacheline, we would erroneously declare the GPU hung. | 457 | hung |= intel_engine_flag(engine); |
342 | */ | 458 | if (hc->action != ENGINE_DEAD) |
343 | if (engine->irq_seqno_barrier) | 459 | stuck |= intel_engine_flag(engine); |
344 | engine->irq_seqno_barrier(engine); | ||
345 | |||
346 | acthd = intel_engine_get_active_head(engine); | ||
347 | seqno = intel_engine_get_seqno(engine); | ||
348 | submit = intel_engine_last_submit(engine); | ||
349 | |||
350 | if (engine->hangcheck.seqno == seqno) { | ||
351 | if (i915_seqno_passed(seqno, submit)) { | ||
352 | engine->hangcheck.action = HANGCHECK_IDLE; | ||
353 | } else { | ||
354 | /* We always increment the hangcheck score | ||
355 | * if the engine is busy and still processing | ||
356 | * the same request, so that no single request | ||
357 | * can run indefinitely (such as a chain of | ||
358 | * batches). The only time we do not increment | ||
359 | * the hangcheck score on this ring, if this | ||
360 | * engine is in a legitimate wait for another | ||
361 | * engine. In that case the waiting engine is a | ||
362 | * victim and we want to be sure we catch the | ||
363 | * right culprit. Then every time we do kick | ||
364 | * the ring, add a small increment to the | ||
365 | * score so that we can catch a batch that is | ||
366 | * being repeatedly kicked and so responsible | ||
367 | * for stalling the machine. | ||
368 | */ | ||
369 | engine->hangcheck.action = | ||
370 | engine_stuck(engine, acthd); | ||
371 | |||
372 | switch (engine->hangcheck.action) { | ||
373 | case HANGCHECK_IDLE: | ||
374 | case HANGCHECK_WAIT: | ||
375 | break; | ||
376 | case HANGCHECK_ACTIVE: | ||
377 | engine->hangcheck.score += BUSY; | ||
378 | break; | ||
379 | case HANGCHECK_KICK: | ||
380 | engine->hangcheck.score += KICK; | ||
381 | break; | ||
382 | case HANGCHECK_HUNG: | ||
383 | engine->hangcheck.score += HUNG; | ||
384 | break; | ||
385 | } | ||
386 | } | ||
387 | |||
388 | if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { | ||
389 | hung |= intel_engine_flag(engine); | ||
390 | if (engine->hangcheck.action != HANGCHECK_HUNG) | ||
391 | stuck |= intel_engine_flag(engine); | ||
392 | } | ||
393 | } else { | ||
394 | engine->hangcheck.action = HANGCHECK_ACTIVE; | ||
395 | |||
396 | /* Gradually reduce the count so that we catch DoS | ||
397 | * attempts across multiple batches. | ||
398 | */ | ||
399 | if (engine->hangcheck.score > 0) | ||
400 | engine->hangcheck.score -= ACTIVE_DECAY; | ||
401 | if (engine->hangcheck.score < 0) | ||
402 | engine->hangcheck.score = 0; | ||
403 | |||
404 | /* Clear head and subunit states on seqno movement */ | ||
405 | acthd = 0; | ||
406 | |||
407 | memset(&engine->hangcheck.instdone, 0, | ||
408 | sizeof(engine->hangcheck.instdone)); | ||
409 | } | 460 | } |
410 | 461 | ||
411 | engine->hangcheck.seqno = seqno; | ||
412 | engine->hangcheck.acthd = acthd; | ||
413 | busy_count += busy; | 462 | busy_count += busy; |
414 | } | 463 | } |
415 | 464 | ||
416 | if (hung) { | 465 | if (hung) |
417 | char msg[80]; | 466 | hangcheck_declare_hang(dev_priv, hung, stuck); |
418 | unsigned int tmp; | ||
419 | int len; | ||
420 | |||
421 | /* If some rings hung but others were still busy, only | ||
422 | * blame the hanging rings in the synopsis. | ||
423 | */ | ||
424 | if (stuck != hung) | ||
425 | hung &= ~stuck; | ||
426 | len = scnprintf(msg, sizeof(msg), | ||
427 | "%s on ", stuck == hung ? "No progress" : "Hang"); | ||
428 | for_each_engine_masked(engine, dev_priv, hung, tmp) | ||
429 | len += scnprintf(msg + len, sizeof(msg) - len, | ||
430 | "%s, ", engine->name); | ||
431 | msg[len-2] = '\0'; | ||
432 | |||
433 | return i915_handle_error(dev_priv, hung, msg); | ||
434 | } | ||
435 | 467 | ||
436 | /* Reset timer in case GPU hangs without another request being added */ | 468 | /* Reset timer in case GPU hangs without another request being added */ |
437 | if (busy_count) | 469 | if (busy_count) |
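Taken together, these hunks replace the accumulating hangcheck score with a per-engine action plus a jiffies deadline: an engine is stalled once time_after(jiffies, action_timestamp + timeout) holds, with ENGINE_ACTIVE_HEAD/ENGINE_ACTIVE_SUBUNITS granted the separate I915_SEQNO_DEAD_TIMEOUT as leeway for long-running shaders. Roughly, reading the hunks above, the old actions map onto the new ones as:

	HANGCHECK_IDLE / HANGCHECK_WAIT -> ENGINE_IDLE / ENGINE_WAIT   (deadline refreshed)
	HANGCHECK_ACTIVE                -> ENGINE_ACTIVE_SEQNO / _HEAD / _SUBUNITS
	HANGCHECK_KICK                  -> ENGINE_WAIT_KICK
	HANGCHECK_HUNG                  -> ENGINE_DEAD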
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index fb88e32e25a3..0bcfead14571 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -133,6 +133,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, | |||
133 | } | 133 | } |
134 | 134 | ||
135 | static void g4x_write_infoframe(struct drm_encoder *encoder, | 135 | static void g4x_write_infoframe(struct drm_encoder *encoder, |
136 | const struct intel_crtc_state *crtc_state, | ||
136 | enum hdmi_infoframe_type type, | 137 | enum hdmi_infoframe_type type, |
137 | const void *frame, ssize_t len) | 138 | const void *frame, ssize_t len) |
138 | { | 139 | { |
@@ -187,13 +188,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder, | |||
187 | } | 188 | } |
188 | 189 | ||
189 | static void ibx_write_infoframe(struct drm_encoder *encoder, | 190 | static void ibx_write_infoframe(struct drm_encoder *encoder, |
191 | const struct intel_crtc_state *crtc_state, | ||
190 | enum hdmi_infoframe_type type, | 192 | enum hdmi_infoframe_type type, |
191 | const void *frame, ssize_t len) | 193 | const void *frame, ssize_t len) |
192 | { | 194 | { |
193 | const uint32_t *data = frame; | 195 | const uint32_t *data = frame; |
194 | struct drm_device *dev = encoder->dev; | 196 | struct drm_device *dev = encoder->dev; |
195 | struct drm_i915_private *dev_priv = to_i915(dev); | 197 | struct drm_i915_private *dev_priv = to_i915(dev); |
196 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 198 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
197 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 199 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
198 | u32 val = I915_READ(reg); | 200 | u32 val = I915_READ(reg); |
199 | int i; | 201 | int i; |
@@ -246,13 +248,14 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder, | |||
246 | } | 248 | } |
247 | 249 | ||
248 | static void cpt_write_infoframe(struct drm_encoder *encoder, | 250 | static void cpt_write_infoframe(struct drm_encoder *encoder, |
251 | const struct intel_crtc_state *crtc_state, | ||
249 | enum hdmi_infoframe_type type, | 252 | enum hdmi_infoframe_type type, |
250 | const void *frame, ssize_t len) | 253 | const void *frame, ssize_t len) |
251 | { | 254 | { |
252 | const uint32_t *data = frame; | 255 | const uint32_t *data = frame; |
253 | struct drm_device *dev = encoder->dev; | 256 | struct drm_device *dev = encoder->dev; |
254 | struct drm_i915_private *dev_priv = to_i915(dev); | 257 | struct drm_i915_private *dev_priv = to_i915(dev); |
255 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 258 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
256 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 259 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
257 | u32 val = I915_READ(reg); | 260 | u32 val = I915_READ(reg); |
258 | int i; | 261 | int i; |
@@ -303,13 +306,14 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder, | |||
303 | } | 306 | } |
304 | 307 | ||
305 | static void vlv_write_infoframe(struct drm_encoder *encoder, | 308 | static void vlv_write_infoframe(struct drm_encoder *encoder, |
309 | const struct intel_crtc_state *crtc_state, | ||
306 | enum hdmi_infoframe_type type, | 310 | enum hdmi_infoframe_type type, |
307 | const void *frame, ssize_t len) | 311 | const void *frame, ssize_t len) |
308 | { | 312 | { |
309 | const uint32_t *data = frame; | 313 | const uint32_t *data = frame; |
310 | struct drm_device *dev = encoder->dev; | 314 | struct drm_device *dev = encoder->dev; |
311 | struct drm_i915_private *dev_priv = to_i915(dev); | 315 | struct drm_i915_private *dev_priv = to_i915(dev); |
312 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 316 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
313 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); | 317 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); |
314 | u32 val = I915_READ(reg); | 318 | u32 val = I915_READ(reg); |
315 | int i; | 319 | int i; |
@@ -361,14 +365,14 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder, | |||
361 | } | 365 | } |
362 | 366 | ||
363 | static void hsw_write_infoframe(struct drm_encoder *encoder, | 367 | static void hsw_write_infoframe(struct drm_encoder *encoder, |
368 | const struct intel_crtc_state *crtc_state, | ||
364 | enum hdmi_infoframe_type type, | 369 | enum hdmi_infoframe_type type, |
365 | const void *frame, ssize_t len) | 370 | const void *frame, ssize_t len) |
366 | { | 371 | { |
367 | const uint32_t *data = frame; | 372 | const uint32_t *data = frame; |
368 | struct drm_device *dev = encoder->dev; | 373 | struct drm_device *dev = encoder->dev; |
369 | struct drm_i915_private *dev_priv = to_i915(dev); | 374 | struct drm_i915_private *dev_priv = to_i915(dev); |
370 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 375 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
371 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | ||
372 | i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); | 376 | i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); |
373 | i915_reg_t data_reg; | 377 | i915_reg_t data_reg; |
374 | int i; | 378 | int i; |
@@ -425,6 +429,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder, | |||
425 | * bytes by one. | 429 | * bytes by one. |
426 | */ | 430 | */ |
427 | static void intel_write_infoframe(struct drm_encoder *encoder, | 431 | static void intel_write_infoframe(struct drm_encoder *encoder, |
432 | const struct intel_crtc_state *crtc_state, | ||
428 | union hdmi_infoframe *frame) | 433 | union hdmi_infoframe *frame) |
429 | { | 434 | { |
430 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 435 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
@@ -443,26 +448,25 @@ static void intel_write_infoframe(struct drm_encoder *encoder, | |||
443 | buffer[3] = 0; | 448 | buffer[3] = 0; |
444 | len++; | 449 | len++; |
445 | 450 | ||
446 | intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len); | 451 | intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); |
447 | } | 452 | } |
448 | 453 | ||
449 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | 454 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, |
450 | const struct drm_display_mode *adjusted_mode) | 455 | const struct intel_crtc_state *crtc_state) |
451 | { | 456 | { |
452 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 457 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
453 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
454 | union hdmi_infoframe frame; | 458 | union hdmi_infoframe frame; |
455 | int ret; | 459 | int ret; |
456 | 460 | ||
457 | ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, | 461 | ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, |
458 | adjusted_mode); | 462 | &crtc_state->base.adjusted_mode); |
459 | if (ret < 0) { | 463 | if (ret < 0) { |
460 | DRM_ERROR("couldn't fill AVI infoframe\n"); | 464 | DRM_ERROR("couldn't fill AVI infoframe\n"); |
461 | return; | 465 | return; |
462 | } | 466 | } |
463 | 467 | ||
464 | if (intel_hdmi->rgb_quant_range_selectable) { | 468 | if (intel_hdmi->rgb_quant_range_selectable) { |
465 | if (intel_crtc->config->limited_color_range) | 469 | if (crtc_state->limited_color_range) |
466 | frame.avi.quantization_range = | 470 | frame.avi.quantization_range = |
467 | HDMI_QUANTIZATION_RANGE_LIMITED; | 471 | HDMI_QUANTIZATION_RANGE_LIMITED; |
468 | else | 472 | else |
@@ -470,10 +474,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | |||
470 | HDMI_QUANTIZATION_RANGE_FULL; | 474 | HDMI_QUANTIZATION_RANGE_FULL; |
471 | } | 475 | } |
472 | 476 | ||
473 | intel_write_infoframe(encoder, &frame); | 477 | intel_write_infoframe(encoder, crtc_state, &frame); |
474 | } | 478 | } |
475 | 479 | ||
476 | static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | 480 | static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, |
481 | const struct intel_crtc_state *crtc_state) | ||
477 | { | 482 | { |
478 | union hdmi_infoframe frame; | 483 | union hdmi_infoframe frame; |
479 | int ret; | 484 | int ret; |
@@ -486,27 +491,28 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | |||
486 | 491 | ||
487 | frame.spd.sdi = HDMI_SPD_SDI_PC; | 492 | frame.spd.sdi = HDMI_SPD_SDI_PC; |
488 | 493 | ||
489 | intel_write_infoframe(encoder, &frame); | 494 | intel_write_infoframe(encoder, crtc_state, &frame); |
490 | } | 495 | } |
491 | 496 | ||
492 | static void | 497 | static void |
493 | intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, | 498 | intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, |
494 | const struct drm_display_mode *adjusted_mode) | 499 | const struct intel_crtc_state *crtc_state) |
495 | { | 500 | { |
496 | union hdmi_infoframe frame; | 501 | union hdmi_infoframe frame; |
497 | int ret; | 502 | int ret; |
498 | 503 | ||
499 | ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, | 504 | ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, |
500 | adjusted_mode); | 505 | &crtc_state->base.adjusted_mode); |
501 | if (ret < 0) | 506 | if (ret < 0) |
502 | return; | 507 | return; |
503 | 508 | ||
504 | intel_write_infoframe(encoder, &frame); | 509 | intel_write_infoframe(encoder, crtc_state, &frame); |
505 | } | 510 | } |
506 | 511 | ||
507 | static void g4x_set_infoframes(struct drm_encoder *encoder, | 512 | static void g4x_set_infoframes(struct drm_encoder *encoder, |
508 | bool enable, | 513 | bool enable, |
509 | const struct drm_display_mode *adjusted_mode) | 514 | const struct intel_crtc_state *crtc_state, |
515 | const struct drm_connector_state *conn_state) | ||
510 | { | 516 | { |
511 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 517 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); |
512 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 518 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
@@ -560,28 +566,22 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, | |||
560 | I915_WRITE(reg, val); | 566 | I915_WRITE(reg, val); |
561 | POSTING_READ(reg); | 567 | POSTING_READ(reg); |
562 | 568 | ||
563 | intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | 569 | intel_hdmi_set_avi_infoframe(encoder, crtc_state); |
564 | intel_hdmi_set_spd_infoframe(encoder); | 570 | intel_hdmi_set_spd_infoframe(encoder, crtc_state); |
565 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 571 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); |
566 | } | 572 | } |
567 | 573 | ||
568 | static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder) | 574 | static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state) |
569 | { | 575 | { |
570 | struct drm_device *dev = encoder->dev; | 576 | struct drm_connector *connector = conn_state->connector; |
571 | struct drm_connector *connector; | ||
572 | |||
573 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | ||
574 | 577 | ||
575 | /* | 578 | /* |
576 | * HDMI cloning is only supported on g4x, which doesn't | 579 | * HDMI cloning is only supported on g4x, which doesn't |
577 | * support deep color or GCP infoframes anyway, so no | 580 | * support deep color or GCP infoframes anyway, so no |
578 | * need to worry about multiple HDMI sinks here. | 581 | * need to worry about multiple HDMI sinks here. |
579 | */ | 582 | */ |
580 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
581 | if (connector->encoder == encoder) | ||
582 | return connector->display_info.bpc > 8; | ||
583 | 583 | ||
584 | return false; | 584 | return connector->display_info.bpc > 8; |
585 | } | 585 | } |
586 | 586 | ||
587 | /* | 587 | /* |
@@ -627,15 +627,17 @@ static bool gcp_default_phase_possible(int pipe_bpp, | |||
627 | mode->crtc_htotal/2 % pixels_per_group == 0); | 627 | mode->crtc_htotal/2 % pixels_per_group == 0); |
628 | } | 628 | } |
629 | 629 | ||
630 | static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) | 630 | static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, |
631 | const struct intel_crtc_state *crtc_state, | ||
632 | const struct drm_connector_state *conn_state) | ||
631 | { | 633 | { |
632 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 634 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); |
633 | struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); | 635 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
634 | i915_reg_t reg; | 636 | i915_reg_t reg; |
635 | u32 val = 0; | 637 | u32 val = 0; |
636 | 638 | ||
637 | if (HAS_DDI(dev_priv)) | 639 | if (HAS_DDI(dev_priv)) |
638 | reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); | 640 | reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); |
639 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 641 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
640 | reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); | 642 | reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); |
641 | else if (HAS_PCH_SPLIT(dev_priv)) | 643 | else if (HAS_PCH_SPLIT(dev_priv)) |
@@ -644,12 +646,12 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) | |||
644 | return false; | 646 | return false; |
645 | 647 | ||
646 | /* Indicate color depth whenever the sink supports deep color */ | 648 | /* Indicate color depth whenever the sink supports deep color */ |
647 | if (hdmi_sink_is_deep_color(encoder)) | 649 | if (hdmi_sink_is_deep_color(conn_state)) |
648 | val |= GCP_COLOR_INDICATION; | 650 | val |= GCP_COLOR_INDICATION; |
649 | 651 | ||
650 | /* Enable default_phase whenever the display mode is suitably aligned */ | 652 | /* Enable default_phase whenever the display mode is suitably aligned */ |
651 | if (gcp_default_phase_possible(crtc->config->pipe_bpp, | 653 | if (gcp_default_phase_possible(crtc_state->pipe_bpp, |
652 | &crtc->config->base.adjusted_mode)) | 654 | &crtc_state->base.adjusted_mode)) |
653 | val |= GCP_DEFAULT_PHASE_ENABLE; | 655 | val |= GCP_DEFAULT_PHASE_ENABLE; |
654 | 656 | ||
655 | I915_WRITE(reg, val); | 657 | I915_WRITE(reg, val); |
@@ -659,10 +661,11 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) | |||
659 | 661 | ||
660 | static void ibx_set_infoframes(struct drm_encoder *encoder, | 662 | static void ibx_set_infoframes(struct drm_encoder *encoder, |
661 | bool enable, | 663 | bool enable, |
662 | const struct drm_display_mode *adjusted_mode) | 664 | const struct intel_crtc_state *crtc_state, |
665 | const struct drm_connector_state *conn_state) | ||
663 | { | 666 | { |
664 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 667 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); |
665 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 668 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
666 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 669 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
667 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; | 670 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; |
668 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 671 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
@@ -698,23 +701,24 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, | |||
698 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | 701 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | |
699 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | 702 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); |
700 | 703 | ||
701 | if (intel_hdmi_set_gcp_infoframe(encoder)) | 704 | if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) |
702 | val |= VIDEO_DIP_ENABLE_GCP; | 705 | val |= VIDEO_DIP_ENABLE_GCP; |
703 | 706 | ||
704 | I915_WRITE(reg, val); | 707 | I915_WRITE(reg, val); |
705 | POSTING_READ(reg); | 708 | POSTING_READ(reg); |
706 | 709 | ||
707 | intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | 710 | intel_hdmi_set_avi_infoframe(encoder, crtc_state); |
708 | intel_hdmi_set_spd_infoframe(encoder); | 711 | intel_hdmi_set_spd_infoframe(encoder, crtc_state); |
709 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 712 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); |
710 | } | 713 | } |
711 | 714 | ||
712 | static void cpt_set_infoframes(struct drm_encoder *encoder, | 715 | static void cpt_set_infoframes(struct drm_encoder *encoder, |
713 | bool enable, | 716 | bool enable, |
714 | const struct drm_display_mode *adjusted_mode) | 717 | const struct intel_crtc_state *crtc_state, |
718 | const struct drm_connector_state *conn_state) | ||
715 | { | 719 | { |
716 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 720 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); |
717 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 721 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
718 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 722 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
719 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 723 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
720 | u32 val = I915_READ(reg); | 724 | u32 val = I915_READ(reg); |
@@ -740,24 +744,25 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, | |||
740 | val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | 744 | val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | |
741 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | 745 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); |
742 | 746 | ||
743 | if (intel_hdmi_set_gcp_infoframe(encoder)) | 747 | if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) |
744 | val |= VIDEO_DIP_ENABLE_GCP; | 748 | val |= VIDEO_DIP_ENABLE_GCP; |
745 | 749 | ||
746 | I915_WRITE(reg, val); | 750 | I915_WRITE(reg, val); |
747 | POSTING_READ(reg); | 751 | POSTING_READ(reg); |
748 | 752 | ||
749 | intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | 753 | intel_hdmi_set_avi_infoframe(encoder, crtc_state); |
750 | intel_hdmi_set_spd_infoframe(encoder); | 754 | intel_hdmi_set_spd_infoframe(encoder, crtc_state); |
751 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 755 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); |
752 | } | 756 | } |
753 | 757 | ||
754 | static void vlv_set_infoframes(struct drm_encoder *encoder, | 758 | static void vlv_set_infoframes(struct drm_encoder *encoder, |
755 | bool enable, | 759 | bool enable, |
756 | const struct drm_display_mode *adjusted_mode) | 760 | const struct intel_crtc_state *crtc_state, |
761 | const struct drm_connector_state *conn_state) | ||
757 | { | 762 | { |
758 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 763 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); |
759 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 764 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
760 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 765 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
761 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 766 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
762 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); | 767 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); |
763 | u32 val = I915_READ(reg); | 768 | u32 val = I915_READ(reg); |
@@ -792,25 +797,25 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, | |||
792 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | | 797 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | |
793 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | 798 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); |
794 | 799 | ||
795 | if (intel_hdmi_set_gcp_infoframe(encoder)) | 800 | if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) |
796 | val |= VIDEO_DIP_ENABLE_GCP; | 801 | val |= VIDEO_DIP_ENABLE_GCP; |
797 | 802 | ||
798 | I915_WRITE(reg, val); | 803 | I915_WRITE(reg, val); |
799 | POSTING_READ(reg); | 804 | POSTING_READ(reg); |
800 | 805 | ||
801 | intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | 806 | intel_hdmi_set_avi_infoframe(encoder, crtc_state); |
802 | intel_hdmi_set_spd_infoframe(encoder); | 807 | intel_hdmi_set_spd_infoframe(encoder, crtc_state); |
803 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 808 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); |
804 | } | 809 | } |
805 | 810 | ||
806 | static void hsw_set_infoframes(struct drm_encoder *encoder, | 811 | static void hsw_set_infoframes(struct drm_encoder *encoder, |
807 | bool enable, | 812 | bool enable, |
808 | const struct drm_display_mode *adjusted_mode) | 813 | const struct intel_crtc_state *crtc_state, |
814 | const struct drm_connector_state *conn_state) | ||
809 | { | 815 | { |
810 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 816 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); |
811 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
812 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 817 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
813 | i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); | 818 | i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); |
814 | u32 val = I915_READ(reg); | 819 | u32 val = I915_READ(reg); |
815 | 820 | ||
816 | assert_hdmi_port_disabled(intel_hdmi); | 821 | assert_hdmi_port_disabled(intel_hdmi); |
@@ -825,15 +830,15 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, | |||
825 | return; | 830 | return; |
826 | } | 831 | } |
827 | 832 | ||
828 | if (intel_hdmi_set_gcp_infoframe(encoder)) | 833 | if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) |
829 | val |= VIDEO_DIP_ENABLE_GCP_HSW; | 834 | val |= VIDEO_DIP_ENABLE_GCP_HSW; |
830 | 835 | ||
831 | I915_WRITE(reg, val); | 836 | I915_WRITE(reg, val); |
832 | POSTING_READ(reg); | 837 | POSTING_READ(reg); |
833 | 838 | ||
834 | intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); | 839 | intel_hdmi_set_avi_infoframe(encoder, crtc_state); |
835 | intel_hdmi_set_spd_infoframe(encoder); | 840 | intel_hdmi_set_spd_infoframe(encoder, crtc_state); |
836 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 841 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); |
837 | } | 842 | } |
838 | 843 | ||
839 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) | 844 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) |
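Every *_set_infoframes() variant above now receives the atomic state instead of digging it out of encoder->crtc, so the hook shape becomes (a sketch; the actual vfunc pointer lives on struct intel_hdmi):

	void (*set_infoframes)(struct drm_encoder *encoder,
			       bool enable,
			       const struct intel_crtc_state *crtc_state,
			       const struct drm_connector_state *conn_state);

	/* Pipe and transcoder lookups are then derived from the state: */
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;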
@@ -852,31 +857,32 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) | |||
852 | adapter, enable); | 857 | adapter, enable); |
853 | } | 858 | } |
854 | 859 | ||
855 | static void intel_hdmi_prepare(struct intel_encoder *encoder) | 860 | static void intel_hdmi_prepare(struct intel_encoder *encoder, |
861 | const struct intel_crtc_state *crtc_state) | ||
856 | { | 862 | { |
857 | struct drm_device *dev = encoder->base.dev; | 863 | struct drm_device *dev = encoder->base.dev; |
858 | struct drm_i915_private *dev_priv = to_i915(dev); | 864 | struct drm_i915_private *dev_priv = to_i915(dev); |
859 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 865 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
860 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 866 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
861 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | 867 | const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; |
862 | u32 hdmi_val; | 868 | u32 hdmi_val; |
863 | 869 | ||
864 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); | 870 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); |
865 | 871 | ||
866 | hdmi_val = SDVO_ENCODING_HDMI; | 872 | hdmi_val = SDVO_ENCODING_HDMI; |
867 | if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range) | 873 | if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range) |
868 | hdmi_val |= HDMI_COLOR_RANGE_16_235; | 874 | hdmi_val |= HDMI_COLOR_RANGE_16_235; |
869 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 875 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
870 | hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; | 876 | hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; |
871 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 877 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
872 | hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; | 878 | hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; |
873 | 879 | ||
874 | if (crtc->config->pipe_bpp > 24) | 880 | if (crtc_state->pipe_bpp > 24) |
875 | hdmi_val |= HDMI_COLOR_FORMAT_12bpc; | 881 | hdmi_val |= HDMI_COLOR_FORMAT_12bpc; |
876 | else | 882 | else |
877 | hdmi_val |= SDVO_COLOR_FORMAT_8bpc; | 883 | hdmi_val |= SDVO_COLOR_FORMAT_8bpc; |
878 | 884 | ||
879 | if (crtc->config->has_hdmi_sink) | 885 | if (crtc_state->has_hdmi_sink) |
880 | hdmi_val |= HDMI_MODE_SELECT_HDMI; | 886 | hdmi_val |= HDMI_MODE_SELECT_HDMI; |
881 | 887 | ||
882 | if (HAS_PCH_CPT(dev_priv)) | 888 | if (HAS_PCH_CPT(dev_priv)) |
@@ -979,9 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder, | |||
979 | struct intel_crtc_state *pipe_config, | 985 | struct intel_crtc_state *pipe_config, |
980 | struct drm_connector_state *conn_state) | 986 | struct drm_connector_state *conn_state) |
981 | { | 987 | { |
982 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 988 | struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); |
983 | 989 | ||
984 | WARN_ON(!crtc->config->has_hdmi_sink); | 990 | WARN_ON(!pipe_config->has_hdmi_sink); |
985 | DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", | 991 | DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", |
986 | pipe_name(crtc->pipe)); | 992 | pipe_name(crtc->pipe)); |
987 | intel_audio_codec_enable(encoder, pipe_config, conn_state); | 993 | intel_audio_codec_enable(encoder, pipe_config, conn_state); |
@@ -1015,14 +1021,13 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, | |||
1015 | { | 1021 | { |
1016 | struct drm_device *dev = encoder->base.dev; | 1022 | struct drm_device *dev = encoder->base.dev; |
1017 | struct drm_i915_private *dev_priv = to_i915(dev); | 1023 | struct drm_i915_private *dev_priv = to_i915(dev); |
1018 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
1019 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1024 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
1020 | u32 temp; | 1025 | u32 temp; |
1021 | 1026 | ||
1022 | temp = I915_READ(intel_hdmi->hdmi_reg); | 1027 | temp = I915_READ(intel_hdmi->hdmi_reg); |
1023 | 1028 | ||
1024 | temp |= SDVO_ENABLE; | 1029 | temp |= SDVO_ENABLE; |
1025 | if (crtc->config->has_audio) | 1030 | if (pipe_config->has_audio) |
1026 | temp |= SDVO_AUDIO_ENABLE; | 1031 | temp |= SDVO_AUDIO_ENABLE; |
1027 | 1032 | ||
1028 | /* | 1033 | /* |
@@ -1066,7 +1071,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, | |||
1066 | { | 1071 | { |
1067 | struct drm_device *dev = encoder->base.dev; | 1072 | struct drm_device *dev = encoder->base.dev; |
1068 | struct drm_i915_private *dev_priv = to_i915(dev); | 1073 | struct drm_i915_private *dev_priv = to_i915(dev); |
1069 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 1074 | struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); |
1070 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1075 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
1071 | enum pipe pipe = crtc->pipe; | 1076 | enum pipe pipe = crtc->pipe; |
1072 | u32 temp; | 1077 | u32 temp; |
@@ -1128,7 +1133,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, | |||
1128 | struct drm_device *dev = encoder->base.dev; | 1133 | struct drm_device *dev = encoder->base.dev; |
1129 | struct drm_i915_private *dev_priv = to_i915(dev); | 1134 | struct drm_i915_private *dev_priv = to_i915(dev); |
1130 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1135 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
1131 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 1136 | struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); |
1132 | u32 temp; | 1137 | u32 temp; |
1133 | 1138 | ||
1134 | temp = I915_READ(intel_hdmi->hdmi_reg); | 1139 | temp = I915_READ(intel_hdmi->hdmi_reg); |
@@ -1170,7 +1175,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, | |||
1170 | intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); | 1175 | intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); |
1171 | } | 1176 | } |
1172 | 1177 | ||
1173 | intel_hdmi->set_infoframes(&encoder->base, false, NULL); | 1178 | intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state); |
1174 | 1179 | ||
1175 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); | 1180 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); |
1176 | } | 1181 | } |
@@ -1246,7 +1251,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, | |||
1246 | return MODE_CLOCK_HIGH; | 1251 | return MODE_CLOCK_HIGH; |
1247 | 1252 | ||
1248 | /* BXT DPLL can't generate 223-240 MHz */ | 1253 | /* BXT DPLL can't generate 223-240 MHz */ |
1249 | if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000) | 1254 | if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000) |
1250 | return MODE_CLOCK_RANGE; | 1255 | return MODE_CLOCK_RANGE; |
1251 | 1256 | ||
1252 | /* CHV DPLL can't generate 216-240 MHz */ | 1257 | /* CHV DPLL can't generate 216-240 MHz */ |
@@ -1642,13 +1647,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, | |||
1642 | struct drm_connector_state *conn_state) | 1647 | struct drm_connector_state *conn_state) |
1643 | { | 1648 | { |
1644 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1649 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
1645 | const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | ||
1646 | 1650 | ||
1647 | intel_hdmi_prepare(encoder); | 1651 | intel_hdmi_prepare(encoder, pipe_config); |
1648 | 1652 | ||
1649 | intel_hdmi->set_infoframes(&encoder->base, | 1653 | intel_hdmi->set_infoframes(&encoder->base, |
1650 | pipe_config->has_hdmi_sink, | 1654 | pipe_config->has_hdmi_sink, |
1651 | adjusted_mode); | 1655 | pipe_config, conn_state); |
1652 | } | 1656 | } |
1653 | 1657 | ||
1654 | static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, | 1658 | static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, |
@@ -1659,7 +1663,6 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, | |||
1659 | struct intel_hdmi *intel_hdmi = &dport->hdmi; | 1663 | struct intel_hdmi *intel_hdmi = &dport->hdmi; |
1660 | struct drm_device *dev = encoder->base.dev; | 1664 | struct drm_device *dev = encoder->base.dev; |
1661 | struct drm_i915_private *dev_priv = to_i915(dev); | 1665 | struct drm_i915_private *dev_priv = to_i915(dev); |
1662 | const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | ||
1663 | 1666 | ||
1664 | vlv_phy_pre_encoder_enable(encoder); | 1667 | vlv_phy_pre_encoder_enable(encoder); |
1665 | 1668 | ||
@@ -1669,7 +1672,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, | |||
1669 | 1672 | ||
1670 | intel_hdmi->set_infoframes(&encoder->base, | 1673 | intel_hdmi->set_infoframes(&encoder->base, |
1671 | pipe_config->has_hdmi_sink, | 1674 | pipe_config->has_hdmi_sink, |
1672 | adjusted_mode); | 1675 | pipe_config, conn_state); |
1673 | 1676 | ||
1674 | g4x_enable_hdmi(encoder, pipe_config, conn_state); | 1677 | g4x_enable_hdmi(encoder, pipe_config, conn_state); |
1675 | 1678 | ||
@@ -1680,7 +1683,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder, | |||
1680 | struct intel_crtc_state *pipe_config, | 1683 | struct intel_crtc_state *pipe_config, |
1681 | struct drm_connector_state *conn_state) | 1684 | struct drm_connector_state *conn_state) |
1682 | { | 1685 | { |
1683 | intel_hdmi_prepare(encoder); | 1686 | intel_hdmi_prepare(encoder, pipe_config); |
1684 | 1687 | ||
1685 | vlv_phy_pre_pll_enable(encoder); | 1688 | vlv_phy_pre_pll_enable(encoder); |
1686 | } | 1689 | } |
@@ -1689,7 +1692,7 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder, | |||
1689 | struct intel_crtc_state *pipe_config, | 1692 | struct intel_crtc_state *pipe_config, |
1690 | struct drm_connector_state *conn_state) | 1693 | struct drm_connector_state *conn_state) |
1691 | { | 1694 | { |
1692 | intel_hdmi_prepare(encoder); | 1695 | intel_hdmi_prepare(encoder, pipe_config); |
1693 | 1696 | ||
1694 | chv_phy_pre_pll_enable(encoder); | 1697 | chv_phy_pre_pll_enable(encoder); |
1695 | } | 1698 | } |
@@ -1732,9 +1735,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, | |||
1732 | struct intel_hdmi *intel_hdmi = &dport->hdmi; | 1735 | struct intel_hdmi *intel_hdmi = &dport->hdmi; |
1733 | struct drm_device *dev = encoder->base.dev; | 1736 | struct drm_device *dev = encoder->base.dev; |
1734 | struct drm_i915_private *dev_priv = to_i915(dev); | 1737 | struct drm_i915_private *dev_priv = to_i915(dev); |
1735 | struct intel_crtc *intel_crtc = | ||
1736 | to_intel_crtc(encoder->base.crtc); | ||
1737 | const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; | ||
1738 | 1738 | ||
1739 | chv_phy_pre_encoder_enable(encoder); | 1739 | chv_phy_pre_encoder_enable(encoder); |
1740 | 1740 | ||
@@ -1743,8 +1743,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, | |||
1743 | chv_set_phy_signal_level(encoder, 128, 102, false); | 1743 | chv_set_phy_signal_level(encoder, 128, 102, false); |
1744 | 1744 | ||
1745 | intel_hdmi->set_infoframes(&encoder->base, | 1745 | intel_hdmi->set_infoframes(&encoder->base, |
1746 | intel_crtc->config->has_hdmi_sink, | 1746 | pipe_config->has_hdmi_sink, |
1747 | adjusted_mode); | 1747 | pipe_config, conn_state); |
1748 | 1748 | ||
1749 | g4x_enable_hdmi(encoder, pipe_config, conn_state); | 1749 | g4x_enable_hdmi(encoder, pipe_config, conn_state); |
1750 | 1750 | ||
@@ -1809,13 +1809,13 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, | |||
1809 | 1809 | ||
1810 | switch (port) { | 1810 | switch (port) { |
1811 | case PORT_B: | 1811 | case PORT_B: |
1812 | if (IS_BROXTON(dev_priv)) | 1812 | if (IS_GEN9_LP(dev_priv)) |
1813 | ddc_pin = GMBUS_PIN_1_BXT; | 1813 | ddc_pin = GMBUS_PIN_1_BXT; |
1814 | else | 1814 | else |
1815 | ddc_pin = GMBUS_PIN_DPB; | 1815 | ddc_pin = GMBUS_PIN_DPB; |
1816 | break; | 1816 | break; |
1817 | case PORT_C: | 1817 | case PORT_C: |
1818 | if (IS_BROXTON(dev_priv)) | 1818 | if (IS_GEN9_LP(dev_priv)) |
1819 | ddc_pin = GMBUS_PIN_2_BXT; | 1819 | ddc_pin = GMBUS_PIN_2_BXT; |
1820 | else | 1820 | else |
1821 | ddc_pin = GMBUS_PIN_DPC; | 1821 | ddc_pin = GMBUS_PIN_DPC; |
@@ -1933,10 +1933,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
1933 | } | 1933 | } |
1934 | } | 1934 | } |
1935 | 1935 | ||
1936 | void intel_hdmi_init(struct drm_device *dev, | 1936 | void intel_hdmi_init(struct drm_i915_private *dev_priv, |
1937 | i915_reg_t hdmi_reg, enum port port) | 1937 | i915_reg_t hdmi_reg, enum port port) |
1938 | { | 1938 | { |
1939 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1940 | struct intel_digital_port *intel_dig_port; | 1939 | struct intel_digital_port *intel_dig_port; |
1941 | struct intel_encoder *intel_encoder; | 1940 | struct intel_encoder *intel_encoder; |
1942 | struct intel_connector *intel_connector; | 1941 | struct intel_connector *intel_connector; |
@@ -1953,8 +1952,9 @@ void intel_hdmi_init(struct drm_device *dev, | |||
1953 | 1952 | ||
1954 | intel_encoder = &intel_dig_port->base; | 1953 | intel_encoder = &intel_dig_port->base; |
1955 | 1954 | ||
1956 | drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, | 1955 | drm_encoder_init(&dev_priv->drm, &intel_encoder->base, |
1957 | DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); | 1956 | &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, |
1957 | "HDMI %c", port_name(port)); | ||
1958 | 1958 | ||
1959 | intel_encoder->compute_config = intel_hdmi_compute_config; | 1959 | intel_encoder->compute_config = intel_hdmi_compute_config; |
1960 | if (HAS_PCH_SPLIT(dev_priv)) { | 1960 | if (HAS_PCH_SPLIT(dev_priv)) { |
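The IS_BROXTON() -> IS_GEN9_LP() substitutions here and in intel_i2c.c below group the gen9 low-power parts behind one predicate. A sketch of the helper, assuming the i915_drv.h definitions of this period:

	#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
	#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))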
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 83f260bb4eef..bce1ba80f277 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -72,7 +72,7 @@ static const struct gmbus_pin gmbus_pins_bxt[] = { | |||
72 | static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, | 72 | static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, |
73 | unsigned int pin) | 73 | unsigned int pin) |
74 | { | 74 | { |
75 | if (IS_BROXTON(dev_priv)) | 75 | if (IS_GEN9_LP(dev_priv)) |
76 | return &gmbus_pins_bxt[pin]; | 76 | return &gmbus_pins_bxt[pin]; |
77 | else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 77 | else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
78 | return &gmbus_pins_skl[pin]; | 78 | return &gmbus_pins_skl[pin]; |
@@ -87,7 +87,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, | |||
87 | { | 87 | { |
88 | unsigned int size; | 88 | unsigned int size; |
89 | 89 | ||
90 | if (IS_BROXTON(dev_priv)) | 90 | if (IS_GEN9_LP(dev_priv)) |
91 | size = ARRAY_SIZE(gmbus_pins_bxt); | 91 | size = ARRAY_SIZE(gmbus_pins_bxt); |
92 | else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 92 | else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
93 | size = ARRAY_SIZE(gmbus_pins_skl); | 93 | size = ARRAY_SIZE(gmbus_pins_skl); |
@@ -111,10 +111,8 @@ to_intel_gmbus(struct i2c_adapter *i2c) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | void | 113 | void |
114 | intel_i2c_reset(struct drm_device *dev) | 114 | intel_i2c_reset(struct drm_i915_private *dev_priv) |
115 | { | 115 | { |
116 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
117 | |||
118 | I915_WRITE(GMBUS0, 0); | 116 | I915_WRITE(GMBUS0, 0); |
119 | I915_WRITE(GMBUS4, 0); | 117 | I915_WRITE(GMBUS4, 0); |
120 | } | 118 | } |
@@ -141,7 +139,7 @@ static u32 get_reserved(struct intel_gmbus *bus) | |||
141 | u32 reserved = 0; | 139 | u32 reserved = 0; |
142 | 140 | ||
143 | /* On most chips, these bits must be preserved in software. */ | 141 | /* On most chips, these bits must be preserved in software. */ |
144 | if (!IS_I830(dev_priv) && !IS_845G(dev_priv)) | 142 | if (!IS_I830(dev_priv) && !IS_I845G(dev_priv)) |
145 | reserved = I915_READ_NOTRACE(bus->gpio_reg) & | 143 | reserved = I915_READ_NOTRACE(bus->gpio_reg) & |
146 | (GPIO_DATA_PULLUP_DISABLE | | 144 | (GPIO_DATA_PULLUP_DISABLE | |
147 | GPIO_CLOCK_PULLUP_DISABLE); | 145 | GPIO_CLOCK_PULLUP_DISABLE); |
@@ -211,7 +209,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter) | |||
211 | adapter); | 209 | adapter); |
212 | struct drm_i915_private *dev_priv = bus->dev_priv; | 210 | struct drm_i915_private *dev_priv = bus->dev_priv; |
213 | 211 | ||
214 | intel_i2c_reset(&dev_priv->drm); | 212 | intel_i2c_reset(dev_priv); |
215 | intel_i2c_quirk_set(dev_priv, true); | 213 | intel_i2c_quirk_set(dev_priv, true); |
216 | set_data(bus, 1); | 214 | set_data(bus, 1); |
217 | set_clock(bus, 1); | 215 | set_clock(bus, 1); |
@@ -617,11 +615,10 @@ static const struct i2c_algorithm gmbus_algorithm = { | |||
617 | 615 | ||
618 | /** | 616 | /** |
619 | * intel_setup_gmbus - instantiate all Intel i2c GMBuses | 617 | * intel_setup_gmbus - instantiate all Intel i2c GMBuses |
620 | * @dev: DRM device | 618 | * @dev_priv: i915 device private |
621 | */ | 619 | */ |
622 | int intel_setup_gmbus(struct drm_device *dev) | 620 | int intel_setup_gmbus(struct drm_i915_private *dev_priv) |
623 | { | 621 | { |
624 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
625 | struct pci_dev *pdev = dev_priv->drm.pdev; | 622 | struct pci_dev *pdev = dev_priv->drm.pdev; |
626 | struct intel_gmbus *bus; | 623 | struct intel_gmbus *bus; |
627 | unsigned int pin; | 624 | unsigned int pin; |
@@ -678,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
678 | goto err; | 675 | goto err; |
679 | } | 676 | } |
680 | 677 | ||
681 | intel_i2c_reset(&dev_priv->drm); | 678 | intel_i2c_reset(dev_priv); |
682 | 679 | ||
683 | return 0; | 680 | return 0; |
684 | 681 | ||
@@ -724,9 +721,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) | |||
724 | mutex_unlock(&dev_priv->gmbus_mutex); | 721 | mutex_unlock(&dev_priv->gmbus_mutex); |
725 | } | 722 | } |
726 | 723 | ||
727 | void intel_teardown_gmbus(struct drm_device *dev) | 724 | void intel_teardown_gmbus(struct drm_i915_private *dev_priv) |
728 | { | 725 | { |
729 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
730 | struct intel_gmbus *bus; | 726 | struct intel_gmbus *bus; |
731 | unsigned int pin; | 727 | unsigned int pin; |
732 | 728 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4961fa20c73..6db246ad2f13 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -230,8 +230,6 @@ enum { | |||
230 | 230 | ||
231 | static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, | 231 | static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, |
232 | struct intel_engine_cs *engine); | 232 | struct intel_engine_cs *engine); |
233 | static int intel_lr_context_pin(struct i915_gem_context *ctx, | ||
234 | struct intel_engine_cs *engine); | ||
235 | static void execlists_init_reg_state(u32 *reg_state, | 233 | static void execlists_init_reg_state(u32 *reg_state, |
236 | struct i915_gem_context *ctx, | 234 | struct i915_gem_context *ctx, |
237 | struct intel_engine_cs *engine, | 235 | struct intel_engine_cs *engine, |
@@ -415,7 +413,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) | |||
415 | static bool ctx_single_port_submission(const struct i915_gem_context *ctx) | 413 | static bool ctx_single_port_submission(const struct i915_gem_context *ctx) |
416 | { | 414 | { |
417 | return (IS_ENABLED(CONFIG_DRM_I915_GVT) && | 415 | return (IS_ENABLED(CONFIG_DRM_I915_GVT) && |
418 | ctx->execlists_force_single_submission); | 416 | i915_gem_context_force_single_submission(ctx)); |
419 | } | 417 | } |
420 | 418 | ||
421 | static bool can_merge_ctx(const struct i915_gem_context *prev, | 419 | static bool can_merge_ctx(const struct i915_gem_context *prev, |
@@ -514,15 +512,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
514 | RB_CLEAR_NODE(&cursor->priotree.node); | 512 | RB_CLEAR_NODE(&cursor->priotree.node); |
515 | cursor->priotree.priority = INT_MAX; | 513 | cursor->priotree.priority = INT_MAX; |
516 | 514 | ||
517 | /* We keep the previous context alive until we retire the | ||
518 | * following request. This ensures that the context object | ||
519 | * is still pinned for any residual writes the HW makes into it | ||
520 | * on the context switch into the next object following the | ||
521 | * breadcrumb. Otherwise, we may retire the context too early. | ||
522 | */ | ||
523 | cursor->previous_context = engine->last_context; | ||
524 | engine->last_context = cursor->ctx; | ||
525 | |||
526 | __i915_gem_request_submit(cursor); | 515 | __i915_gem_request_submit(cursor); |
527 | last = cursor; | 516 | last = cursor; |
528 | submit = true; | 517 | submit = true; |
@@ -695,7 +684,6 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) | |||
695 | 684 | ||
696 | static void execlists_schedule(struct drm_i915_gem_request *request, int prio) | 685 | static void execlists_schedule(struct drm_i915_gem_request *request, int prio) |
697 | { | 686 | { |
698 | static DEFINE_MUTEX(lock); | ||
699 | struct intel_engine_cs *engine = NULL; | 687 | struct intel_engine_cs *engine = NULL; |
700 | struct i915_dependency *dep, *p; | 688 | struct i915_dependency *dep, *p; |
701 | struct i915_dependency stack; | 689 | struct i915_dependency stack; |
@@ -704,8 +692,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) | |||
704 | if (prio <= READ_ONCE(request->priotree.priority)) | 692 | if (prio <= READ_ONCE(request->priotree.priority)) |
705 | return; | 693 | return; |
706 | 694 | ||
707 | /* Need global lock to use the temporary link inside i915_dependency */ | 695 | /* Need BKL in order to use the temporary link inside i915_dependency */ |
708 | mutex_lock(&lock); | 696 | lockdep_assert_held(&request->i915->drm.struct_mutex); |
709 | 697 | ||
710 | stack.signaler = &request->priotree; | 698 | stack.signaler = &request->priotree; |
711 | list_add(&stack.dfs_link, &dfs); | 699 | list_add(&stack.dfs_link, &dfs); |
@@ -734,7 +722,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) | |||
734 | if (prio > READ_ONCE(p->signaler->priority)) | 722 | if (prio > READ_ONCE(p->signaler->priority)) |
735 | list_move_tail(&p->dfs_link, &dfs); | 723 | list_move_tail(&p->dfs_link, &dfs); |
736 | 724 | ||
737 | p = list_next_entry(dep, dfs_link); | 725 | list_safe_reset_next(dep, p, dfs_link); |
738 | if (!RB_EMPTY_NODE(&pt->node)) | 726 | if (!RB_EMPTY_NODE(&pt->node)) |
739 | continue; | 727 | continue; |
740 | 728 | ||
@@ -772,80 +760,14 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) | |||
772 | if (engine) | 760 | if (engine) |
773 | spin_unlock_irq(&engine->timeline->lock); | 761 | spin_unlock_irq(&engine->timeline->lock); |
774 | 762 | ||
775 | mutex_unlock(&lock); | ||
776 | |||
777 | /* XXX Do we need to preempt to make room for us and our deps? */ | 763 | /* XXX Do we need to preempt to make room for us and our deps? */ |
778 | } | 764 | } |
779 | 765 | ||
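execlists_schedule() drops its file-local DEFINE_MUTEX in favour of asserting the caller's lock: the temporary dfs_link fields in i915_dependency are only safe under struct_mutex, which every caller already holds, so a private mutex added nothing but overhead. (list_safe_reset_next() also replaces list_next_entry() so the iterator stays valid after a list_move_tail().) A sketch of the document-and-assert idiom, with a hypothetical helper name:

	static void example_walk_deps(struct drm_i915_gem_request *rq)
	{
		/* Don't take a new lock; check the one the contract already
		 * requires. Compiles away when lockdep is disabled. */
		lockdep_assert_held(&rq->i915->drm.struct_mutex);

		/* ... DFS over rq->priotree via the dfs_link fields ... */
	}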
780 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) | 766 | static int execlists_context_pin(struct intel_engine_cs *engine, |
781 | { | 767 | struct i915_gem_context *ctx) |
782 | struct intel_engine_cs *engine = request->engine; | ||
783 | struct intel_context *ce = &request->ctx->engine[engine->id]; | ||
784 | int ret; | ||
785 | |||
786 | /* Flush enough space to reduce the likelihood of waiting after | ||
787 | * we start building the request - in which case we will just | ||
788 | * have to repeat work. | ||
789 | */ | ||
790 | request->reserved_space += EXECLISTS_REQUEST_SIZE; | ||
791 | |||
792 | if (!ce->state) { | ||
793 | ret = execlists_context_deferred_alloc(request->ctx, engine); | ||
794 | if (ret) | ||
795 | return ret; | ||
796 | } | ||
797 | |||
798 | request->ring = ce->ring; | ||
799 | |||
800 | ret = intel_lr_context_pin(request->ctx, engine); | ||
801 | if (ret) | ||
802 | return ret; | ||
803 | |||
804 | if (i915.enable_guc_submission) { | ||
805 | /* | ||
806 | * Check that the GuC has space for the request before | ||
807 | * going any further, as the i915_add_request() call | ||
808 | * later on mustn't fail ... | ||
809 | */ | ||
810 | ret = i915_guc_wq_reserve(request); | ||
811 | if (ret) | ||
812 | goto err_unpin; | ||
813 | } | ||
814 | |||
815 | ret = intel_ring_begin(request, 0); | ||
816 | if (ret) | ||
817 | goto err_unreserve; | ||
818 | |||
819 | if (!ce->initialised) { | ||
820 | ret = engine->init_context(request); | ||
821 | if (ret) | ||
822 | goto err_unreserve; | ||
823 | |||
824 | ce->initialised = true; | ||
825 | } | ||
826 | |||
827 | /* Note that after this point, we have committed to using | ||
828 | * this request as it is being used to both track the | ||
829 | * state of engine initialisation and liveness of the | ||
830 | * golden renderstate above. Think twice before you try | ||
831 | * to cancel/unwind this request now. | ||
832 | */ | ||
833 | |||
834 | request->reserved_space -= EXECLISTS_REQUEST_SIZE; | ||
835 | return 0; | ||
836 | |||
837 | err_unreserve: | ||
838 | if (i915.enable_guc_submission) | ||
839 | i915_guc_wq_unreserve(request); | ||
840 | err_unpin: | ||
841 | intel_lr_context_unpin(request->ctx, engine); | ||
842 | return ret; | ||
843 | } | ||
844 | |||
845 | static int intel_lr_context_pin(struct i915_gem_context *ctx, | ||
846 | struct intel_engine_cs *engine) | ||
847 | { | 768 | { |
848 | struct intel_context *ce = &ctx->engine[engine->id]; | 769 | struct intel_context *ce = &ctx->engine[engine->id]; |
770 | unsigned int flags; | ||
849 | void *vaddr; | 771 | void *vaddr; |
850 | int ret; | 772 | int ret; |
851 | 773 | ||
@@ -854,8 +776,20 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, | |||
854 | if (ce->pin_count++) | 776 | if (ce->pin_count++) |
855 | return 0; | 777 | return 0; |
856 | 778 | ||
857 | ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, | 779 | if (!ce->state) { |
858 | PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL); | 780 | ret = execlists_context_deferred_alloc(ctx, engine); |
781 | if (ret) | ||
782 | goto err; | ||
783 | } | ||
784 | GEM_BUG_ON(!ce->state); | ||
785 | |||
786 | flags = PIN_GLOBAL; | ||
787 | if (ctx->ggtt_offset_bias) | ||
788 | flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias; | ||
789 | if (i915_gem_context_is_kernel(ctx)) | ||
790 | flags |= PIN_HIGH; | ||
791 | |||
792 | ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags); | ||
859 | if (ret) | 793 | if (ret) |
860 | goto err; | 794 | goto err; |
861 | 795 | ||
@@ -865,7 +799,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, | |||
865 | goto unpin_vma; | 799 | goto unpin_vma; |
866 | } | 800 | } |
867 | 801 | ||
868 | ret = intel_ring_pin(ce->ring); | 802 | ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias); |
869 | if (ret) | 803 | if (ret) |
870 | goto unpin_map; | 804 | goto unpin_map; |
871 | 805 | ||
@@ -895,8 +829,8 @@ err: | |||
895 | return ret; | 829 | return ret; |
896 | } | 830 | } |
897 | 831 | ||
898 | void intel_lr_context_unpin(struct i915_gem_context *ctx, | 832 | static void execlists_context_unpin(struct intel_engine_cs *engine, |
899 | struct intel_engine_cs *engine) | 833 | struct i915_gem_context *ctx) |
900 | { | 834 | { |
901 | struct intel_context *ce = &ctx->engine[engine->id]; | 835 | struct intel_context *ce = &ctx->engine[engine->id]; |
902 | 836 | ||
@@ -914,6 +848,63 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx, | |||
914 | i915_gem_context_put(ctx); | 848 | i915_gem_context_put(ctx); |
915 | } | 849 | } |
916 | 850 | ||
851 | static int execlists_request_alloc(struct drm_i915_gem_request *request) | ||
852 | { | ||
853 | struct intel_engine_cs *engine = request->engine; | ||
854 | struct intel_context *ce = &request->ctx->engine[engine->id]; | ||
855 | int ret; | ||
856 | |||
857 | GEM_BUG_ON(!ce->pin_count); | ||
858 | |||
859 | /* Flush enough space to reduce the likelihood of waiting after | ||
860 | * we start building the request - in which case we will just | ||
861 | * have to repeat work. | ||
862 | */ | ||
863 | request->reserved_space += EXECLISTS_REQUEST_SIZE; | ||
864 | |||
865 | GEM_BUG_ON(!ce->ring); | ||
866 | request->ring = ce->ring; | ||
867 | |||
868 | if (i915.enable_guc_submission) { | ||
869 | /* | ||
870 | * Check that the GuC has space for the request before | ||
871 | * going any further, as the i915_add_request() call | ||
872 | * later on mustn't fail ... | ||
873 | */ | ||
874 | ret = i915_guc_wq_reserve(request); | ||
875 | if (ret) | ||
876 | goto err; | ||
877 | } | ||
878 | |||
879 | ret = intel_ring_begin(request, 0); | ||
880 | if (ret) | ||
881 | goto err_unreserve; | ||
882 | |||
883 | if (!ce->initialised) { | ||
884 | ret = engine->init_context(request); | ||
885 | if (ret) | ||
886 | goto err_unreserve; | ||
887 | |||
888 | ce->initialised = true; | ||
889 | } | ||
890 | |||
891 | /* Note that after this point, we have committed to using | ||
892 | * this request as it is being used to both track the | ||
893 | * state of engine initialisation and liveness of the | ||
894 | * golden renderstate above. Think twice before you try | ||
895 | * to cancel/unwind this request now. | ||
896 | */ | ||
897 | |||
898 | request->reserved_space -= EXECLISTS_REQUEST_SIZE; | ||
899 | return 0; | ||
900 | |||
901 | err_unreserve: | ||
902 | if (i915.enable_guc_submission) | ||
903 | i915_guc_wq_unreserve(request); | ||
904 | err: | ||
905 | return ret; | ||
906 | } | ||
907 | |||
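execlists_request_alloc() keeps the reserve/commit trick from the function it replaces: space for the final breadcrumb is reserved before any commands are emitted, so i915_add_request() cannot fail later, and the reservation is released only once the request is committed to. A condensed sketch of that pattern (error unwinding elided; all names are taken from the hunk above):

	request->reserved_space += EXECLISTS_REQUEST_SIZE;	/* reserve */

	/* Flush space now rather than mid-construction. */
	ret = intel_ring_begin(request, 0);
	if (ret)
		return ret;	/* still safe to unwind here */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;	/* commit */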
917 | static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) | 908 | static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) |
918 | { | 909 | { |
919 | int ret, i; | 910 | int ret, i; |
@@ -1246,7 +1237,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) | |||
1246 | struct i915_vma *vma; | 1237 | struct i915_vma *vma; |
1247 | int err; | 1238 | int err; |
1248 | 1239 | ||
1249 | obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size)); | 1240 | obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size)); |
1250 | if (IS_ERR(obj)) | 1241 | if (IS_ERR(obj)) |
1251 | return PTR_ERR(obj); | 1242 | return PTR_ERR(obj); |
1252 | 1243 | ||
@@ -1344,15 +1335,6 @@ out: | |||
1344 | return ret; | 1335 | return ret; |
1345 | } | 1336 | } |
1346 | 1337 | ||
1347 | static void lrc_init_hws(struct intel_engine_cs *engine) | ||
1348 | { | ||
1349 | struct drm_i915_private *dev_priv = engine->i915; | ||
1350 | |||
1351 | I915_WRITE(RING_HWS_PGA(engine->mmio_base), | ||
1352 | engine->status_page.ggtt_offset); | ||
1353 | POSTING_READ(RING_HWS_PGA(engine->mmio_base)); | ||
1354 | } | ||
1355 | |||
1356 | static int gen8_init_common_ring(struct intel_engine_cs *engine) | 1338 | static int gen8_init_common_ring(struct intel_engine_cs *engine) |
1357 | { | 1339 | { |
1358 | struct drm_i915_private *dev_priv = engine->i915; | 1340 | struct drm_i915_private *dev_priv = engine->i915; |
@@ -1362,20 +1344,19 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) | |||
1362 | if (ret) | 1344 | if (ret) |
1363 | return ret; | 1345 | return ret; |
1364 | 1346 | ||
1365 | lrc_init_hws(engine); | ||
1366 | |||
1367 | intel_engine_reset_breadcrumbs(engine); | 1347 | intel_engine_reset_breadcrumbs(engine); |
1348 | intel_engine_init_hangcheck(engine); | ||
1368 | 1349 | ||
1369 | I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); | 1350 | I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); |
1370 | |||
1371 | I915_WRITE(RING_MODE_GEN7(engine), | 1351 | I915_WRITE(RING_MODE_GEN7(engine), |
1372 | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | | 1352 | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | |
1373 | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); | 1353 | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); |
1354 | I915_WRITE(RING_HWS_PGA(engine->mmio_base), | ||
1355 | engine->status_page.ggtt_offset); | ||
1356 | POSTING_READ(RING_HWS_PGA(engine->mmio_base)); | ||
1374 | 1357 | ||
1375 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); | 1358 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); |
1376 | 1359 | ||
1377 | intel_engine_init_hangcheck(engine); | ||
1378 | |||
1379 | /* After a GPU reset, we may have requests to replay */ | 1360 | /* After a GPU reset, we may have requests to replay */ |
1380 | if (!execlists_elsp_idle(engine)) { | 1361 | if (!execlists_elsp_idle(engine)) { |
1381 | engine->execlist_port[0].count = 0; | 1362 | engine->execlist_port[0].count = 0; |
@@ -1794,13 +1775,12 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) | |||
1794 | if (engine->cleanup) | 1775 | if (engine->cleanup) |
1795 | engine->cleanup(engine); | 1776 | engine->cleanup(engine); |
1796 | 1777 | ||
1797 | intel_engine_cleanup_common(engine); | ||
1798 | |||
1799 | if (engine->status_page.vma) { | 1778 | if (engine->status_page.vma) { |
1800 | i915_gem_object_unpin_map(engine->status_page.vma->obj); | 1779 | i915_gem_object_unpin_map(engine->status_page.vma->obj); |
1801 | engine->status_page.vma = NULL; | 1780 | engine->status_page.vma = NULL; |
1802 | } | 1781 | } |
1803 | intel_lr_context_unpin(dev_priv->kernel_context, engine); | 1782 | |
1783 | intel_engine_cleanup_common(engine); | ||
1804 | 1784 | ||
1805 | lrc_destroy_wa_ctx_obj(engine); | 1785 | lrc_destroy_wa_ctx_obj(engine); |
1806 | engine->i915 = NULL; | 1786 | engine->i915 = NULL; |
@@ -1825,6 +1805,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) | |||
1825 | /* Default vfuncs which can be overridden by each engine. */ | 1805 | /* Default vfuncs which can be overridden by each engine. */ |
1826 | engine->init_hw = gen8_init_common_ring; | 1806 | engine->init_hw = gen8_init_common_ring; |
1827 | engine->reset_hw = reset_common_ring; | 1807 | engine->reset_hw = reset_common_ring; |
1808 | |||
1809 | engine->context_pin = execlists_context_pin; | ||
1810 | engine->context_unpin = execlists_context_unpin; | ||
1811 | |||
1812 | engine->request_alloc = execlists_request_alloc; | ||
1813 | |||
1828 | engine->emit_flush = gen8_emit_flush; | 1814 | engine->emit_flush = gen8_emit_flush; |
1829 | engine->emit_breadcrumb = gen8_emit_breadcrumb; | 1815 | engine->emit_breadcrumb = gen8_emit_breadcrumb; |
1830 | engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; | 1816 | engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; |
@@ -1907,18 +1893,6 @@ logical_ring_init(struct intel_engine_cs *engine) | |||
1907 | if (ret) | 1893 | if (ret) |
1908 | goto error; | 1894 | goto error; |
1909 | 1895 | ||
1910 | ret = execlists_context_deferred_alloc(dctx, engine); | ||
1911 | if (ret) | ||
1912 | goto error; | ||
1913 | |||
1914 | /* As this is the default context, always pin it */ | ||
1915 | ret = intel_lr_context_pin(dctx, engine); | ||
1916 | if (ret) { | ||
1917 | DRM_ERROR("Failed to pin context for %s: %d\n", | ||
1918 | engine->name, ret); | ||
1919 | goto error; | ||
1920 | } | ||
1921 | |||
1922 | /* And setup the hardware status page. */ | 1896 | /* And setup the hardware status page. */ |
1923 | ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); | 1897 | ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); |
1924 | if (ret) { | 1898 | if (ret) { |
@@ -2240,7 +2214,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, | |||
2240 | /* One extra page as the sharing data between driver and GuC */ | 2214 | /* One extra page as the sharing data between driver and GuC */ |
2241 | context_size += PAGE_SIZE * LRC_PPHWSP_PN; | 2215 | context_size += PAGE_SIZE * LRC_PPHWSP_PN; |
2242 | 2216 | ||
2243 | ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size); | 2217 | ctx_obj = i915_gem_object_create(ctx->i915, context_size); |
2244 | if (IS_ERR(ctx_obj)) { | 2218 | if (IS_ERR(ctx_obj)) { |
2245 | DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); | 2219 | DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); |
2246 | return PTR_ERR(ctx_obj); | 2220 | return PTR_ERR(ctx_obj); |
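With this refactor, context pinning becomes an engine vfunc: execlists_context_pin() now also performs the deferred state allocation, honours ctx->ggtt_offset_bias (expressing the GuC-unmappable range per-context instead of hardcoding GUC_WOPCM_TOP), and pins kernel contexts PIN_HIGH. A sketch of how a caller is expected to use the new hooks; the wrapper is hypothetical, while the vfunc signatures match the assignments above:

	static int example_use_context(struct intel_engine_cs *engine,
				       struct i915_gem_context *ctx)
	{
		int ret;

		ret = engine->context_pin(engine, ctx);
		if (ret)
			return ret;

		/* ... build and submit requests against ctx ... */

		engine->context_unpin(engine, ctx);
		return 0;
	}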
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index c1f546180ba2..01ba36ea125e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,14 +63,12 @@ enum { | |||
63 | }; | 63 | }; |
64 | 64 | ||
65 | /* Logical Rings */ | 65 | /* Logical Rings */ |
66 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); | ||
67 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); | ||
68 | void intel_logical_ring_stop(struct intel_engine_cs *engine); | 66 | void intel_logical_ring_stop(struct intel_engine_cs *engine); |
69 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); | 67 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); |
70 | int logical_render_ring_init(struct intel_engine_cs *engine); | 68 | int logical_render_ring_init(struct intel_engine_cs *engine); |
71 | int logical_xcs_ring_init(struct intel_engine_cs *engine); | 69 | int logical_xcs_ring_init(struct intel_engine_cs *engine); |
72 | 70 | ||
73 | int intel_engines_init(struct drm_device *dev); | 71 | int intel_engines_init(struct drm_i915_private *dev_priv); |
74 | 72 | ||
75 | /* Logical Ring Contexts */ | 73 | /* Logical Ring Contexts */ |
76 | 74 | ||
@@ -79,13 +77,10 @@ int intel_engines_init(struct drm_device *dev); | |||
79 | #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) | 77 | #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) |
80 | #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) | 78 | #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) |
81 | 79 | ||
80 | struct drm_i915_private; | ||
82 | struct i915_gem_context; | 81 | struct i915_gem_context; |
83 | 82 | ||
84 | uint32_t intel_lr_context_size(struct intel_engine_cs *engine); | 83 | uint32_t intel_lr_context_size(struct intel_engine_cs *engine); |
85 | void intel_lr_context_unpin(struct i915_gem_context *ctx, | ||
86 | struct intel_engine_cs *engine); | ||
87 | |||
88 | struct drm_i915_private; | ||
89 | 84 | ||
90 | void intel_lr_context_resume(struct drm_i915_private *dev_priv); | 85 | void intel_lr_context_resume(struct drm_i915_private *dev_priv); |
91 | uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, | 86 | uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index daa523410953..f6d4e6940257 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -35,21 +35,59 @@ static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) | |||
35 | return &dig_port->dp; | 35 | return &dig_port->dp; |
36 | } | 36 | } |
37 | 37 | ||
38 | static const char *lspcon_mode_name(enum drm_lspcon_mode mode) | ||
39 | { | ||
40 | switch (mode) { | ||
41 | case DRM_LSPCON_MODE_PCON: | ||
42 | return "PCON"; | ||
43 | case DRM_LSPCON_MODE_LS: | ||
44 | return "LS"; | ||
45 | case DRM_LSPCON_MODE_INVALID: | ||
46 | return "INVALID"; | ||
47 | default: | ||
48 | MISSING_CASE(mode); | ||
49 | return "INVALID"; | ||
50 | } | ||
51 | } | ||
52 | |||
38 | static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) | 53 | static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) |
39 | { | 54 | { |
40 | enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID; | 55 | enum drm_lspcon_mode current_mode; |
41 | struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; | 56 | struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; |
42 | 57 | ||
43 | if (drm_lspcon_get_mode(adapter, ¤t_mode)) | 58 | if (drm_lspcon_get_mode(adapter, ¤t_mode)) { |
44 | DRM_ERROR("Error reading LSPCON mode\n"); | 59 | DRM_ERROR("Error reading LSPCON mode\n"); |
45 | else | 60 | return DRM_LSPCON_MODE_INVALID; |
46 | DRM_DEBUG_KMS("Current LSPCON mode %s\n", | 61 | } |
47 | current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS"); | 62 | return current_mode; |
63 | } | ||
64 | |||
65 | static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, | ||
66 | enum drm_lspcon_mode mode) | ||
67 | { | ||
68 | enum drm_lspcon_mode current_mode; | ||
69 | |||
70 | current_mode = lspcon_get_current_mode(lspcon); | ||
71 | if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID) | ||
72 | goto out; | ||
73 | |||
74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", | ||
75 | lspcon_mode_name(mode)); | ||
76 | |||
77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode || | ||
78 | current_mode == DRM_LSPCON_MODE_INVALID, 100); | ||
79 | if (current_mode != mode) | ||
80 | DRM_DEBUG_KMS("LSPCON mode hasn't settled\n"); | ||
81 | |||
82 | out: | ||
83 | DRM_DEBUG_KMS("Current LSPCON mode %s\n", | ||
84 | lspcon_mode_name(current_mode)); | ||
85 | |||
48 | return current_mode; | 86 | return current_mode; |
49 | } | 87 | } |
50 | 88 | ||
51 | static int lspcon_change_mode(struct intel_lspcon *lspcon, | 89 | static int lspcon_change_mode(struct intel_lspcon *lspcon, |
52 | enum drm_lspcon_mode mode, bool force) | 90 | enum drm_lspcon_mode mode) |
53 | { | 91 | { |
54 | int err; | 92 | int err; |
55 | enum drm_lspcon_mode current_mode; | 93 | enum drm_lspcon_mode current_mode; |
@@ -77,10 +115,30 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon, | |||
77 | return 0; | 115 | return 0; |
78 | } | 116 | } |
79 | 117 | ||
118 | static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) | ||
119 | { | ||
120 | uint8_t rev; | ||
121 | |||
122 | if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV, | ||
123 | &rev) != 1) { | ||
124 | DRM_DEBUG_KMS("Native AUX CH down\n"); | ||
125 | return false; | ||
126 | } | ||
127 | |||
128 | DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n", | ||
129 | rev >> 4, rev & 0xf); | ||
130 | |||
131 | return true; | ||
132 | } | ||
133 | |||
80 | static bool lspcon_probe(struct intel_lspcon *lspcon) | 134 | static bool lspcon_probe(struct intel_lspcon *lspcon) |
81 | { | 135 | { |
82 | enum drm_dp_dual_mode_type adaptor_type; | 136 | enum drm_dp_dual_mode_type adaptor_type; |
83 | struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; | 137 | struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; |
138 | enum drm_lspcon_mode expected_mode; | ||
139 | |||
140 | expected_mode = lspcon_wake_native_aux_ch(lspcon) ? | ||
141 | DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS; | ||
84 | 142 | ||
85 | /* Let's probe the adaptor and check its type */ | 143 | /* Let's probe the adaptor and check its type */ |
86 | adaptor_type = drm_dp_dual_mode_detect(adapter); | 144 | adaptor_type = drm_dp_dual_mode_detect(adapter); |
@@ -92,7 +150,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) | |||
92 | 150 | ||
93 | /* Yay ... got a LSPCON device */ | 151 | /* Yay ... got a LSPCON device */ |
94 | DRM_DEBUG_KMS("LSPCON detected\n"); | 152 | DRM_DEBUG_KMS("LSPCON detected\n"); |
95 | lspcon->mode = lspcon_get_current_mode(lspcon); | 153 | lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); |
96 | lspcon->active = true; | 154 | lspcon->active = true; |
97 | return true; | 155 | return true; |
98 | } | 156 | } |
@@ -132,14 +190,29 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) | |||
132 | 190 | ||
133 | void lspcon_resume(struct intel_lspcon *lspcon) | 191 | void lspcon_resume(struct intel_lspcon *lspcon) |
134 | { | 192 | { |
135 | lspcon_resume_in_pcon_wa(lspcon); | 193 | enum drm_lspcon_mode expected_mode; |
194 | |||
195 | if (lspcon_wake_native_aux_ch(lspcon)) { | ||
196 | expected_mode = DRM_LSPCON_MODE_PCON; | ||
197 | lspcon_resume_in_pcon_wa(lspcon); | ||
198 | } else { | ||
199 | expected_mode = DRM_LSPCON_MODE_LS; | ||
200 | } | ||
201 | |||
202 | if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON) | ||
203 | return; | ||
136 | 204 | ||
137 | if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true)) | 205 | if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) |
138 | DRM_ERROR("LSPCON resume failed\n"); | 206 | DRM_ERROR("LSPCON resume failed\n"); |
139 | else | 207 | else |
140 | DRM_DEBUG_KMS("LSPCON resume success\n"); | 208 | DRM_DEBUG_KMS("LSPCON resume success\n"); |
141 | } | 209 | } |
142 | 210 | ||
211 | void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) | ||
212 | { | ||
213 | lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON); | ||
214 | } | ||
215 | |||
143 | bool lspcon_init(struct intel_digital_port *intel_dig_port) | 216 | bool lspcon_init(struct intel_digital_port *intel_dig_port) |
144 | { | 217 | { |
145 | struct intel_dp *dp = &intel_dig_port->dp; | 218 | struct intel_dp *dp = &intel_dig_port->dp; |
@@ -166,8 +239,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) | |||
166 | * 2.0 sinks. | 239 | * 2.0 sinks. |
167 | */ | 240 | */ |
168 | if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) { | 241 | if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) { |
169 | if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, | 242 | if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { |
170 | true) < 0) { | ||
171 | DRM_ERROR("LSPCON mode change to PCON failed\n"); | 243 | DRM_ERROR("LSPCON mode change to PCON failed\n"); |
172 | return false; | 244 | return false; |
173 | } | 245 | } |
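The LSPCON changes make mode transitions observable: lspcon_wait_mode() polls the adaptor for up to 100 ms until it reports the expected mode (derived from whether the native AUX channel answers a DPCD read), instead of trusting a single racy read at probe or resume time. The polling uses i915's wait_for(COND, timeout_ms) macro, which returns 0 on success and an error on timeout; a sketch with a hypothetical register and bit:

	/* EXAMPLE_STATUS and EXAMPLE_READY are illustrative names,
	 * not real i915 symbols. */
	if (wait_for((I915_READ(EXAMPLE_STATUS) & EXAMPLE_READY) != 0, 100))
		DRM_DEBUG_KMS("status bit did not settle within 100 ms\n");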
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d12ef0047d49..9ca4dc4d2378 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -460,13 +460,13 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, | |||
460 | static enum drm_connector_status | 460 | static enum drm_connector_status |
461 | intel_lvds_detect(struct drm_connector *connector, bool force) | 461 | intel_lvds_detect(struct drm_connector *connector, bool force) |
462 | { | 462 | { |
463 | struct drm_device *dev = connector->dev; | 463 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
464 | enum drm_connector_status status; | 464 | enum drm_connector_status status; |
465 | 465 | ||
466 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 466 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
467 | connector->base.id, connector->name); | 467 | connector->base.id, connector->name); |
468 | 468 | ||
469 | status = intel_panel_detect(dev); | 469 | status = intel_panel_detect(dev_priv); |
470 | if (status != connector_status_unknown) | 470 | if (status != connector_status_unknown) |
471 | return status; | 471 | return status; |
472 | 472 | ||
@@ -971,9 +971,9 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv) | |||
971 | * Create the connector, register the LVDS DDC bus, and try to figure out what | 971 | * Create the connector, register the LVDS DDC bus, and try to figure out what |
972 | * modes we can display on the LVDS panel (if present). | 972 | * modes we can display on the LVDS panel (if present). |
973 | */ | 973 | */ |
974 | void intel_lvds_init(struct drm_device *dev) | 974 | void intel_lvds_init(struct drm_i915_private *dev_priv) |
975 | { | 975 | { |
976 | struct drm_i915_private *dev_priv = to_i915(dev); | 976 | struct drm_device *dev = &dev_priv->drm; |
977 | struct intel_lvds_encoder *lvds_encoder; | 977 | struct intel_lvds_encoder *lvds_encoder; |
978 | struct intel_encoder *intel_encoder; | 978 | struct intel_encoder *intel_encoder; |
979 | struct intel_lvds_connector *lvds_connector; | 979 | struct intel_lvds_connector *lvds_connector; |
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 80bb9247ce66..c787fc4e6eb9 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -182,7 +182,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, | |||
182 | table->size = ARRAY_SIZE(skylake_mocs_table); | 182 | table->size = ARRAY_SIZE(skylake_mocs_table); |
183 | table->table = skylake_mocs_table; | 183 | table->table = skylake_mocs_table; |
184 | result = true; | 184 | result = true; |
185 | } else if (IS_BROXTON(dev_priv)) { | 185 | } else if (IS_GEN9_LP(dev_priv)) { |
186 | table->size = ARRAY_SIZE(broxton_mocs_table); | 186 | table->size = ARRAY_SIZE(broxton_mocs_table); |
187 | table->table = broxton_mocs_table; | 187 | table->table = broxton_mocs_table; |
188 | result = true; | 188 | result = true; |
@@ -380,7 +380,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
380 | 380 | ||
381 | /** | 381 | /** |
382 | * intel_mocs_init_l3cc_table() - program the mocs control table | 382 | * intel_mocs_init_l3cc_table() - program the mocs control table |
383 | * @dev: The the device to be programmed. | 383 | * @dev_priv: i915 device private |
384 | * | 384 | * |
385 | * This function simply programs the mocs registers for the given table | 385 | * This function simply programs the mocs registers for the given table |
386 | * starting at the given address. This register set is programmed in pairs. | 386 | * starting at the given address. This register set is programmed in pairs. |
@@ -392,9 +392,8 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
392 | * | 392 | * |
393 | * Return: Nothing. | 393 | * Return: Nothing. |
394 | */ | 394 | */ |
395 | void intel_mocs_init_l3cc_table(struct drm_device *dev) | 395 | void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) |
396 | { | 396 | { |
397 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
398 | struct drm_i915_mocs_table table; | 397 | struct drm_i915_mocs_table table; |
399 | unsigned int i; | 398 | unsigned int i; |
400 | 399 | ||
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index a8bd9f7bfece..ce4a5dfa5f94 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -53,7 +53,7 @@ | |||
53 | #include "i915_drv.h" | 53 | #include "i915_drv.h" |
54 | 54 | ||
55 | int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); | 55 | int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); |
56 | void intel_mocs_init_l3cc_table(struct drm_device *dev); | 56 | void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); |
57 | int intel_mocs_init_engine(struct intel_engine_cs *engine); | 57 | int intel_mocs_init_engine(struct intel_engine_cs *engine); |
58 | 58 | ||
59 | #endif | 59 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 10610b4077d1..4473a611c664 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,6 +187,29 @@ struct intel_overlay { | |||
187 | struct i915_gem_active last_flip; | 187 | struct i915_gem_active last_flip; |
188 | }; | 188 | }; |
189 | 189 | ||
190 | static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, | ||
191 | bool enable) | ||
192 | { | ||
193 | struct pci_dev *pdev = dev_priv->drm.pdev; | ||
194 | u8 val; | ||
195 | |||
196 | /* WA_OVERLAY_CLKGATE:alm */ | ||
197 | if (enable) | ||
198 | I915_WRITE(DSPCLK_GATE_D, 0); | ||
199 | else | ||
200 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | ||
201 | |||
202 | /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */ | ||
203 | pci_bus_read_config_byte(pdev->bus, | ||
204 | PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val); | ||
205 | if (enable) | ||
206 | val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE; | ||
207 | else | ||
208 | val |= I830_L2_CACHE_CLOCK_GATE_DISABLE; | ||
209 | pci_bus_write_config_byte(pdev->bus, | ||
210 | PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); | ||
211 | } | ||
212 | |||
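i830_overlay_clock_gating() centralises two i830 workarounds (display clock gating in DSPCLK_GATE_D, plus L2 cache clock gating toggled through PCI config space) so they can be flipped as a pair. As the later hunks show, the intent is bracketed use around overlay activity; a condensed sketch of the pairing:

	if (IS_I830(dev_priv))
		i830_overlay_clock_gating(dev_priv, false); /* overlay on:
							     * gating off */
	/* ... overlay active ... */

	if (IS_I830(dev_priv))
		i830_overlay_clock_gating(dev_priv, true);  /* overlay off:
							     * gating back on */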
190 | static struct overlay_registers __iomem * | 213 | static struct overlay_registers __iomem * |
191 | intel_overlay_map_regs(struct intel_overlay *overlay) | 214 | intel_overlay_map_regs(struct intel_overlay *overlay) |
192 | { | 215 | { |
@@ -262,6 +285,9 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
262 | 285 | ||
263 | overlay->active = true; | 286 | overlay->active = true; |
264 | 287 | ||
288 | if (IS_I830(dev_priv)) | ||
289 | i830_overlay_clock_gating(dev_priv, false); | ||
290 | |||
265 | ring = req->ring; | 291 | ring = req->ring; |
266 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); | 292 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); |
267 | intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); | 293 | intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); |
@@ -272,8 +298,30 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
272 | return intel_overlay_do_wait_request(overlay, req, NULL); | 298 | return intel_overlay_do_wait_request(overlay, req, NULL); |
273 | } | 299 | } |
274 | 300 | ||
301 | static void intel_overlay_flip_prepare(struct intel_overlay *overlay, | ||
302 | struct i915_vma *vma) | ||
303 | { | ||
304 | enum pipe pipe = overlay->crtc->pipe; | ||
305 | |||
306 | WARN_ON(overlay->old_vma); | ||
307 | |||
308 | i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL, | ||
309 | vma ? vma->obj : NULL, | ||
310 | INTEL_FRONTBUFFER_OVERLAY(pipe)); | ||
311 | |||
312 | intel_frontbuffer_flip_prepare(overlay->i915, | ||
313 | INTEL_FRONTBUFFER_OVERLAY(pipe)); | ||
314 | |||
315 | overlay->old_vma = overlay->vma; | ||
316 | if (vma) | ||
317 | overlay->vma = i915_vma_get(vma); | ||
318 | else | ||
319 | overlay->vma = NULL; | ||
320 | } | ||
321 | |||
275 | /* overlay needs to be enabled in OCMD reg */ | 322 | /* overlay needs to be enabled in OCMD reg */ |
276 | static int intel_overlay_continue(struct intel_overlay *overlay, | 323 | static int intel_overlay_continue(struct intel_overlay *overlay, |
324 | struct i915_vma *vma, | ||
277 | bool load_polyphase_filter) | 325 | bool load_polyphase_filter) |
278 | { | 326 | { |
279 | struct drm_i915_private *dev_priv = overlay->i915; | 327 | struct drm_i915_private *dev_priv = overlay->i915; |
@@ -308,53 +356,57 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
308 | intel_ring_emit(ring, flip_addr); | 356 | intel_ring_emit(ring, flip_addr); |
309 | intel_ring_advance(ring); | 357 | intel_ring_advance(ring); |
310 | 358 | ||
359 | intel_overlay_flip_prepare(overlay, vma); | ||
360 | |||
311 | intel_overlay_submit_request(overlay, req, NULL); | 361 | intel_overlay_submit_request(overlay, req, NULL); |
312 | 362 | ||
313 | return 0; | 363 | return 0; |
314 | } | 364 | } |
315 | 365 | ||
316 | static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, | 366 | static void intel_overlay_release_old_vma(struct intel_overlay *overlay) |
317 | struct drm_i915_gem_request *req) | ||
318 | { | 367 | { |
319 | struct intel_overlay *overlay = | ||
320 | container_of(active, typeof(*overlay), last_flip); | ||
321 | struct i915_vma *vma; | 368 | struct i915_vma *vma; |
322 | 369 | ||
323 | vma = fetch_and_zero(&overlay->old_vma); | 370 | vma = fetch_and_zero(&overlay->old_vma); |
324 | if (WARN_ON(!vma)) | 371 | if (WARN_ON(!vma)) |
325 | return; | 372 | return; |
326 | 373 | ||
327 | i915_gem_track_fb(vma->obj, NULL, | 374 | intel_frontbuffer_flip_complete(overlay->i915, |
328 | INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); | 375 | INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); |
329 | 376 | ||
330 | i915_gem_object_unpin_from_display_plane(vma); | 377 | i915_gem_object_unpin_from_display_plane(vma); |
331 | i915_vma_put(vma); | 378 | i915_vma_put(vma); |
332 | } | 379 | } |
333 | 380 | ||
381 | static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, | ||
382 | struct drm_i915_gem_request *req) | ||
383 | { | ||
384 | struct intel_overlay *overlay = | ||
385 | container_of(active, typeof(*overlay), last_flip); | ||
386 | |||
387 | intel_overlay_release_old_vma(overlay); | ||
388 | } | ||
389 | |||
334 | static void intel_overlay_off_tail(struct i915_gem_active *active, | 390 | static void intel_overlay_off_tail(struct i915_gem_active *active, |
335 | struct drm_i915_gem_request *req) | 391 | struct drm_i915_gem_request *req) |
336 | { | 392 | { |
337 | struct intel_overlay *overlay = | 393 | struct intel_overlay *overlay = |
338 | container_of(active, typeof(*overlay), last_flip); | 394 | container_of(active, typeof(*overlay), last_flip); |
339 | struct i915_vma *vma; | 395 | struct drm_i915_private *dev_priv = overlay->i915; |
340 | |||
341 | /* never have the overlay hw on without showing a frame */ | ||
342 | vma = fetch_and_zero(&overlay->vma); | ||
343 | if (WARN_ON(!vma)) | ||
344 | return; | ||
345 | 396 | ||
346 | i915_gem_object_unpin_from_display_plane(vma); | 397 | intel_overlay_release_old_vma(overlay); |
347 | i915_vma_put(vma); | ||
348 | 398 | ||
349 | overlay->crtc->overlay = NULL; | 399 | overlay->crtc->overlay = NULL; |
350 | overlay->crtc = NULL; | 400 | overlay->crtc = NULL; |
351 | overlay->active = false; | 401 | overlay->active = false; |
402 | |||
403 | if (IS_I830(dev_priv)) | ||
404 | i830_overlay_clock_gating(dev_priv, true); | ||
352 | } | 405 | } |
353 | 406 | ||
354 | /* overlay needs to be disabled in OCMD reg */ | 407 | /* overlay needs to be disabled in OCMD reg */ |
355 | static int intel_overlay_off(struct intel_overlay *overlay) | 408 | static int intel_overlay_off(struct intel_overlay *overlay) |
356 | { | 409 | { |
357 | struct drm_i915_private *dev_priv = overlay->i915; | ||
358 | struct drm_i915_gem_request *req; | 410 | struct drm_i915_gem_request *req; |
359 | struct intel_ring *ring; | 411 | struct intel_ring *ring; |
360 | u32 flip_addr = overlay->flip_addr; | 412 | u32 flip_addr = overlay->flip_addr; |
@@ -379,25 +431,21 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
379 | } | 431 | } |
380 | 432 | ||
381 | ring = req->ring; | 433 | ring = req->ring; |
434 | |||
382 | /* wait for overlay to go idle */ | 435 | /* wait for overlay to go idle */ |
383 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 436 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
384 | intel_ring_emit(ring, flip_addr); | 437 | intel_ring_emit(ring, flip_addr); |
385 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 438 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
439 | |||
386 | /* turn overlay off */ | 440 | /* turn overlay off */ |
387 | if (IS_I830(dev_priv)) { | 441 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); |
388 | /* Workaround: Don't disable the overlay fully, since otherwise | 442 | intel_ring_emit(ring, flip_addr); |
389 | * it dies on the next OVERLAY_ON cmd. */ | 443 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
390 | intel_ring_emit(ring, MI_NOOP); | 444 | |
391 | intel_ring_emit(ring, MI_NOOP); | ||
392 | intel_ring_emit(ring, MI_NOOP); | ||
393 | } else { | ||
394 | intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); | ||
395 | intel_ring_emit(ring, flip_addr); | ||
396 | intel_ring_emit(ring, | ||
397 | MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
398 | } | ||
399 | intel_ring_advance(ring); | 445 | intel_ring_advance(ring); |
400 | 446 | ||
447 | intel_overlay_flip_prepare(overlay, NULL); | ||
448 | |||
401 | return intel_overlay_do_wait_request(overlay, req, | 449 | return intel_overlay_do_wait_request(overlay, req, |
402 | intel_overlay_off_tail); | 450 | intel_overlay_off_tail); |
403 | } | 451 | } |
@@ -542,51 +590,57 @@ static int uv_vsubsampling(u32 format) | |||
542 | 590 | ||
543 | static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width) | 591 | static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width) |
544 | { | 592 | { |
545 | u32 mask, shift, ret; | 593 | u32 sw; |
546 | if (IS_GEN2(dev_priv)) { | 594 | |
547 | mask = 0x1f; | 595 | if (IS_GEN2(dev_priv)) |
548 | shift = 5; | 596 | sw = ALIGN((offset & 31) + width, 32); |
549 | } else { | 597 | else |
550 | mask = 0x3f; | 598 | sw = ALIGN((offset & 63) + width, 64); |
551 | shift = 6; | 599 | |
552 | } | 600 | if (sw == 0) |
553 | ret = ((offset + width + mask) >> shift) - (offset >> shift); | 601 | return 0; |
554 | if (!IS_GEN2(dev_priv)) | 602 | |
555 | ret <<= 1; | 603 | return (sw - 32) >> 3; |
556 | ret -= 1; | ||
557 | return ret << 2; | ||
558 | } | 604 | } |
559 | 605 | ||
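calc_swidthsw() is rewritten in terms of ALIGN(): round the span (offset & mask) + width up to the block size (32 bytes on GEN2, 64 otherwise), then convert to the register encoding. Both forms compute the same value; a worked check for the non-GEN2 case with arbitrary inputs (arithmetic only, not driver code):

	u32 offset = 100, width = 200;

	/* old: 64-byte blocks spanned, doubled, minus one, times four */
	u32 old_sw = ((((offset + width + 63) >> 6) - (offset >> 6)) * 2 - 1) << 2;

	/* new: ALIGN the span, then (sw - 32) >> 3 */
	u32 new_sw = (ALIGN((offset & 63) + width, 64) - 32) >> 3;

	/* both evaluate to 28 */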
560 | static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { | 606 | static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = { |
561 | 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, | 607 | [ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, }, |
562 | 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, | 608 | [ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, }, |
563 | 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, | 609 | [ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, }, |
564 | 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, | 610 | [ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, }, |
565 | 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, | 611 | [ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, }, |
566 | 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, | 612 | [ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, }, |
567 | 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, | 613 | [ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, }, |
568 | 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, | 614 | [ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, }, |
569 | 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, | 615 | [ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, }, |
570 | 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, | 616 | [ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, }, |
571 | 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, | 617 | [10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, }, |
572 | 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, | 618 | [11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, }, |
573 | 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, | 619 | [12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, }, |
574 | 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, | 620 | [13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, }, |
575 | 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, | 621 | [14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, }, |
576 | 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, | 622 | [15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, }, |
577 | 0xb000, 0x3000, 0x0800, 0x3000, 0xb000 | 623 | [16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, }, |
578 | }; | 624 | }; |
579 | 625 | ||
580 | static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { | 626 | static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = { |
581 | 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, | 627 | [ 0] = { 0x3000, 0x1800, 0x1800, }, |
582 | 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, | 628 | [ 1] = { 0xb000, 0x18d0, 0x2e60, }, |
583 | 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880, | 629 | [ 2] = { 0xb000, 0x1990, 0x2ce0, }, |
584 | 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00, | 630 | [ 3] = { 0xb020, 0x1a68, 0x2b40, }, |
585 | 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0, | 631 | [ 4] = { 0xb040, 0x1b20, 0x29e0, }, |
586 | 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, | 632 | [ 5] = { 0xb060, 0x1bd8, 0x2880, }, |
587 | 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, | 633 | [ 6] = { 0xb080, 0x1c88, 0x3e60, }, |
588 | 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, | 634 | [ 7] = { 0xb0a0, 0x1d28, 0x3c00, }, |
589 | 0x3000, 0x0800, 0x3000 | 635 | [ 8] = { 0xb0c0, 0x1db8, 0x39e0, }, |
636 | [ 9] = { 0xb0e0, 0x1e40, 0x37e0, }, | ||
637 | [10] = { 0xb100, 0x1eb8, 0x3620, }, | ||
638 | [11] = { 0xb100, 0x1f18, 0x34a0, }, | ||
639 | [12] = { 0xb100, 0x1f68, 0x3360, }, | ||
640 | [13] = { 0xb0e0, 0x1fa8, 0x3240, }, | ||
641 | [14] = { 0xb0c0, 0x1fe0, 0x3140, }, | ||
642 | [15] = { 0xb060, 0x1ff0, 0x30a0, }, | ||
643 | [16] = { 0x3000, 0x0800, 0x3000, }, | ||
590 | }; | 644 | }; |
591 | 645 | ||
592 | static void update_polyphase_filter(struct overlay_registers __iomem *regs) | 646 | static void update_polyphase_filter(struct overlay_registers __iomem *regs) |
@@ -659,16 +713,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay, | |||
659 | static void update_colorkey(struct intel_overlay *overlay, | 713 | static void update_colorkey(struct intel_overlay *overlay, |
660 | struct overlay_registers __iomem *regs) | 714 | struct overlay_registers __iomem *regs) |
661 | { | 715 | { |
662 | const struct drm_framebuffer *fb = | 716 | const struct intel_plane_state *state = |
663 | overlay->crtc->base.primary->fb; | 717 | to_intel_plane_state(overlay->crtc->base.primary->state); |
664 | u32 key = overlay->color_key; | 718 | u32 key = overlay->color_key; |
665 | u32 flags; | 719 | u32 format = 0; |
720 | u32 flags = 0; | ||
666 | 721 | ||
667 | flags = 0; | ||
668 | if (overlay->color_key_enabled) | 722 | if (overlay->color_key_enabled) |
669 | flags |= DST_KEY_ENABLE; | 723 | flags |= DST_KEY_ENABLE; |
670 | 724 | ||
671 | switch (fb->format->format) { | 725 | if (state->base.visible) |
726 | format = state->base.fb->format->format; | ||
727 | |||
728 | switch (format) { | ||
672 | case DRM_FORMAT_C8: | 729 | case DRM_FORMAT_C8: |
673 | key = 0; | 730 | key = 0; |
674 | flags |= CLK_RGB8I_MASK; | 731 | flags |= CLK_RGB8I_MASK; |
@@ -834,18 +891,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
834 | 891 | ||
835 | intel_overlay_unmap_regs(overlay, regs); | 892 | intel_overlay_unmap_regs(overlay, regs); |
836 | 893 | ||
837 | ret = intel_overlay_continue(overlay, scale_changed); | 894 | ret = intel_overlay_continue(overlay, vma, scale_changed); |
838 | if (ret) | 895 | if (ret) |
839 | goto out_unpin; | 896 | goto out_unpin; |
840 | 897 | ||
841 | i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL, | ||
842 | vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe)); | ||
843 | |||
844 | overlay->old_vma = overlay->vma; | ||
845 | overlay->vma = vma; | ||
846 | |||
847 | intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe)); | ||
848 | |||
849 | return 0; | 898 | return 0; |
850 | 899 | ||
851 | out_unpin: | 900 | out_unpin: |
@@ -919,12 +968,13 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay) | |||
919 | static int check_overlay_dst(struct intel_overlay *overlay, | 968 | static int check_overlay_dst(struct intel_overlay *overlay, |
920 | struct drm_intel_overlay_put_image *rec) | 969 | struct drm_intel_overlay_put_image *rec) |
921 | { | 970 | { |
922 | struct drm_display_mode *mode = &overlay->crtc->base.mode; | 971 | const struct intel_crtc_state *pipe_config = |
972 | overlay->crtc->config; | ||
923 | 973 | ||
924 | if (rec->dst_x < mode->hdisplay && | 974 | if (rec->dst_x < pipe_config->pipe_src_w && |
925 | rec->dst_x + rec->dst_width <= mode->hdisplay && | 975 | rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w && |
926 | rec->dst_y < mode->vdisplay && | 976 | rec->dst_y < pipe_config->pipe_src_h && |
927 | rec->dst_y + rec->dst_height <= mode->vdisplay) | 977 | rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h) |
928 | return 0; | 978 | return 0; |
929 | else | 979 | else |
930 | return -EINVAL; | 980 | return -EINVAL; |
@@ -956,7 +1006,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv, | |||
956 | u32 tmp; | 1006 | u32 tmp; |
957 | 1007 | ||
958 | /* check src dimensions */ | 1008 | /* check src dimensions */ |
959 | if (IS_845G(dev_priv) || IS_I830(dev_priv)) { | 1009 | if (IS_I845G(dev_priv) || IS_I830(dev_priv)) { |
960 | if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || | 1010 | if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || |
961 | rec->src_width > IMAGE_MAX_WIDTH_LEGACY) | 1011 | rec->src_width > IMAGE_MAX_WIDTH_LEGACY) |
962 | return -EINVAL; | 1012 | return -EINVAL; |
@@ -1008,7 +1058,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv, | |||
1008 | return -EINVAL; | 1058 | return -EINVAL; |
1009 | 1059 | ||
1010 | /* stride checking */ | 1060 | /* stride checking */ |
1011 | if (IS_I830(dev_priv) || IS_845G(dev_priv)) | 1061 | if (IS_I830(dev_priv) || IS_I845G(dev_priv)) |
1012 | stride_mask = 255; | 1062 | stride_mask = 255; |
1013 | else | 1063 | else |
1014 | stride_mask = 63; | 1064 | stride_mask = 63; |
@@ -1056,33 +1106,6 @@ static int check_overlay_src(struct drm_i915_private *dev_priv, | |||
1056 | return 0; | 1106 | return 0; |
1057 | } | 1107 | } |
1058 | 1108 | ||
1059 | /** | ||
1060 | * Return the pipe currently connected to the panel fitter, | ||
1061 | * or -1 if the panel fitter is not present or not in use | ||
1062 | */ | ||
1063 | static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv) | ||
1064 | { | ||
1065 | u32 pfit_control; | ||
1066 | |||
1067 | /* i830 doesn't have a panel fitter */ | ||
1068 | if (INTEL_GEN(dev_priv) <= 3 && | ||
1069 | (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) | ||
1070 | return -1; | ||
1071 | |||
1072 | pfit_control = I915_READ(PFIT_CONTROL); | ||
1073 | |||
1074 | /* See if the panel fitter is in use */ | ||
1075 | if ((pfit_control & PFIT_ENABLE) == 0) | ||
1076 | return -1; | ||
1077 | |||
1078 | /* 965 can place panel fitter on either pipe */ | ||
1079 | if (IS_GEN4(dev_priv)) | ||
1080 | return (pfit_control >> 29) & 0x3; | ||
1081 | |||
1082 | /* older chips can only use pipe 1 */ | ||
1083 | return 1; | ||
1084 | } | ||
1085 | |||
1086 | int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, | 1109 | int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, |
1087 | struct drm_file *file_priv) | 1110 | struct drm_file *file_priv) |
1088 | { | 1111 | { |
@@ -1144,7 +1167,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, | |||
1144 | goto out_unlock; | 1167 | goto out_unlock; |
1145 | 1168 | ||
1146 | if (overlay->crtc != crtc) { | 1169 | if (overlay->crtc != crtc) { |
1147 | struct drm_display_mode *mode = &crtc->base.mode; | ||
1148 | ret = intel_overlay_switch_off(overlay); | 1170 | ret = intel_overlay_switch_off(overlay); |
1149 | if (ret != 0) | 1171 | if (ret != 0) |
1150 | goto out_unlock; | 1172 | goto out_unlock; |
@@ -1157,8 +1179,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, | |||
1157 | crtc->overlay = overlay; | 1179 | crtc->overlay = overlay; |
1158 | 1180 | ||
1159 | /* line too wide, i.e. one-line-mode */ | 1181 | /* line too wide, i.e. one-line-mode */ |
1160 | if (mode->hdisplay > 1024 && | 1182 | if (crtc->config->pipe_src_w > 1024 && |
1161 | intel_panel_fitter_pipe(dev_priv) == crtc->pipe) { | 1183 | crtc->config->gmch_pfit.control & PFIT_ENABLE) { |
1162 | overlay->pfit_active = true; | 1184 | overlay->pfit_active = true; |
1163 | update_pfit_vscale_ratio(overlay); | 1185 | update_pfit_vscale_ratio(overlay); |
1164 | } else | 1186 | } else |
@@ -1213,6 +1235,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, | |||
1213 | 1235 | ||
1214 | mutex_unlock(&dev->struct_mutex); | 1236 | mutex_unlock(&dev->struct_mutex); |
1215 | drm_modeset_unlock_all(dev); | 1237 | drm_modeset_unlock_all(dev); |
1238 | i915_gem_object_put(new_bo); | ||
1216 | 1239 | ||
1217 | kfree(params); | 1240 | kfree(params); |
1218 | 1241 | ||
@@ -1390,10 +1413,9 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) | |||
1390 | 1413 | ||
1391 | reg_bo = NULL; | 1414 | reg_bo = NULL; |
1392 | if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) | 1415 | if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) |
1393 | reg_bo = i915_gem_object_create_stolen(&dev_priv->drm, | 1416 | reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE); |
1394 | PAGE_SIZE); | ||
1395 | if (reg_bo == NULL) | 1417 | if (reg_bo == NULL) |
1396 | reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE); | 1418 | reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE); |
1397 | if (IS_ERR(reg_bo)) | 1419 | if (IS_ERR(reg_bo)) |
1398 | goto out_free; | 1420 | goto out_free; |
1399 | overlay->reg_bo = reg_bo; | 1421 | overlay->reg_bo = reg_bo; |
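Taken together, the overlay hunks move frontbuffer tracking to a prepare/complete pair: intel_overlay_flip_prepare() records the outgoing vma and calls intel_frontbuffer_flip_prepare() before the flip is submitted, while intel_overlay_release_old_vma() calls intel_frontbuffer_flip_complete() and unpins once the request retires, for both the put-image and off paths. A condensed sketch of the lifecycle (the helpers are the ones added above; the sequencing notes are paraphrased from the hunks):

	intel_overlay_flip_prepare(overlay, vma);	/* before emitting
							 * MI_OVERLAY_FLIP */
	/* ... emit flip, submit request ... */

	/* from the request's retirement callback: */
	intel_overlay_release_old_vma(overlay);		/* flip_complete +
							 * unpin old vma */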
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 08ab6d762ca4..1a6ff26dea20 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, | |||
48 | 48 | ||
49 | /** | 49 | /** |
50 | * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID | 50 | * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID |
51 | * @dev: drm device | 51 | * @dev_priv: i915 device instance |
52 | * @fixed_mode: panel native mode | 52 | * @fixed_mode: panel native mode |
53 | * @connector: LVDS/eDP connector | 53 | * @connector: LVDS/eDP connector |
54 | * | 54 | * |
@@ -56,7 +56,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, | |||
56 | * Find the reduced downclock for LVDS/eDP in EDID. | 56 | * Find the reduced downclock for LVDS/eDP in EDID. |
57 | */ | 57 | */ |
58 | struct drm_display_mode * | 58 | struct drm_display_mode * |
59 | intel_find_panel_downclock(struct drm_device *dev, | 59 | intel_find_panel_downclock(struct drm_i915_private *dev_priv, |
60 | struct drm_display_mode *fixed_mode, | 60 | struct drm_display_mode *fixed_mode, |
61 | struct drm_connector *connector) | 61 | struct drm_connector *connector) |
62 | { | 62 | { |
@@ -94,7 +94,7 @@ intel_find_panel_downclock(struct drm_device *dev, | |||
94 | } | 94 | } |
95 | 95 | ||
96 | if (temp_downclock < fixed_mode->clock) | 96 | if (temp_downclock < fixed_mode->clock) |
97 | return drm_mode_duplicate(dev, tmp_mode); | 97 | return drm_mode_duplicate(&dev_priv->drm, tmp_mode); |
98 | else | 98 | else |
99 | return NULL; | 99 | return NULL; |
100 | } | 100 | } |
@@ -375,10 +375,8 @@ out: | |||
375 | } | 375 | } |
376 | 376 | ||
377 | enum drm_connector_status | 377 | enum drm_connector_status |
378 | intel_panel_detect(struct drm_device *dev) | 378 | intel_panel_detect(struct drm_i915_private *dev_priv) |
379 | { | 379 | { |
380 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
381 | |||
382 | /* Assume that the BIOS does not lie through the OpRegion... */ | 380 | /* Assume that the BIOS does not lie through the OpRegion... */ |
383 | if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { | 381 | if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { |
384 | return *dev_priv->opregion.lid_state & 0x1 ? | 382 | return *dev_priv->opregion.lid_state & 0x1 ? |
@@ -1039,10 +1037,7 @@ static void bxt_enable_backlight(struct intel_connector *connector) | |||
1039 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 1037 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
1040 | u32 pwm_ctl, val; | 1038 | u32 pwm_ctl, val; |
1041 | 1039 | ||
1042 | /* To use 2nd set of backlight registers, utility pin has to be | 1040 | /* Controller 1 uses the utility pin. */ |
1043 | * enabled with PWM mode. | ||
1044 | * The field should only be changed when the utility pin is disabled | ||
1045 | */ | ||
1046 | if (panel->backlight.controller == 1) { | 1041 | if (panel->backlight.controller == 1) { |
1047 | val = I915_READ(UTIL_PIN_CTL); | 1042 | val = I915_READ(UTIL_PIN_CTL); |
1048 | if (val & UTIL_PIN_ENABLE) { | 1043 | if (val & UTIL_PIN_ENABLE) { |
@@ -1332,8 +1327,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | |||
1332 | */ | 1327 | */ |
1333 | static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | 1328 | static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) |
1334 | { | 1329 | { |
1335 | struct drm_device *dev = connector->base.dev; | 1330 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1336 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1337 | int clock; | 1331 | int clock; |
1338 | 1332 | ||
1339 | if (IS_G4X(dev_priv)) | 1333 | if (IS_G4X(dev_priv)) |
@@ -1608,19 +1602,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) | |||
1608 | struct intel_panel *panel = &connector->panel; | 1602 | struct intel_panel *panel = &connector->panel; |
1609 | u32 pwm_ctl, val; | 1603 | u32 pwm_ctl, val; |
1610 | 1604 | ||
1611 | /* | 1605 | panel->backlight.controller = dev_priv->vbt.backlight.controller; |
1612 | * For BXT hard coding the Backlight controller to 0. | ||
1613 | * TODO : Read the controller value from VBT and generalize | ||
1614 | */ | ||
1615 | panel->backlight.controller = 0; | ||
1616 | 1606 | ||
1617 | pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); | 1607 | pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); |
1618 | 1608 | ||
1619 | /* Keeping the check if controller 1 is to be programmed. | 1609 | /* Controller 1 uses the utility pin. */ |
1620 | * This will come into effect once the VBT parsing | ||
1621 | * is fixed for controller selection, and controller 1 is used | ||
1622 | * for a particular display configuration. | ||
1623 | */ | ||
1624 | if (panel->backlight.controller == 1) { | 1610 | if (panel->backlight.controller == 1) { |
1625 | val = I915_READ(UTIL_PIN_CTL); | 1611 | val = I915_READ(UTIL_PIN_CTL); |
1626 | panel->backlight.util_pin_active_low = | 1612 | panel->backlight.util_pin_active_low = |
@@ -1756,7 +1742,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) | |||
1756 | intel_dsi_dcs_init_backlight_funcs(connector) == 0) | 1742 | intel_dsi_dcs_init_backlight_funcs(connector) == 0) |
1757 | return; | 1743 | return; |
1758 | 1744 | ||
1759 | if (IS_BROXTON(dev_priv)) { | 1745 | if (IS_GEN9_LP(dev_priv)) { |
1760 | panel->backlight.setup = bxt_setup_backlight; | 1746 | panel->backlight.setup = bxt_setup_backlight; |
1761 | panel->backlight.enable = bxt_enable_backlight; | 1747 | panel->backlight.enable = bxt_enable_backlight; |
1762 | panel->backlight.disable = bxt_disable_backlight; | 1748 | panel->backlight.disable = bxt_disable_backlight; |
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c new file mode 100644 index 000000000000..ef0c0e195164 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c | |||
@@ -0,0 +1,939 @@ | |||
1 | /* | ||
2 | * Copyright © 2013 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Author: Damien Lespiau <damien.lespiau@intel.com> | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/seq_file.h> | ||
28 | #include <linux/circ_buf.h> | ||
29 | #include <linux/ctype.h> | ||
30 | #include <linux/debugfs.h> | ||
31 | #include "intel_drv.h" | ||
32 | |||
33 | struct pipe_crc_info { | ||
34 | const char *name; | ||
35 | struct drm_i915_private *dev_priv; | ||
36 | enum pipe pipe; | ||
37 | }; | ||
38 | |||
39 | /* As the drm_debugfs_init() routines are called before dev->dev_private is | ||
40 | * allocated, we need to hook into the minor for release. | ||
41 | */ | ||
42 | static int drm_add_fake_info_node(struct drm_minor *minor, | ||
43 | struct dentry *ent, const void *key) | ||
44 | { | ||
45 | struct drm_info_node *node; | ||
46 | |||
47 | node = kmalloc(sizeof(*node), GFP_KERNEL); | ||
48 | if (node == NULL) { | ||
49 | debugfs_remove(ent); | ||
50 | return -ENOMEM; | ||
51 | } | ||
52 | |||
53 | node->minor = minor; | ||
54 | node->dent = ent; | ||
55 | node->info_ent = (void *) key; | ||
56 | |||
57 | mutex_lock(&minor->debugfs_lock); | ||
58 | list_add(&node->list, &minor->debugfs_list); | ||
59 | mutex_unlock(&minor->debugfs_lock); | ||
60 | |||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int i915_pipe_crc_open(struct inode *inode, struct file *filep) | ||
65 | { | ||
66 | struct pipe_crc_info *info = inode->i_private; | ||
67 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
68 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
69 | |||
70 | if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes) | ||
71 | return -ENODEV; | ||
72 | |||
73 | spin_lock_irq(&pipe_crc->lock); | ||
74 | |||
75 | if (pipe_crc->opened) { | ||
76 | spin_unlock_irq(&pipe_crc->lock); | ||
77 | return -EBUSY; /* already open */ | ||
78 | } | ||
79 | |||
80 | pipe_crc->opened = true; | ||
81 | filep->private_data = inode->i_private; | ||
82 | |||
83 | spin_unlock_irq(&pipe_crc->lock); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static int i915_pipe_crc_release(struct inode *inode, struct file *filep) | ||
89 | { | ||
90 | struct pipe_crc_info *info = inode->i_private; | ||
91 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
92 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
93 | |||
94 | spin_lock_irq(&pipe_crc->lock); | ||
95 | pipe_crc->opened = false; | ||
96 | spin_unlock_irq(&pipe_crc->lock); | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | /* (6 fields, 8 chars each, space separated (5) + '\n') */ | ||
102 | #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) | ||
103 | /* account for '\0' */ | ||
104 | #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) | ||
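These two constants mirror the snprintf() format used in i915_pipe_crc_read() below ("%8u %8x %8x %8x %8x %8x\n"): six 8-character fields plus five separating spaces plus the newline is 54 bytes per line, and the staging buffer adds one byte for the terminating NUL. A compile-time sanity check, as a sketch rather than part of the patch:

    _Static_assert(6 * 8 + 5 + 1 == 54, "one CRC line is 54 bytes");
    _Static_assert(6 * 8 + 5 + 1 + 1 == 55, "staging buffer also holds the NUL");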
105 | |||
106 | static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) | ||
107 | { | ||
108 | assert_spin_locked(&pipe_crc->lock); | ||
109 | return CIRC_CNT(pipe_crc->head, pipe_crc->tail, | ||
110 | INTEL_PIPE_CRC_ENTRIES_NR); | ||
111 | } | ||
112 | |||
113 | static ssize_t | ||
114 | i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, | ||
115 | loff_t *pos) | ||
116 | { | ||
117 | struct pipe_crc_info *info = filep->private_data; | ||
118 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
119 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
120 | char buf[PIPE_CRC_BUFFER_LEN]; | ||
121 | int n_entries; | ||
122 | ssize_t bytes_read; | ||
123 | |||
124 | /* | ||
125 | * Don't allow user space to provide buffers not big enough to hold | ||
126 | * a line of data. | ||
127 | */ | ||
128 | if (count < PIPE_CRC_LINE_LEN) | ||
129 | return -EINVAL; | ||
130 | |||
131 | if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) | ||
132 | return 0; | ||
133 | |||
134 | /* nothing to read */ | ||
135 | spin_lock_irq(&pipe_crc->lock); | ||
136 | while (pipe_crc_data_count(pipe_crc) == 0) { | ||
137 | int ret; | ||
138 | |||
139 | if (filep->f_flags & O_NONBLOCK) { | ||
140 | spin_unlock_irq(&pipe_crc->lock); | ||
141 | return -EAGAIN; | ||
142 | } | ||
143 | |||
144 | ret = wait_event_interruptible_lock_irq(pipe_crc->wq, | ||
145 | pipe_crc_data_count(pipe_crc), pipe_crc->lock); | ||
146 | if (ret) { | ||
147 | spin_unlock_irq(&pipe_crc->lock); | ||
148 | return ret; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | /* We now have one or more entries to read */ | ||
153 | n_entries = count / PIPE_CRC_LINE_LEN; | ||
154 | |||
155 | bytes_read = 0; | ||
156 | while (n_entries > 0) { | ||
157 | struct intel_pipe_crc_entry *entry = | ||
158 | &pipe_crc->entries[pipe_crc->tail]; | ||
159 | |||
160 | if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, | ||
161 | INTEL_PIPE_CRC_ENTRIES_NR) < 1) | ||
162 | break; | ||
163 | |||
164 | BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); | ||
165 | pipe_crc->tail = (pipe_crc->tail + 1) & | ||
166 | (INTEL_PIPE_CRC_ENTRIES_NR - 1); | ||
167 | |||
168 | bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, | ||
169 | "%8u %8x %8x %8x %8x %8x\n", | ||
170 | entry->frame, entry->crc[0], | ||
171 | entry->crc[1], entry->crc[2], | ||
172 | entry->crc[3], entry->crc[4]); | ||
173 | |||
174 | spin_unlock_irq(&pipe_crc->lock); | ||
175 | |||
176 | if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) | ||
177 | return -EFAULT; | ||
178 | |||
179 | user_buf += PIPE_CRC_LINE_LEN; | ||
180 | n_entries--; | ||
181 | |||
182 | spin_lock_irq(&pipe_crc->lock); | ||
183 | } | ||
184 | |||
185 | spin_unlock_irq(&pipe_crc->lock); | ||
186 | |||
187 | return bytes_read; | ||
188 | } | ||
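The read side relies on INTEL_PIPE_CRC_ENTRIES_NR being a power of two (BUILD_BUG_ON_NOT_POWER_OF_2 enforces it), so the tail advances with a mask rather than a modulo while CIRC_CNT() reports how many entries are available. A minimal self-contained sketch of the same ring arithmetic, with ENTRIES_NR standing in for the driver's constant:

    #define ENTRIES_NR 128                        /* must be a power of two */
    #define RING_CNT(head, tail) (((head) - (tail)) & (ENTRIES_NR - 1))

    /* consume one entry and advance the tail with a masked increment */
    static unsigned int ring_pop(unsigned int *tail)
    {
            unsigned int idx = *tail;

            *tail = (*tail + 1) & (ENTRIES_NR - 1);
            return idx;
    }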
189 | |||
190 | static const struct file_operations i915_pipe_crc_fops = { | ||
191 | .owner = THIS_MODULE, | ||
192 | .open = i915_pipe_crc_open, | ||
193 | .read = i915_pipe_crc_read, | ||
194 | .release = i915_pipe_crc_release, | ||
195 | }; | ||
196 | |||
197 | static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { | ||
198 | { | ||
199 | .name = "i915_pipe_A_crc", | ||
200 | .pipe = PIPE_A, | ||
201 | }, | ||
202 | { | ||
203 | .name = "i915_pipe_B_crc", | ||
204 | .pipe = PIPE_B, | ||
205 | }, | ||
206 | { | ||
207 | .name = "i915_pipe_C_crc", | ||
208 | .pipe = PIPE_C, | ||
209 | }, | ||
210 | }; | ||
211 | |||
212 | static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, | ||
213 | enum pipe pipe) | ||
214 | { | ||
215 | struct drm_i915_private *dev_priv = to_i915(minor->dev); | ||
216 | struct dentry *ent; | ||
217 | struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; | ||
218 | |||
219 | info->dev_priv = dev_priv; | ||
220 | ent = debugfs_create_file(info->name, S_IRUGO, root, info, | ||
221 | &i915_pipe_crc_fops); | ||
222 | if (!ent) | ||
223 | return -ENOMEM; | ||
224 | |||
225 | return drm_add_fake_info_node(minor, ent, info); | ||
226 | } | ||
227 | |||
228 | static const char * const pipe_crc_sources[] = { | ||
229 | "none", | ||
230 | "plane1", | ||
231 | "plane2", | ||
232 | "pf", | ||
233 | "pipe", | ||
234 | "TV", | ||
235 | "DP-B", | ||
236 | "DP-C", | ||
237 | "DP-D", | ||
238 | "auto", | ||
239 | }; | ||
240 | |||
241 | static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) | ||
242 | { | ||
243 | BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); | ||
244 | return pipe_crc_sources[source]; | ||
245 | } | ||
246 | |||
247 | static int display_crc_ctl_show(struct seq_file *m, void *data) | ||
248 | { | ||
249 | struct drm_i915_private *dev_priv = m->private; | ||
250 | int i; | ||
251 | |||
252 | for (i = 0; i < I915_MAX_PIPES; i++) | ||
253 | seq_printf(m, "%c %s\n", pipe_name(i), | ||
254 | pipe_crc_source_name(dev_priv->pipe_crc[i].source)); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static int display_crc_ctl_open(struct inode *inode, struct file *file) | ||
260 | { | ||
261 | return single_open(file, display_crc_ctl_show, inode->i_private); | ||
262 | } | ||
263 | |||
264 | static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, | ||
265 | uint32_t *val) | ||
266 | { | ||
267 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) | ||
268 | *source = INTEL_PIPE_CRC_SOURCE_PIPE; | ||
269 | |||
270 | switch (*source) { | ||
271 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
272 | *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; | ||
273 | break; | ||
274 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
275 | *val = 0; | ||
276 | break; | ||
277 | default: | ||
278 | return -EINVAL; | ||
279 | } | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, | ||
285 | enum pipe pipe, | ||
286 | enum intel_pipe_crc_source *source) | ||
287 | { | ||
288 | struct drm_device *dev = &dev_priv->drm; | ||
289 | struct intel_encoder *encoder; | ||
290 | struct intel_crtc *crtc; | ||
291 | struct intel_digital_port *dig_port; | ||
292 | int ret = 0; | ||
293 | |||
294 | *source = INTEL_PIPE_CRC_SOURCE_PIPE; | ||
295 | |||
296 | drm_modeset_lock_all(dev); | ||
297 | for_each_intel_encoder(dev, encoder) { | ||
298 | if (!encoder->base.crtc) | ||
299 | continue; | ||
300 | |||
301 | crtc = to_intel_crtc(encoder->base.crtc); | ||
302 | |||
303 | if (crtc->pipe != pipe) | ||
304 | continue; | ||
305 | |||
306 | switch (encoder->type) { | ||
307 | case INTEL_OUTPUT_TVOUT: | ||
308 | *source = INTEL_PIPE_CRC_SOURCE_TV; | ||
309 | break; | ||
310 | case INTEL_OUTPUT_DP: | ||
311 | case INTEL_OUTPUT_EDP: | ||
312 | dig_port = enc_to_dig_port(&encoder->base); | ||
313 | switch (dig_port->port) { | ||
314 | case PORT_B: | ||
315 | *source = INTEL_PIPE_CRC_SOURCE_DP_B; | ||
316 | break; | ||
317 | case PORT_C: | ||
318 | *source = INTEL_PIPE_CRC_SOURCE_DP_C; | ||
319 | break; | ||
320 | case PORT_D: | ||
321 | *source = INTEL_PIPE_CRC_SOURCE_DP_D; | ||
322 | break; | ||
323 | default: | ||
324 | WARN(1, "nonexisting DP port %c\n", | ||
325 | port_name(dig_port->port)); | ||
326 | break; | ||
327 | } | ||
328 | break; | ||
329 | default: | ||
330 | break; | ||
331 | } | ||
332 | } | ||
333 | drm_modeset_unlock_all(dev); | ||
334 | |||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, | ||
339 | enum pipe pipe, | ||
340 | enum intel_pipe_crc_source *source, | ||
341 | uint32_t *val) | ||
342 | { | ||
343 | bool need_stable_symbols = false; | ||
344 | |||
345 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { | ||
346 | int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); | ||
347 | if (ret) | ||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | switch (*source) { | ||
352 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
353 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; | ||
354 | break; | ||
355 | case INTEL_PIPE_CRC_SOURCE_DP_B: | ||
356 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; | ||
357 | need_stable_symbols = true; | ||
358 | break; | ||
359 | case INTEL_PIPE_CRC_SOURCE_DP_C: | ||
360 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; | ||
361 | need_stable_symbols = true; | ||
362 | break; | ||
363 | case INTEL_PIPE_CRC_SOURCE_DP_D: | ||
364 | if (!IS_CHERRYVIEW(dev_priv)) | ||
365 | return -EINVAL; | ||
366 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; | ||
367 | need_stable_symbols = true; | ||
368 | break; | ||
369 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
370 | *val = 0; | ||
371 | break; | ||
372 | default: | ||
373 | return -EINVAL; | ||
374 | } | ||
375 | |||
376 | /* | ||
377 | * When the pipe CRC tap point is after the transcoders we need | ||
378 | * to tweak symbol-level features to produce a deterministic series of | ||
379 | * symbols for a given frame. We need to reset those features only once | ||
380 | * a frame (instead of every nth symbol): | ||
381 | * - DC-balance: used to ensure a better clock recovery from the data | ||
382 | * link (SDVO) | ||
383 | * - DisplayPort scrambling: used for EMI reduction | ||
384 | */ | ||
385 | if (need_stable_symbols) { | ||
386 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
387 | |||
388 | tmp |= DC_BALANCE_RESET_VLV; | ||
389 | switch (pipe) { | ||
390 | case PIPE_A: | ||
391 | tmp |= PIPE_A_SCRAMBLE_RESET; | ||
392 | break; | ||
393 | case PIPE_B: | ||
394 | tmp |= PIPE_B_SCRAMBLE_RESET; | ||
395 | break; | ||
396 | case PIPE_C: | ||
397 | tmp |= PIPE_C_SCRAMBLE_RESET; | ||
398 | break; | ||
399 | default: | ||
400 | return -EINVAL; | ||
401 | } | ||
402 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
403 | } | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, | ||
409 | enum pipe pipe, | ||
410 | enum intel_pipe_crc_source *source, | ||
411 | uint32_t *val) | ||
412 | { | ||
413 | bool need_stable_symbols = false; | ||
414 | |||
415 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { | ||
416 | int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); | ||
417 | if (ret) | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | switch (*source) { | ||
422 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
423 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; | ||
424 | break; | ||
425 | case INTEL_PIPE_CRC_SOURCE_TV: | ||
426 | if (!SUPPORTS_TV(dev_priv)) | ||
427 | return -EINVAL; | ||
428 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; | ||
429 | break; | ||
430 | case INTEL_PIPE_CRC_SOURCE_DP_B: | ||
431 | if (!IS_G4X(dev_priv)) | ||
432 | return -EINVAL; | ||
433 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; | ||
434 | need_stable_symbols = true; | ||
435 | break; | ||
436 | case INTEL_PIPE_CRC_SOURCE_DP_C: | ||
437 | if (!IS_G4X(dev_priv)) | ||
438 | return -EINVAL; | ||
439 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; | ||
440 | need_stable_symbols = true; | ||
441 | break; | ||
442 | case INTEL_PIPE_CRC_SOURCE_DP_D: | ||
443 | if (!IS_G4X(dev_priv)) | ||
444 | return -EINVAL; | ||
445 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; | ||
446 | need_stable_symbols = true; | ||
447 | break; | ||
448 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
449 | *val = 0; | ||
450 | break; | ||
451 | default: | ||
452 | return -EINVAL; | ||
453 | } | ||
454 | |||
455 | /* | ||
456 | * When the pipe CRC tap point is after the transcoders we need | ||
457 | * to tweak symbol-level features to produce a deterministic series of | ||
458 | * symbols for a given frame. We need to reset those features only once | ||
459 | * a frame (instead of every nth symbol): | ||
460 | * - DC-balance: used to ensure a better clock recovery from the data | ||
461 | * link (SDVO) | ||
462 | * - DisplayPort scrambling: used for EMI reduction | ||
463 | */ | ||
464 | if (need_stable_symbols) { | ||
465 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
466 | |||
467 | WARN_ON(!IS_G4X(dev_priv)); | ||
468 | |||
469 | I915_WRITE(PORT_DFT_I9XX, | ||
470 | I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); | ||
471 | |||
472 | if (pipe == PIPE_A) | ||
473 | tmp |= PIPE_A_SCRAMBLE_RESET; | ||
474 | else | ||
475 | tmp |= PIPE_B_SCRAMBLE_RESET; | ||
476 | |||
477 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
478 | } | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, | ||
484 | enum pipe pipe) | ||
485 | { | ||
486 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
487 | |||
488 | switch (pipe) { | ||
489 | case PIPE_A: | ||
490 | tmp &= ~PIPE_A_SCRAMBLE_RESET; | ||
491 | break; | ||
492 | case PIPE_B: | ||
493 | tmp &= ~PIPE_B_SCRAMBLE_RESET; | ||
494 | break; | ||
495 | case PIPE_C: | ||
496 | tmp &= ~PIPE_C_SCRAMBLE_RESET; | ||
497 | break; | ||
498 | default: | ||
499 | return; | ||
500 | } | ||
501 | if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) | ||
502 | tmp &= ~DC_BALANCE_RESET_VLV; | ||
503 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
504 | |||
505 | } | ||
506 | |||
507 | static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, | ||
508 | enum pipe pipe) | ||
509 | { | ||
510 | uint32_t tmp = I915_READ(PORT_DFT2_G4X); | ||
511 | |||
512 | if (pipe == PIPE_A) | ||
513 | tmp &= ~PIPE_A_SCRAMBLE_RESET; | ||
514 | else | ||
515 | tmp &= ~PIPE_B_SCRAMBLE_RESET; | ||
516 | I915_WRITE(PORT_DFT2_G4X, tmp); | ||
517 | |||
518 | if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { | ||
519 | I915_WRITE(PORT_DFT_I9XX, | ||
520 | I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, | ||
525 | uint32_t *val) | ||
526 | { | ||
527 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) | ||
528 | *source = INTEL_PIPE_CRC_SOURCE_PIPE; | ||
529 | |||
530 | switch (*source) { | ||
531 | case INTEL_PIPE_CRC_SOURCE_PLANE1: | ||
532 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; | ||
533 | break; | ||
534 | case INTEL_PIPE_CRC_SOURCE_PLANE2: | ||
535 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; | ||
536 | break; | ||
537 | case INTEL_PIPE_CRC_SOURCE_PIPE: | ||
538 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; | ||
539 | break; | ||
540 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
541 | *val = 0; | ||
542 | break; | ||
543 | default: | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | |||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv, | ||
551 | bool enable) | ||
552 | { | ||
553 | struct drm_device *dev = &dev_priv->drm; | ||
554 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); | ||
555 | struct intel_crtc_state *pipe_config; | ||
556 | struct drm_atomic_state *state; | ||
557 | int ret = 0; | ||
558 | |||
559 | drm_modeset_lock_all(dev); | ||
560 | state = drm_atomic_state_alloc(dev); | ||
561 | if (!state) { | ||
562 | ret = -ENOMEM; | ||
563 | goto out; | ||
564 | } | ||
565 | |||
566 | state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); | ||
567 | pipe_config = intel_atomic_get_crtc_state(state, crtc); | ||
568 | if (IS_ERR(pipe_config)) { | ||
569 | ret = PTR_ERR(pipe_config); | ||
570 | goto out; | ||
571 | } | ||
572 | |||
573 | pipe_config->pch_pfit.force_thru = enable; | ||
574 | if (pipe_config->cpu_transcoder == TRANSCODER_EDP && | ||
575 | pipe_config->pch_pfit.enabled != enable) | ||
576 | pipe_config->base.connectors_changed = true; | ||
577 | |||
578 | ret = drm_atomic_commit(state); | ||
579 | out: | ||
580 | WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); | ||
581 | drm_modeset_unlock_all(dev); | ||
582 | drm_atomic_state_put(state); | ||
583 | } | ||
584 | |||
585 | static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, | ||
586 | enum pipe pipe, | ||
587 | enum intel_pipe_crc_source *source, | ||
588 | uint32_t *val) | ||
589 | { | ||
590 | if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) | ||
591 | *source = INTEL_PIPE_CRC_SOURCE_PF; | ||
592 | |||
593 | switch (*source) { | ||
594 | case INTEL_PIPE_CRC_SOURCE_PLANE1: | ||
595 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; | ||
596 | break; | ||
597 | case INTEL_PIPE_CRC_SOURCE_PLANE2: | ||
598 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; | ||
599 | break; | ||
600 | case INTEL_PIPE_CRC_SOURCE_PF: | ||
601 | if (IS_HASWELL(dev_priv) && pipe == PIPE_A) | ||
602 | hsw_trans_edp_pipe_A_crc_wa(dev_priv, true); | ||
603 | |||
604 | *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; | ||
605 | break; | ||
606 | case INTEL_PIPE_CRC_SOURCE_NONE: | ||
607 | *val = 0; | ||
608 | break; | ||
609 | default: | ||
610 | return -EINVAL; | ||
611 | } | ||
612 | |||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | static int pipe_crc_set_source(struct drm_i915_private *dev_priv, | ||
617 | enum pipe pipe, | ||
618 | enum intel_pipe_crc_source source) | ||
619 | { | ||
620 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | ||
621 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); | ||
622 | enum intel_display_power_domain power_domain; | ||
623 | u32 val = 0; /* shut up gcc */ | ||
624 | int ret; | ||
625 | |||
626 | if (pipe_crc->source == source) | ||
627 | return 0; | ||
628 | |||
629 | /* forbid changing the source without going back to 'none' */ | ||
630 | if (pipe_crc->source && source) | ||
631 | return -EINVAL; | ||
632 | |||
633 | power_domain = POWER_DOMAIN_PIPE(pipe); | ||
634 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { | ||
635 | DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); | ||
636 | return -EIO; | ||
637 | } | ||
638 | |||
639 | if (IS_GEN2(dev_priv)) | ||
640 | ret = i8xx_pipe_crc_ctl_reg(&source, &val); | ||
641 | else if (INTEL_GEN(dev_priv) < 5) | ||
642 | ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); | ||
643 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
644 | ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); | ||
645 | else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) | ||
646 | ret = ilk_pipe_crc_ctl_reg(&source, &val); | ||
647 | else | ||
648 | ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); | ||
649 | |||
650 | if (ret != 0) | ||
651 | goto out; | ||
652 | |||
653 | /* none -> real source transition */ | ||
654 | if (source) { | ||
655 | struct intel_pipe_crc_entry *entries; | ||
656 | |||
657 | DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", | ||
658 | pipe_name(pipe), pipe_crc_source_name(source)); | ||
659 | |||
660 | entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, | ||
661 | sizeof(pipe_crc->entries[0]), | ||
662 | GFP_KERNEL); | ||
663 | if (!entries) { | ||
664 | ret = -ENOMEM; | ||
665 | goto out; | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * When IPS gets enabled, the pipe CRC changes. Since IPS gets | ||
670 | * enabled and disabled dynamically based on package C states, | ||
671 | * user space can't make reliable use of the CRCs, so let's just | ||
672 | * completely disable it. | ||
673 | */ | ||
674 | hsw_disable_ips(crtc); | ||
675 | |||
676 | spin_lock_irq(&pipe_crc->lock); | ||
677 | kfree(pipe_crc->entries); | ||
678 | pipe_crc->entries = entries; | ||
679 | pipe_crc->head = 0; | ||
680 | pipe_crc->tail = 0; | ||
681 | spin_unlock_irq(&pipe_crc->lock); | ||
682 | } | ||
683 | |||
684 | pipe_crc->source = source; | ||
685 | |||
686 | I915_WRITE(PIPE_CRC_CTL(pipe), val); | ||
687 | POSTING_READ(PIPE_CRC_CTL(pipe)); | ||
688 | |||
689 | /* real source -> none transition */ | ||
690 | if (source == INTEL_PIPE_CRC_SOURCE_NONE) { | ||
691 | struct intel_pipe_crc_entry *entries; | ||
692 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, | ||
693 | pipe); | ||
694 | |||
695 | DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", | ||
696 | pipe_name(pipe)); | ||
697 | |||
698 | drm_modeset_lock(&crtc->base.mutex, NULL); | ||
699 | if (crtc->base.state->active) | ||
700 | intel_wait_for_vblank(dev_priv, pipe); | ||
701 | drm_modeset_unlock(&crtc->base.mutex); | ||
702 | |||
703 | spin_lock_irq(&pipe_crc->lock); | ||
704 | entries = pipe_crc->entries; | ||
705 | pipe_crc->entries = NULL; | ||
706 | pipe_crc->head = 0; | ||
707 | pipe_crc->tail = 0; | ||
708 | spin_unlock_irq(&pipe_crc->lock); | ||
709 | |||
710 | kfree(entries); | ||
711 | |||
712 | if (IS_G4X(dev_priv)) | ||
713 | g4x_undo_pipe_scramble_reset(dev_priv, pipe); | ||
714 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
715 | vlv_undo_pipe_scramble_reset(dev_priv, pipe); | ||
716 | else if (IS_HASWELL(dev_priv) && pipe == PIPE_A) | ||
717 | hsw_trans_edp_pipe_A_crc_wa(dev_priv, false); | ||
718 | |||
719 | hsw_enable_ips(crtc); | ||
720 | } | ||
721 | |||
722 | ret = 0; | ||
723 | |||
724 | out: | ||
725 | intel_display_power_put(dev_priv, power_domain); | ||
726 | |||
727 | return ret; | ||
728 | } | ||
729 | |||
730 | /* | ||
731 | * Parse pipe CRC command strings: | ||
732 | * command: wsp* object wsp+ name wsp+ source wsp* | ||
733 | * object: 'pipe' | ||
734 | * name: (A | B | C) | ||
735 | * source: (none | plane1 | plane2 | pf) | ||
736 | * wsp: (#0x20 | #0x9 | #0xA)+ | ||
737 | * | ||
738 | * e.g.: | ||
739 | * "pipe A plane1" -> Start CRC computations on plane1 of pipe A | ||
740 | * "pipe A none" -> Stop CRC | ||
741 | */ | ||
742 | static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) | ||
743 | { | ||
744 | int n_words = 0; | ||
745 | |||
746 | while (*buf) { | ||
747 | char *end; | ||
748 | |||
749 | /* skip leading white space */ | ||
750 | buf = skip_spaces(buf); | ||
751 | if (!*buf) | ||
752 | break; /* end of buffer */ | ||
753 | |||
754 | /* find end of word */ | ||
755 | for (end = buf; *end && !isspace(*end); end++) | ||
756 | ; | ||
757 | |||
758 | if (n_words == max_words) { | ||
759 | DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", | ||
760 | max_words); | ||
761 | return -EINVAL; /* ran out of words[] before bytes */ | ||
762 | } | ||
763 | |||
764 | if (*end) | ||
765 | *end++ = '\0'; | ||
766 | words[n_words++] = buf; | ||
767 | buf = end; | ||
768 | } | ||
769 | |||
770 | return n_words; | ||
771 | } | ||
772 | |||
773 | enum intel_pipe_crc_object { | ||
774 | PIPE_CRC_OBJECT_PIPE, | ||
775 | }; | ||
776 | |||
777 | static const char * const pipe_crc_objects[] = { | ||
778 | "pipe", | ||
779 | }; | ||
780 | |||
781 | static int | ||
782 | display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) | ||
783 | { | ||
784 | int i; | ||
785 | |||
786 | for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) | ||
787 | if (!strcmp(buf, pipe_crc_objects[i])) { | ||
788 | *o = i; | ||
789 | return 0; | ||
790 | } | ||
791 | |||
792 | return -EINVAL; | ||
793 | } | ||
794 | |||
795 | static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) | ||
796 | { | ||
797 | const char name = buf[0]; | ||
798 | |||
799 | if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) | ||
800 | return -EINVAL; | ||
801 | |||
802 | *pipe = name - 'A'; | ||
803 | |||
804 | return 0; | ||
805 | } | ||
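The pipe letter maps onto the enum with plain ASCII arithmetic, and anything at or beyond pipe_name(I915_MAX_PIPES) is rejected. For example:

    enum pipe pipe;

    display_crc_ctl_parse_pipe("C", &pipe);   /* pipe = 'C' - 'A' = PIPE_C (2) */
    display_crc_ctl_parse_pipe("D", &pipe);   /* -EINVAL with three pipes      */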
806 | |||
807 | static int | ||
808 | display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) | ||
809 | { | ||
810 | int i; | ||
811 | |||
812 | for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) | ||
813 | if (!strcmp(buf, pipe_crc_sources[i])) { | ||
814 | *s = i; | ||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | return -EINVAL; | ||
819 | } | ||
820 | |||
821 | static int display_crc_ctl_parse(struct drm_i915_private *dev_priv, | ||
822 | char *buf, size_t len) | ||
823 | { | ||
824 | #define N_WORDS 3 | ||
825 | int n_words; | ||
826 | char *words[N_WORDS]; | ||
827 | enum pipe pipe; | ||
828 | enum intel_pipe_crc_object object; | ||
829 | enum intel_pipe_crc_source source; | ||
830 | |||
831 | n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); | ||
832 | if (n_words != N_WORDS) { | ||
833 | DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", | ||
834 | N_WORDS); | ||
835 | return -EINVAL; | ||
836 | } | ||
837 | |||
838 | if (display_crc_ctl_parse_object(words[0], &object) < 0) { | ||
839 | DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); | ||
840 | return -EINVAL; | ||
841 | } | ||
842 | |||
843 | if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { | ||
844 | DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); | ||
845 | return -EINVAL; | ||
846 | } | ||
847 | |||
848 | if (display_crc_ctl_parse_source(words[2], &source) < 0) { | ||
849 | DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); | ||
850 | return -EINVAL; | ||
851 | } | ||
852 | |||
853 | return pipe_crc_set_source(dev_priv, pipe, source); | ||
854 | } | ||
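Tying the grammar back to userspace: a hypothetical sketch that starts and then stops CRC capture through the debugfs file this parser serves (the debugfs mount point and DRM minor number are assumptions, not taken from the patch):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl", O_WRONLY);

            if (fd < 0)
                    return 1;
            /* each write() carries one complete "object name source" command */
            write(fd, "pipe A plane1", 13);   /* arm CRCs on plane1 of pipe A */
            write(fd, "pipe A none", 11);     /* back to none stops capture   */
            close(fd);
            return 0;
    }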
855 | |||
856 | static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, | ||
857 | size_t len, loff_t *offp) | ||
858 | { | ||
859 | struct seq_file *m = file->private_data; | ||
860 | struct drm_i915_private *dev_priv = m->private; | ||
861 | char *tmpbuf; | ||
862 | int ret; | ||
863 | |||
864 | if (len == 0) | ||
865 | return 0; | ||
866 | |||
867 | if (len > PAGE_SIZE - 1) { | ||
868 | DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", | ||
869 | PAGE_SIZE); | ||
870 | return -E2BIG; | ||
871 | } | ||
872 | |||
873 | tmpbuf = kmalloc(len + 1, GFP_KERNEL); | ||
874 | if (!tmpbuf) | ||
875 | return -ENOMEM; | ||
876 | |||
877 | if (copy_from_user(tmpbuf, ubuf, len)) { | ||
878 | ret = -EFAULT; | ||
879 | goto out; | ||
880 | } | ||
881 | tmpbuf[len] = '\0'; | ||
882 | |||
883 | ret = display_crc_ctl_parse(dev_priv, tmpbuf, len); | ||
884 | |||
885 | out: | ||
886 | kfree(tmpbuf); | ||
887 | if (ret < 0) | ||
888 | return ret; | ||
889 | |||
890 | *offp += len; | ||
891 | return len; | ||
892 | } | ||
893 | |||
894 | const struct file_operations i915_display_crc_ctl_fops = { | ||
895 | .owner = THIS_MODULE, | ||
896 | .open = display_crc_ctl_open, | ||
897 | .read = seq_read, | ||
898 | .llseek = seq_lseek, | ||
899 | .release = single_release, | ||
900 | .write = display_crc_ctl_write | ||
901 | }; | ||
902 | |||
903 | void intel_display_crc_init(struct drm_i915_private *dev_priv) | ||
904 | { | ||
905 | enum pipe pipe; | ||
906 | |||
907 | for_each_pipe(dev_priv, pipe) { | ||
908 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | ||
909 | |||
910 | pipe_crc->opened = false; | ||
911 | spin_lock_init(&pipe_crc->lock); | ||
912 | init_waitqueue_head(&pipe_crc->wq); | ||
913 | } | ||
914 | } | ||
915 | |||
916 | int intel_pipe_crc_create(struct drm_minor *minor) | ||
917 | { | ||
918 | int ret, i; | ||
919 | |||
920 | for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { | ||
921 | ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); | ||
922 | if (ret) | ||
923 | return ret; | ||
924 | } | ||
925 | |||
926 | return 0; | ||
927 | } | ||
928 | |||
929 | void intel_pipe_crc_cleanup(struct drm_minor *minor) | ||
930 | { | ||
931 | int i; | ||
932 | |||
933 | for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { | ||
934 | struct drm_info_list *info_list = | ||
935 | (struct drm_info_list *)&i915_pipe_crc_data[i]; | ||
936 | |||
937 | drm_debugfs_remove_files(info_list, 1, minor); | ||
938 | } | ||
939 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9f3b78dfa997..249623d45be0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -312,23 +312,30 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) | |||
312 | #define FW_WM(value, plane) \ | 312 | #define FW_WM(value, plane) \ |
313 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) | 313 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) |
314 | 314 | ||
315 | void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | 315 | static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) |
316 | { | 316 | { |
317 | bool was_enabled; | ||
317 | u32 val; | 318 | u32 val; |
318 | 319 | ||
319 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 320 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
321 | was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; | ||
320 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); | 322 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); |
321 | POSTING_READ(FW_BLC_SELF_VLV); | 323 | POSTING_READ(FW_BLC_SELF_VLV); |
322 | dev_priv->wm.vlv.cxsr = enable; | 324 | } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { |
323 | } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) { | 325 | was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; |
324 | I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); | 326 | I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); |
325 | POSTING_READ(FW_BLC_SELF); | 327 | POSTING_READ(FW_BLC_SELF); |
326 | } else if (IS_PINEVIEW(dev_priv)) { | 328 | } else if (IS_PINEVIEW(dev_priv)) { |
327 | val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; | 329 | val = I915_READ(DSPFW3); |
328 | val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; | 330 | was_enabled = val & PINEVIEW_SELF_REFRESH_EN; |
331 | if (enable) | ||
332 | val |= PINEVIEW_SELF_REFRESH_EN; | ||
333 | else | ||
334 | val &= ~PINEVIEW_SELF_REFRESH_EN; | ||
329 | I915_WRITE(DSPFW3, val); | 335 | I915_WRITE(DSPFW3, val); |
330 | POSTING_READ(DSPFW3); | 336 | POSTING_READ(DSPFW3); |
331 | } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { | 337 | } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { |
338 | was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; | ||
332 | val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : | 339 | val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : |
333 | _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); | 340 | _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); |
334 | I915_WRITE(FW_BLC_SELF, val); | 341 | I915_WRITE(FW_BLC_SELF, val); |
@@ -339,17 +346,33 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | |||
339 | * and yet it does have the related watermark in | 346 | * and yet it does have the related watermark in |
340 | * FW_BLC_SELF. What's going on? | 347 | * FW_BLC_SELF. What's going on? |
341 | */ | 348 | */ |
349 | was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; | ||
342 | val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : | 350 | val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : |
343 | _MASKED_BIT_DISABLE(INSTPM_SELF_EN); | 351 | _MASKED_BIT_DISABLE(INSTPM_SELF_EN); |
344 | I915_WRITE(INSTPM, val); | 352 | I915_WRITE(INSTPM, val); |
345 | POSTING_READ(INSTPM); | 353 | POSTING_READ(INSTPM); |
346 | } else { | 354 | } else { |
347 | return; | 355 | return false; |
348 | } | 356 | } |
349 | 357 | ||
350 | DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable)); | 358 | DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n", |
359 | enableddisabled(enable), | ||
360 | enableddisabled(was_enabled)); | ||
361 | |||
362 | return was_enabled; | ||
351 | } | 363 | } |
352 | 364 | ||
365 | bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | ||
366 | { | ||
367 | bool ret; | ||
368 | |||
369 | mutex_lock(&dev_priv->wm.wm_mutex); | ||
370 | ret = _intel_set_memory_cxsr(dev_priv, enable); | ||
371 | dev_priv->wm.vlv.cxsr = enable; | ||
372 | mutex_unlock(&dev_priv->wm.wm_mutex); | ||
373 | |||
374 | return ret; | ||
375 | } | ||
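This hunk is the usual locked-wrapper split: the underscore-prefixed helper performs the register writes and reports whether self-refresh was previously enabled, and the exported intel_set_memory_cxsr() takes wm.wm_mutex around it, presumably so that watermark code already holding the mutex can call the bare helper. A minimal sketch of the pattern with illustrative names:

    #include <linux/mutex.h>

    struct sr_state {
            struct mutex lock;
            bool enabled;
    };

    static bool _set_sr(struct sr_state *s, bool enable)
    {
            bool was_enabled = s->enabled;    /* caller holds s->lock */

            s->enabled = enable;
            return was_enabled;
    }

    bool set_sr(struct sr_state *s, bool enable)
    {
            bool was_enabled;

            mutex_lock(&s->lock);
            was_enabled = _set_sr(s, enable);
            mutex_unlock(&s->lock);
            return was_enabled;
    }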
353 | 376 | ||
354 | /* | 377 | /* |
355 | * Latency for FIFO fetches is dependent on several factors: | 378 | * Latency for FIFO fetches is dependent on several factors: |
@@ -370,12 +393,15 @@ static const int pessimal_latency_ns = 5000; | |||
370 | #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ | 393 | #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ |
371 | ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) | 394 | ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) |
372 | 395 | ||
373 | static int vlv_get_fifo_size(struct drm_i915_private *dev_priv, | 396 | static int vlv_get_fifo_size(struct intel_plane *plane) |
374 | enum pipe pipe, int plane) | ||
375 | { | 397 | { |
398 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | ||
376 | int sprite0_start, sprite1_start, size; | 399 | int sprite0_start, sprite1_start, size; |
377 | 400 | ||
378 | switch (pipe) { | 401 | if (plane->id == PLANE_CURSOR) |
402 | return 63; | ||
403 | |||
404 | switch (plane->pipe) { | ||
379 | uint32_t dsparb, dsparb2, dsparb3; | 405 | uint32_t dsparb, dsparb2, dsparb3; |
380 | case PIPE_A: | 406 | case PIPE_A: |
381 | dsparb = I915_READ(DSPARB); | 407 | dsparb = I915_READ(DSPARB); |
@@ -399,24 +425,21 @@ static int vlv_get_fifo_size(struct drm_i915_private *dev_priv, | |||
399 | return 0; | 425 | return 0; |
400 | } | 426 | } |
401 | 427 | ||
402 | switch (plane) { | 428 | switch (plane->id) { |
403 | case 0: | 429 | case PLANE_PRIMARY: |
404 | size = sprite0_start; | 430 | size = sprite0_start; |
405 | break; | 431 | break; |
406 | case 1: | 432 | case PLANE_SPRITE0: |
407 | size = sprite1_start - sprite0_start; | 433 | size = sprite1_start - sprite0_start; |
408 | break; | 434 | break; |
409 | case 2: | 435 | case PLANE_SPRITE1: |
410 | size = 512 - 1 - sprite1_start; | 436 | size = 512 - 1 - sprite1_start; |
411 | break; | 437 | break; |
412 | default: | 438 | default: |
413 | return 0; | 439 | return 0; |
414 | } | 440 | } |
415 | 441 | ||
416 | DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n", | 442 | DRM_DEBUG_KMS("%s FIFO size: %d\n", plane->base.name, size); |
417 | pipe_name(pipe), plane == 0 ? "primary" : "sprite", | ||
418 | plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1), | ||
419 | size); | ||
420 | 443 | ||
421 | return size; | 444 | return size; |
422 | } | 445 | } |
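For reference, the VLV_FIFO_START() macro defined just above assembles a 9-bit FIFO boundary: eight low bits from DSPARB and one high bit from DSPARB2, enough to address all 512 FIFO entries. A worked example with illustrative register values:

    /* dsparb  = 0x00004000, lo_shift = 8 -> (dsparb  >> 8) & 0xff = 0x40 (64)
     * dsparb2 = 0x00000001, hi_shift = 0 -> (dsparb2 >> 0) & 0x1  = 1
     * start   = 64 | (1 << 8)            = 320 entries into the FIFO
     */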
@@ -842,71 +865,77 @@ static bool g4x_compute_srwm(struct drm_i915_private *dev_priv, | |||
842 | #define FW_WM_VLV(value, plane) \ | 865 | #define FW_WM_VLV(value, plane) \ |
843 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) | 866 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) |
844 | 867 | ||
845 | static void vlv_write_wm_values(struct intel_crtc *crtc, | 868 | static void vlv_write_wm_values(struct drm_i915_private *dev_priv, |
846 | const struct vlv_wm_values *wm) | 869 | const struct vlv_wm_values *wm) |
847 | { | 870 | { |
848 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 871 | enum pipe pipe; |
849 | enum pipe pipe = crtc->pipe; | ||
850 | 872 | ||
851 | I915_WRITE(VLV_DDL(pipe), | 873 | for_each_pipe(dev_priv, pipe) { |
852 | (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) | | 874 | I915_WRITE(VLV_DDL(pipe), |
853 | (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) | | 875 | (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | |
854 | (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) | | 876 | (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | |
855 | (wm->ddl[pipe].primary << DDL_PLANE_SHIFT)); | 877 | (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | |
878 | (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * Zero the (unused) WM1 watermarks, and also clear all the | ||
883 | * high order bits so that there are no out of bounds values | ||
884 | * present in the registers during the reprogramming. | ||
885 | */ | ||
886 | I915_WRITE(DSPHOWM, 0); | ||
887 | I915_WRITE(DSPHOWM1, 0); | ||
888 | I915_WRITE(DSPFW4, 0); | ||
889 | I915_WRITE(DSPFW5, 0); | ||
890 | I915_WRITE(DSPFW6, 0); | ||
856 | 891 | ||
857 | I915_WRITE(DSPFW1, | 892 | I915_WRITE(DSPFW1, |
858 | FW_WM(wm->sr.plane, SR) | | 893 | FW_WM(wm->sr.plane, SR) | |
859 | FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) | | 894 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | |
860 | FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) | | 895 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | |
861 | FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA)); | 896 | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); |
862 | I915_WRITE(DSPFW2, | 897 | I915_WRITE(DSPFW2, |
863 | FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) | | 898 | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | |
864 | FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) | | 899 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | |
865 | FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA)); | 900 | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); |
866 | I915_WRITE(DSPFW3, | 901 | I915_WRITE(DSPFW3, |
867 | FW_WM(wm->sr.cursor, CURSOR_SR)); | 902 | FW_WM(wm->sr.cursor, CURSOR_SR)); |
868 | 903 | ||
869 | if (IS_CHERRYVIEW(dev_priv)) { | 904 | if (IS_CHERRYVIEW(dev_priv)) { |
870 | I915_WRITE(DSPFW7_CHV, | 905 | I915_WRITE(DSPFW7_CHV, |
871 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | | 906 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | |
872 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); | 907 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); |
873 | I915_WRITE(DSPFW8_CHV, | 908 | I915_WRITE(DSPFW8_CHV, |
874 | FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) | | 909 | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | |
875 | FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE)); | 910 | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); |
876 | I915_WRITE(DSPFW9_CHV, | 911 | I915_WRITE(DSPFW9_CHV, |
877 | FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) | | 912 | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | |
878 | FW_WM(wm->pipe[PIPE_C].cursor, CURSORC)); | 913 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); |
879 | I915_WRITE(DSPHOWM, | 914 | I915_WRITE(DSPHOWM, |
880 | FW_WM(wm->sr.plane >> 9, SR_HI) | | 915 | FW_WM(wm->sr.plane >> 9, SR_HI) | |
881 | FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) | | 916 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | |
882 | FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) | | 917 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | |
883 | FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) | | 918 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | |
884 | FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | | 919 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | |
885 | FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | | 920 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | |
886 | FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | | 921 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | |
887 | FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | | 922 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | |
888 | FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | | 923 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | |
889 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); | 924 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); |
890 | } else { | 925 | } else { |
891 | I915_WRITE(DSPFW7, | 926 | I915_WRITE(DSPFW7, |
892 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | | 927 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | |
893 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); | 928 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); |
894 | I915_WRITE(DSPHOWM, | 929 | I915_WRITE(DSPHOWM, |
895 | FW_WM(wm->sr.plane >> 9, SR_HI) | | 930 | FW_WM(wm->sr.plane >> 9, SR_HI) | |
896 | FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | | 931 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | |
897 | FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | | 932 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | |
898 | FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | | 933 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | |
899 | FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | | 934 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | |
900 | FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | | 935 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | |
901 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); | 936 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); |
902 | } | 937 | } |
903 | 938 | ||
904 | /* zero (unused) WM1 watermarks */ | ||
905 | I915_WRITE(DSPFW4, 0); | ||
906 | I915_WRITE(DSPFW5, 0); | ||
907 | I915_WRITE(DSPFW6, 0); | ||
908 | I915_WRITE(DSPHOWM1, 0); | ||
909 | |||
910 | POSTING_READ(DSPFW1); | 939 | POSTING_READ(DSPFW1); |
911 | } | 940 | } |
912 | 941 | ||
@@ -949,24 +978,26 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) | |||
949 | } | 978 | } |
950 | } | 979 | } |
951 | 980 | ||
952 | static uint16_t vlv_compute_wm_level(struct intel_plane *plane, | 981 | static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, |
953 | struct intel_crtc *crtc, | 982 | const struct intel_plane_state *plane_state, |
954 | const struct intel_plane_state *state, | ||
955 | int level) | 983 | int level) |
956 | { | 984 | { |
985 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | ||
957 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 986 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
987 | const struct drm_display_mode *adjusted_mode = | ||
988 | &crtc_state->base.adjusted_mode; | ||
958 | int clock, htotal, cpp, width, wm; | 989 | int clock, htotal, cpp, width, wm; |
959 | 990 | ||
960 | if (dev_priv->wm.pri_latency[level] == 0) | 991 | if (dev_priv->wm.pri_latency[level] == 0) |
961 | return USHRT_MAX; | 992 | return USHRT_MAX; |
962 | 993 | ||
963 | if (!state->base.visible) | 994 | if (!plane_state->base.visible) |
964 | return 0; | 995 | return 0; |
965 | 996 | ||
966 | cpp = state->base.fb->format->cpp[0]; | 997 | cpp = plane_state->base.fb->format->cpp[0]; |
967 | clock = crtc->config->base.adjusted_mode.crtc_clock; | 998 | clock = adjusted_mode->crtc_clock; |
968 | htotal = crtc->config->base.adjusted_mode.crtc_htotal; | 999 | htotal = adjusted_mode->crtc_htotal; |
969 | width = crtc->config->pipe_src_w; | 1000 | width = crtc_state->pipe_src_w; |
970 | if (WARN_ON(htotal == 0)) | 1001 | if (WARN_ON(htotal == 0)) |
971 | htotal = 1; | 1002 | htotal = 1; |
972 | 1003 | ||
@@ -1053,48 +1084,45 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) | |||
1053 | WARN_ON(fifo_left != 0); | 1084 | WARN_ON(fifo_left != 0); |
1054 | } | 1085 | } |
1055 | 1086 | ||
1087 | static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) | ||
1088 | { | ||
1089 | if (wm > fifo_size) | ||
1090 | return USHRT_MAX; | ||
1091 | else | ||
1092 | return fifo_size - wm; | ||
1093 | } | ||
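The new helper spells out what the open-coded subtractions used to do: it inverts a watermark against the available FIFO size (fifo_size - wm), saturating to USHRT_MAX when the watermark does not fit so the level can be discarded. Two worked cases, with an illustrative fifo_size of 511:

    vlv_invert_wm_value(200, 511);   /* -> 311 (511 - 200)           */
    vlv_invert_wm_value(600, 511);   /* -> USHRT_MAX, level unusable */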
1094 | |||
1056 | static void vlv_invert_wms(struct intel_crtc *crtc) | 1095 | static void vlv_invert_wms(struct intel_crtc *crtc) |
1057 | { | 1096 | { |
1058 | struct vlv_wm_state *wm_state = &crtc->wm_state; | 1097 | struct vlv_wm_state *wm_state = &crtc->wm_state; |
1059 | int level; | 1098 | int level; |
1060 | 1099 | ||
1061 | for (level = 0; level < wm_state->num_levels; level++) { | 1100 | for (level = 0; level < wm_state->num_levels; level++) { |
1062 | struct drm_device *dev = crtc->base.dev; | 1101 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1063 | const int sr_fifo_size = | 1102 | const int sr_fifo_size = |
1064 | INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1; | 1103 | INTEL_INFO(dev_priv)->num_pipes * 512 - 1; |
1065 | struct intel_plane *plane; | 1104 | struct intel_plane *plane; |
1066 | 1105 | ||
1067 | wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane; | 1106 | wm_state->sr[level].plane = |
1068 | wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor; | 1107 | vlv_invert_wm_value(wm_state->sr[level].plane, |
1069 | 1108 | sr_fifo_size); | |
1070 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | 1109 | wm_state->sr[level].cursor = |
1071 | switch (plane->base.type) { | 1110 | vlv_invert_wm_value(wm_state->sr[level].cursor, |
1072 | int sprite; | 1111 | 63); |
1073 | case DRM_PLANE_TYPE_CURSOR: | 1112 | |
1074 | wm_state->wm[level].cursor = plane->wm.fifo_size - | 1113 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
1075 | wm_state->wm[level].cursor; | 1114 | wm_state->wm[level].plane[plane->id] = |
1076 | break; | 1115 | vlv_invert_wm_value(wm_state->wm[level].plane[plane->id], |
1077 | case DRM_PLANE_TYPE_PRIMARY: | 1116 | plane->wm.fifo_size); |
1078 | wm_state->wm[level].primary = plane->wm.fifo_size - | ||
1079 | wm_state->wm[level].primary; | ||
1080 | break; | ||
1081 | case DRM_PLANE_TYPE_OVERLAY: | ||
1082 | sprite = plane->plane; | ||
1083 | wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size - | ||
1084 | wm_state->wm[level].sprite[sprite]; | ||
1085 | break; | ||
1086 | } | ||
1087 | } | 1117 | } |
1088 | } | 1118 | } |
1089 | } | 1119 | } |
1090 | 1120 | ||
1091 | static void vlv_compute_wm(struct intel_crtc *crtc) | 1121 | static void vlv_compute_wm(struct intel_crtc *crtc) |
1092 | { | 1122 | { |
1093 | struct drm_device *dev = crtc->base.dev; | 1123 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1094 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1095 | struct vlv_wm_state *wm_state = &crtc->wm_state; | 1124 | struct vlv_wm_state *wm_state = &crtc->wm_state; |
1096 | struct intel_plane *plane; | 1125 | struct intel_plane *plane; |
1097 | int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; | ||
1098 | int level; | 1126 | int level; |
1099 | 1127 | ||
1100 | memset(wm_state, 0, sizeof(*wm_state)); | 1128 | memset(wm_state, 0, sizeof(*wm_state)); |
@@ -1109,45 +1137,27 @@ static void vlv_compute_wm(struct intel_crtc *crtc) | |||
1109 | if (wm_state->num_active_planes != 1) | 1137 | if (wm_state->num_active_planes != 1) |
1110 | wm_state->cxsr = false; | 1138 | wm_state->cxsr = false; |
1111 | 1139 | ||
1112 | if (wm_state->cxsr) { | 1140 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
1113 | for (level = 0; level < wm_state->num_levels; level++) { | ||
1114 | wm_state->sr[level].plane = sr_fifo_size; | ||
1115 | wm_state->sr[level].cursor = 63; | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | ||
1120 | struct intel_plane_state *state = | 1141 | struct intel_plane_state *state = |
1121 | to_intel_plane_state(plane->base.state); | 1142 | to_intel_plane_state(plane->base.state); |
1143 | int level; | ||
1122 | 1144 | ||
1123 | if (!state->base.visible) | 1145 | if (!state->base.visible) |
1124 | continue; | 1146 | continue; |
1125 | 1147 | ||
1126 | /* normal watermarks */ | 1148 | /* normal watermarks */ |
1127 | for (level = 0; level < wm_state->num_levels; level++) { | 1149 | for (level = 0; level < wm_state->num_levels; level++) { |
1128 | int wm = vlv_compute_wm_level(plane, crtc, state, level); | 1150 | int wm = vlv_compute_wm_level(crtc->config, state, level); |
1129 | int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511; | 1151 | int max_wm = plane->wm.fifo_size; |
1130 | 1152 | ||
1131 | /* hack */ | 1153 | /* hack */ |
1132 | if (WARN_ON(level == 0 && wm > max_wm)) | 1154 | if (WARN_ON(level == 0 && wm > max_wm)) |
1133 | wm = max_wm; | 1155 | wm = max_wm; |
1134 | 1156 | ||
1135 | if (wm > plane->wm.fifo_size) | 1157 | if (wm > max_wm) |
1136 | break; | 1158 | break; |
1137 | 1159 | ||
1138 | switch (plane->base.type) { | 1160 | wm_state->wm[level].plane[plane->id] = wm; |
1139 | int sprite; | ||
1140 | case DRM_PLANE_TYPE_CURSOR: | ||
1141 | wm_state->wm[level].cursor = wm; | ||
1142 | break; | ||
1143 | case DRM_PLANE_TYPE_PRIMARY: | ||
1144 | wm_state->wm[level].primary = wm; | ||
1145 | break; | ||
1146 | case DRM_PLANE_TYPE_OVERLAY: | ||
1147 | sprite = plane->plane; | ||
1148 | wm_state->wm[level].sprite[sprite] = wm; | ||
1149 | break; | ||
1150 | } | ||
1151 | } | 1161 | } |
1152 | 1162 | ||
1153 | wm_state->num_levels = level; | 1163 | wm_state->num_levels = level; |
@@ -1156,26 +1166,15 @@ static void vlv_compute_wm(struct intel_crtc *crtc) | |||
1156 | continue; | 1166 | continue; |
1157 | 1167 | ||
1158 | /* maxfifo watermarks */ | 1168 | /* maxfifo watermarks */ |
1159 | switch (plane->base.type) { | 1169 | if (plane->id == PLANE_CURSOR) { |
1160 | int sprite, level; | ||
1161 | case DRM_PLANE_TYPE_CURSOR: | ||
1162 | for (level = 0; level < wm_state->num_levels; level++) | 1170 | for (level = 0; level < wm_state->num_levels; level++) |
1163 | wm_state->sr[level].cursor = | 1171 | wm_state->sr[level].cursor = |
1164 | wm_state->wm[level].cursor; | 1172 | wm_state->wm[level].plane[PLANE_CURSOR]; |
1165 | break; | 1173 | } else { |
1166 | case DRM_PLANE_TYPE_PRIMARY: | ||
1167 | for (level = 0; level < wm_state->num_levels; level++) | ||
1168 | wm_state->sr[level].plane = | ||
1169 | min(wm_state->sr[level].plane, | ||
1170 | wm_state->wm[level].primary); | ||
1171 | break; | ||
1172 | case DRM_PLANE_TYPE_OVERLAY: | ||
1173 | sprite = plane->plane; | ||
1174 | for (level = 0; level < wm_state->num_levels; level++) | 1174 | for (level = 0; level < wm_state->num_levels; level++) |
1175 | wm_state->sr[level].plane = | 1175 | wm_state->sr[level].plane = |
1176 | min(wm_state->sr[level].plane, | 1176 | max(wm_state->sr[level].plane, |
1177 | wm_state->wm[level].sprite[sprite]); | 1177 | wm_state->wm[level].plane[plane->id]); |
1178 | break; | ||
1179 | } | 1178 | } |
1180 | } | 1179 | } |
1181 | 1180 | ||
@@ -1199,17 +1198,23 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) | |||
1199 | int sprite0_start = 0, sprite1_start = 0, fifo_size = 0; | 1198 | int sprite0_start = 0, sprite1_start = 0, fifo_size = 0; |
1200 | 1199 | ||
1201 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | 1200 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
1202 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { | 1201 | switch (plane->id) { |
1203 | WARN_ON(plane->wm.fifo_size != 63); | 1202 | case PLANE_PRIMARY: |
1204 | continue; | ||
1205 | } | ||
1206 | |||
1207 | if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) | ||
1208 | sprite0_start = plane->wm.fifo_size; | 1203 | sprite0_start = plane->wm.fifo_size; |
1209 | else if (plane->plane == 0) | 1204 | break; |
1205 | case PLANE_SPRITE0: | ||
1210 | sprite1_start = sprite0_start + plane->wm.fifo_size; | 1206 | sprite1_start = sprite0_start + plane->wm.fifo_size; |
1211 | else | 1207 | break; |
1208 | case PLANE_SPRITE1: | ||
1212 | fifo_size = sprite1_start + plane->wm.fifo_size; | 1209 | fifo_size = sprite1_start + plane->wm.fifo_size; |
1210 | break; | ||
1211 | case PLANE_CURSOR: | ||
1212 | WARN_ON(plane->wm.fifo_size != 63); | ||
1213 | break; | ||
1214 | default: | ||
1215 | MISSING_CASE(plane->id); | ||
1216 | break; | ||
1217 | } | ||
1213 | } | 1218 | } |
1214 | 1219 | ||
1215 | WARN_ON(fifo_size != 512 - 1); | 1220 | WARN_ON(fifo_size != 512 - 1); |
@@ -1218,6 +1223,8 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) | |||
1218 | pipe_name(crtc->pipe), sprite0_start, | 1223 | pipe_name(crtc->pipe), sprite0_start, |
1219 | sprite1_start, fifo_size); | 1224 | sprite1_start, fifo_size); |
1220 | 1225 | ||
1226 | spin_lock(&dev_priv->wm.dsparb_lock); | ||
1227 | |||
1221 | switch (crtc->pipe) { | 1228 | switch (crtc->pipe) { |
1222 | uint32_t dsparb, dsparb2, dsparb3; | 1229 | uint32_t dsparb, dsparb2, dsparb3; |
1223 | case PIPE_A: | 1230 | case PIPE_A: |
@@ -1274,20 +1281,24 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) | |||
1274 | default: | 1281 | default: |
1275 | break; | 1282 | break; |
1276 | } | 1283 | } |
1284 | |||
1285 | POSTING_READ(DSPARB); | ||
1286 | |||
1287 | spin_unlock(&dev_priv->wm.dsparb_lock); | ||
1277 | } | 1288 | } |
1278 | 1289 | ||
1279 | #undef VLV_FIFO | 1290 | #undef VLV_FIFO |
1280 | 1291 | ||
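[Annotation] The switch above carves one pipe's display FIFO into consecutive ranges, and the WARN_ON that follows pins the total at 512 - 1 entries; the cursor sits on its own fixed 63-entry FIFO and is only sanity-checked. A worked example with hypothetical per-plane sizes:

	/*
	 * Hypothetical fifo_size values: primary = 256, sprite0 = 128,
	 * sprite1 = 127. Then sprite0_start = 256, sprite1_start =
	 * 256 + 128 = 384, and fifo_size = 384 + 127 = 511 = 512 - 1,
	 * satisfying the WARN_ON; cursor stays at its fixed 63.
	 */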
1281 | static void vlv_merge_wm(struct drm_device *dev, | 1292 | static void vlv_merge_wm(struct drm_i915_private *dev_priv, |
1282 | struct vlv_wm_values *wm) | 1293 | struct vlv_wm_values *wm) |
1283 | { | 1294 | { |
1284 | struct intel_crtc *crtc; | 1295 | struct intel_crtc *crtc; |
1285 | int num_active_crtcs = 0; | 1296 | int num_active_crtcs = 0; |
1286 | 1297 | ||
1287 | wm->level = to_i915(dev)->wm.max_level; | 1298 | wm->level = dev_priv->wm.max_level; |
1288 | wm->cxsr = true; | 1299 | wm->cxsr = true; |
1289 | 1300 | ||
1290 | for_each_intel_crtc(dev, crtc) { | 1301 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
1291 | const struct vlv_wm_state *wm_state = &crtc->wm_state; | 1302 | const struct vlv_wm_state *wm_state = &crtc->wm_state; |
1292 | 1303 | ||
1293 | if (!crtc->active) | 1304 | if (!crtc->active) |
@@ -1306,7 +1317,7 @@ static void vlv_merge_wm(struct drm_device *dev, | |||
1306 | if (num_active_crtcs > 1) | 1317 | if (num_active_crtcs > 1) |
1307 | wm->level = VLV_WM_LEVEL_PM2; | 1318 | wm->level = VLV_WM_LEVEL_PM2; |
1308 | 1319 | ||
1309 | for_each_intel_crtc(dev, crtc) { | 1320 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
1310 | struct vlv_wm_state *wm_state = &crtc->wm_state; | 1321 | struct vlv_wm_state *wm_state = &crtc->wm_state; |
1311 | enum pipe pipe = crtc->pipe; | 1322 | enum pipe pipe = crtc->pipe; |
1312 | 1323 | ||
@@ -1317,63 +1328,70 @@ static void vlv_merge_wm(struct drm_device *dev, | |||
1317 | if (wm->cxsr) | 1328 | if (wm->cxsr) |
1318 | wm->sr = wm_state->sr[wm->level]; | 1329 | wm->sr = wm_state->sr[wm->level]; |
1319 | 1330 | ||
1320 | wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2; | 1331 | wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; |
1321 | wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2; | 1332 | wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; |
1322 | wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2; | 1333 | wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; |
1323 | wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2; | 1334 | wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; |
1324 | } | 1335 | } |
1325 | } | 1336 | } |
1326 | 1337 | ||
1338 | static bool is_disabling(int old, int new, int threshold) | ||
1339 | { | ||
1340 | return old >= threshold && new < threshold; | ||
1341 | } | ||
1342 | |||
1343 | static bool is_enabling(int old, int new, int threshold) | ||
1344 | { | ||
1345 | return old < threshold && new >= threshold; | ||
1346 | } | ||
1347 | |||
1327 | static void vlv_update_wm(struct intel_crtc *crtc) | 1348 | static void vlv_update_wm(struct intel_crtc *crtc) |
1328 | { | 1349 | { |
1329 | struct drm_device *dev = crtc->base.dev; | 1350 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1330 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1331 | enum pipe pipe = crtc->pipe; | 1351 | enum pipe pipe = crtc->pipe; |
1332 | struct vlv_wm_values wm = {}; | 1352 | struct vlv_wm_values *old_wm = &dev_priv->wm.vlv; |
1353 | struct vlv_wm_values new_wm = {}; | ||
1333 | 1354 | ||
1334 | vlv_compute_wm(crtc); | 1355 | vlv_compute_wm(crtc); |
1335 | vlv_merge_wm(dev, &wm); | 1356 | vlv_merge_wm(dev_priv, &new_wm); |
1336 | 1357 | ||
1337 | if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) { | 1358 | if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) { |
1338 | /* FIXME should be part of crtc atomic commit */ | 1359 | /* FIXME should be part of crtc atomic commit */ |
1339 | vlv_pipe_set_fifo_size(crtc); | 1360 | vlv_pipe_set_fifo_size(crtc); |
1361 | |||
1340 | return; | 1362 | return; |
1341 | } | 1363 | } |
1342 | 1364 | ||
1343 | if (wm.level < VLV_WM_LEVEL_DDR_DVFS && | 1365 | if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) |
1344 | dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS) | ||
1345 | chv_set_memory_dvfs(dev_priv, false); | 1366 | chv_set_memory_dvfs(dev_priv, false); |
1346 | 1367 | ||
1347 | if (wm.level < VLV_WM_LEVEL_PM5 && | 1368 | if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) |
1348 | dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5) | ||
1349 | chv_set_memory_pm5(dev_priv, false); | 1369 | chv_set_memory_pm5(dev_priv, false); |
1350 | 1370 | ||
1351 | if (!wm.cxsr && dev_priv->wm.vlv.cxsr) | 1371 | if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) |
1352 | intel_set_memory_cxsr(dev_priv, false); | 1372 | _intel_set_memory_cxsr(dev_priv, false); |
1353 | 1373 | ||
1354 | /* FIXME should be part of crtc atomic commit */ | 1374 | /* FIXME should be part of crtc atomic commit */ |
1355 | vlv_pipe_set_fifo_size(crtc); | 1375 | vlv_pipe_set_fifo_size(crtc); |
1356 | 1376 | ||
1357 | vlv_write_wm_values(crtc, &wm); | 1377 | vlv_write_wm_values(dev_priv, &new_wm); |
1358 | 1378 | ||
1359 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " | 1379 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " |
1360 | "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n", | 1380 | "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n", |
1361 | pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor, | 1381 | pipe_name(pipe), new_wm.pipe[pipe].plane[PLANE_PRIMARY], new_wm.pipe[pipe].plane[PLANE_CURSOR], |
1362 | wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1], | 1382 | new_wm.pipe[pipe].plane[PLANE_SPRITE0], new_wm.pipe[pipe].plane[PLANE_SPRITE1], |
1363 | wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr); | 1383 | new_wm.sr.plane, new_wm.sr.cursor, new_wm.level, new_wm.cxsr); |
1364 | 1384 | ||
1365 | if (wm.cxsr && !dev_priv->wm.vlv.cxsr) | 1385 | if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) |
1366 | intel_set_memory_cxsr(dev_priv, true); | 1386 | _intel_set_memory_cxsr(dev_priv, true); |
1367 | 1387 | ||
1368 | if (wm.level >= VLV_WM_LEVEL_PM5 && | 1388 | if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) |
1369 | dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5) | ||
1370 | chv_set_memory_pm5(dev_priv, true); | 1389 | chv_set_memory_pm5(dev_priv, true); |
1371 | 1390 | ||
1372 | if (wm.level >= VLV_WM_LEVEL_DDR_DVFS && | 1391 | if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) |
1373 | dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS) | ||
1374 | chv_set_memory_dvfs(dev_priv, true); | 1392 | chv_set_memory_dvfs(dev_priv, true); |
1375 | 1393 | ||
1376 | dev_priv->wm.vlv = wm; | 1394 | *old_wm = new_wm; |
1377 | } | 1395 | } |
1378 | 1396 | ||
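[Annotation] The is_disabling()/is_enabling() helpers above replace the old pairwise comparisons in vlv_update_wm(). A quick illustration, assuming the usual VLV level ordering (PM2 = 0 < PM5 = 1 < DDR_DVFS = 2, an assumption from context):

	/* old level DDR_DVFS (2), new level PM2 (0): */
	is_disabling(2, 0, VLV_WM_LEVEL_DDR_DVFS);	/* true: DVFS must go off */
	is_disabling(2, 0, VLV_WM_LEVEL_PM5);		/* true: PM5 must go off  */
	is_enabling(2, 0, VLV_WM_LEVEL_PM5);		/* false: nothing enabled */
	is_disabling(old_wm->cxsr, new_wm.cxsr, true);	/* bools work too: true
							 * acts as threshold 1   */

The disabling calls run before the registers are written and the enabling calls after, so the stricter setting is always in force during the transition.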
1379 | #define single_plane_enabled(mask) is_power_of_2(mask) | 1397 | #define single_plane_enabled(mask) is_power_of_2(mask) |
@@ -2870,28 +2888,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev) | |||
2870 | #define SKL_SAGV_BLOCK_TIME 30 /* µs */ | 2888 | #define SKL_SAGV_BLOCK_TIME 30 /* µs */ |
2871 | 2889 | ||
2872 | /* | 2890 | /* |
2873 | * Return the index of a plane in the SKL DDB and wm result arrays. Primary | ||
2874 | * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and | ||
2875 | * other universal planes are in indices 1..n. Note that this may leave unused | ||
2876 | * indices between the top "sprite" plane and the cursor. | ||
2877 | */ | ||
2878 | static int | ||
2879 | skl_wm_plane_id(const struct intel_plane *plane) | ||
2880 | { | ||
2881 | switch (plane->base.type) { | ||
2882 | case DRM_PLANE_TYPE_PRIMARY: | ||
2883 | return 0; | ||
2884 | case DRM_PLANE_TYPE_CURSOR: | ||
2885 | return PLANE_CURSOR; | ||
2886 | case DRM_PLANE_TYPE_OVERLAY: | ||
2887 | return plane->plane + 1; | ||
2888 | default: | ||
2889 | MISSING_CASE(plane->base.type); | ||
2890 | return plane->plane; | ||
2891 | } | ||
2892 | } | ||
2893 | |||
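[Annotation] skl_wm_plane_id() and its slot-juggling comment can go because the series keys all of these arrays off plane->id instead. For reference, the enum added earlier in the series looks roughly like this (reconstructed from the identifiers used throughout this diff):

	enum plane_id {
		PLANE_PRIMARY,
		PLANE_SPRITE0,
		PLANE_SPRITE1,
		PLANE_CURSOR,
		I915_MAX_PLANES,
	};

With a fixed per-pipe identifier there is no longer any type-based mapping to get wrong, and the unused-index gap described in the removed comment disappears.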
2894 | /* | ||
2895 | * FIXME: We still don't have the proper code to detect if we need to apply the WA, | 2891 | * FIXME: We still don't have the proper code to detect if we need to apply the WA, |
2896 | * so assume we'll always need it in order to avoid underruns. | 2892 | * so assume we'll always need it in order to avoid underruns. |
2897 | */ | 2893 | */ |
@@ -3013,7 +3009,6 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) | |||
3013 | struct intel_crtc *crtc; | 3009 | struct intel_crtc *crtc; |
3014 | struct intel_plane *plane; | 3010 | struct intel_plane *plane; |
3015 | struct intel_crtc_state *cstate; | 3011 | struct intel_crtc_state *cstate; |
3016 | struct skl_plane_wm *wm; | ||
3017 | enum pipe pipe; | 3012 | enum pipe pipe; |
3018 | int level, latency; | 3013 | int level, latency; |
3019 | 3014 | ||
@@ -3040,7 +3035,8 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) | |||
3040 | return false; | 3035 | return false; |
3041 | 3036 | ||
3042 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | 3037 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
3043 | wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)]; | 3038 | struct skl_plane_wm *wm = |
3039 | &cstate->wm.skl.optimal.planes[plane->id]; | ||
3044 | 3040 | ||
3045 | /* Skip this plane if it's not enabled */ | 3041 | /* Skip this plane if it's not enabled */ |
3046 | if (!wm->wm[0].plane_en) | 3042 | if (!wm->wm[0].plane_en) |
@@ -3143,28 +3139,29 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) | |||
3143 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | 3139 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, |
3144 | struct skl_ddb_allocation *ddb /* out */) | 3140 | struct skl_ddb_allocation *ddb /* out */) |
3145 | { | 3141 | { |
3146 | enum pipe pipe; | 3142 | struct intel_crtc *crtc; |
3147 | int plane; | ||
3148 | u32 val; | ||
3149 | 3143 | ||
3150 | memset(ddb, 0, sizeof(*ddb)); | 3144 | memset(ddb, 0, sizeof(*ddb)); |
3151 | 3145 | ||
3152 | for_each_pipe(dev_priv, pipe) { | 3146 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
3153 | enum intel_display_power_domain power_domain; | 3147 | enum intel_display_power_domain power_domain; |
3148 | enum plane_id plane_id; | ||
3149 | enum pipe pipe = crtc->pipe; | ||
3154 | 3150 | ||
3155 | power_domain = POWER_DOMAIN_PIPE(pipe); | 3151 | power_domain = POWER_DOMAIN_PIPE(pipe); |
3156 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) | 3152 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
3157 | continue; | 3153 | continue; |
3158 | 3154 | ||
3159 | for_each_universal_plane(dev_priv, pipe, plane) { | 3155 | for_each_plane_id_on_crtc(crtc, plane_id) { |
3160 | val = I915_READ(PLANE_BUF_CFG(pipe, plane)); | 3156 | u32 val; |
3161 | skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], | ||
3162 | val); | ||
3163 | } | ||
3164 | 3157 | ||
3165 | val = I915_READ(CUR_BUF_CFG(pipe)); | 3158 | if (plane_id != PLANE_CURSOR) |
3166 | skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], | 3159 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); |
3167 | val); | 3160 | else |
3161 | val = I915_READ(CUR_BUF_CFG(pipe)); | ||
3162 | |||
3163 | skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val); | ||
3164 | } | ||
3168 | 3165 | ||
3169 | intel_display_power_put(dev_priv, power_domain); | 3166 | intel_display_power_put(dev_priv, power_domain); |
3170 | } | 3167 | } |
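[Annotation] Every plane's allocation now goes through the one helper named in the hunk header. Its body is outside this hunk; the read-back sites imply a packed start/end pair, roughly as below (a reconstruction, not checked against the register spec):

	static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
	{
		entry->start = reg & 0x3ff;		/* first DDB block, 10 bits */
		entry->end = (reg >> 16) & 0x3ff;	/* last DDB block, 10 bits  */
		if (entry->end)
			entry->end += 1;	/* stored exclusive; 0 = disabled */
	}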
@@ -3269,30 +3266,28 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, | |||
3269 | struct drm_crtc_state *cstate = &intel_cstate->base; | 3266 | struct drm_crtc_state *cstate = &intel_cstate->base; |
3270 | struct drm_atomic_state *state = cstate->state; | 3267 | struct drm_atomic_state *state = cstate->state; |
3271 | struct drm_plane *plane; | 3268 | struct drm_plane *plane; |
3272 | const struct intel_plane *intel_plane; | ||
3273 | const struct drm_plane_state *pstate; | 3269 | const struct drm_plane_state *pstate; |
3274 | unsigned int rate, total_data_rate = 0; | 3270 | unsigned int total_data_rate = 0; |
3275 | int id; | ||
3276 | 3271 | ||
3277 | if (WARN_ON(!state)) | 3272 | if (WARN_ON(!state)) |
3278 | return 0; | 3273 | return 0; |
3279 | 3274 | ||
3280 | /* Calculate and cache data rate for each plane */ | 3275 | /* Calculate and cache data rate for each plane */ |
3281 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { | 3276 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { |
3282 | id = skl_wm_plane_id(to_intel_plane(plane)); | 3277 | enum plane_id plane_id = to_intel_plane(plane)->id; |
3283 | intel_plane = to_intel_plane(plane); | 3278 | unsigned int rate; |
3284 | 3279 | ||
3285 | /* packed/uv */ | 3280 | /* packed/uv */ |
3286 | rate = skl_plane_relative_data_rate(intel_cstate, | 3281 | rate = skl_plane_relative_data_rate(intel_cstate, |
3287 | pstate, 0); | 3282 | pstate, 0); |
3288 | plane_data_rate[id] = rate; | 3283 | plane_data_rate[plane_id] = rate; |
3289 | 3284 | ||
3290 | total_data_rate += rate; | 3285 | total_data_rate += rate; |
3291 | 3286 | ||
3292 | /* y-plane */ | 3287 | /* y-plane */ |
3293 | rate = skl_plane_relative_data_rate(intel_cstate, | 3288 | rate = skl_plane_relative_data_rate(intel_cstate, |
3294 | pstate, 1); | 3289 | pstate, 1); |
3295 | plane_y_data_rate[id] = rate; | 3290 | plane_y_data_rate[plane_id] = rate; |
3296 | 3291 | ||
3297 | total_data_rate += rate; | 3292 | total_data_rate += rate; |
3298 | } | 3293 | } |
@@ -3371,17 +3366,16 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active, | |||
3371 | struct drm_plane *plane; | 3366 | struct drm_plane *plane; |
3372 | 3367 | ||
3373 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { | 3368 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { |
3374 | struct intel_plane *intel_plane = to_intel_plane(plane); | 3369 | enum plane_id plane_id = to_intel_plane(plane)->id; |
3375 | int id = skl_wm_plane_id(intel_plane); | ||
3376 | 3370 | ||
3377 | if (id == PLANE_CURSOR) | 3371 | if (plane_id == PLANE_CURSOR) |
3378 | continue; | 3372 | continue; |
3379 | 3373 | ||
3380 | if (!pstate->visible) | 3374 | if (!pstate->visible) |
3381 | continue; | 3375 | continue; |
3382 | 3376 | ||
3383 | minimum[id] = skl_ddb_min_alloc(pstate, 0); | 3377 | minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); |
3384 | y_minimum[id] = skl_ddb_min_alloc(pstate, 1); | 3378 | y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); |
3385 | } | 3379 | } |
3386 | 3380 | ||
3387 | minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); | 3381 | minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); |
@@ -3401,8 +3395,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
3401 | uint16_t minimum[I915_MAX_PLANES] = {}; | 3395 | uint16_t minimum[I915_MAX_PLANES] = {}; |
3402 | uint16_t y_minimum[I915_MAX_PLANES] = {}; | 3396 | uint16_t y_minimum[I915_MAX_PLANES] = {}; |
3403 | unsigned int total_data_rate; | 3397 | unsigned int total_data_rate; |
3398 | enum plane_id plane_id; | ||
3404 | int num_active; | 3399 | int num_active; |
3405 | int id, i; | ||
3406 | unsigned plane_data_rate[I915_MAX_PLANES] = {}; | 3400 | unsigned plane_data_rate[I915_MAX_PLANES] = {}; |
3407 | unsigned plane_y_data_rate[I915_MAX_PLANES] = {}; | 3401 | unsigned plane_y_data_rate[I915_MAX_PLANES] = {}; |
3408 | 3402 | ||
@@ -3433,9 +3427,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
3433 | * proportional to the data rate. | 3427 | * proportional to the data rate. |
3434 | */ | 3428 | */ |
3435 | 3429 | ||
3436 | for (i = 0; i < I915_MAX_PLANES; i++) { | 3430 | for_each_plane_id_on_crtc(intel_crtc, plane_id) { |
3437 | alloc_size -= minimum[i]; | 3431 | alloc_size -= minimum[plane_id]; |
3438 | alloc_size -= y_minimum[i]; | 3432 | alloc_size -= y_minimum[plane_id]; |
3439 | } | 3433 | } |
3440 | 3434 | ||
3441 | ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; | 3435 | ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; |
@@ -3454,28 +3448,28 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
3454 | return 0; | 3448 | return 0; |
3455 | 3449 | ||
3456 | start = alloc->start; | 3450 | start = alloc->start; |
3457 | for (id = 0; id < I915_MAX_PLANES; id++) { | 3451 | for_each_plane_id_on_crtc(intel_crtc, plane_id) { |
3458 | unsigned int data_rate, y_data_rate; | 3452 | unsigned int data_rate, y_data_rate; |
3459 | uint16_t plane_blocks, y_plane_blocks = 0; | 3453 | uint16_t plane_blocks, y_plane_blocks = 0; |
3460 | 3454 | ||
3461 | if (id == PLANE_CURSOR) | 3455 | if (plane_id == PLANE_CURSOR) |
3462 | continue; | 3456 | continue; |
3463 | 3457 | ||
3464 | data_rate = plane_data_rate[id]; | 3458 | data_rate = plane_data_rate[plane_id]; |
3465 | 3459 | ||
3466 | /* | 3460 | /* |
3467 | * allocation for (packed formats) or (uv-plane part of planar format): | 3461 | * allocation for (packed formats) or (uv-plane part of planar format): |
3468 | * promote the expression to 64 bits to avoid overflowing, the | 3462 | * promote the expression to 64 bits to avoid overflowing, the |
3469 | * result is < available as data_rate / total_data_rate < 1 | 3463 | * result is < available as data_rate / total_data_rate < 1 |
3470 | */ | 3464 | */ |
3471 | plane_blocks = minimum[id]; | 3465 | plane_blocks = minimum[plane_id]; |
3472 | plane_blocks += div_u64((uint64_t)alloc_size * data_rate, | 3466 | plane_blocks += div_u64((uint64_t)alloc_size * data_rate, |
3473 | total_data_rate); | 3467 | total_data_rate); |
3474 | 3468 | ||
3475 | /* Leave disabled planes at (0,0) */ | 3469 | /* Leave disabled planes at (0,0) */ |
3476 | if (data_rate) { | 3470 | if (data_rate) { |
3477 | ddb->plane[pipe][id].start = start; | 3471 | ddb->plane[pipe][plane_id].start = start; |
3478 | ddb->plane[pipe][id].end = start + plane_blocks; | 3472 | ddb->plane[pipe][plane_id].end = start + plane_blocks; |
3479 | } | 3473 | } |
3480 | 3474 | ||
3481 | start += plane_blocks; | 3475 | start += plane_blocks; |
@@ -3483,15 +3477,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
3483 | /* | 3477 | /* |
3484 | * allocation for y_plane part of planar format: | 3478 | * allocation for y_plane part of planar format: |
3485 | */ | 3479 | */ |
3486 | y_data_rate = plane_y_data_rate[id]; | 3480 | y_data_rate = plane_y_data_rate[plane_id]; |
3487 | 3481 | ||
3488 | y_plane_blocks = y_minimum[id]; | 3482 | y_plane_blocks = y_minimum[plane_id]; |
3489 | y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, | 3483 | y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, |
3490 | total_data_rate); | 3484 | total_data_rate); |
3491 | 3485 | ||
3492 | if (y_data_rate) { | 3486 | if (y_data_rate) { |
3493 | ddb->y_plane[pipe][id].start = start; | 3487 | ddb->y_plane[pipe][plane_id].start = start; |
3494 | ddb->y_plane[pipe][id].end = start + y_plane_blocks; | 3488 | ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks; |
3495 | } | 3489 | } |
3496 | 3490 | ||
3497 | start += y_plane_blocks; | 3491 | start += y_plane_blocks; |
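[Annotation] A worked example of the proportional split above, with hypothetical numbers: alloc_size = 400 blocks remain after the minimums, this plane contributes data_rate = 100000 of total_data_rate = 400000, and minimum[plane_id] = 8. The 64-bit promotion is not cosmetic; alloc_size * data_rate can exceed 32 bits for large, high-refresh framebuffers.

	plane_blocks = 8;
	plane_blocks += div_u64((uint64_t)400 * 100000, 400000);	/* += 100 */
	/* -> 108 blocks; since data_rate / total_data_rate < 1 for every
	 * plane, the shares can never oversubscribe alloc_size. */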
@@ -3506,32 +3500,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
3506 | * should allow pixel_rate up to ~2 GHz which seems sufficient since max | 3500 | * should allow pixel_rate up to ~2 GHz which seems sufficient since max |
3507 | * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. | 3501 | * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. |
3508 | */ | 3502 | */ |
3509 | static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) | 3503 | static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, |
3504 | uint32_t latency) | ||
3510 | { | 3505 | { |
3511 | uint32_t wm_intermediate_val, ret; | 3506 | uint32_t wm_intermediate_val; |
3507 | uint_fixed_16_16_t ret; | ||
3512 | 3508 | ||
3513 | if (latency == 0) | 3509 | if (latency == 0) |
3514 | return UINT_MAX; | 3510 | return FP_16_16_MAX; |
3515 | |||
3516 | wm_intermediate_val = latency * pixel_rate * cpp / 512; | ||
3517 | ret = DIV_ROUND_UP(wm_intermediate_val, 1000); | ||
3518 | 3511 | ||
3512 | wm_intermediate_val = latency * pixel_rate * cpp; | ||
3513 | ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512); | ||
3519 | return ret; | 3514 | return ret; |
3520 | } | 3515 | } |
3521 | 3516 | ||
3522 | static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, | 3517 | static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, |
3523 | uint32_t latency, uint32_t plane_blocks_per_line) | 3518 | uint32_t pipe_htotal, |
3519 | uint32_t latency, | ||
3520 | uint_fixed_16_16_t plane_blocks_per_line) | ||
3524 | { | 3521 | { |
3525 | uint32_t ret; | ||
3526 | uint32_t wm_intermediate_val; | 3522 | uint32_t wm_intermediate_val; |
3523 | uint_fixed_16_16_t ret; | ||
3527 | 3524 | ||
3528 | if (latency == 0) | 3525 | if (latency == 0) |
3529 | return UINT_MAX; | 3526 | return FP_16_16_MAX; |
3530 | 3527 | ||
3531 | wm_intermediate_val = latency * pixel_rate; | 3528 | wm_intermediate_val = latency * pixel_rate; |
3532 | ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * | 3529 | wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, |
3533 | plane_blocks_per_line; | 3530 | pipe_htotal * 1000); |
3534 | 3531 | ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line); | |
3535 | return ret; | 3532 | return ret; |
3536 | } | 3533 | } |
3537 | 3534 | ||
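[Annotation] uint_fixed_16_16_t and its helpers are added to i915_drv.h earlier in this series; only their call sites appear here. A minimal sketch of the operations these two functions rely on, with names taken from the hunks and bodies reconstructed (the real helpers presumably also WARN or clamp on overflow):

	typedef struct { uint32_t val; } uint_fixed_16_16_t;	/* Q16.16 */

	#define FP_16_16_MAX ((uint_fixed_16_16_t) { .val = UINT_MAX })

	static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
	{
		return (uint_fixed_16_16_t) { .val = val << 16 };
	}

	static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
	{
		return DIV_ROUND_UP(fp.val, 1 << 16);
	}

	static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t a,
							 uint_fixed_16_16_t b)
	{
		return a.val < b.val ? a : b;	/* max_fixed_16_16() mirrors this */
	}

	/* (val << 16) / d, rounded up; the _u64 variant shifts in 64 bits so
	 * a large numerator (latency * pixel_rate * cpp) survives. A 32-bit
	 * fixed_16_16_div_round_up() handles small numerators like
	 * interm_pbpl below. */
	static inline uint_fixed_16_16_t
	fixed_16_16_div_round_up_u64(uint32_t val, uint32_t d)
	{
		return (uint_fixed_16_16_t) {
			.val = DIV_ROUND_UP_ULL((uint64_t)val << 16, d)
		};
	}

	static inline uint_fixed_16_16_t
	mul_u32_fixed_16_16(uint32_t val, uint_fixed_16_16_t mul)
	{
		return (uint_fixed_16_16_t) { .val = ((uint64_t)val * mul.val) >> 16 };
	}

Note the res_lines computation further down: it divides the raw .val fields of two fixed-point values directly, which works because the two 2^16 scale factors cancel, leaving a plain integer ratio.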
@@ -3571,24 +3568,36 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
3571 | struct drm_plane_state *pstate = &intel_pstate->base; | 3568 | struct drm_plane_state *pstate = &intel_pstate->base; |
3572 | struct drm_framebuffer *fb = pstate->fb; | 3569 | struct drm_framebuffer *fb = pstate->fb; |
3573 | uint32_t latency = dev_priv->wm.skl_latency[level]; | 3570 | uint32_t latency = dev_priv->wm.skl_latency[level]; |
3574 | uint32_t method1, method2; | 3571 | uint_fixed_16_16_t method1, method2; |
3575 | uint32_t plane_bytes_per_line, plane_blocks_per_line; | 3572 | uint_fixed_16_16_t plane_blocks_per_line; |
3573 | uint_fixed_16_16_t selected_result; | ||
3574 | uint32_t interm_pbpl; | ||
3575 | uint32_t plane_bytes_per_line; | ||
3576 | uint32_t res_blocks, res_lines; | 3576 | uint32_t res_blocks, res_lines; |
3577 | uint32_t selected_result; | ||
3578 | uint8_t cpp; | 3577 | uint8_t cpp; |
3579 | uint32_t width = 0, height = 0; | 3578 | uint32_t width = 0, height = 0; |
3580 | uint32_t plane_pixel_rate; | 3579 | uint32_t plane_pixel_rate; |
3581 | uint32_t y_tile_minimum, y_min_scanlines; | 3580 | uint_fixed_16_16_t y_tile_minimum; |
3581 | uint32_t y_min_scanlines; | ||
3582 | struct intel_atomic_state *state = | 3582 | struct intel_atomic_state *state = |
3583 | to_intel_atomic_state(cstate->base.state); | 3583 | to_intel_atomic_state(cstate->base.state); |
3584 | bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); | 3584 | bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); |
3585 | bool y_tiled, x_tiled; | ||
3585 | 3586 | ||
3586 | if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { | 3587 | if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { |
3587 | *enabled = false; | 3588 | *enabled = false; |
3588 | return 0; | 3589 | return 0; |
3589 | } | 3590 | } |
3590 | 3591 | ||
3591 | if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED) | 3592 | y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || |
3593 | fb->modifier == I915_FORMAT_MOD_Yf_TILED; | ||
3594 | x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; | ||
3595 | |||
3596 | /* Display WA #1141: kbl. */ | ||
3597 | if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled) | ||
3598 | latency += 4; | ||
3599 | |||
3600 | if (apply_memory_bw_wa && x_tiled) | ||
3592 | latency += 15; | 3601 | latency += 15; |
3593 | 3602 | ||
3594 | width = drm_rect_width(&intel_pstate->base.src) >> 16; | 3603 | width = drm_rect_width(&intel_pstate->base.src) >> 16; |
@@ -3627,16 +3636,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
3627 | y_min_scanlines *= 2; | 3636 | y_min_scanlines *= 2; |
3628 | 3637 | ||
3629 | plane_bytes_per_line = width * cpp; | 3638 | plane_bytes_per_line = width * cpp; |
3630 | if (fb->modifier == I915_FORMAT_MOD_Y_TILED || | 3639 | if (y_tiled) { |
3631 | fb->modifier == I915_FORMAT_MOD_Yf_TILED) { | 3640 | interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * |
3641 | y_min_scanlines, 512); | ||
3632 | plane_blocks_per_line = | 3642 | plane_blocks_per_line = |
3633 | DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); | 3643 | fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines); |
3634 | plane_blocks_per_line /= y_min_scanlines; | 3644 | } else if (x_tiled) { |
3635 | } else if (fb->modifier == DRM_FORMAT_MOD_NONE) { | 3645 | interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512); |
3636 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) | 3646 | plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); |
3637 | + 1; | ||
3638 | } else { | 3647 | } else { |
3639 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | 3648 | interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; |
3649 | plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); | ||
3640 | } | 3650 | } |
3641 | 3651 | ||
3642 | method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); | 3652 | method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); |
@@ -3645,28 +3655,29 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
3645 | latency, | 3655 | latency, |
3646 | plane_blocks_per_line); | 3656 | plane_blocks_per_line); |
3647 | 3657 | ||
3648 | y_tile_minimum = plane_blocks_per_line * y_min_scanlines; | 3658 | y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines, |
3659 | plane_blocks_per_line); | ||
3649 | 3660 | ||
3650 | if (fb->modifier == I915_FORMAT_MOD_Y_TILED || | 3661 | if (y_tiled) { |
3651 | fb->modifier == I915_FORMAT_MOD_Yf_TILED) { | 3662 | selected_result = max_fixed_16_16(method2, y_tile_minimum); |
3652 | selected_result = max(method2, y_tile_minimum); | ||
3653 | } else { | 3663 | } else { |
3654 | if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && | 3664 | if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && |
3655 | (plane_bytes_per_line / 512 < 1)) | 3665 | (plane_bytes_per_line / 512 < 1)) |
3656 | selected_result = method2; | 3666 | selected_result = method2; |
3657 | else if ((ddb_allocation / plane_blocks_per_line) >= 1) | 3667 | else if ((ddb_allocation / |
3658 | selected_result = min(method1, method2); | 3668 | fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) |
3669 | selected_result = min_fixed_16_16(method1, method2); | ||
3659 | else | 3670 | else |
3660 | selected_result = method1; | 3671 | selected_result = method1; |
3661 | } | 3672 | } |
3662 | 3673 | ||
3663 | res_blocks = selected_result + 1; | 3674 | res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1; |
3664 | res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); | 3675 | res_lines = DIV_ROUND_UP(selected_result.val, |
3676 | plane_blocks_per_line.val); | ||
3665 | 3677 | ||
3666 | if (level >= 1 && level <= 7) { | 3678 | if (level >= 1 && level <= 7) { |
3667 | if (fb->modifier == I915_FORMAT_MOD_Y_TILED || | 3679 | if (y_tiled) { |
3668 | fb->modifier == I915_FORMAT_MOD_Yf_TILED) { | 3680 | res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum); |
3669 | res_blocks += y_tile_minimum; | ||
3670 | res_lines += y_min_scanlines; | 3681 | res_lines += y_min_scanlines; |
3671 | } else { | 3682 | } else { |
3672 | res_blocks++; | 3683 | res_blocks++; |
@@ -3683,12 +3694,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
3683 | if (level) { | 3694 | if (level) { |
3684 | return 0; | 3695 | return 0; |
3685 | } else { | 3696 | } else { |
3697 | struct drm_plane *plane = pstate->plane; | ||
3698 | |||
3686 | DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); | 3699 | DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); |
3687 | DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n", | 3700 | DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", |
3688 | to_intel_crtc(cstate->base.crtc)->pipe, | 3701 | plane->base.id, plane->name, |
3689 | skl_wm_plane_id(to_intel_plane(pstate->plane)), | ||
3690 | res_blocks, ddb_allocation, res_lines); | 3702 | res_blocks, ddb_allocation, res_lines); |
3691 | |||
3692 | return -EINVAL; | 3703 | return -EINVAL; |
3693 | } | 3704 | } |
3694 | } | 3705 | } |
@@ -3715,7 +3726,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, | |||
3715 | uint16_t ddb_blocks; | 3726 | uint16_t ddb_blocks; |
3716 | enum pipe pipe = intel_crtc->pipe; | 3727 | enum pipe pipe = intel_crtc->pipe; |
3717 | int ret; | 3728 | int ret; |
3718 | int i = skl_wm_plane_id(intel_plane); | ||
3719 | 3729 | ||
3720 | if (state) | 3730 | if (state) |
3721 | intel_pstate = | 3731 | intel_pstate = |
@@ -3738,7 +3748,7 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, | |||
3738 | 3748 | ||
3739 | WARN_ON(!intel_pstate->base.fb); | 3749 | WARN_ON(!intel_pstate->base.fb); |
3740 | 3750 | ||
3741 | ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); | 3751 | ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]); |
3742 | 3752 | ||
3743 | ret = skl_compute_plane_wm(dev_priv, | 3753 | ret = skl_compute_plane_wm(dev_priv, |
3744 | cstate, | 3754 | cstate, |
@@ -3757,7 +3767,10 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, | |||
3757 | static uint32_t | 3767 | static uint32_t |
3758 | skl_compute_linetime_wm(struct intel_crtc_state *cstate) | 3768 | skl_compute_linetime_wm(struct intel_crtc_state *cstate) |
3759 | { | 3769 | { |
3770 | struct drm_atomic_state *state = cstate->base.state; | ||
3771 | struct drm_i915_private *dev_priv = to_i915(state->dev); | ||
3760 | uint32_t pixel_rate; | 3772 | uint32_t pixel_rate; |
3773 | uint32_t linetime_wm; | ||
3761 | 3774 | ||
3762 | if (!cstate->base.active) | 3775 | if (!cstate->base.active) |
3763 | return 0; | 3776 | return 0; |
@@ -3767,8 +3780,14 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) | |||
3767 | if (WARN_ON(pixel_rate == 0)) | 3780 | if (WARN_ON(pixel_rate == 0)) |
3768 | return 0; | 3781 | return 0; |
3769 | 3782 | ||
3770 | return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, | 3783 | linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * |
3771 | pixel_rate); | 3784 | 1000, pixel_rate); |
3785 | |||
3786 | /* Display WA #1135: bxt. */ | ||
3787 | if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled) | ||
3788 | linetime_wm = DIV_ROUND_UP(linetime_wm, 2); | ||
3789 | |||
3790 | return linetime_wm; | ||
3772 | } | 3791 | } |
3773 | 3792 | ||
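[Annotation] Worked numbers for the formula above: with crtc_htotal in pixels and pixel_rate in kHz, 8 * htotal * 1000 / pixel_rate gives the scanline time in eighths of a microsecond, which appears to be the unit PIPE_WM_LINETIME expects. For a 1080p-like timing:

	/* htotal = 2200 pixels, pixel_rate = 148500 kHz */
	linetime_wm = DIV_ROUND_UP(8 * 2200 * 1000, 148500);	/* = 119 */
	/* 119 / 8 ~= 14.9 us per line; WA #1135 then halves the value
	 * on Broxton when IPC is enabled. */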
3774 | static void skl_compute_transition_wm(struct intel_crtc_state *cstate, | 3793 | static void skl_compute_transition_wm(struct intel_crtc_state *cstate, |
@@ -3801,7 +3820,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, | |||
3801 | for_each_intel_plane_mask(&dev_priv->drm, | 3820 | for_each_intel_plane_mask(&dev_priv->drm, |
3802 | intel_plane, | 3821 | intel_plane, |
3803 | cstate->base.plane_mask) { | 3822 | cstate->base.plane_mask) { |
3804 | wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)]; | 3823 | wm = &pipe_wm->planes[intel_plane->id]; |
3805 | 3824 | ||
3806 | for (level = 0; level <= max_level; level++) { | 3825 | for (level = 0; level <= max_level; level++) { |
3807 | ret = skl_compute_wm_level(dev_priv, ddb, cstate, | 3826 | ret = skl_compute_wm_level(dev_priv, ddb, cstate, |
@@ -3845,7 +3864,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, | |||
3845 | static void skl_write_plane_wm(struct intel_crtc *intel_crtc, | 3864 | static void skl_write_plane_wm(struct intel_crtc *intel_crtc, |
3846 | const struct skl_plane_wm *wm, | 3865 | const struct skl_plane_wm *wm, |
3847 | const struct skl_ddb_allocation *ddb, | 3866 | const struct skl_ddb_allocation *ddb, |
3848 | int plane) | 3867 | enum plane_id plane_id) |
3849 | { | 3868 | { |
3850 | struct drm_crtc *crtc = &intel_crtc->base; | 3869 | struct drm_crtc *crtc = &intel_crtc->base; |
3851 | struct drm_device *dev = crtc->dev; | 3870 | struct drm_device *dev = crtc->dev; |
@@ -3854,16 +3873,16 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, | |||
3854 | enum pipe pipe = intel_crtc->pipe; | 3873 | enum pipe pipe = intel_crtc->pipe; |
3855 | 3874 | ||
3856 | for (level = 0; level <= max_level; level++) { | 3875 | for (level = 0; level <= max_level; level++) { |
3857 | skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level), | 3876 | skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), |
3858 | &wm->wm[level]); | 3877 | &wm->wm[level]); |
3859 | } | 3878 | } |
3860 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane), | 3879 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), |
3861 | &wm->trans_wm); | 3880 | &wm->trans_wm); |
3862 | 3881 | ||
3863 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane), | 3882 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), |
3864 | &ddb->plane[pipe][plane]); | 3883 | &ddb->plane[pipe][plane_id]); |
3865 | skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane), | 3884 | skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id), |
3866 | &ddb->y_plane[pipe][plane]); | 3885 | &ddb->y_plane[pipe][plane_id]); |
3867 | } | 3886 | } |
3868 | 3887 | ||
3869 | static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, | 3888 | static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, |
@@ -3968,17 +3987,16 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) | |||
3968 | struct drm_plane_state *plane_state; | 3987 | struct drm_plane_state *plane_state; |
3969 | struct drm_plane *plane; | 3988 | struct drm_plane *plane; |
3970 | enum pipe pipe = intel_crtc->pipe; | 3989 | enum pipe pipe = intel_crtc->pipe; |
3971 | int id; | ||
3972 | 3990 | ||
3973 | WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc)); | 3991 | WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc)); |
3974 | 3992 | ||
3975 | drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { | 3993 | drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { |
3976 | id = skl_wm_plane_id(to_intel_plane(plane)); | 3994 | enum plane_id plane_id = to_intel_plane(plane)->id; |
3977 | 3995 | ||
3978 | if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id], | 3996 | if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], |
3979 | &new_ddb->plane[pipe][id]) && | 3997 | &new_ddb->plane[pipe][plane_id]) && |
3980 | skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id], | 3998 | skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id], |
3981 | &new_ddb->y_plane[pipe][id])) | 3999 | &new_ddb->y_plane[pipe][plane_id])) |
3982 | continue; | 4000 | continue; |
3983 | 4001 | ||
3984 | plane_state = drm_atomic_get_plane_state(state, plane); | 4002 | plane_state = drm_atomic_get_plane_state(state, plane); |
@@ -4090,7 +4108,6 @@ skl_print_wm_changes(const struct drm_atomic_state *state) | |||
4090 | const struct intel_plane *intel_plane; | 4108 | const struct intel_plane *intel_plane; |
4091 | const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb; | 4109 | const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb; |
4092 | const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; | 4110 | const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; |
4093 | int id; | ||
4094 | int i; | 4111 | int i; |
4095 | 4112 | ||
4096 | for_each_crtc_in_state(state, crtc, cstate, i) { | 4113 | for_each_crtc_in_state(state, crtc, cstate, i) { |
@@ -4098,11 +4115,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state) | |||
4098 | enum pipe pipe = intel_crtc->pipe; | 4115 | enum pipe pipe = intel_crtc->pipe; |
4099 | 4116 | ||
4100 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | 4117 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { |
4118 | enum plane_id plane_id = intel_plane->id; | ||
4101 | const struct skl_ddb_entry *old, *new; | 4119 | const struct skl_ddb_entry *old, *new; |
4102 | 4120 | ||
4103 | id = skl_wm_plane_id(intel_plane); | 4121 | old = &old_ddb->plane[pipe][plane_id]; |
4104 | old = &old_ddb->plane[pipe][id]; | 4122 | new = &new_ddb->plane[pipe][plane_id]; |
4105 | new = &new_ddb->plane[pipe][id]; | ||
4106 | 4123 | ||
4107 | if (skl_ddb_entry_equal(old, new)) | 4124 | if (skl_ddb_entry_equal(old, new)) |
4108 | continue; | 4125 | continue; |
@@ -4192,17 +4209,21 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, | |||
4192 | struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; | 4209 | struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; |
4193 | const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; | 4210 | const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; |
4194 | enum pipe pipe = crtc->pipe; | 4211 | enum pipe pipe = crtc->pipe; |
4195 | int plane; | 4212 | enum plane_id plane_id; |
4196 | 4213 | ||
4197 | if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) | 4214 | if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) |
4198 | return; | 4215 | return; |
4199 | 4216 | ||
4200 | I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); | 4217 | I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); |
4201 | 4218 | ||
4202 | for_each_universal_plane(dev_priv, pipe, plane) | 4219 | for_each_plane_id_on_crtc(crtc, plane_id) { |
4203 | skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane); | 4220 | if (plane_id != PLANE_CURSOR) |
4204 | 4221 | skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], | |
4205 | skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb); | 4222 | ddb, plane_id); |
4223 | else | ||
4224 | skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id], | ||
4225 | ddb); | ||
4226 | } | ||
4206 | } | 4227 | } |
4207 | 4228 | ||
4208 | static void skl_initial_wm(struct intel_atomic_state *state, | 4229 | static void skl_initial_wm(struct intel_atomic_state *state, |
@@ -4317,32 +4338,29 @@ static inline void skl_wm_level_from_reg_val(uint32_t val, | |||
4317 | void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, | 4338 | void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, |
4318 | struct skl_pipe_wm *out) | 4339 | struct skl_pipe_wm *out) |
4319 | { | 4340 | { |
4320 | struct drm_device *dev = crtc->dev; | 4341 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); |
4321 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4322 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4342 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4323 | struct intel_plane *intel_plane; | ||
4324 | struct skl_plane_wm *wm; | ||
4325 | enum pipe pipe = intel_crtc->pipe; | 4343 | enum pipe pipe = intel_crtc->pipe; |
4326 | int level, id, max_level; | 4344 | int level, max_level; |
4345 | enum plane_id plane_id; | ||
4327 | uint32_t val; | 4346 | uint32_t val; |
4328 | 4347 | ||
4329 | max_level = ilk_wm_max_level(dev_priv); | 4348 | max_level = ilk_wm_max_level(dev_priv); |
4330 | 4349 | ||
4331 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | 4350 | for_each_plane_id_on_crtc(intel_crtc, plane_id) { |
4332 | id = skl_wm_plane_id(intel_plane); | 4351 | struct skl_plane_wm *wm = &out->planes[plane_id]; |
4333 | wm = &out->planes[id]; | ||
4334 | 4352 | ||
4335 | for (level = 0; level <= max_level; level++) { | 4353 | for (level = 0; level <= max_level; level++) { |
4336 | if (id != PLANE_CURSOR) | 4354 | if (plane_id != PLANE_CURSOR) |
4337 | val = I915_READ(PLANE_WM(pipe, id, level)); | 4355 | val = I915_READ(PLANE_WM(pipe, plane_id, level)); |
4338 | else | 4356 | else |
4339 | val = I915_READ(CUR_WM(pipe, level)); | 4357 | val = I915_READ(CUR_WM(pipe, level)); |
4340 | 4358 | ||
4341 | skl_wm_level_from_reg_val(val, &wm->wm[level]); | 4359 | skl_wm_level_from_reg_val(val, &wm->wm[level]); |
4342 | } | 4360 | } |
4343 | 4361 | ||
4344 | if (id != PLANE_CURSOR) | 4362 | if (plane_id != PLANE_CURSOR) |
4345 | val = I915_READ(PLANE_WM_TRANS(pipe, id)); | 4363 | val = I915_READ(PLANE_WM_TRANS(pipe, plane_id)); |
4346 | else | 4364 | else |
4347 | val = I915_READ(CUR_WM_TRANS(pipe)); | 4365 | val = I915_READ(CUR_WM_TRANS(pipe)); |
4348 | 4366 | ||
@@ -4450,67 +4468,67 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, | |||
4450 | for_each_pipe(dev_priv, pipe) { | 4468 | for_each_pipe(dev_priv, pipe) { |
4451 | tmp = I915_READ(VLV_DDL(pipe)); | 4469 | tmp = I915_READ(VLV_DDL(pipe)); |
4452 | 4470 | ||
4453 | wm->ddl[pipe].primary = | 4471 | wm->ddl[pipe].plane[PLANE_PRIMARY] = |
4454 | (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | 4472 | (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
4455 | wm->ddl[pipe].cursor = | 4473 | wm->ddl[pipe].plane[PLANE_CURSOR] = |
4456 | (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | 4474 | (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
4457 | wm->ddl[pipe].sprite[0] = | 4475 | wm->ddl[pipe].plane[PLANE_SPRITE0] = |
4458 | (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | 4476 | (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
4459 | wm->ddl[pipe].sprite[1] = | 4477 | wm->ddl[pipe].plane[PLANE_SPRITE1] = |
4460 | (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); | 4478 | (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); |
4461 | } | 4479 | } |
4462 | 4480 | ||
4463 | tmp = I915_READ(DSPFW1); | 4481 | tmp = I915_READ(DSPFW1); |
4464 | wm->sr.plane = _FW_WM(tmp, SR); | 4482 | wm->sr.plane = _FW_WM(tmp, SR); |
4465 | wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); | 4483 | wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); |
4466 | wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); | 4484 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); |
4467 | wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); | 4485 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); |
4468 | 4486 | ||
4469 | tmp = I915_READ(DSPFW2); | 4487 | tmp = I915_READ(DSPFW2); |
4470 | wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); | 4488 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); |
4471 | wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); | 4489 | wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); |
4472 | wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); | 4490 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); |
4473 | 4491 | ||
4474 | tmp = I915_READ(DSPFW3); | 4492 | tmp = I915_READ(DSPFW3); |
4475 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); | 4493 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); |
4476 | 4494 | ||
4477 | if (IS_CHERRYVIEW(dev_priv)) { | 4495 | if (IS_CHERRYVIEW(dev_priv)) { |
4478 | tmp = I915_READ(DSPFW7_CHV); | 4496 | tmp = I915_READ(DSPFW7_CHV); |
4479 | wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); | 4497 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); |
4480 | wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); | 4498 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); |
4481 | 4499 | ||
4482 | tmp = I915_READ(DSPFW8_CHV); | 4500 | tmp = I915_READ(DSPFW8_CHV); |
4483 | wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); | 4501 | wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); |
4484 | wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); | 4502 | wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); |
4485 | 4503 | ||
4486 | tmp = I915_READ(DSPFW9_CHV); | 4504 | tmp = I915_READ(DSPFW9_CHV); |
4487 | wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); | 4505 | wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); |
4488 | wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); | 4506 | wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); |
4489 | 4507 | ||
4490 | tmp = I915_READ(DSPHOWM); | 4508 | tmp = I915_READ(DSPHOWM); |
4491 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | 4509 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; |
4492 | wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; | 4510 | wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; |
4493 | wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; | 4511 | wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; |
4494 | wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; | 4512 | wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; |
4495 | wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; | 4513 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; |
4496 | wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | 4514 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; |
4497 | wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; | 4515 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; |
4498 | wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | 4516 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; |
4499 | wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | 4517 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; |
4500 | wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; | 4518 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; |
4501 | } else { | 4519 | } else { |
4502 | tmp = I915_READ(DSPFW7); | 4520 | tmp = I915_READ(DSPFW7); |
4503 | wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); | 4521 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); |
4504 | wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); | 4522 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); |
4505 | 4523 | ||
4506 | tmp = I915_READ(DSPHOWM); | 4524 | tmp = I915_READ(DSPHOWM); |
4507 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | 4525 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; |
4508 | wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; | 4526 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; |
4509 | wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | 4527 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; |
4510 | wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; | 4528 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; |
4511 | wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | 4529 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; |
4512 | wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | 4530 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; |
4513 | wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; | 4531 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; |
4514 | } | 4532 | } |
4515 | } | 4533 | } |
4516 | 4534 | ||
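[Annotation] The _HI reads exist because the watermark values outgrew their register fields: each 8-bit DSPFW slot gains one extra bit in DSPHOWM (the SR field keeps nine low bits, hence its << 9). A worked example for a primary-plane value:

	/*
	 * A 9-bit watermark of 300 (0x12c) is stored split across registers:
	 *   DSPFW*  low 8 bits:   300 & 0xff       = 0x2c (44)
	 *   DSPHOWM PLANE*_HI:   (300 >> 8) & 0x1  = 1
	 * Read back as 44 | (1 << 8) = 300, exactly what the |= lines do.
	 */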
@@ -4527,21 +4545,8 @@ void vlv_wm_get_hw_state(struct drm_device *dev) | |||
4527 | 4545 | ||
4528 | vlv_read_wm_values(dev_priv, wm); | 4546 | vlv_read_wm_values(dev_priv, wm); |
4529 | 4547 | ||
4530 | for_each_intel_plane(dev, plane) { | 4548 | for_each_intel_plane(dev, plane) |
4531 | switch (plane->base.type) { | 4549 | plane->wm.fifo_size = vlv_get_fifo_size(plane); |
4532 | int sprite; | ||
4533 | case DRM_PLANE_TYPE_CURSOR: | ||
4534 | plane->wm.fifo_size = 63; | ||
4535 | break; | ||
4536 | case DRM_PLANE_TYPE_PRIMARY: | ||
4537 | plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0); | ||
4538 | break; | ||
4539 | case DRM_PLANE_TYPE_OVERLAY: | ||
4540 | sprite = plane->plane; | ||
4541 | plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1); | ||
4542 | break; | ||
4543 | } | ||
4544 | } | ||
4545 | 4550 | ||
4546 | wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; | 4551 | wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; |
4547 | wm->level = VLV_WM_LEVEL_PM2; | 4552 | wm->level = VLV_WM_LEVEL_PM2; |
@@ -4582,8 +4587,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev) | |||
4582 | 4587 | ||
4583 | for_each_pipe(dev_priv, pipe) | 4588 | for_each_pipe(dev_priv, pipe) |
4584 | DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", | 4589 | DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", |
4585 | pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, | 4590 | pipe_name(pipe), |
4586 | wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); | 4591 | wm->pipe[pipe].plane[PLANE_PRIMARY], |
4592 | wm->pipe[pipe].plane[PLANE_CURSOR], | ||
4593 | wm->pipe[pipe].plane[PLANE_SPRITE0], | ||
4594 | wm->pipe[pipe].plane[PLANE_SPRITE1]); | ||
4587 | 4595 | ||
4588 | DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", | 4596 | DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", |
4589 | wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); | 4597 | wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); |
@@ -5003,8 +5011,18 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) | |||
5003 | if (dev_priv->rps.cur_freq <= val) | 5011 | if (dev_priv->rps.cur_freq <= val) |
5004 | return; | 5012 | return; |
5005 | 5013 | ||
5006 | /* Wake up the media well, as that takes a lot less | 5014 | /* The punit delays the write of the frequency and voltage until it |
5007 | * power than the Render well. */ | 5015 | * determines the GPU is awake. During normal usage we don't want to |
5016 | * waste power changing the frequency if the GPU is sleeping (rc6). | ||
5017 | * However, the GPU and driver are now idle and we do not want to delay | ||
5018 | * switching to minimum voltage (reducing power whilst idle) as we do | ||
5019 | * not expect to be woken in the near future and so must flush the | ||
5020 | * change by waking the device. | ||
5021 | * | ||
5022 | * We choose to take the media powerwell (either would do to trick the | ||
5023 | * punit into committing the voltage change) as that takes a lot less | ||
5024 | * power than the render powerwell. | ||
5025 | */ | ||
5008 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); | 5026 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); |
5009 | valleyview_set_rps(dev_priv, val); | 5027 | valleyview_set_rps(dev_priv, val); |
5010 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); | 5028 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); |
@@ -5226,7 +5244,7 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) | |||
5226 | if (!enable_rc6) | 5244 | if (!enable_rc6) |
5227 | return 0; | 5245 | return 0; |
5228 | 5246 | ||
5229 | if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { | 5247 | if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { |
5230 | DRM_INFO("RC6 disabled by BIOS\n"); | 5248 | DRM_INFO("RC6 disabled by BIOS\n"); |
5231 | return 0; | 5249 | return 0; |
5232 | } | 5250 | } |
@@ -5260,7 +5278,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) | |||
5260 | /* All of these values are in units of 50MHz */ | 5278 | /* All of these values are in units of 50MHz */ |
5261 | 5279 | ||
5262 | /* static values from HW: RP0 > RP1 > RPn (min_freq) */ | 5280 | /* static values from HW: RP0 > RP1 > RPn (min_freq) */ |
5263 | if (IS_BROXTON(dev_priv)) { | 5281 | if (IS_GEN9_LP(dev_priv)) { |
5264 | u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); | 5282 | u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); |
5265 | dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; | 5283 | dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; |
5266 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; | 5284 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; |
@@ -5823,7 +5841,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) | |||
5823 | int pcbr_offset; | 5841 | int pcbr_offset; |
5824 | 5842 | ||
5825 | pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; | 5843 | pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; |
5826 | pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm, | 5844 | pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv, |
5827 | pcbr_offset, | 5845 | pcbr_offset, |
5828 | I915_GTT_OFFSET_NONE, | 5846 | I915_GTT_OFFSET_NONE, |
5829 | pctx_size); | 5847 | pctx_size); |
@@ -5840,7 +5858,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) | |||
5840 | * overlap with other ranges, such as the frame buffer, protected | 5858 | * overlap with other ranges, such as the frame buffer, protected |
5841 | * memory, or any other relevant ranges. | 5859 | * memory, or any other relevant ranges. |
5842 | */ | 5860 | */ |
5843 | pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size); | 5861 | pctx = i915_gem_object_create_stolen(dev_priv, pctx_size); |
5844 | if (!pctx) { | 5862 | if (!pctx) { |
5845 | DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); | 5863 | DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); |
5846 | goto out; | 5864 | goto out; |
@@ -6791,7 +6809,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work) | |||
6791 | goto out; | 6809 | goto out; |
6792 | 6810 | ||
6793 | rcs = dev_priv->engine[RCS]; | 6811 | rcs = dev_priv->engine[RCS]; |
6794 | if (rcs->last_context) | 6812 | if (rcs->last_retired_context) |
6795 | goto out; | 6813 | goto out; |
6796 | 6814 | ||
6797 | if (!rcs->init_context) | 6815 | if (!rcs->init_context) |
@@ -7602,8 +7620,6 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) | |||
7602 | 7620 | ||
7603 | static void i830_init_clock_gating(struct drm_i915_private *dev_priv) | 7621 | static void i830_init_clock_gating(struct drm_i915_private *dev_priv) |
7604 | { | 7622 | { |
7605 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | ||
7606 | |||
7607 | I915_WRITE(MEM_MODE, | 7623 | I915_WRITE(MEM_MODE, |
7608 | _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | | 7624 | _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | |
7609 | _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); | 7625 | _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); |
@@ -7640,7 +7656,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) | |||
7640 | dev_priv->display.init_clock_gating = skylake_init_clock_gating; | 7656 | dev_priv->display.init_clock_gating = skylake_init_clock_gating; |
7641 | else if (IS_KABYLAKE(dev_priv)) | 7657 | else if (IS_KABYLAKE(dev_priv)) |
7642 | dev_priv->display.init_clock_gating = kabylake_init_clock_gating; | 7658 | dev_priv->display.init_clock_gating = kabylake_init_clock_gating; |
7643 | else if (IS_BROXTON(dev_priv)) | 7659 | else if (IS_GEN9_LP(dev_priv)) |
7644 | dev_priv->display.init_clock_gating = bxt_init_clock_gating; | 7660 | dev_priv->display.init_clock_gating = bxt_init_clock_gating; |
7645 | else if (IS_BROADWELL(dev_priv)) | 7661 | else if (IS_BROADWELL(dev_priv)) |
7646 | dev_priv->display.init_clock_gating = broadwell_init_clock_gating; | 7662 | dev_priv->display.init_clock_gating = broadwell_init_clock_gating; |
@@ -7658,9 +7674,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) | |||
7658 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; | 7674 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; |
7659 | else if (IS_G4X(dev_priv)) | 7675 | else if (IS_G4X(dev_priv)) |
7660 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; | 7676 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; |
7661 | else if (IS_CRESTLINE(dev_priv)) | 7677 | else if (IS_I965GM(dev_priv)) |
7662 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; | 7678 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; |
7663 | else if (IS_BROADWATER(dev_priv)) | 7679 | else if (IS_I965G(dev_priv)) |
7664 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; | 7680 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; |
7665 | else if (IS_GEN3(dev_priv)) | 7681 | else if (IS_GEN3(dev_priv)) |
7666 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | 7682 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
@@ -7709,10 +7725,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) | |||
7709 | DRM_DEBUG_KMS("Failed to read display plane latency. " | 7725 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
7710 | "Disable CxSR\n"); | 7726 | "Disable CxSR\n"); |
7711 | } | 7727 | } |
7712 | } else if (IS_CHERRYVIEW(dev_priv)) { | 7728 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
7713 | vlv_setup_wm_latency(dev_priv); | ||
7714 | dev_priv->display.update_wm = vlv_update_wm; | ||
7715 | } else if (IS_VALLEYVIEW(dev_priv)) { | ||
7716 | vlv_setup_wm_latency(dev_priv); | 7729 | vlv_setup_wm_latency(dev_priv); |
7717 | dev_priv->display.update_wm = vlv_update_wm; | 7730 | dev_priv->display.update_wm = vlv_update_wm; |
7718 | } else if (IS_PINEVIEW(dev_priv)) { | 7731 | } else if (IS_PINEVIEW(dev_priv)) { |
@@ -7856,6 +7869,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, | |||
7856 | } | 7869 | } |
7857 | 7870 | ||
7858 | I915_WRITE_FW(GEN6_PCODE_DATA, val); | 7871 | I915_WRITE_FW(GEN6_PCODE_DATA, val); |
7872 | I915_WRITE_FW(GEN6_PCODE_DATA1, 0); | ||
7859 | I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); | 7873 | I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); |
7860 | 7874 | ||
7861 | if (intel_wait_for_register_fw(dev_priv, | 7875 | if (intel_wait_for_register_fw(dev_priv, |
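The added GEN6_PCODE_DATA1 write zeroes the upper data dword so stale bits from a previous transaction cannot be latched by the pcode firmware. The mailbox handshake around it, sketched (the 500ms timeout here is an assumption):

    /* Load data, ring the doorbell with READY set, then poll until
     * the pcode firmware clears READY to signal completion. */
    I915_WRITE_FW(GEN6_PCODE_DATA, val);
    I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
    I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
    if (intel_wait_for_register_fw(dev_priv, GEN6_PCODE_MAILBOX,
                                   GEN6_PCODE_READY, 0, 500))
            return -ETIMEDOUT;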
@@ -8048,10 +8062,8 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) | |||
8048 | queue_work(req->i915->wq, &boost->work); | 8062 | queue_work(req->i915->wq, &boost->work); |
8049 | } | 8063 | } |
8050 | 8064 | ||
8051 | void intel_pm_setup(struct drm_device *dev) | 8065 | void intel_pm_setup(struct drm_i915_private *dev_priv) |
8052 | { | 8066 | { |
8053 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
8054 | |||
8055 | mutex_init(&dev_priv->rps.hw_lock); | 8067 | mutex_init(&dev_priv->rps.hw_lock); |
8056 | spin_lock_init(&dev_priv->rps.client_lock); | 8068 | spin_lock_init(&dev_priv->rps.client_lock); |
8057 | 8069 | ||
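intel_pm_setup() is one instance of a conversion running through this whole pull: entry points take struct drm_i915_private * directly instead of struct drm_device *, deleting the to_i915() boilerplate at each call site. Where some drm-core API still wants the drm_device, the derivation simply runs the other way, e.g. (a sketch; example_init and drm_core_call are hypothetical names):

    void example_init(struct drm_i915_private *dev_priv)
    {
            /* Recover the embedded drm_device only where still needed. */
            struct drm_device *dev = &dev_priv->drm;

            drm_core_call(dev);
    }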
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index c6be70686b4a..6aca8ff14989 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -813,15 +813,13 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, | |||
813 | 813 | ||
814 | /** | 814 | /** |
815 | * intel_psr_init - Init basic PSR work and mutex. | 815 | * intel_psr_init - Init basic PSR work and mutex. |
816 | * @dev: DRM device | 816 | * @dev_priv: i915 device private |
817 | * | 817 | * |
818 | * This function is called only once at driver load to initialize basic | 818 | * This function is called only once at driver load to initialize basic |
819 | * PSR stuff. | 819 | * PSR stuff. |
820 | */ | 820 | */ |
821 | void intel_psr_init(struct drm_device *dev) | 821 | void intel_psr_init(struct drm_i915_private *dev_priv) |
822 | { | 822 | { |
823 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
824 | |||
825 | dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? | 823 | dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? |
826 | HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; | 824 | HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; |
827 | 825 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index aeb637dc1fdf..0971ac396b60 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1805,10 +1805,9 @@ static int init_phys_status_page(struct intel_engine_cs *engine) | |||
1805 | return 0; | 1805 | return 0; |
1806 | } | 1806 | } |
1807 | 1807 | ||
1808 | int intel_ring_pin(struct intel_ring *ring) | 1808 | int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias) |
1809 | { | 1809 | { |
1810 | /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ | 1810 | unsigned int flags; |
1811 | unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096; | ||
1812 | enum i915_map_type map; | 1811 | enum i915_map_type map; |
1813 | struct i915_vma *vma = ring->vma; | 1812 | struct i915_vma *vma = ring->vma; |
1814 | void *addr; | 1813 | void *addr; |
@@ -1818,6 +1817,9 @@ int intel_ring_pin(struct intel_ring *ring) | |||
1818 | 1817 | ||
1819 | map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC; | 1818 | map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC; |
1820 | 1819 | ||
1820 | flags = PIN_GLOBAL; | ||
1821 | if (offset_bias) | ||
1822 | flags |= PIN_OFFSET_BIAS | offset_bias; | ||
1821 | if (vma->obj->stolen) | 1823 | if (vma->obj->stolen) |
1822 | flags |= PIN_MAPPABLE; | 1824 | flags |= PIN_MAPPABLE; |
1823 | 1825 | ||
@@ -1869,9 +1871,9 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) | |||
1869 | struct drm_i915_gem_object *obj; | 1871 | struct drm_i915_gem_object *obj; |
1870 | struct i915_vma *vma; | 1872 | struct i915_vma *vma; |
1871 | 1873 | ||
1872 | obj = i915_gem_object_create_stolen(&dev_priv->drm, size); | 1874 | obj = i915_gem_object_create_stolen(dev_priv, size); |
1873 | if (!obj) | 1875 | if (!obj) |
1874 | obj = i915_gem_object_create(&dev_priv->drm, size); | 1876 | obj = i915_gem_object_create(dev_priv, size); |
1875 | if (IS_ERR(obj)) | 1877 | if (IS_ERR(obj)) |
1876 | return ERR_CAST(obj); | 1878 | return ERR_CAST(obj); |
1877 | 1879 | ||
@@ -1912,7 +1914,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) | |||
1912 | * of the buffer. | 1914 | * of the buffer. |
1913 | */ | 1915 | */ |
1914 | ring->effective_size = size; | 1916 | ring->effective_size = size; |
1915 | if (IS_I830(engine->i915) || IS_845G(engine->i915)) | 1917 | if (IS_I830(engine->i915) || IS_I845G(engine->i915)) |
1916 | ring->effective_size -= 2 * CACHELINE_BYTES; | 1918 | ring->effective_size -= 2 * CACHELINE_BYTES; |
1917 | 1919 | ||
1918 | ring->last_retired_head = -1; | 1920 | ring->last_retired_head = -1; |
@@ -1939,8 +1941,26 @@ intel_ring_free(struct intel_ring *ring) | |||
1939 | kfree(ring); | 1941 | kfree(ring); |
1940 | } | 1942 | } |
1941 | 1943 | ||
1942 | static int intel_ring_context_pin(struct i915_gem_context *ctx, | 1944 | static int context_pin(struct i915_gem_context *ctx, unsigned int flags) |
1943 | struct intel_engine_cs *engine) | 1945 | { |
1946 | struct i915_vma *vma = ctx->engine[RCS].state; | ||
1947 | int ret; | ||
1948 | |||
1949 | /* Clear this page out of any CPU caches for coherent swap-in/out. | ||
1950 | * We only want to do this on the first bind so that we do not stall | ||
1951 | * on an active context (which by nature is already on the GPU). | ||
1952 | */ | ||
1953 | if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { | ||
1954 | ret = i915_gem_object_set_to_gtt_domain(vma->obj, false); | ||
1955 | if (ret) | ||
1956 | return ret; | ||
1957 | } | ||
1958 | |||
1959 | return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags); | ||
1960 | } | ||
1961 | |||
1962 | static int intel_ring_context_pin(struct intel_engine_cs *engine, | ||
1963 | struct i915_gem_context *ctx) | ||
1944 | { | 1964 | { |
1945 | struct intel_context *ce = &ctx->engine[engine->id]; | 1965 | struct intel_context *ce = &ctx->engine[engine->id]; |
1946 | int ret; | 1966 | int ret; |
@@ -1951,13 +1971,15 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx, | |||
1951 | return 0; | 1971 | return 0; |
1952 | 1972 | ||
1953 | if (ce->state) { | 1973 | if (ce->state) { |
1954 | struct i915_vma *vma; | 1974 | unsigned int flags; |
1975 | |||
1976 | flags = 0; | ||
1977 | if (i915_gem_context_is_kernel(ctx)) | ||
1978 | flags = PIN_HIGH; | ||
1955 | 1979 | ||
1956 | vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH); | 1980 | ret = context_pin(ctx, flags); |
1957 | if (IS_ERR(vma)) { | 1981 | if (ret) |
1958 | ret = PTR_ERR(vma); | ||
1959 | goto error; | 1982 | goto error; |
1960 | } | ||
1961 | } | 1983 | } |
1962 | 1984 | ||
1963 | /* The kernel context is only used as a placeholder for flushing the | 1985 | /* The kernel context is only used as a placeholder for flushing the |
@@ -1967,7 +1989,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx, | |||
1967 | * as during eviction we cannot allocate and pin the renderstate in | 1989 | * as during eviction we cannot allocate and pin the renderstate in |
1968 | * order to initialise the context. | 1990 | * order to initialise the context. |
1969 | */ | 1991 | */ |
1970 | if (ctx == ctx->i915->kernel_context) | 1992 | if (i915_gem_context_is_kernel(ctx)) |
1971 | ce->initialised = true; | 1993 | ce->initialised = true; |
1972 | 1994 | ||
1973 | i915_gem_context_get(ctx); | 1995 | i915_gem_context_get(ctx); |
@@ -1978,12 +2000,13 @@ error: | |||
1978 | return ret; | 2000 | return ret; |
1979 | } | 2001 | } |
1980 | 2002 | ||
1981 | static void intel_ring_context_unpin(struct i915_gem_context *ctx, | 2003 | static void intel_ring_context_unpin(struct intel_engine_cs *engine, |
1982 | struct intel_engine_cs *engine) | 2004 | struct i915_gem_context *ctx) |
1983 | { | 2005 | { |
1984 | struct intel_context *ce = &ctx->engine[engine->id]; | 2006 | struct intel_context *ce = &ctx->engine[engine->id]; |
1985 | 2007 | ||
1986 | lockdep_assert_held(&ctx->i915->drm.struct_mutex); | 2008 | lockdep_assert_held(&ctx->i915->drm.struct_mutex); |
2009 | GEM_BUG_ON(ce->pin_count == 0); | ||
1987 | 2010 | ||
1988 | if (--ce->pin_count) | 2011 | if (--ce->pin_count) |
1989 | return; | 2012 | return; |
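Context pinning is plain refcounting on ce->pin_count, and the new GEM_BUG_ON turns an unbalanced unpin into an immediate assert instead of a silent underflow. Condensed (a sketch of the function above):

    GEM_BUG_ON(ce->pin_count == 0);  /* unpin without a matching pin */
    if (--ce->pin_count)
            return;                  /* context still pinned elsewhere */
    /* last pin dropped: release the context state */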
@@ -2008,17 +2031,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) | |||
2008 | if (ret) | 2031 | if (ret) |
2009 | goto error; | 2032 | goto error; |
2010 | 2033 | ||
2011 | /* We may need to do things with the shrinker which | ||
2012 | * require us to immediately switch back to the default | ||
2013 | * context. This can cause a problem as pinning the | ||
2014 | * default context also requires GTT space which may not | ||
2015 | * be available. To avoid this we always pin the default | ||
2016 | * context. | ||
2017 | */ | ||
2018 | ret = intel_ring_context_pin(dev_priv->kernel_context, engine); | ||
2019 | if (ret) | ||
2020 | goto error; | ||
2021 | |||
2022 | ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); | 2034 | ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); |
2023 | if (IS_ERR(ring)) { | 2035 | if (IS_ERR(ring)) { |
2024 | ret = PTR_ERR(ring); | 2036 | ret = PTR_ERR(ring); |
@@ -2036,7 +2048,8 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) | |||
2036 | goto error; | 2048 | goto error; |
2037 | } | 2049 | } |
2038 | 2050 | ||
2039 | ret = intel_ring_pin(ring); | 2051 | /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ |
2052 | ret = intel_ring_pin(ring, 4096); | ||
2040 | if (ret) { | 2053 | if (ret) { |
2041 | intel_ring_free(ring); | 2054 | intel_ring_free(ring); |
2042 | goto error; | 2055 | goto error; |
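With the bias now a parameter, the legacy path keeps the old behaviour by passing 4096, guaranteeing the ring is never bound at GGTT offset 0 where wraparound is known to hang; a caller with no such constraint passes 0 and the PIN_OFFSET_BIAS flag is skipped entirely. For instance (other_ring is a hypothetical unconstrained caller):

    /* Legacy ringbuffer: bias the binding above the first page. */
    ret = intel_ring_pin(ring, 4096);
    /* Unconstrained caller: any GGTT offset is acceptable. */
    ret = intel_ring_pin(other_ring, 0);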
@@ -2077,8 +2090,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) | |||
2077 | 2090 | ||
2078 | intel_engine_cleanup_common(engine); | 2091 | intel_engine_cleanup_common(engine); |
2079 | 2092 | ||
2080 | intel_ring_context_unpin(dev_priv->kernel_context, engine); | ||
2081 | |||
2082 | engine->i915 = NULL; | 2093 | engine->i915 = NULL; |
2083 | dev_priv->engine[engine->id] = NULL; | 2094 | dev_priv->engine[engine->id] = NULL; |
2084 | kfree(engine); | 2095 | kfree(engine); |
@@ -2095,16 +2106,19 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) | |||
2095 | } | 2106 | } |
2096 | } | 2107 | } |
2097 | 2108 | ||
2098 | int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) | 2109 | static int ring_request_alloc(struct drm_i915_gem_request *request) |
2099 | { | 2110 | { |
2100 | int ret; | 2111 | int ret; |
2101 | 2112 | ||
2113 | GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count); | ||
2114 | |||
2102 | /* Flush enough space to reduce the likelihood of waiting after | 2115 | /* Flush enough space to reduce the likelihood of waiting after |
2103 | * we start building the request - in which case we will just | 2116 | * we start building the request - in which case we will just |
2104 | * have to repeat work. | 2117 | * have to repeat work. |
2105 | */ | 2118 | */ |
2106 | request->reserved_space += LEGACY_REQUEST_SIZE; | 2119 | request->reserved_space += LEGACY_REQUEST_SIZE; |
2107 | 2120 | ||
2121 | GEM_BUG_ON(!request->engine->buffer); | ||
2108 | request->ring = request->engine->buffer; | 2122 | request->ring = request->engine->buffer; |
2109 | 2123 | ||
2110 | ret = intel_ring_begin(request, 0); | 2124 | ret = intel_ring_begin(request, 0); |
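The reserved_space dance guarantees the closing breadcrumb of a request can always be emitted without waiting: the reserve is added before intel_ring_begin(request, 0), which blocks until both the reserve and the caller's words fit, and is handed back once space is secured. Condensed (a sketch of the flow):

    request->reserved_space += LEGACY_REQUEST_SIZE; /* keep room for the tail */
    ret = intel_ring_begin(request, 0);             /* waits for ring space */
    if (ret)
            return ret;
    request->reserved_space -= LEGACY_REQUEST_SIZE; /* reserve secured */
    return 0;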
@@ -2452,7 +2466,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, | |||
2452 | if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { | 2466 | if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { |
2453 | struct i915_vma *vma; | 2467 | struct i915_vma *vma; |
2454 | 2468 | ||
2455 | obj = i915_gem_object_create(&dev_priv->drm, 4096); | 2469 | obj = i915_gem_object_create(dev_priv, 4096); |
2456 | if (IS_ERR(obj)) | 2470 | if (IS_ERR(obj)) |
2457 | goto err; | 2471 | goto err; |
2458 | 2472 | ||
@@ -2584,6 +2598,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, | |||
2584 | engine->init_hw = init_ring_common; | 2598 | engine->init_hw = init_ring_common; |
2585 | engine->reset_hw = reset_ring_common; | 2599 | engine->reset_hw = reset_ring_common; |
2586 | 2600 | ||
2601 | engine->context_pin = intel_ring_context_pin; | ||
2602 | engine->context_unpin = intel_ring_context_unpin; | ||
2603 | |||
2604 | engine->request_alloc = ring_request_alloc; | ||
2605 | |||
2587 | engine->emit_breadcrumb = i9xx_emit_breadcrumb; | 2606 | engine->emit_breadcrumb = i9xx_emit_breadcrumb; |
2588 | engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; | 2607 | engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; |
2589 | if (i915.semaphores) { | 2608 | if (i915.semaphores) { |
@@ -2608,7 +2627,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, | |||
2608 | engine->emit_bb_start = gen6_emit_bb_start; | 2627 | engine->emit_bb_start = gen6_emit_bb_start; |
2609 | else if (INTEL_GEN(dev_priv) >= 4) | 2628 | else if (INTEL_GEN(dev_priv) >= 4) |
2610 | engine->emit_bb_start = i965_emit_bb_start; | 2629 | engine->emit_bb_start = i965_emit_bb_start; |
2611 | else if (IS_I830(dev_priv) || IS_845G(dev_priv)) | 2630 | else if (IS_I830(dev_priv) || IS_I845G(dev_priv)) |
2612 | engine->emit_bb_start = i830_emit_bb_start; | 2631 | engine->emit_bb_start = i830_emit_bb_start; |
2613 | else | 2632 | else |
2614 | engine->emit_bb_start = i915_emit_bb_start; | 2633 | engine->emit_bb_start = i915_emit_bb_start; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3466b4e77e7c..79c2b8d72322 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -65,14 +65,37 @@ struct intel_hw_status_page { | |||
65 | GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) | 65 | GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) |
66 | 66 | ||
67 | enum intel_engine_hangcheck_action { | 67 | enum intel_engine_hangcheck_action { |
68 | HANGCHECK_IDLE = 0, | 68 | ENGINE_IDLE = 0, |
69 | HANGCHECK_WAIT, | 69 | ENGINE_WAIT, |
70 | HANGCHECK_ACTIVE, | 70 | ENGINE_ACTIVE_SEQNO, |
71 | HANGCHECK_KICK, | 71 | ENGINE_ACTIVE_HEAD, |
72 | HANGCHECK_HUNG, | 72 | ENGINE_ACTIVE_SUBUNITS, |
73 | ENGINE_WAIT_KICK, | ||
74 | ENGINE_DEAD, | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | #define HANGCHECK_SCORE_RING_HUNG 31 | 77 | static inline const char * |
78 | hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) | ||
79 | { | ||
80 | switch (a) { | ||
81 | case ENGINE_IDLE: | ||
82 | return "idle"; | ||
83 | case ENGINE_WAIT: | ||
84 | return "wait"; | ||
85 | case ENGINE_ACTIVE_SEQNO: | ||
86 | return "active seqno"; | ||
87 | case ENGINE_ACTIVE_HEAD: | ||
88 | return "active head"; | ||
89 | case ENGINE_ACTIVE_SUBUNITS: | ||
90 | return "active subunits"; | ||
91 | case ENGINE_WAIT_KICK: | ||
92 | return "wait kick"; | ||
93 | case ENGINE_DEAD: | ||
94 | return "dead"; | ||
95 | } | ||
96 | |||
97 | return "unknown"; | ||
98 | } | ||
76 | 99 | ||
77 | #define I915_MAX_SLICES 3 | 100 | #define I915_MAX_SLICES 3 |
78 | #define I915_MAX_SUBSLICES 3 | 101 | #define I915_MAX_SUBSLICES 3 |
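Replacing the numeric hangcheck score with named states pays off in debug output; the helper makes the state directly printable, e.g. (a hypothetical debug print):

    DRM_DEBUG_DRIVER("%s hangcheck state: %s\n", engine->name,
                     hangcheck_action_to_str(engine->hangcheck.action));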
@@ -104,10 +127,11 @@ struct intel_instdone { | |||
104 | struct intel_engine_hangcheck { | 127 | struct intel_engine_hangcheck { |
105 | u64 acthd; | 128 | u64 acthd; |
106 | u32 seqno; | 129 | u32 seqno; |
107 | int score; | ||
108 | enum intel_engine_hangcheck_action action; | 130 | enum intel_engine_hangcheck_action action; |
131 | unsigned long action_timestamp; | ||
109 | int deadlock; | 132 | int deadlock; |
110 | struct intel_instdone instdone; | 133 | struct intel_instdone instdone; |
134 | bool stalled; | ||
111 | }; | 135 | }; |
112 | 136 | ||
113 | struct intel_ring { | 137 | struct intel_ring { |
@@ -242,6 +266,11 @@ struct intel_engine_cs { | |||
242 | void (*reset_hw)(struct intel_engine_cs *engine, | 266 | void (*reset_hw)(struct intel_engine_cs *engine, |
243 | struct drm_i915_gem_request *req); | 267 | struct drm_i915_gem_request *req); |
244 | 268 | ||
269 | int (*context_pin)(struct intel_engine_cs *engine, | ||
270 | struct i915_gem_context *ctx); | ||
271 | void (*context_unpin)(struct intel_engine_cs *engine, | ||
272 | struct i915_gem_context *ctx); | ||
273 | int (*request_alloc)(struct drm_i915_gem_request *req); | ||
245 | int (*init_context)(struct drm_i915_gem_request *req); | 274 | int (*init_context)(struct drm_i915_gem_request *req); |
246 | 275 | ||
247 | int (*emit_flush)(struct drm_i915_gem_request *request, | 276 | int (*emit_flush)(struct drm_i915_gem_request *request, |
@@ -355,7 +384,24 @@ struct intel_engine_cs { | |||
355 | bool preempt_wa; | 384 | bool preempt_wa; |
356 | u32 ctx_desc_template; | 385 | u32 ctx_desc_template; |
357 | 386 | ||
358 | struct i915_gem_context *last_context; | 387 | /* Contexts are pinned whilst they are active on the GPU. The last |
388 | * context executed remains active whilst the GPU is idle - the | ||
389 | * switch away and write to the context object only occurs on the | ||
390 | * next execution. Contexts are only unpinned on retirement of the | ||
391 | * following request ensuring that we can always write to the object | ||
392 | * on the context switch even after idling. Across suspend, we switch | ||
393 | * to the kernel context and trash it as the save may not happen | ||
394 | * before the hardware is powered down. | ||
395 | */ | ||
396 | struct i915_gem_context *last_retired_context; | ||
397 | |||
398 | /* We track the current MI_SET_CONTEXT in order to eliminate | ||
399 | * redundant context switches. This presumes that requests are not | ||
400 | * reordered! Or when they are, the tracking is updated along with | ||
401 | * the emission of individual requests into the legacy command | ||
402 | * stream (ring). | ||
403 | */ | ||
404 | struct i915_gem_context *legacy_active_context; | ||
359 | 405 | ||
360 | struct intel_engine_hangcheck hangcheck; | 406 | struct intel_engine_hangcheck hangcheck; |
361 | 407 | ||
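The comment on last_retired_context pins down exactly when a context may be released: only once a request on the *following* context retires, which proves the hardware has finished writing the old image out. Roughly what the retirement path does with it (a sketch; the real code lives in the request-retire machinery):

    /* This request retiring proves the switch away from the previously
     * active context completed, so its pin can finally be dropped. */
    if (engine->last_retired_context)
            engine->context_unpin(engine, engine->last_retired_context);
    engine->last_retired_context = request->ctx;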
@@ -437,7 +483,7 @@ intel_write_status_page(struct intel_engine_cs *engine, | |||
437 | 483 | ||
438 | struct intel_ring * | 484 | struct intel_ring * |
439 | intel_engine_create_ring(struct intel_engine_cs *engine, int size); | 485 | intel_engine_create_ring(struct intel_engine_cs *engine, int size); |
440 | int intel_ring_pin(struct intel_ring *ring); | 486 | int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); |
441 | void intel_ring_unpin(struct intel_ring *ring); | 487 | void intel_ring_unpin(struct intel_ring *ring); |
442 | void intel_ring_free(struct intel_ring *ring); | 488 | void intel_ring_free(struct intel_ring *ring); |
443 | 489 | ||
@@ -446,8 +492,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine); | |||
446 | 492 | ||
447 | void intel_legacy_submission_resume(struct drm_i915_private *dev_priv); | 493 | void intel_legacy_submission_resume(struct drm_i915_private *dev_priv); |
448 | 494 | ||
449 | int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request); | ||
450 | |||
451 | int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n); | 495 | int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n); |
452 | int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); | 496 | int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); |
453 | 497 | ||
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 87b4af092d54..c0b7e95b5b8e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -453,6 +453,57 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, | |||
453 | BIT(POWER_DOMAIN_AUX_C) | \ | 453 | BIT(POWER_DOMAIN_AUX_C) | \ |
454 | BIT(POWER_DOMAIN_INIT)) | 454 | BIT(POWER_DOMAIN_INIT)) |
455 | 455 | ||
456 | #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ | ||
457 | BIT(POWER_DOMAIN_TRANSCODER_A) | \ | ||
458 | BIT(POWER_DOMAIN_PIPE_B) | \ | ||
459 | BIT(POWER_DOMAIN_TRANSCODER_B) | \ | ||
460 | BIT(POWER_DOMAIN_PIPE_C) | \ | ||
461 | BIT(POWER_DOMAIN_TRANSCODER_C) | \ | ||
462 | BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ | ||
463 | BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ | ||
464 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
465 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
466 | BIT(POWER_DOMAIN_AUX_B) | \ | ||
467 | BIT(POWER_DOMAIN_AUX_C) | \ | ||
468 | BIT(POWER_DOMAIN_AUDIO) | \ | ||
469 | BIT(POWER_DOMAIN_VGA) | \ | ||
470 | BIT(POWER_DOMAIN_INIT)) | ||
471 | #define GLK_DISPLAY_DDI_A_POWER_DOMAINS ( \ | ||
472 | BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ | ||
473 | BIT(POWER_DOMAIN_INIT)) | ||
474 | #define GLK_DISPLAY_DDI_B_POWER_DOMAINS ( \ | ||
475 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
476 | BIT(POWER_DOMAIN_INIT)) | ||
477 | #define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \ | ||
478 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
479 | BIT(POWER_DOMAIN_INIT)) | ||
480 | #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ | ||
481 | BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ | ||
482 | BIT(POWER_DOMAIN_AUX_A) | \ | ||
483 | BIT(POWER_DOMAIN_INIT)) | ||
484 | #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ | ||
485 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
486 | BIT(POWER_DOMAIN_AUX_B) | \ | ||
487 | BIT(POWER_DOMAIN_INIT)) | ||
488 | #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ | ||
489 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
490 | BIT(POWER_DOMAIN_AUX_C) | \ | ||
491 | BIT(POWER_DOMAIN_INIT)) | ||
492 | #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ | ||
493 | BIT(POWER_DOMAIN_AUX_A) | \ | ||
494 | BIT(POWER_DOMAIN_INIT)) | ||
495 | #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ | ||
496 | BIT(POWER_DOMAIN_AUX_B) | \ | ||
497 | BIT(POWER_DOMAIN_INIT)) | ||
498 | #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ | ||
499 | BIT(POWER_DOMAIN_AUX_C) | \ | ||
500 | BIT(POWER_DOMAIN_INIT)) | ||
501 | #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ | ||
502 | GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ | ||
503 | BIT(POWER_DOMAIN_MODESET) | \ | ||
504 | BIT(POWER_DOMAIN_AUX_A) | \ | ||
505 | BIT(POWER_DOMAIN_INIT)) | ||
506 | |||
456 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) | 507 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) |
457 | { | 508 | { |
458 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), | 509 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), |
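Each well advertises the domains it feeds as a BIT() mask, so the Geminilake tables above are pure data; whether a given well matters for a domain is a one-line mask test. A hedged sketch (domain_covered_by is a hypothetical helper):

    /* A display power domain is usable only while every well that
     * lists it in ->domains is enabled. */
    static bool domain_covered_by(const struct i915_power_well *well,
                                  enum intel_display_power_domain domain)
    {
            return well->domains & BIT(domain);
    }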
@@ -530,7 +581,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) | |||
530 | u32 mask; | 581 | u32 mask; |
531 | 582 | ||
532 | mask = DC_STATE_EN_UPTO_DC5; | 583 | mask = DC_STATE_EN_UPTO_DC5; |
533 | if (IS_BROXTON(dev_priv)) | 584 | if (IS_GEN9_LP(dev_priv)) |
534 | mask |= DC_STATE_EN_DC9; | 585 | mask |= DC_STATE_EN_DC9; |
535 | else | 586 | else |
536 | mask |= DC_STATE_EN_UPTO_DC6; | 587 | mask |= DC_STATE_EN_UPTO_DC6; |
@@ -694,7 +745,7 @@ gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv, | |||
694 | } | 745 | } |
695 | 746 | ||
696 | static void skl_set_power_well(struct drm_i915_private *dev_priv, | 747 | static void skl_set_power_well(struct drm_i915_private *dev_priv, |
697 | struct i915_power_well *power_well, bool enable) | 748 | struct i915_power_well *power_well, bool enable) |
698 | { | 749 | { |
699 | uint32_t tmp, fuse_status; | 750 | uint32_t tmp, fuse_status; |
700 | uint32_t req_mask, state_mask; | 751 | uint32_t req_mask, state_mask; |
@@ -720,11 +771,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, | |||
720 | return; | 771 | return; |
721 | } | 772 | } |
722 | break; | 773 | break; |
723 | case SKL_DISP_PW_DDI_A_E: | 774 | case SKL_DISP_PW_MISC_IO: |
775 | case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */ | ||
724 | case SKL_DISP_PW_DDI_B: | 776 | case SKL_DISP_PW_DDI_B: |
725 | case SKL_DISP_PW_DDI_C: | 777 | case SKL_DISP_PW_DDI_C: |
726 | case SKL_DISP_PW_DDI_D: | 778 | case SKL_DISP_PW_DDI_D: |
727 | case SKL_DISP_PW_MISC_IO: | 779 | case GLK_DISP_PW_AUX_A: |
780 | case GLK_DISP_PW_AUX_B: | ||
781 | case GLK_DISP_PW_AUX_C: | ||
728 | break; | 782 | break; |
729 | default: | 783 | default: |
730 | WARN(1, "Unknown power well %lu\n", power_well->id); | 784 | WARN(1, "Unknown power well %lu\n", power_well->id); |
@@ -884,6 +938,12 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) | |||
884 | power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); | 938 | power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); |
885 | if (power_well->count > 0) | 939 | if (power_well->count > 0) |
886 | bxt_ddi_phy_verify_state(dev_priv, power_well->data); | 940 | bxt_ddi_phy_verify_state(dev_priv, power_well->data); |
941 | |||
942 | if (IS_GEMINILAKE(dev_priv)) { | ||
943 | power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C); | ||
944 | if (power_well->count > 0) | ||
945 | bxt_ddi_phy_verify_state(dev_priv, power_well->data); | ||
946 | } | ||
887 | } | 947 | } |
888 | 948 | ||
889 | static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, | 949 | static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, |
@@ -911,7 +971,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, | |||
911 | 971 | ||
912 | gen9_assert_dbuf_enabled(dev_priv); | 972 | gen9_assert_dbuf_enabled(dev_priv); |
913 | 973 | ||
914 | if (IS_BROXTON(dev_priv)) | 974 | if (IS_GEN9_LP(dev_priv)) |
915 | bxt_verify_ddi_phy_power_wells(dev_priv); | 975 | bxt_verify_ddi_phy_power_wells(dev_priv); |
916 | } | 976 | } |
917 | 977 | ||
@@ -2161,6 +2221,91 @@ static struct i915_power_well bxt_power_wells[] = { | |||
2161 | }, | 2221 | }, |
2162 | }; | 2222 | }; |
2163 | 2223 | ||
2224 | static struct i915_power_well glk_power_wells[] = { | ||
2225 | { | ||
2226 | .name = "always-on", | ||
2227 | .always_on = 1, | ||
2228 | .domains = POWER_DOMAIN_MASK, | ||
2229 | .ops = &i9xx_always_on_power_well_ops, | ||
2230 | }, | ||
2231 | { | ||
2232 | .name = "power well 1", | ||
2233 | /* Handled by the DMC firmware */ | ||
2234 | .domains = 0, | ||
2235 | .ops = &skl_power_well_ops, | ||
2236 | .id = SKL_DISP_PW_1, | ||
2237 | }, | ||
2238 | { | ||
2239 | .name = "DC off", | ||
2240 | .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, | ||
2241 | .ops = &gen9_dc_off_power_well_ops, | ||
2242 | .id = SKL_DISP_PW_DC_OFF, | ||
2243 | }, | ||
2244 | { | ||
2245 | .name = "power well 2", | ||
2246 | .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, | ||
2247 | .ops = &skl_power_well_ops, | ||
2248 | .id = SKL_DISP_PW_2, | ||
2249 | }, | ||
2250 | { | ||
2251 | .name = "dpio-common-a", | ||
2252 | .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, | ||
2253 | .ops = &bxt_dpio_cmn_power_well_ops, | ||
2254 | .id = BXT_DPIO_CMN_A, | ||
2255 | .data = DPIO_PHY1, | ||
2256 | }, | ||
2257 | { | ||
2258 | .name = "dpio-common-b", | ||
2259 | .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, | ||
2260 | .ops = &bxt_dpio_cmn_power_well_ops, | ||
2261 | .id = BXT_DPIO_CMN_BC, | ||
2262 | .data = DPIO_PHY0, | ||
2263 | }, | ||
2264 | { | ||
2265 | .name = "dpio-common-c", | ||
2266 | .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, | ||
2267 | .ops = &bxt_dpio_cmn_power_well_ops, | ||
2268 | .id = GLK_DPIO_CMN_C, | ||
2269 | .data = DPIO_PHY2, | ||
2270 | }, | ||
2271 | { | ||
2272 | .name = "AUX A", | ||
2273 | .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, | ||
2274 | .ops = &skl_power_well_ops, | ||
2275 | .id = GLK_DISP_PW_AUX_A, | ||
2276 | }, | ||
2277 | { | ||
2278 | .name = "AUX B", | ||
2279 | .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, | ||
2280 | .ops = &skl_power_well_ops, | ||
2281 | .id = GLK_DISP_PW_AUX_B, | ||
2282 | }, | ||
2283 | { | ||
2284 | .name = "AUX C", | ||
2285 | .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, | ||
2286 | .ops = &skl_power_well_ops, | ||
2287 | .id = GLK_DISP_PW_AUX_C, | ||
2288 | }, | ||
2289 | { | ||
2290 | .name = "DDI A power well", | ||
2291 | .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS, | ||
2292 | .ops = &skl_power_well_ops, | ||
2293 | .id = GLK_DISP_PW_DDI_A, | ||
2294 | }, | ||
2295 | { | ||
2296 | .name = "DDI B power well", | ||
2297 | .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS, | ||
2298 | .ops = &skl_power_well_ops, | ||
2299 | .id = SKL_DISP_PW_DDI_B, | ||
2300 | }, | ||
2301 | { | ||
2302 | .name = "DDI C power well", | ||
2303 | .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS, | ||
2304 | .ops = &skl_power_well_ops, | ||
2305 | .id = SKL_DISP_PW_DDI_C, | ||
2306 | }, | ||
2307 | }; | ||
2308 | |||
2164 | static int | 2309 | static int |
2165 | sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, | 2310 | sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, |
2166 | int disable_power_well) | 2311 | int disable_power_well) |
@@ -2181,7 +2326,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, | |||
2181 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | 2326 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
2182 | max_dc = 2; | 2327 | max_dc = 2; |
2183 | mask = 0; | 2328 | mask = 0; |
2184 | } else if (IS_BROXTON(dev_priv)) { | 2329 | } else if (IS_GEN9_LP(dev_priv)) { |
2185 | max_dc = 1; | 2330 | max_dc = 1; |
2186 | /* | 2331 | /* |
2187 | * DC9 has a separate HW flow from the rest of the DC states, | 2332 | * DC9 has a separate HW flow from the rest of the DC states, |
@@ -2257,6 +2402,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) | |||
2257 | set_power_wells(power_domains, skl_power_wells); | 2402 | set_power_wells(power_domains, skl_power_wells); |
2258 | } else if (IS_BROXTON(dev_priv)) { | 2403 | } else if (IS_BROXTON(dev_priv)) { |
2259 | set_power_wells(power_domains, bxt_power_wells); | 2404 | set_power_wells(power_domains, bxt_power_wells); |
2405 | } else if (IS_GEMINILAKE(dev_priv)) { | ||
2406 | set_power_wells(power_domains, glk_power_wells); | ||
2260 | } else if (IS_CHERRYVIEW(dev_priv)) { | 2407 | } else if (IS_CHERRYVIEW(dev_priv)) { |
2261 | set_power_wells(power_domains, chv_power_wells); | 2408 | set_power_wells(power_domains, chv_power_wells); |
2262 | } else if (IS_VALLEYVIEW(dev_priv)) { | 2409 | } else if (IS_VALLEYVIEW(dev_priv)) { |
@@ -2585,7 +2732,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) | |||
2585 | 2732 | ||
2586 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | 2733 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
2587 | skl_display_core_init(dev_priv, resume); | 2734 | skl_display_core_init(dev_priv, resume); |
2588 | } else if (IS_BROXTON(dev_priv)) { | 2735 | } else if (IS_GEN9_LP(dev_priv)) { |
2589 | bxt_display_core_init(dev_priv, resume); | 2736 | bxt_display_core_init(dev_priv, resume); |
2590 | } else if (IS_CHERRYVIEW(dev_priv)) { | 2737 | } else if (IS_CHERRYVIEW(dev_priv)) { |
2591 | mutex_lock(&power_domains->lock); | 2738 | mutex_lock(&power_domains->lock); |
@@ -2624,7 +2771,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) | |||
2624 | 2771 | ||
2625 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 2772 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
2626 | skl_display_core_uninit(dev_priv); | 2773 | skl_display_core_uninit(dev_priv); |
2627 | else if (IS_BROXTON(dev_priv)) | 2774 | else if (IS_GEN9_LP(dev_priv)) |
2628 | bxt_display_core_uninit(dev_priv); | 2775 | bxt_display_core_uninit(dev_priv); |
2629 | } | 2776 | } |
2630 | 2777 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 27808e91cb5a..2ad13903a054 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1296,7 +1296,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, | |||
1296 | if (INTEL_GEN(dev_priv) >= 4) { | 1296 | if (INTEL_GEN(dev_priv) >= 4) { |
1297 | /* done in crtc_mode_set as the dpll_md reg must be written early */ | 1297 | /* done in crtc_mode_set as the dpll_md reg must be written early */ |
1298 | } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || | 1298 | } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || |
1299 | IS_G33(dev_priv)) { | 1299 | IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { |
1300 | /* done in crtc_mode_set as it lives inside the dpll register */ | 1300 | /* done in crtc_mode_set as it lives inside the dpll register */ |
1301 | } else { | 1301 | } else { |
1302 | sdvox |= (crtc_state->pixel_multiplier - 1) | 1302 | sdvox |= (crtc_state->pixel_multiplier - 1) |
@@ -2342,9 +2342,9 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) | |||
2342 | } | 2342 | } |
2343 | 2343 | ||
2344 | static u8 | 2344 | static u8 |
2345 | intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) | 2345 | intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv, |
2346 | struct intel_sdvo *sdvo) | ||
2346 | { | 2347 | { |
2347 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
2348 | struct sdvo_device_mapping *my_mapping, *other_mapping; | 2348 | struct sdvo_device_mapping *my_mapping, *other_mapping; |
2349 | 2349 | ||
2350 | if (sdvo->port == PORT_B) { | 2350 | if (sdvo->port == PORT_B) { |
@@ -2934,9 +2934,9 @@ static const struct i2c_algorithm intel_sdvo_ddc_proxy = { | |||
2934 | 2934 | ||
2935 | static bool | 2935 | static bool |
2936 | intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, | 2936 | intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, |
2937 | struct drm_device *dev) | 2937 | struct drm_i915_private *dev_priv) |
2938 | { | 2938 | { |
2939 | struct pci_dev *pdev = dev->pdev; | 2939 | struct pci_dev *pdev = dev_priv->drm.pdev; |
2940 | 2940 | ||
2941 | sdvo->ddc.owner = THIS_MODULE; | 2941 | sdvo->ddc.owner = THIS_MODULE; |
2942 | sdvo->ddc.class = I2C_CLASS_DDC; | 2942 | sdvo->ddc.class = I2C_CLASS_DDC; |
@@ -2957,10 +2957,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv, | |||
2957 | WARN_ON(port != PORT_B && port != PORT_C); | 2957 | WARN_ON(port != PORT_B && port != PORT_C); |
2958 | } | 2958 | } |
2959 | 2959 | ||
2960 | bool intel_sdvo_init(struct drm_device *dev, | 2960 | bool intel_sdvo_init(struct drm_i915_private *dev_priv, |
2961 | i915_reg_t sdvo_reg, enum port port) | 2961 | i915_reg_t sdvo_reg, enum port port) |
2962 | { | 2962 | { |
2963 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
2964 | struct intel_encoder *intel_encoder; | 2963 | struct intel_encoder *intel_encoder; |
2965 | struct intel_sdvo *intel_sdvo; | 2964 | struct intel_sdvo *intel_sdvo; |
2966 | int i; | 2965 | int i; |
@@ -2973,16 +2972,18 @@ bool intel_sdvo_init(struct drm_device *dev, | |||
2973 | 2972 | ||
2974 | intel_sdvo->sdvo_reg = sdvo_reg; | 2973 | intel_sdvo->sdvo_reg = sdvo_reg; |
2975 | intel_sdvo->port = port; | 2974 | intel_sdvo->port = port; |
2976 | intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; | 2975 | intel_sdvo->slave_addr = |
2976 | intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1; | ||
2977 | intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo); | 2977 | intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo); |
2978 | if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) | 2978 | if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv)) |
2979 | goto err_i2c_bus; | 2979 | goto err_i2c_bus; |
2980 | 2980 | ||
2981 | /* encoder type will be decided later */ | 2981 | /* encoder type will be decided later */ |
2982 | intel_encoder = &intel_sdvo->base; | 2982 | intel_encoder = &intel_sdvo->base; |
2983 | intel_encoder->type = INTEL_OUTPUT_SDVO; | 2983 | intel_encoder->type = INTEL_OUTPUT_SDVO; |
2984 | intel_encoder->port = port; | 2984 | intel_encoder->port = port; |
2985 | drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, | 2985 | drm_encoder_init(&dev_priv->drm, &intel_encoder->base, |
2986 | &intel_sdvo_enc_funcs, 0, | ||
2986 | "SDVO %c", port_name(port)); | 2987 | "SDVO %c", port_name(port)); |
2987 | 2988 | ||
2988 | /* Read the regs to test if we can talk to the device */ | 2989 | /* Read the regs to test if we can talk to the device */ |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index ff766c0cb873..7031bc733d97 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -203,8 +203,8 @@ skl_update_plane(struct drm_plane *drm_plane, | |||
203 | struct drm_i915_private *dev_priv = to_i915(dev); | 203 | struct drm_i915_private *dev_priv = to_i915(dev); |
204 | struct intel_plane *intel_plane = to_intel_plane(drm_plane); | 204 | struct intel_plane *intel_plane = to_intel_plane(drm_plane); |
205 | struct drm_framebuffer *fb = plane_state->base.fb; | 205 | struct drm_framebuffer *fb = plane_state->base.fb; |
206 | const int pipe = intel_plane->pipe; | 206 | enum plane_id plane_id = intel_plane->id; |
207 | const int plane = intel_plane->plane + 1; | 207 | enum pipe pipe = intel_plane->pipe; |
208 | u32 plane_ctl; | 208 | u32 plane_ctl; |
209 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; | 209 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; |
210 | u32 surf_addr = plane_state->main.offset; | 210 | u32 surf_addr = plane_state->main.offset; |
@@ -229,9 +229,9 @@ skl_update_plane(struct drm_plane *drm_plane, | |||
229 | plane_ctl |= skl_plane_ctl_rotation(rotation); | 229 | plane_ctl |= skl_plane_ctl_rotation(rotation); |
230 | 230 | ||
231 | if (key->flags) { | 231 | if (key->flags) { |
232 | I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); | 232 | I915_WRITE(PLANE_KEYVAL(pipe, plane_id), key->min_value); |
233 | I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); | 233 | I915_WRITE(PLANE_KEYMAX(pipe, plane_id), key->max_value); |
234 | I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask); | 234 | I915_WRITE(PLANE_KEYMSK(pipe, plane_id), key->channel_mask); |
235 | } | 235 | } |
236 | 236 | ||
237 | if (key->flags & I915_SET_COLORKEY_DESTINATION) | 237 | if (key->flags & I915_SET_COLORKEY_DESTINATION) |
@@ -245,36 +245,36 @@ skl_update_plane(struct drm_plane *drm_plane, | |||
245 | crtc_w--; | 245 | crtc_w--; |
246 | crtc_h--; | 246 | crtc_h--; |
247 | 247 | ||
248 | I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); | 248 | I915_WRITE(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); |
249 | I915_WRITE(PLANE_STRIDE(pipe, plane), stride); | 249 | I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride); |
250 | I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w); | 250 | I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); |
251 | 251 | ||
252 | /* program plane scaler */ | 252 | /* program plane scaler */ |
253 | if (plane_state->scaler_id >= 0) { | 253 | if (plane_state->scaler_id >= 0) { |
254 | int scaler_id = plane_state->scaler_id; | 254 | int scaler_id = plane_state->scaler_id; |
255 | const struct intel_scaler *scaler; | 255 | const struct intel_scaler *scaler; |
256 | 256 | ||
257 | DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, | 257 | DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", |
258 | PS_PLANE_SEL(plane)); | 258 | plane_id, PS_PLANE_SEL(plane_id)); |
259 | 259 | ||
260 | scaler = &crtc_state->scaler_state.scalers[scaler_id]; | 260 | scaler = &crtc_state->scaler_state.scalers[scaler_id]; |
261 | 261 | ||
262 | I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), | 262 | I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), |
263 | PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode); | 263 | PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode); |
264 | I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); | 264 | I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); |
265 | I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); | 265 | I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); |
266 | I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), | 266 | I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), |
267 | ((crtc_w + 1) << 16)|(crtc_h + 1)); | 267 | ((crtc_w + 1) << 16)|(crtc_h + 1)); |
268 | 268 | ||
269 | I915_WRITE(PLANE_POS(pipe, plane), 0); | 269 | I915_WRITE(PLANE_POS(pipe, plane_id), 0); |
270 | } else { | 270 | } else { |
271 | I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x); | 271 | I915_WRITE(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); |
272 | } | 272 | } |
273 | 273 | ||
274 | I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); | 274 | I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl); |
275 | I915_WRITE(PLANE_SURF(pipe, plane), | 275 | I915_WRITE(PLANE_SURF(pipe, plane_id), |
276 | intel_fb_gtt_offset(fb, rotation) + surf_addr); | 276 | intel_fb_gtt_offset(fb, rotation) + surf_addr); |
277 | POSTING_READ(PLANE_SURF(pipe, plane)); | 277 | POSTING_READ(PLANE_SURF(pipe, plane_id)); |
278 | } | 278 | } |
279 | 279 | ||
280 | static void | 280 | static void |
@@ -283,20 +283,20 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) | |||
283 | struct drm_device *dev = dplane->dev; | 283 | struct drm_device *dev = dplane->dev; |
284 | struct drm_i915_private *dev_priv = to_i915(dev); | 284 | struct drm_i915_private *dev_priv = to_i915(dev); |
285 | struct intel_plane *intel_plane = to_intel_plane(dplane); | 285 | struct intel_plane *intel_plane = to_intel_plane(dplane); |
286 | const int pipe = intel_plane->pipe; | 286 | enum plane_id plane_id = intel_plane->id; |
287 | const int plane = intel_plane->plane + 1; | 287 | enum pipe pipe = intel_plane->pipe; |
288 | 288 | ||
289 | I915_WRITE(PLANE_CTL(pipe, plane), 0); | 289 | I915_WRITE(PLANE_CTL(pipe, plane_id), 0); |
290 | 290 | ||
291 | I915_WRITE(PLANE_SURF(pipe, plane), 0); | 291 | I915_WRITE(PLANE_SURF(pipe, plane_id), 0); |
292 | POSTING_READ(PLANE_SURF(pipe, plane)); | 292 | POSTING_READ(PLANE_SURF(pipe, plane_id)); |
293 | } | 293 | } |
294 | 294 | ||
295 | static void | 295 | static void |
296 | chv_update_csc(struct intel_plane *intel_plane, uint32_t format) | 296 | chv_update_csc(struct intel_plane *intel_plane, uint32_t format) |
297 | { | 297 | { |
298 | struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); | 298 | struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); |
299 | int plane = intel_plane->plane; | 299 | enum plane_id plane_id = intel_plane->id; |
300 | 300 | ||
301 | /* Seems RGB data bypasses the CSC always */ | 301 | /* Seems RGB data bypasses the CSC always */ |
302 | if (!format_is_yuv(format)) | 302 | if (!format_is_yuv(format)) |
@@ -312,23 +312,23 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format) | |||
312 | * Cb and Cr apparently come in as signed already, so no | 312 | * Cb and Cr apparently come in as signed already, so no |
313 | * need for any offset. For Y we need to remove the offset. | 313 | * need for any offset. For Y we need to remove the offset. |
314 | */ | 314 | */ |
315 | I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64)); | 315 | I915_WRITE(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64)); |
316 | I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0)); | 316 | I915_WRITE(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); |
317 | I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0)); | 317 | I915_WRITE(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); |
318 | 318 | ||
319 | I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537)); | 319 | I915_WRITE(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537)); |
320 | I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0)); | 320 | I915_WRITE(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0)); |
321 | I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769)); | 321 | I915_WRITE(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769)); |
322 | I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0)); | 322 | I915_WRITE(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0)); |
323 | I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263)); | 323 | I915_WRITE(SPCSCC8(plane_id), SPCSC_C0(8263)); |
324 | 324 | ||
325 | I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64)); | 325 | I915_WRITE(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64)); |
326 | I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); | 326 | I915_WRITE(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); |
327 | I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); | 327 | I915_WRITE(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); |
328 | 328 | ||
329 | I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); | 329 | I915_WRITE(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); |
330 | I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); | 330 | I915_WRITE(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); |
331 | I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); | 331 | I915_WRITE(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); |
332 | } | 332 | } |
333 | 333 | ||
334 | static void | 334 | static void |
@@ -340,8 +340,8 @@ vlv_update_plane(struct drm_plane *dplane, | |||
340 | struct drm_i915_private *dev_priv = to_i915(dev); | 340 | struct drm_i915_private *dev_priv = to_i915(dev); |
341 | struct intel_plane *intel_plane = to_intel_plane(dplane); | 341 | struct intel_plane *intel_plane = to_intel_plane(dplane); |
342 | struct drm_framebuffer *fb = plane_state->base.fb; | 342 | struct drm_framebuffer *fb = plane_state->base.fb; |
343 | int pipe = intel_plane->pipe; | 343 | enum pipe pipe = intel_plane->pipe; |
344 | int plane = intel_plane->plane; | 344 | enum plane_id plane_id = intel_plane->id; |
345 | u32 sprctl; | 345 | u32 sprctl; |
346 | u32 sprsurf_offset, linear_offset; | 346 | u32 sprsurf_offset, linear_offset; |
347 | unsigned int rotation = plane_state->base.rotation; | 347 | unsigned int rotation = plane_state->base.rotation; |
@@ -434,9 +434,9 @@ vlv_update_plane(struct drm_plane *dplane, | |||
434 | linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); | 434 | linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); |
435 | 435 | ||
436 | if (key->flags) { | 436 | if (key->flags) { |
437 | I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value); | 437 | I915_WRITE(SPKEYMINVAL(pipe, plane_id), key->min_value); |
438 | I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value); | 438 | I915_WRITE(SPKEYMAXVAL(pipe, plane_id), key->max_value); |
439 | I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask); | 439 | I915_WRITE(SPKEYMSK(pipe, plane_id), key->channel_mask); |
440 | } | 440 | } |
441 | 441 | ||
442 | if (key->flags & I915_SET_COLORKEY_SOURCE) | 442 | if (key->flags & I915_SET_COLORKEY_SOURCE) |
@@ -445,21 +445,21 @@ vlv_update_plane(struct drm_plane *dplane, | |||
445 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) | 445 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) |
446 | chv_update_csc(intel_plane, fb->format->format); | 446 | chv_update_csc(intel_plane, fb->format->format); |
447 | 447 | ||
448 | I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); | 448 | I915_WRITE(SPSTRIDE(pipe, plane_id), fb->pitches[0]); |
449 | I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); | 449 | I915_WRITE(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); |
450 | 450 | ||
451 | if (fb->modifier == I915_FORMAT_MOD_X_TILED) | 451 | if (fb->modifier == I915_FORMAT_MOD_X_TILED) |
452 | I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); | 452 | I915_WRITE(SPTILEOFF(pipe, plane_id), (y << 16) | x); |
453 | else | 453 | else |
454 | I915_WRITE(SPLINOFF(pipe, plane), linear_offset); | 454 | I915_WRITE(SPLINOFF(pipe, plane_id), linear_offset); |
455 | 455 | ||
456 | I915_WRITE(SPCONSTALPHA(pipe, plane), 0); | 456 | I915_WRITE(SPCONSTALPHA(pipe, plane_id), 0); |
457 | 457 | ||
458 | I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); | 458 | I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); |
459 | I915_WRITE(SPCNTR(pipe, plane), sprctl); | 459 | I915_WRITE(SPCNTR(pipe, plane_id), sprctl); |
460 | I915_WRITE(SPSURF(pipe, plane), | 460 | I915_WRITE(SPSURF(pipe, plane_id), |
461 | intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); | 461 | intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); |
462 | POSTING_READ(SPSURF(pipe, plane)); | 462 | POSTING_READ(SPSURF(pipe, plane_id)); |
463 | } | 463 | } |
464 | 464 | ||
465 | static void | 465 | static void |
@@ -468,13 +468,13 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) | |||
468 | struct drm_device *dev = dplane->dev; | 468 | struct drm_device *dev = dplane->dev; |
469 | struct drm_i915_private *dev_priv = to_i915(dev); | 469 | struct drm_i915_private *dev_priv = to_i915(dev); |
470 | struct intel_plane *intel_plane = to_intel_plane(dplane); | 470 | struct intel_plane *intel_plane = to_intel_plane(dplane); |
471 | int pipe = intel_plane->pipe; | 471 | enum pipe pipe = intel_plane->pipe; |
472 | int plane = intel_plane->plane; | 472 | enum plane_id plane_id = intel_plane->id; |
473 | 473 | ||
474 | I915_WRITE(SPCNTR(pipe, plane), 0); | 474 | I915_WRITE(SPCNTR(pipe, plane_id), 0); |
475 | 475 | ||
476 | I915_WRITE(SPSURF(pipe, plane), 0); | 476 | I915_WRITE(SPSURF(pipe, plane_id), 0); |
477 | POSTING_READ(SPSURF(pipe, plane)); | 477 | POSTING_READ(SPSURF(pipe, plane_id)); |
478 | } | 478 | } |
479 | 479 | ||
480 | static void | 480 | static void |
@@ -1112,6 +1112,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, | |||
1112 | 1112 | ||
1113 | intel_plane->pipe = pipe; | 1113 | intel_plane->pipe = pipe; |
1114 | intel_plane->plane = plane; | 1114 | intel_plane->plane = plane; |
1115 | intel_plane->id = PLANE_SPRITE0 + plane; | ||
1115 | intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane); | 1116 | intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane); |
1116 | intel_plane->check_plane = intel_check_sprite_plane; | 1117 | intel_plane->check_plane = intel_check_sprite_plane; |
1117 | 1118 | ||
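The sprite hunks replace raw plane indices with enum plane_id, so the same register macros index every plane type uniformly instead of needing the old plane + 1 fixup. The mapping set up above, restated:

    /* Sprite N on a pipe is PLANE_SPRITE0 + N; register macros such as
     * PLANE_CTL(pipe, plane_id) take the id directly, no +1 fixup. */
    intel_plane->id = PLANE_SPRITE0 + plane;
    I915_WRITE(PLANE_CTL(pipe, intel_plane->id), plane_ctl);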
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 78cdfc6833d6..eb692e4ffe01 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1537,9 +1537,9 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = { | |||
1537 | }; | 1537 | }; |
1538 | 1538 | ||
1539 | void | 1539 | void |
1540 | intel_tv_init(struct drm_device *dev) | 1540 | intel_tv_init(struct drm_i915_private *dev_priv) |
1541 | { | 1541 | { |
1542 | struct drm_i915_private *dev_priv = to_i915(dev); | 1542 | struct drm_device *dev = &dev_priv->drm; |
1543 | struct drm_connector *connector; | 1543 | struct drm_connector *connector; |
1544 | struct intel_tv *intel_tv; | 1544 | struct intel_tv *intel_tv; |
1545 | struct intel_encoder *intel_encoder; | 1545 | struct intel_encoder *intel_encoder; |
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c new file mode 100644 index 000000000000..c6be35220955 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_uc.c | |||
@@ -0,0 +1,142 @@ | |||
1 | /* | ||
2 | * Copyright © 2016 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include "i915_drv.h" | ||
26 | #include "intel_uc.h" | ||
27 | |||
28 | void intel_uc_init_early(struct drm_i915_private *dev_priv) | ||
29 | { | ||
30 | mutex_init(&dev_priv->guc.send_mutex); | ||
31 | } | ||
32 | |||
33 | /* | ||
34 | * Read GuC command/status register (SOFT_SCRATCH_0) | ||
35 | * Return true if it contains a response rather than a command | ||
36 | */ | ||
37 | static bool intel_guc_recv(struct intel_guc *guc, u32 *status) | ||
38 | { | ||
39 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
40 | |||
41 | u32 val = I915_READ(SOFT_SCRATCH(0)); | ||
42 | *status = val; | ||
43 | return INTEL_GUC_RECV_IS_RESPONSE(val); | ||
44 | } | ||
45 | |||
46 | int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) | ||
47 | { | ||
48 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
49 | u32 status; | ||
50 | int i; | ||
51 | int ret; | ||
52 | |||
53 | if (WARN_ON(len < 1 || len > 15)) | ||
54 | return -EINVAL; | ||
55 | |||
56 | mutex_lock(&guc->send_mutex); | ||
57 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
58 | |||
59 | dev_priv->guc.action_count += 1; | ||
60 | dev_priv->guc.action_cmd = action[0]; | ||
61 | |||
62 | for (i = 0; i < len; i++) | ||
63 | I915_WRITE(SOFT_SCRATCH(i), action[i]); | ||
64 | |||
65 | POSTING_READ(SOFT_SCRATCH(i - 1)); | ||
66 | |||
67 | I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); | ||
68 | |||
69 | /* | ||
70 | * Fast commands should complete in less than 10us, so sample quickly | ||
71 | * up to that length of time, then switch to a slower sleep-wait loop. | ||
72 | * No intel_guc_send command should ever take longer than 10ms. | ||
73 | */ | ||
74 | ret = wait_for_us(intel_guc_recv(guc, &status), 10); | ||
75 | if (ret) | ||
76 | ret = wait_for(intel_guc_recv(guc, &status), 10); | ||
77 | if (status != INTEL_GUC_STATUS_SUCCESS) { | ||
78 | /* | ||
79 | * Either the GuC explicitly returned an error (which | ||
80 | * we convert to -EIO here) or no response at all was | ||
81 | * received within the timeout limit (-ETIMEDOUT) | ||
82 | */ | ||
83 | if (ret != -ETIMEDOUT) | ||
84 | ret = -EIO; | ||
85 | |||
86 | DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;" | ||
87 | " ret=%d status=0x%08X response=0x%08X\n", | ||
88 | action[0], ret, status, I915_READ(SOFT_SCRATCH(15))); | ||
89 | |||
90 | dev_priv->guc.action_fail += 1; | ||
91 | dev_priv->guc.action_err = ret; | ||
92 | } | ||
93 | dev_priv->guc.action_status = status; | ||
94 | |||
95 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
96 | mutex_unlock(&guc->send_mutex); | ||
97 | |||
98 | return ret; | ||
99 | } | ||
100 | |||
101 | int intel_guc_sample_forcewake(struct intel_guc *guc) | ||
102 | { | ||
103 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
104 | u32 action[2]; | ||
105 | |||
106 | action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; | ||
107 | /* WaRsDisableCoarsePowerGating:skl,bxt */ | ||
108 | if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) | ||
109 | action[1] = 0; | ||
110 | else | ||
111 | /* bits 0 and 1 are for the Render and Media domains respectively */ | ||
112 | action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; | ||
113 | |||
114 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); | ||
115 | } | ||
116 | |||
117 | int intel_guc_log_flush_complete(struct intel_guc *guc) | ||
118 | { | ||
119 | u32 action[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE }; | ||
120 | |||
121 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); | ||
122 | } | ||
123 | |||
124 | int intel_guc_log_flush(struct intel_guc *guc) | ||
125 | { | ||
126 | u32 action[] = { | ||
127 | INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, | ||
128 | 0 | ||
129 | }; | ||
130 | |||
131 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); | ||
132 | } | ||
133 | |||
134 | int intel_guc_log_control(struct intel_guc *guc, u32 control_val) | ||
135 | { | ||
136 | u32 action[] = { | ||
137 | INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, | ||
138 | control_val | ||
139 | }; | ||
140 | |||
141 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); | ||
142 | } | ||
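
The helpers in this new file all share one calling convention: element 0 of a fixed-size u32 action array carries the action opcode, any parameters follow, and intel_guc_send() handles the scratch-register handshake, the two-stage response wait (a 10us busy-poll, then a 10ms sleep-wait), and the send_mutex/forcewake bracketing. A minimal sketch of a caller in that style; the opcode 0xdead0001 is a made-up placeholder, not a real GuC action:

static int intel_guc_example_action(struct intel_guc *guc, u32 param)
{
	u32 action[] = {
		0xdead0001,	/* hypothetical opcode, illustration only */
		param,		/* action-specific payload */
	};

	/* Locking, forcewake and the response poll happen inside */
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
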
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_uc.h index 0053258e03d3..9490a8e049c3 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_uc.h | |||
@@ -21,13 +21,15 @@ | |||
21 | * IN THE SOFTWARE. | 21 | * IN THE SOFTWARE. |
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | #ifndef _INTEL_GUC_H_ | 24 | #ifndef _INTEL_UC_H_ |
25 | #define _INTEL_GUC_H_ | 25 | #define _INTEL_UC_H_ |
26 | 26 | ||
27 | #include "intel_guc_fwif.h" | 27 | #include "intel_guc_fwif.h" |
28 | #include "i915_guc_reg.h" | 28 | #include "i915_guc_reg.h" |
29 | #include "intel_ringbuffer.h" | 29 | #include "intel_ringbuffer.h" |
30 | 30 | ||
31 | #include "i915_vma.h" | ||
32 | |||
31 | struct drm_i915_gem_request; | 33 | struct drm_i915_gem_request; |
32 | 34 | ||
33 | /* | 35 | /* |
@@ -74,7 +76,7 @@ struct i915_guc_client { | |||
74 | uint32_t proc_desc_offset; | 76 | uint32_t proc_desc_offset; |
75 | 77 | ||
76 | uint32_t doorbell_offset; | 78 | uint32_t doorbell_offset; |
77 | uint32_t cookie; | 79 | uint32_t doorbell_cookie; |
78 | uint16_t doorbell_id; | 80 | uint16_t doorbell_id; |
79 | uint16_t padding[3]; /* Maintain alignment */ | 81 | uint16_t padding[3]; /* Maintain alignment */ |
80 | 82 | ||
@@ -103,7 +105,6 @@ enum intel_guc_fw_status { | |||
103 | * of fetching, caching, and loading the firmware image into the GuC. | 105 | * of fetching, caching, and loading the firmware image into the GuC. |
104 | */ | 106 | */ |
105 | struct intel_guc_fw { | 107 | struct intel_guc_fw { |
106 | struct drm_device * guc_dev; | ||
107 | const char * guc_fw_path; | 108 | const char * guc_fw_path; |
108 | size_t guc_fw_size; | 109 | size_t guc_fw_size; |
109 | struct drm_i915_gem_object * guc_fw_obj; | 110 | struct drm_i915_gem_object * guc_fw_obj; |
@@ -143,7 +144,7 @@ struct intel_guc { | |||
143 | struct intel_guc_fw guc_fw; | 144 | struct intel_guc_fw guc_fw; |
144 | struct intel_guc_log log; | 145 | struct intel_guc_log log; |
145 | 146 | ||
146 | /* GuC2Host interrupt related state */ | 147 | /* intel_guc_recv interrupt related state */ |
147 | bool interrupts_enabled; | 148 | bool interrupts_enabled; |
148 | 149 | ||
149 | struct i915_vma *ads_vma; | 150 | struct i915_vma *ads_vma; |
@@ -165,17 +166,25 @@ struct intel_guc { | |||
165 | uint64_t submissions[I915_NUM_ENGINES]; | 166 | uint64_t submissions[I915_NUM_ENGINES]; |
166 | uint32_t last_seqno[I915_NUM_ENGINES]; | 167 | uint32_t last_seqno[I915_NUM_ENGINES]; |
167 | 168 | ||
168 | /* To serialize the Host2GuC actions */ | 169 | /* To serialize the intel_guc_send actions */ |
169 | struct mutex action_lock; | 170 | struct mutex send_mutex; |
170 | }; | 171 | }; |
171 | 172 | ||
173 | /* intel_uc.c */ | ||
174 | void intel_uc_init_early(struct drm_i915_private *dev_priv); | ||
175 | int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len); | ||
176 | int intel_guc_sample_forcewake(struct intel_guc *guc); | ||
177 | int intel_guc_log_flush_complete(struct intel_guc *guc); | ||
178 | int intel_guc_log_flush(struct intel_guc *guc); | ||
179 | int intel_guc_log_control(struct intel_guc *guc, u32 control_val); | ||
180 | |||
172 | /* intel_guc_loader.c */ | 181 | /* intel_guc_loader.c */ |
173 | extern void intel_guc_init(struct drm_device *dev); | 182 | extern void intel_guc_init(struct drm_i915_private *dev_priv); |
174 | extern int intel_guc_setup(struct drm_device *dev); | 183 | extern int intel_guc_setup(struct drm_i915_private *dev_priv); |
175 | extern void intel_guc_fini(struct drm_device *dev); | 184 | extern void intel_guc_fini(struct drm_i915_private *dev_priv); |
176 | extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); | 185 | extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); |
177 | extern int intel_guc_suspend(struct drm_device *dev); | 186 | extern int intel_guc_suspend(struct drm_i915_private *dev_priv); |
178 | extern int intel_guc_resume(struct drm_device *dev); | 187 | extern int intel_guc_resume(struct drm_i915_private *dev_priv); |
179 | 188 | ||
180 | /* i915_guc_submission.c */ | 189 | /* i915_guc_submission.c */ |
181 | int i915_guc_submission_init(struct drm_i915_private *dev_priv); | 190 | int i915_guc_submission_init(struct drm_i915_private *dev_priv); |
@@ -190,4 +199,12 @@ void i915_guc_register(struct drm_i915_private *dev_priv); | |||
190 | void i915_guc_unregister(struct drm_i915_private *dev_priv); | 199 | void i915_guc_unregister(struct drm_i915_private *dev_priv); |
191 | int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val); | 200 | int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val); |
192 | 201 | ||
202 | static inline u32 guc_ggtt_offset(struct i915_vma *vma) | ||
203 | { | ||
204 | u32 offset = i915_ggtt_offset(vma); | ||
205 | GEM_BUG_ON(offset < GUC_WOPCM_TOP); | ||
206 | GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP)); | ||
207 | return offset; | ||
208 | } | ||
209 | |||
193 | #endif | 210 | #endif |
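
guc_ggtt_offset() above captures the addressing constraint that the GuC can only see GGTT offsets above the WOPCM region (GUC_WOPCM_TOP) and below GUC_GGTT_TOP; the GEM_BUG_ONs make a debug build trap any VMA pinned outside that window. A sketch of the intended call-site pattern, using the ads_vma field from struct intel_guc as a stand-in for any GuC-visible VMA:

static u32 guc_example_ads_offset(struct intel_guc *guc)
{
	/* Asserts the VMA lies in the GuC-addressable GGTT window */
	return guc_ggtt_offset(guc->ads_vma);
}
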
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 0bffd3f0c15d..abe08885a5ba 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -421,8 +421,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, | |||
421 | GT_FIFO_CTL_RC6_POLICY_STALL); | 421 | GT_FIFO_CTL_RC6_POLICY_STALL); |
422 | } | 422 | } |
423 | 423 | ||
424 | /* Enable Decoupled MMIO only on BXT C stepping onwards */ | 424 | if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST)) |
425 | if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) | ||
426 | info->has_decoupled_mmio = false; | 425 | info->has_decoupled_mmio = false; |
427 | 426 | ||
428 | intel_uncore_forcewake_reset(dev_priv, restore_forcewake); | 427 | intel_uncore_forcewake_reset(dev_priv, restore_forcewake); |
@@ -626,7 +625,14 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) | |||
626 | dev_priv->uncore.fw_domains_table_entries, | 625 | dev_priv->uncore.fw_domains_table_entries, |
627 | fw_range_cmp); | 626 | fw_range_cmp); |
628 | 627 | ||
629 | return entry ? entry->domains : 0; | 628 | if (!entry) |
629 | return 0; | ||
630 | |||
631 | WARN(entry->domains & ~dev_priv->uncore.fw_domains, | ||
632 | "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", | ||
633 | entry->domains & ~dev_priv->uncore.fw_domains, offset); | ||
634 | |||
635 | return entry->domains; | ||
630 | } | 636 | } |
631 | 637 | ||
632 | static void | 638 | static void |
@@ -1813,7 +1819,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) | |||
1813 | return ironlake_do_reset; | 1819 | return ironlake_do_reset; |
1814 | else if (IS_G4X(dev_priv)) | 1820 | else if (IS_G4X(dev_priv)) |
1815 | return g4x_do_reset; | 1821 | return g4x_do_reset; |
1816 | else if (IS_G33(dev_priv)) | 1822 | else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) |
1817 | return g33_do_reset; | 1823 | return g33_do_reset; |
1818 | else if (INTEL_INFO(dev_priv)->gen >= 3) | 1824 | else if (INTEL_INFO(dev_priv)->gen >= 3) |
1819 | return i915_do_reset; | 1825 | return i915_do_reset; |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 8886cab19f98..a92e7762f596 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
@@ -399,10 +399,12 @@ struct lvds_dvo_timing { | |||
399 | u8 vblank_hi:4; | 399 | u8 vblank_hi:4; |
400 | u8 vactive_hi:4; | 400 | u8 vactive_hi:4; |
401 | u8 hsync_off_lo; | 401 | u8 hsync_off_lo; |
402 | u8 hsync_pulse_width; | 402 | u8 hsync_pulse_width_lo; |
403 | u8 vsync_pulse_width:4; | 403 | u8 vsync_pulse_width_lo:4; |
404 | u8 vsync_off:4; | 404 | u8 vsync_off_lo:4; |
405 | u8 rsvd0:6; | 405 | u8 vsync_pulse_width_hi:2; |
406 | u8 vsync_off_hi:2; | ||
407 | u8 hsync_pulse_width_hi:2; | ||
406 | u8 hsync_off_hi:2; | 408 | u8 hsync_off_hi:2; |
407 | u8 himage_lo; | 409 | u8 himage_lo; |
408 | u8 vimage_lo; | 410 | u8 vimage_lo; |
@@ -414,7 +416,7 @@ struct lvds_dvo_timing { | |||
414 | u8 digital:2; | 416 | u8 digital:2; |
415 | u8 vsync_positive:1; | 417 | u8 vsync_positive:1; |
416 | u8 hsync_positive:1; | 418 | u8 hsync_positive:1; |
417 | u8 rsvd2:1; | 419 | u8 non_interlaced:1; |
418 | } __packed; | 420 | } __packed; |
419 | 421 | ||
420 | struct lvds_pnp_id { | 422 | struct lvds_pnp_id { |
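
The lvds_dvo_timing change above splits each sync pulse width and offset into explicit _lo/_hi parts (the high bits previously sat unused in rsvd0), so a VBT parser rebuilds the full values by shifting the _hi bits above the _lo bits. A sketch of that reassembly, under the assumption that the fields combine in the usual lo-then-hi fashion:

static u16 dvo_hsync_pulse_width(const struct lvds_dvo_timing *t)
{
	/* 8 low bits plus 2 high bits */
	return (t->hsync_pulse_width_hi << 8) | t->hsync_pulse_width_lo;
}

static u8 dvo_vsync_pulse_width(const struct lvds_dvo_timing *t)
{
	/* 4 low bits plus 2 high bits */
	return (t->vsync_pulse_width_hi << 4) | t->vsync_pulse_width_lo;
}
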
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 0d5f4268d75f..a1dd21d6b723 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h | |||
@@ -226,23 +226,18 @@ | |||
226 | INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ | 226 | INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ |
227 | INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ | 227 | INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ |
228 | 228 | ||
229 | #define INTEL_BDW_RSVDM_IDS(info) \ | 229 | #define INTEL_BDW_RSVD_IDS(info) \ |
230 | INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ | 230 | INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ |
231 | INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ | 231 | INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ |
232 | INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ | 232 | INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ |
233 | INTEL_VGA_DEVICE(0x163E, info) /* ULX */ | 233 | INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \ |
234 | |||
235 | #define INTEL_BDW_RSVDD_IDS(info) \ | ||
236 | INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ | 234 | INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ |
237 | INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ | 235 | INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ |
238 | 236 | ||
239 | #define INTEL_BDW_IDS(info) \ | 237 | #define INTEL_BDW_IDS(info) \ |
240 | INTEL_BDW_GT12_IDS(info), \ | 238 | INTEL_BDW_GT12_IDS(info), \ |
241 | INTEL_BDW_GT3_IDS(info), \ | 239 | INTEL_BDW_GT3_IDS(info), \ |
242 | INTEL_BDW_RSVDM_IDS(info), \ | 240 | INTEL_BDW_RSVD_IDS(info) |
243 | INTEL_BDW_GT12_IDS(info), \ | ||
244 | INTEL_BDW_GT3_IDS(info), \ | ||
245 | INTEL_BDW_RSVDD_IDS(info) | ||
246 | 241 | ||
247 | #define INTEL_CHV_IDS(info) \ | 242 | #define INTEL_CHV_IDS(info) \ |
248 | INTEL_VGA_DEVICE(0x22b0, info), \ | 243 | INTEL_VGA_DEVICE(0x22b0, info), \ |
@@ -270,14 +265,14 @@ | |||
270 | INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ | 265 | INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ |
271 | INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ | 266 | INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ |
272 | INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ | 267 | INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ |
273 | INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ | 268 | INTEL_VGA_DEVICE(0x192B, info) /* Halo GT3 */ \ |
274 | INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ | ||
275 | 269 | ||
276 | #define INTEL_SKL_GT4_IDS(info) \ | 270 | #define INTEL_SKL_GT4_IDS(info) \ |
277 | INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ | 271 | INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ |
278 | INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ | 272 | INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ |
279 | INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ | 273 | INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ |
280 | INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */ | 274 | INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \ |
275 | INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */ | ||
281 | 276 | ||
282 | #define INTEL_SKL_IDS(info) \ | 277 | #define INTEL_SKL_IDS(info) \ |
283 | INTEL_SKL_GT1_IDS(info), \ | 278 | INTEL_SKL_GT1_IDS(info), \ |
@@ -292,6 +287,10 @@ | |||
292 | INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ | 287 | INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ |
293 | INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ | 288 | INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ |
294 | 289 | ||
290 | #define INTEL_GLK_IDS(info) \ | ||
291 | INTEL_VGA_DEVICE(0x3184, info), \ | ||
292 | INTEL_VGA_DEVICE(0x3185, info) | ||
293 | |||
295 | #define INTEL_KBL_GT1_IDS(info) \ | 294 | #define INTEL_KBL_GT1_IDS(info) \ |
296 | INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ | 295 | INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ |
297 | INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ | 296 | INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ |
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index f49edecd66a3..b3bf717cfc45 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h | |||
@@ -3,8 +3,10 @@ | |||
3 | #ifndef _DRM_INTEL_GTT_H | 3 | #ifndef _DRM_INTEL_GTT_H |
4 | #define _DRM_INTEL_GTT_H | 4 | #define _DRM_INTEL_GTT_H |
5 | 5 | ||
6 | void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, | 6 | void intel_gtt_get(u64 *gtt_total, |
7 | phys_addr_t *mappable_base, u64 *mappable_end); | 7 | u32 *stolen_size, |
8 | phys_addr_t *mappable_base, | ||
9 | u64 *mappable_end); | ||
8 | 10 | ||
9 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, | 11 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, |
10 | struct agp_bridge_data *bridge); | 12 | struct agp_bridge_data *bridge); |
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 1c12a350eca3..da32c2f6c3f9 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h | |||
@@ -258,6 +258,7 @@ typedef struct _drm_i915_sarea { | |||
258 | #define DRM_I915_GEM_USERPTR 0x33 | 258 | #define DRM_I915_GEM_USERPTR 0x33 |
259 | #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 | 259 | #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 |
260 | #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 | 260 | #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 |
261 | #define DRM_I915_PERF_OPEN 0x36 | ||
261 | 262 | ||
262 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | 263 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
263 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | 264 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
@@ -311,6 +312,7 @@ typedef struct _drm_i915_sarea { | |||
311 | #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) | 312 | #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) |
312 | #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) | 313 | #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) |
313 | #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) | 314 | #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) |
315 | #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) | ||
314 | 316 | ||
315 | /* Allow drivers to submit batchbuffers directly to hardware, relying | 317 | /* Allow drivers to submit batchbuffers directly to hardware, relying |
316 | * on the security mechanisms provided by hardware. | 318 | * on the security mechanisms provided by hardware. |
@@ -1224,9 +1226,142 @@ struct drm_i915_gem_context_param { | |||
1224 | #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 | 1226 | #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 |
1225 | #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 | 1227 | #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 |
1226 | #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 | 1228 | #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 |
1229 | #define I915_CONTEXT_PARAM_BANNABLE 0x5 | ||
1227 | __u64 value; | 1230 | __u64 value; |
1228 | }; | 1231 | }; |
1229 | 1232 | ||
1233 | enum drm_i915_oa_format { | ||
1234 | I915_OA_FORMAT_A13 = 1, | ||
1235 | I915_OA_FORMAT_A29, | ||
1236 | I915_OA_FORMAT_A13_B8_C8, | ||
1237 | I915_OA_FORMAT_B4_C8, | ||
1238 | I915_OA_FORMAT_A45_B8_C8, | ||
1239 | I915_OA_FORMAT_B4_C8_A16, | ||
1240 | I915_OA_FORMAT_C4_B8, | ||
1241 | |||
1242 | I915_OA_FORMAT_MAX /* non-ABI */ | ||
1243 | }; | ||
1244 | |||
1245 | enum drm_i915_perf_property_id { | ||
1246 | /** | ||
1247 | * Open the stream for a specific context handle (as used with | ||
1248 | * execbuffer2). A stream opened for a specific context this way | ||
1249 | * won't typically require root privileges. | ||
1250 | */ | ||
1251 | DRM_I915_PERF_PROP_CTX_HANDLE = 1, | ||
1252 | |||
1253 | /** | ||
1254 | * A value of 1 requests the inclusion of raw OA unit reports as | ||
1255 | * part of stream samples. | ||
1256 | */ | ||
1257 | DRM_I915_PERF_PROP_SAMPLE_OA, | ||
1258 | |||
1259 | /** | ||
1260 | * The value specifies which set of OA unit metrics should be | ||
1261 | * configured, defining the contents of any OA unit reports. | ||
1262 | */ | ||
1263 | DRM_I915_PERF_PROP_OA_METRICS_SET, | ||
1264 | |||
1265 | /** | ||
1266 | * The value specifies the size and layout of OA unit reports. | ||
1267 | */ | ||
1268 | DRM_I915_PERF_PROP_OA_FORMAT, | ||
1269 | |||
1270 | /** | ||
1271 | * Specifying this property implicitly requests periodic OA unit | ||
1272 | * sampling and (at least on Haswell) the sampling frequency is derived | ||
1273 | * from this exponent as follows: | ||
1274 | * | ||
1275 | * 80ns * 2^(period_exponent + 1) | ||
1276 | */ | ||
1277 | DRM_I915_PERF_PROP_OA_EXPONENT, | ||
1278 | |||
1279 | DRM_I915_PERF_PROP_MAX /* non-ABI */ | ||
1280 | }; | ||
1281 | |||
1282 | struct drm_i915_perf_open_param { | ||
1283 | __u32 flags; | ||
1284 | #define I915_PERF_FLAG_FD_CLOEXEC (1<<0) | ||
1285 | #define I915_PERF_FLAG_FD_NONBLOCK (1<<1) | ||
1286 | #define I915_PERF_FLAG_DISABLED (1<<2) | ||
1287 | |||
1288 | /** The number of u64 (id, value) pairs */ | ||
1289 | __u32 num_properties; | ||
1290 | |||
1291 | /** | ||
1292 | * Pointer to array of u64 (id, value) pairs configuring the stream | ||
1293 | * to open. | ||
1294 | */ | ||
1295 | __u64 properties_ptr; | ||
1296 | }; | ||
1297 | |||
1298 | /** | ||
1299 | * Enable data capture for a stream that was either opened in a disabled state | ||
1300 | * via I915_PERF_FLAG_DISABLED or was later disabled via | ||
1301 | * I915_PERF_IOCTL_DISABLE. | ||
1302 | * | ||
1303 | * It is intended to be cheaper to disable and enable a stream than it may be | ||
1304 | * to close and re-open a stream with the same configuration. | ||
1305 | * | ||
1306 | * It's undefined whether any pending data for the stream will be lost. | ||
1307 | */ | ||
1308 | #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) | ||
1309 | |||
1310 | /** | ||
1311 | * Disable data capture for a stream. | ||
1312 | * | ||
1313 | * It is an error to try to read a stream that is disabled. | ||
1314 | */ | ||
1315 | #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) | ||
1316 | |||
1317 | /** | ||
1318 | * Common to all i915 perf records | ||
1319 | */ | ||
1320 | struct drm_i915_perf_record_header { | ||
1321 | __u32 type; | ||
1322 | __u16 pad; | ||
1323 | __u16 size; | ||
1324 | }; | ||
1325 | |||
1326 | enum drm_i915_perf_record_type { | ||
1327 | |||
1328 | /** | ||
1329 | * Samples are the workhorse record type whose contents are extensible | ||
1330 | * and defined when opening an i915 perf stream based on the given | ||
1331 | * properties. | ||
1332 | * | ||
1333 | * Boolean properties following the naming convention | ||
1334 | * DRM_I915_PERF_PROP_SAMPLE_xyz request the inclusion of 'xyz' data in | ||
1335 | * every sample. | ||
1336 | * | ||
1337 | * The order of these sample properties given by userspace has no | ||
1338 | * effect on the ordering of data within a sample. The order is | ||
1339 | * documented here. | ||
1340 | * | ||
1341 | * struct { | ||
1342 | * struct drm_i915_perf_record_header header; | ||
1343 | * | ||
1344 | * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA | ||
1345 | * }; | ||
1346 | */ | ||
1347 | DRM_I915_PERF_RECORD_SAMPLE = 1, | ||
1348 | |||
1349 | /* | ||
1350 | * Indicates that one or more OA reports were not written by the | ||
1351 | * hardware. This can happen for example if an MI_REPORT_PERF_COUNT | ||
1352 | * command collides with periodic sampling - which would be more likely | ||
1353 | * at higher sampling frequencies. | ||
1354 | */ | ||
1355 | DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, | ||
1356 | |||
1357 | /** | ||
1358 | * An error occurred that resulted in all pending OA reports being lost. | ||
1359 | */ | ||
1360 | DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, | ||
1361 | |||
1362 | DRM_I915_PERF_RECORD_MAX /* non-ABI */ | ||
1363 | }; | ||
1364 | |||
1230 | #if defined(__cplusplus) | 1365 | #if defined(__cplusplus) |
1231 | } | 1366 | } |
1232 | #endif | 1367 | #endif |
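
The new perf uapi above is driven entirely through a (property id, value) list handed to DRM_IOCTL_I915_PERF_OPEN, which returns a stream fd whose read() data is framed by struct drm_i915_perf_record_header. A hedged userspace sketch follows; the metrics-set ID, the OA exponent, and the <drm/i915_drm.h> include path are placeholders whose valid values depend on the device and build environment:

#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

static int example_open_oa_stream(int drm_fd)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,	/* raw OA reports in samples */
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder set ID */
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,	/* ~80ns * 2^17 period */
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	/* Returns the stream fd on success, or a negative errno value */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}

static void example_drain_stream(int stream_fd)
{
	uint8_t buf[4096];
	ssize_t len = read(stream_fd, buf, sizeof(buf));
	ssize_t off = 0;

	while (off + (ssize_t)sizeof(struct drm_i915_perf_record_header) <= len) {
		const struct drm_i915_perf_record_header *hdr =
			(const struct drm_i915_perf_record_header *)(buf + off);

		if (!hdr->size)		/* defensive: avoid spinning on junk */
			break;

		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE) {
			/* sample payload (e.g. the raw OA report) follows hdr */
		}

		off += hdr->size;	/* size is assumed to include the header */
	}
}
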