author | Dave Airlie <airlied@redhat.com> | 2018-07-18 15:46:24 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2018-07-18 15:46:30 -0400 |
commit | 539c475dadc430bd0f1601902fcacc1e55ffe85a (patch) | |
tree | 6dc3e9ca56165cb46baa84febcb885ed52452cf2 | |
parent | 0c2fd59ae315e28f8868edf80df21a502f933fec (diff) | |
parent | 82edc7e8b8c06151bdc653935bc13b83e2f0fcfa (diff) |
Merge tag 'drm-intel-next-2018-07-09' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Highlights here go to the many PSR fixes and improvements; to the Ice Lake work, with power well support and the beginning of DSI support; and to the many improvements
to execlists and interrupts for minimal latency on command submission. There were also many fixes
to selftests, mostly for cases caught by our CI.
General driver:
- Clean up AUX IRQ handling (Lucas)
- Mark expected switch fall-throughs to keep static analysis tools quiet (Gustavo); a minimal sketch of the annotation follows below
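A minimal sketch of that annotation, assuming a GCC/Clang build with -Wextra or -Wimplicit-fallthrough; the function and values are invented for illustration, only the marking style mirrors what the driver does (see e.g. the "/* else: fall through */" added to i915_gem_fault() further down):

```c
/*
 * Sketch only: with -Wextra (or -Wimplicit-fallthrough) an unannotated
 * fall-through between switch cases warns; a recognised comment, or the
 * fallthrough attribute, documents the intent and silences the warning.
 * The month arithmetic is an arbitrary example, not driver logic.
 */
#include <stdio.h>

static int days_left_in_year(int month)
{
	int days = 0;

	switch (month) {
	case 11:
		days += 30;		/* all of November... */
		/* fall through */
	case 12:
		days += 31;		/* ...plus all of December */
		break;
	default:
		days = -1;		/* only Nov/Dec handled in this sketch */
		break;
	}
	return days;
}

int main(void)
{
	printf("%d\n", days_left_in_year(11));	/* prints 61 */
	return 0;
}
```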
Gem:
- Different fixes for GuC (Chris, Anusha, Michal)
- Avoid the self-relocation BIAS if there are no relocations (Chris)
- Improve debugging of EINVAL returns and vma allocation (Chris)
- Fixes and improvements to context destruction and freeing (Chris)
- Wait for engines to idle before retiring (Chris)
- Many improvements to execlists and interrupts for minimal latency on command submission (Chris)
- Many fixes in selftests, especially for cases highlighted by CI (Chris)
- Other fixes and improvements around GGTT (Chris)
- Prevent background reaping of active objects (Chris)
Display:
- Parallel modeset cleanup to fix driver reset (Chris)
- Get AUX power domain for DP main link (Imre)
- Clean up unused PSR function pointers (Rodrigo)
- Many PSR/PSR2 fixes and improvements (DK, Jose, Tarun)
- Add a PSR1 live status (Vathsala)
- Replace old drm_*_{un/reference} calls with the put/get functions (Thomas)
- FBC fixes (Maarten)
- Abstract and document the usage of picking macros (Jani)
- Remove unnecessary check for unsupported modifiers for NV12 (DK)
- Interrupt fixes for display (Ville)
- Clean up SDVO code (Ville)
- Clean up the current DSI code (Jani)
- Remove support for legacy debugfs crc interface (Maarten)
- Simplify get_encoder_power_domains (Imre)
Icelake:
- MG PLL fixes (Imre)
- Add hw workaround for alpha blending (Vandita)
- Add power well support (Imre)
- Add Interrupt Support (Anusha)
- Start to add support for DSI on Ice Lake (Madhav)
Signed-off-by: Dave Airlie <airlied@redhat.com>
# gpg: Signature made Tue 10 Jul 2018 08:41:37 AM AEST
# gpg: using RSA key FA625F640EEB13CA
# gpg: Good signature from "Rodrigo Vivi <rodrigo.vivi@intel.com>"
# gpg: aka "Rodrigo Vivi <rodrigo.vivi@gmail.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6D20 7068 EEDD 6509 1C2C E2A3 FA62 5F64 0EEB 13CA
Link: https://patchwork.freedesktop.org/patch/msgid/20180710234349.GA16562@intel.com
76 files changed, 2728 insertions, 1868 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 9de8b1c51a5c..459f8f88a34c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM | |||
51 | 51 | ||
52 | If in doubt, say "N". | 52 | If in doubt, say "N". |
53 | 53 | ||
54 | config DRM_I915_ERRLOG_GEM | ||
55 | bool "Insert extra logging (very verbose) for common GEM errors" | ||
56 | default n | ||
57 | depends on DRM_I915_DEBUG_GEM | ||
58 | help | ||
59 | Enable additional logging that may help track down the cause of | ||
60 | principally userspace errors. | ||
61 | |||
62 | Recommended for driver developers only. | ||
63 | |||
64 | If in doubt, say "N". | ||
65 | |||
54 | config DRM_I915_TRACE_GEM | 66 | config DRM_I915_TRACE_GEM |
55 | bool "Insert extra ftrace output from the GEM internals" | 67 | bool "Insert extra ftrace output from the GEM internals" |
56 | depends on DRM_I915_DEBUG_GEM | 68 | depends on DRM_I915_DEBUG_GEM |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4c6adae23e18..5794f102f9b8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \ | |||
135 | dvo_ns2501.o \ | 135 | dvo_ns2501.o \ |
136 | dvo_sil164.o \ | 136 | dvo_sil164.o \ |
137 | dvo_tfp410.o \ | 137 | dvo_tfp410.o \ |
138 | icl_dsi.o \ | ||
138 | intel_crt.o \ | 139 | intel_crt.o \ |
139 | intel_ddi.o \ | 140 | intel_ddi.o \ |
140 | intel_dp_aux_backlight.o \ | 141 | intel_dp_aux_backlight.o \ |
141 | intel_dp_link_training.o \ | 142 | intel_dp_link_training.o \ |
142 | intel_dp_mst.o \ | 143 | intel_dp_mst.o \ |
143 | intel_dp.o \ | 144 | intel_dp.o \ |
144 | intel_dsi.o \ | ||
145 | intel_dsi_dcs_backlight.o \ | 145 | intel_dsi_dcs_backlight.o \ |
146 | intel_dsi_pll.o \ | ||
147 | intel_dsi_vbt.o \ | 146 | intel_dsi_vbt.o \ |
148 | intel_dvo.o \ | 147 | intel_dvo.o \ |
149 | intel_hdmi.o \ | 148 | intel_hdmi.o \ |
@@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \ | |||
152 | intel_lvds.o \ | 151 | intel_lvds.o \ |
153 | intel_panel.o \ | 152 | intel_panel.o \ |
154 | intel_sdvo.o \ | 153 | intel_sdvo.o \ |
155 | intel_tv.o | 154 | intel_tv.o \ |
155 | vlv_dsi.o \ | ||
156 | vlv_dsi_pll.o | ||
156 | 157 | ||
157 | # Post-mortem debug and GPU hang state capture | 158 | # Post-mortem debug and GPU hang state capture |
158 | i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o | 159 | i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 928818f218f7..b0e566956b8d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -476,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) | |||
476 | i915_gem_obj_finish_shmem_access(bb->obj); | 476 | i915_gem_obj_finish_shmem_access(bb->obj); |
477 | bb->accessing = false; | 477 | bb->accessing = false; |
478 | 478 | ||
479 | i915_vma_move_to_active(bb->vma, workload->req, 0); | 479 | ret = i915_vma_move_to_active(bb->vma, |
480 | workload->req, | ||
481 | 0); | ||
482 | if (ret) | ||
483 | goto err; | ||
480 | } | 484 | } |
481 | } | 485 | } |
482 | return 0; | 486 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c400f42a54ec..099f97ef2303 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1659,11 +1659,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused) | |||
1659 | else | 1659 | else |
1660 | seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); | 1660 | seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); |
1661 | 1661 | ||
1662 | if (fbc->work.scheduled) | ||
1663 | seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n", | ||
1664 | fbc->work.scheduled_vblank, | ||
1665 | drm_crtc_vblank_count(&fbc->crtc->base)); | ||
1666 | |||
1667 | if (intel_fbc_is_active(dev_priv)) { | 1662 | if (intel_fbc_is_active(dev_priv)) { |
1668 | u32 mask; | 1663 | u32 mask; |
1669 | 1664 | ||
@@ -2597,27 +2592,55 @@ static const struct file_operations i915_guc_log_relay_fops = { | |||
2597 | .release = i915_guc_log_relay_release, | 2592 | .release = i915_guc_log_relay_release, |
2598 | }; | 2593 | }; |
2599 | 2594 | ||
2600 | static const char *psr2_live_status(u32 val) | 2595 | static void |
2601 | { | 2596 | psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) |
2602 | static const char * const live_status[] = { | 2597 | { |
2603 | "IDLE", | 2598 | u32 val, psr_status; |
2604 | "CAPTURE", | ||
2605 | "CAPTURE_FS", | ||
2606 | "SLEEP", | ||
2607 | "BUFON_FW", | ||
2608 | "ML_UP", | ||
2609 | "SU_STANDBY", | ||
2610 | "FAST_SLEEP", | ||
2611 | "DEEP_SLEEP", | ||
2612 | "BUF_ON", | ||
2613 | "TG_ON" | ||
2614 | }; | ||
2615 | 2599 | ||
2616 | val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; | 2600 | if (dev_priv->psr.psr2_enabled) { |
2617 | if (val < ARRAY_SIZE(live_status)) | 2601 | static const char * const live_status[] = { |
2618 | return live_status[val]; | 2602 | "IDLE", |
2603 | "CAPTURE", | ||
2604 | "CAPTURE_FS", | ||
2605 | "SLEEP", | ||
2606 | "BUFON_FW", | ||
2607 | "ML_UP", | ||
2608 | "SU_STANDBY", | ||
2609 | "FAST_SLEEP", | ||
2610 | "DEEP_SLEEP", | ||
2611 | "BUF_ON", | ||
2612 | "TG_ON" | ||
2613 | }; | ||
2614 | psr_status = I915_READ(EDP_PSR2_STATUS); | ||
2615 | val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >> | ||
2616 | EDP_PSR2_STATUS_STATE_SHIFT; | ||
2617 | if (val < ARRAY_SIZE(live_status)) { | ||
2618 | seq_printf(m, "Source PSR status: 0x%x [%s]\n", | ||
2619 | psr_status, live_status[val]); | ||
2620 | return; | ||
2621 | } | ||
2622 | } else { | ||
2623 | static const char * const live_status[] = { | ||
2624 | "IDLE", | ||
2625 | "SRDONACK", | ||
2626 | "SRDENT", | ||
2627 | "BUFOFF", | ||
2628 | "BUFON", | ||
2629 | "AUXACK", | ||
2630 | "SRDOFFACK", | ||
2631 | "SRDENT_ON", | ||
2632 | }; | ||
2633 | psr_status = I915_READ(EDP_PSR_STATUS); | ||
2634 | val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >> | ||
2635 | EDP_PSR_STATUS_STATE_SHIFT; | ||
2636 | if (val < ARRAY_SIZE(live_status)) { | ||
2637 | seq_printf(m, "Source PSR status: 0x%x [%s]\n", | ||
2638 | psr_status, live_status[val]); | ||
2639 | return; | ||
2640 | } | ||
2641 | } | ||
2619 | 2642 | ||
2620 | return "unknown"; | 2643 | seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown"); |
2621 | } | 2644 | } |
2622 | 2645 | ||
2623 | static const char *psr_sink_status(u8 val) | 2646 | static const char *psr_sink_status(u8 val) |
@@ -2681,12 +2704,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) | |||
2681 | 2704 | ||
2682 | seq_printf(m, "Performance_Counter: %u\n", psrperf); | 2705 | seq_printf(m, "Performance_Counter: %u\n", psrperf); |
2683 | } | 2706 | } |
2684 | if (dev_priv->psr.psr2_enabled) { | ||
2685 | u32 psr2 = I915_READ(EDP_PSR2_STATUS); | ||
2686 | 2707 | ||
2687 | seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n", | 2708 | psr_source_status(dev_priv, m); |
2688 | psr2, psr2_live_status(psr2)); | ||
2689 | } | ||
2690 | 2709 | ||
2691 | if (dev_priv->psr.enabled) { | 2710 | if (dev_priv->psr.enabled) { |
2692 | struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux; | 2711 | struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux; |
@@ -4086,7 +4105,8 @@ fault_irq_set(struct drm_i915_private *i915, | |||
4086 | 4105 | ||
4087 | err = i915_gem_wait_for_idle(i915, | 4106 | err = i915_gem_wait_for_idle(i915, |
4088 | I915_WAIT_LOCKED | | 4107 | I915_WAIT_LOCKED | |
4089 | I915_WAIT_INTERRUPTIBLE); | 4108 | I915_WAIT_INTERRUPTIBLE, |
4109 | MAX_SCHEDULE_TIMEOUT); | ||
4090 | if (err) | 4110 | if (err) |
4091 | goto err_unlock; | 4111 | goto err_unlock; |
4092 | 4112 | ||
@@ -4191,7 +4211,8 @@ i915_drop_caches_set(void *data, u64 val) | |||
4191 | if (val & DROP_ACTIVE) | 4211 | if (val & DROP_ACTIVE) |
4192 | ret = i915_gem_wait_for_idle(dev_priv, | 4212 | ret = i915_gem_wait_for_idle(dev_priv, |
4193 | I915_WAIT_INTERRUPTIBLE | | 4213 | I915_WAIT_INTERRUPTIBLE | |
4194 | I915_WAIT_LOCKED); | 4214 | I915_WAIT_LOCKED, |
4215 | MAX_SCHEDULE_TIMEOUT); | ||
4195 | 4216 | ||
4196 | if (val & DROP_RETIRE) | 4217 | if (val & DROP_RETIRE) |
4197 | i915_retire_requests(dev_priv); | 4218 | i915_retire_requests(dev_priv); |
@@ -4799,7 +4820,6 @@ static const struct i915_debugfs_files { | |||
4799 | #endif | 4820 | #endif |
4800 | {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, | 4821 | {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, |
4801 | {"i915_next_seqno", &i915_next_seqno_fops}, | 4822 | {"i915_next_seqno", &i915_next_seqno_fops}, |
4802 | {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, | ||
4803 | {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, | 4823 | {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, |
4804 | {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, | 4824 | {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, |
4805 | {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, | 4825 | {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, |
@@ -4819,7 +4839,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) | |||
4819 | { | 4839 | { |
4820 | struct drm_minor *minor = dev_priv->drm.primary; | 4840 | struct drm_minor *minor = dev_priv->drm.primary; |
4821 | struct dentry *ent; | 4841 | struct dentry *ent; |
4822 | int ret, i; | 4842 | int i; |
4823 | 4843 | ||
4824 | ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, | 4844 | ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, |
4825 | minor->debugfs_root, to_i915(minor->dev), | 4845 | minor->debugfs_root, to_i915(minor->dev), |
@@ -4827,10 +4847,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) | |||
4827 | if (!ent) | 4847 | if (!ent) |
4828 | return -ENOMEM; | 4848 | return -ENOMEM; |
4829 | 4849 | ||
4830 | ret = intel_pipe_crc_create(minor); | ||
4831 | if (ret) | ||
4832 | return ret; | ||
4833 | |||
4834 | for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { | 4850 | for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { |
4835 | ent = debugfs_create_file(i915_debugfs_files[i].name, | 4851 | ent = debugfs_create_file(i915_debugfs_files[i].name, |
4836 | S_IRUGO | S_IWUSR, | 4852 | S_IRUGO | S_IWUSR, |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index beb0951001ce..0db3c83cce29 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1165,6 +1165,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1165 | * get lost on g4x as well, and interrupt delivery seems to stay | 1165 | * get lost on g4x as well, and interrupt delivery seems to stay |
1166 | * properly dead afterwards. So we'll just disable them for all | 1166 | * properly dead afterwards. So we'll just disable them for all |
1167 | * pre-gen5 chipsets. | 1167 | * pre-gen5 chipsets. |
1168 | * | ||
1169 | * dp aux and gmbus irq on gen4 seems to be able to generate legacy | ||
1170 | * interrupts even when in MSI mode. This results in spurious | ||
1171 | * interrupt warnings if the legacy irq no. is shared with another | ||
1172 | * device. The kernel then disables that interrupt source and so | ||
1173 | * prevents the other device from working properly. | ||
1168 | */ | 1174 | */ |
1169 | if (INTEL_GEN(dev_priv) >= 5) { | 1175 | if (INTEL_GEN(dev_priv) >= 5) { |
1170 | if (pci_enable_msi(pdev) < 0) | 1176 | if (pci_enable_msi(pdev) < 0) |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f4751b383858..eeb002a47032 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -86,8 +86,8 @@ | |||
86 | 86 | ||
87 | #define DRIVER_NAME "i915" | 87 | #define DRIVER_NAME "i915" |
88 | #define DRIVER_DESC "Intel Graphics" | 88 | #define DRIVER_DESC "Intel Graphics" |
89 | #define DRIVER_DATE "20180620" | 89 | #define DRIVER_DATE "20180709" |
90 | #define DRIVER_TIMESTAMP 1529529048 | 90 | #define DRIVER_TIMESTAMP 1531175967 |
91 | 91 | ||
92 | /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and | 92 | /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and |
93 | * WARN_ON()) for hw state sanity checks to check for unexpected conditions | 93 | * WARN_ON()) for hw state sanity checks to check for unexpected conditions |
@@ -512,6 +512,7 @@ struct intel_fbc { | |||
512 | 512 | ||
513 | bool enabled; | 513 | bool enabled; |
514 | bool active; | 514 | bool active; |
515 | bool flip_pending; | ||
515 | 516 | ||
516 | bool underrun_detected; | 517 | bool underrun_detected; |
517 | struct work_struct underrun_work; | 518 | struct work_struct underrun_work; |
@@ -579,12 +580,6 @@ struct intel_fbc { | |||
579 | unsigned int gen9_wa_cfb_stride; | 580 | unsigned int gen9_wa_cfb_stride; |
580 | } params; | 581 | } params; |
581 | 582 | ||
582 | struct intel_fbc_work { | ||
583 | bool scheduled; | ||
584 | u64 scheduled_vblank; | ||
585 | struct work_struct work; | ||
586 | } work; | ||
587 | |||
588 | const char *no_fbc_reason; | 583 | const char *no_fbc_reason; |
589 | }; | 584 | }; |
590 | 585 | ||
@@ -631,14 +626,6 @@ struct i915_psr { | |||
631 | bool debug; | 626 | bool debug; |
632 | ktime_t last_entry_attempt; | 627 | ktime_t last_entry_attempt; |
633 | ktime_t last_exit; | 628 | ktime_t last_exit; |
634 | |||
635 | void (*enable_source)(struct intel_dp *, | ||
636 | const struct intel_crtc_state *); | ||
637 | void (*disable_source)(struct intel_dp *, | ||
638 | const struct intel_crtc_state *); | ||
639 | void (*enable_sink)(struct intel_dp *); | ||
640 | void (*activate)(struct intel_dp *); | ||
641 | void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *); | ||
642 | }; | 629 | }; |
643 | 630 | ||
644 | enum intel_pch { | 631 | enum intel_pch { |
@@ -965,7 +952,7 @@ struct i915_gem_mm { | |||
965 | /** | 952 | /** |
966 | * Small stash of WC pages | 953 | * Small stash of WC pages |
967 | */ | 954 | */ |
968 | struct pagevec wc_stash; | 955 | struct pagestash wc_stash; |
969 | 956 | ||
970 | /** | 957 | /** |
971 | * tmpfs instance used for shmem backed objects | 958 | * tmpfs instance used for shmem backed objects |
@@ -1284,20 +1271,11 @@ enum intel_pipe_crc_source { | |||
1284 | INTEL_PIPE_CRC_SOURCE_MAX, | 1271 | INTEL_PIPE_CRC_SOURCE_MAX, |
1285 | }; | 1272 | }; |
1286 | 1273 | ||
1287 | struct intel_pipe_crc_entry { | ||
1288 | uint32_t frame; | ||
1289 | uint32_t crc[5]; | ||
1290 | }; | ||
1291 | |||
1292 | #define INTEL_PIPE_CRC_ENTRIES_NR 128 | 1274 | #define INTEL_PIPE_CRC_ENTRIES_NR 128 |
1293 | struct intel_pipe_crc { | 1275 | struct intel_pipe_crc { |
1294 | spinlock_t lock; | 1276 | spinlock_t lock; |
1295 | bool opened; /* exclusive access to the result file */ | ||
1296 | struct intel_pipe_crc_entry *entries; | ||
1297 | enum intel_pipe_crc_source source; | ||
1298 | int head, tail; | ||
1299 | wait_queue_head_t wq; | ||
1300 | int skipped; | 1277 | int skipped; |
1278 | enum intel_pipe_crc_source source; | ||
1301 | }; | 1279 | }; |
1302 | 1280 | ||
1303 | struct i915_frontbuffer_tracking { | 1281 | struct i915_frontbuffer_tracking { |
@@ -1757,7 +1735,6 @@ struct drm_i915_private { | |||
1757 | struct drm_atomic_state *modeset_restore_state; | 1735 | struct drm_atomic_state *modeset_restore_state; |
1758 | struct drm_modeset_acquire_ctx reset_ctx; | 1736 | struct drm_modeset_acquire_ctx reset_ctx; |
1759 | 1737 | ||
1760 | struct list_head vm_list; /* Global list of all address spaces */ | ||
1761 | struct i915_ggtt ggtt; /* VM representing the global address space */ | 1738 | struct i915_ggtt ggtt; /* VM representing the global address space */ |
1762 | 1739 | ||
1763 | struct i915_gem_mm mm; | 1740 | struct i915_gem_mm mm; |
@@ -2326,6 +2303,7 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2326 | } | 2303 | } |
2327 | 2304 | ||
2328 | #define INTEL_INFO(dev_priv) intel_info((dev_priv)) | 2305 | #define INTEL_INFO(dev_priv) intel_info((dev_priv)) |
2306 | #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) | ||
2329 | 2307 | ||
2330 | #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) | 2308 | #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) |
2331 | #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) | 2309 | #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) |
@@ -2578,16 +2556,6 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2578 | (IS_CANNONLAKE(dev_priv) || \ | 2556 | (IS_CANNONLAKE(dev_priv) || \ |
2579 | IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) | 2557 | IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) |
2580 | 2558 | ||
2581 | /* | ||
2582 | * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts | ||
2583 | * even when in MSI mode. This results in spurious interrupt warnings if the | ||
2584 | * legacy irq no. is shared with another device. The kernel then disables that | ||
2585 | * interrupt source and so prevents the other device from working properly. | ||
2586 | * | ||
2587 | * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX | ||
2588 | * interrupts. | ||
2589 | */ | ||
2590 | #define HAS_AUX_IRQ(dev_priv) true | ||
2591 | #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) | 2559 | #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) |
2592 | 2560 | ||
2593 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 2561 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
@@ -3119,9 +3087,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) | |||
3119 | } | 3087 | } |
3120 | 3088 | ||
3121 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); | 3089 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
3122 | void i915_vma_move_to_active(struct i915_vma *vma, | ||
3123 | struct i915_request *rq, | ||
3124 | unsigned int flags); | ||
3125 | int i915_gem_dumb_create(struct drm_file *file_priv, | 3090 | int i915_gem_dumb_create(struct drm_file *file_priv, |
3126 | struct drm_device *dev, | 3091 | struct drm_device *dev, |
3127 | struct drm_mode_create_dumb *args); | 3092 | struct drm_mode_create_dumb *args); |
@@ -3189,7 +3154,7 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); | |||
3189 | void i915_gem_fini(struct drm_i915_private *dev_priv); | 3154 | void i915_gem_fini(struct drm_i915_private *dev_priv); |
3190 | void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); | 3155 | void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); |
3191 | int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, | 3156 | int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, |
3192 | unsigned int flags); | 3157 | unsigned int flags, long timeout); |
3193 | int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); | 3158 | int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); |
3194 | void i915_gem_suspend_late(struct drm_i915_private *dev_priv); | 3159 | void i915_gem_suspend_late(struct drm_i915_private *dev_priv); |
3195 | void i915_gem_resume(struct drm_i915_private *dev_priv); | 3160 | void i915_gem_resume(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 858d188dd33b..b35cbfd16c9c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -837,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) | |||
837 | } | 837 | } |
838 | break; | 838 | break; |
839 | 839 | ||
840 | case I915_GEM_DOMAIN_WC: | ||
841 | wmb(); | ||
842 | break; | ||
843 | |||
840 | case I915_GEM_DOMAIN_CPU: | 844 | case I915_GEM_DOMAIN_CPU: |
841 | i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); | 845 | i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); |
842 | break; | 846 | break; |
@@ -2006,7 +2010,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) | |||
2006 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | 2010 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); |
2007 | struct i915_vma *vma; | 2011 | struct i915_vma *vma; |
2008 | pgoff_t page_offset; | 2012 | pgoff_t page_offset; |
2009 | unsigned int flags; | ||
2010 | int ret; | 2013 | int ret; |
2011 | 2014 | ||
2012 | /* We don't use vmf->pgoff since that has the fake offset */ | 2015 | /* We don't use vmf->pgoff since that has the fake offset */ |
@@ -2042,27 +2045,34 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) | |||
2042 | goto err_unlock; | 2045 | goto err_unlock; |
2043 | } | 2046 | } |
2044 | 2047 | ||
2045 | /* If the object is smaller than a couple of partial vma, it is | ||
2046 | * not worth only creating a single partial vma - we may as well | ||
2047 | * clear enough space for the full object. | ||
2048 | */ | ||
2049 | flags = PIN_MAPPABLE; | ||
2050 | if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) | ||
2051 | flags |= PIN_NONBLOCK | PIN_NONFAULT; | ||
2052 | 2048 | ||
2053 | /* Now pin it into the GTT as needed */ | 2049 | /* Now pin it into the GTT as needed */ |
2054 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); | 2050 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, |
2051 | PIN_MAPPABLE | | ||
2052 | PIN_NONBLOCK | | ||
2053 | PIN_NONFAULT); | ||
2055 | if (IS_ERR(vma)) { | 2054 | if (IS_ERR(vma)) { |
2056 | /* Use a partial view if it is bigger than available space */ | 2055 | /* Use a partial view if it is bigger than available space */ |
2057 | struct i915_ggtt_view view = | 2056 | struct i915_ggtt_view view = |
2058 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); | 2057 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); |
2058 | unsigned int flags; | ||
2059 | 2059 | ||
2060 | /* Userspace is now writing through an untracked VMA, abandon | 2060 | flags = PIN_MAPPABLE; |
2061 | if (view.type == I915_GGTT_VIEW_NORMAL) | ||
2062 | flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ | ||
2063 | |||
2064 | /* | ||
2065 | * Userspace is now writing through an untracked VMA, abandon | ||
2061 | * all hope that the hardware is able to track future writes. | 2066 | * all hope that the hardware is able to track future writes. |
2062 | */ | 2067 | */ |
2063 | obj->frontbuffer_ggtt_origin = ORIGIN_CPU; | 2068 | obj->frontbuffer_ggtt_origin = ORIGIN_CPU; |
2064 | 2069 | ||
2065 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); | 2070 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); |
2071 | if (IS_ERR(vma) && !view.type) { | ||
2072 | flags = PIN_MAPPABLE; | ||
2073 | view.type = I915_GGTT_VIEW_PARTIAL; | ||
2074 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); | ||
2075 | } | ||
2066 | } | 2076 | } |
2067 | if (IS_ERR(vma)) { | 2077 | if (IS_ERR(vma)) { |
2068 | ret = PTR_ERR(vma); | 2078 | ret = PTR_ERR(vma); |
@@ -2114,6 +2124,7 @@ err: | |||
2114 | */ | 2124 | */ |
2115 | if (!i915_terminally_wedged(&dev_priv->gpu_error)) | 2125 | if (!i915_terminally_wedged(&dev_priv->gpu_error)) |
2116 | return VM_FAULT_SIGBUS; | 2126 | return VM_FAULT_SIGBUS; |
2127 | /* else: fall through */ | ||
2117 | case -EAGAIN: | 2128 | case -EAGAIN: |
2118 | /* | 2129 | /* |
2119 | * EAGAIN means the gpu is hung and we'll wait for the error | 2130 | * EAGAIN means the gpu is hung and we'll wait for the error |
@@ -2256,7 +2267,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | |||
2256 | 2267 | ||
2257 | /* Attempt to reap some mmap space from dead objects */ | 2268 | /* Attempt to reap some mmap space from dead objects */ |
2258 | do { | 2269 | do { |
2259 | err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); | 2270 | err = i915_gem_wait_for_idle(dev_priv, |
2271 | I915_WAIT_INTERRUPTIBLE, | ||
2272 | MAX_SCHEDULE_TIMEOUT); | ||
2260 | if (err) | 2273 | if (err) |
2261 | break; | 2274 | break; |
2262 | 2275 | ||
@@ -3074,25 +3087,6 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) | |||
3074 | return err; | 3087 | return err; |
3075 | } | 3088 | } |
3076 | 3089 | ||
3077 | static void skip_request(struct i915_request *request) | ||
3078 | { | ||
3079 | void *vaddr = request->ring->vaddr; | ||
3080 | u32 head; | ||
3081 | |||
3082 | /* As this request likely depends on state from the lost | ||
3083 | * context, clear out all the user operations leaving the | ||
3084 | * breadcrumb at the end (so we get the fence notifications). | ||
3085 | */ | ||
3086 | head = request->head; | ||
3087 | if (request->postfix < head) { | ||
3088 | memset(vaddr + head, 0, request->ring->size - head); | ||
3089 | head = 0; | ||
3090 | } | ||
3091 | memset(vaddr + head, 0, request->postfix - head); | ||
3092 | |||
3093 | dma_fence_set_error(&request->fence, -EIO); | ||
3094 | } | ||
3095 | |||
3096 | static void engine_skip_context(struct i915_request *request) | 3090 | static void engine_skip_context(struct i915_request *request) |
3097 | { | 3091 | { |
3098 | struct intel_engine_cs *engine = request->engine; | 3092 | struct intel_engine_cs *engine = request->engine; |
@@ -3103,14 +3097,14 @@ static void engine_skip_context(struct i915_request *request) | |||
3103 | GEM_BUG_ON(timeline == &engine->timeline); | 3097 | GEM_BUG_ON(timeline == &engine->timeline); |
3104 | 3098 | ||
3105 | spin_lock_irqsave(&engine->timeline.lock, flags); | 3099 | spin_lock_irqsave(&engine->timeline.lock, flags); |
3106 | spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING); | 3100 | spin_lock(&timeline->lock); |
3107 | 3101 | ||
3108 | list_for_each_entry_continue(request, &engine->timeline.requests, link) | 3102 | list_for_each_entry_continue(request, &engine->timeline.requests, link) |
3109 | if (request->gem_context == hung_ctx) | 3103 | if (request->gem_context == hung_ctx) |
3110 | skip_request(request); | 3104 | i915_request_skip(request, -EIO); |
3111 | 3105 | ||
3112 | list_for_each_entry(request, &timeline->requests, link) | 3106 | list_for_each_entry(request, &timeline->requests, link) |
3113 | skip_request(request); | 3107 | i915_request_skip(request, -EIO); |
3114 | 3108 | ||
3115 | spin_unlock(&timeline->lock); | 3109 | spin_unlock(&timeline->lock); |
3116 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | 3110 | spin_unlock_irqrestore(&engine->timeline.lock, flags); |
@@ -3153,7 +3147,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine, | |||
3153 | 3147 | ||
3154 | if (stalled) { | 3148 | if (stalled) { |
3155 | i915_gem_context_mark_guilty(request->gem_context); | 3149 | i915_gem_context_mark_guilty(request->gem_context); |
3156 | skip_request(request); | 3150 | i915_request_skip(request, -EIO); |
3157 | 3151 | ||
3158 | /* If this context is now banned, skip all pending requests. */ | 3152 | /* If this context is now banned, skip all pending requests. */ |
3159 | if (i915_gem_context_is_banned(request->gem_context)) | 3153 | if (i915_gem_context_is_banned(request->gem_context)) |
@@ -3750,14 +3744,14 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
3750 | return ret; | 3744 | return ret; |
3751 | } | 3745 | } |
3752 | 3746 | ||
3753 | static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) | 3747 | static long wait_for_timeline(struct i915_timeline *tl, |
3748 | unsigned int flags, long timeout) | ||
3754 | { | 3749 | { |
3755 | struct i915_request *rq; | 3750 | struct i915_request *rq; |
3756 | long ret; | ||
3757 | 3751 | ||
3758 | rq = i915_gem_active_get_unlocked(&tl->last_request); | 3752 | rq = i915_gem_active_get_unlocked(&tl->last_request); |
3759 | if (!rq) | 3753 | if (!rq) |
3760 | return 0; | 3754 | return timeout; |
3761 | 3755 | ||
3762 | /* | 3756 | /* |
3763 | * "Race-to-idle". | 3757 | * "Race-to-idle". |
@@ -3771,10 +3765,10 @@ static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) | |||
3771 | if (flags & I915_WAIT_FOR_IDLE_BOOST) | 3765 | if (flags & I915_WAIT_FOR_IDLE_BOOST) |
3772 | gen6_rps_boost(rq, NULL); | 3766 | gen6_rps_boost(rq, NULL); |
3773 | 3767 | ||
3774 | ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT); | 3768 | timeout = i915_request_wait(rq, flags, timeout); |
3775 | i915_request_put(rq); | 3769 | i915_request_put(rq); |
3776 | 3770 | ||
3777 | return ret < 0 ? ret : 0; | 3771 | return timeout; |
3778 | } | 3772 | } |
3779 | 3773 | ||
3780 | static int wait_for_engines(struct drm_i915_private *i915) | 3774 | static int wait_for_engines(struct drm_i915_private *i915) |
@@ -3790,10 +3784,12 @@ static int wait_for_engines(struct drm_i915_private *i915) | |||
3790 | return 0; | 3784 | return 0; |
3791 | } | 3785 | } |
3792 | 3786 | ||
3793 | int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) | 3787 | int i915_gem_wait_for_idle(struct drm_i915_private *i915, |
3788 | unsigned int flags, long timeout) | ||
3794 | { | 3789 | { |
3795 | GEM_TRACE("flags=%x (%s)\n", | 3790 | GEM_TRACE("flags=%x (%s), timeout=%ld%s\n", |
3796 | flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked"); | 3791 | flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", |
3792 | timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : ""); | ||
3797 | 3793 | ||
3798 | /* If the device is asleep, we have no requests outstanding */ | 3794 | /* If the device is asleep, we have no requests outstanding */ |
3799 | if (!READ_ONCE(i915->gt.awake)) | 3795 | if (!READ_ONCE(i915->gt.awake)) |
@@ -3806,27 +3802,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) | |||
3806 | lockdep_assert_held(&i915->drm.struct_mutex); | 3802 | lockdep_assert_held(&i915->drm.struct_mutex); |
3807 | 3803 | ||
3808 | list_for_each_entry(tl, &i915->gt.timelines, link) { | 3804 | list_for_each_entry(tl, &i915->gt.timelines, link) { |
3809 | err = wait_for_timeline(tl, flags); | 3805 | timeout = wait_for_timeline(tl, flags, timeout); |
3810 | if (err) | 3806 | if (timeout < 0) |
3811 | return err; | 3807 | return timeout; |
3812 | } | 3808 | } |
3809 | |||
3810 | err = wait_for_engines(i915); | ||
3811 | if (err) | ||
3812 | return err; | ||
3813 | |||
3813 | i915_retire_requests(i915); | 3814 | i915_retire_requests(i915); |
3814 | GEM_BUG_ON(i915->gt.active_requests); | 3815 | GEM_BUG_ON(i915->gt.active_requests); |
3815 | |||
3816 | return wait_for_engines(i915); | ||
3817 | } else { | 3816 | } else { |
3818 | struct intel_engine_cs *engine; | 3817 | struct intel_engine_cs *engine; |
3819 | enum intel_engine_id id; | 3818 | enum intel_engine_id id; |
3820 | int err; | ||
3821 | 3819 | ||
3822 | for_each_engine(engine, i915, id) { | 3820 | for_each_engine(engine, i915, id) { |
3823 | err = wait_for_timeline(&engine->timeline, flags); | 3821 | struct i915_timeline *tl = &engine->timeline; |
3824 | if (err) | ||
3825 | return err; | ||
3826 | } | ||
3827 | 3822 | ||
3828 | return 0; | 3823 | timeout = wait_for_timeline(tl, flags, timeout); |
3824 | if (timeout < 0) | ||
3825 | return timeout; | ||
3826 | } | ||
3829 | } | 3827 | } |
3828 | |||
3829 | return 0; | ||
3830 | } | 3830 | } |
3831 | 3831 | ||
3832 | static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) | 3832 | static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) |
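The i915_gem_wait_for_idle() rework above threads a single timeout budget through every per-timeline wait: each wait consumes part of the budget and returns the remainder, or a negative error once it is exhausted. Below is a rough userspace sketch of that pattern; wait_one() and wait_all() are invented stand-ins for i915_request_wait() and the timeline loop, not driver code:

```c
/*
 * Sketch of the shared-timeout pattern: one budget is passed through a
 * chain of waits, each returning what is left (or a negative error), so
 * the chain as a whole honours a single timeout.
 */
#include <unistd.h>
#include <errno.h>

static long wait_one(int work_ms, long timeout_ms)
{
	if (work_ms > timeout_ms)
		return -ETIME;			/* budget exhausted */
	usleep(work_ms * 1000);			/* pretend to wait for a request */
	return timeout_ms - work_ms;		/* remainder for the next wait */
}

static long wait_all(const int *work_ms, int n, long timeout_ms)
{
	for (int i = 0; i < n; i++) {
		timeout_ms = wait_one(work_ms[i], timeout_ms);
		if (timeout_ms < 0)
			return timeout_ms;	/* propagate the error */
	}
	return 0;				/* everything idled in time */
}

int main(void)
{
	const int work[] = { 10, 20, 30 };

	return wait_all(work, 3, 100) < 0 ? 1 : 0;
}
```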
@@ -5057,7 +5057,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) | |||
5057 | ret = i915_gem_wait_for_idle(dev_priv, | 5057 | ret = i915_gem_wait_for_idle(dev_priv, |
5058 | I915_WAIT_INTERRUPTIBLE | | 5058 | I915_WAIT_INTERRUPTIBLE | |
5059 | I915_WAIT_LOCKED | | 5059 | I915_WAIT_LOCKED | |
5060 | I915_WAIT_FOR_IDLE_BOOST); | 5060 | I915_WAIT_FOR_IDLE_BOOST, |
5061 | MAX_SCHEDULE_TIMEOUT); | ||
5061 | if (ret && ret != -EIO) | 5062 | if (ret && ret != -EIO) |
5062 | goto err_unlock; | 5063 | goto err_unlock; |
5063 | 5064 | ||
@@ -5361,9 +5362,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) | |||
5361 | if (err) | 5362 | if (err) |
5362 | goto err_active; | 5363 | goto err_active; |
5363 | 5364 | ||
5364 | err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); | 5365 | if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) { |
5365 | if (err) | 5366 | i915_gem_set_wedged(i915); |
5367 | err = -EIO; /* Caller will declare us wedged */ | ||
5366 | goto err_active; | 5368 | goto err_active; |
5369 | } | ||
5367 | 5370 | ||
5368 | assert_kernel_context_is_current(i915); | 5371 | assert_kernel_context_is_current(i915); |
5369 | 5372 | ||
@@ -5426,7 +5429,9 @@ err_active: | |||
5426 | if (WARN_ON(i915_gem_switch_to_kernel_context(i915))) | 5429 | if (WARN_ON(i915_gem_switch_to_kernel_context(i915))) |
5427 | goto out_ctx; | 5430 | goto out_ctx; |
5428 | 5431 | ||
5429 | if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED))) | 5432 | if (WARN_ON(i915_gem_wait_for_idle(i915, |
5433 | I915_WAIT_LOCKED, | ||
5434 | MAX_SCHEDULE_TIMEOUT))) | ||
5430 | goto out_ctx; | 5435 | goto out_ctx; |
5431 | 5436 | ||
5432 | i915_gem_contexts_lost(i915); | 5437 | i915_gem_contexts_lost(i915); |
@@ -5456,13 +5461,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) | |||
5456 | if (ret) | 5461 | if (ret) |
5457 | return ret; | 5462 | return ret; |
5458 | 5463 | ||
5459 | ret = intel_wopcm_init(&dev_priv->wopcm); | 5464 | ret = intel_uc_init_misc(dev_priv); |
5460 | if (ret) | 5465 | if (ret) |
5461 | return ret; | 5466 | return ret; |
5462 | 5467 | ||
5463 | ret = intel_uc_init_misc(dev_priv); | 5468 | ret = intel_wopcm_init(&dev_priv->wopcm); |
5464 | if (ret) | 5469 | if (ret) |
5465 | return ret; | 5470 | goto err_uc_misc; |
5466 | 5471 | ||
5467 | /* This is just a security blanket to placate dragons. | 5472 | /* This is just a security blanket to placate dragons. |
5468 | * On some systems, we very sporadically observe that the first TLBs | 5473 | * On some systems, we very sporadically observe that the first TLBs |
@@ -5560,6 +5565,7 @@ err_unlock: | |||
5560 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 5565 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
5561 | mutex_unlock(&dev_priv->drm.struct_mutex); | 5566 | mutex_unlock(&dev_priv->drm.struct_mutex); |
5562 | 5567 | ||
5568 | err_uc_misc: | ||
5563 | intel_uc_fini_misc(dev_priv); | 5569 | intel_uc_fini_misc(dev_priv); |
5564 | 5570 | ||
5565 | if (ret != -EIO) | 5571 | if (ret != -EIO) |
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 261da577829a..e46592956872 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -88,4 +88,9 @@ static inline void __tasklet_enable_sync_once(struct tasklet_struct *t) | |||
88 | tasklet_kill(t); | 88 | tasklet_kill(t); |
89 | } | 89 | } |
90 | 90 | ||
91 | static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) | ||
92 | { | ||
93 | return !atomic_read(&t->count); | ||
94 | } | ||
95 | |||
91 | #endif /* __I915_GEM_H__ */ | 96 | #endif /* __I915_GEM_H__ */ |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index ccf463ab6562..b10770cfccd2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -374,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, | |||
374 | if (USES_FULL_PPGTT(dev_priv)) { | 374 | if (USES_FULL_PPGTT(dev_priv)) { |
375 | struct i915_hw_ppgtt *ppgtt; | 375 | struct i915_hw_ppgtt *ppgtt; |
376 | 376 | ||
377 | ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name); | 377 | ppgtt = i915_ppgtt_create(dev_priv, file_priv); |
378 | if (IS_ERR(ppgtt)) { | 378 | if (IS_ERR(ppgtt)) { |
379 | DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", | 379 | DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", |
380 | PTR_ERR(ppgtt)); | 380 | PTR_ERR(ppgtt)); |
@@ -512,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) | |||
512 | } | 512 | } |
513 | 513 | ||
514 | DRM_DEBUG_DRIVER("%s context support initialized\n", | 514 | DRM_DEBUG_DRIVER("%s context support initialized\n", |
515 | dev_priv->engine[RCS]->context_size ? "logical" : | 515 | DRIVER_CAPS(dev_priv)->has_logical_contexts ? |
516 | "fake"); | 516 | "logical" : "fake"); |
517 | return 0; | 517 | return 0; |
518 | } | 518 | } |
519 | 519 | ||
@@ -720,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | |||
720 | struct i915_gem_context *ctx; | 720 | struct i915_gem_context *ctx; |
721 | int ret; | 721 | int ret; |
722 | 722 | ||
723 | if (!dev_priv->engine[RCS]->context_size) | 723 | if (!DRIVER_CAPS(dev_priv)->has_logical_contexts) |
724 | return -ENODEV; | 724 | return -ENODEV; |
725 | 725 | ||
726 | if (args->pad != 0) | 726 | if (args->pad != 0) |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 54814a196ee4..02b83a5ed96c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915) | |||
69 | 69 | ||
70 | err = i915_gem_wait_for_idle(i915, | 70 | err = i915_gem_wait_for_idle(i915, |
71 | I915_WAIT_INTERRUPTIBLE | | 71 | I915_WAIT_INTERRUPTIBLE | |
72 | I915_WAIT_LOCKED); | 72 | I915_WAIT_LOCKED, |
73 | MAX_SCHEDULE_TIMEOUT); | ||
73 | if (err) | 74 | if (err) |
74 | return err; | 75 | return err; |
75 | 76 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 60dc2a865f5f..3f0c612d42e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -66,6 +66,15 @@ enum { | |||
66 | #define __I915_EXEC_ILLEGAL_FLAGS \ | 66 | #define __I915_EXEC_ILLEGAL_FLAGS \ |
67 | (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK) | 67 | (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK) |
68 | 68 | ||
69 | /* Catch emission of unexpected errors for CI! */ | ||
70 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) | ||
71 | #undef EINVAL | ||
72 | #define EINVAL ({ \ | ||
73 | DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \ | ||
74 | 22; \ | ||
75 | }) | ||
76 | #endif | ||
77 | |||
69 | /** | 78 | /** |
70 | * DOC: User command execution | 79 | * DOC: User command execution |
71 | * | 80 | * |
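The block added above makes every return -EINVAL in this file log its location (function and line) when CONFIG_DRM_I915_DEBUG_GEM is set, by redefining EINVAL as a GNU statement expression that prints before evaluating to 22. A userspace approximation of the same trick follows; fprintf() stands in for DRM_DEBUG_DRIVER() and check_flags() is an invented caller:

```c
/*
 * Userspace approximation, not kernel code: redefine EINVAL so that each
 * "return -EINVAL;" logs its function and line before evaluating to 22.
 * Relies on GNU statement expressions, as the driver macro does.
 */
#include <stdio.h>
#include <errno.h>

#undef EINVAL
#define EINVAL ({ \
	fprintf(stderr, "EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})

/* Invented caller: rejects flags outside the low byte. */
static int check_flags(unsigned int flags)
{
	if (flags & ~0xffu)
		return -EINVAL;	/* now logs "EINVAL at check_flags:NN" */
	return 0;
}

int main(void)
{
	return check_flags(0x100) ? 1 : 0;
}
```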
@@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb, | |||
534 | * paranoia do it everywhere. | 543 | * paranoia do it everywhere. |
535 | */ | 544 | */ |
536 | if (i == batch_idx) { | 545 | if (i == batch_idx) { |
537 | if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) | 546 | if (entry->relocation_count && |
547 | !(eb->flags[i] & EXEC_OBJECT_PINNED)) | ||
538 | eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; | 548 | eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; |
539 | if (eb->reloc_cache.has_fence) | 549 | if (eb->reloc_cache.has_fence) |
540 | eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; | 550 | eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; |
@@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, | |||
1155 | goto err_request; | 1165 | goto err_request; |
1156 | 1166 | ||
1157 | GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); | 1167 | GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); |
1158 | i915_vma_move_to_active(batch, rq, 0); | 1168 | err = i915_vma_move_to_active(batch, rq, 0); |
1159 | reservation_object_lock(batch->resv, NULL); | 1169 | if (err) |
1160 | reservation_object_add_excl_fence(batch->resv, &rq->fence); | 1170 | goto skip_request; |
1161 | reservation_object_unlock(batch->resv); | ||
1162 | i915_vma_unpin(batch); | ||
1163 | 1171 | ||
1164 | i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); | 1172 | err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); |
1165 | reservation_object_lock(vma->resv, NULL); | 1173 | if (err) |
1166 | reservation_object_add_excl_fence(vma->resv, &rq->fence); | 1174 | goto skip_request; |
1167 | reservation_object_unlock(vma->resv); | ||
1168 | 1175 | ||
1169 | rq->batch = batch; | 1176 | rq->batch = batch; |
1177 | i915_vma_unpin(batch); | ||
1170 | 1178 | ||
1171 | cache->rq = rq; | 1179 | cache->rq = rq; |
1172 | cache->rq_cmd = cmd; | 1180 | cache->rq_cmd = cmd; |
@@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, | |||
1175 | /* Return with batch mapping (cmd) still pinned */ | 1183 | /* Return with batch mapping (cmd) still pinned */ |
1176 | return 0; | 1184 | return 0; |
1177 | 1185 | ||
1186 | skip_request: | ||
1187 | i915_request_skip(rq, err); | ||
1178 | err_request: | 1188 | err_request: |
1179 | i915_request_add(rq); | 1189 | i915_request_add(rq); |
1180 | err_unpin: | 1190 | err_unpin: |
@@ -1761,25 +1771,6 @@ slow: | |||
1761 | return eb_relocate_slow(eb); | 1771 | return eb_relocate_slow(eb); |
1762 | } | 1772 | } |
1763 | 1773 | ||
1764 | static void eb_export_fence(struct i915_vma *vma, | ||
1765 | struct i915_request *rq, | ||
1766 | unsigned int flags) | ||
1767 | { | ||
1768 | struct reservation_object *resv = vma->resv; | ||
1769 | |||
1770 | /* | ||
1771 | * Ignore errors from failing to allocate the new fence, we can't | ||
1772 | * handle an error right now. Worst case should be missed | ||
1773 | * synchronisation leading to rendering corruption. | ||
1774 | */ | ||
1775 | reservation_object_lock(resv, NULL); | ||
1776 | if (flags & EXEC_OBJECT_WRITE) | ||
1777 | reservation_object_add_excl_fence(resv, &rq->fence); | ||
1778 | else if (reservation_object_reserve_shared(resv) == 0) | ||
1779 | reservation_object_add_shared_fence(resv, &rq->fence); | ||
1780 | reservation_object_unlock(resv); | ||
1781 | } | ||
1782 | |||
1783 | static int eb_move_to_gpu(struct i915_execbuffer *eb) | 1774 | static int eb_move_to_gpu(struct i915_execbuffer *eb) |
1784 | { | 1775 | { |
1785 | const unsigned int count = eb->buffer_count; | 1776 | const unsigned int count = eb->buffer_count; |
@@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) | |||
1833 | unsigned int flags = eb->flags[i]; | 1824 | unsigned int flags = eb->flags[i]; |
1834 | struct i915_vma *vma = eb->vma[i]; | 1825 | struct i915_vma *vma = eb->vma[i]; |
1835 | 1826 | ||
1836 | i915_vma_move_to_active(vma, eb->request, flags); | 1827 | err = i915_vma_move_to_active(vma, eb->request, flags); |
1837 | eb_export_fence(vma, eb->request, flags); | 1828 | if (unlikely(err)) { |
1829 | i915_request_skip(eb->request, err); | ||
1830 | return err; | ||
1831 | } | ||
1838 | 1832 | ||
1839 | __eb_unreserve_vma(vma, flags); | 1833 | __eb_unreserve_vma(vma, flags); |
1840 | vma->exec_flags = NULL; | 1834 | vma->exec_flags = NULL; |
@@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) | |||
1874 | return true; | 1868 | return true; |
1875 | } | 1869 | } |
1876 | 1870 | ||
1877 | void i915_vma_move_to_active(struct i915_vma *vma, | ||
1878 | struct i915_request *rq, | ||
1879 | unsigned int flags) | ||
1880 | { | ||
1881 | struct drm_i915_gem_object *obj = vma->obj; | ||
1882 | const unsigned int idx = rq->engine->id; | ||
1883 | |||
1884 | lockdep_assert_held(&rq->i915->drm.struct_mutex); | ||
1885 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | ||
1886 | |||
1887 | /* | ||
1888 | * Add a reference if we're newly entering the active list. | ||
1889 | * The order in which we add operations to the retirement queue is | ||
1890 | * vital here: mark_active adds to the start of the callback list, | ||
1891 | * such that subsequent callbacks are called first. Therefore we | ||
1892 | * add the active reference first and queue for it to be dropped | ||
1893 | * *last*. | ||
1894 | */ | ||
1895 | if (!i915_vma_is_active(vma)) | ||
1896 | obj->active_count++; | ||
1897 | i915_vma_set_active(vma, idx); | ||
1898 | i915_gem_active_set(&vma->last_read[idx], rq); | ||
1899 | list_move_tail(&vma->vm_link, &vma->vm->active_list); | ||
1900 | |||
1901 | obj->write_domain = 0; | ||
1902 | if (flags & EXEC_OBJECT_WRITE) { | ||
1903 | obj->write_domain = I915_GEM_DOMAIN_RENDER; | ||
1904 | |||
1905 | if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) | ||
1906 | i915_gem_active_set(&obj->frontbuffer_write, rq); | ||
1907 | |||
1908 | obj->read_domains = 0; | ||
1909 | } | ||
1910 | obj->read_domains |= I915_GEM_GPU_DOMAINS; | ||
1911 | |||
1912 | if (flags & EXEC_OBJECT_NEEDS_FENCE) | ||
1913 | i915_gem_active_set(&vma->last_fence, rq); | ||
1914 | } | ||
1915 | |||
1916 | static int i915_reset_gen7_sol_offsets(struct i915_request *rq) | 1871 | static int i915_reset_gen7_sol_offsets(struct i915_request *rq) |
1917 | { | 1872 | { |
1918 | u32 *cs; | 1873 | u32 *cs; |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c6aa761ca085..abd81fb9b0b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -375,37 +375,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr, | |||
375 | return pte; | 375 | return pte; |
376 | } | 376 | } |
377 | 377 | ||
378 | static void stash_init(struct pagestash *stash) | ||
379 | { | ||
380 | pagevec_init(&stash->pvec); | ||
381 | spin_lock_init(&stash->lock); | ||
382 | } | ||
383 | |||
384 | static struct page *stash_pop_page(struct pagestash *stash) | ||
385 | { | ||
386 | struct page *page = NULL; | ||
387 | |||
388 | spin_lock(&stash->lock); | ||
389 | if (likely(stash->pvec.nr)) | ||
390 | page = stash->pvec.pages[--stash->pvec.nr]; | ||
391 | spin_unlock(&stash->lock); | ||
392 | |||
393 | return page; | ||
394 | } | ||
395 | |||
396 | static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) | ||
397 | { | ||
398 | int nr; | ||
399 | |||
400 | spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); | ||
401 | |||
402 | nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec)); | ||
403 | memcpy(stash->pvec.pages + stash->pvec.nr, | ||
404 | pvec->pages + pvec->nr - nr, | ||
405 | sizeof(pvec->pages[0]) * nr); | ||
406 | stash->pvec.nr += nr; | ||
407 | |||
408 | spin_unlock(&stash->lock); | ||
409 | |||
410 | pvec->nr -= nr; | ||
411 | } | ||
412 | |||
378 | static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) | 413 | static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) |
379 | { | 414 | { |
380 | struct pagevec *pvec = &vm->free_pages; | 415 | struct pagevec stack; |
381 | struct pagevec stash; | 416 | struct page *page; |
382 | 417 | ||
383 | if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) | 418 | if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) |
384 | i915_gem_shrink_all(vm->i915); | 419 | i915_gem_shrink_all(vm->i915); |
385 | 420 | ||
386 | if (likely(pvec->nr)) | 421 | page = stash_pop_page(&vm->free_pages); |
387 | return pvec->pages[--pvec->nr]; | 422 | if (page) |
423 | return page; | ||
388 | 424 | ||
389 | if (!vm->pt_kmap_wc) | 425 | if (!vm->pt_kmap_wc) |
390 | return alloc_page(gfp); | 426 | return alloc_page(gfp); |
391 | 427 | ||
392 | /* A placeholder for a specific mutex to guard the WC stash */ | ||
393 | lockdep_assert_held(&vm->i915->drm.struct_mutex); | ||
394 | |||
395 | /* Look in our global stash of WC pages... */ | 428 | /* Look in our global stash of WC pages... */ |
396 | pvec = &vm->i915->mm.wc_stash; | 429 | page = stash_pop_page(&vm->i915->mm.wc_stash); |
397 | if (likely(pvec->nr)) | 430 | if (page) |
398 | return pvec->pages[--pvec->nr]; | 431 | return page; |
399 | 432 | ||
400 | /* | 433 | /* |
401 | * Otherwise batch allocate pages to amoritize cost of set_pages_wc. | 434 | * Otherwise batch allocate pages to amortize cost of set_pages_wc. |
402 | * | 435 | * |
403 | * We have to be careful as page allocation may trigger the shrinker | 436 | * We have to be careful as page allocation may trigger the shrinker |
404 | * (via direct reclaim) which will fill up the WC stash underneath us. | 437 | * (via direct reclaim) which will fill up the WC stash underneath us. |
405 | * So we add our WB pages into a temporary pvec on the stack and merge | 438 | * So we add our WB pages into a temporary pvec on the stack and merge |
406 | * them into the WC stash after all the allocations are complete. | 439 | * them into the WC stash after all the allocations are complete. |
407 | */ | 440 | */ |
408 | pagevec_init(&stash); | 441 | pagevec_init(&stack); |
409 | do { | 442 | do { |
410 | struct page *page; | 443 | struct page *page; |
411 | 444 | ||
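The pagestash added above swaps the struct_mutex-guarded pagevec for a small stash with its own spinlock, so spare WC pages can be popped and pushed without the big lock. A rough userspace sketch of that shape, assuming pthread_mutex and a plain array in place of spin_lock() and the pagevec (all names and sizes here are illustrative):

```c
/* Userspace sketch of a lock-protected stash of spare pages. */
#include <pthread.h>
#include <stdio.h>

#define STASH_SIZE 15			/* roughly PAGEVEC_SIZE */

struct pagestash {
	pthread_mutex_t lock;		/* stands in for the kernel spinlock */
	void *pages[STASH_SIZE];
	int nr;
};

static void stash_init(struct pagestash *stash)
{
	pthread_mutex_init(&stash->lock, NULL);
	stash->nr = 0;
}

static void *stash_pop_page(struct pagestash *stash)
{
	void *page = NULL;

	pthread_mutex_lock(&stash->lock);
	if (stash->nr)
		page = stash->pages[--stash->nr];
	pthread_mutex_unlock(&stash->lock);
	return page;
}

/* Push as many spare pages as fit; return how many the caller keeps. */
static int stash_push_pages(struct pagestash *stash, void **pages, int nr)
{
	int taken, i;

	pthread_mutex_lock(&stash->lock);
	taken = STASH_SIZE - stash->nr;
	if (taken > nr)
		taken = nr;
	for (i = 0; i < taken; i++)
		stash->pages[stash->nr++] = pages[nr - taken + i];
	pthread_mutex_unlock(&stash->lock);
	return nr - taken;		/* surplus the caller must release itself */
}

int main(void)
{
	struct pagestash stash;
	int dummy[4];
	void *spare[4] = { &dummy[0], &dummy[1], &dummy[2], &dummy[3] };

	stash_init(&stash);
	printf("surplus: %d\n", stash_push_pages(&stash, spare, 4));
	printf("popped:  %p\n", stash_pop_page(&stash));
	return 0;
}
```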
@@ -413,59 +446,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) | |||
413 | if (unlikely(!page)) | 446 | if (unlikely(!page)) |
414 | break; | 447 | break; |
415 | 448 | ||
416 | stash.pages[stash.nr++] = page; | 449 | stack.pages[stack.nr++] = page; |
417 | } while (stash.nr < pagevec_space(pvec)); | 450 | } while (pagevec_space(&stack)); |
418 | 451 | ||
419 | if (stash.nr) { | 452 | if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { |
420 | int nr = min_t(int, stash.nr, pagevec_space(pvec)); | 453 | page = stack.pages[--stack.nr]; |
421 | struct page **pages = stash.pages + stash.nr - nr; | ||
422 | 454 | ||
423 | if (nr && !set_pages_array_wc(pages, nr)) { | 455 | /* Merge spare WC pages to the global stash */ |
424 | memcpy(pvec->pages + pvec->nr, | 456 | stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); |
425 | pages, sizeof(pages[0]) * nr); | ||
426 | pvec->nr += nr; | ||
427 | stash.nr -= nr; | ||
428 | } | ||
429 | 457 | ||
430 | pagevec_release(&stash); | 458 | /* Push any surplus WC pages onto the local VM stash */ |
459 | if (stack.nr) | ||
460 | stash_push_pagevec(&vm->free_pages, &stack); | ||
431 | } | 461 | } |
432 | 462 | ||
433 | return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL; | 463 | /* Return unwanted leftovers */ |
464 | if (unlikely(stack.nr)) { | ||
465 | WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); | ||
466 | __pagevec_release(&stack); | ||
467 | } | ||
468 | |||
469 | return page; | ||
434 | } | 470 | } |
435 | 471 | ||
436 | static void vm_free_pages_release(struct i915_address_space *vm, | 472 | static void vm_free_pages_release(struct i915_address_space *vm, |
437 | bool immediate) | 473 | bool immediate) |
438 | { | 474 | { |
439 | struct pagevec *pvec = &vm->free_pages; | 475 | struct pagevec *pvec = &vm->free_pages.pvec; |
476 | struct pagevec stack; | ||
440 | 477 | ||
478 | lockdep_assert_held(&vm->free_pages.lock); | ||
441 | GEM_BUG_ON(!pagevec_count(pvec)); | 479 | GEM_BUG_ON(!pagevec_count(pvec)); |
442 | 480 | ||
443 | if (vm->pt_kmap_wc) { | 481 | if (vm->pt_kmap_wc) { |
444 | struct pagevec *stash = &vm->i915->mm.wc_stash; | 482 | /* |
445 | 483 | * When we use WC, first fill up the global stash and then | |
446 | /* When we use WC, first fill up the global stash and then | ||
447 | * only if full immediately free the overflow. | 484 | * only if full immediately free the overflow. |
448 | */ | 485 | */ |
486 | stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); | ||
449 | 487 | ||
450 | lockdep_assert_held(&vm->i915->drm.struct_mutex); | 488 | /* |
451 | if (pagevec_space(stash)) { | 489 | * As we have made some room in the VM's free_pages, |
452 | do { | 490 | * we can wait for it to fill again. Unless we are |
453 | stash->pages[stash->nr++] = | 491 | * inside i915_address_space_fini() and must |
454 | pvec->pages[--pvec->nr]; | 492 | * immediately release the pages! |
455 | if (!pvec->nr) | 493 | */ |
456 | return; | 494 | if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) |
457 | } while (pagevec_space(stash)); | 495 | return; |
458 | 496 | ||
459 | /* As we have made some room in the VM's free_pages, | 497 | /* |
460 | * we can wait for it to fill again. Unless we are | 498 | * We have to drop the lock to allow ourselves to sleep, |
461 | * inside i915_address_space_fini() and must | 499 | * so take a copy of the pvec and clear the stash for |
462 | * immediately release the pages! | 500 | * others to use it as we sleep. |
463 | */ | 501 | */ |
464 | if (!immediate) | 502 | stack = *pvec; |
465 | return; | 503 | pagevec_reinit(pvec); |
466 | } | 504 | spin_unlock(&vm->free_pages.lock); |
467 | 505 | ||
506 | pvec = &stack; | ||
468 | set_pages_array_wb(pvec->pages, pvec->nr); | 507 | set_pages_array_wb(pvec->pages, pvec->nr); |
508 | |||
509 | spin_lock(&vm->free_pages.lock); | ||
469 | } | 510 | } |
470 | 511 | ||
471 | __pagevec_release(pvec); | 512 | __pagevec_release(pvec); |
@@ -481,8 +522,35 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) | |||
481 | * unconditional might_sleep() for everybody. | 522 | * unconditional might_sleep() for everybody. |
482 | */ | 523 | */ |
483 | might_sleep(); | 524 | might_sleep(); |
484 | if (!pagevec_add(&vm->free_pages, page)) | 525 | spin_lock(&vm->free_pages.lock); |
526 | if (!pagevec_add(&vm->free_pages.pvec, page)) | ||
485 | vm_free_pages_release(vm, false); | 527 | vm_free_pages_release(vm, false); |
528 | spin_unlock(&vm->free_pages.lock); | ||
529 | } | ||
530 | |||
531 | static void i915_address_space_init(struct i915_address_space *vm, | ||
532 | struct drm_i915_private *dev_priv) | ||
533 | { | ||
534 | GEM_BUG_ON(!vm->total); | ||
535 | drm_mm_init(&vm->mm, 0, vm->total); | ||
536 | vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; | ||
537 | |||
538 | stash_init(&vm->free_pages); | ||
539 | |||
540 | INIT_LIST_HEAD(&vm->active_list); | ||
541 | INIT_LIST_HEAD(&vm->inactive_list); | ||
542 | INIT_LIST_HEAD(&vm->unbound_list); | ||
543 | } | ||
544 | |||
545 | static void i915_address_space_fini(struct i915_address_space *vm) | ||
546 | { | ||
547 | spin_lock(&vm->free_pages.lock); | ||
548 | if (pagevec_count(&vm->free_pages.pvec)) | ||
549 | vm_free_pages_release(vm, true); | ||
550 | GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); | ||
551 | spin_unlock(&vm->free_pages.lock); | ||
552 | |||
553 | drm_mm_takedown(&vm->mm); | ||
486 | } | 554 | } |
487 | 555 | ||
488 | static int __setup_page_dma(struct i915_address_space *vm, | 556 | static int __setup_page_dma(struct i915_address_space *vm, |
@@ -493,8 +561,11 @@ static int __setup_page_dma(struct i915_address_space *vm, | |||
493 | if (unlikely(!p->page)) | 561 | if (unlikely(!p->page)) |
494 | return -ENOMEM; | 562 | return -ENOMEM; |
495 | 563 | ||
496 | p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE, | 564 | p->daddr = dma_map_page_attrs(vm->dma, |
497 | PCI_DMA_BIDIRECTIONAL); | 565 | p->page, 0, PAGE_SIZE, |
566 | PCI_DMA_BIDIRECTIONAL, | ||
567 | DMA_ATTR_SKIP_CPU_SYNC | | ||
568 | DMA_ATTR_NO_WARN); | ||
498 | if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { | 569 | if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { |
499 | vm_free_page(vm, p->page); | 570 | vm_free_page(vm, p->page); |
500 | return -ENOMEM; | 571 | return -ENOMEM; |
@@ -575,8 +646,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) | |||
575 | if (unlikely(!page)) | 646 | if (unlikely(!page)) |
576 | goto skip; | 647 | goto skip; |
577 | 648 | ||
578 | addr = dma_map_page(vm->dma, page, 0, size, | 649 | addr = dma_map_page_attrs(vm->dma, |
579 | PCI_DMA_BIDIRECTIONAL); | 650 | page, 0, size, |
651 | PCI_DMA_BIDIRECTIONAL, | ||
652 | DMA_ATTR_SKIP_CPU_SYNC | | ||
653 | DMA_ATTR_NO_WARN); | ||
580 | if (unlikely(dma_mapping_error(vm->dma, addr))) | 654 | if (unlikely(dma_mapping_error(vm->dma, addr))) |
581 | goto free_page; | 655 | goto free_page; |
582 | 656 | ||
@@ -1562,6 +1636,8 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) | |||
1562 | if (!ppgtt) | 1636 | if (!ppgtt) |
1563 | return ERR_PTR(-ENOMEM); | 1637 | return ERR_PTR(-ENOMEM); |
1564 | 1638 | ||
1639 | kref_init(&ppgtt->ref); | ||
1640 | |||
1565 | ppgtt->vm.i915 = i915; | 1641 | ppgtt->vm.i915 = i915; |
1566 | ppgtt->vm.dma = &i915->drm.pdev->dev; | 1642 | ppgtt->vm.dma = &i915->drm.pdev->dev; |
1567 | 1643 | ||
@@ -1569,6 +1645,8 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) | |||
1569 | 1ULL << 48 : | 1645 | 1ULL << 48 : |
1570 | 1ULL << 32; | 1646 | 1ULL << 32; |
1571 | 1647 | ||
1648 | i915_address_space_init(&ppgtt->vm, i915); | ||
1649 | |||
1572 | /* There are only a few exceptions for gen >= 6: chv and bxt. | 1650 |
1573 | * And we are not sure about the latter so play safe for now. | 1651 | * And we are not sure about the latter so play safe for now. |
1574 | */ | 1652 | */ |
@@ -1996,7 +2074,6 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) | |||
1996 | struct drm_i915_private *i915 = ppgtt->base.vm.i915; | 2074 | struct drm_i915_private *i915 = ppgtt->base.vm.i915; |
1997 | struct i915_ggtt *ggtt = &i915->ggtt; | 2075 | struct i915_ggtt *ggtt = &i915->ggtt; |
1998 | struct i915_vma *vma; | 2076 | struct i915_vma *vma; |
1999 | int i; | ||
2000 | 2077 | ||
2001 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); | 2078 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); |
2002 | GEM_BUG_ON(size > ggtt->vm.total); | 2079 | GEM_BUG_ON(size > ggtt->vm.total); |
@@ -2005,14 +2082,14 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) | |||
2005 | if (!vma) | 2082 | if (!vma) |
2006 | return ERR_PTR(-ENOMEM); | 2083 | return ERR_PTR(-ENOMEM); |
2007 | 2084 | ||
2008 | for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) | ||
2009 | init_request_active(&vma->last_read[i], NULL); | ||
2010 | init_request_active(&vma->last_fence, NULL); | 2085 | init_request_active(&vma->last_fence, NULL); |
2011 | 2086 | ||
2012 | vma->vm = &ggtt->vm; | 2087 | vma->vm = &ggtt->vm; |
2013 | vma->ops = &pd_vma_ops; | 2088 | vma->ops = &pd_vma_ops; |
2014 | vma->private = ppgtt; | 2089 | vma->private = ppgtt; |
2015 | 2090 | ||
2091 | vma->active = RB_ROOT; | ||
2092 | |||
2016 | vma->size = size; | 2093 | vma->size = size; |
2017 | vma->fence_size = size; | 2094 | vma->fence_size = size; |
2018 | vma->flags = I915_VMA_GGTT; | 2095 | vma->flags = I915_VMA_GGTT; |
@@ -2068,11 +2145,15 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) | |||
2068 | if (!ppgtt) | 2145 | if (!ppgtt) |
2069 | return ERR_PTR(-ENOMEM); | 2146 | return ERR_PTR(-ENOMEM); |
2070 | 2147 | ||
2148 | kref_init(&ppgtt->base.ref); | ||
2149 | |||
2071 | ppgtt->base.vm.i915 = i915; | 2150 | ppgtt->base.vm.i915 = i915; |
2072 | ppgtt->base.vm.dma = &i915->drm.pdev->dev; | 2151 | ppgtt->base.vm.dma = &i915->drm.pdev->dev; |
2073 | 2152 | ||
2074 | ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE; | 2153 | ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE; |
2075 | 2154 | ||
2155 | i915_address_space_init(&ppgtt->base.vm, i915); | ||
2156 | |||
2076 | ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; | 2157 | ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; |
2077 | ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; | 2158 | ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; |
2078 | ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; | 2159 | ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; |
@@ -2105,30 +2186,6 @@ err_free: | |||
2105 | return ERR_PTR(err); | 2186 | return ERR_PTR(err); |
2106 | } | 2187 | } |
2107 | 2188 | ||
2108 | static void i915_address_space_init(struct i915_address_space *vm, | ||
2109 | struct drm_i915_private *dev_priv, | ||
2110 | const char *name) | ||
2111 | { | ||
2112 | drm_mm_init(&vm->mm, 0, vm->total); | ||
2113 | vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; | ||
2114 | |||
2115 | INIT_LIST_HEAD(&vm->active_list); | ||
2116 | INIT_LIST_HEAD(&vm->inactive_list); | ||
2117 | INIT_LIST_HEAD(&vm->unbound_list); | ||
2118 | |||
2119 | list_add_tail(&vm->global_link, &dev_priv->vm_list); | ||
2120 | pagevec_init(&vm->free_pages); | ||
2121 | } | ||
2122 | |||
2123 | static void i915_address_space_fini(struct i915_address_space *vm) | ||
2124 | { | ||
2125 | if (pagevec_count(&vm->free_pages)) | ||
2126 | vm_free_pages_release(vm, true); | ||
2127 | |||
2128 | drm_mm_takedown(&vm->mm); | ||
2129 | list_del(&vm->global_link); | ||
2130 | } | ||
2131 | |||
2132 | static void gtt_write_workarounds(struct drm_i915_private *dev_priv) | 2189 | static void gtt_write_workarounds(struct drm_i915_private *dev_priv) |
2133 | { | 2190 | { |
2134 | /* This function is for gtt related workarounds. This function is | 2191 | /* This function is for gtt related workarounds. This function is |
@@ -2199,8 +2256,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915) | |||
2199 | 2256 | ||
2200 | struct i915_hw_ppgtt * | 2257 | struct i915_hw_ppgtt * |
2201 | i915_ppgtt_create(struct drm_i915_private *i915, | 2258 | i915_ppgtt_create(struct drm_i915_private *i915, |
2202 | struct drm_i915_file_private *fpriv, | 2259 | struct drm_i915_file_private *fpriv) |
2203 | const char *name) | ||
2204 | { | 2260 | { |
2205 | struct i915_hw_ppgtt *ppgtt; | 2261 | struct i915_hw_ppgtt *ppgtt; |
2206 | 2262 | ||
@@ -2208,8 +2264,6 @@ i915_ppgtt_create(struct drm_i915_private *i915, | |||
2208 | if (IS_ERR(ppgtt)) | 2264 | if (IS_ERR(ppgtt)) |
2209 | return ppgtt; | 2265 | return ppgtt; |
2210 | 2266 | ||
2211 | kref_init(&ppgtt->ref); | ||
2212 | i915_address_space_init(&ppgtt->vm, i915, name); | ||
2213 | ppgtt->vm.file = fpriv; | 2267 | ppgtt->vm.file = fpriv; |
2214 | 2268 | ||
2215 | trace_i915_ppgtt_create(&ppgtt->vm); | 2269 | trace_i915_ppgtt_create(&ppgtt->vm); |
@@ -2739,7 +2793,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, | |||
2739 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 2793 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
2740 | 2794 | ||
2741 | if (unlikely(ggtt->do_idle_maps)) { | 2795 | if (unlikely(ggtt->do_idle_maps)) { |
2742 | if (i915_gem_wait_for_idle(dev_priv, 0)) { | 2796 | if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { |
2743 | DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); | 2797 | DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); |
2744 | /* Wait a bit, in hopes it avoids the hang */ | 2798 | /* Wait a bit, in hopes it avoids the hang */ |
2745 | udelay(10); | 2799 | udelay(10); |
@@ -2788,7 +2842,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) | |||
2788 | struct i915_hw_ppgtt *ppgtt; | 2842 | struct i915_hw_ppgtt *ppgtt; |
2789 | int err; | 2843 | int err; |
2790 | 2844 | ||
2791 | ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]"); | 2845 | ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); |
2792 | if (IS_ERR(ppgtt)) | 2846 | if (IS_ERR(ppgtt)) |
2793 | return PTR_ERR(ppgtt); | 2847 | return PTR_ERR(ppgtt); |
2794 | 2848 | ||
@@ -2918,7 +2972,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) | |||
2918 | 2972 | ||
2919 | ggtt->vm.cleanup(&ggtt->vm); | 2973 | ggtt->vm.cleanup(&ggtt->vm); |
2920 | 2974 | ||
2921 | pvec = &dev_priv->mm.wc_stash; | 2975 | pvec = &dev_priv->mm.wc_stash.pvec; |
2922 | if (pvec->nr) { | 2976 | if (pvec->nr) { |
2923 | set_pages_array_wb(pvec->pages, pvec->nr); | 2977 | set_pages_array_wb(pvec->pages, pvec->nr); |
2924 | __pagevec_release(pvec); | 2978 | __pagevec_release(pvec); |
@@ -3518,7 +3572,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) | |||
3518 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 3572 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
3519 | int ret; | 3573 | int ret; |
3520 | 3574 | ||
3521 | INIT_LIST_HEAD(&dev_priv->vm_list); | 3575 | stash_init(&dev_priv->mm.wc_stash); |
3522 | 3576 | ||
3523 | /* Note that we use page colouring to enforce a guard page at the | 3577 | /* Note that we use page colouring to enforce a guard page at the |
3524 | * end of the address space. This is required as the CS may prefetch | 3578 | * end of the address space. This is required as the CS may prefetch |
@@ -3526,7 +3580,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) | |||
3526 | * and beyond the end of the GTT if we do not provide a guard. | 3580 | * and beyond the end of the GTT if we do not provide a guard. |
3527 | */ | 3581 | */ |
3528 | mutex_lock(&dev_priv->drm.struct_mutex); | 3582 | mutex_lock(&dev_priv->drm.struct_mutex); |
3529 | i915_address_space_init(&ggtt->vm, dev_priv, "[global]"); | 3583 | i915_address_space_init(&ggtt->vm, dev_priv); |
3530 | if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) | 3584 | if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) |
3531 | ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; | 3585 | ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; |
3532 | mutex_unlock(&dev_priv->drm.struct_mutex); | 3586 | mutex_unlock(&dev_priv->drm.struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 9a4824cae68d..feda45dfd481 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -270,6 +270,11 @@ struct i915_vma_ops { | |||
270 | void (*clear_pages)(struct i915_vma *vma); | 270 | void (*clear_pages)(struct i915_vma *vma); |
271 | }; | 271 | }; |
272 | 272 | ||
273 | struct pagestash { | ||
274 | spinlock_t lock; | ||
275 | struct pagevec pvec; | ||
276 | }; | ||
277 | |||
273 | struct i915_address_space { | 278 | struct i915_address_space { |
274 | struct drm_mm mm; | 279 | struct drm_mm mm; |
275 | struct drm_i915_private *i915; | 280 | struct drm_i915_private *i915; |
@@ -283,7 +288,6 @@ struct i915_address_space { | |||
283 | * assign blame. | 288 | * assign blame. |
284 | */ | 289 | */ |
285 | struct drm_i915_file_private *file; | 290 | struct drm_i915_file_private *file; |
286 | struct list_head global_link; | ||
287 | u64 total; /* size addr space maps (ex. 2GB for ggtt) */ | 291 | u64 total; /* size addr space maps (ex. 2GB for ggtt) */ |
288 | u64 reserved; /* size addr space reserved */ | 292 | u64 reserved; /* size addr space reserved */ |
289 | 293 | ||
@@ -324,7 +328,7 @@ struct i915_address_space { | |||
324 | */ | 328 | */ |
325 | struct list_head unbound_list; | 329 | struct list_head unbound_list; |
326 | 330 | ||
327 | struct pagevec free_pages; | 331 | struct pagestash free_pages; |
328 | bool pt_kmap_wc; | 332 | bool pt_kmap_wc; |
329 | 333 | ||
330 | /* FIXME: Need a more generic return type */ | 334 | /* FIXME: Need a more generic return type */ |
@@ -615,8 +619,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); | |||
615 | int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); | 619 | int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); |
616 | void i915_ppgtt_release(struct kref *kref); | 620 | void i915_ppgtt_release(struct kref *kref); |
617 | struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, | 621 | struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, |
618 | struct drm_i915_file_private *fpriv, | 622 | struct drm_i915_file_private *fpriv); |
619 | const char *name); | ||
620 | void i915_ppgtt_close(struct i915_address_space *vm); | 623 | void i915_ppgtt_close(struct i915_address_space *vm); |
621 | static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) | 624 | static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) |
622 | { | 625 | { |
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 54f00b350779..c3c6f2e588fb 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h | |||
@@ -337,26 +337,17 @@ __attribute__((nonnull)) | |||
337 | static inline struct drm_i915_gem_object * | 337 | static inline struct drm_i915_gem_object * |
338 | i915_gem_object_get(struct drm_i915_gem_object *obj) | 338 | i915_gem_object_get(struct drm_i915_gem_object *obj) |
339 | { | 339 | { |
340 | drm_gem_object_reference(&obj->base); | 340 | drm_gem_object_get(&obj->base); |
341 | return obj; | 341 | return obj; |
342 | } | 342 | } |
343 | 343 | ||
344 | __deprecated | ||
345 | extern void drm_gem_object_reference(struct drm_gem_object *); | ||
346 | |||
347 | __attribute__((nonnull)) | 344 | __attribute__((nonnull)) |
348 | static inline void | 345 | static inline void |
349 | i915_gem_object_put(struct drm_i915_gem_object *obj) | 346 | i915_gem_object_put(struct drm_i915_gem_object *obj) |
350 | { | 347 | { |
351 | __drm_gem_object_unreference(&obj->base); | 348 | __drm_gem_object_put(&obj->base); |
352 | } | 349 | } |
353 | 350 | ||
354 | __deprecated | ||
355 | extern void drm_gem_object_unreference(struct drm_gem_object *); | ||
356 | |||
357 | __deprecated | ||
358 | extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); | ||
359 | |||
360 | static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) | 351 | static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) |
361 | { | 352 | { |
362 | reservation_object_lock(obj->resv, NULL); | 353 | reservation_object_lock(obj->resv, NULL); |
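The hunk above drops the deprecated drm_gem_object_reference()/unreference() declarations and routes the i915 wrappers through the newer get/put names. Usage is unchanged; an illustrative pairing (not taken from the patch) is:

        struct drm_i915_gem_object *ref;

        ref = i915_gem_object_get(obj);   /* +1 reference via drm_gem_object_get() */
        /* ... use ref ... */
        i915_gem_object_put(ref);         /* drop the reference again */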
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 3210cedfa46c..90baf9086d0a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c | |||
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) | |||
222 | goto err_unpin; | 222 | goto err_unpin; |
223 | } | 223 | } |
224 | 224 | ||
225 | i915_vma_move_to_active(so.vma, rq, 0); | 225 | err = i915_vma_move_to_active(so.vma, rq, 0); |
226 | err_unpin: | 226 | err_unpin: |
227 | i915_vma_unpin(so.vma); | 227 | i915_vma_unpin(so.vma); |
228 | err_vma: | 228 | err_vma: |
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 55e84e71f526..c61f5b80fee3 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c | |||
@@ -172,7 +172,9 @@ i915_gem_shrink(struct drm_i915_private *i915, | |||
172 | * we will free as much as we can and hope to get a second chance. | 172 | * we will free as much as we can and hope to get a second chance. |
173 | */ | 173 | */ |
174 | if (flags & I915_SHRINK_ACTIVE) | 174 | if (flags & I915_SHRINK_ACTIVE) |
175 | i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); | 175 | i915_gem_wait_for_idle(i915, |
176 | I915_WAIT_LOCKED, | ||
177 | MAX_SCHEDULE_TIMEOUT); | ||
176 | 178 | ||
177 | trace_i915_gem_shrink(i915, target, flags); | 179 | trace_i915_gem_shrink(i915, target, flags); |
178 | i915_retire_requests(i915); | 180 | i915_retire_requests(i915); |
@@ -392,7 +394,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock, | |||
392 | unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); | 394 | unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); |
393 | 395 | ||
394 | do { | 396 | do { |
395 | if (i915_gem_wait_for_idle(i915, 0) == 0 && | 397 | if (i915_gem_wait_for_idle(i915, |
398 | 0, MAX_SCHEDULE_TIMEOUT) == 0 && | ||
396 | shrinker_lock(i915, unlock)) | 399 | shrinker_lock(i915, unlock)) |
397 | break; | 400 | break; |
398 | 401 | ||
@@ -466,7 +469,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr | |||
466 | return NOTIFY_DONE; | 469 | return NOTIFY_DONE; |
467 | 470 | ||
468 | /* Force everything onto the inactive lists */ | 471 | /* Force everything onto the inactive lists */ |
469 | ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); | 472 | ret = i915_gem_wait_for_idle(i915, |
473 | I915_WAIT_LOCKED, | ||
474 | MAX_SCHEDULE_TIMEOUT); | ||
470 | if (ret) | 475 | if (ret) |
471 | goto out; | 476 | goto out; |
472 | 477 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 79a347295e00..055f8687776d 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, | |||
254 | switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { | 254 | switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { |
255 | default: | 255 | default: |
256 | MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); | 256 | MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); |
257 | /* fall through */ | ||
257 | case GEN7_STOLEN_RESERVED_1M: | 258 | case GEN7_STOLEN_RESERVED_1M: |
258 | *size = 1024 * 1024; | 259 | *size = 1024 * 1024; |
259 | break; | 260 | break; |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index df524c9cad40..8c81cf3aa182 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -335,21 +335,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, | |||
335 | struct drm_i915_error_buffer *err, | 335 | struct drm_i915_error_buffer *err, |
336 | int count) | 336 | int count) |
337 | { | 337 | { |
338 | int i; | ||
339 | |||
340 | err_printf(m, "%s [%d]:\n", name, count); | 338 | err_printf(m, "%s [%d]:\n", name, count); |
341 | 339 | ||
342 | while (count--) { | 340 | while (count--) { |
343 | err_printf(m, " %08x_%08x %8u %02x %02x [ ", | 341 | err_printf(m, " %08x_%08x %8u %02x %02x %02x", |
344 | upper_32_bits(err->gtt_offset), | 342 | upper_32_bits(err->gtt_offset), |
345 | lower_32_bits(err->gtt_offset), | 343 | lower_32_bits(err->gtt_offset), |
346 | err->size, | 344 | err->size, |
347 | err->read_domains, | 345 | err->read_domains, |
348 | err->write_domain); | 346 | err->write_domain, |
349 | for (i = 0; i < I915_NUM_ENGINES; i++) | 347 | err->wseqno); |
350 | err_printf(m, "%02x ", err->rseqno[i]); | ||
351 | |||
352 | err_printf(m, "] %02x", err->wseqno); | ||
353 | err_puts(m, tiling_flag(err->tiling)); | 348 | err_puts(m, tiling_flag(err->tiling)); |
354 | err_puts(m, dirty_flag(err->dirty)); | 349 | err_puts(m, dirty_flag(err->dirty)); |
355 | err_puts(m, purgeable_flag(err->purgeable)); | 350 | err_puts(m, purgeable_flag(err->purgeable)); |
@@ -1021,13 +1016,10 @@ static void capture_bo(struct drm_i915_error_buffer *err, | |||
1021 | struct i915_vma *vma) | 1016 | struct i915_vma *vma) |
1022 | { | 1017 | { |
1023 | struct drm_i915_gem_object *obj = vma->obj; | 1018 | struct drm_i915_gem_object *obj = vma->obj; |
1024 | int i; | ||
1025 | 1019 | ||
1026 | err->size = obj->base.size; | 1020 | err->size = obj->base.size; |
1027 | err->name = obj->base.name; | 1021 | err->name = obj->base.name; |
1028 | 1022 | ||
1029 | for (i = 0; i < I915_NUM_ENGINES; i++) | ||
1030 | err->rseqno[i] = __active_get_seqno(&vma->last_read[i]); | ||
1031 | err->wseqno = __active_get_seqno(&obj->frontbuffer_write); | 1023 | err->wseqno = __active_get_seqno(&obj->frontbuffer_write); |
1032 | err->engine = __active_get_engine_id(&obj->frontbuffer_write); | 1024 | err->engine = __active_get_engine_id(&obj->frontbuffer_write); |
1033 | 1025 | ||
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 58910f1dc67c..f893a4e8b783 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h | |||
@@ -177,7 +177,7 @@ struct i915_gpu_state { | |||
177 | struct drm_i915_error_buffer { | 177 | struct drm_i915_error_buffer { |
178 | u32 size; | 178 | u32 size; |
179 | u32 name; | 179 | u32 name; |
180 | u32 rseqno[I915_NUM_ENGINES], wseqno; | 180 | u32 wseqno; |
181 | u64 gtt_offset; | 181 | u64 gtt_offset; |
182 | u32 read_domains; | 182 | u32 read_domains; |
183 | u32 write_domain; | 183 | u32 write_domain; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 46aaef5c1851..495b9d27990e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -122,6 +122,15 @@ static const u32 hpd_gen11[HPD_NUM_PINS] = { | |||
122 | [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG | 122 | [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG |
123 | }; | 123 | }; |
124 | 124 | ||
125 | static const u32 hpd_icp[HPD_NUM_PINS] = { | ||
126 | [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, | ||
127 | [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, | ||
128 | [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, | ||
129 | [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, | ||
130 | [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, | ||
131 | [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP | ||
132 | }; | ||
133 | |||
125 | /* IIR can theoretically queue up two events. Be paranoid. */ | 134 | /* IIR can theoretically queue up two events. Be paranoid. */ |
126 | #define GEN8_IRQ_RESET_NDX(type, which) do { \ | 135 | #define GEN8_IRQ_RESET_NDX(type, which) do { \ |
127 | I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ | 136 | I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ |
@@ -1145,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) | |||
1145 | 1154 | ||
1146 | static void notify_ring(struct intel_engine_cs *engine) | 1155 | static void notify_ring(struct intel_engine_cs *engine) |
1147 | { | 1156 | { |
1157 | const u32 seqno = intel_engine_get_seqno(engine); | ||
1148 | struct i915_request *rq = NULL; | 1158 | struct i915_request *rq = NULL; |
1159 | struct task_struct *tsk = NULL; | ||
1149 | struct intel_wait *wait; | 1160 | struct intel_wait *wait; |
1150 | 1161 | ||
1151 | if (!engine->breadcrumbs.irq_armed) | 1162 | if (unlikely(!engine->breadcrumbs.irq_armed)) |
1152 | return; | 1163 | return; |
1153 | 1164 | ||
1154 | atomic_inc(&engine->irq_count); | 1165 | rcu_read_lock(); |
1155 | set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); | ||
1156 | 1166 | ||
1157 | spin_lock(&engine->breadcrumbs.irq_lock); | 1167 | spin_lock(&engine->breadcrumbs.irq_lock); |
1158 | wait = engine->breadcrumbs.irq_wait; | 1168 | wait = engine->breadcrumbs.irq_wait; |
1159 | if (wait) { | 1169 | if (wait) { |
1160 | bool wakeup = engine->irq_seqno_barrier; | 1170 | /* |
1161 | 1171 | * We use a callback from the dma-fence to submit | |
1162 | /* We use a callback from the dma-fence to submit | ||
1163 | * requests after waiting on our own requests. To | 1172 | * requests after waiting on our own requests. To |
1164 | * ensure minimum delay in queuing the next request to | 1173 | * ensure minimum delay in queuing the next request to |
1165 | * hardware, signal the fence now rather than wait for | 1174 | * hardware, signal the fence now rather than wait for |
@@ -1170,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine) | |||
1170 | * and to handle coalescing of multiple seqno updates | 1179 | * and to handle coalescing of multiple seqno updates |
1171 | * and many waiters. | 1180 | * and many waiters. |
1172 | */ | 1181 | */ |
1173 | if (i915_seqno_passed(intel_engine_get_seqno(engine), | 1182 | if (i915_seqno_passed(seqno, wait->seqno)) { |
1174 | wait->seqno)) { | ||
1175 | struct i915_request *waiter = wait->request; | 1183 | struct i915_request *waiter = wait->request; |
1176 | 1184 | ||
1177 | wakeup = true; | 1185 | if (waiter && |
1178 | if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, | 1186 | !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, |
1179 | &waiter->fence.flags) && | 1187 | &waiter->fence.flags) && |
1180 | intel_wait_check_request(wait, waiter)) | 1188 | intel_wait_check_request(wait, waiter)) |
1181 | rq = i915_request_get(waiter); | 1189 | rq = i915_request_get(waiter); |
1190 | |||
1191 | tsk = wait->tsk; | ||
1192 | } else { | ||
1193 | if (engine->irq_seqno_barrier && | ||
1194 | i915_seqno_passed(seqno, wait->seqno - 1)) { | ||
1195 | set_bit(ENGINE_IRQ_BREADCRUMB, | ||
1196 | &engine->irq_posted); | ||
1197 | tsk = wait->tsk; | ||
1198 | } | ||
1182 | } | 1199 | } |
1183 | 1200 | ||
1184 | if (wakeup) | 1201 | engine->breadcrumbs.irq_count++; |
1185 | wake_up_process(wait->tsk); | ||
1186 | } else { | 1202 | } else { |
1187 | if (engine->breadcrumbs.irq_armed) | 1203 | if (engine->breadcrumbs.irq_armed) |
1188 | __intel_engine_disarm_breadcrumbs(engine); | 1204 | __intel_engine_disarm_breadcrumbs(engine); |
@@ -1190,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine) | |||
1190 | spin_unlock(&engine->breadcrumbs.irq_lock); | 1206 | spin_unlock(&engine->breadcrumbs.irq_lock); |
1191 | 1207 | ||
1192 | if (rq) { | 1208 | if (rq) { |
1193 | dma_fence_signal(&rq->fence); | 1209 | spin_lock(&rq->lock); |
1210 | dma_fence_signal_locked(&rq->fence); | ||
1194 | GEM_BUG_ON(!i915_request_completed(rq)); | 1211 | GEM_BUG_ON(!i915_request_completed(rq)); |
1212 | spin_unlock(&rq->lock); | ||
1213 | |||
1195 | i915_request_put(rq); | 1214 | i915_request_put(rq); |
1196 | } | 1215 | } |
1197 | 1216 | ||
1217 | if (tsk && tsk->state & TASK_NORMAL) | ||
1218 | wake_up_process(tsk); | ||
1219 | |||
1220 | rcu_read_unlock(); | ||
1221 | |||
1198 | trace_intel_engine_notify(engine, wait); | 1222 | trace_intel_engine_notify(engine, wait); |
1199 | } | 1223 | } |
1200 | 1224 | ||
@@ -1469,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, | |||
1469 | static void | 1493 | static void |
1470 | gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) | 1494 | gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) |
1471 | { | 1495 | { |
1472 | struct intel_engine_execlists * const execlists = &engine->execlists; | ||
1473 | bool tasklet = false; | 1496 | bool tasklet = false; |
1474 | 1497 | ||
1475 | if (iir & GT_CONTEXT_SWITCH_INTERRUPT) { | 1498 | if (iir & GT_CONTEXT_SWITCH_INTERRUPT) |
1476 | if (READ_ONCE(engine->execlists.active)) | 1499 | tasklet = true; |
1477 | tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST, | ||
1478 | &engine->irq_posted); | ||
1479 | } | ||
1480 | 1500 | ||
1481 | if (iir & GT_RENDER_USER_INTERRUPT) { | 1501 | if (iir & GT_RENDER_USER_INTERRUPT) { |
1482 | notify_ring(engine); | 1502 | notify_ring(engine); |
@@ -1484,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) | |||
1484 | } | 1504 | } |
1485 | 1505 | ||
1486 | if (tasklet) | 1506 | if (tasklet) |
1487 | tasklet_hi_schedule(&execlists->tasklet); | 1507 | tasklet_hi_schedule(&engine->execlists.tasklet); |
1488 | } | 1508 | } |
1489 | 1509 | ||
1490 | static void gen8_gt_irq_ack(struct drm_i915_private *i915, | 1510 | static void gen8_gt_irq_ack(struct drm_i915_private *i915, |
@@ -1586,6 +1606,34 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val) | |||
1586 | } | 1606 | } |
1587 | } | 1607 | } |
1588 | 1608 | ||
1609 | static bool icp_ddi_port_hotplug_long_detect(enum port port, u32 val) | ||
1610 | { | ||
1611 | switch (port) { | ||
1612 | case PORT_A: | ||
1613 | return val & ICP_DDIA_HPD_LONG_DETECT; | ||
1614 | case PORT_B: | ||
1615 | return val & ICP_DDIB_HPD_LONG_DETECT; | ||
1616 | default: | ||
1617 | return false; | ||
1618 | } | ||
1619 | } | ||
1620 | |||
1621 | static bool icp_tc_port_hotplug_long_detect(enum port port, u32 val) | ||
1622 | { | ||
1623 | switch (port) { | ||
1624 | case PORT_C: | ||
1625 | return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); | ||
1626 | case PORT_D: | ||
1627 | return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); | ||
1628 | case PORT_E: | ||
1629 | return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); | ||
1630 | case PORT_F: | ||
1631 | return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); | ||
1632 | default: | ||
1633 | return false; | ||
1634 | } | ||
1635 | } | ||
1636 | |||
1589 | static bool spt_port_hotplug2_long_detect(enum port port, u32 val) | 1637 | static bool spt_port_hotplug2_long_detect(enum port port, u32 val) |
1590 | { | 1638 | { |
1591 | switch (port) { | 1639 | switch (port) { |
@@ -1703,69 +1751,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, | |||
1703 | uint32_t crc4) | 1751 | uint32_t crc4) |
1704 | { | 1752 | { |
1705 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | 1753 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; |
1706 | struct intel_pipe_crc_entry *entry; | ||
1707 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); | 1754 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
1708 | struct drm_driver *driver = dev_priv->drm.driver; | ||
1709 | uint32_t crcs[5]; | 1755 | uint32_t crcs[5]; |
1710 | int head, tail; | ||
1711 | 1756 | ||
1712 | spin_lock(&pipe_crc->lock); | 1757 | spin_lock(&pipe_crc->lock); |
1713 | if (pipe_crc->source && !crtc->base.crc.opened) { | 1758 | /* |
1714 | if (!pipe_crc->entries) { | 1759 | * For some not yet identified reason, the first CRC is |
1715 | spin_unlock(&pipe_crc->lock); | 1760 | * bonkers. So let's just wait for the next vblank and read |
1716 | DRM_DEBUG_KMS("spurious interrupt\n"); | 1761 | * out the buggy result. |
1717 | return; | 1762 | * |
1718 | } | 1763 | * On GEN8+ sometimes the second CRC is bonkers as well, so |
1719 | 1764 | * don't trust that one either. | |
1720 | head = pipe_crc->head; | 1765 | */ |
1721 | tail = pipe_crc->tail; | 1766 | if (pipe_crc->skipped <= 0 || |
1722 | 1767 | (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { | |
1723 | if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { | 1768 | pipe_crc->skipped++; |
1724 | spin_unlock(&pipe_crc->lock); | ||
1725 | DRM_ERROR("CRC buffer overflowing\n"); | ||
1726 | return; | ||
1727 | } | ||
1728 | |||
1729 | entry = &pipe_crc->entries[head]; | ||
1730 | |||
1731 | entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); | ||
1732 | entry->crc[0] = crc0; | ||
1733 | entry->crc[1] = crc1; | ||
1734 | entry->crc[2] = crc2; | ||
1735 | entry->crc[3] = crc3; | ||
1736 | entry->crc[4] = crc4; | ||
1737 | |||
1738 | head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); | ||
1739 | pipe_crc->head = head; | ||
1740 | |||
1741 | spin_unlock(&pipe_crc->lock); | ||
1742 | |||
1743 | wake_up_interruptible(&pipe_crc->wq); | ||
1744 | } else { | ||
1745 | /* | ||
1746 | * For some not yet identified reason, the first CRC is | ||
1747 | * bonkers. So let's just wait for the next vblank and read | ||
1748 | * out the buggy result. | ||
1749 | * | ||
1750 | * On GEN8+ sometimes the second CRC is bonkers as well, so | ||
1751 | * don't trust that one either. | ||
1752 | */ | ||
1753 | if (pipe_crc->skipped <= 0 || | ||
1754 | (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { | ||
1755 | pipe_crc->skipped++; | ||
1756 | spin_unlock(&pipe_crc->lock); | ||
1757 | return; | ||
1758 | } | ||
1759 | spin_unlock(&pipe_crc->lock); | 1769 | spin_unlock(&pipe_crc->lock); |
1760 | crcs[0] = crc0; | 1770 | return; |
1761 | crcs[1] = crc1; | ||
1762 | crcs[2] = crc2; | ||
1763 | crcs[3] = crc3; | ||
1764 | crcs[4] = crc4; | ||
1765 | drm_crtc_add_crc_entry(&crtc->base, true, | ||
1766 | drm_crtc_accurate_vblank_count(&crtc->base), | ||
1767 | crcs); | ||
1768 | } | 1771 | } |
1772 | spin_unlock(&pipe_crc->lock); | ||
1773 | |||
1774 | crcs[0] = crc0; | ||
1775 | crcs[1] = crc1; | ||
1776 | crcs[2] = crc2; | ||
1777 | crcs[3] = crc3; | ||
1778 | crcs[4] = crc4; | ||
1779 | drm_crtc_add_crc_entry(&crtc->base, true, | ||
1780 | drm_crtc_accurate_vblank_count(&crtc->base), | ||
1781 | crcs); | ||
1769 | } | 1782 | } |
1770 | #else | 1783 | #else |
1771 | static inline void | 1784 | static inline void |
@@ -2021,10 +2034,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, | |||
2021 | 2034 | ||
2022 | static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) | 2035 | static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) |
2023 | { | 2036 | { |
2024 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 2037 | u32 hotplug_status = 0, hotplug_status_mask; |
2038 | int i; | ||
2039 | |||
2040 | if (IS_G4X(dev_priv) || | ||
2041 | IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
2042 | hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | | ||
2043 | DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; | ||
2044 | else | ||
2045 | hotplug_status_mask = HOTPLUG_INT_STATUS_I915; | ||
2046 | |||
2047 | /* | ||
2048 | * We absolutely have to clear all the pending interrupt | ||
2049 | * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port | ||
2050 | * interrupt bit won't have an edge, and the i965/g4x | ||
2051 | * edge triggered IIR will not notice that an interrupt | ||
2052 | * is still pending. We can't use PORT_HOTPLUG_EN to | ||
2053 | * guarantee the edge as the act of toggling the enable | ||
2054 | * bits can itself generate a new hotplug interrupt :( | ||
2055 | */ | ||
2056 | for (i = 0; i < 10; i++) { | ||
2057 | u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; | ||
2058 | |||
2059 | if (tmp == 0) | ||
2060 | return hotplug_status; | ||
2025 | 2061 | ||
2026 | if (hotplug_status) | 2062 | hotplug_status |= tmp; |
2027 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 2063 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
2064 | } | ||
2065 | |||
2066 | WARN_ONCE(1, | ||
2067 | "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", | ||
2068 | I915_READ(PORT_HOTPLUG_STAT)); | ||
2028 | 2069 | ||
2029 | return hotplug_status; | 2070 | return hotplug_status; |
2030 | } | 2071 | } |
@@ -2131,7 +2172,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
2131 | 2172 | ||
2132 | I915_WRITE(VLV_IER, ier); | 2173 | I915_WRITE(VLV_IER, ier); |
2133 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | 2174 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
2134 | POSTING_READ(VLV_MASTER_IER); | ||
2135 | 2175 | ||
2136 | if (gt_iir) | 2176 | if (gt_iir) |
2137 | snb_gt_irq_handler(dev_priv, gt_iir); | 2177 | snb_gt_irq_handler(dev_priv, gt_iir); |
@@ -2216,7 +2256,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
2216 | 2256 | ||
2217 | I915_WRITE(VLV_IER, ier); | 2257 | I915_WRITE(VLV_IER, ier); |
2218 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | 2258 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
2219 | POSTING_READ(GEN8_MASTER_IRQ); | ||
2220 | 2259 | ||
2221 | gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); | 2260 | gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); |
2222 | 2261 | ||
@@ -2385,6 +2424,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) | |||
2385 | cpt_serr_int_handler(dev_priv); | 2424 | cpt_serr_int_handler(dev_priv); |
2386 | } | 2425 | } |
2387 | 2426 | ||
2427 | static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) | ||
2428 | { | ||
2429 | u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; | ||
2430 | u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; | ||
2431 | u32 pin_mask = 0, long_mask = 0; | ||
2432 | |||
2433 | if (ddi_hotplug_trigger) { | ||
2434 | u32 dig_hotplug_reg; | ||
2435 | |||
2436 | dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); | ||
2437 | I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); | ||
2438 | |||
2439 | intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, | ||
2440 | ddi_hotplug_trigger, | ||
2441 | dig_hotplug_reg, hpd_icp, | ||
2442 | icp_ddi_port_hotplug_long_detect); | ||
2443 | } | ||
2444 | |||
2445 | if (tc_hotplug_trigger) { | ||
2446 | u32 dig_hotplug_reg; | ||
2447 | |||
2448 | dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); | ||
2449 | I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); | ||
2450 | |||
2451 | intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, | ||
2452 | tc_hotplug_trigger, | ||
2453 | dig_hotplug_reg, hpd_icp, | ||
2454 | icp_tc_port_hotplug_long_detect); | ||
2455 | } | ||
2456 | |||
2457 | if (pin_mask) | ||
2458 | intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); | ||
2459 | |||
2460 | if (pch_iir & SDE_GMBUS_ICP) | ||
2461 | gmbus_irq_handler(dev_priv); | ||
2462 | } | ||
2463 | |||
2388 | static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) | 2464 | static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) |
2389 | { | 2465 | { |
2390 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & | 2466 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & |
@@ -2548,7 +2624,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2548 | /* disable master interrupt before clearing iir */ | 2624 | /* disable master interrupt before clearing iir */ |
2549 | de_ier = I915_READ(DEIER); | 2625 | de_ier = I915_READ(DEIER); |
2550 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | 2626 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
2551 | POSTING_READ(DEIER); | ||
2552 | 2627 | ||
2553 | /* Disable south interrupts. We'll only write to SDEIIR once, so further | 2628 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
2554 | * interrupts will be stored on its back queue, and then we'll be | 2629 |
@@ -2558,7 +2633,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2558 | if (!HAS_PCH_NOP(dev_priv)) { | 2633 | if (!HAS_PCH_NOP(dev_priv)) { |
2559 | sde_ier = I915_READ(SDEIER); | 2634 | sde_ier = I915_READ(SDEIER); |
2560 | I915_WRITE(SDEIER, 0); | 2635 | I915_WRITE(SDEIER, 0); |
2561 | POSTING_READ(SDEIER); | ||
2562 | } | 2636 | } |
2563 | 2637 | ||
2564 | /* Find, clear, then process each source of interrupt */ | 2638 | /* Find, clear, then process each source of interrupt */ |
@@ -2593,11 +2667,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2593 | } | 2667 | } |
2594 | 2668 | ||
2595 | I915_WRITE(DEIER, de_ier); | 2669 | I915_WRITE(DEIER, de_ier); |
2596 | POSTING_READ(DEIER); | 2670 | if (!HAS_PCH_NOP(dev_priv)) |
2597 | if (!HAS_PCH_NOP(dev_priv)) { | ||
2598 | I915_WRITE(SDEIER, sde_ier); | 2671 | I915_WRITE(SDEIER, sde_ier); |
2599 | POSTING_READ(SDEIER); | ||
2600 | } | ||
2601 | 2672 | ||
2602 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | 2673 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
2603 | enable_rpm_wakeref_asserts(dev_priv); | 2674 | enable_rpm_wakeref_asserts(dev_priv); |
@@ -2804,8 +2875,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) | |||
2804 | I915_WRITE(SDEIIR, iir); | 2875 | I915_WRITE(SDEIIR, iir); |
2805 | ret = IRQ_HANDLED; | 2876 | ret = IRQ_HANDLED; |
2806 | 2877 | ||
2807 | if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || | 2878 | if (HAS_PCH_ICP(dev_priv)) |
2808 | HAS_PCH_CNP(dev_priv)) | 2879 | icp_irq_handler(dev_priv, iir); |
2880 | else if (HAS_PCH_SPT(dev_priv) || | ||
2881 | HAS_PCH_KBP(dev_priv) || | ||
2882 | HAS_PCH_CNP(dev_priv)) | ||
2809 | spt_irq_handler(dev_priv, iir); | 2883 | spt_irq_handler(dev_priv, iir); |
2810 | else | 2884 | else |
2811 | cpt_irq_handler(dev_priv, iir); | 2885 | cpt_irq_handler(dev_priv, iir); |
@@ -3170,7 +3244,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) | |||
3170 | */ | 3244 | */ |
3171 | DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); | 3245 | DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); |
3172 | I915_WRITE(EMR, I915_READ(EMR) | eir); | 3246 | I915_WRITE(EMR, I915_READ(EMR) | eir); |
3173 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 3247 | I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); |
3174 | } | 3248 | } |
3175 | } | 3249 | } |
3176 | 3250 | ||
@@ -3584,6 +3658,9 @@ static void gen11_irq_reset(struct drm_device *dev) | |||
3584 | GEN3_IRQ_RESET(GEN11_DE_HPD_); | 3658 | GEN3_IRQ_RESET(GEN11_DE_HPD_); |
3585 | GEN3_IRQ_RESET(GEN11_GU_MISC_); | 3659 | GEN3_IRQ_RESET(GEN11_GU_MISC_); |
3586 | GEN3_IRQ_RESET(GEN8_PCU_); | 3660 | GEN3_IRQ_RESET(GEN8_PCU_); |
3661 | |||
3662 | if (HAS_PCH_ICP(dev_priv)) | ||
3663 | GEN3_IRQ_RESET(SDE); | ||
3587 | } | 3664 | } |
3588 | 3665 | ||
3589 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, | 3666 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, |
@@ -3700,6 +3777,35 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
3700 | ibx_hpd_detection_setup(dev_priv); | 3777 | ibx_hpd_detection_setup(dev_priv); |
3701 | } | 3778 | } |
3702 | 3779 | ||
3780 | static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) | ||
3781 | { | ||
3782 | u32 hotplug; | ||
3783 | |||
3784 | hotplug = I915_READ(SHOTPLUG_CTL_DDI); | ||
3785 | hotplug |= ICP_DDIA_HPD_ENABLE | | ||
3786 | ICP_DDIB_HPD_ENABLE; | ||
3787 | I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); | ||
3788 | |||
3789 | hotplug = I915_READ(SHOTPLUG_CTL_TC); | ||
3790 | hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | | ||
3791 | ICP_TC_HPD_ENABLE(PORT_TC2) | | ||
3792 | ICP_TC_HPD_ENABLE(PORT_TC3) | | ||
3793 | ICP_TC_HPD_ENABLE(PORT_TC4); | ||
3794 | I915_WRITE(SHOTPLUG_CTL_TC, hotplug); | ||
3795 | } | ||
3796 | |||
3797 | static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) | ||
3798 | { | ||
3799 | u32 hotplug_irqs, enabled_irqs; | ||
3800 | |||
3801 | hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; | ||
3802 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); | ||
3803 | |||
3804 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); | ||
3805 | |||
3806 | icp_hpd_detection_setup(dev_priv); | ||
3807 | } | ||
3808 | |||
3703 | static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) | 3809 | static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) |
3704 | { | 3810 | { |
3705 | u32 hotplug; | 3811 | u32 hotplug; |
@@ -3733,6 +3839,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
3733 | POSTING_READ(GEN11_DE_HPD_IMR); | 3839 | POSTING_READ(GEN11_DE_HPD_IMR); |
3734 | 3840 | ||
3735 | gen11_hpd_detection_setup(dev_priv); | 3841 | gen11_hpd_detection_setup(dev_priv); |
3842 | |||
3843 | if (HAS_PCH_ICP(dev_priv)) | ||
3844 | icp_hpd_irq_setup(dev_priv); | ||
3736 | } | 3845 | } |
3737 | 3846 | ||
3738 | static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) | 3847 | static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) |
@@ -4168,11 +4277,29 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) | |||
4168 | I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); | 4277 | I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); |
4169 | } | 4278 | } |
4170 | 4279 | ||
4280 | static void icp_irq_postinstall(struct drm_device *dev) | ||
4281 | { | ||
4282 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4283 | u32 mask = SDE_GMBUS_ICP; | ||
4284 | |||
4285 | WARN_ON(I915_READ(SDEIER) != 0); | ||
4286 | I915_WRITE(SDEIER, 0xffffffff); | ||
4287 | POSTING_READ(SDEIER); | ||
4288 | |||
4289 | gen3_assert_iir_is_zero(dev_priv, SDEIIR); | ||
4290 | I915_WRITE(SDEIMR, ~mask); | ||
4291 | |||
4292 | icp_hpd_detection_setup(dev_priv); | ||
4293 | } | ||
4294 | |||
4171 | static int gen11_irq_postinstall(struct drm_device *dev) | 4295 | static int gen11_irq_postinstall(struct drm_device *dev) |
4172 | { | 4296 | { |
4173 | struct drm_i915_private *dev_priv = dev->dev_private; | 4297 | struct drm_i915_private *dev_priv = dev->dev_private; |
4174 | u32 gu_misc_masked = GEN11_GU_MISC_GSE; | 4298 | u32 gu_misc_masked = GEN11_GU_MISC_GSE; |
4175 | 4299 | ||
4300 | if (HAS_PCH_ICP(dev_priv)) | ||
4301 | icp_irq_postinstall(dev); | ||
4302 | |||
4176 | gen11_gt_irq_postinstall(dev_priv); | 4303 | gen11_gt_irq_postinstall(dev_priv); |
4177 | gen8_de_irq_postinstall(dev_priv); | 4304 | gen8_de_irq_postinstall(dev_priv); |
4178 | 4305 | ||
@@ -4225,11 +4352,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev) | |||
4225 | /* Unmask the interrupts that we always want on. */ | 4352 | /* Unmask the interrupts that we always want on. */ |
4226 | dev_priv->irq_mask = | 4353 | dev_priv->irq_mask = |
4227 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 4354 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
4228 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); | 4355 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
4356 | I915_MASTER_ERROR_INTERRUPT); | ||
4229 | 4357 | ||
4230 | enable_mask = | 4358 | enable_mask = |
4231 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 4359 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
4232 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 4360 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
4361 | I915_MASTER_ERROR_INTERRUPT | | ||
4233 | I915_USER_INTERRUPT; | 4362 | I915_USER_INTERRUPT; |
4234 | 4363 | ||
4235 | GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); | 4364 | GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); |
@@ -4244,6 +4373,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev) | |||
4244 | return 0; | 4373 | return 0; |
4245 | } | 4374 | } |
4246 | 4375 | ||
4376 | static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, | ||
4377 | u16 *eir, u16 *eir_stuck) | ||
4378 | { | ||
4379 | u16 emr; | ||
4380 | |||
4381 | *eir = I915_READ16(EIR); | ||
4382 | |||
4383 | if (*eir) | ||
4384 | I915_WRITE16(EIR, *eir); | ||
4385 | |||
4386 | *eir_stuck = I915_READ16(EIR); | ||
4387 | if (*eir_stuck == 0) | ||
4388 | return; | ||
4389 | |||
4390 | /* | ||
4391 | * Toggle all EMR bits to make sure we get an edge | ||
4392 | * in the ISR master error bit if we don't clear | ||
4393 | * all the EIR bits. Otherwise the edge triggered | ||
4394 | * IIR on i965/g4x wouldn't notice that an interrupt | ||
4395 | * is still pending. Also some EIR bits can't be | ||
4396 | * cleared except by handling the underlying error | ||
4397 | * (or by a GPU reset) so we mask any bit that | ||
4398 | * remains set. | ||
4399 | */ | ||
4400 | emr = I915_READ16(EMR); | ||
4401 | I915_WRITE16(EMR, 0xffff); | ||
4402 | I915_WRITE16(EMR, emr | *eir_stuck); | ||
4403 | } | ||
4404 | |||
4405 | static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, | ||
4406 | u16 eir, u16 eir_stuck) | ||
4407 | { | ||
4408 | DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); | ||
4409 | |||
4410 | if (eir_stuck) | ||
4411 | DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); | ||
4412 | } | ||
4413 | |||
4414 | static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, | ||
4415 | u32 *eir, u32 *eir_stuck) | ||
4416 | { | ||
4417 | u32 emr; | ||
4418 | |||
4419 | *eir = I915_READ(EIR); | ||
4420 | |||
4421 | I915_WRITE(EIR, *eir); | ||
4422 | |||
4423 | *eir_stuck = I915_READ(EIR); | ||
4424 | if (*eir_stuck == 0) | ||
4425 | return; | ||
4426 | |||
4427 | /* | ||
4428 | * Toggle all EMR bits to make sure we get an edge | ||
4429 | * in the ISR master error bit if we don't clear | ||
4430 | * all the EIR bits. Otherwise the edge triggered | ||
4431 | * IIR on i965/g4x wouldn't notice that an interrupt | ||
4432 | * is still pending. Also some EIR bits can't be | ||
4433 | * cleared except by handling the underlying error | ||
4434 | * (or by a GPU reset) so we mask any bit that | ||
4435 | * remains set. | ||
4436 | */ | ||
4437 | emr = I915_READ(EMR); | ||
4438 | I915_WRITE(EMR, 0xffffffff); | ||
4439 | I915_WRITE(EMR, emr | *eir_stuck); | ||
4440 | } | ||
4441 | |||
4442 | static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, | ||
4443 | u32 eir, u32 eir_stuck) | ||
4444 | { | ||
4445 | DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); | ||
4446 | |||
4447 | if (eir_stuck) | ||
4448 | DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); | ||
4449 | } | ||
4450 | |||
4247 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) | 4451 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
4248 | { | 4452 | { |
4249 | struct drm_device *dev = arg; | 4453 | struct drm_device *dev = arg; |
@@ -4258,6 +4462,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) | |||
4258 | 4462 | ||
4259 | do { | 4463 | do { |
4260 | u32 pipe_stats[I915_MAX_PIPES] = {}; | 4464 | u32 pipe_stats[I915_MAX_PIPES] = {}; |
4465 | u16 eir = 0, eir_stuck = 0; | ||
4261 | u16 iir; | 4466 | u16 iir; |
4262 | 4467 | ||
4263 | iir = I915_READ16(IIR); | 4468 | iir = I915_READ16(IIR); |
@@ -4270,13 +4475,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) | |||
4270 | * signalled in iir */ | 4475 | * signalled in iir */ |
4271 | i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); | 4476 | i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); |
4272 | 4477 | ||
4478 | if (iir & I915_MASTER_ERROR_INTERRUPT) | ||
4479 | i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); | ||
4480 | |||
4273 | I915_WRITE16(IIR, iir); | 4481 | I915_WRITE16(IIR, iir); |
4274 | 4482 | ||
4275 | if (iir & I915_USER_INTERRUPT) | 4483 | if (iir & I915_USER_INTERRUPT) |
4276 | notify_ring(dev_priv->engine[RCS]); | 4484 | notify_ring(dev_priv->engine[RCS]); |
4277 | 4485 | ||
4278 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 4486 | if (iir & I915_MASTER_ERROR_INTERRUPT) |
4279 | DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); | 4487 | i8xx_error_irq_handler(dev_priv, eir, eir_stuck); |
4280 | 4488 | ||
4281 | i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); | 4489 | i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); |
4282 | } while (0); | 4490 | } while (0); |
@@ -4314,12 +4522,14 @@ static int i915_irq_postinstall(struct drm_device *dev) | |||
4314 | dev_priv->irq_mask = | 4522 | dev_priv->irq_mask = |
4315 | ~(I915_ASLE_INTERRUPT | | 4523 | ~(I915_ASLE_INTERRUPT | |
4316 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 4524 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
4317 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); | 4525 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
4526 | I915_MASTER_ERROR_INTERRUPT); | ||
4318 | 4527 | ||
4319 | enable_mask = | 4528 | enable_mask = |
4320 | I915_ASLE_INTERRUPT | | 4529 | I915_ASLE_INTERRUPT | |
4321 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 4530 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
4322 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 4531 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
4532 | I915_MASTER_ERROR_INTERRUPT | | ||
4323 | I915_USER_INTERRUPT; | 4533 | I915_USER_INTERRUPT; |
4324 | 4534 | ||
4325 | if (I915_HAS_HOTPLUG(dev_priv)) { | 4535 | if (I915_HAS_HOTPLUG(dev_priv)) { |
@@ -4357,6 +4567,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
4357 | 4567 | ||
4358 | do { | 4568 | do { |
4359 | u32 pipe_stats[I915_MAX_PIPES] = {}; | 4569 | u32 pipe_stats[I915_MAX_PIPES] = {}; |
4570 | u32 eir = 0, eir_stuck = 0; | ||
4360 | u32 hotplug_status = 0; | 4571 | u32 hotplug_status = 0; |
4361 | u32 iir; | 4572 | u32 iir; |
4362 | 4573 | ||
@@ -4374,13 +4585,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
4374 | * signalled in iir */ | 4585 | * signalled in iir */ |
4375 | i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); | 4586 | i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); |
4376 | 4587 | ||
4588 | if (iir & I915_MASTER_ERROR_INTERRUPT) | ||
4589 | i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); | ||
4590 | |||
4377 | I915_WRITE(IIR, iir); | 4591 | I915_WRITE(IIR, iir); |
4378 | 4592 | ||
4379 | if (iir & I915_USER_INTERRUPT) | 4593 | if (iir & I915_USER_INTERRUPT) |
4380 | notify_ring(dev_priv->engine[RCS]); | 4594 | notify_ring(dev_priv->engine[RCS]); |
4381 | 4595 | ||
4382 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 4596 | if (iir & I915_MASTER_ERROR_INTERRUPT) |
4383 | DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); | 4597 | i9xx_error_irq_handler(dev_priv, eir, eir_stuck); |
4384 | 4598 | ||
4385 | if (hotplug_status) | 4599 | if (hotplug_status) |
4386 | i9xx_hpd_irq_handler(dev_priv, hotplug_status); | 4600 | i9xx_hpd_irq_handler(dev_priv, hotplug_status); |
@@ -4434,14 +4648,14 @@ static int i965_irq_postinstall(struct drm_device *dev) | |||
4434 | I915_DISPLAY_PORT_INTERRUPT | | 4648 | I915_DISPLAY_PORT_INTERRUPT | |
4435 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 4649 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
4436 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 4650 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
4437 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 4651 | I915_MASTER_ERROR_INTERRUPT); |
4438 | 4652 | ||
4439 | enable_mask = | 4653 | enable_mask = |
4440 | I915_ASLE_INTERRUPT | | 4654 | I915_ASLE_INTERRUPT | |
4441 | I915_DISPLAY_PORT_INTERRUPT | | 4655 | I915_DISPLAY_PORT_INTERRUPT | |
4442 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 4656 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
4443 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 4657 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
4444 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | 4658 | I915_MASTER_ERROR_INTERRUPT | |
4445 | I915_USER_INTERRUPT; | 4659 | I915_USER_INTERRUPT; |
4446 | 4660 | ||
4447 | if (IS_G4X(dev_priv)) | 4661 | if (IS_G4X(dev_priv)) |
@@ -4501,6 +4715,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
4501 | 4715 | ||
4502 | do { | 4716 | do { |
4503 | u32 pipe_stats[I915_MAX_PIPES] = {}; | 4717 | u32 pipe_stats[I915_MAX_PIPES] = {}; |
4718 | u32 eir = 0, eir_stuck = 0; | ||
4504 | u32 hotplug_status = 0; | 4719 | u32 hotplug_status = 0; |
4505 | u32 iir; | 4720 | u32 iir; |
4506 | 4721 | ||
@@ -4517,6 +4732,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
4517 | * signalled in iir */ | 4732 | * signalled in iir */ |
4518 | i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); | 4733 | i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); |
4519 | 4734 | ||
4735 | if (iir & I915_MASTER_ERROR_INTERRUPT) | ||
4736 | i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); | ||
4737 | |||
4520 | I915_WRITE(IIR, iir); | 4738 | I915_WRITE(IIR, iir); |
4521 | 4739 | ||
4522 | if (iir & I915_USER_INTERRUPT) | 4740 | if (iir & I915_USER_INTERRUPT) |
@@ -4525,8 +4743,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
4525 | if (iir & I915_BSD_USER_INTERRUPT) | 4743 | if (iir & I915_BSD_USER_INTERRUPT) |
4526 | notify_ring(dev_priv->engine[VCS]); | 4744 | notify_ring(dev_priv->engine[VCS]); |
4527 | 4745 | ||
4528 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 4746 | if (iir & I915_MASTER_ERROR_INTERRUPT) |
4529 | DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); | 4747 | i9xx_error_irq_handler(dev_priv, eir, eir_stuck); |
4530 | 4748 | ||
4531 | if (hotplug_status) | 4749 | if (hotplug_status) |
4532 | i9xx_hpd_irq_handler(dev_priv, hotplug_status); | 4750 | i9xx_hpd_irq_handler(dev_priv, hotplug_status); |
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 447407fee3b8..6bf10952c724 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c | |||
@@ -1836,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, | |||
1836 | * So far the best way to work around this issue seems to be draining | 1836 | * So far the best way to work around this issue seems to be draining |
1837 | * the GPU from any submitted work. | 1837 | * the GPU from any submitted work. |
1838 | */ | 1838 | */ |
1839 | ret = i915_gem_wait_for_idle(dev_priv, wait_flags); | 1839 | ret = i915_gem_wait_for_idle(dev_priv, |
1840 | wait_flags, | ||
1841 | MAX_SCHEDULE_TIMEOUT); | ||
1840 | if (ret) | 1842 | if (ret) |
1841 | goto out; | 1843 | goto out; |
1842 | 1844 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4bfd7a9bd75f..0424e45f88db 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -139,19 +139,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
139 | return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); | 139 | return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); |
140 | } | 140 | } |
141 | 141 | ||
142 | /* | ||
143 | * Given the first two numbers __a and __b of arbitrarily many evenly spaced | ||
144 | * numbers, pick the 0-based __index'th value. | ||
145 | * | ||
146 | * Always prefer this over _PICK() if the numbers are evenly spaced. | ||
147 | */ | ||
148 | #define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a))) | ||
149 | |||
150 | /* | ||
151 | * Given the arbitrary numbers in varargs, pick the 0-based __index'th number. | ||
152 | * | ||
153 | * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced. | ||
154 | */ | ||
142 | #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index]) | 155 | #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index]) |
143 | 156 | ||
144 | #define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a))) | 157 | /* |
158 | * Named helper wrappers around _PICK_EVEN() and _PICK(). | ||
159 | */ | ||
160 | #define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) | ||
145 | #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) | 161 | #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) |
146 | #define _PLANE(plane, a, b) _PIPE(plane, a, b) | 162 | #define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) |
147 | #define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b) | 163 | #define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b) |
148 | #define _TRANS(tran, a, b) ((a) + (tran) * ((b) - (a))) | 164 | #define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) |
149 | #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) | 165 | #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) |
150 | #define _PORT(port, a, b) ((a) + (port) * ((b) - (a))) | 166 | #define _PORT(port, a, b) _PICK_EVEN(port, a, b) |
151 | #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) | 167 | #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) |
152 | #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) | 168 | #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) |
153 | #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) | 169 | #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) |
154 | #define _PLL(pll, a, b) ((a) + (pll) * ((b) - (a))) | 170 | #define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) |
155 | #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) | 171 | #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) |
156 | #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) | 172 | #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) |
157 | #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) | 173 | #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) |
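The new comments above draw the line between _PICK_EVEN() (evenly spaced register offsets, computed by linear interpolation) and _PICK() (an explicit offset table). As a quick illustration of the arithmetic only — this is a standalone userspace sketch, not driver code, and the offsets are merely example values taken from elsewhere in this patch:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint32_t u32;

	/* Same arithmetic as the i915 helpers: linear step vs. explicit table. */
	#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
	#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])

	int main(void)
	{
		/* Evenly spaced per-pipe registers: base 0x70038, stride 0x1000. */
		printf("pipe C offset: 0x%x\n",
		       (unsigned int)_PICK_EVEN(2, 0x70038u, 0x71038u)); /* 0x72038 */

		/* Irregular spacing needs the explicit offset table. */
		printf("third entry:   0x%x\n",
		       (unsigned int)_PICK(2, 0x45400u, 0x45440u, 0x45450u)); /* 0x45450 */

		return 0;
	}

The evenly spaced form needs no lookup table, which is why the hunk rewrites _PIPE, _PLANE, _TRANS, _PORT and _PLL in terms of _PICK_EVEN() and keeps _PICK() for irregular layouts such as _MMIO_PIPE3().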
@@ -1045,13 +1061,13 @@ enum i915_power_well_id { | |||
1045 | 1061 | ||
1046 | /* | 1062 | /* |
1047 | * HSW/BDW | 1063 | * HSW/BDW |
1048 | * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1) | 1064 | * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) |
1049 | */ | 1065 | */ |
1050 | HSW_DISP_PW_GLOBAL = 15, | 1066 | HSW_DISP_PW_GLOBAL = 15, |
1051 | 1067 | ||
1052 | /* | 1068 | /* |
1053 | * GEN9+ | 1069 | * GEN9+ |
1054 | * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1) | 1070 | * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) |
1055 | */ | 1071 | */ |
1056 | SKL_DISP_PW_MISC_IO = 0, | 1072 | SKL_DISP_PW_MISC_IO = 0, |
1057 | SKL_DISP_PW_DDI_A_E, | 1073 | SKL_DISP_PW_DDI_A_E, |
@@ -1075,17 +1091,54 @@ enum i915_power_well_id { | |||
1075 | SKL_DISP_PW_2, | 1091 | SKL_DISP_PW_2, |
1076 | 1092 | ||
1077 | /* - custom power wells */ | 1093 | /* - custom power wells */ |
1078 | SKL_DISP_PW_DC_OFF, | ||
1079 | BXT_DPIO_CMN_A, | 1094 | BXT_DPIO_CMN_A, |
1080 | BXT_DPIO_CMN_BC, | 1095 | BXT_DPIO_CMN_BC, |
1081 | GLK_DPIO_CMN_C, /* 19 */ | 1096 | GLK_DPIO_CMN_C, /* 18 */ |
1097 | |||
1098 | /* | ||
1099 | * GEN11+ | ||
1100 | * - _HSW_PWR_WELL_CTL1-4 | ||
1101 | * (status bit: (id&15)*2, req bit:(id&15)*2+1) | ||
1102 | */ | ||
1103 | ICL_DISP_PW_1 = 0, | ||
1104 | ICL_DISP_PW_2, | ||
1105 | ICL_DISP_PW_3, | ||
1106 | ICL_DISP_PW_4, | ||
1107 | |||
1108 | /* | ||
1109 | * - _HSW_PWR_WELL_CTL_AUX1/2/4 | ||
1110 | * (status bit: (id&15)*2, req bit:(id&15)*2+1) | ||
1111 | */ | ||
1112 | ICL_DISP_PW_AUX_A = 16, | ||
1113 | ICL_DISP_PW_AUX_B, | ||
1114 | ICL_DISP_PW_AUX_C, | ||
1115 | ICL_DISP_PW_AUX_D, | ||
1116 | ICL_DISP_PW_AUX_E, | ||
1117 | ICL_DISP_PW_AUX_F, | ||
1118 | |||
1119 | ICL_DISP_PW_AUX_TBT1 = 24, | ||
1120 | ICL_DISP_PW_AUX_TBT2, | ||
1121 | ICL_DISP_PW_AUX_TBT3, | ||
1122 | ICL_DISP_PW_AUX_TBT4, | ||
1123 | |||
1124 | /* | ||
1125 | * - _HSW_PWR_WELL_CTL_DDI1/2/4 | ||
1126 | * (status bit: (id&15)*2, req bit:(id&15)*2+1) | ||
1127 | */ | ||
1128 | ICL_DISP_PW_DDI_A = 32, | ||
1129 | ICL_DISP_PW_DDI_B, | ||
1130 | ICL_DISP_PW_DDI_C, | ||
1131 | ICL_DISP_PW_DDI_D, | ||
1132 | ICL_DISP_PW_DDI_E, | ||
1133 | ICL_DISP_PW_DDI_F, /* 37 */ | ||
1082 | 1134 | ||
1083 | /* | 1135 | /* |
1084 | * Multiple platforms. | 1136 | * Multiple platforms. |
1085 | * Must start following the highest ID of any platform. | 1137 | * Must start following the highest ID of any platform. |
1086 | * - custom power wells | 1138 | * - custom power wells |
1087 | */ | 1139 | */ |
1088 | I915_DISP_PW_ALWAYS_ON = 20, | 1140 | SKL_DISP_PW_DC_OFF = 38, |
1141 | I915_DISP_PW_ALWAYS_ON, | ||
1089 | }; | 1142 | }; |
1090 | 1143 | ||
1091 | #define PUNIT_REG_PWRGT_CTRL 0x60 | 1144 | #define PUNIT_REG_PWRGT_CTRL 0x60 |
@@ -1667,6 +1720,26 @@ enum i915_power_well_id { | |||
1667 | #define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \ | 1720 | #define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \ |
1668 | _ICL_PORT_CL_DW5_B) | 1721 | _ICL_PORT_CL_DW5_B) |
1669 | 1722 | ||
1723 | #define _CNL_PORT_CL_DW10_A 0x162028 | ||
1724 | #define _ICL_PORT_CL_DW10_B 0x6c028 | ||
1725 | #define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \ | ||
1726 | _CNL_PORT_CL_DW10_A, \ | ||
1727 | _ICL_PORT_CL_DW10_B) | ||
1728 | #define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) | ||
1729 | #define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 | ||
1730 | #define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) | ||
1731 | #define PWR_UP_ALL_LANES (0x0 << 4) | ||
1732 | #define PWR_DOWN_LN_3_2_1 (0xe << 4) | ||
1733 | #define PWR_DOWN_LN_3_2 (0xc << 4) | ||
1734 | #define PWR_DOWN_LN_3 (0x8 << 4) | ||
1735 | #define PWR_DOWN_LN_2_1_0 (0x7 << 4) | ||
1736 | #define PWR_DOWN_LN_1_0 (0x3 << 4) | ||
1737 | #define PWR_DOWN_LN_1 (0x2 << 4) | ||
1738 | #define PWR_DOWN_LN_3_1 (0xa << 4) | ||
1739 | #define PWR_DOWN_LN_3_1_0 (0xb << 4) | ||
1740 | #define PWR_DOWN_LN_MASK (0xf << 4) | ||
1741 | #define PWR_DOWN_LN_SHIFT 4 | ||
1742 | |||
1670 | #define _PORT_CL1CM_DW9_A 0x162024 | 1743 | #define _PORT_CL1CM_DW9_A 0x162024 |
1671 | #define _PORT_CL1CM_DW9_BC 0x6C024 | 1744 | #define _PORT_CL1CM_DW9_BC 0x6C024 |
1672 | #define IREF0RC_OFFSET_SHIFT 8 | 1745 | #define IREF0RC_OFFSET_SHIFT 8 |
@@ -1679,6 +1752,13 @@ enum i915_power_well_id { | |||
1679 | #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) | 1752 | #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) |
1680 | #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) | 1753 | #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) |
1681 | 1754 | ||
1755 | #define _ICL_PORT_CL_DW12_A 0x162030 | ||
1756 | #define _ICL_PORT_CL_DW12_B 0x6C030 | ||
1757 | #define ICL_LANE_ENABLE_AUX (1 << 0) | ||
1758 | #define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \ | ||
1759 | _ICL_PORT_CL_DW12_A, \ | ||
1760 | _ICL_PORT_CL_DW12_B) | ||
1761 | |||
1682 | #define _PORT_CL1CM_DW28_A 0x162070 | 1762 | #define _PORT_CL1CM_DW28_A 0x162070 |
1683 | #define _PORT_CL1CM_DW28_BC 0x6C070 | 1763 | #define _PORT_CL1CM_DW28_BC 0x6C070 |
1684 | #define OCL1_POWER_DOWN_EN (1 << 23) | 1764 | #define OCL1_POWER_DOWN_EN (1 << 23) |
@@ -1716,16 +1796,22 @@ enum i915_power_well_id { | |||
1716 | _CNL_PORT_PCS_DW1_LN0_D, \ | 1796 | _CNL_PORT_PCS_DW1_LN0_D, \ |
1717 | _CNL_PORT_PCS_DW1_LN0_AE, \ | 1797 | _CNL_PORT_PCS_DW1_LN0_AE, \ |
1718 | _CNL_PORT_PCS_DW1_LN0_F)) | 1798 | _CNL_PORT_PCS_DW1_LN0_F)) |
1799 | |||
1719 | #define _ICL_PORT_PCS_DW1_GRP_A 0x162604 | 1800 | #define _ICL_PORT_PCS_DW1_GRP_A 0x162604 |
1720 | #define _ICL_PORT_PCS_DW1_GRP_B 0x6C604 | 1801 | #define _ICL_PORT_PCS_DW1_GRP_B 0x6C604 |
1721 | #define _ICL_PORT_PCS_DW1_LN0_A 0x162804 | 1802 | #define _ICL_PORT_PCS_DW1_LN0_A 0x162804 |
1722 | #define _ICL_PORT_PCS_DW1_LN0_B 0x6C804 | 1803 | #define _ICL_PORT_PCS_DW1_LN0_B 0x6C804 |
1804 | #define _ICL_PORT_PCS_DW1_AUX_A 0x162304 | ||
1805 | #define _ICL_PORT_PCS_DW1_AUX_B 0x6c304 | ||
1723 | #define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\ | 1806 | #define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\ |
1724 | _ICL_PORT_PCS_DW1_GRP_A, \ | 1807 | _ICL_PORT_PCS_DW1_GRP_A, \ |
1725 | _ICL_PORT_PCS_DW1_GRP_B) | 1808 | _ICL_PORT_PCS_DW1_GRP_B) |
1726 | #define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \ | 1809 | #define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \ |
1727 | _ICL_PORT_PCS_DW1_LN0_A, \ | 1810 | _ICL_PORT_PCS_DW1_LN0_A, \ |
1728 | _ICL_PORT_PCS_DW1_LN0_B) | 1811 | _ICL_PORT_PCS_DW1_LN0_B) |
1812 | #define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \ | ||
1813 | _ICL_PORT_PCS_DW1_AUX_A, \ | ||
1814 | _ICL_PORT_PCS_DW1_AUX_B) | ||
1729 | #define COMMON_KEEPER_EN (1 << 26) | 1815 | #define COMMON_KEEPER_EN (1 << 26) |
1730 | 1816 | ||
1731 | /* CNL Port TX registers */ | 1817 | /* CNL Port TX registers */ |
@@ -1762,16 +1848,23 @@ enum i915_power_well_id { | |||
1762 | #define _ICL_PORT_TX_DW2_GRP_B 0x6C688 | 1848 | #define _ICL_PORT_TX_DW2_GRP_B 0x6C688 |
1763 | #define _ICL_PORT_TX_DW2_LN0_A 0x162888 | 1849 | #define _ICL_PORT_TX_DW2_LN0_A 0x162888 |
1764 | #define _ICL_PORT_TX_DW2_LN0_B 0x6C888 | 1850 | #define _ICL_PORT_TX_DW2_LN0_B 0x6C888 |
1851 | #define _ICL_PORT_TX_DW2_AUX_A 0x162388 | ||
1852 | #define _ICL_PORT_TX_DW2_AUX_B 0x6c388 | ||
1765 | #define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \ | 1853 | #define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \ |
1766 | _ICL_PORT_TX_DW2_GRP_A, \ | 1854 | _ICL_PORT_TX_DW2_GRP_A, \ |
1767 | _ICL_PORT_TX_DW2_GRP_B) | 1855 | _ICL_PORT_TX_DW2_GRP_B) |
1768 | #define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \ | 1856 | #define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \ |
1769 | _ICL_PORT_TX_DW2_LN0_A, \ | 1857 | _ICL_PORT_TX_DW2_LN0_A, \ |
1770 | _ICL_PORT_TX_DW2_LN0_B) | 1858 | _ICL_PORT_TX_DW2_LN0_B) |
1859 | #define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \ | ||
1860 | _ICL_PORT_TX_DW2_AUX_A, \ | ||
1861 | _ICL_PORT_TX_DW2_AUX_B) | ||
1771 | #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) | 1862 | #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) |
1772 | #define SWING_SEL_UPPER_MASK (1 << 15) | 1863 | #define SWING_SEL_UPPER_MASK (1 << 15) |
1773 | #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) | 1864 | #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) |
1774 | #define SWING_SEL_LOWER_MASK (0x7 << 11) | 1865 | #define SWING_SEL_LOWER_MASK (0x7 << 11) |
1866 | #define FRC_LATENCY_OPTIM_MASK (0x7 << 8) | ||
1867 | #define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) | ||
1775 | #define RCOMP_SCALAR(x) ((x) << 0) | 1868 | #define RCOMP_SCALAR(x) ((x) << 0) |
1776 | #define RCOMP_SCALAR_MASK (0xFF << 0) | 1869 | #define RCOMP_SCALAR_MASK (0xFF << 0) |
1777 | 1870 | ||
@@ -1787,6 +1880,8 @@ enum i915_power_well_id { | |||
1787 | #define _ICL_PORT_TX_DW4_LN0_A 0x162890 | 1880 | #define _ICL_PORT_TX_DW4_LN0_A 0x162890 |
1788 | #define _ICL_PORT_TX_DW4_LN1_A 0x162990 | 1881 | #define _ICL_PORT_TX_DW4_LN1_A 0x162990 |
1789 | #define _ICL_PORT_TX_DW4_LN0_B 0x6C890 | 1882 | #define _ICL_PORT_TX_DW4_LN0_B 0x6C890 |
1883 | #define _ICL_PORT_TX_DW4_AUX_A 0x162390 | ||
1884 | #define _ICL_PORT_TX_DW4_AUX_B 0x6c390 | ||
1790 | #define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \ | 1885 | #define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \ |
1791 | _ICL_PORT_TX_DW4_GRP_A, \ | 1886 | _ICL_PORT_TX_DW4_GRP_A, \ |
1792 | _ICL_PORT_TX_DW4_GRP_B) | 1887 | _ICL_PORT_TX_DW4_GRP_B) |
@@ -1795,6 +1890,9 @@ enum i915_power_well_id { | |||
1795 | _ICL_PORT_TX_DW4_LN0_B) + \ | 1890 | _ICL_PORT_TX_DW4_LN0_B) + \ |
1796 | ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \ | 1891 | ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \ |
1797 | _ICL_PORT_TX_DW4_LN0_A))) | 1892 | _ICL_PORT_TX_DW4_LN0_A))) |
1893 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \ | ||
1894 | _ICL_PORT_TX_DW4_AUX_A, \ | ||
1895 | _ICL_PORT_TX_DW4_AUX_B) | ||
1798 | #define LOADGEN_SELECT (1 << 31) | 1896 | #define LOADGEN_SELECT (1 << 31) |
1799 | #define POST_CURSOR_1(x) ((x) << 12) | 1897 | #define POST_CURSOR_1(x) ((x) << 12) |
1800 | #define POST_CURSOR_1_MASK (0x3F << 12) | 1898 | #define POST_CURSOR_1_MASK (0x3F << 12) |
@@ -1809,12 +1907,17 @@ enum i915_power_well_id { | |||
1809 | #define _ICL_PORT_TX_DW5_GRP_B 0x6C694 | 1907 | #define _ICL_PORT_TX_DW5_GRP_B 0x6C694 |
1810 | #define _ICL_PORT_TX_DW5_LN0_A 0x162894 | 1908 | #define _ICL_PORT_TX_DW5_LN0_A 0x162894 |
1811 | #define _ICL_PORT_TX_DW5_LN0_B 0x6C894 | 1909 | #define _ICL_PORT_TX_DW5_LN0_B 0x6C894 |
1910 | #define _ICL_PORT_TX_DW5_AUX_A 0x162394 | ||
1911 | #define _ICL_PORT_TX_DW5_AUX_B 0x6c394 | ||
1812 | #define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \ | 1912 | #define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \ |
1813 | _ICL_PORT_TX_DW5_GRP_A, \ | 1913 | _ICL_PORT_TX_DW5_GRP_A, \ |
1814 | _ICL_PORT_TX_DW5_GRP_B) | 1914 | _ICL_PORT_TX_DW5_GRP_B) |
1815 | #define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \ | 1915 | #define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \ |
1816 | _ICL_PORT_TX_DW5_LN0_A, \ | 1916 | _ICL_PORT_TX_DW5_LN0_A, \ |
1817 | _ICL_PORT_TX_DW5_LN0_B) | 1917 | _ICL_PORT_TX_DW5_LN0_B) |
1918 | #define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \ | ||
1919 | _ICL_PORT_TX_DW5_AUX_A, \ | ||
1920 | _ICL_PORT_TX_DW5_AUX_B) | ||
1818 | #define TX_TRAINING_EN (1 << 31) | 1921 | #define TX_TRAINING_EN (1 << 31) |
1819 | #define TAP2_DISABLE (1 << 30) | 1922 | #define TAP2_DISABLE (1 << 30) |
1820 | #define TAP3_DISABLE (1 << 29) | 1923 | #define TAP3_DISABLE (1 << 29) |
@@ -2811,7 +2914,6 @@ enum i915_power_well_id { | |||
2811 | #define I915_DISPLAY_PORT_INTERRUPT (1 << 17) | 2914 | #define I915_DISPLAY_PORT_INTERRUPT (1 << 17) |
2812 | #define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16) | 2915 | #define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16) |
2813 | #define I915_MASTER_ERROR_INTERRUPT (1 << 15) | 2916 | #define I915_MASTER_ERROR_INTERRUPT (1 << 15) |
2814 | #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1 << 15) | ||
2815 | #define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14) | 2917 | #define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14) |
2816 | #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */ | 2918 | #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */ |
2817 | #define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13) | 2919 | #define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13) |
@@ -4044,6 +4146,7 @@ enum { | |||
4044 | #define EDP_PSR_SKIP_AUX_EXIT (1 << 12) | 4146 | #define EDP_PSR_SKIP_AUX_EXIT (1 << 12) |
4045 | #define EDP_PSR_TP1_TP2_SEL (0 << 11) | 4147 | #define EDP_PSR_TP1_TP2_SEL (0 << 11) |
4046 | #define EDP_PSR_TP1_TP3_SEL (1 << 11) | 4148 | #define EDP_PSR_TP1_TP3_SEL (1 << 11) |
4149 | #define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */ | ||
4047 | #define EDP_PSR_TP2_TP3_TIME_500us (0 << 8) | 4150 | #define EDP_PSR_TP2_TP3_TIME_500us (0 << 8) |
4048 | #define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) | 4151 | #define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) |
4049 | #define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) | 4152 | #define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) |
@@ -4072,6 +4175,7 @@ enum { | |||
4072 | 4175 | ||
4073 | #define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) | 4176 | #define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) |
4074 | #define EDP_PSR_STATUS_STATE_MASK (7 << 29) | 4177 | #define EDP_PSR_STATUS_STATE_MASK (7 << 29) |
4178 | #define EDP_PSR_STATUS_STATE_SHIFT 29 | ||
4075 | #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) | 4179 | #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) |
4076 | #define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29) | 4180 | #define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29) |
4077 | #define EDP_PSR_STATUS_STATE_SRDENT (2 << 29) | 4181 | #define EDP_PSR_STATUS_STATE_SRDENT (2 << 29) |
@@ -6829,7 +6933,7 @@ enum { | |||
6829 | #define _PS_ECC_STAT_2B 0x68AD0 | 6933 | #define _PS_ECC_STAT_2B 0x68AD0 |
6830 | #define _PS_ECC_STAT_1C 0x691D0 | 6934 | #define _PS_ECC_STAT_1C 0x691D0 |
6831 | 6935 | ||
6832 | #define _ID(id, a, b) ((a) + (id) * ((b) - (a))) | 6936 | #define _ID(id, a, b) _PICK_EVEN(id, a, b) |
6833 | #define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ | 6937 | #define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ |
6834 | _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ | 6938 | _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ |
6835 | _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) | 6939 | _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) |
@@ -7366,6 +7470,14 @@ enum { | |||
7366 | #define BDW_SCRATCH1 _MMIO(0xb11c) | 7470 | #define BDW_SCRATCH1 _MMIO(0xb11c) |
7367 | #define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) | 7471 | #define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) |
7368 | 7472 | ||
7473 | /*GEN11 chicken */ | ||
7474 | #define _PIPEA_CHICKEN 0x70038 | ||
7475 | #define _PIPEB_CHICKEN 0x71038 | ||
7476 | #define _PIPEC_CHICKEN 0x72038 | ||
7477 | #define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) | ||
7478 | #define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\ | ||
7479 | _PIPEB_CHICKEN) | ||
7480 | |||
7369 | /* PCH */ | 7481 | /* PCH */ |
7370 | 7482 | ||
7371 | /* south display engine interrupt: IBX */ | 7483 | /* south display engine interrupt: IBX */ |
@@ -7409,7 +7521,7 @@ enum { | |||
7409 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) | 7521 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) |
7410 | #define SDE_TRANS_MASK (0x3f) | 7522 | #define SDE_TRANS_MASK (0x3f) |
7411 | 7523 | ||
7412 | /* south display engine interrupt: CPT/PPT */ | 7524 | /* south display engine interrupt: CPT - CNP */ |
7413 | #define SDE_AUDIO_POWER_D_CPT (1 << 31) | 7525 | #define SDE_AUDIO_POWER_D_CPT (1 << 31) |
7414 | #define SDE_AUDIO_POWER_C_CPT (1 << 30) | 7526 | #define SDE_AUDIO_POWER_C_CPT (1 << 30) |
7415 | #define SDE_AUDIO_POWER_B_CPT (1 << 29) | 7527 | #define SDE_AUDIO_POWER_B_CPT (1 << 29) |
@@ -7457,6 +7569,21 @@ enum { | |||
7457 | SDE_FDI_RXB_CPT | \ | 7569 | SDE_FDI_RXB_CPT | \ |
7458 | SDE_FDI_RXA_CPT) | 7570 | SDE_FDI_RXA_CPT) |
7459 | 7571 | ||
7572 | /* south display engine interrupt: ICP */ | ||
7573 | #define SDE_TC4_HOTPLUG_ICP (1 << 27) | ||
7574 | #define SDE_TC3_HOTPLUG_ICP (1 << 26) | ||
7575 | #define SDE_TC2_HOTPLUG_ICP (1 << 25) | ||
7576 | #define SDE_TC1_HOTPLUG_ICP (1 << 24) | ||
7577 | #define SDE_GMBUS_ICP (1 << 23) | ||
7578 | #define SDE_DDIB_HOTPLUG_ICP (1 << 17) | ||
7579 | #define SDE_DDIA_HOTPLUG_ICP (1 << 16) | ||
7580 | #define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \ | ||
7581 | SDE_DDIA_HOTPLUG_ICP) | ||
7582 | #define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \ | ||
7583 | SDE_TC3_HOTPLUG_ICP | \ | ||
7584 | SDE_TC2_HOTPLUG_ICP | \ | ||
7585 | SDE_TC1_HOTPLUG_ICP) | ||
7586 | |||
7460 | #define SDEISR _MMIO(0xc4000) | 7587 | #define SDEISR _MMIO(0xc4000) |
7461 | #define SDEIMR _MMIO(0xc4004) | 7588 | #define SDEIMR _MMIO(0xc4004) |
7462 | #define SDEIIR _MMIO(0xc4008) | 7589 | #define SDEIIR _MMIO(0xc4008) |
@@ -7517,6 +7644,30 @@ enum { | |||
7517 | #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) | 7644 | #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) |
7518 | #define PORTE_HOTPLUG_LONG_DETECT (2 << 0) | 7645 | #define PORTE_HOTPLUG_LONG_DETECT (2 << 0) |
7519 | 7646 | ||
7647 | /* This register is a reuse of PCH_PORT_HOTPLUG register. The | ||
7648 | * functionality covered in PCH_PORT_HOTPLUG is split into | ||
7649 | * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC. | ||
7650 | */ | ||
7651 | |||
7652 | #define SHOTPLUG_CTL_DDI _MMIO(0xc4030) | ||
7653 | #define ICP_DDIB_HPD_ENABLE (1 << 7) | ||
7654 | #define ICP_DDIB_HPD_STATUS_MASK (3 << 4) | ||
7655 | #define ICP_DDIB_HPD_NO_DETECT (0 << 4) | ||
7656 | #define ICP_DDIB_HPD_SHORT_DETECT (1 << 4) | ||
7657 | #define ICP_DDIB_HPD_LONG_DETECT (2 << 4) | ||
7658 | #define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) | ||
7659 | #define ICP_DDIA_HPD_ENABLE (1 << 3) | ||
7660 | #define ICP_DDIA_HPD_STATUS_MASK (3 << 0) | ||
7661 | #define ICP_DDIA_HPD_NO_DETECT (0 << 0) | ||
7662 | #define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) | ||
7663 | #define ICP_DDIA_HPD_LONG_DETECT (2 << 0) | ||
7664 | #define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0) | ||
7665 | |||
7666 | #define SHOTPLUG_CTL_TC _MMIO(0xc4034) | ||
7667 | #define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4) | ||
7668 | #define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) | ||
7669 | #define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) | ||
7670 | |||
7520 | #define PCH_GPIOA _MMIO(0xc5010) | 7671 | #define PCH_GPIOA _MMIO(0xc5010) |
7521 | #define PCH_GPIOB _MMIO(0xc5014) | 7672 | #define PCH_GPIOB _MMIO(0xc5014) |
7522 | #define PCH_GPIOC _MMIO(0xc5018) | 7673 | #define PCH_GPIOC _MMIO(0xc5018) |
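SHOTPLUG_CTL_TC packs one nibble per Type-C port: bit 3 of each nibble enables hotplug detection and the low bits report long/short pulses, as the ICP_TC_HPD_* macros above encode. A small standalone sketch (not driver code) of how the per-port enable bits combine into one register value, assuming ports TC1..TC4 map to tc_port 0..3 as in the SDE_TCn_HOTPLUG_ICP bits:

	#include <stdio.h>
	#include <stdint.h>

	/* Same per-port nibble encoding as the ICP_TC_HPD_* macros above. */
	#define ICP_TC_HPD_ENABLE(tc_port)	(8 << (tc_port) * 4)

	int main(void)
	{
		uint32_t mask = 0;
		int tc_port;

		/* Enable hotplug detection on all four Type-C ports. */
		for (tc_port = 0; tc_port < 4; tc_port++)
			mask |= ICP_TC_HPD_ENABLE(tc_port);

		printf("SHOTPLUG_CTL_TC enable mask: 0x%04x\n",
		       (unsigned int)mask); /* 0x8888 */
		return 0;
	}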
@@ -8555,6 +8706,14 @@ enum { | |||
8555 | #define _HSW_PWR_WELL_CTL3 0x45408 | 8706 | #define _HSW_PWR_WELL_CTL3 0x45408 |
8556 | #define _HSW_PWR_WELL_CTL4 0x4540C | 8707 | #define _HSW_PWR_WELL_CTL4 0x4540C |
8557 | 8708 | ||
8709 | #define _ICL_PWR_WELL_CTL_AUX1 0x45440 | ||
8710 | #define _ICL_PWR_WELL_CTL_AUX2 0x45444 | ||
8711 | #define _ICL_PWR_WELL_CTL_AUX4 0x4544C | ||
8712 | |||
8713 | #define _ICL_PWR_WELL_CTL_DDI1 0x45450 | ||
8714 | #define _ICL_PWR_WELL_CTL_DDI2 0x45454 | ||
8715 | #define _ICL_PWR_WELL_CTL_DDI4 0x4545C | ||
8716 | |||
8558 | /* | 8717 | /* |
8559 | * Each power well control register contains up to 16 (request, status) HW | 8718 | * Each power well control register contains up to 16 (request, status) HW |
8560 | * flag tuples. The register index and HW flag shift is determined by the | 8719 | * flag tuples. The register index and HW flag shift is determined by the |
@@ -8564,14 +8723,20 @@ enum { | |||
8564 | */ | 8723 | */ |
8565 | #define _HSW_PW_REG_IDX(pw) ((pw) >> 4) | 8724 | #define _HSW_PW_REG_IDX(pw) ((pw) >> 4) |
8566 | #define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2) | 8725 | #define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2) |
8567 | /* TODO: Add all PWR_WELL_CTL registers below for new platforms */ | ||
8568 | #define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ | 8726 | #define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ |
8569 | _HSW_PWR_WELL_CTL1)) | 8727 | _HSW_PWR_WELL_CTL1, \ |
8728 | _ICL_PWR_WELL_CTL_AUX1, \ | ||
8729 | _ICL_PWR_WELL_CTL_DDI1)) | ||
8570 | #define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ | 8730 | #define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ |
8571 | _HSW_PWR_WELL_CTL2)) | 8731 | _HSW_PWR_WELL_CTL2, \ |
8732 | _ICL_PWR_WELL_CTL_AUX2, \ | ||
8733 | _ICL_PWR_WELL_CTL_DDI2)) | ||
8734 | /* KVMR doesn't have a reg for AUX or DDI power well control */ | ||
8572 | #define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3) | 8735 | #define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3) |
8573 | #define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ | 8736 | #define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ |
8574 | _HSW_PWR_WELL_CTL4)) | 8737 | _HSW_PWR_WELL_CTL4, \ |
8738 | _ICL_PWR_WELL_CTL_AUX4, \ | ||
8739 | _ICL_PWR_WELL_CTL_DDI4)) | ||
8575 | 8740 | ||
8576 | #define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1)) | 8741 | #define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1)) |
8577 | #define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw)) | 8742 | #define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw)) |
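The comment above the _HSW_PW_REG_IDX()/_HSW_PW_SHIFT() helpers explains how a power well ID now encodes both which control register to use (id >> 4) and which (status, request) bit pair inside it ((id & 0xf) * 2 and that value plus one). A minimal standalone sketch of that decoding, reusing the ID values and shift arithmetic from the hunks above (the AUX/DDI register names in the comments are just annotations here):

	#include <stdio.h>

	/* ID values from the enum additions above. */
	#define ICL_DISP_PW_AUX_A	16
	#define ICL_DISP_PW_DDI_B	33

	/* Same index/shift arithmetic as the driver macros above. */
	#define _HSW_PW_REG_IDX(pw)	((pw) >> 4)
	#define _HSW_PW_SHIFT(pw)	(((pw) & 0xf) * 2)

	static void decode(const char *name, int pw)
	{
		printf("%-18s -> ctl reg #%d, status bit %2d, request bit %2d\n",
		       name, _HSW_PW_REG_IDX(pw),
		       _HSW_PW_SHIFT(pw), _HSW_PW_SHIFT(pw) + 1);
	}

	int main(void)
	{
		decode("ICL_DISP_PW_AUX_A", ICL_DISP_PW_AUX_A); /* reg 1 (AUX), bits 0/1 */
		decode("ICL_DISP_PW_DDI_B", ICL_DISP_PW_DDI_B); /* reg 2 (DDI), bits 2/3 */
		return 0;
	}

That register index is then fed to _PICK() in HSW_PWR_WELL_CTL_BIOS/DRIVER/DEBUG above to choose between the legacy control register and the new ICL AUX/DDI control registers.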
@@ -8592,6 +8757,8 @@ enum skl_power_gate { | |||
8592 | #define SKL_FUSE_DOWNLOAD_STATUS (1 << 31) | 8757 | #define SKL_FUSE_DOWNLOAD_STATUS (1 << 31) |
8593 | /* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */ | 8758 | /* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */ |
8594 | #define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) | 8759 | #define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) |
8760 | /* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */ | ||
8761 | #define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1) | ||
8595 | #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) | 8762 | #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) |
8596 | 8763 | ||
8597 | #define _CNL_AUX_REG_IDX(pw) ((pw) - 9) | 8764 | #define _CNL_AUX_REG_IDX(pw) ((pw) - 9) |
@@ -9047,6 +9214,7 @@ enum skl_power_gate { | |||
9047 | #define _MG_REFCLKIN_CTL_PORT3 0x16A92C | 9214 | #define _MG_REFCLKIN_CTL_PORT3 0x16A92C |
9048 | #define _MG_REFCLKIN_CTL_PORT4 0x16B92C | 9215 | #define _MG_REFCLKIN_CTL_PORT4 0x16B92C |
9049 | #define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) | 9216 | #define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) |
9217 | #define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) | ||
9050 | #define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \ | 9218 | #define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \ |
9051 | _MG_REFCLKIN_CTL_PORT1, \ | 9219 | _MG_REFCLKIN_CTL_PORT1, \ |
9052 | _MG_REFCLKIN_CTL_PORT2) | 9220 | _MG_REFCLKIN_CTL_PORT2) |
@@ -9056,7 +9224,9 @@ enum skl_power_gate { | |||
9056 | #define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 | 9224 | #define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 |
9057 | #define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 | 9225 | #define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 |
9058 | #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) | 9226 | #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) |
9227 | #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) | ||
9059 | #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) | 9228 | #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) |
9229 | #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) | ||
9060 | #define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \ | 9230 | #define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \ |
9061 | _MG_CLKTOP2_CORECLKCTL1_PORT1, \ | 9231 | _MG_CLKTOP2_CORECLKCTL1_PORT1, \ |
9062 | _MG_CLKTOP2_CORECLKCTL1_PORT2) | 9232 | _MG_CLKTOP2_CORECLKCTL1_PORT2) |
@@ -9066,9 +9236,13 @@ enum skl_power_gate { | |||
9066 | #define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 | 9236 | #define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 |
9067 | #define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 | 9237 | #define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 |
9068 | #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) | 9238 | #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) |
9239 | #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) | ||
9069 | #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) | 9240 | #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) |
9241 | #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) | ||
9070 | #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12) | 9242 | #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12) |
9243 | #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) | ||
9071 | #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) | 9244 | #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) |
9245 | #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) | ||
9072 | #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ | 9246 | #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ |
9073 | _MG_CLKTOP2_HSCLKCTL_PORT1, \ | 9247 | _MG_CLKTOP2_HSCLKCTL_PORT1, \ |
9074 | _MG_CLKTOP2_HSCLKCTL_PORT2) | 9248 | _MG_CLKTOP2_HSCLKCTL_PORT2) |
@@ -9142,12 +9316,18 @@ enum skl_power_gate { | |||
9142 | #define _MG_PLL_BIAS_PORT3 0x16AA14 | 9316 | #define _MG_PLL_BIAS_PORT3 0x16AA14 |
9143 | #define _MG_PLL_BIAS_PORT4 0x16BA14 | 9317 | #define _MG_PLL_BIAS_PORT4 0x16BA14 |
9144 | #define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) | 9318 | #define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) |
9319 | #define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) | ||
9145 | #define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) | 9320 | #define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) |
9321 | #define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24) | ||
9146 | #define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) | 9322 | #define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) |
9323 | #define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16) | ||
9147 | #define MG_PLL_BIAS_BIASCAL_EN (1 << 15) | 9324 | #define MG_PLL_BIAS_BIASCAL_EN (1 << 15) |
9148 | #define MG_PLL_BIAS_CTRIM(x) ((x) << 8) | 9325 | #define MG_PLL_BIAS_CTRIM(x) ((x) << 8) |
9326 | #define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8) | ||
9149 | #define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) | 9327 | #define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) |
9328 | #define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) | ||
9150 | #define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) | 9329 | #define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) |
9330 | #define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) | ||
9151 | #define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \ | 9331 | #define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \ |
9152 | _MG_PLL_BIAS_PORT2) | 9332 | _MG_PLL_BIAS_PORT2) |
9153 | 9333 | ||
@@ -9401,6 +9581,22 @@ enum skl_power_gate { | |||
9401 | #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) | 9581 | #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) |
9402 | #define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF | 9582 | #define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF |
9403 | 9583 | ||
9584 | #define _ICL_DSI_ESC_CLK_DIV0 0x6b090 | ||
9585 | #define _ICL_DSI_ESC_CLK_DIV1 0x6b890 | ||
9586 | #define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \ | ||
9587 | _ICL_DSI_ESC_CLK_DIV0, \ | ||
9588 | _ICL_DSI_ESC_CLK_DIV1) | ||
9589 | #define _ICL_DPHY_ESC_CLK_DIV0 0x162190 | ||
9590 | #define _ICL_DPHY_ESC_CLK_DIV1 0x6C190 | ||
9591 | #define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \ | ||
9592 | _ICL_DPHY_ESC_CLK_DIV0, \ | ||
9593 | _ICL_DPHY_ESC_CLK_DIV1) | ||
9594 | #define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16) | ||
9595 | #define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16 | ||
9596 | #define ICL_ESC_CLK_DIV_MASK 0x1ff | ||
9597 | #define ICL_ESC_CLK_DIV_SHIFT 0 | ||
9598 | #define DSI_MAX_ESC_CLK 20000 /* in KHz */ | ||
9599 | |||
9404 | /* Gen4+ Timestamp and Pipe Frame time stamp registers */ | 9600 | /* Gen4+ Timestamp and Pipe Frame time stamp registers */ |
9405 | #define GEN4_TIMESTAMP _MMIO(0x2358) | 9601 | #define GEN4_TIMESTAMP _MMIO(0x2358) |
9406 | #define ILK_TIMESTAMP_HI _MMIO(0x70070) | 9602 | #define ILK_TIMESTAMP_HI _MMIO(0x70070) |
@@ -9535,6 +9731,14 @@ enum skl_power_gate { | |||
9535 | #define _BXT_MIPIC_PORT_CTRL 0x6B8C0 | 9731 | #define _BXT_MIPIC_PORT_CTRL 0x6B8C0 |
9536 | #define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) | 9732 | #define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) |
9537 | 9733 | ||
9734 | /* ICL DSI MODE control */ | ||
9735 | #define _ICL_DSI_IO_MODECTL_0 0x6B094 | ||
9736 | #define _ICL_DSI_IO_MODECTL_1 0x6B894 | ||
9737 | #define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \ | ||
9738 | _ICL_DSI_IO_MODECTL_0, \ | ||
9739 | _ICL_DSI_IO_MODECTL_1) | ||
9740 | #define COMBO_PHY_MODE_DSI (1 << 0) | ||
9741 | |||
9538 | #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) | 9742 | #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) |
9539 | #define STAP_SELECT (1 << 0) | 9743 | #define STAP_SELECT (1 << 0) |
9540 | 9744 | ||
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index e1dbb544046f..5c2c93cbab12 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c | |||
@@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) | |||
206 | /* Carefully retire all requests without writing to the rings */ | 206 | /* Carefully retire all requests without writing to the rings */ |
207 | ret = i915_gem_wait_for_idle(i915, | 207 | ret = i915_gem_wait_for_idle(i915, |
208 | I915_WAIT_INTERRUPTIBLE | | 208 | I915_WAIT_INTERRUPTIBLE | |
209 | I915_WAIT_LOCKED); | 209 | I915_WAIT_LOCKED, |
210 | MAX_SCHEDULE_TIMEOUT); | ||
210 | if (ret) | 211 | if (ret) |
211 | return ret; | 212 | return ret; |
212 | 213 | ||
@@ -503,7 +504,7 @@ static void move_to_timeline(struct i915_request *request, | |||
503 | GEM_BUG_ON(request->timeline == &request->engine->timeline); | 504 | GEM_BUG_ON(request->timeline == &request->engine->timeline); |
504 | lockdep_assert_held(&request->engine->timeline.lock); | 505 | lockdep_assert_held(&request->engine->timeline.lock); |
505 | 506 | ||
506 | spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING); | 507 | spin_lock(&request->timeline->lock); |
507 | list_move_tail(&request->link, &timeline->requests); | 508 | list_move_tail(&request->link, &timeline->requests); |
508 | spin_unlock(&request->timeline->lock); | 509 | spin_unlock(&request->timeline->lock); |
509 | } | 510 | } |
@@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) | |||
735 | /* Ratelimit ourselves to prevent oom from malicious clients */ | 736 | /* Ratelimit ourselves to prevent oom from malicious clients */ |
736 | ret = i915_gem_wait_for_idle(i915, | 737 | ret = i915_gem_wait_for_idle(i915, |
737 | I915_WAIT_LOCKED | | 738 | I915_WAIT_LOCKED | |
738 | I915_WAIT_INTERRUPTIBLE); | 739 | I915_WAIT_INTERRUPTIBLE, |
740 | MAX_SCHEDULE_TIMEOUT); | ||
739 | if (ret) | 741 | if (ret) |
740 | goto err_unreserve; | 742 | goto err_unreserve; |
741 | 743 | ||
@@ -1013,6 +1015,27 @@ i915_request_await_object(struct i915_request *to, | |||
1013 | return ret; | 1015 | return ret; |
1014 | } | 1016 | } |
1015 | 1017 | ||
1018 | void i915_request_skip(struct i915_request *rq, int error) | ||
1019 | { | ||
1020 | void *vaddr = rq->ring->vaddr; | ||
1021 | u32 head; | ||
1022 | |||
1023 | GEM_BUG_ON(!IS_ERR_VALUE((long)error)); | ||
1024 | dma_fence_set_error(&rq->fence, error); | ||
1025 | |||
1026 | /* | ||
1027 | * As this request likely depends on state from the lost | ||
1028 | * context, clear out all the user operations leaving the | ||
1029 | * breadcrumb at the end (so we get the fence notifications). | ||
1030 | */ | ||
1031 | head = rq->infix; | ||
1032 | if (rq->postfix < head) { | ||
1033 | memset(vaddr + head, 0, rq->ring->size - head); | ||
1034 | head = 0; | ||
1035 | } | ||
1036 | memset(vaddr + head, 0, rq->postfix - head); | ||
1037 | } | ||
1038 | |||
1016 | /* | 1039 | /* |
1017 | * NB: This function is not allowed to fail. Doing so would mean the | 1040 | * NB: This function is not allowed to fail. Doing so would mean the |
1018 | * request is not being tracked for completion but the work itself is | 1041 | * request is not being tracked for completion but the work itself is |
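The new i915_request_skip() above clears the user payload out of the ring while keeping the breadcrumb at the end, and the if (rq->postfix < head) branch handles a request that wraps past the end of the ring. A standalone sketch of that same two-memset wrap pattern over a plain buffer (the names here are illustrative, not the driver's):

	#include <stdio.h>
	#include <string.h>

	/*
	 * Clear [head, tail) in a circular buffer of 'size' bytes where the
	 * span may wrap past the end -- the same shape as the two memsets in
	 * i915_request_skip() above.
	 */
	static void clear_span(unsigned char *buf, size_t size,
			       size_t head, size_t tail)
	{
		if (tail < head) {		/* span wraps around the end */
			memset(buf + head, 0, size - head);
			head = 0;
		}
		memset(buf + head, 0, tail - head);
	}

	int main(void)
	{
		unsigned char ring[16];
		size_t i;

		memset(ring, 0xaa, sizeof(ring));
		clear_span(ring, sizeof(ring), 12, 4);	/* clears 12..15 and 0..3 */

		for (i = 0; i < sizeof(ring); i++)
			printf("%02x ", ring[i]);
		printf("\n");
		return 0;
	}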
@@ -1196,7 +1219,7 @@ static bool __i915_spin_request(const struct i915_request *rq, | |||
1196 | * takes to sleep on a request, on the order of a microsecond. | 1219 | * takes to sleep on a request, on the order of a microsecond. |
1197 | */ | 1220 | */ |
1198 | 1221 | ||
1199 | irq = atomic_read(&engine->irq_count); | 1222 | irq = READ_ONCE(engine->breadcrumbs.irq_count); |
1200 | timeout_us += local_clock_us(&cpu); | 1223 | timeout_us += local_clock_us(&cpu); |
1201 | do { | 1224 | do { |
1202 | if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) | 1225 | if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) |
@@ -1208,7 +1231,7 @@ static bool __i915_spin_request(const struct i915_request *rq, | |||
1208 | * assume we won't see one in the near future but require | 1231 | * assume we won't see one in the near future but require |
1209 | * the engine->seqno_barrier() to fixup coherency. | 1232 | * the engine->seqno_barrier() to fixup coherency. |
1210 | */ | 1233 | */ |
1211 | if (atomic_read(&engine->irq_count) != irq) | 1234 | if (READ_ONCE(engine->breadcrumbs.irq_count) != irq) |
1212 | break; | 1235 | break; |
1213 | 1236 | ||
1214 | if (signal_pending_state(state, current)) | 1237 | if (signal_pending_state(state, current)) |
@@ -1285,7 +1308,7 @@ long i915_request_wait(struct i915_request *rq, | |||
1285 | if (flags & I915_WAIT_LOCKED) | 1308 | if (flags & I915_WAIT_LOCKED) |
1286 | add_wait_queue(errq, &reset); | 1309 | add_wait_queue(errq, &reset); |
1287 | 1310 | ||
1288 | intel_wait_init(&wait, rq); | 1311 | intel_wait_init(&wait); |
1289 | 1312 | ||
1290 | restart: | 1313 | restart: |
1291 | do { | 1314 | do { |
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 7ee220ded9c9..e1c9365dfefb 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h | |||
@@ -258,6 +258,8 @@ void i915_request_add(struct i915_request *rq); | |||
258 | void __i915_request_submit(struct i915_request *request); | 258 | void __i915_request_submit(struct i915_request *request); |
259 | void i915_request_submit(struct i915_request *request); | 259 | void i915_request_submit(struct i915_request *request); |
260 | 260 | ||
261 | void i915_request_skip(struct i915_request *request, int error); | ||
262 | |||
261 | void __i915_request_unsubmit(struct i915_request *request); | 263 | void __i915_request_unsubmit(struct i915_request *request); |
262 | void i915_request_unsubmit(struct i915_request *request); | 264 | void i915_request_unsubmit(struct i915_request *request); |
263 | 265 | ||
@@ -378,6 +380,7 @@ static inline void | |||
378 | init_request_active(struct i915_gem_active *active, | 380 | init_request_active(struct i915_gem_active *active, |
379 | i915_gem_retire_fn retire) | 381 | i915_gem_retire_fn retire) |
380 | { | 382 | { |
383 | RCU_INIT_POINTER(active->request, NULL); | ||
381 | INIT_LIST_HEAD(&active->link); | 384 | INIT_LIST_HEAD(&active->link); |
382 | active->retire = retire ?: i915_gem_retire_noop; | 385 | active->retire = retire ?: i915_gem_retire_noop; |
383 | } | 386 | } |
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index dc2a4632faa7..a2c2c3ab5fb0 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h | |||
@@ -37,6 +37,8 @@ struct i915_timeline { | |||
37 | u32 seqno; | 37 | u32 seqno; |
38 | 38 | ||
39 | spinlock_t lock; | 39 | spinlock_t lock; |
40 | #define TIMELINE_CLIENT 0 /* default subclass */ | ||
41 | #define TIMELINE_ENGINE 1 | ||
40 | 42 | ||
41 | /** | 43 | /** |
42 | * List of breadcrumbs associated with GPU requests currently | 44 | * List of breadcrumbs associated with GPU requests currently |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index e82aa804cdba..ed4e0fb558f7 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -21,7 +21,7 @@ | |||
21 | * IN THE SOFTWARE. | 21 | * IN THE SOFTWARE. |
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include "i915_vma.h" | 25 | #include "i915_vma.h" |
26 | 26 | ||
27 | #include "i915_drv.h" | 27 | #include "i915_drv.h" |
@@ -30,18 +30,53 @@ | |||
30 | 30 | ||
31 | #include <drm/drm_gem.h> | 31 | #include <drm/drm_gem.h> |
32 | 32 | ||
33 | #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) | ||
34 | |||
35 | #include <linux/stackdepot.h> | ||
36 | |||
37 | static void vma_print_allocator(struct i915_vma *vma, const char *reason) | ||
38 | { | ||
39 | unsigned long entries[12]; | ||
40 | struct stack_trace trace = { | ||
41 | .entries = entries, | ||
42 | .max_entries = ARRAY_SIZE(entries), | ||
43 | }; | ||
44 | char buf[512]; | ||
45 | |||
46 | if (!vma->node.stack) { | ||
47 | DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", | ||
48 | vma->node.start, vma->node.size, reason); | ||
49 | return; | ||
50 | } | ||
51 | |||
52 | depot_fetch_stack(vma->node.stack, &trace); | ||
53 | snprint_stack_trace(buf, sizeof(buf), &trace, 0); | ||
54 | DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", | ||
55 | vma->node.start, vma->node.size, reason, buf); | ||
56 | } | ||
57 | |||
58 | #else | ||
59 | |||
60 | static void vma_print_allocator(struct i915_vma *vma, const char *reason) | ||
61 | { | ||
62 | } | ||
63 | |||
64 | #endif | ||
65 | |||
66 | struct i915_vma_active { | ||
67 | struct i915_gem_active base; | ||
68 | struct i915_vma *vma; | ||
69 | struct rb_node node; | ||
70 | u64 timeline; | ||
71 | }; | ||
72 | |||
33 | static void | 73 | static void |
34 | i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) | 74 | __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq) |
35 | { | 75 | { |
36 | const unsigned int idx = rq->engine->id; | ||
37 | struct i915_vma *vma = | ||
38 | container_of(active, struct i915_vma, last_read[idx]); | ||
39 | struct drm_i915_gem_object *obj = vma->obj; | 76 | struct drm_i915_gem_object *obj = vma->obj; |
40 | 77 | ||
41 | GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); | 78 | GEM_BUG_ON(!i915_vma_is_active(vma)); |
42 | 79 | if (--vma->active_count) | |
43 | i915_vma_clear_active(vma, idx); | ||
44 | if (i915_vma_is_active(vma)) | ||
45 | return; | 80 | return; |
46 | 81 | ||
47 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | 82 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
@@ -75,6 +110,21 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) | |||
75 | } | 110 | } |
76 | } | 111 | } |
77 | 112 | ||
113 | static void | ||
114 | i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq) | ||
115 | { | ||
116 | struct i915_vma_active *active = | ||
117 | container_of(base, typeof(*active), base); | ||
118 | |||
119 | __i915_vma_retire(active->vma, rq); | ||
120 | } | ||
121 | |||
122 | static void | ||
123 | i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq) | ||
124 | { | ||
125 | __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq); | ||
126 | } | ||
127 | |||
78 | static struct i915_vma * | 128 | static struct i915_vma * |
79 | vma_create(struct drm_i915_gem_object *obj, | 129 | vma_create(struct drm_i915_gem_object *obj, |
80 | struct i915_address_space *vm, | 130 | struct i915_address_space *vm, |
@@ -82,7 +132,6 @@ vma_create(struct drm_i915_gem_object *obj, | |||
82 | { | 132 | { |
83 | struct i915_vma *vma; | 133 | struct i915_vma *vma; |
84 | struct rb_node *rb, **p; | 134 | struct rb_node *rb, **p; |
85 | int i; | ||
86 | 135 | ||
87 | /* The aliasing_ppgtt should never be used directly! */ | 136 | /* The aliasing_ppgtt should never be used directly! */ |
88 | GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); | 137 | GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); |
@@ -91,8 +140,9 @@ vma_create(struct drm_i915_gem_object *obj, | |||
91 | if (vma == NULL) | 140 | if (vma == NULL) |
92 | return ERR_PTR(-ENOMEM); | 141 | return ERR_PTR(-ENOMEM); |
93 | 142 | ||
94 | for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) | 143 | vma->active = RB_ROOT; |
95 | init_request_active(&vma->last_read[i], i915_vma_retire); | 144 | |
145 | init_request_active(&vma->last_active, i915_vma_last_retire); | ||
96 | init_request_active(&vma->last_fence, NULL); | 146 | init_request_active(&vma->last_fence, NULL); |
97 | vma->vm = vm; | 147 | vma->vm = vm; |
98 | vma->ops = &vm->vma_ops; | 148 | vma->ops = &vm->vma_ops; |
@@ -110,7 +160,7 @@ vma_create(struct drm_i915_gem_object *obj, | |||
110 | obj->base.size >> PAGE_SHIFT)); | 160 | obj->base.size >> PAGE_SHIFT)); |
111 | vma->size = view->partial.size; | 161 | vma->size = view->partial.size; |
112 | vma->size <<= PAGE_SHIFT; | 162 | vma->size <<= PAGE_SHIFT; |
113 | GEM_BUG_ON(vma->size >= obj->base.size); | 163 | GEM_BUG_ON(vma->size > obj->base.size); |
114 | } else if (view->type == I915_GGTT_VIEW_ROTATED) { | 164 | } else if (view->type == I915_GGTT_VIEW_ROTATED) { |
115 | vma->size = intel_rotation_info_size(&view->rotated); | 165 | vma->size = intel_rotation_info_size(&view->rotated); |
116 | vma->size <<= PAGE_SHIFT; | 166 | vma->size <<= PAGE_SHIFT; |
@@ -745,13 +795,11 @@ void i915_vma_reopen(struct i915_vma *vma) | |||
745 | static void __i915_vma_destroy(struct i915_vma *vma) | 795 | static void __i915_vma_destroy(struct i915_vma *vma) |
746 | { | 796 | { |
747 | struct drm_i915_private *i915 = vma->vm->i915; | 797 | struct drm_i915_private *i915 = vma->vm->i915; |
748 | int i; | 798 | struct i915_vma_active *iter, *n; |
749 | 799 | ||
750 | GEM_BUG_ON(vma->node.allocated); | 800 | GEM_BUG_ON(vma->node.allocated); |
751 | GEM_BUG_ON(vma->fence); | 801 | GEM_BUG_ON(vma->fence); |
752 | 802 | ||
753 | for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) | ||
754 | GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i])); | ||
755 | GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); | 803 | GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); |
756 | 804 | ||
757 | list_del(&vma->obj_link); | 805 | list_del(&vma->obj_link); |
@@ -762,6 +810,11 @@ static void __i915_vma_destroy(struct i915_vma *vma) | |||
762 | if (!i915_vma_is_ggtt(vma)) | 810 | if (!i915_vma_is_ggtt(vma)) |
763 | i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); | 811 | i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); |
764 | 812 | ||
813 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { | ||
814 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); | ||
815 | kfree(iter); | ||
816 | } | ||
817 | |||
765 | kmem_cache_free(i915->vmas, vma); | 818 | kmem_cache_free(i915->vmas, vma); |
766 | } | 819 | } |
767 | 820 | ||
@@ -826,9 +879,151 @@ void i915_vma_revoke_mmap(struct i915_vma *vma) | |||
826 | list_del(&vma->obj->userfault_link); | 879 | list_del(&vma->obj->userfault_link); |
827 | } | 880 | } |
828 | 881 | ||
882 | static void export_fence(struct i915_vma *vma, | ||
883 | struct i915_request *rq, | ||
884 | unsigned int flags) | ||
885 | { | ||
886 | struct reservation_object *resv = vma->resv; | ||
887 | |||
888 | /* | ||
889 | * Ignore errors from failing to allocate the new fence, we can't | ||
890 | * handle an error right now. Worst case should be missed | ||
891 | * synchronisation leading to rendering corruption. | ||
892 | */ | ||
893 | reservation_object_lock(resv, NULL); | ||
894 | if (flags & EXEC_OBJECT_WRITE) | ||
895 | reservation_object_add_excl_fence(resv, &rq->fence); | ||
896 | else if (reservation_object_reserve_shared(resv) == 0) | ||
897 | reservation_object_add_shared_fence(resv, &rq->fence); | ||
898 | reservation_object_unlock(resv); | ||
899 | } | ||
900 | |||
901 | static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx) | ||
902 | { | ||
903 | struct i915_vma_active *active; | ||
904 | struct rb_node **p, *parent; | ||
905 | struct i915_request *old; | ||
906 | |||
907 | /* | ||
908 | * We track the most recently used timeline to skip a rbtree search | ||
909 | * for the common case, under typical loads we never need the rbtree | ||
910 | * at all. We can reuse the last_active slot if it is empty, that is | ||
911 | * after the previous activity has been retired, or if the active | ||
912 | * matches the current timeline. | ||
913 | * | ||
914 | * Note that we allow the timeline to be active simultaneously in | ||
915 | * the rbtree and the last_active cache. We do this to avoid having | ||
916 | * to search and replace the rbtree element for a new timeline, with | ||
917 | * the cost being that we must be aware that the vma may be retired | ||
918 | * twice for the same timeline (as the older rbtree element will be | ||
919 | * retired before the new request added to last_active). | ||
920 | */ | ||
921 | old = i915_gem_active_raw(&vma->last_active, | ||
922 | &vma->vm->i915->drm.struct_mutex); | ||
923 | if (!old || old->fence.context == idx) | ||
924 | goto out; | ||
925 | |||
926 | /* Move the currently active fence into the rbtree */ | ||
927 | idx = old->fence.context; | ||
928 | |||
929 | parent = NULL; | ||
930 | p = &vma->active.rb_node; | ||
931 | while (*p) { | ||
932 | parent = *p; | ||
933 | |||
934 | active = rb_entry(parent, struct i915_vma_active, node); | ||
935 | if (active->timeline == idx) | ||
936 | goto replace; | ||
937 | |||
938 | if (active->timeline < idx) | ||
939 | p = &parent->rb_right; | ||
940 | else | ||
941 | p = &parent->rb_left; | ||
942 | } | ||
943 | |||
944 | active = kmalloc(sizeof(*active), GFP_KERNEL); | ||
945 | if (unlikely(!active)) | ||
946 | return ERR_PTR(-ENOMEM); | ||
947 | |||
948 | init_request_active(&active->base, i915_vma_retire); | ||
949 | active->vma = vma; | ||
950 | active->timeline = idx; | ||
951 | |||
952 | rb_link_node(&active->node, parent, p); | ||
953 | rb_insert_color(&active->node, &vma->active); | ||
954 | |||
955 | replace: | ||
956 | /* | ||
957 | * Overwrite the previous active slot in the rbtree with last_active, | ||
958 | * leaving last_active zeroed. If the previous slot is still active, | ||
959 | * we must be careful as we now only expect to receive one retire | ||
960 | * callback not two, and so must undo the active counting for the | ||
961 | * overwritten slot. | ||
962 | */ | ||
963 | if (i915_gem_active_isset(&active->base)) { | ||
964 | /* Retire ourselves from the old rq->active_list */ | ||
965 | __list_del_entry(&active->base.link); | ||
966 | vma->active_count--; | ||
967 | GEM_BUG_ON(!vma->active_count); | ||
968 | } | ||
969 | GEM_BUG_ON(list_empty(&vma->last_active.link)); | ||
970 | list_replace_init(&vma->last_active.link, &active->base.link); | ||
971 | active->base.request = fetch_and_zero(&vma->last_active.request); | ||
972 | |||
973 | out: | ||
974 | return &vma->last_active; | ||
975 | } | ||
976 | |||
977 | int i915_vma_move_to_active(struct i915_vma *vma, | ||
978 | struct i915_request *rq, | ||
979 | unsigned int flags) | ||
980 | { | ||
981 | struct drm_i915_gem_object *obj = vma->obj; | ||
982 | struct i915_gem_active *active; | ||
983 | |||
984 | lockdep_assert_held(&rq->i915->drm.struct_mutex); | ||
985 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | ||
986 | |||
987 | active = active_instance(vma, rq->fence.context); | ||
988 | if (IS_ERR(active)) | ||
989 | return PTR_ERR(active); | ||
990 | |||
991 | /* | ||
992 | * Add a reference if we're newly entering the active list. | ||
993 | * The order in which we add operations to the retirement queue is | ||
994 | * vital here: mark_active adds to the start of the callback list, | ||
995 | * such that subsequent callbacks are called first. Therefore we | ||
996 | * add the active reference first and queue for it to be dropped | ||
997 | * *last*. | ||
998 | */ | ||
999 | if (!i915_gem_active_isset(active) && !vma->active_count++) { | ||
1000 | list_move_tail(&vma->vm_link, &vma->vm->active_list); | ||
1001 | obj->active_count++; | ||
1002 | } | ||
1003 | i915_gem_active_set(active, rq); | ||
1004 | GEM_BUG_ON(!i915_vma_is_active(vma)); | ||
1005 | GEM_BUG_ON(!obj->active_count); | ||
1006 | |||
1007 | obj->write_domain = 0; | ||
1008 | if (flags & EXEC_OBJECT_WRITE) { | ||
1009 | obj->write_domain = I915_GEM_DOMAIN_RENDER; | ||
1010 | |||
1011 | if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) | ||
1012 | i915_gem_active_set(&obj->frontbuffer_write, rq); | ||
1013 | |||
1014 | obj->read_domains = 0; | ||
1015 | } | ||
1016 | obj->read_domains |= I915_GEM_GPU_DOMAINS; | ||
1017 | |||
1018 | if (flags & EXEC_OBJECT_NEEDS_FENCE) | ||
1019 | i915_gem_active_set(&vma->last_fence, rq); | ||
1020 | |||
1021 | export_fence(vma, rq, flags); | ||
1022 | return 0; | ||
1023 | } | ||
1024 | |||
829 | int i915_vma_unbind(struct i915_vma *vma) | 1025 | int i915_vma_unbind(struct i915_vma *vma) |
830 | { | 1026 | { |
831 | unsigned long active; | ||
832 | int ret; | 1027 | int ret; |
833 | 1028 | ||
834 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); | 1029 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); |
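The comment block in active_instance() above describes the scheme: the most recently used timeline lives in the dedicated last_active slot, and the per-timeline rbtree is only searched (and grown) when a request from a different timeline arrives. A deliberately simplified standalone sketch of that lookup pattern — a linear table stands in for the rbtree, and the retire/double-count bookkeeping the driver needs is omitted:

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_TIMELINES 8

	struct active_cache {
		uint64_t last_timeline;		/* MRU slot, 0 means empty */
		uint64_t table[MAX_TIMELINES];	/* stand-in for the rbtree */
		unsigned int count;
	};

	/* Returns 1 when the fast (cached) path is taken, 0 on the slow path. */
	static int track_timeline(struct active_cache *c, uint64_t timeline)
	{
		unsigned int i;

		/* Fast path: the slot is empty or matches the current timeline. */
		if (!c->last_timeline || c->last_timeline == timeline) {
			c->last_timeline = timeline;
			return 1;
		}

		/* Slow path: spill the cached timeline into the table... */
		for (i = 0; i < c->count; i++)
			if (c->table[i] == c->last_timeline)
				break;
		if (i == c->count)
			c->table[c->count++] = c->last_timeline;

		/* ...and cache the new one. */
		c->last_timeline = timeline;
		return 0;
	}

	int main(void)
	{
		struct active_cache c = { 0 };
		uint64_t seq[] = { 1, 1, 1, 2, 2, 1, 3 };
		unsigned int i;

		for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
			printf("timeline %llu: %s path\n",
			       (unsigned long long)seq[i],
			       track_timeline(&c, seq[i]) ? "fast" : "slow");
		return 0;
	}

In the driver the slow path additionally has to migrate the cached i915_gem_active into the rbtree node and fix up vma->active_count, which is what the replace: label in active_instance() deals with.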
@@ -838,9 +1033,8 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
838 | * have side-effects such as unpinning or even unbinding this vma. | 1033 | * have side-effects such as unpinning or even unbinding this vma. |
839 | */ | 1034 | */ |
840 | might_sleep(); | 1035 | might_sleep(); |
841 | active = i915_vma_get_active(vma); | 1036 | if (i915_vma_is_active(vma)) { |
842 | if (active) { | 1037 | struct i915_vma_active *active, *n; |
843 | int idx; | ||
844 | 1038 | ||
845 | /* | 1039 | /* |
846 | * When a closed VMA is retired, it is unbound - eek. | 1040 | * When a closed VMA is retired, it is unbound - eek. |
@@ -857,26 +1051,32 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
857 | */ | 1051 | */ |
858 | __i915_vma_pin(vma); | 1052 | __i915_vma_pin(vma); |
859 | 1053 | ||
860 | for_each_active(active, idx) { | 1054 | ret = i915_gem_active_retire(&vma->last_active, |
861 | ret = i915_gem_active_retire(&vma->last_read[idx], | 1055 | &vma->vm->i915->drm.struct_mutex); |
862 | &vma->vm->i915->drm.struct_mutex); | 1056 | if (ret) |
863 | if (ret) | 1057 | goto unpin; |
864 | break; | ||
865 | } | ||
866 | 1058 | ||
867 | if (!ret) { | 1059 | rbtree_postorder_for_each_entry_safe(active, n, |
868 | ret = i915_gem_active_retire(&vma->last_fence, | 1060 | &vma->active, node) { |
1061 | ret = i915_gem_active_retire(&active->base, | ||
869 | &vma->vm->i915->drm.struct_mutex); | 1062 | &vma->vm->i915->drm.struct_mutex); |
1063 | if (ret) | ||
1064 | goto unpin; | ||
870 | } | 1065 | } |
871 | 1066 | ||
1067 | ret = i915_gem_active_retire(&vma->last_fence, | ||
1068 | &vma->vm->i915->drm.struct_mutex); | ||
1069 | unpin: | ||
872 | __i915_vma_unpin(vma); | 1070 | __i915_vma_unpin(vma); |
873 | if (ret) | 1071 | if (ret) |
874 | return ret; | 1072 | return ret; |
875 | } | 1073 | } |
876 | GEM_BUG_ON(i915_vma_is_active(vma)); | 1074 | GEM_BUG_ON(i915_vma_is_active(vma)); |
877 | 1075 | ||
878 | if (i915_vma_is_pinned(vma)) | 1076 | if (i915_vma_is_pinned(vma)) { |
1077 | vma_print_allocator(vma, "is pinned"); | ||
879 | return -EBUSY; | 1078 | return -EBUSY; |
1079 | } | ||
880 | 1080 | ||
881 | if (!drm_mm_node_allocated(&vma->node)) | 1081 | if (!drm_mm_node_allocated(&vma->node)) |
882 | return 0; | 1082 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 66a228931517..f06d66377107 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #define __I915_VMA_H__ | 26 | #define __I915_VMA_H__ |
27 | 27 | ||
28 | #include <linux/io-mapping.h> | 28 | #include <linux/io-mapping.h> |
29 | #include <linux/rbtree.h> | ||
29 | 30 | ||
30 | #include <drm/drm_mm.h> | 31 | #include <drm/drm_mm.h> |
31 | 32 | ||
@@ -94,8 +95,9 @@ struct i915_vma { | |||
94 | #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) | 95 | #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) |
95 | #define I915_VMA_GGTT_WRITE BIT(12) | 96 | #define I915_VMA_GGTT_WRITE BIT(12) |
96 | 97 | ||
97 | unsigned int active; | 98 | unsigned int active_count; |
98 | struct i915_gem_active last_read[I915_NUM_ENGINES]; | 99 | struct rb_root active; |
100 | struct i915_gem_active last_active; | ||
99 | struct i915_gem_active last_fence; | 101 | struct i915_gem_active last_fence; |
100 | 102 | ||
101 | /** | 103 | /** |
@@ -138,6 +140,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj, | |||
138 | 140 | ||
139 | void i915_vma_unpin_and_release(struct i915_vma **p_vma); | 141 | void i915_vma_unpin_and_release(struct i915_vma **p_vma); |
140 | 142 | ||
143 | static inline bool i915_vma_is_active(struct i915_vma *vma) | ||
144 | { | ||
145 | return vma->active_count; | ||
146 | } | ||
147 | |||
148 | int __must_check i915_vma_move_to_active(struct i915_vma *vma, | ||
149 | struct i915_request *rq, | ||
150 | unsigned int flags); | ||
151 | |||
141 | static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) | 152 | static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) |
142 | { | 153 | { |
143 | return vma->flags & I915_VMA_GGTT; | 154 | return vma->flags & I915_VMA_GGTT; |
@@ -187,34 +198,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma) | |||
187 | return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); | 198 | return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); |
188 | } | 199 | } |
189 | 200 | ||
190 | static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) | ||
191 | { | ||
192 | return vma->active; | ||
193 | } | ||
194 | |||
195 | static inline bool i915_vma_is_active(const struct i915_vma *vma) | ||
196 | { | ||
197 | return i915_vma_get_active(vma); | ||
198 | } | ||
199 | |||
200 | static inline void i915_vma_set_active(struct i915_vma *vma, | ||
201 | unsigned int engine) | ||
202 | { | ||
203 | vma->active |= BIT(engine); | ||
204 | } | ||
205 | |||
206 | static inline void i915_vma_clear_active(struct i915_vma *vma, | ||
207 | unsigned int engine) | ||
208 | { | ||
209 | vma->active &= ~BIT(engine); | ||
210 | } | ||
211 | |||
212 | static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, | ||
213 | unsigned int engine) | ||
214 | { | ||
215 | return vma->active & BIT(engine); | ||
216 | } | ||
217 | |||
218 | static inline u32 i915_ggtt_offset(const struct i915_vma *vma) | 201 | static inline u32 i915_ggtt_offset(const struct i915_vma *vma) |
219 | { | 202 | { |
220 | GEM_BUG_ON(!i915_vma_is_ggtt(vma)); | 203 | GEM_BUG_ON(!i915_vma_is_ggtt(vma)); |
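These i915_vma.h hunks drop the fixed per-engine active bitmask and its BIT(engine) helpers in favour of an active_count plus an rb-tree of per-timeline trackers, so a vma is no longer limited to I915_NUM_ENGINES concurrent users. The sketch below models only the counting side and is an assumption about the matching i915_vma.c changes, which are not part of this hunk; vma_mark_active() and vma_mark_retired() are illustrative names.

/* Assumed shape of count-based activity tracking (not the driver's code). */
struct vma_activity {
	unsigned int active_count;	/* timelines with outstanding work */
};

static inline int vma_activity_is_active(const struct vma_activity *a)
{
	return a->active_count != 0;	/* mirrors the new i915_vma_is_active() */
}

/* called when a request on some timeline starts using the vma; the first
 * request per timeline would also insert a tracker node into the rb-tree */
static void vma_mark_active(struct vma_activity *a)
{
	a->active_count++;
}

/* called from that timeline's retirement callback once its last request
 * using the vma has completed */
static void vma_mark_retired(struct vma_activity *a)
{
	a->active_count--;
}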
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c new file mode 100644 index 000000000000..13830e43a4d1 --- /dev/null +++ b/drivers/gpu/drm/i915/icl_dsi.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * Copyright © 2018 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Madhav Chauhan <madhav.chauhan@intel.com> | ||
25 | * Jani Nikula <jani.nikula@intel.com> | ||
26 | */ | ||
27 | |||
28 | #include "intel_dsi.h" | ||
29 | |||
30 | static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) | ||
31 | { | ||
32 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
33 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
34 | enum port port; | ||
35 | u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); | ||
36 | u32 afe_clk_khz; /* 8X Clock */ | ||
37 | u32 esc_clk_div_m; | ||
38 | |||
39 | afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, | ||
40 | intel_dsi->lane_count); | ||
41 | |||
42 | esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); | ||
43 | |||
44 | for_each_dsi_port(port, intel_dsi->ports) { | ||
45 | I915_WRITE(ICL_DSI_ESC_CLK_DIV(port), | ||
46 | esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); | ||
47 | POSTING_READ(ICL_DSI_ESC_CLK_DIV(port)); | ||
48 | } | ||
49 | |||
50 | for_each_dsi_port(port, intel_dsi->ports) { | ||
51 | I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port), | ||
52 | esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); | ||
53 | POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port)); | ||
54 | } | ||
55 | } | ||
56 | |||
57 | static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) | ||
58 | { | ||
59 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
60 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
61 | enum port port; | ||
62 | u32 tmp; | ||
63 | |||
64 | for_each_dsi_port(port, intel_dsi->ports) { | ||
65 | tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); | ||
66 | tmp |= COMBO_PHY_MODE_DSI; | ||
67 | I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); | ||
68 | } | ||
69 | |||
70 | for_each_dsi_port(port, intel_dsi->ports) { | ||
71 | intel_display_power_get(dev_priv, port == PORT_A ? | ||
72 | POWER_DOMAIN_PORT_DDI_A_IO : | ||
73 | POWER_DOMAIN_PORT_DDI_B_IO); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) | ||
78 | { | ||
79 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
80 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
81 | enum port port; | ||
82 | u32 tmp; | ||
83 | u32 lane_mask; | ||
84 | |||
85 | switch (intel_dsi->lane_count) { | ||
86 | case 1: | ||
87 | lane_mask = PWR_DOWN_LN_3_1_0; | ||
88 | break; | ||
89 | case 2: | ||
90 | lane_mask = PWR_DOWN_LN_3_1; | ||
91 | break; | ||
92 | case 3: | ||
93 | lane_mask = PWR_DOWN_LN_3; | ||
94 | break; | ||
95 | case 4: | ||
96 | default: | ||
97 | lane_mask = PWR_UP_ALL_LANES; | ||
98 | break; | ||
99 | } | ||
100 | |||
101 | for_each_dsi_port(port, intel_dsi->ports) { | ||
102 | tmp = I915_READ(ICL_PORT_CL_DW10(port)); | ||
103 | tmp &= ~PWR_DOWN_LN_MASK; | ||
104 | I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder) | ||
109 | { | ||
110 | /* step 4a: power up all lanes of the DDI used by DSI */ | ||
111 | gen11_dsi_power_up_lanes(encoder); | ||
112 | } | ||
113 | |||
114 | static void __attribute__((unused)) | ||
115 | gen11_dsi_pre_enable(struct intel_encoder *encoder, | ||
116 | const struct intel_crtc_state *pipe_config, | ||
117 | const struct drm_connector_state *conn_state) | ||
118 | { | ||
119 | /* step2: enable IO power */ | ||
120 | gen11_dsi_enable_io_power(encoder); | ||
121 | |||
122 | /* step3: enable DSI PLL */ | ||
123 | gen11_dsi_program_esc_clk_div(encoder); | ||
124 | |||
125 | /* step4: enable DSI port and DPHY */ | ||
126 | gen11_dsi_enable_port_and_phy(encoder); | ||
127 | } | ||
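In the new icl_dsi.c, gen11_dsi_program_esc_clk_div() derives the escape-clock divider from the AFE (8X) clock: afe_clk_khz = pclk * bpp / lane_count rounded to nearest, then esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK). A standalone arithmetic check of that formula; the pixel clock, bpp and lane count below are made-up example values, not numbers from the patch, and the 20 MHz ceiling is the usual D-PHY escape-clock limit.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define DIV_ROUND_UP(x, d)	(((x) + (d) - 1) / (d))
#define MAX_ESC_CLK_KHZ		20000	/* 20 MHz escape-clock ceiling */

int main(void)
{
	unsigned int pclk_khz = 148500;	/* example pixel clock */
	unsigned int bpp = 24;		/* RGB888 */
	unsigned int lanes = 4;

	unsigned int afe_clk_khz = DIV_ROUND_CLOSEST(pclk_khz * bpp, lanes);
	unsigned int esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, MAX_ESC_CLK_KHZ);

	/* 148500 * 24 / 4 = 891000 kHz -> divider 45 */
	printf("afe_clk = %u kHz, escape divider = %u\n",
	       afe_clk_khz, esc_clk_div_m);
	return 0;
}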
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 86a987b8ac66..1db6ba7d926e 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c | |||
@@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t) | |||
98 | struct intel_engine_cs *engine = | 98 | struct intel_engine_cs *engine = |
99 | from_timer(engine, t, breadcrumbs.hangcheck); | 99 | from_timer(engine, t, breadcrumbs.hangcheck); |
100 | struct intel_breadcrumbs *b = &engine->breadcrumbs; | 100 | struct intel_breadcrumbs *b = &engine->breadcrumbs; |
101 | unsigned int irq_count; | ||
101 | 102 | ||
102 | if (!b->irq_armed) | 103 | if (!b->irq_armed) |
103 | return; | 104 | return; |
104 | 105 | ||
105 | if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) { | 106 | irq_count = READ_ONCE(b->irq_count); |
106 | b->hangcheck_interrupts = atomic_read(&engine->irq_count); | 107 | if (b->hangcheck_interrupts != irq_count) { |
108 | b->hangcheck_interrupts = irq_count; | ||
107 | mod_timer(&b->hangcheck, wait_timeout()); | 109 | mod_timer(&b->hangcheck, wait_timeout()); |
108 | return; | 110 | return; |
109 | } | 111 | } |
@@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b) | |||
272 | if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) | 274 | if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) |
273 | return false; | 275 | return false; |
274 | 276 | ||
275 | /* Only start with the heavy weight fake irq timer if we have not | 277 | /* |
278 | * Only start with the heavy weight fake irq timer if we have not | ||
276 | * seen any interrupts since enabling it the first time. If the | 279 | * seen any interrupts since enabling it the first time. If the |
277 | * interrupts are still arriving, it means we made a mistake in our | 280 | * interrupts are still arriving, it means we made a mistake in our |
278 | * engine->seqno_barrier(), a timing error that should be transient | 281 | * engine->seqno_barrier(), a timing error that should be transient |
279 | * and unlikely to reoccur. | 282 | * and unlikely to reoccur. |
280 | */ | 283 | */ |
281 | return atomic_read(&engine->irq_count) == b->hangcheck_interrupts; | 284 | return READ_ONCE(b->irq_count) == b->hangcheck_interrupts; |
282 | } | 285 | } |
283 | 286 | ||
284 | static void enable_fake_irq(struct intel_breadcrumbs *b) | 287 | static void enable_fake_irq(struct intel_breadcrumbs *b) |
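The breadcrumbs hangcheck now takes a single READ_ONCE(b->irq_count) snapshot instead of calling atomic_read(&engine->irq_count) twice, so the comparison and the store that follows see the same value even if the interrupt handler bumps the counter in between. A self-contained sketch of that snapshot-compare-update pattern; the names below are illustrative, not fields from the patch.

/* userspace approximation of the kernel's READ_ONCE() */
#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

static unsigned int irq_count;		/* bumped from interrupt context */

struct hangcheck {
	unsigned int seen;		/* value recorded at the last check */
};

/* returns 1 if interrupts arrived since the last check (i.e. progress) */
static int hangcheck_tick(struct hangcheck *hc)
{
	unsigned int irqs = READ_ONCE(irq_count);	/* one snapshot ... */

	if (hc->seen != irqs) {
		hc->seen = irqs;	/* ... reused here, so test and store agree */
		return 1;
	}
	return 0;
}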
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index bf9433d7964d..29075c763428 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c | |||
@@ -316,6 +316,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, | |||
316 | break; | 316 | break; |
317 | default: | 317 | default: |
318 | DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); | 318 | DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); |
319 | /* fall through */ | ||
319 | case GC_DISPLAY_CLOCK_133_MHZ_PNV: | 320 | case GC_DISPLAY_CLOCK_133_MHZ_PNV: |
320 | cdclk_state->cdclk = 133333; | 321 | cdclk_state->cdclk = 133333; |
321 | break; | 322 | break; |
@@ -1797,6 +1798,7 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref) | |||
1797 | switch (ref) { | 1798 | switch (ref) { |
1798 | default: | 1799 | default: |
1799 | MISSING_CASE(ref); | 1800 | MISSING_CASE(ref); |
1801 | /* fall through */ | ||
1800 | case 24000: | 1802 | case 24000: |
1801 | ranges = ranges_24; | 1803 | ranges = ranges_24; |
1802 | break; | 1804 | break; |
@@ -1824,6 +1826,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) | |||
1824 | switch (cdclk) { | 1826 | switch (cdclk) { |
1825 | default: | 1827 | default: |
1826 | MISSING_CASE(cdclk); | 1828 | MISSING_CASE(cdclk); |
1829 | /* fall through */ | ||
1827 | case 307200: | 1830 | case 307200: |
1828 | case 556800: | 1831 | case 556800: |
1829 | case 652800: | 1832 | case 652800: |
@@ -1896,6 +1899,7 @@ static u8 icl_calc_voltage_level(int cdclk) | |||
1896 | return 1; | 1899 | return 1; |
1897 | default: | 1900 | default: |
1898 | MISSING_CASE(cdclk); | 1901 | MISSING_CASE(cdclk); |
1902 | /* fall through */ | ||
1899 | case 652800: | 1903 | case 652800: |
1900 | case 648000: | 1904 | case 648000: |
1901 | return 2; | 1905 | return 2; |
@@ -1913,6 +1917,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, | |||
1913 | switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { | 1917 | switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { |
1914 | default: | 1918 | default: |
1915 | MISSING_CASE(val); | 1919 | MISSING_CASE(val); |
1920 | /* fall through */ | ||
1916 | case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: | 1921 | case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: |
1917 | cdclk_state->ref = 24000; | 1922 | cdclk_state->ref = 24000; |
1918 | break; | 1923 | break; |
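The intel_cdclk.c changes here (and the matching ones later in intel_ddi.c, intel_display.c, intel_dpll_mgr.c, intel_drv.h and intel_engine_cs.c) only add /* fall through */ comments after the default: MISSING_CASE(...) arms, telling static analysers and gcc's -Wimplicit-fallthrough that continuing into the first real case is intentional. The shape of the pattern, with placeholder cases and a plain fprintf() standing in for MISSING_CASE():

#include <stdio.h>

/* build with: gcc -Wimplicit-fallthrough -c fallthrough.c
 * removing the "fall through" comment below re-introduces the warning */
static int pick_ref_khz(unsigned int sel)
{
	int ref;

	switch (sel) {
	default:
		fprintf(stderr, "unknown refclk select %u, assuming 24 MHz\n", sel);
		/* fall through */
	case 0:
		ref = 24000;
		break;
	case 1:
		ref = 19200;
		break;
	}
	return ref;
}

int main(void)
{
	printf("%d %d %d\n", pick_ref_khz(0), pick_ref_khz(1), pick_ref_khz(7));
	return 0;
}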
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 044fe1fb9872..32838ed89ee7 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -1069,6 +1069,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, | |||
1069 | switch (id) { | 1069 | switch (id) { |
1070 | default: | 1070 | default: |
1071 | MISSING_CASE(id); | 1071 | MISSING_CASE(id); |
1072 | /* fall through */ | ||
1072 | case DPLL_ID_ICL_DPLL0: | 1073 | case DPLL_ID_ICL_DPLL0: |
1073 | case DPLL_ID_ICL_DPLL1: | 1074 | case DPLL_ID_ICL_DPLL1: |
1074 | return DDI_CLK_SEL_NONE; | 1075 | return DDI_CLK_SEL_NONE; |
@@ -1983,15 +1984,50 @@ out: | |||
1983 | return ret; | 1984 | return ret; |
1984 | } | 1985 | } |
1985 | 1986 | ||
1986 | static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder) | 1987 | static inline enum intel_display_power_domain |
1988 | intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) | ||
1989 | { | ||
1990 | /* CNL HW requires corresponding AUX IOs to be powered up for PSR with | ||
1991 | * DC states enabled at the same time, while for driver initiated AUX | ||
1992 | * transfers we need the same AUX IOs to be powered but with DC states | ||
1993 | * disabled. Accordingly use the AUX power domain here which leaves DC | ||
1994 | * states enabled. | ||
1995 | * However, for non-A AUX ports the corresponding non-EDP transcoders | ||
1996 | * would have already enabled power well 2 and DC_OFF. This means we can | ||
1997 | * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a | ||
1998 | * specific AUX_IO reference without powering up any extra wells. | ||
1999 | * Note that PSR is enabled only on Port A even though this function | ||
2000 | * returns the correct domain for other ports too. | ||
2001 | */ | ||
2002 | return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : | ||
2003 | intel_dp->aux_power_domain; | ||
2004 | } | ||
2005 | |||
2006 | static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, | ||
2007 | struct intel_crtc_state *crtc_state) | ||
1987 | { | 2008 | { |
1988 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | 2009 | struct intel_digital_port *dig_port; |
1989 | enum pipe pipe; | 2010 | u64 domains; |
1990 | 2011 | ||
1991 | if (intel_ddi_get_hw_state(encoder, &pipe)) | 2012 | /* |
1992 | return BIT_ULL(dig_port->ddi_io_power_domain); | 2013 | * TODO: Add support for MST encoders. Atm, the following should never |
2014 | * happen since fake-MST encoders don't set their get_power_domains() | ||
2015 | * hook. | ||
2016 | */ | ||
2017 | if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) | ||
2018 | return 0; | ||
1993 | 2019 | ||
1994 | return 0; | 2020 | dig_port = enc_to_dig_port(&encoder->base); |
2021 | domains = BIT_ULL(dig_port->ddi_io_power_domain); | ||
2022 | |||
2023 | /* AUX power is only needed for (e)DP mode, not for HDMI. */ | ||
2024 | if (intel_crtc_has_dp_encoder(crtc_state)) { | ||
2025 | struct intel_dp *intel_dp = &dig_port->dp; | ||
2026 | |||
2027 | domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp)); | ||
2028 | } | ||
2029 | |||
2030 | return domains; | ||
1995 | } | 2031 | } |
1996 | 2032 | ||
1997 | void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) | 2033 | void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) |
@@ -2631,6 +2667,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
2631 | 2667 | ||
2632 | WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); | 2668 | WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); |
2633 | 2669 | ||
2670 | intel_display_power_get(dev_priv, | ||
2671 | intel_ddi_main_link_aux_domain(intel_dp)); | ||
2672 | |||
2634 | intel_dp_set_link_params(intel_dp, crtc_state->port_clock, | 2673 | intel_dp_set_link_params(intel_dp, crtc_state->port_clock, |
2635 | crtc_state->lane_count, is_mst); | 2674 | crtc_state->lane_count, is_mst); |
2636 | 2675 | ||
@@ -2775,6 +2814,9 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, | |||
2775 | intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); | 2814 | intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); |
2776 | 2815 | ||
2777 | intel_ddi_clk_disable(encoder); | 2816 | intel_ddi_clk_disable(encoder); |
2817 | |||
2818 | intel_display_power_put(dev_priv, | ||
2819 | intel_ddi_main_link_aux_domain(intel_dp)); | ||
2778 | } | 2820 | } |
2779 | 2821 | ||
2780 | static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, | 2822 | static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, |
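intel_ddi_get_power_domains() now returns a u64 bitmask of domains, always the DDI IO domain plus, for (e)DP, the main-link AUX domain picked by intel_ddi_main_link_aux_domain(), and the readout code in intel_display.c walks that mask taking one power reference per set bit. A small self-contained illustration of building and walking such a mask; the domain names and take_ref() are placeholders, not the driver's API.

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))

enum fake_domain { DOM_DDI_IO, DOM_AUX_A, DOM_AUX_B, DOM_COUNT };

/* placeholder for intel_display_power_get() */
static void take_ref(int domain)
{
	printf("get domain %d\n", domain);
}

static uint64_t encoder_domains(int is_dp, int aux_is_port_a)
{
	uint64_t domains = BIT_ULL(DOM_DDI_IO);

	if (is_dp)	/* AUX is only needed for (e)DP, not HDMI */
		domains |= BIT_ULL(aux_is_port_a ? DOM_AUX_A : DOM_AUX_B);
	return domains;
}

int main(void)
{
	uint64_t domains = encoder_domains(1, 1);

	for (int d = 0; d < DOM_COUNT; d++)	/* for_each_power_domain() analogue */
		if (domains & BIT_ULL(d))
			take_ref(d);
	return 0;
}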
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 0fd13df424cf..0ef0c6448d53 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c | |||
@@ -858,6 +858,8 @@ void intel_device_info_runtime_init(struct intel_device_info *info) | |||
858 | void intel_driver_caps_print(const struct intel_driver_caps *caps, | 858 | void intel_driver_caps_print(const struct intel_driver_caps *caps, |
859 | struct drm_printer *p) | 859 | struct drm_printer *p) |
860 | { | 860 | { |
861 | drm_printf(p, "Has logical contexts? %s\n", | ||
862 | yesno(caps->has_logical_contexts)); | ||
861 | drm_printf(p, "scheduler: %x\n", caps->scheduler); | 863 | drm_printf(p, "scheduler: %x\n", caps->scheduler); |
862 | } | 864 | } |
863 | 865 | ||
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 933e31669557..633f9fbf72ea 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h | |||
@@ -186,6 +186,7 @@ struct intel_device_info { | |||
186 | 186 | ||
187 | struct intel_driver_caps { | 187 | struct intel_driver_caps { |
188 | unsigned int scheduler; | 188 | unsigned int scheduler; |
189 | bool has_logical_contexts:1; | ||
189 | }; | 190 | }; |
190 | 191 | ||
191 | static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) | 192 | static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 694a4703042f..8f3199b06d1f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -5632,6 +5632,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5632 | struct intel_atomic_state *old_intel_state = | 5632 | struct intel_atomic_state *old_intel_state = |
5633 | to_intel_atomic_state(old_state); | 5633 | to_intel_atomic_state(old_state); |
5634 | bool psl_clkgate_wa; | 5634 | bool psl_clkgate_wa; |
5635 | u32 pipe_chicken; | ||
5635 | 5636 | ||
5636 | if (WARN_ON(intel_crtc->active)) | 5637 | if (WARN_ON(intel_crtc->active)) |
5637 | return; | 5638 | return; |
@@ -5691,6 +5692,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5691 | */ | 5692 | */ |
5692 | intel_color_load_luts(&pipe_config->base); | 5693 | intel_color_load_luts(&pipe_config->base); |
5693 | 5694 | ||
5695 | /* | ||
5696 | * Display WA #1153: enable hardware to bypass the alpha math | ||
5697 | * and rounding for per-pixel values 00 and 0xff | ||
5698 | */ | ||
5699 | if (INTEL_GEN(dev_priv) >= 11) { | ||
5700 | pipe_chicken = I915_READ(PIPE_CHICKEN(pipe)); | ||
5701 | if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN)) | ||
5702 | I915_WRITE_FW(PIPE_CHICKEN(pipe), | ||
5703 | pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN); | ||
5704 | } | ||
5705 | |||
5694 | intel_ddi_set_pipe_settings(pipe_config); | 5706 | intel_ddi_set_pipe_settings(pipe_config); |
5695 | if (!transcoder_is_dsi(cpu_transcoder)) | 5707 | if (!transcoder_is_dsi(cpu_transcoder)) |
5696 | intel_ddi_enable_transcoder_func(pipe_config); | 5708 | intel_ddi_enable_transcoder_func(pipe_config); |
@@ -9347,6 +9359,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, | |||
9347 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { | 9359 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
9348 | default: | 9360 | default: |
9349 | WARN(1, "unknown pipe linked to edp transcoder\n"); | 9361 | WARN(1, "unknown pipe linked to edp transcoder\n"); |
9362 | /* fall through */ | ||
9350 | case TRANS_DDI_EDP_INPUT_A_ONOFF: | 9363 | case TRANS_DDI_EDP_INPUT_A_ONOFF: |
9351 | case TRANS_DDI_EDP_INPUT_A_ON: | 9364 | case TRANS_DDI_EDP_INPUT_A_ON: |
9352 | trans_edp_pipe = PIPE_A; | 9365 | trans_edp_pipe = PIPE_A; |
@@ -9402,7 +9415,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, | |||
9402 | * registers/MIPI[BXT]. We can break out here early, since we | 9415 | * registers/MIPI[BXT]. We can break out here early, since we |
9403 | * need the same DSI PLL to be enabled for both DSI ports. | 9416 | * need the same DSI PLL to be enabled for both DSI ports. |
9404 | */ | 9417 | */ |
9405 | if (!intel_dsi_pll_is_enabled(dev_priv)) | 9418 | if (!bxt_dsi_pll_is_enabled(dev_priv)) |
9406 | break; | 9419 | break; |
9407 | 9420 | ||
9408 | /* XXX: this works for video mode only */ | 9421 | /* XXX: this works for video mode only */ |
@@ -10724,7 +10737,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) | |||
10724 | drm_connector_list_iter_begin(dev, &conn_iter); | 10737 | drm_connector_list_iter_begin(dev, &conn_iter); |
10725 | for_each_intel_connector_iter(connector, &conn_iter) { | 10738 | for_each_intel_connector_iter(connector, &conn_iter) { |
10726 | if (connector->base.state->crtc) | 10739 | if (connector->base.state->crtc) |
10727 | drm_connector_unreference(&connector->base); | 10740 | drm_connector_put(&connector->base); |
10728 | 10741 | ||
10729 | if (connector->base.encoder) { | 10742 | if (connector->base.encoder) { |
10730 | connector->base.state->best_encoder = | 10743 | connector->base.state->best_encoder = |
@@ -10732,7 +10745,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) | |||
10732 | connector->base.state->crtc = | 10745 | connector->base.state->crtc = |
10733 | connector->base.encoder->crtc; | 10746 | connector->base.encoder->crtc; |
10734 | 10747 | ||
10735 | drm_connector_reference(&connector->base); | 10748 | drm_connector_get(&connector->base); |
10736 | } else { | 10749 | } else { |
10737 | connector->base.state->best_encoder = NULL; | 10750 | connector->base.state->best_encoder = NULL; |
10738 | connector->base.state->crtc = NULL; | 10751 | connector->base.state->crtc = NULL; |
@@ -11011,6 +11024,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) | |||
11011 | case INTEL_OUTPUT_DDI: | 11024 | case INTEL_OUTPUT_DDI: |
11012 | if (WARN_ON(!HAS_DDI(to_i915(dev)))) | 11025 | if (WARN_ON(!HAS_DDI(to_i915(dev)))) |
11013 | break; | 11026 | break; |
11027 | /* else: fall through */ | ||
11014 | case INTEL_OUTPUT_DP: | 11028 | case INTEL_OUTPUT_DP: |
11015 | case INTEL_OUTPUT_HDMI: | 11029 | case INTEL_OUTPUT_HDMI: |
11016 | case INTEL_OUTPUT_EDP: | 11030 | case INTEL_OUTPUT_EDP: |
@@ -12542,6 +12556,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat | |||
12542 | finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset); | 12556 | finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset); |
12543 | } | 12557 | } |
12544 | 12558 | ||
12559 | static void intel_atomic_cleanup_work(struct work_struct *work) | ||
12560 | { | ||
12561 | struct drm_atomic_state *state = | ||
12562 | container_of(work, struct drm_atomic_state, commit_work); | ||
12563 | struct drm_i915_private *i915 = to_i915(state->dev); | ||
12564 | |||
12565 | drm_atomic_helper_cleanup_planes(&i915->drm, state); | ||
12566 | drm_atomic_helper_commit_cleanup_done(state); | ||
12567 | drm_atomic_state_put(state); | ||
12568 | |||
12569 | intel_atomic_helper_free_state(i915); | ||
12570 | } | ||
12571 | |||
12545 | static void intel_atomic_commit_tail(struct drm_atomic_state *state) | 12572 | static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
12546 | { | 12573 | { |
12547 | struct drm_device *dev = state->dev; | 12574 | struct drm_device *dev = state->dev; |
@@ -12702,13 +12729,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12702 | intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); | 12729 | intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); |
12703 | } | 12730 | } |
12704 | 12731 | ||
12705 | drm_atomic_helper_cleanup_planes(dev, state); | 12732 | /* |
12706 | 12733 | * Defer the cleanup of the old state to a separate worker to not | |
12707 | drm_atomic_helper_commit_cleanup_done(state); | 12734 | * impede the current task (userspace for blocking modesets) that |
12708 | 12735 | * are executed inline. For out-of-line asynchronous modesets/flips, | |
12709 | drm_atomic_state_put(state); | 12736 | * deferring to a new worker seems overkill, but we would place a |
12710 | 12737 | * schedule point (cond_resched()) here anyway to keep latencies | |
12711 | intel_atomic_helper_free_state(dev_priv); | 12738 | * down. |
12739 | */ | ||
12740 | INIT_WORK(&state->commit_work, intel_atomic_cleanup_work); | ||
12741 | schedule_work(&state->commit_work); | ||
12712 | } | 12742 | } |
12713 | 12743 | ||
12714 | static void intel_atomic_commit_work(struct work_struct *work) | 12744 | static void intel_atomic_commit_work(struct work_struct *work) |
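This hunk stops cleaning up the old atomic state inline at the end of intel_atomic_commit_tail() and instead reuses state->commit_work to push the plane cleanup, commit_cleanup_done and the final state put to a worker, so blocking modesets return to the caller sooner. A kernel-style sketch of that defer-to-worker pattern using the standard workqueue API; the commit_state struct and do_cleanup() are illustrative, not the i915 code.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct commit_state {
	struct work_struct cleanup_work;
	/* ... whatever the commit needs to tear down ... */
};

static void do_cleanup(struct work_struct *work)
{
	struct commit_state *state =
		container_of(work, struct commit_state, cleanup_work);

	/* heavy teardown runs here, off the committing task */
	kfree(state);
}

static void commit_tail(struct commit_state *state)
{
	/* ... the actual flip/modeset work happens inline ... */

	/* defer the cleanup instead of doing it before returning */
	INIT_WORK(&state->cleanup_work, do_cleanup);
	schedule_work(&state->cleanup_work);
}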
@@ -14105,7 +14135,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) | |||
14105 | intel_ddi_init(dev_priv, PORT_B); | 14135 | intel_ddi_init(dev_priv, PORT_B); |
14106 | intel_ddi_init(dev_priv, PORT_C); | 14136 | intel_ddi_init(dev_priv, PORT_C); |
14107 | 14137 | ||
14108 | intel_dsi_init(dev_priv); | 14138 | vlv_dsi_init(dev_priv); |
14109 | } else if (HAS_DDI(dev_priv)) { | 14139 | } else if (HAS_DDI(dev_priv)) { |
14110 | int found; | 14140 | int found; |
14111 | 14141 | ||
@@ -14211,7 +14241,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) | |||
14211 | intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); | 14241 | intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); |
14212 | } | 14242 | } |
14213 | 14243 | ||
14214 | intel_dsi_init(dev_priv); | 14244 | vlv_dsi_init(dev_priv); |
14215 | } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { | 14245 | } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { |
14216 | bool found = false; | 14246 | bool found = false; |
14217 | 14247 | ||
@@ -14493,11 +14523,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14493 | } | 14523 | } |
14494 | break; | 14524 | break; |
14495 | case DRM_FORMAT_NV12: | 14525 | case DRM_FORMAT_NV12: |
14496 | if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS || | ||
14497 | mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) { | ||
14498 | DRM_DEBUG_KMS("RC not to be enabled with NV12\n"); | ||
14499 | goto err; | ||
14500 | } | ||
14501 | if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || | 14526 | if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || |
14502 | IS_BROXTON(dev_priv)) { | 14527 | IS_BROXTON(dev_priv)) { |
14503 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | 14528 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", |
@@ -15676,11 +15701,20 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) | |||
15676 | for_each_intel_encoder(&dev_priv->drm, encoder) { | 15701 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
15677 | u64 get_domains; | 15702 | u64 get_domains; |
15678 | enum intel_display_power_domain domain; | 15703 | enum intel_display_power_domain domain; |
15704 | struct intel_crtc_state *crtc_state; | ||
15679 | 15705 | ||
15680 | if (!encoder->get_power_domains) | 15706 | if (!encoder->get_power_domains) |
15681 | continue; | 15707 | continue; |
15682 | 15708 | ||
15683 | get_domains = encoder->get_power_domains(encoder); | 15709 | /* |
15710 | * MST-primary and inactive encoders don't have a crtc state | ||
15711 | * and neither of these require any power domain references. | ||
15712 | */ | ||
15713 | if (!encoder->base.crtc) | ||
15714 | continue; | ||
15715 | |||
15716 | crtc_state = to_intel_crtc_state(encoder->base.crtc->state); | ||
15717 | get_domains = encoder->get_power_domains(encoder, crtc_state); | ||
15684 | for_each_power_domain(domain, get_domains) | 15718 | for_each_power_domain(domain, get_domains) |
15685 | intel_display_power_get(dev_priv, domain); | 15719 | intel_display_power_get(dev_priv, domain); |
15686 | } | 15720 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index dd30cae5eb00..ca5a10f3400d 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h | |||
@@ -199,6 +199,10 @@ enum intel_display_power_domain { | |||
199 | POWER_DOMAIN_AUX_E, | 199 | POWER_DOMAIN_AUX_E, |
200 | POWER_DOMAIN_AUX_F, | 200 | POWER_DOMAIN_AUX_F, |
201 | POWER_DOMAIN_AUX_IO_A, | 201 | POWER_DOMAIN_AUX_IO_A, |
202 | POWER_DOMAIN_AUX_TBT1, | ||
203 | POWER_DOMAIN_AUX_TBT2, | ||
204 | POWER_DOMAIN_AUX_TBT3, | ||
205 | POWER_DOMAIN_AUX_TBT4, | ||
202 | POWER_DOMAIN_GMBUS, | 206 | POWER_DOMAIN_GMBUS, |
203 | POWER_DOMAIN_MODESET, | 207 | POWER_DOMAIN_MODESET, |
204 | POWER_DOMAIN_GT_IRQ, | 208 | POWER_DOMAIN_GT_IRQ, |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6ac6c8787dcf..5be07e1d816d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -953,7 +953,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp) | |||
953 | } | 953 | } |
954 | 954 | ||
955 | static uint32_t | 955 | static uint32_t |
956 | intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) | 956 | intel_dp_aux_wait_done(struct intel_dp *intel_dp) |
957 | { | 957 | { |
958 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); | 958 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); |
959 | i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); | 959 | i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); |
@@ -961,14 +961,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) | |||
961 | bool done; | 961 | bool done; |
962 | 962 | ||
963 | #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) | 963 | #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
964 | if (has_aux_irq) | 964 | done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, |
965 | done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, | 965 | msecs_to_jiffies_timeout(10)); |
966 | msecs_to_jiffies_timeout(10)); | ||
967 | else | ||
968 | done = wait_for(C, 10) == 0; | ||
969 | if (!done) | 966 | if (!done) |
970 | DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", | 967 | DRM_ERROR("dp aux hw did not signal timeout!\n"); |
971 | has_aux_irq); | ||
972 | #undef C | 968 | #undef C |
973 | 969 | ||
974 | return status; | 970 | return status; |
@@ -1033,7 +1029,6 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | |||
1033 | } | 1029 | } |
1034 | 1030 | ||
1035 | static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, | 1031 | static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, |
1036 | bool has_aux_irq, | ||
1037 | int send_bytes, | 1032 | int send_bytes, |
1038 | uint32_t aux_clock_divider) | 1033 | uint32_t aux_clock_divider) |
1039 | { | 1034 | { |
@@ -1054,7 +1049,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, | |||
1054 | 1049 | ||
1055 | return DP_AUX_CH_CTL_SEND_BUSY | | 1050 | return DP_AUX_CH_CTL_SEND_BUSY | |
1056 | DP_AUX_CH_CTL_DONE | | 1051 | DP_AUX_CH_CTL_DONE | |
1057 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | | 1052 | DP_AUX_CH_CTL_INTERRUPT | |
1058 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 1053 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
1059 | timeout | | 1054 | timeout | |
1060 | DP_AUX_CH_CTL_RECEIVE_ERROR | | 1055 | DP_AUX_CH_CTL_RECEIVE_ERROR | |
@@ -1064,13 +1059,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, | |||
1064 | } | 1059 | } |
1065 | 1060 | ||
1066 | static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, | 1061 | static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, |
1067 | bool has_aux_irq, | ||
1068 | int send_bytes, | 1062 | int send_bytes, |
1069 | uint32_t unused) | 1063 | uint32_t unused) |
1070 | { | 1064 | { |
1071 | return DP_AUX_CH_CTL_SEND_BUSY | | 1065 | return DP_AUX_CH_CTL_SEND_BUSY | |
1072 | DP_AUX_CH_CTL_DONE | | 1066 | DP_AUX_CH_CTL_DONE | |
1073 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | | 1067 | DP_AUX_CH_CTL_INTERRUPT | |
1074 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 1068 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
1075 | DP_AUX_CH_CTL_TIME_OUT_MAX | | 1069 | DP_AUX_CH_CTL_TIME_OUT_MAX | |
1076 | DP_AUX_CH_CTL_RECEIVE_ERROR | | 1070 | DP_AUX_CH_CTL_RECEIVE_ERROR | |
@@ -1093,7 +1087,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, | |||
1093 | int i, ret, recv_bytes; | 1087 | int i, ret, recv_bytes; |
1094 | uint32_t status; | 1088 | uint32_t status; |
1095 | int try, clock = 0; | 1089 | int try, clock = 0; |
1096 | bool has_aux_irq = HAS_AUX_IRQ(dev_priv); | ||
1097 | bool vdd; | 1090 | bool vdd; |
1098 | 1091 | ||
1099 | ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); | 1092 | ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); |
@@ -1148,7 +1141,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, | |||
1148 | 1141 | ||
1149 | while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { | 1142 | while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { |
1150 | u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, | 1143 | u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, |
1151 | has_aux_irq, | ||
1152 | send_bytes, | 1144 | send_bytes, |
1153 | aux_clock_divider); | 1145 | aux_clock_divider); |
1154 | 1146 | ||
@@ -1165,7 +1157,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, | |||
1165 | /* Send the command and wait for it to complete */ | 1157 | /* Send the command and wait for it to complete */ |
1166 | I915_WRITE(ch_ctl, send_ctl); | 1158 | I915_WRITE(ch_ctl, send_ctl); |
1167 | 1159 | ||
1168 | status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); | 1160 | status = intel_dp_aux_wait_done(intel_dp); |
1169 | 1161 | ||
1170 | /* Clear done status and any errors */ | 1162 | /* Clear done status and any errors */ |
1171 | I915_WRITE(ch_ctl, | 1163 | I915_WRITE(ch_ctl, |
@@ -4499,6 +4491,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) | |||
4499 | if (intel_dp_needs_link_retrain(intel_dp)) | 4491 | if (intel_dp_needs_link_retrain(intel_dp)) |
4500 | return false; | 4492 | return false; |
4501 | 4493 | ||
4494 | intel_psr_short_pulse(intel_dp); | ||
4495 | |||
4502 | if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { | 4496 | if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { |
4503 | DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); | 4497 | DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); |
4504 | /* Send a Hotplug Uevent to userspace to start modeset */ | 4498 | /* Send a Hotplug Uevent to userspace to start modeset */ |
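With the has_aux_irq plumbing removed, intel_dp_aux_wait_done() always sleeps on the gmbus wait queue until the AUX interrupt wakes it and DP_AUX_CH_CTL_SEND_BUSY reads back clear, or 10 ms elapse, instead of optionally polling. The generic interrupt-driven wait it relies on, sketched with a hypothetical busy flag and wait queue rather than the real register read.

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(xfer_wq);
static bool xfer_busy;	/* the IRQ handler clears this and calls wake_up(&xfer_wq) */

/* returns true if the transfer completed within 10 ms */
static bool wait_for_xfer_done(void)
{
	return wait_event_timeout(xfer_wq, !READ_ONCE(xfer_busy),
				  msecs_to_jiffies(10)) != 0;
}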
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 0f012fbe34eb..85ecf41eeabb 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -514,7 +514,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
514 | intel_connector->mst_port = NULL; | 514 | intel_connector->mst_port = NULL; |
515 | drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); | 515 | drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); |
516 | 516 | ||
517 | drm_connector_unreference(connector); | 517 | drm_connector_put(connector); |
518 | } | 518 | } |
519 | 519 | ||
520 | static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | 520 | static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 156f8e4cbe4c..b51ad2917dbe 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -2566,6 +2566,7 @@ int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, | |||
2566 | switch (index) { | 2566 | switch (index) { |
2567 | default: | 2567 | default: |
2568 | MISSING_CASE(index); | 2568 | MISSING_CASE(index); |
2569 | /* fall through */ | ||
2569 | case 0: | 2570 | case 0: |
2570 | link_clock = 540000; | 2571 | link_clock = 540000; |
2571 | break; | 2572 | break; |
@@ -2639,6 +2640,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, | |||
2639 | switch (div1) { | 2640 | switch (div1) { |
2640 | default: | 2641 | default: |
2641 | MISSING_CASE(div1); | 2642 | MISSING_CASE(div1); |
2643 | /* fall through */ | ||
2642 | case 2: | 2644 | case 2: |
2643 | hsdiv = 0; | 2645 | hsdiv = 0; |
2644 | break; | 2646 | break; |
@@ -2812,25 +2814,31 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, | |||
2812 | MG_PLL_SSC_FLLEN | | 2814 | MG_PLL_SSC_FLLEN | |
2813 | MG_PLL_SSC_STEPSIZE(ssc_stepsize); | 2815 | MG_PLL_SSC_STEPSIZE(ssc_stepsize); |
2814 | 2816 | ||
2815 | pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART; | 2817 | pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART | |
2816 | 2818 | MG_PLL_TDC_COLDST_IREFINT_EN | | |
2817 | if (refclk_khz != 38400) { | 2819 | MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | |
2818 | pll_state->mg_pll_tdc_coldst_bias |= | 2820 | MG_PLL_TDC_TDCOVCCORR_EN | |
2819 | MG_PLL_TDC_COLDST_IREFINT_EN | | 2821 | MG_PLL_TDC_TDCSEL(3); |
2820 | MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | | 2822 | |
2821 | MG_PLL_TDC_COLDST_COLDSTART | | 2823 | pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | |
2822 | MG_PLL_TDC_TDCOVCCORR_EN | | 2824 | MG_PLL_BIAS_INIT_DCOAMP(0x3F) | |
2823 | MG_PLL_TDC_TDCSEL(3); | 2825 | MG_PLL_BIAS_BIAS_BONUS(10) | |
2824 | 2826 | MG_PLL_BIAS_BIASCAL_EN | | |
2825 | pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | | 2827 | MG_PLL_BIAS_CTRIM(12) | |
2826 | MG_PLL_BIAS_INIT_DCOAMP(0x3F) | | 2828 | MG_PLL_BIAS_VREF_RDAC(4) | |
2827 | MG_PLL_BIAS_BIAS_BONUS(10) | | 2829 | MG_PLL_BIAS_IREFTRIM(iref_trim); |
2828 | MG_PLL_BIAS_BIASCAL_EN | | 2830 | |
2829 | MG_PLL_BIAS_CTRIM(12) | | 2831 | if (refclk_khz == 38400) { |
2830 | MG_PLL_BIAS_VREF_RDAC(4) | | 2832 | pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; |
2831 | MG_PLL_BIAS_IREFTRIM(iref_trim); | 2833 | pll_state->mg_pll_bias_mask = 0; |
2834 | } else { | ||
2835 | pll_state->mg_pll_tdc_coldst_bias_mask = -1U; | ||
2836 | pll_state->mg_pll_bias_mask = -1U; | ||
2832 | } | 2837 | } |
2833 | 2838 | ||
2839 | pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask; | ||
2840 | pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; | ||
2841 | |||
2834 | return true; | 2842 | return true; |
2835 | } | 2843 | } |
2836 | 2844 | ||
@@ -2897,6 +2905,7 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) | |||
2897 | switch (id) { | 2905 | switch (id) { |
2898 | default: | 2906 | default: |
2899 | MISSING_CASE(id); | 2907 | MISSING_CASE(id); |
2908 | /* fall through */ | ||
2900 | case DPLL_ID_ICL_DPLL0: | 2909 | case DPLL_ID_ICL_DPLL0: |
2901 | case DPLL_ID_ICL_DPLL1: | 2910 | case DPLL_ID_ICL_DPLL1: |
2902 | return CNL_DPLL_ENABLE(id); | 2911 | return CNL_DPLL_ENABLE(id); |
@@ -2939,18 +2948,41 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
2939 | case DPLL_ID_ICL_MGPLL4: | 2948 | case DPLL_ID_ICL_MGPLL4: |
2940 | port = icl_mg_pll_id_to_port(id); | 2949 | port = icl_mg_pll_id_to_port(id); |
2941 | hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); | 2950 | hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); |
2951 | hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; | ||
2952 | |||
2942 | hw_state->mg_clktop2_coreclkctl1 = | 2953 | hw_state->mg_clktop2_coreclkctl1 = |
2943 | I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); | 2954 | I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); |
2955 | hw_state->mg_clktop2_coreclkctl1 &= | ||
2956 | MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; | ||
2957 | |||
2944 | hw_state->mg_clktop2_hsclkctl = | 2958 | hw_state->mg_clktop2_hsclkctl = |
2945 | I915_READ(MG_CLKTOP2_HSCLKCTL(port)); | 2959 | I915_READ(MG_CLKTOP2_HSCLKCTL(port)); |
2960 | hw_state->mg_clktop2_hsclkctl &= | ||
2961 | MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | | ||
2962 | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | | ||
2963 | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | | ||
2964 | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; | ||
2965 | |||
2946 | hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); | 2966 | hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); |
2947 | hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port)); | 2967 | hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port)); |
2948 | hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port)); | 2968 | hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port)); |
2949 | hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port)); | 2969 | hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port)); |
2950 | hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port)); | 2970 | hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port)); |
2971 | |||
2951 | hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port)); | 2972 | hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port)); |
2952 | hw_state->mg_pll_tdc_coldst_bias = | 2973 | hw_state->mg_pll_tdc_coldst_bias = |
2953 | I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); | 2974 | I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); |
2975 | |||
2976 | if (dev_priv->cdclk.hw.ref == 38400) { | ||
2977 | hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; | ||
2978 | hw_state->mg_pll_bias_mask = 0; | ||
2979 | } else { | ||
2980 | hw_state->mg_pll_tdc_coldst_bias_mask = -1U; | ||
2981 | hw_state->mg_pll_bias_mask = -1U; | ||
2982 | } | ||
2983 | |||
2984 | hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; | ||
2985 | hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; | ||
2954 | break; | 2986 | break; |
2955 | default: | 2987 | default: |
2956 | MISSING_CASE(id); | 2988 | MISSING_CASE(id); |
@@ -2978,19 +3010,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, | |||
2978 | { | 3010 | { |
2979 | struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; | 3011 | struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; |
2980 | enum port port = icl_mg_pll_id_to_port(pll->info->id); | 3012 | enum port port = icl_mg_pll_id_to_port(pll->info->id); |
3013 | u32 val; | ||
3014 | |||
3015 | /* | ||
3016 | * Some of the following registers have reserved fields, so program | ||
3017 | * these with RMW based on a mask. The mask can be fixed or generated | ||
3018 | * during the calc/readout phase if the mask depends on some other HW | ||
3019 | * state like refclk, see icl_calc_mg_pll_state(). | ||
3020 | */ | ||
3021 | val = I915_READ(MG_REFCLKIN_CTL(port)); | ||
3022 | val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; | ||
3023 | val |= hw_state->mg_refclkin_ctl; | ||
3024 | I915_WRITE(MG_REFCLKIN_CTL(port), val); | ||
3025 | |||
3026 | val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); | ||
3027 | val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; | ||
3028 | val |= hw_state->mg_clktop2_coreclkctl1; | ||
3029 | I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val); | ||
3030 | |||
3031 | val = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); | ||
3032 | val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | | ||
3033 | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | | ||
3034 | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | | ||
3035 | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); | ||
3036 | val |= hw_state->mg_clktop2_hsclkctl; | ||
3037 | I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val); | ||
2981 | 3038 | ||
2982 | I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl); | ||
2983 | I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), | ||
2984 | hw_state->mg_clktop2_coreclkctl1); | ||
2985 | I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl); | ||
2986 | I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0); | 3039 | I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0); |
2987 | I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1); | 3040 | I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1); |
2988 | I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf); | 3041 | I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf); |
2989 | I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock); | 3042 | I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock); |
2990 | I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc); | 3043 | I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc); |
2991 | I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias); | 3044 | |
2992 | I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), | 3045 | val = I915_READ(MG_PLL_BIAS(port)); |
2993 | hw_state->mg_pll_tdc_coldst_bias); | 3046 | val &= ~hw_state->mg_pll_bias_mask; |
3047 | val |= hw_state->mg_pll_bias; | ||
3048 | I915_WRITE(MG_PLL_BIAS(port), val); | ||
3049 | |||
3050 | val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); | ||
3051 | val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; | ||
3052 | val |= hw_state->mg_pll_tdc_coldst_bias; | ||
3053 | I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val); | ||
3054 | |||
2994 | POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port)); | 3055 | POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port)); |
2995 | } | 3056 | } |
2996 | 3057 | ||
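icl_mg_pll_write() above no longer writes MG_REFCLKIN_CTL, the MG_CLKTOP2 registers and the bias registers wholesale; it reads each register, clears only the fields owned by the driver (using fixed masks, or the refclk-dependent masks computed in icl_calc_mg_pll_state() and saved in the hw state), and writes the merged value back so reserved fields keep whatever the hardware holds. The core read-modify-write idiom, self-contained with a fake register:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg = 0xdead0f0f;	/* pretend the high bits are reserved */

/* update only the bits covered by mask, preserving everything else */
static void rmw(uint32_t *reg, uint32_t mask, uint32_t bits)
{
	uint32_t val = *reg;	/* read */

	val &= ~mask;		/* clear the field(s) we own */
	val |= bits & mask;	/* modify, never touching bits outside the mask */
	*reg = val;		/* write */
}

int main(void)
{
	/* e.g. a refclk-dependent mask that only covers the low byte */
	rmw(&fake_reg, 0x000000ff, 0x000000a5);
	printf("0x%08x\n", fake_reg);	/* -> 0xdead0fa5 */
	return 0;
}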
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index ba925c7ee482..7e522cf4f13f 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h | |||
@@ -180,6 +180,8 @@ struct intel_dpll_hw_state { | |||
180 | uint32_t mg_pll_ssc; | 180 | uint32_t mg_pll_ssc; |
181 | uint32_t mg_pll_bias; | 181 | uint32_t mg_pll_bias; |
182 | uint32_t mg_pll_tdc_coldst_bias; | 182 | uint32_t mg_pll_tdc_coldst_bias; |
183 | uint32_t mg_pll_bias_mask; | ||
184 | uint32_t mg_pll_tdc_coldst_bias_mask; | ||
183 | }; | 185 | }; |
184 | 186 | ||
185 | /** | 187 | /** |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0c3ac0eafde0..61e715ddd0d5 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -254,7 +254,8 @@ struct intel_encoder { | |||
254 | struct intel_crtc_state *pipe_config); | 254 | struct intel_crtc_state *pipe_config); |
255 | /* Returns a mask of power domains that need to be referenced as part | 255 | /* Returns a mask of power domains that need to be referenced as part |
256 | * of the hardware state readout code. */ | 256 | * of the hardware state readout code. */ |
257 | u64 (*get_power_domains)(struct intel_encoder *encoder); | 257 | u64 (*get_power_domains)(struct intel_encoder *encoder, |
258 | struct intel_crtc_state *crtc_state); | ||
258 | /* | 259 | /* |
259 | * Called during system suspend after all pending requests for the | 260 | * Called during system suspend after all pending requests for the |
260 | * encoder are flushed (for example for DP AUX transactions) and | 261 | * encoder are flushed (for example for DP AUX transactions) and |
@@ -1133,7 +1134,6 @@ struct intel_dp { | |||
1133 | * register with to kick off an AUX transaction. | 1134 | * register with to kick off an AUX transaction. |
1134 | */ | 1135 | */ |
1135 | uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, | 1136 | uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, |
1136 | bool has_aux_irq, | ||
1137 | int send_bytes, | 1137 | int send_bytes, |
1138 | uint32_t aux_clock_divider); | 1138 | uint32_t aux_clock_divider); |
1139 | 1139 | ||
@@ -1254,6 +1254,7 @@ enc_to_dig_port(struct drm_encoder *encoder) | |||
1254 | switch (intel_encoder->type) { | 1254 | switch (intel_encoder->type) { |
1255 | case INTEL_OUTPUT_DDI: | 1255 | case INTEL_OUTPUT_DDI: |
1256 | WARN_ON(!HAS_DDI(to_i915(encoder->dev))); | 1256 | WARN_ON(!HAS_DDI(to_i915(encoder->dev))); |
1257 | /* fall through */ | ||
1257 | case INTEL_OUTPUT_DP: | 1258 | case INTEL_OUTPUT_DP: |
1258 | case INTEL_OUTPUT_EDP: | 1259 | case INTEL_OUTPUT_EDP: |
1259 | case INTEL_OUTPUT_HDMI: | 1260 | case INTEL_OUTPUT_HDMI: |
@@ -1730,8 +1731,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); | |||
1730 | /* intel_dp_mst.c */ | 1731 | /* intel_dp_mst.c */ |
1731 | int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); | 1732 | int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); |
1732 | void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); | 1733 | void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); |
1733 | /* intel_dsi.c */ | 1734 | /* vlv_dsi.c */ |
1734 | void intel_dsi_init(struct drm_i915_private *dev_priv); | 1735 | void vlv_dsi_init(struct drm_i915_private *dev_priv); |
1735 | 1736 | ||
1736 | /* intel_dsi_dcs_backlight.c */ | 1737 | /* intel_dsi_dcs_backlight.c */ |
1737 | int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); | 1738 | int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); |
@@ -1921,6 +1922,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, | |||
1921 | struct intel_crtc_state *crtc_state); | 1922 | struct intel_crtc_state *crtc_state); |
1922 | void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); | 1923 | void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); |
1923 | void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); | 1924 | void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); |
1925 | void intel_psr_short_pulse(struct intel_dp *intel_dp); | ||
1926 | int intel_psr_wait_for_idle(struct drm_i915_private *dev_priv); | ||
1924 | 1927 | ||
1925 | /* intel_runtime_pm.c */ | 1928 | /* intel_runtime_pm.c */ |
1926 | int intel_power_domains_init(struct drm_i915_private *); | 1929 | int intel_power_domains_init(struct drm_i915_private *); |
@@ -2151,7 +2154,6 @@ void lspcon_resume(struct intel_lspcon *lspcon); | |||
2151 | void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); | 2154 | void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); |
2152 | 2155 | ||
2153 | /* intel_pipe_crc.c */ | 2156 | /* intel_pipe_crc.c */ |
2154 | int intel_pipe_crc_create(struct drm_minor *minor); | ||
2155 | #ifdef CONFIG_DEBUG_FS | 2157 | #ifdef CONFIG_DEBUG_FS |
2156 | int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, | 2158 | int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, |
2157 | size_t *values_cnt); | 2159 | size_t *values_cnt); |
@@ -2167,5 +2169,4 @@ static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc) | |||
2167 | { | 2169 | { |
2168 | } | 2170 | } |
2169 | #endif | 2171 | #endif |
2170 | extern const struct file_operations i915_display_crc_ctl_fops; | ||
2171 | #endif /* __INTEL_DRV_H__ */ | 2172 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 7afeb9580f41..ad7c1cb32983 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h | |||
@@ -129,21 +129,29 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) | |||
129 | return container_of(encoder, struct intel_dsi, base.base); | 129 | return container_of(encoder, struct intel_dsi, base.base); |
130 | } | 130 | } |
131 | 131 | ||
132 | /* intel_dsi.c */ | 132 | /* vlv_dsi.c */ |
133 | void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port); | 133 | void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); |
134 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); | 134 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); |
135 | 135 | ||
136 | /* intel_dsi_pll.c */ | 136 | /* vlv_dsi_pll.c */ |
137 | bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); | 137 | int vlv_dsi_pll_compute(struct intel_encoder *encoder, |
138 | int intel_compute_dsi_pll(struct intel_encoder *encoder, | 138 | struct intel_crtc_state *config); |
139 | struct intel_crtc_state *config); | 139 | void vlv_dsi_pll_enable(struct intel_encoder *encoder, |
140 | void intel_enable_dsi_pll(struct intel_encoder *encoder, | 140 | const struct intel_crtc_state *config); |
141 | const struct intel_crtc_state *config); | 141 | void vlv_dsi_pll_disable(struct intel_encoder *encoder); |
142 | void intel_disable_dsi_pll(struct intel_encoder *encoder); | 142 | u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, |
143 | u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | 143 | struct intel_crtc_state *config); |
144 | struct intel_crtc_state *config); | 144 | void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); |
145 | void intel_dsi_reset_clocks(struct intel_encoder *encoder, | 145 | |
146 | enum port port); | 146 | bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); |
147 | int bxt_dsi_pll_compute(struct intel_encoder *encoder, | ||
148 | struct intel_crtc_state *config); | ||
149 | void bxt_dsi_pll_enable(struct intel_encoder *encoder, | ||
150 | const struct intel_crtc_state *config); | ||
151 | void bxt_dsi_pll_disable(struct intel_encoder *encoder); | ||
152 | u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | ||
153 | struct intel_crtc_state *config); | ||
154 | void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); | ||
147 | 155 | ||
148 | /* intel_dsi_vbt.c */ | 156 | /* intel_dsi_vbt.c */ |
149 | bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); | 157 | bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); |
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 4d6ffa7b3e7b..ac83d6b89ae0 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c | |||
@@ -181,7 +181,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, | |||
181 | break; | 181 | break; |
182 | } | 182 | } |
183 | 183 | ||
184 | wait_for_dsi_fifo_empty(intel_dsi, port); | 184 | vlv_dsi_wait_for_fifo_empty(intel_dsi, port); |
185 | 185 | ||
186 | out: | 186 | out: |
187 | data += len; | 187 | data += len; |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 32bf3a408d46..0ac497275a51 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <drm/drm_print.h> | 25 | #include <drm/drm_print.h> |
26 | 26 | ||
27 | #include "i915_drv.h" | 27 | #include "i915_drv.h" |
28 | #include "i915_vgpu.h" | ||
29 | #include "intel_ringbuffer.h" | 28 | #include "intel_ringbuffer.h" |
30 | #include "intel_lrc.h" | 29 | #include "intel_lrc.h" |
31 | 30 | ||
@@ -230,6 +229,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) | |||
230 | break; | 229 | break; |
231 | default: | 230 | default: |
232 | MISSING_CASE(class); | 231 | MISSING_CASE(class); |
232 | /* fall through */ | ||
233 | case VIDEO_DECODE_CLASS: | 233 | case VIDEO_DECODE_CLASS: |
234 | case VIDEO_ENHANCEMENT_CLASS: | 234 | case VIDEO_ENHANCEMENT_CLASS: |
235 | case COPY_ENGINE_CLASS: | 235 | case COPY_ENGINE_CLASS: |
@@ -302,6 +302,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, | |||
302 | engine->class); | 302 | engine->class); |
303 | if (WARN_ON(engine->context_size > BIT(20))) | 303 | if (WARN_ON(engine->context_size > BIT(20))) |
304 | engine->context_size = 0; | 304 | engine->context_size = 0; |
305 | if (engine->context_size) | ||
306 | DRIVER_CAPS(dev_priv)->has_logical_contexts = true; | ||
305 | 307 | ||
306 | /* Nothing to do here, execute in order of dependencies */ | 308 | /* Nothing to do here, execute in order of dependencies */ |
307 | engine->schedule = NULL; | 309 | engine->schedule = NULL; |
@@ -456,21 +458,10 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) | |||
456 | i915_gem_batch_pool_init(&engine->batch_pool, engine); | 458 | i915_gem_batch_pool_init(&engine->batch_pool, engine); |
457 | } | 459 | } |
458 | 460 | ||
459 | static bool csb_force_mmio(struct drm_i915_private *i915) | ||
460 | { | ||
461 | /* Older GVT emulation depends upon intercepting CSB mmio */ | ||
462 | if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915)) | ||
463 | return true; | ||
464 | |||
465 | return false; | ||
466 | } | ||
467 | |||
468 | static void intel_engine_init_execlist(struct intel_engine_cs *engine) | 461 | static void intel_engine_init_execlist(struct intel_engine_cs *engine) |
469 | { | 462 | { |
470 | struct intel_engine_execlists * const execlists = &engine->execlists; | 463 | struct intel_engine_execlists * const execlists = &engine->execlists; |
471 | 464 | ||
472 | execlists->csb_use_mmio = csb_force_mmio(engine->i915); | ||
473 | |||
474 | execlists->port_mask = 1; | 465 | execlists->port_mask = 1; |
475 | BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); | 466 | BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); |
476 | GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); | 467 | GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); |
@@ -492,6 +483,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) | |||
492 | void intel_engine_setup_common(struct intel_engine_cs *engine) | 483 | void intel_engine_setup_common(struct intel_engine_cs *engine) |
493 | { | 484 | { |
494 | i915_timeline_init(engine->i915, &engine->timeline, engine->name); | 485 | i915_timeline_init(engine->i915, &engine->timeline, engine->name); |
486 | lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); | ||
495 | 487 | ||
496 | intel_engine_init_execlist(engine); | 488 | intel_engine_init_execlist(engine); |
497 | intel_engine_init_hangcheck(engine); | 489 | intel_engine_init_hangcheck(engine); |
@@ -1000,10 +992,12 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) | |||
1000 | if (READ_ONCE(engine->execlists.active)) { | 992 | if (READ_ONCE(engine->execlists.active)) { |
1001 | struct intel_engine_execlists *execlists = &engine->execlists; | 993 | struct intel_engine_execlists *execlists = &engine->execlists; |
1002 | 994 | ||
995 | local_bh_disable(); | ||
1003 | if (tasklet_trylock(&execlists->tasklet)) { | 996 | if (tasklet_trylock(&execlists->tasklet)) { |
1004 | execlists->tasklet.func(execlists->tasklet.data); | 997 | execlists->tasklet.func(execlists->tasklet.data); |
1005 | tasklet_unlock(&execlists->tasklet); | 998 | tasklet_unlock(&execlists->tasklet); |
1006 | } | 999 | } |
1000 | local_bh_enable(); | ||
1007 | 1001 | ||
1008 | if (READ_ONCE(execlists->active)) | 1002 | if (READ_ONCE(execlists->active)) |
1009 | return false; | 1003 | return false; |
@@ -1363,12 +1357,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, | |||
1363 | ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); | 1357 | ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); |
1364 | read = GEN8_CSB_READ_PTR(ptr); | 1358 | read = GEN8_CSB_READ_PTR(ptr); |
1365 | write = GEN8_CSB_WRITE_PTR(ptr); | 1359 | write = GEN8_CSB_WRITE_PTR(ptr); |
1366 | drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n", | 1360 | drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n", |
1367 | read, execlists->csb_head, | 1361 | read, execlists->csb_head, |
1368 | write, | 1362 | write, |
1369 | intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), | 1363 | intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), |
1370 | yesno(test_bit(ENGINE_IRQ_EXECLIST, | ||
1371 | &engine->irq_posted)), | ||
1372 | yesno(test_bit(TASKLET_STATE_SCHED, | 1364 | yesno(test_bit(TASKLET_STATE_SCHED, |
1373 | &engine->execlists.tasklet.state)), | 1365 | &engine->execlists.tasklet.state)), |
1374 | enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); | 1366 | enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); |
@@ -1580,11 +1572,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, | |||
1580 | spin_unlock(&b->rb_lock); | 1572 | spin_unlock(&b->rb_lock); |
1581 | local_irq_restore(flags); | 1573 | local_irq_restore(flags); |
1582 | 1574 | ||
1583 | drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n", | 1575 | drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n", |
1584 | engine->irq_posted, | 1576 | engine->irq_posted, |
1585 | yesno(test_bit(ENGINE_IRQ_BREADCRUMB, | 1577 | yesno(test_bit(ENGINE_IRQ_BREADCRUMB, |
1586 | &engine->irq_posted)), | ||
1587 | yesno(test_bit(ENGINE_IRQ_EXECLIST, | ||
1588 | &engine->irq_posted))); | 1578 | &engine->irq_posted))); |
1589 | 1579 | ||
1590 | drm_printf(m, "HWSP:\n"); | 1580 | drm_printf(m, "HWSP:\n"); |
@@ -1633,8 +1623,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) | |||
1633 | if (!intel_engine_supports_stats(engine)) | 1623 | if (!intel_engine_supports_stats(engine)) |
1634 | return -ENODEV; | 1624 | return -ENODEV; |
1635 | 1625 | ||
1636 | tasklet_disable(&execlists->tasklet); | 1626 | spin_lock_irqsave(&engine->timeline.lock, flags); |
1637 | write_seqlock_irqsave(&engine->stats.lock, flags); | 1627 | write_seqlock(&engine->stats.lock); |
1638 | 1628 | ||
1639 | if (unlikely(engine->stats.enabled == ~0)) { | 1629 | if (unlikely(engine->stats.enabled == ~0)) { |
1640 | err = -EBUSY; | 1630 | err = -EBUSY; |
@@ -1658,8 +1648,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) | |||
1658 | } | 1648 | } |
1659 | 1649 | ||
1660 | unlock: | 1650 | unlock: |
1661 | write_sequnlock_irqrestore(&engine->stats.lock, flags); | 1651 | write_sequnlock(&engine->stats.lock); |
1662 | tasklet_enable(&execlists->tasklet); | 1652 | spin_unlock_irqrestore(&engine->timeline.lock, flags); |
1663 | 1653 | ||
1664 | return err; | 1654 | return err; |
1665 | } | 1655 | } |
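A note on the intel_enable_engine_stats() hunk above: the stats seqlock is now nested inside engine->timeline.lock (taken with irqs off) rather than being guarded by disabling the submission tasklet, which is also why the plain write_seqlock()/write_sequnlock() variants now suffice. For readers unfamiliar with the seqlock idiom behind engine->stats.lock, here is a minimal user-space model in C11 -- an illustrative sketch with made-up field names, not the kernel implementation; the writer is assumed to already hold an outer lock, exactly as in the hunk above.

#include <stdatomic.h>
#include <stdio.h>

struct stats {
	atomic_uint seq;      /* odd while an update is in flight */
	atomic_ulong busy_ns; /* payload readers want a consistent view of */
};

static void stats_update(struct stats *s, unsigned long busy_ns)
{
	/* Writer side: caller holds the outer (timeline-like) lock. */
	atomic_fetch_add(&s->seq, 1);   /* sequence becomes odd: update in flight */
	atomic_store(&s->busy_ns, busy_ns);
	atomic_fetch_add(&s->seq, 1);   /* back to even: update published */
}

static unsigned long stats_read(struct stats *s)
{
	unsigned int before, after;
	unsigned long v;

	do {    /* lockless reader: retry if a writer was active or intervened */
		before = atomic_load(&s->seq);
		v = atomic_load(&s->busy_ns);
		after = atomic_load(&s->seq);
	} while ((before & 1) || before != after);

	return v;
}

int main(void)
{
	struct stats s;

	atomic_init(&s.seq, 0);
	atomic_init(&s.busy_ns, 0);
	stats_update(&s, 123456);
	printf("busy_ns = %lu\n", stats_read(&s));
	return 0;
}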
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index b431b6733cc1..01d1d2088f04 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) | |||
399 | return dev_priv->fbc.active; | 399 | return dev_priv->fbc.active; |
400 | } | 400 | } |
401 | 401 | ||
402 | static void intel_fbc_work_fn(struct work_struct *__work) | ||
403 | { | ||
404 | struct drm_i915_private *dev_priv = | ||
405 | container_of(__work, struct drm_i915_private, fbc.work.work); | ||
406 | struct intel_fbc *fbc = &dev_priv->fbc; | ||
407 | struct intel_fbc_work *work = &fbc->work; | ||
408 | struct intel_crtc *crtc = fbc->crtc; | ||
409 | struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe]; | ||
410 | |||
411 | if (drm_crtc_vblank_get(&crtc->base)) { | ||
412 | /* CRTC is now off, leave FBC deactivated */ | ||
413 | mutex_lock(&fbc->lock); | ||
414 | work->scheduled = false; | ||
415 | mutex_unlock(&fbc->lock); | ||
416 | return; | ||
417 | } | ||
418 | |||
419 | retry: | ||
420 | /* Delay the actual enabling to let pageflipping cease and the | ||
421 | * display to settle before starting the compression. Note that | ||
422 | * this delay also serves a second purpose: it allows for a | ||
423 | * vblank to pass after disabling the FBC before we attempt | ||
424 | * to modify the control registers. | ||
425 | * | ||
426 | * WaFbcWaitForVBlankBeforeEnable:ilk,snb | ||
427 | * | ||
428 | * It is also worth mentioning that since work->scheduled_vblank can be | ||
429 | * updated multiple times by the other threads, hitting the timeout is | ||
430 | * not an error condition. We'll just end up hitting the "goto retry" | ||
431 | * case below. | ||
432 | */ | ||
433 | wait_event_timeout(vblank->queue, | ||
434 | drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank, | ||
435 | msecs_to_jiffies(50)); | ||
436 | |||
437 | mutex_lock(&fbc->lock); | ||
438 | |||
439 | /* Were we cancelled? */ | ||
440 | if (!work->scheduled) | ||
441 | goto out; | ||
442 | |||
443 | /* Were we delayed again while this function was sleeping? */ | ||
444 | if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) { | ||
445 | mutex_unlock(&fbc->lock); | ||
446 | goto retry; | ||
447 | } | ||
448 | |||
449 | intel_fbc_hw_activate(dev_priv); | ||
450 | |||
451 | work->scheduled = false; | ||
452 | |||
453 | out: | ||
454 | mutex_unlock(&fbc->lock); | ||
455 | drm_crtc_vblank_put(&crtc->base); | ||
456 | } | ||
457 | |||
458 | static void intel_fbc_schedule_activation(struct intel_crtc *crtc) | ||
459 | { | ||
460 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
461 | struct intel_fbc *fbc = &dev_priv->fbc; | ||
462 | struct intel_fbc_work *work = &fbc->work; | ||
463 | |||
464 | WARN_ON(!mutex_is_locked(&fbc->lock)); | ||
465 | if (WARN_ON(!fbc->enabled)) | ||
466 | return; | ||
467 | |||
468 | if (drm_crtc_vblank_get(&crtc->base)) { | ||
469 | DRM_ERROR("vblank not available for FBC on pipe %c\n", | ||
470 | pipe_name(crtc->pipe)); | ||
471 | return; | ||
472 | } | ||
473 | |||
474 | /* It is useless to call intel_fbc_cancel_work() or cancel_work() in | ||
475 | * this function since we're not releasing fbc.lock, so it won't have an | ||
476 | * opportunity to grab it to discover that it was cancelled. So we just | ||
477 | * update the expected jiffy count. */ | ||
478 | work->scheduled = true; | ||
479 | work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base); | ||
480 | drm_crtc_vblank_put(&crtc->base); | ||
481 | |||
482 | schedule_work(&work->work); | ||
483 | } | ||
484 | |||
485 | static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, | 402 | static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, |
486 | const char *reason) | 403 | const char *reason) |
487 | { | 404 | { |
@@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, | |||
489 | 406 | ||
490 | WARN_ON(!mutex_is_locked(&fbc->lock)); | 407 | WARN_ON(!mutex_is_locked(&fbc->lock)); |
491 | 408 | ||
492 | /* Calling cancel_work() here won't help due to the fact that the work | ||
493 | * function grabs fbc->lock. Just set scheduled to false so the work | ||
494 | * function can know it was cancelled. */ | ||
495 | fbc->work.scheduled = false; | ||
496 | |||
497 | if (fbc->active) | 409 | if (fbc->active) |
498 | intel_fbc_hw_deactivate(dev_priv); | 410 | intel_fbc_hw_deactivate(dev_priv); |
499 | 411 | ||
@@ -924,13 +836,6 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, | |||
924 | 32 * fbc->threshold) * 8; | 836 | 32 * fbc->threshold) * 8; |
925 | } | 837 | } |
926 | 838 | ||
927 | static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, | ||
928 | struct intel_fbc_reg_params *params2) | ||
929 | { | ||
930 | /* We can use this since intel_fbc_get_reg_params() does a memset. */ | ||
931 | return memcmp(params1, params2, sizeof(*params1)) == 0; | ||
932 | } | ||
933 | |||
934 | void intel_fbc_pre_update(struct intel_crtc *crtc, | 839 | void intel_fbc_pre_update(struct intel_crtc *crtc, |
935 | struct intel_crtc_state *crtc_state, | 840 | struct intel_crtc_state *crtc_state, |
936 | struct intel_plane_state *plane_state) | 841 | struct intel_plane_state *plane_state) |
@@ -953,6 +858,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc, | |||
953 | goto unlock; | 858 | goto unlock; |
954 | 859 | ||
955 | intel_fbc_update_state_cache(crtc, crtc_state, plane_state); | 860 | intel_fbc_update_state_cache(crtc, crtc_state, plane_state); |
861 | fbc->flip_pending = true; | ||
956 | 862 | ||
957 | deactivate: | 863 | deactivate: |
958 | intel_fbc_deactivate(dev_priv, reason); | 864 | intel_fbc_deactivate(dev_priv, reason); |
@@ -988,13 +894,15 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) | |||
988 | { | 894 | { |
989 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 895 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
990 | struct intel_fbc *fbc = &dev_priv->fbc; | 896 | struct intel_fbc *fbc = &dev_priv->fbc; |
991 | struct intel_fbc_reg_params old_params; | ||
992 | 897 | ||
993 | WARN_ON(!mutex_is_locked(&fbc->lock)); | 898 | WARN_ON(!mutex_is_locked(&fbc->lock)); |
994 | 899 | ||
995 | if (!fbc->enabled || fbc->crtc != crtc) | 900 | if (!fbc->enabled || fbc->crtc != crtc) |
996 | return; | 901 | return; |
997 | 902 | ||
903 | fbc->flip_pending = false; | ||
904 | WARN_ON(fbc->active); | ||
905 | |||
998 | if (!i915_modparams.enable_fbc) { | 906 | if (!i915_modparams.enable_fbc) { |
999 | intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); | 907 | intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); |
1000 | __intel_fbc_disable(dev_priv); | 908 | __intel_fbc_disable(dev_priv); |
@@ -1002,25 +910,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) | |||
1002 | return; | 910 | return; |
1003 | } | 911 | } |
1004 | 912 | ||
1005 | if (!intel_fbc_can_activate(crtc)) { | ||
1006 | WARN_ON(fbc->active); | ||
1007 | return; | ||
1008 | } | ||
1009 | |||
1010 | old_params = fbc->params; | ||
1011 | intel_fbc_get_reg_params(crtc, &fbc->params); | 913 | intel_fbc_get_reg_params(crtc, &fbc->params); |
1012 | 914 | ||
1013 | /* If the scanout has not changed, don't modify the FBC settings. | 915 | if (!intel_fbc_can_activate(crtc)) |
1014 | * Note that we make the fundamental assumption that the fb->obj | ||
1015 | * cannot be unpinned (and have its GTT offset and fence revoked) | ||
1016 | * without first being decoupled from the scanout and FBC disabled. | ||
1017 | */ | ||
1018 | if (fbc->active && | ||
1019 | intel_fbc_reg_params_equal(&old_params, &fbc->params)) | ||
1020 | return; | 916 | return; |
1021 | 917 | ||
1022 | intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)"); | 918 | if (!fbc->busy_bits) { |
1023 | intel_fbc_schedule_activation(crtc); | 919 | intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)"); |
920 | intel_fbc_hw_activate(dev_priv); | ||
921 | } else | ||
922 | intel_fbc_deactivate(dev_priv, "frontbuffer write"); | ||
1024 | } | 923 | } |
1025 | 924 | ||
1026 | void intel_fbc_post_update(struct intel_crtc *crtc) | 925 | void intel_fbc_post_update(struct intel_crtc *crtc) |
@@ -1085,7 +984,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, | |||
1085 | (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { | 984 | (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { |
1086 | if (fbc->active) | 985 | if (fbc->active) |
1087 | intel_fbc_recompress(dev_priv); | 986 | intel_fbc_recompress(dev_priv); |
1088 | else | 987 | else if (!fbc->flip_pending) |
1089 | __intel_fbc_post_update(fbc->crtc); | 988 | __intel_fbc_post_update(fbc->crtc); |
1090 | } | 989 | } |
1091 | 990 | ||
@@ -1225,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc) | |||
1225 | if (fbc->crtc == crtc) | 1124 | if (fbc->crtc == crtc) |
1226 | __intel_fbc_disable(dev_priv); | 1125 | __intel_fbc_disable(dev_priv); |
1227 | mutex_unlock(&fbc->lock); | 1126 | mutex_unlock(&fbc->lock); |
1228 | |||
1229 | cancel_work_sync(&fbc->work.work); | ||
1230 | } | 1127 | } |
1231 | 1128 | ||
1232 | /** | 1129 | /** |
@@ -1248,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv) | |||
1248 | __intel_fbc_disable(dev_priv); | 1145 | __intel_fbc_disable(dev_priv); |
1249 | } | 1146 | } |
1250 | mutex_unlock(&fbc->lock); | 1147 | mutex_unlock(&fbc->lock); |
1251 | |||
1252 | cancel_work_sync(&fbc->work.work); | ||
1253 | } | 1148 | } |
1254 | 1149 | ||
1255 | static void intel_fbc_underrun_work_fn(struct work_struct *work) | 1150 | static void intel_fbc_underrun_work_fn(struct work_struct *work) |
@@ -1400,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) | |||
1400 | { | 1295 | { |
1401 | struct intel_fbc *fbc = &dev_priv->fbc; | 1296 | struct intel_fbc *fbc = &dev_priv->fbc; |
1402 | 1297 | ||
1403 | INIT_WORK(&fbc->work.work, intel_fbc_work_fn); | ||
1404 | INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); | 1298 | INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); |
1405 | mutex_init(&fbc->lock); | 1299 | mutex_init(&fbc->lock); |
1406 | fbc->enabled = false; | 1300 | fbc->enabled = false; |
1407 | fbc->active = false; | 1301 | fbc->active = false; |
1408 | fbc->work.scheduled = false; | ||
1409 | 1302 | ||
1410 | if (need_fbc_vtd_wa(dev_priv)) | 1303 | if (need_fbc_vtd_wa(dev_priv)) |
1411 | mkwrite_device_info(dev_priv)->has_fbc = false; | 1304 | mkwrite_device_info(dev_priv)->has_fbc = false; |
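The FBC rework above is worth restating in plain terms: the delayed-activation worker is gone, intel_fbc_pre_update() now just marks a flip as pending and deactivates the hardware, __intel_fbc_post_update() clears the flag and re-activates immediately unless frontbuffer writes are outstanding, and intel_fbc_flush() only falls through to the post-update path when no flip is pending. A compressed user-space model of that state machine -- hypothetical names, no locking, and the ilk/snb vblank workaround ignored -- behaves like this:

#include <stdbool.h>
#include <stdio.h>

struct fbc_model {
	bool enabled;
	bool active;            /* hardware compression currently running */
	bool flip_pending;      /* set between pre_update and post_update */
	unsigned int busy_bits; /* outstanding frontbuffer writes */
};

static void fbc_pre_update(struct fbc_model *fbc)
{
	if (!fbc->enabled)
		return;
	fbc->flip_pending = true;
	fbc->active = false;            /* deactivate around the flip */
}

static void fbc_post_update(struct fbc_model *fbc)
{
	if (!fbc->enabled)
		return;
	fbc->flip_pending = false;
	if (!fbc->busy_bits)
		fbc->active = true;         /* activate right away, no worker */
	/* else: stay off until the frontbuffer flush clears busy_bits */
}

static void fbc_flush(struct fbc_model *fbc)
{
	fbc->busy_bits = 0;
	if (!fbc->active && !fbc->flip_pending)
		fbc_post_update(fbc);       /* mirrors intel_fbc_flush() */
}

int main(void)
{
	struct fbc_model fbc = { .enabled = true };

	fbc_pre_update(&fbc);
	fbc_post_update(&fbc);
	printf("active after plain flip: %d\n", fbc.active);      /* 1 */

	fbc.busy_bits = 1;              /* a frontbuffer write arrives */
	fbc_pre_update(&fbc);
	fbc_post_update(&fbc);
	printf("active with busy bits:   %d\n", fbc.active);      /* 0 */
	fbc_flush(&fbc);
	printf("active after flush:      %d\n", fbc.active);      /* 1 */
	return 0;
}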
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 1aff30b0870c..e12bd259df17 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include "intel_guc_submission.h" | 27 | #include "intel_guc_submission.h" |
28 | #include "i915_drv.h" | 28 | #include "i915_drv.h" |
29 | 29 | ||
30 | static void guc_init_ggtt_pin_bias(struct intel_guc *guc); | ||
31 | |||
30 | static void gen8_guc_raise_irq(struct intel_guc *guc) | 32 | static void gen8_guc_raise_irq(struct intel_guc *guc) |
31 | { | 33 | { |
32 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 34 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
@@ -73,7 +75,7 @@ void intel_guc_init_early(struct intel_guc *guc) | |||
73 | guc->notify = gen8_guc_raise_irq; | 75 | guc->notify = gen8_guc_raise_irq; |
74 | } | 76 | } |
75 | 77 | ||
76 | int intel_guc_init_wq(struct intel_guc *guc) | 78 | static int guc_init_wq(struct intel_guc *guc) |
77 | { | 79 | { |
78 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 80 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
79 | 81 | ||
@@ -124,7 +126,7 @@ int intel_guc_init_wq(struct intel_guc *guc) | |||
124 | return 0; | 126 | return 0; |
125 | } | 127 | } |
126 | 128 | ||
127 | void intel_guc_fini_wq(struct intel_guc *guc) | 129 | static void guc_fini_wq(struct intel_guc *guc) |
128 | { | 130 | { |
129 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 131 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
130 | 132 | ||
@@ -135,6 +137,28 @@ void intel_guc_fini_wq(struct intel_guc *guc) | |||
135 | destroy_workqueue(guc->log.relay.flush_wq); | 137 | destroy_workqueue(guc->log.relay.flush_wq); |
136 | } | 138 | } |
137 | 139 | ||
140 | int intel_guc_init_misc(struct intel_guc *guc) | ||
141 | { | ||
142 | struct drm_i915_private *i915 = guc_to_i915(guc); | ||
143 | int ret; | ||
144 | |||
145 | guc_init_ggtt_pin_bias(guc); | ||
146 | |||
147 | ret = guc_init_wq(guc); | ||
148 | if (ret) | ||
149 | return ret; | ||
150 | |||
151 | intel_uc_fw_fetch(i915, &guc->fw); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | void intel_guc_fini_misc(struct intel_guc *guc) | ||
157 | { | ||
158 | intel_uc_fw_fini(&guc->fw); | ||
159 | guc_fini_wq(guc); | ||
160 | } | ||
161 | |||
138 | static int guc_shared_data_create(struct intel_guc *guc) | 162 | static int guc_shared_data_create(struct intel_guc *guc) |
139 | { | 163 | { |
140 | struct i915_vma *vma; | 164 | struct i915_vma *vma; |
@@ -169,7 +193,7 @@ int intel_guc_init(struct intel_guc *guc) | |||
169 | 193 | ||
170 | ret = guc_shared_data_create(guc); | 194 | ret = guc_shared_data_create(guc); |
171 | if (ret) | 195 | if (ret) |
172 | return ret; | 196 | goto err_fetch; |
173 | GEM_BUG_ON(!guc->shared_data); | 197 | GEM_BUG_ON(!guc->shared_data); |
174 | 198 | ||
175 | ret = intel_guc_log_create(&guc->log); | 199 | ret = intel_guc_log_create(&guc->log); |
@@ -190,6 +214,8 @@ err_log: | |||
190 | intel_guc_log_destroy(&guc->log); | 214 | intel_guc_log_destroy(&guc->log); |
191 | err_shared: | 215 | err_shared: |
192 | guc_shared_data_destroy(guc); | 216 | guc_shared_data_destroy(guc); |
217 | err_fetch: | ||
218 | intel_uc_fw_fini(&guc->fw); | ||
193 | return ret; | 219 | return ret; |
194 | } | 220 | } |
195 | 221 | ||
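The new err_fetch label above exists so that once the firmware blob has been fetched by intel_guc_init_misc(), any later failure in intel_guc_init() still releases it, and intel_guc_fini() undoes the same steps in reverse. The shape of that unwind ladder, boiled down to a self-contained sketch with stand-in step names (the real steps are the shared-data, log and ADS objects):

#include <stdio.h>

/* Stand-in init steps; each returns 0 on success, non-zero on failure. */
static int fetch_fw(void)        { puts("fetch fw");       return 0; }
static int create_shared(void)   { puts("create shared");  return 0; }
static int create_log(void)      { puts("create log");     return -1; /* simulate failure */ }
static void destroy_shared(void) { puts("destroy shared"); }
static void release_fw(void)     { puts("release fw"); }

static int guc_like_init(void)
{
	int ret;

	ret = fetch_fw();               /* done by init_misc() in the driver */
	if (ret)
		return ret;

	ret = create_shared();
	if (ret)
		goto err_fetch;

	ret = create_log();
	if (ret)
		goto err_shared;

	return 0;

	/* Unwind in strict reverse order of construction. */
err_shared:
	destroy_shared();
err_fetch:
	release_fw();
	return ret;
}

int main(void)
{
	printf("init returned %d\n", guc_like_init());
	return 0;
}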
@@ -201,12 +227,17 @@ void intel_guc_fini(struct intel_guc *guc) | |||
201 | intel_guc_ads_destroy(guc); | 227 | intel_guc_ads_destroy(guc); |
202 | intel_guc_log_destroy(&guc->log); | 228 | intel_guc_log_destroy(&guc->log); |
203 | guc_shared_data_destroy(guc); | 229 | guc_shared_data_destroy(guc); |
230 | intel_uc_fw_fini(&guc->fw); | ||
204 | } | 231 | } |
205 | 232 | ||
206 | static u32 guc_ctl_debug_flags(struct intel_guc *guc) | 233 | static u32 guc_ctl_debug_flags(struct intel_guc *guc) |
207 | { | 234 | { |
208 | u32 level = intel_guc_log_get_level(&guc->log); | 235 | u32 level = intel_guc_log_get_level(&guc->log); |
209 | u32 flags = 0; | 236 | u32 flags; |
237 | u32 ads; | ||
238 | |||
239 | ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; | ||
240 | flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED; | ||
210 | 241 | ||
211 | if (!GUC_LOG_LEVEL_IS_ENABLED(level)) | 242 | if (!GUC_LOG_LEVEL_IS_ENABLED(level)) |
212 | flags |= GUC_LOG_DEFAULT_DISABLED; | 243 | flags |= GUC_LOG_DEFAULT_DISABLED; |
@@ -217,13 +248,6 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc) | |||
217 | flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) << | 248 | flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) << |
218 | GUC_LOG_VERBOSITY_SHIFT; | 249 | GUC_LOG_VERBOSITY_SHIFT; |
219 | 250 | ||
220 | if (USES_GUC_SUBMISSION(guc_to_i915(guc))) { | ||
221 | u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) | ||
222 | >> PAGE_SHIFT; | ||
223 | |||
224 | flags |= ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED; | ||
225 | } | ||
226 | |||
227 | return flags; | 251 | return flags; |
228 | } | 252 | } |
229 | 253 | ||
@@ -327,6 +351,9 @@ void intel_guc_init_params(struct intel_guc *guc) | |||
327 | params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); | 351 | params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); |
328 | params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); | 352 | params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); |
329 | 353 | ||
354 | for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) | ||
355 | DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); | ||
356 | |||
330 | /* | 357 | /* |
331 | * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and | 358 | * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and |
332 | * they are power context saved so it's ok to release forcewake | 359 | * they are power context saved so it's ok to release forcewake |
@@ -585,13 +612,13 @@ int intel_guc_resume(struct intel_guc *guc) | |||
585 | */ | 612 | */ |
586 | 613 | ||
587 | /** | 614 | /** |
588 | * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. | 615 | * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. |
589 | * @guc: intel_guc structure. | 616 | * @guc: intel_guc structure. |
590 | * | 617 | * |
591 | * This function will calculate and initialize the ggtt_pin_bias value based on | 618 | * This function will calculate and initialize the ggtt_pin_bias value based on |
592 | * overall WOPCM size and GuC WOPCM size. | 619 | * overall WOPCM size and GuC WOPCM size. |
593 | */ | 620 | */ |
594 | void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc) | 621 | static void guc_init_ggtt_pin_bias(struct intel_guc *guc) |
595 | { | 622 | { |
596 | struct drm_i915_private *i915 = guc_to_i915(guc); | 623 | struct drm_i915_private *i915 = guc_to_i915(guc); |
597 | 624 | ||
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index f1265e122d30..4121928a495e 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h | |||
@@ -151,11 +151,10 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, | |||
151 | void intel_guc_init_early(struct intel_guc *guc); | 151 | void intel_guc_init_early(struct intel_guc *guc); |
152 | void intel_guc_init_send_regs(struct intel_guc *guc); | 152 | void intel_guc_init_send_regs(struct intel_guc *guc); |
153 | void intel_guc_init_params(struct intel_guc *guc); | 153 | void intel_guc_init_params(struct intel_guc *guc); |
154 | void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc); | 154 | int intel_guc_init_misc(struct intel_guc *guc); |
155 | int intel_guc_init_wq(struct intel_guc *guc); | ||
156 | void intel_guc_fini_wq(struct intel_guc *guc); | ||
157 | int intel_guc_init(struct intel_guc *guc); | 155 | int intel_guc_init(struct intel_guc *guc); |
158 | void intel_guc_fini(struct intel_guc *guc); | 156 | void intel_guc_fini(struct intel_guc *guc); |
157 | void intel_guc_fini_misc(struct intel_guc *guc); | ||
159 | int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, | 158 | int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, |
160 | u32 *response_buf, u32 response_buf_size); | 159 | u32 *response_buf, u32 response_buf_size); |
161 | int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, | 160 | int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, |
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 291285277403..ffcad5fad6a7 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c | |||
@@ -32,6 +32,14 @@ void intel_huc_init_early(struct intel_huc *huc) | |||
32 | intel_huc_fw_init_early(huc); | 32 | intel_huc_fw_init_early(huc); |
33 | } | 33 | } |
34 | 34 | ||
35 | int intel_huc_init_misc(struct intel_huc *huc) | ||
36 | { | ||
37 | struct drm_i915_private *i915 = huc_to_i915(huc); | ||
38 | |||
39 | intel_uc_fw_fetch(i915, &huc->fw); | ||
40 | return 0; | ||
41 | } | ||
42 | |||
35 | /** | 43 | /** |
36 | * intel_huc_auth() - Authenticate HuC uCode | 44 | * intel_huc_auth() - Authenticate HuC uCode |
37 | * @huc: intel_huc structure | 45 | * @huc: intel_huc structure |
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h index aa854907abac..7e41d870b509 100644 --- a/drivers/gpu/drm/i915/intel_huc.h +++ b/drivers/gpu/drm/i915/intel_huc.h | |||
@@ -36,9 +36,15 @@ struct intel_huc { | |||
36 | }; | 36 | }; |
37 | 37 | ||
38 | void intel_huc_init_early(struct intel_huc *huc); | 38 | void intel_huc_init_early(struct intel_huc *huc); |
39 | int intel_huc_init_misc(struct intel_huc *huc); | ||
39 | int intel_huc_auth(struct intel_huc *huc); | 40 | int intel_huc_auth(struct intel_huc *huc); |
40 | int intel_huc_check_status(struct intel_huc *huc); | 41 | int intel_huc_check_status(struct intel_huc *huc); |
41 | 42 | ||
43 | static inline void intel_huc_fini_misc(struct intel_huc *huc) | ||
44 | { | ||
45 | intel_uc_fw_fini(&huc->fw); | ||
46 | } | ||
47 | |||
42 | static inline int intel_huc_sanitize(struct intel_huc *huc) | 48 | static inline int intel_huc_sanitize(struct intel_huc *huc) |
43 | { | 49 | { |
44 | intel_uc_fw_sanitize(&huc->fw); | 50 | intel_uc_fw_sanitize(&huc->fw); |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 33bc914c2ef5..ab89dabc2965 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -137,6 +137,7 @@ | |||
137 | #include <drm/i915_drm.h> | 137 | #include <drm/i915_drm.h> |
138 | #include "i915_drv.h" | 138 | #include "i915_drv.h" |
139 | #include "i915_gem_render_state.h" | 139 | #include "i915_gem_render_state.h" |
140 | #include "i915_vgpu.h" | ||
140 | #include "intel_lrc_reg.h" | 141 | #include "intel_lrc_reg.h" |
141 | #include "intel_mocs.h" | 142 | #include "intel_mocs.h" |
142 | #include "intel_workarounds.h" | 143 | #include "intel_workarounds.h" |
@@ -562,12 +563,14 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists) | |||
562 | GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); | 563 | GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); |
563 | 564 | ||
564 | execlists_cancel_port_requests(execlists); | 565 | execlists_cancel_port_requests(execlists); |
565 | execlists_unwind_incomplete_requests(execlists); | 566 | __unwind_incomplete_requests(container_of(execlists, |
567 | struct intel_engine_cs, | ||
568 | execlists)); | ||
566 | 569 | ||
567 | execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT); | 570 | execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT); |
568 | } | 571 | } |
569 | 572 | ||
570 | static bool __execlists_dequeue(struct intel_engine_cs *engine) | 573 | static void execlists_dequeue(struct intel_engine_cs *engine) |
571 | { | 574 | { |
572 | struct intel_engine_execlists * const execlists = &engine->execlists; | 575 | struct intel_engine_execlists * const execlists = &engine->execlists; |
573 | struct execlist_port *port = execlists->port; | 576 | struct execlist_port *port = execlists->port; |
@@ -577,9 +580,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) | |||
577 | struct rb_node *rb; | 580 | struct rb_node *rb; |
578 | bool submit = false; | 581 | bool submit = false; |
579 | 582 | ||
580 | lockdep_assert_held(&engine->timeline.lock); | 583 | /* |
581 | 584 | * Hardware submission is through 2 ports. Conceptually each port | |
582 | /* Hardware submission is through 2 ports. Conceptually each port | ||
583 | * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is | 585 | * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is |
584 | * static for a context, and unique to each, so we only execute | 586 | * static for a context, and unique to each, so we only execute |
585 | * requests belonging to a single context from each ring. RING_HEAD | 587 | * requests belonging to a single context from each ring. RING_HEAD |
@@ -622,11 +624,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) | |||
622 | * the HW to indicate that it has had a chance to respond. | 624 | * the HW to indicate that it has had a chance to respond. |
623 | */ | 625 | */ |
624 | if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) | 626 | if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) |
625 | return false; | 627 | return; |
626 | 628 | ||
627 | if (need_preempt(engine, last, execlists->queue_priority)) { | 629 | if (need_preempt(engine, last, execlists->queue_priority)) { |
628 | inject_preempt_context(engine); | 630 | inject_preempt_context(engine); |
629 | return false; | 631 | return; |
630 | } | 632 | } |
631 | 633 | ||
632 | /* | 634 | /* |
@@ -651,7 +653,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) | |||
651 | * priorities of the ports haven't been switched. | 653 | * priorities of the ports haven't been switched. |
652 | */ | 654 | */ |
653 | if (port_count(&port[1])) | 655 | if (port_count(&port[1])) |
654 | return false; | 656 | return; |
655 | 657 | ||
656 | /* | 658 | /* |
657 | * WaIdleLiteRestore:bdw,skl | 659 | * WaIdleLiteRestore:bdw,skl |
@@ -751,8 +753,10 @@ done: | |||
751 | port != execlists->port ? rq_prio(last) : INT_MIN; | 753 | port != execlists->port ? rq_prio(last) : INT_MIN; |
752 | 754 | ||
753 | execlists->first = rb; | 755 | execlists->first = rb; |
754 | if (submit) | 756 | if (submit) { |
755 | port_assign(port, last); | 757 | port_assign(port, last); |
758 | execlists_submit_ports(engine); | ||
759 | } | ||
756 | 760 | ||
757 | /* We must always keep the beast fed if we have work piled up */ | 761 | /* We must always keep the beast fed if we have work piled up */ |
758 | GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); | 762 | GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); |
@@ -761,24 +765,10 @@ done: | |||
761 | if (last) | 765 | if (last) |
762 | execlists_user_begin(execlists, execlists->port); | 766 | execlists_user_begin(execlists, execlists->port); |
763 | 767 | ||
764 | return submit; | 768 | /* If the engine is now idle, so should be the flag; and vice versa. */ |
765 | } | 769 | GEM_BUG_ON(execlists_is_active(&engine->execlists, |
766 | 770 | EXECLISTS_ACTIVE_USER) == | |
767 | static void execlists_dequeue(struct intel_engine_cs *engine) | 771 | !port_isset(engine->execlists.port)); |
768 | { | ||
769 | struct intel_engine_execlists * const execlists = &engine->execlists; | ||
770 | unsigned long flags; | ||
771 | bool submit; | ||
772 | |||
773 | spin_lock_irqsave(&engine->timeline.lock, flags); | ||
774 | submit = __execlists_dequeue(engine); | ||
775 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | ||
776 | |||
777 | if (submit) | ||
778 | execlists_submit_ports(engine); | ||
779 | |||
780 | GEM_BUG_ON(port_isset(execlists->port) && | ||
781 | !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); | ||
782 | } | 772 | } |
783 | 773 | ||
784 | void | 774 | void |
@@ -874,17 +864,23 @@ static void reset_irq(struct intel_engine_cs *engine) | |||
874 | { | 864 | { |
875 | /* Mark all CS interrupts as complete */ | 865 | /* Mark all CS interrupts as complete */ |
876 | smp_store_mb(engine->execlists.active, 0); | 866 | smp_store_mb(engine->execlists.active, 0); |
877 | synchronize_hardirq(engine->i915->drm.irq); | ||
878 | 867 | ||
879 | clear_gtiir(engine); | 868 | clear_gtiir(engine); |
869 | } | ||
880 | 870 | ||
871 | static void reset_csb_pointers(struct intel_engine_execlists *execlists) | ||
872 | { | ||
881 | /* | 873 | /* |
882 | * The port is checked prior to scheduling a tasklet, but | 874 | * After a reset, the HW starts writing into CSB entry [0]. We |
883 | * just in case we have suspended the tasklet to do the | 875 | * therefore have to set our HEAD pointer back one entry so that |
884 | * wedging make sure that when it wakes, it decides there | 876 | * the *first* entry we check is entry 0. To complicate this further, |
885 | * is no work to do by clearing the irq_posted bit. | 877 | * as we don't wait for the first interrupt after reset, we have to |
878 | * fake the HW write to point back to the last entry so that our | ||
879 | * inline comparison of our cached head position against the last HW | ||
880 | * write works even before the first interrupt. | ||
886 | */ | 881 | */ |
887 | clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); | 882 | execlists->csb_head = execlists->csb_write_reset; |
883 | WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset); | ||
888 | } | 884 | } |
889 | 885 | ||
890 | static void execlists_cancel_requests(struct intel_engine_cs *engine) | 886 | static void execlists_cancel_requests(struct intel_engine_cs *engine) |
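One subtlety in reset_csb_pointers() above: process_csb() pre-increments its cached head before reading an entry, and after a reset the hardware starts writing again at entry 0, so both the cached head and the (faked) write pointer are parked on the last slot. A few lines of arithmetic show why that lines up (GEN8_CSB_ENTRIES is 6 on these parts; the reset value is assumed to be the last slot, as the comment describes):

#include <stdio.h>

#define CSB_ENTRIES 6   /* GEN8_CSB_ENTRIES */

int main(void)
{
	unsigned int reset_head = CSB_ENTRIES - 1;            /* where csb_head is parked */
	unsigned int first = (reset_head + 1) % CSB_ENTRIES;  /* first slot examined */

	/* The pre-increment wraps straight to entry 0, matching where the
	 * hardware resumes writing after the reset. */
	printf("first CSB slot consumed after reset: %u\n", first);
	return 0;
}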
@@ -911,14 +907,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) | |||
911 | * submission's irq state, we also wish to remind ourselves that | 907 | * submission's irq state, we also wish to remind ourselves that |
912 | * it is irq state.) | 908 | * it is irq state.) |
913 | */ | 909 | */ |
914 | local_irq_save(flags); | 910 | spin_lock_irqsave(&engine->timeline.lock, flags); |
915 | 911 | ||
916 | /* Cancel the requests on the HW and clear the ELSP tracker. */ | 912 | /* Cancel the requests on the HW and clear the ELSP tracker. */ |
917 | execlists_cancel_port_requests(execlists); | 913 | execlists_cancel_port_requests(execlists); |
918 | reset_irq(engine); | 914 | reset_irq(engine); |
919 | 915 | ||
920 | spin_lock(&engine->timeline.lock); | ||
921 | |||
922 | /* Mark all executing requests as skipped. */ | 916 | /* Mark all executing requests as skipped. */ |
923 | list_for_each_entry(rq, &engine->timeline.requests, link) { | 917 | list_for_each_entry(rq, &engine->timeline.requests, link) { |
924 | GEM_BUG_ON(!rq->global_seqno); | 918 | GEM_BUG_ON(!rq->global_seqno); |
@@ -952,194 +946,169 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) | |||
952 | execlists->first = NULL; | 946 | execlists->first = NULL; |
953 | GEM_BUG_ON(port_isset(execlists->port)); | 947 | GEM_BUG_ON(port_isset(execlists->port)); |
954 | 948 | ||
955 | spin_unlock(&engine->timeline.lock); | 949 | spin_unlock_irqrestore(&engine->timeline.lock, flags); |
950 | } | ||
956 | 951 | ||
957 | local_irq_restore(flags); | 952 | static inline bool |
953 | reset_in_progress(const struct intel_engine_execlists *execlists) | ||
954 | { | ||
955 | return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); | ||
958 | } | 956 | } |
959 | 957 | ||
960 | static void process_csb(struct intel_engine_cs *engine) | 958 | static void process_csb(struct intel_engine_cs *engine) |
961 | { | 959 | { |
962 | struct intel_engine_execlists * const execlists = &engine->execlists; | 960 | struct intel_engine_execlists * const execlists = &engine->execlists; |
963 | struct execlist_port *port = execlists->port; | 961 | struct execlist_port *port = execlists->port; |
964 | struct drm_i915_private *i915 = engine->i915; | 962 | const u32 * const buf = execlists->csb_status; |
965 | bool fw = false; | 963 | u8 head, tail; |
964 | |||
965 | /* | ||
966 | * Note that csb_write, csb_status may be either in HWSP or mmio. | ||
967 | * When reading from the csb_write mmio register, we have to be | ||
968 | * careful to only use the GEN8_CSB_WRITE_PTR portion, which is | ||
969 | * the low 4bits. As it happens we know the next 4bits are always | ||
970 | * zero and so we can simply mask off the low u8 of the register | ||
971 | * and treat it identically to reading from the HWSP (without having | ||
972 | * to use explicit shifting and masking, and probably bifurcating | ||
973 | * the code to handle the legacy mmio read). | ||
974 | */ | ||
975 | head = execlists->csb_head; | ||
976 | tail = READ_ONCE(*execlists->csb_write); | ||
977 | GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); | ||
978 | if (unlikely(head == tail)) | ||
979 | return; | ||
980 | |||
981 | /* | ||
982 | * Hopefully paired with a wmb() in HW! | ||
983 | * | ||
984 | * We must complete the read of the write pointer before any reads | ||
985 | * from the CSB, so that we do not see stale values. Without an rmb | ||
986 | * (lfence) the HW may speculatively perform the CSB[] reads *before* | ||
987 | * we perform the READ_ONCE(*csb_write). | ||
988 | */ | ||
989 | rmb(); | ||
966 | 990 | ||
967 | do { | 991 | do { |
968 | /* The HWSP contains a (cacheable) mirror of the CSB */ | 992 | struct i915_request *rq; |
969 | const u32 *buf = | 993 | unsigned int status; |
970 | &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; | 994 | unsigned int count; |
971 | unsigned int head, tail; | ||
972 | 995 | ||
973 | /* Clear before reading to catch new interrupts */ | 996 | if (++head == GEN8_CSB_ENTRIES) |
974 | clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); | 997 | head = 0; |
975 | smp_mb__after_atomic(); | ||
976 | 998 | ||
977 | if (unlikely(execlists->csb_use_mmio)) { | 999 | /* |
978 | if (!fw) { | 1000 | * We are flying near dragons again. |
979 | intel_uncore_forcewake_get(i915, execlists->fw_domains); | 1001 | * |
980 | fw = true; | 1002 | * We hold a reference to the request in execlist_port[] |
981 | } | 1003 | * but no more than that. We are operating in softirq |
1004 | * context and so cannot hold any mutex or sleep. That | ||
1005 | * prevents us stopping the requests we are processing | ||
1006 | * in port[] from being retired simultaneously (the | ||
1007 | * breadcrumb will be complete before we see the | ||
1008 | * context-switch). As we only hold the reference to the | ||
1009 | * request, any pointer chasing underneath the request | ||
1010 | * is subject to a potential use-after-free. Thus we | ||
1011 | * store all of the bookkeeping within port[] as | ||
1012 | * required, and avoid using unguarded pointers beneath | ||
1013 | * request itself. The same applies to the atomic | ||
1014 | * status notifier. | ||
1015 | */ | ||
982 | 1016 | ||
983 | buf = (u32 * __force) | 1017 | GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", |
984 | (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); | 1018 | engine->name, head, |
1019 | buf[2 * head + 0], buf[2 * head + 1], | ||
1020 | execlists->active); | ||
1021 | |||
1022 | status = buf[2 * head]; | ||
1023 | if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | | ||
1024 | GEN8_CTX_STATUS_PREEMPTED)) | ||
1025 | execlists_set_active(execlists, | ||
1026 | EXECLISTS_ACTIVE_HWACK); | ||
1027 | if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) | ||
1028 | execlists_clear_active(execlists, | ||
1029 | EXECLISTS_ACTIVE_HWACK); | ||
1030 | |||
1031 | if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) | ||
1032 | continue; | ||
985 | 1033 | ||
986 | head = readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); | 1034 | /* We should never get a COMPLETED | IDLE_ACTIVE! */ |
987 | tail = GEN8_CSB_WRITE_PTR(head); | 1035 | GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); |
988 | head = GEN8_CSB_READ_PTR(head); | ||
989 | execlists->csb_head = head; | ||
990 | } else { | ||
991 | const int write_idx = | ||
992 | intel_hws_csb_write_index(i915) - | ||
993 | I915_HWS_CSB_BUF0_INDEX; | ||
994 | 1036 | ||
995 | head = execlists->csb_head; | 1037 | if (status & GEN8_CTX_STATUS_COMPLETE && |
996 | tail = READ_ONCE(buf[write_idx]); | 1038 | buf[2*head + 1] == execlists->preempt_complete_status) { |
997 | rmb(); /* Hopefully paired with a wmb() in HW */ | 1039 | GEM_TRACE("%s preempt-idle\n", engine->name); |
1040 | complete_preempt_context(execlists); | ||
1041 | continue; | ||
998 | } | 1042 | } |
999 | GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", | ||
1000 | engine->name, | ||
1001 | head, GEN8_CSB_READ_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?", | ||
1002 | tail, GEN8_CSB_WRITE_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?"); | ||
1003 | 1043 | ||
1004 | while (head != tail) { | 1044 | if (status & GEN8_CTX_STATUS_PREEMPTED && |
1005 | struct i915_request *rq; | 1045 | execlists_is_active(execlists, |
1006 | unsigned int status; | 1046 | EXECLISTS_ACTIVE_PREEMPT)) |
1007 | unsigned int count; | 1047 | continue; |
1008 | 1048 | ||
1009 | if (++head == GEN8_CSB_ENTRIES) | 1049 | GEM_BUG_ON(!execlists_is_active(execlists, |
1010 | head = 0; | 1050 | EXECLISTS_ACTIVE_USER)); |
1011 | 1051 | ||
1052 | rq = port_unpack(port, &count); | ||
1053 | GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", | ||
1054 | engine->name, | ||
1055 | port->context_id, count, | ||
1056 | rq ? rq->global_seqno : 0, | ||
1057 | rq ? rq->fence.context : 0, | ||
1058 | rq ? rq->fence.seqno : 0, | ||
1059 | intel_engine_get_seqno(engine), | ||
1060 | rq ? rq_prio(rq) : 0); | ||
1061 | |||
1062 | /* Check the context/desc id for this event matches */ | ||
1063 | GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); | ||
1064 | |||
1065 | GEM_BUG_ON(count == 0); | ||
1066 | if (--count == 0) { | ||
1012 | /* | 1067 | /* |
1013 | * We are flying near dragons again. | 1068 | * On the final event corresponding to the |
1014 | * | 1069 | * submission of this context, we expect either |
1015 | * We hold a reference to the request in execlist_port[] | 1070 | * an element-switch event or a completion |
1016 | * but no more than that. We are operating in softirq | 1071 | * event (and on completion, the active-idle |
1017 | * context and so cannot hold any mutex or sleep. That | 1072 | * marker). No more preemptions, lite-restore |
1018 | * prevents us stopping the requests we are processing | 1073 | * or otherwise. |
1019 | * in port[] from being retired simultaneously (the | ||
1020 | * breadcrumb will be complete before we see the | ||
1021 | * context-switch). As we only hold the reference to the | ||
1022 | * request, any pointer chasing underneath the request | ||
1023 | * is subject to a potential use-after-free. Thus we | ||
1024 | * store all of the bookkeeping within port[] as | ||
1025 | * required, and avoid using unguarded pointers beneath | ||
1026 | * request itself. The same applies to the atomic | ||
1027 | * status notifier. | ||
1028 | */ | 1074 | */ |
1075 | GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); | ||
1076 | GEM_BUG_ON(port_isset(&port[1]) && | ||
1077 | !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); | ||
1078 | GEM_BUG_ON(!port_isset(&port[1]) && | ||
1079 | !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); | ||
1029 | 1080 | ||
1030 | status = READ_ONCE(buf[2 * head]); /* maybe mmio! */ | 1081 | /* |
1031 | GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", | 1082 | * We rely on the hardware being strongly |
1032 | engine->name, head, | 1083 | * ordered, that the breadcrumb write is |
1033 | status, buf[2*head + 1], | 1084 | * coherent (visible from the CPU) before the |
1034 | execlists->active); | 1085 | * user interrupt and CSB is processed. |
1035 | 1086 | */ | |
1036 | if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | | 1087 | GEM_BUG_ON(!i915_request_completed(rq)); |
1037 | GEN8_CTX_STATUS_PREEMPTED)) | ||
1038 | execlists_set_active(execlists, | ||
1039 | EXECLISTS_ACTIVE_HWACK); | ||
1040 | if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) | ||
1041 | execlists_clear_active(execlists, | ||
1042 | EXECLISTS_ACTIVE_HWACK); | ||
1043 | |||
1044 | if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) | ||
1045 | continue; | ||
1046 | |||
1047 | /* We should never get a COMPLETED | IDLE_ACTIVE! */ | ||
1048 | GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); | ||
1049 | |||
1050 | if (status & GEN8_CTX_STATUS_COMPLETE && | ||
1051 | buf[2*head + 1] == execlists->preempt_complete_status) { | ||
1052 | GEM_TRACE("%s preempt-idle\n", engine->name); | ||
1053 | complete_preempt_context(execlists); | ||
1054 | continue; | ||
1055 | } | ||
1056 | |||
1057 | if (status & GEN8_CTX_STATUS_PREEMPTED && | ||
1058 | execlists_is_active(execlists, | ||
1059 | EXECLISTS_ACTIVE_PREEMPT)) | ||
1060 | continue; | ||
1061 | |||
1062 | GEM_BUG_ON(!execlists_is_active(execlists, | ||
1063 | EXECLISTS_ACTIVE_USER)); | ||
1064 | |||
1065 | rq = port_unpack(port, &count); | ||
1066 | GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", | ||
1067 | engine->name, | ||
1068 | port->context_id, count, | ||
1069 | rq ? rq->global_seqno : 0, | ||
1070 | rq ? rq->fence.context : 0, | ||
1071 | rq ? rq->fence.seqno : 0, | ||
1072 | intel_engine_get_seqno(engine), | ||
1073 | rq ? rq_prio(rq) : 0); | ||
1074 | |||
1075 | /* Check the context/desc id for this event matches */ | ||
1076 | GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); | ||
1077 | 1088 | ||
1078 | GEM_BUG_ON(count == 0); | 1089 | execlists_context_schedule_out(rq, |
1079 | if (--count == 0) { | 1090 | INTEL_CONTEXT_SCHEDULE_OUT); |
1080 | /* | 1091 | i915_request_put(rq); |
1081 | * On the final event corresponding to the | ||
1082 | * submission of this context, we expect either | ||
1083 | * an element-switch event or a completion | ||
1084 | * event (and on completion, the active-idle | ||
1085 | * marker). No more preemptions, lite-restore | ||
1086 | * or otherwise. | ||
1087 | */ | ||
1088 | GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); | ||
1089 | GEM_BUG_ON(port_isset(&port[1]) && | ||
1090 | !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); | ||
1091 | GEM_BUG_ON(!port_isset(&port[1]) && | ||
1092 | !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); | ||
1093 | 1092 | ||
1094 | /* | 1093 | GEM_TRACE("%s completed ctx=%d\n", |
1095 | * We rely on the hardware being strongly | 1094 | engine->name, port->context_id); |
1096 | * ordered, that the breadcrumb write is | ||
1097 | * coherent (visible from the CPU) before the | ||
1098 | * user interrupt and CSB is processed. | ||
1099 | */ | ||
1100 | GEM_BUG_ON(!i915_request_completed(rq)); | ||
1101 | |||
1102 | execlists_context_schedule_out(rq, | ||
1103 | INTEL_CONTEXT_SCHEDULE_OUT); | ||
1104 | i915_request_put(rq); | ||
1105 | |||
1106 | GEM_TRACE("%s completed ctx=%d\n", | ||
1107 | engine->name, port->context_id); | ||
1108 | |||
1109 | port = execlists_port_complete(execlists, port); | ||
1110 | if (port_isset(port)) | ||
1111 | execlists_user_begin(execlists, port); | ||
1112 | else | ||
1113 | execlists_user_end(execlists); | ||
1114 | } else { | ||
1115 | port_set(port, port_pack(rq, count)); | ||
1116 | } | ||
1117 | } | ||
1118 | 1095 | ||
1119 | if (head != execlists->csb_head) { | 1096 | port = execlists_port_complete(execlists, port); |
1120 | execlists->csb_head = head; | 1097 | if (port_isset(port)) |
1121 | writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), | 1098 | execlists_user_begin(execlists, port); |
1122 | i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); | 1099 | else |
1100 | execlists_user_end(execlists); | ||
1101 | } else { | ||
1102 | port_set(port, port_pack(rq, count)); | ||
1123 | } | 1103 | } |
1124 | } while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)); | 1104 | } while (head != tail); |
1125 | 1105 | ||
1126 | if (unlikely(fw)) | 1106 | execlists->csb_head = head; |
1127 | intel_uncore_forcewake_put(i915, execlists->fw_domains); | ||
1128 | } | 1107 | } |
1129 | 1108 | ||
1130 | /* | 1109 | static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) |
1131 | * Check the unread Context Status Buffers and manage the submission of new | ||
1132 | * contexts to the ELSP accordingly. | ||
1133 | */ | ||
1134 | static void execlists_submission_tasklet(unsigned long data) | ||
1135 | { | 1110 | { |
1136 | struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; | 1111 | lockdep_assert_held(&engine->timeline.lock); |
1137 | |||
1138 | GEM_TRACE("%s awake?=%d, active=%x, irq-posted?=%d\n", | ||
1139 | engine->name, | ||
1140 | engine->i915->gt.awake, | ||
1141 | engine->execlists.active, | ||
1142 | test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)); | ||
1143 | 1112 | ||
1144 | /* | 1113 | /* |
1145 | * We can skip acquiring intel_runtime_pm_get() here as it was taken | 1114 | * We can skip acquiring intel_runtime_pm_get() here as it was taken |
@@ -1151,21 +1120,31 @@ static void execlists_submission_tasklet(unsigned long data) | |||
1151 | */ | 1120 | */ |
1152 | GEM_BUG_ON(!engine->i915->gt.awake); | 1121 | GEM_BUG_ON(!engine->i915->gt.awake); |
1153 | 1122 | ||
1154 | /* | 1123 | process_csb(engine); |
1155 | * Prefer doing test_and_clear_bit() as a two stage operation to avoid | ||
1156 | * imposing the cost of a locked atomic transaction when submitting a | ||
1157 | * new request (outside of the context-switch interrupt). | ||
1158 | */ | ||
1159 | if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) | ||
1160 | process_csb(engine); | ||
1161 | |||
1162 | if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) | 1124 | if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) |
1163 | execlists_dequeue(engine); | 1125 | execlists_dequeue(engine); |
1126 | } | ||
1164 | 1127 | ||
1165 | /* If the engine is now idle, so should be the flag; and vice versa. */ | 1128 | /* |
1166 | GEM_BUG_ON(execlists_is_active(&engine->execlists, | 1129 | * Check the unread Context Status Buffers and manage the submission of new |
1167 | EXECLISTS_ACTIVE_USER) == | 1130 | * contexts to the ELSP accordingly. |
1168 | !port_isset(engine->execlists.port)); | 1131 | */ |
1132 | static void execlists_submission_tasklet(unsigned long data) | ||
1133 | { | ||
1134 | struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; | ||
1135 | unsigned long flags; | ||
1136 | |||
1137 | GEM_TRACE("%s awake?=%d, active=%x\n", | ||
1138 | engine->name, | ||
1139 | engine->i915->gt.awake, | ||
1140 | engine->execlists.active); | ||
1141 | |||
1142 | spin_lock_irqsave(&engine->timeline.lock, flags); | ||
1143 | |||
1144 | if (engine->i915->gt.awake) /* we may be delayed until after we idle! */ | ||
1145 | __execlists_submission_tasklet(engine); | ||
1146 | |||
1147 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | ||
1169 | } | 1148 | } |
1170 | 1149 | ||
1171 | static void queue_request(struct intel_engine_cs *engine, | 1150 | static void queue_request(struct intel_engine_cs *engine, |
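The rewritten process_csb() above drops the irq_posted/forcewake dance entirely: it takes one snapshot of the write pointer (from the HWSP, or the low byte of the mmio register), walks its cached head up to that snapshot, and leaves anything the hardware appends later for the next invocation. A stripped-down, single-threaded model of that consume loop -- illustrative names and payload values only, with the rmb()/ordering concern of the real code reduced to a comment -- behaves like this across two passes:

#include <stdio.h>

#define ENTRIES 6                            /* GEN8_CSB_ENTRIES */

static unsigned int csb[ENTRIES];            /* stand-in for execlists->csb_status */
static unsigned int csb_write = ENTRIES - 1; /* stand-in for *execlists->csb_write */
static unsigned int csb_head = ENTRIES - 1;  /* our cached read position */

static void hw_emit(unsigned int payload)
{
	unsigned int slot = (csb_write + 1) % ENTRIES;

	csb[slot] = payload;  /* in the real HW this store is what rmb() orders against */
	csb_write = slot;
}

static void consume_csb(void)
{
	unsigned int head = csb_head;
	unsigned int tail = csb_write;   /* one snapshot bounds the whole walk */

	if (head == tail)
		return;                      /* nothing new, same early-out as the driver */

	do {
		if (++head == ENTRIES)
			head = 0;                /* wrap, exactly like process_csb() */
		printf("  csb[%u] = 0x%x\n", head, csb[head]);
	} while (head != tail);

	csb_head = head;                 /* remember where this pass stopped */
}

int main(void)
{
	hw_emit(0x8002); hw_emit(0x8012); hw_emit(0x8014);
	puts("first pass:");
	consume_csb();                   /* consumes entries 0..2 */

	hw_emit(0x0001); hw_emit(0x0018); hw_emit(0x8002); hw_emit(0x0014);
	puts("second pass:");
	consume_csb();                   /* consumes 3..5, then wraps to 0 */
	return 0;
}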
@@ -1176,16 +1155,30 @@ static void queue_request(struct intel_engine_cs *engine, | |||
1176 | &lookup_priolist(engine, prio)->requests); | 1155 | &lookup_priolist(engine, prio)->requests); |
1177 | } | 1156 | } |
1178 | 1157 | ||
1179 | static void __submit_queue(struct intel_engine_cs *engine, int prio) | 1158 | static void __update_queue(struct intel_engine_cs *engine, int prio) |
1180 | { | 1159 | { |
1181 | engine->execlists.queue_priority = prio; | 1160 | engine->execlists.queue_priority = prio; |
1182 | tasklet_hi_schedule(&engine->execlists.tasklet); | 1161 | } |
1162 | |||
1163 | static void __submit_queue_imm(struct intel_engine_cs *engine) | ||
1164 | { | ||
1165 | struct intel_engine_execlists * const execlists = &engine->execlists; | ||
1166 | |||
1167 | if (reset_in_progress(execlists)) | ||
1168 | return; /* defer until we restart the engine following reset */ | ||
1169 | |||
1170 | if (execlists->tasklet.func == execlists_submission_tasklet) | ||
1171 | __execlists_submission_tasklet(engine); | ||
1172 | else | ||
1173 | tasklet_hi_schedule(&execlists->tasklet); | ||
1183 | } | 1174 | } |
1184 | 1175 | ||
1185 | static void submit_queue(struct intel_engine_cs *engine, int prio) | 1176 | static void submit_queue(struct intel_engine_cs *engine, int prio) |
1186 | { | 1177 | { |
1187 | if (prio > engine->execlists.queue_priority) | 1178 | if (prio > engine->execlists.queue_priority) { |
1188 | __submit_queue(engine, prio); | 1179 | __update_queue(engine, prio); |
1180 | __submit_queue_imm(engine); | ||
1181 | } | ||
1189 | } | 1182 | } |
1190 | 1183 | ||
1191 | static void execlists_submit_request(struct i915_request *request) | 1184 | static void execlists_submit_request(struct i915_request *request) |
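submit_queue() above now tries to run the submission inline: it bumps queue_priority, bails out if a reset has the tasklet disabled, calls the submission body directly when the tasklet is still pointed at execlists_submission_tasklet(), and otherwise falls back to scheduling the tasklet (e.g. when another backend has repointed it). A user-space caricature of that dispatch decision, with made-up names and the locking elided:

#include <stdbool.h>
#include <stdio.h>

struct engine_model;
typedef void (*tasklet_fn)(struct engine_model *);

struct engine_model {
	int queue_priority;
	bool reset_in_progress;     /* models __tasklet_is_enabled() being false */
	tasklet_fn tasklet_func;
};

static void submission_body(struct engine_model *e)
{
	printf("submitted inline at prio %d\n", e->queue_priority);
}

static void default_tasklet(struct engine_model *e)   /* the "normal" handler */
{
	submission_body(e);
}

static void other_backend_tasklet(struct engine_model *e)
{
	printf("other backend handled prio %d\n", e->queue_priority);
}

static void submit_queue(struct engine_model *e, int prio)
{
	if (prio <= e->queue_priority)
		return;                          /* nothing newly urgent */

	e->queue_priority = prio;            /* __update_queue() */

	if (e->reset_in_progress)
		return;                          /* defer until the engine restarts */

	if (e->tasklet_func == default_tasklet)
		submission_body(e);              /* skip the softirq round-trip */
	else
		printf("tasklet_hi_schedule() for prio %d\n", prio);
}

int main(void)
{
	struct engine_model e = { .queue_priority = -1, .tasklet_func = default_tasklet };

	submit_queue(&e, 0);                 /* runs inline */

	e.tasklet_func = other_backend_tasklet;
	submit_queue(&e, 10);                /* falls back to scheduling */

	e.reset_in_progress = true;
	submit_queue(&e, 20);                /* priority recorded, submission deferred */
	return 0;
}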
@@ -1197,11 +1190,12 @@ static void execlists_submit_request(struct i915_request *request) | |||
1197 | spin_lock_irqsave(&engine->timeline.lock, flags); | 1190 | spin_lock_irqsave(&engine->timeline.lock, flags); |
1198 | 1191 | ||
1199 | queue_request(engine, &request->sched, rq_prio(request)); | 1192 | queue_request(engine, &request->sched, rq_prio(request)); |
1200 | submit_queue(engine, rq_prio(request)); | ||
1201 | 1193 | ||
1202 | GEM_BUG_ON(!engine->execlists.first); | 1194 | GEM_BUG_ON(!engine->execlists.first); |
1203 | GEM_BUG_ON(list_empty(&request->sched.link)); | 1195 | GEM_BUG_ON(list_empty(&request->sched.link)); |
1204 | 1196 | ||
1197 | submit_queue(engine, rq_prio(request)); | ||
1198 | |||
1205 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | 1199 | spin_unlock_irqrestore(&engine->timeline.lock, flags); |
1206 | } | 1200 | } |
1207 | 1201 | ||
@@ -1328,8 +1322,11 @@ static void execlists_schedule(struct i915_request *request, | |||
1328 | } | 1322 | } |
1329 | 1323 | ||
1330 | if (prio > engine->execlists.queue_priority && | 1324 | if (prio > engine->execlists.queue_priority && |
1331 | i915_sw_fence_done(&sched_to_request(node)->submit)) | 1325 | i915_sw_fence_done(&sched_to_request(node)->submit)) { |
1332 | __submit_queue(engine, prio); | 1326 | /* defer submission until after all of our updates */ |
1327 | __update_queue(engine, prio); | ||
1328 | tasklet_hi_schedule(&engine->execlists.tasklet); | ||
1329 | } | ||
1333 | } | 1330 | } |
1334 | 1331 | ||
1335 | spin_unlock_irq(&engine->timeline.lock); | 1332 | spin_unlock_irq(&engine->timeline.lock); |
@@ -1337,11 +1334,15 @@ static void execlists_schedule(struct i915_request *request, | |||
1337 | 1334 | ||
1338 | static void execlists_context_destroy(struct intel_context *ce) | 1335 | static void execlists_context_destroy(struct intel_context *ce) |
1339 | { | 1336 | { |
1340 | GEM_BUG_ON(!ce->state); | ||
1341 | GEM_BUG_ON(ce->pin_count); | 1337 | GEM_BUG_ON(ce->pin_count); |
1342 | 1338 | ||
1339 | if (!ce->state) | ||
1340 | return; | ||
1341 | |||
1343 | intel_ring_free(ce->ring); | 1342 | intel_ring_free(ce->ring); |
1344 | __i915_gem_object_release_unless_active(ce->state->obj); | 1343 | |
1344 | GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); | ||
1345 | i915_gem_object_put(ce->state->obj); | ||
1345 | } | 1346 | } |
1346 | 1347 | ||
1347 | static void execlists_context_unpin(struct intel_context *ce) | 1348 | static void execlists_context_unpin(struct intel_context *ce) |
@@ -1906,6 +1907,7 @@ execlists_reset_prepare(struct intel_engine_cs *engine) | |||
1906 | { | 1907 | { |
1907 | struct intel_engine_execlists * const execlists = &engine->execlists; | 1908 | struct intel_engine_execlists * const execlists = &engine->execlists; |
1908 | struct i915_request *request, *active; | 1909 | struct i915_request *request, *active; |
1910 | unsigned long flags; | ||
1909 | 1911 | ||
1910 | GEM_TRACE("%s\n", engine->name); | 1912 | GEM_TRACE("%s\n", engine->name); |
1911 | 1913 | ||
@@ -1920,6 +1922,8 @@ execlists_reset_prepare(struct intel_engine_cs *engine) | |||
1920 | */ | 1922 | */ |
1921 | __tasklet_disable_sync_once(&execlists->tasklet); | 1923 | __tasklet_disable_sync_once(&execlists->tasklet); |
1922 | 1924 | ||
1925 | spin_lock_irqsave(&engine->timeline.lock, flags); | ||
1926 | |||
1923 | /* | 1927 | /* |
1924 | * We want to flush the pending context switches, having disabled | 1928 | * We want to flush the pending context switches, having disabled |
1925 | * the tasklet above, we can assume exclusive access to the execlists. | 1929 | * the tasklet above, we can assume exclusive access to the execlists. |
@@ -1927,8 +1931,7 @@ execlists_reset_prepare(struct intel_engine_cs *engine) | |||
1927 | * and avoid blaming an innocent request if the stall was due to the | 1931 | * and avoid blaming an innocent request if the stall was due to the |
1928 | * preemption itself. | 1932 | * preemption itself. |
1929 | */ | 1933 | */ |
1930 | if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) | 1934 | process_csb(engine); |
1931 | process_csb(engine); | ||
1932 | 1935 | ||
1933 | /* | 1936 | /* |
1934 | * The last active request can then be no later than the last request | 1937 | * The last active request can then be no later than the last request |
@@ -1938,15 +1941,12 @@ execlists_reset_prepare(struct intel_engine_cs *engine) | |||
1938 | active = NULL; | 1941 | active = NULL; |
1939 | request = port_request(execlists->port); | 1942 | request = port_request(execlists->port); |
1940 | if (request) { | 1943 | if (request) { |
1941 | unsigned long flags; | ||
1942 | |||
1943 | /* | 1944 | /* |
1944 | * Prevent the breadcrumb from advancing before we decide | 1945 | * Prevent the breadcrumb from advancing before we decide |
1945 | * which request is currently active. | 1946 | * which request is currently active. |
1946 | */ | 1947 | */ |
1947 | intel_engine_stop_cs(engine); | 1948 | intel_engine_stop_cs(engine); |
1948 | 1949 | ||
1949 | spin_lock_irqsave(&engine->timeline.lock, flags); | ||
1950 | list_for_each_entry_from_reverse(request, | 1950 | list_for_each_entry_from_reverse(request, |
1951 | &engine->timeline.requests, | 1951 | &engine->timeline.requests, |
1952 | link) { | 1952 | link) { |
@@ -1956,9 +1956,10 @@ execlists_reset_prepare(struct intel_engine_cs *engine) | |||
1956 | 1956 | ||
1957 | active = request; | 1957 | active = request; |
1958 | } | 1958 | } |
1959 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | ||
1960 | } | 1959 | } |
1961 | 1960 | ||
1961 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | ||
1962 | |||
1962 | return active; | 1963 | return active; |
1963 | } | 1964 | } |
1964 | 1965 | ||
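The reset_prepare hunks above widen the engine timeline lock: instead of taking it only inside the if (request) branch, the new code holds it across CSB processing and the backwards request scan. A rough, self-contained sketch of that restructuring, using stub helpers in place of the real engine state and spinlock:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the engine->timeline lock and the work done under it. */
static void lock_timeline(void)      { puts("lock timeline"); }
static void unlock_timeline(void)    { puts("unlock timeline"); }
static void process_csb_events(void) { puts("process CSB"); }
static bool port_has_request(void)   { return true; }
static void stop_cs_and_scan(void)   { puts("stop CS, scan requests backwards"); }

/* Old shape: CSB processing outside the lock, lock only around the scan. */
static void reset_prepare_old(void)
{
	process_csb_events();
	if (port_has_request()) {
		lock_timeline();
		stop_cs_and_scan();
		unlock_timeline();
	}
}

/* New shape: one critical section covers CSB processing and the scan. */
static void reset_prepare_new(void)
{
	lock_timeline();
	process_csb_events();
	if (port_has_request())
		stop_cs_and_scan();
	unlock_timeline();
}

int main(void)
{
	reset_prepare_old();
	reset_prepare_new();
	return 0;
}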
@@ -1973,8 +1974,7 @@ static void execlists_reset(struct intel_engine_cs *engine, | |||
1973 | engine->name, request ? request->global_seqno : 0, | 1974 | engine->name, request ? request->global_seqno : 0, |
1974 | intel_engine_get_seqno(engine)); | 1975 | intel_engine_get_seqno(engine)); |
1975 | 1976 | ||
1976 | /* See execlists_cancel_requests() for the irq/spinlock split. */ | 1977 | spin_lock_irqsave(&engine->timeline.lock, flags); |
1977 | local_irq_save(flags); | ||
1978 | 1978 | ||
1979 | /* | 1979 | /* |
1980 | * Catch up with any missed context-switch interrupts. | 1980 | * Catch up with any missed context-switch interrupts. |
@@ -1989,14 +1989,12 @@ static void execlists_reset(struct intel_engine_cs *engine, | |||
1989 | reset_irq(engine); | 1989 | reset_irq(engine); |
1990 | 1990 | ||
1991 | /* Push back any incomplete requests for replay after the reset. */ | 1991 | /* Push back any incomplete requests for replay after the reset. */ |
1992 | spin_lock(&engine->timeline.lock); | ||
1993 | __unwind_incomplete_requests(engine); | 1992 | __unwind_incomplete_requests(engine); |
1994 | spin_unlock(&engine->timeline.lock); | ||
1995 | 1993 | ||
1996 | /* Following the reset, we need to reload the CSB read/write pointers */ | 1994 | /* Following the reset, we need to reload the CSB read/write pointers */ |
1997 | engine->execlists.csb_head = GEN8_CSB_ENTRIES - 1; | 1995 | reset_csb_pointers(&engine->execlists); |
1998 | 1996 | ||
1999 | local_irq_restore(flags); | 1997 | spin_unlock_irqrestore(&engine->timeline.lock, flags); |
2000 | 1998 | ||
2001 | /* | 1999 | /* |
2002 | * If the request was innocent, we leave the request in the ELSP | 2000 | * If the request was innocent, we leave the request in the ELSP |
@@ -2446,28 +2444,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) | |||
2446 | static void | 2444 | static void |
2447 | logical_ring_setup(struct intel_engine_cs *engine) | 2445 | logical_ring_setup(struct intel_engine_cs *engine) |
2448 | { | 2446 | { |
2449 | struct drm_i915_private *dev_priv = engine->i915; | ||
2450 | enum forcewake_domains fw_domains; | ||
2451 | |||
2452 | intel_engine_setup_common(engine); | 2447 | intel_engine_setup_common(engine); |
2453 | 2448 | ||
2454 | /* Intentionally left blank. */ | 2449 | /* Intentionally left blank. */ |
2455 | engine->buffer = NULL; | 2450 | engine->buffer = NULL; |
2456 | 2451 | ||
2457 | fw_domains = intel_uncore_forcewake_for_reg(dev_priv, | ||
2458 | RING_ELSP(engine), | ||
2459 | FW_REG_WRITE); | ||
2460 | |||
2461 | fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, | ||
2462 | RING_CONTEXT_STATUS_PTR(engine), | ||
2463 | FW_REG_READ | FW_REG_WRITE); | ||
2464 | |||
2465 | fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, | ||
2466 | RING_CONTEXT_STATUS_BUF_BASE(engine), | ||
2467 | FW_REG_READ); | ||
2468 | |||
2469 | engine->execlists.fw_domains = fw_domains; | ||
2470 | |||
2471 | tasklet_init(&engine->execlists.tasklet, | 2452 | tasklet_init(&engine->execlists.tasklet, |
2472 | execlists_submission_tasklet, (unsigned long)engine); | 2453 | execlists_submission_tasklet, (unsigned long)engine); |
2473 | 2454 | ||
@@ -2475,34 +2456,60 @@ logical_ring_setup(struct intel_engine_cs *engine) | |||
2475 | logical_ring_default_irqs(engine); | 2456 | logical_ring_default_irqs(engine); |
2476 | } | 2457 | } |
2477 | 2458 | ||
2459 | static bool csb_force_mmio(struct drm_i915_private *i915) | ||
2460 | { | ||
2461 | /* Older GVT emulation depends upon intercepting CSB mmio */ | ||
2462 | return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915); | ||
2463 | } | ||
2464 | |||
2478 | static int logical_ring_init(struct intel_engine_cs *engine) | 2465 | static int logical_ring_init(struct intel_engine_cs *engine) |
2479 | { | 2466 | { |
2467 | struct drm_i915_private *i915 = engine->i915; | ||
2468 | struct intel_engine_execlists * const execlists = &engine->execlists; | ||
2480 | int ret; | 2469 | int ret; |
2481 | 2470 | ||
2482 | ret = intel_engine_init_common(engine); | 2471 | ret = intel_engine_init_common(engine); |
2483 | if (ret) | 2472 | if (ret) |
2484 | goto error; | 2473 | goto error; |
2485 | 2474 | ||
2486 | if (HAS_LOGICAL_RING_ELSQ(engine->i915)) { | 2475 | if (HAS_LOGICAL_RING_ELSQ(i915)) { |
2487 | engine->execlists.submit_reg = engine->i915->regs + | 2476 | execlists->submit_reg = i915->regs + |
2488 | i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); | 2477 | i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); |
2489 | engine->execlists.ctrl_reg = engine->i915->regs + | 2478 | execlists->ctrl_reg = i915->regs + |
2490 | i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine)); | 2479 | i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine)); |
2491 | } else { | 2480 | } else { |
2492 | engine->execlists.submit_reg = engine->i915->regs + | 2481 | execlists->submit_reg = i915->regs + |
2493 | i915_mmio_reg_offset(RING_ELSP(engine)); | 2482 | i915_mmio_reg_offset(RING_ELSP(engine)); |
2494 | } | 2483 | } |
2495 | 2484 | ||
2496 | engine->execlists.preempt_complete_status = ~0u; | 2485 | execlists->preempt_complete_status = ~0u; |
2497 | if (engine->i915->preempt_context) { | 2486 | if (i915->preempt_context) { |
2498 | struct intel_context *ce = | 2487 | struct intel_context *ce = |
2499 | to_intel_context(engine->i915->preempt_context, engine); | 2488 | to_intel_context(i915->preempt_context, engine); |
2500 | 2489 | ||
2501 | engine->execlists.preempt_complete_status = | 2490 | execlists->preempt_complete_status = |
2502 | upper_32_bits(ce->lrc_desc); | 2491 | upper_32_bits(ce->lrc_desc); |
2503 | } | 2492 | } |
2504 | 2493 | ||
2505 | engine->execlists.csb_head = GEN8_CSB_ENTRIES - 1; | 2494 | execlists->csb_read = |
2495 | i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); | ||
2496 | if (csb_force_mmio(i915)) { | ||
2497 | execlists->csb_status = (u32 __force *) | ||
2498 | (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); | ||
2499 | |||
2500 | execlists->csb_write = (u32 __force *)execlists->csb_read; | ||
2501 | execlists->csb_write_reset = | ||
2502 | _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK, | ||
2503 | GEN8_CSB_ENTRIES - 1); | ||
2504 | } else { | ||
2505 | execlists->csb_status = | ||
2506 | &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; | ||
2507 | |||
2508 | execlists->csb_write = | ||
2509 | &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; | ||
2510 | execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1; | ||
2511 | } | ||
2512 | reset_csb_pointers(execlists); | ||
2506 | 2513 | ||
2507 | return 0; | 2514 | return 0; |
2508 | 2515 | ||
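The logical_ring_init() hunk above picks the CSB access pointers once at init time, so later CSB processing never has to re-decide between mmio and the HWSP shadow. A minimal standalone sketch of that selection pattern; the types, indices and reset values here are simplified placeholders, not the real register or HWSP layout (and the real code additionally keeps an always-mmio csb_read pointer):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_CSB_ENTRIES    6     /* stands in for GEN8_CSB_ENTRIES */
#define FAKE_HWSP_CSB_BUF0  0x10  /* placeholder index into the status page */
#define FAKE_HWSP_CSB_WRITE 0x1f  /* placeholder index into the status page */

struct fake_execlists {
	const uint32_t *csb_status;  /* CSB entries: mmio window or HWSP shadow */
	const uint32_t *csb_write;   /* where the HW publishes its write pointer */
	uint32_t csb_write_reset;    /* value used when resetting the write pointer */
};

/* Choose the access method once, as the hunk above does. */
static void setup_csb_pointers(struct fake_execlists *el,
			       const uint32_t *mmio_regs,
			       const uint32_t *hwsp_page,
			       bool force_mmio)
{
	if (force_mmio) {
		/* e.g. old GVT emulation that only intercepts mmio accesses */
		el->csb_status = mmio_regs;
		el->csb_write = mmio_regs;
		el->csb_write_reset = 0xffffu;  /* placeholder masked-field value */
	} else {
		el->csb_status = &hwsp_page[FAKE_HWSP_CSB_BUF0];
		el->csb_write = &hwsp_page[FAKE_HWSP_CSB_WRITE];
		el->csb_write_reset = FAKE_CSB_ENTRIES - 1;
	}
}

int main(void)
{
	uint32_t hwsp[64] = { 0 };
	uint32_t mmio[16] = { 0 };
	struct fake_execlists el;

	setup_csb_pointers(&el, mmio, hwsp, false);
	printf("CSB read via %s, write-pointer reset value %u\n",
	       el.csb_status == mmio ? "mmio" : "HWSP",
	       (unsigned int)el.csb_write_reset);
	return 0;
}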
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index 39a4e4edda07..849e1b69ba73 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c | |||
@@ -30,160 +30,6 @@ | |||
30 | #include <linux/debugfs.h> | 30 | #include <linux/debugfs.h> |
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | struct pipe_crc_info { | ||
34 | const char *name; | ||
35 | struct drm_i915_private *dev_priv; | ||
36 | enum pipe pipe; | ||
37 | }; | ||
38 | |||
39 | static int i915_pipe_crc_open(struct inode *inode, struct file *filep) | ||
40 | { | ||
41 | struct pipe_crc_info *info = inode->i_private; | ||
42 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
43 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
44 | |||
45 | if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes) | ||
46 | return -ENODEV; | ||
47 | |||
48 | spin_lock_irq(&pipe_crc->lock); | ||
49 | |||
50 | if (pipe_crc->opened) { | ||
51 | spin_unlock_irq(&pipe_crc->lock); | ||
52 | return -EBUSY; /* already open */ | ||
53 | } | ||
54 | |||
55 | pipe_crc->opened = true; | ||
56 | filep->private_data = inode->i_private; | ||
57 | |||
58 | spin_unlock_irq(&pipe_crc->lock); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static int i915_pipe_crc_release(struct inode *inode, struct file *filep) | ||
64 | { | ||
65 | struct pipe_crc_info *info = inode->i_private; | ||
66 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
67 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
68 | |||
69 | spin_lock_irq(&pipe_crc->lock); | ||
70 | pipe_crc->opened = false; | ||
71 | spin_unlock_irq(&pipe_crc->lock); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | /* (6 fields, 8 chars each, space separated (5) + '\n') */ | ||
77 | #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) | ||
78 | /* account for '\0' */ | ||
79 | #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) | ||
80 | |||
81 | static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) | ||
82 | { | ||
83 | lockdep_assert_held(&pipe_crc->lock); | ||
84 | return CIRC_CNT(pipe_crc->head, pipe_crc->tail, | ||
85 | INTEL_PIPE_CRC_ENTRIES_NR); | ||
86 | } | ||
87 | |||
88 | static ssize_t | ||
89 | i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, | ||
90 | loff_t *pos) | ||
91 | { | ||
92 | struct pipe_crc_info *info = filep->private_data; | ||
93 | struct drm_i915_private *dev_priv = info->dev_priv; | ||
94 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; | ||
95 | char buf[PIPE_CRC_BUFFER_LEN]; | ||
96 | int n_entries; | ||
97 | ssize_t bytes_read; | ||
98 | |||
99 | /* | ||
100 | * Don't allow user space to provide buffers not big enough to hold | ||
101 | * a line of data. | ||
102 | */ | ||
103 | if (count < PIPE_CRC_LINE_LEN) | ||
104 | return -EINVAL; | ||
105 | |||
106 | if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) | ||
107 | return 0; | ||
108 | |||
109 | /* nothing to read */ | ||
110 | spin_lock_irq(&pipe_crc->lock); | ||
111 | while (pipe_crc_data_count(pipe_crc) == 0) { | ||
112 | int ret; | ||
113 | |||
114 | if (filep->f_flags & O_NONBLOCK) { | ||
115 | spin_unlock_irq(&pipe_crc->lock); | ||
116 | return -EAGAIN; | ||
117 | } | ||
118 | |||
119 | ret = wait_event_interruptible_lock_irq(pipe_crc->wq, | ||
120 | pipe_crc_data_count(pipe_crc), pipe_crc->lock); | ||
121 | if (ret) { | ||
122 | spin_unlock_irq(&pipe_crc->lock); | ||
123 | return ret; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | /* We now have one or more entries to read */ | ||
128 | n_entries = count / PIPE_CRC_LINE_LEN; | ||
129 | |||
130 | bytes_read = 0; | ||
131 | while (n_entries > 0) { | ||
132 | struct intel_pipe_crc_entry *entry = | ||
133 | &pipe_crc->entries[pipe_crc->tail]; | ||
134 | |||
135 | if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, | ||
136 | INTEL_PIPE_CRC_ENTRIES_NR) < 1) | ||
137 | break; | ||
138 | |||
139 | BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); | ||
140 | pipe_crc->tail = (pipe_crc->tail + 1) & | ||
141 | (INTEL_PIPE_CRC_ENTRIES_NR - 1); | ||
142 | |||
143 | bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, | ||
144 | "%8u %8x %8x %8x %8x %8x\n", | ||
145 | entry->frame, entry->crc[0], | ||
146 | entry->crc[1], entry->crc[2], | ||
147 | entry->crc[3], entry->crc[4]); | ||
148 | |||
149 | spin_unlock_irq(&pipe_crc->lock); | ||
150 | |||
151 | if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) | ||
152 | return -EFAULT; | ||
153 | |||
154 | user_buf += PIPE_CRC_LINE_LEN; | ||
155 | n_entries--; | ||
156 | |||
157 | spin_lock_irq(&pipe_crc->lock); | ||
158 | } | ||
159 | |||
160 | spin_unlock_irq(&pipe_crc->lock); | ||
161 | |||
162 | return bytes_read; | ||
163 | } | ||
164 | |||
165 | static const struct file_operations i915_pipe_crc_fops = { | ||
166 | .owner = THIS_MODULE, | ||
167 | .open = i915_pipe_crc_open, | ||
168 | .read = i915_pipe_crc_read, | ||
169 | .release = i915_pipe_crc_release, | ||
170 | }; | ||
171 | |||
172 | static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { | ||
173 | { | ||
174 | .name = "i915_pipe_A_crc", | ||
175 | .pipe = PIPE_A, | ||
176 | }, | ||
177 | { | ||
178 | .name = "i915_pipe_B_crc", | ||
179 | .pipe = PIPE_B, | ||
180 | }, | ||
181 | { | ||
182 | .name = "i915_pipe_C_crc", | ||
183 | .pipe = PIPE_C, | ||
184 | }, | ||
185 | }; | ||
186 | |||
187 | static const char * const pipe_crc_sources[] = { | 33 | static const char * const pipe_crc_sources[] = { |
188 | "none", | 34 | "none", |
189 | "plane1", | 35 | "plane1", |
@@ -197,29 +43,6 @@ static const char * const pipe_crc_sources[] = { | |||
197 | "auto", | 43 | "auto", |
198 | }; | 44 | }; |
199 | 45 | ||
200 | static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) | ||
201 | { | ||
202 | BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); | ||
203 | return pipe_crc_sources[source]; | ||
204 | } | ||
205 | |||
206 | static int display_crc_ctl_show(struct seq_file *m, void *data) | ||
207 | { | ||
208 | struct drm_i915_private *dev_priv = m->private; | ||
209 | enum pipe pipe; | ||
210 | |||
211 | for_each_pipe(dev_priv, pipe) | ||
212 | seq_printf(m, "%c %s\n", pipe_name(pipe), | ||
213 | pipe_crc_source_name(dev_priv->pipe_crc[pipe].source)); | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static int display_crc_ctl_open(struct inode *inode, struct file *file) | ||
219 | { | ||
220 | return single_open(file, display_crc_ctl_show, inode->i_private); | ||
221 | } | ||
222 | |||
223 | static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, | 46 | static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, |
224 | uint32_t *val) | 47 | uint32_t *val) |
225 | { | 48 | { |
@@ -616,177 +439,6 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, | |||
616 | return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); | 439 | return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); |
617 | } | 440 | } |
618 | 441 | ||
619 | static int pipe_crc_set_source(struct drm_i915_private *dev_priv, | ||
620 | enum pipe pipe, | ||
621 | enum intel_pipe_crc_source source) | ||
622 | { | ||
623 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | ||
624 | enum intel_display_power_domain power_domain; | ||
625 | u32 val = 0; /* shut up gcc */ | ||
626 | int ret; | ||
627 | |||
628 | if (pipe_crc->source == source) | ||
629 | return 0; | ||
630 | |||
631 | /* forbid changing the source without going back to 'none' */ | ||
632 | if (pipe_crc->source && source) | ||
633 | return -EINVAL; | ||
634 | |||
635 | power_domain = POWER_DOMAIN_PIPE(pipe); | ||
636 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { | ||
637 | DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); | ||
638 | return -EIO; | ||
639 | } | ||
640 | |||
641 | ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true); | ||
642 | if (ret != 0) | ||
643 | goto out; | ||
644 | |||
645 | /* none -> real source transition */ | ||
646 | if (source) { | ||
647 | struct intel_pipe_crc_entry *entries; | ||
648 | |||
649 | DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", | ||
650 | pipe_name(pipe), pipe_crc_source_name(source)); | ||
651 | |||
652 | entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, | ||
653 | sizeof(pipe_crc->entries[0]), | ||
654 | GFP_KERNEL); | ||
655 | if (!entries) { | ||
656 | ret = -ENOMEM; | ||
657 | goto out; | ||
658 | } | ||
659 | |||
660 | spin_lock_irq(&pipe_crc->lock); | ||
661 | kfree(pipe_crc->entries); | ||
662 | pipe_crc->entries = entries; | ||
663 | pipe_crc->head = 0; | ||
664 | pipe_crc->tail = 0; | ||
665 | spin_unlock_irq(&pipe_crc->lock); | ||
666 | } | ||
667 | |||
668 | pipe_crc->source = source; | ||
669 | |||
670 | I915_WRITE(PIPE_CRC_CTL(pipe), val); | ||
671 | POSTING_READ(PIPE_CRC_CTL(pipe)); | ||
672 | |||
673 | /* real source -> none transition */ | ||
674 | if (!source) { | ||
675 | struct intel_pipe_crc_entry *entries; | ||
676 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, | ||
677 | pipe); | ||
678 | |||
679 | DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", | ||
680 | pipe_name(pipe)); | ||
681 | |||
682 | drm_modeset_lock(&crtc->base.mutex, NULL); | ||
683 | if (crtc->base.state->active) | ||
684 | intel_wait_for_vblank(dev_priv, pipe); | ||
685 | drm_modeset_unlock(&crtc->base.mutex); | ||
686 | |||
687 | spin_lock_irq(&pipe_crc->lock); | ||
688 | entries = pipe_crc->entries; | ||
689 | pipe_crc->entries = NULL; | ||
690 | pipe_crc->head = 0; | ||
691 | pipe_crc->tail = 0; | ||
692 | spin_unlock_irq(&pipe_crc->lock); | ||
693 | |||
694 | kfree(entries); | ||
695 | |||
696 | if (IS_G4X(dev_priv)) | ||
697 | g4x_undo_pipe_scramble_reset(dev_priv, pipe); | ||
698 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
699 | vlv_undo_pipe_scramble_reset(dev_priv, pipe); | ||
700 | else if ((IS_HASWELL(dev_priv) || | ||
701 | IS_BROADWELL(dev_priv)) && pipe == PIPE_A) | ||
702 | hsw_pipe_A_crc_wa(dev_priv, false); | ||
703 | } | ||
704 | |||
705 | ret = 0; | ||
706 | |||
707 | out: | ||
708 | intel_display_power_put(dev_priv, power_domain); | ||
709 | |||
710 | return ret; | ||
711 | } | ||
712 | |||
713 | /* | ||
714 | * Parse pipe CRC command strings: | ||
715 | * command: wsp* object wsp+ name wsp+ source wsp* | ||
716 | * object: 'pipe' | ||
717 | * name: (A | B | C) | ||
718 | * source: (none | plane1 | plane2 | pf) | ||
719 | * wsp: (#0x20 | #0x9 | #0xA)+ | ||
720 | * | ||
721 | * eg.: | ||
722 | * "pipe A plane1" -> Start CRC computations on plane1 of pipe A | ||
723 | * "pipe A none" -> Stop CRC | ||
724 | */ | ||
725 | static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) | ||
726 | { | ||
727 | int n_words = 0; | ||
728 | |||
729 | while (*buf) { | ||
730 | char *end; | ||
731 | |||
732 | /* skip leading white space */ | ||
733 | buf = skip_spaces(buf); | ||
734 | if (!*buf) | ||
735 | break; /* end of buffer */ | ||
736 | |||
737 | /* find end of word */ | ||
738 | for (end = buf; *end && !isspace(*end); end++) | ||
739 | ; | ||
740 | |||
741 | if (n_words == max_words) { | ||
742 | DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", | ||
743 | max_words); | ||
744 | return -EINVAL; /* ran out of words[] before bytes */ | ||
745 | } | ||
746 | |||
747 | if (*end) | ||
748 | *end++ = '\0'; | ||
749 | words[n_words++] = buf; | ||
750 | buf = end; | ||
751 | } | ||
752 | |||
753 | return n_words; | ||
754 | } | ||
755 | |||
756 | enum intel_pipe_crc_object { | ||
757 | PIPE_CRC_OBJECT_PIPE, | ||
758 | }; | ||
759 | |||
760 | static const char * const pipe_crc_objects[] = { | ||
761 | "pipe", | ||
762 | }; | ||
763 | |||
764 | static int | ||
765 | display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) | ||
766 | { | ||
767 | int i; | ||
768 | |||
769 | i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf); | ||
770 | if (i < 0) | ||
771 | return i; | ||
772 | |||
773 | *o = i; | ||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv, | ||
778 | const char *buf, enum pipe *pipe) | ||
779 | { | ||
780 | const char name = buf[0]; | ||
781 | |||
782 | if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes)) | ||
783 | return -EINVAL; | ||
784 | |||
785 | *pipe = name - 'A'; | ||
786 | |||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | static int | 442 | static int |
791 | display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) | 443 | display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) |
792 | { | 444 | { |
@@ -805,81 +457,6 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) | |||
805 | return 0; | 457 | return 0; |
806 | } | 458 | } |
807 | 459 | ||
808 | static int display_crc_ctl_parse(struct drm_i915_private *dev_priv, | ||
809 | char *buf, size_t len) | ||
810 | { | ||
811 | #define N_WORDS 3 | ||
812 | int n_words; | ||
813 | char *words[N_WORDS]; | ||
814 | enum pipe pipe; | ||
815 | enum intel_pipe_crc_object object; | ||
816 | enum intel_pipe_crc_source source; | ||
817 | |||
818 | n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); | ||
819 | if (n_words != N_WORDS) { | ||
820 | DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", | ||
821 | N_WORDS); | ||
822 | return -EINVAL; | ||
823 | } | ||
824 | |||
825 | if (display_crc_ctl_parse_object(words[0], &object) < 0) { | ||
826 | DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); | ||
827 | return -EINVAL; | ||
828 | } | ||
829 | |||
830 | if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) { | ||
831 | DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); | ||
832 | return -EINVAL; | ||
833 | } | ||
834 | |||
835 | if (display_crc_ctl_parse_source(words[2], &source) < 0) { | ||
836 | DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); | ||
837 | return -EINVAL; | ||
838 | } | ||
839 | |||
840 | return pipe_crc_set_source(dev_priv, pipe, source); | ||
841 | } | ||
842 | |||
843 | static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, | ||
844 | size_t len, loff_t *offp) | ||
845 | { | ||
846 | struct seq_file *m = file->private_data; | ||
847 | struct drm_i915_private *dev_priv = m->private; | ||
848 | char *tmpbuf; | ||
849 | int ret; | ||
850 | |||
851 | if (len == 0) | ||
852 | return 0; | ||
853 | |||
854 | if (len > PAGE_SIZE - 1) { | ||
855 | DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", | ||
856 | PAGE_SIZE); | ||
857 | return -E2BIG; | ||
858 | } | ||
859 | |||
860 | tmpbuf = memdup_user_nul(ubuf, len); | ||
861 | if (IS_ERR(tmpbuf)) | ||
862 | return PTR_ERR(tmpbuf); | ||
863 | |||
864 | ret = display_crc_ctl_parse(dev_priv, tmpbuf, len); | ||
865 | |||
866 | kfree(tmpbuf); | ||
867 | if (ret < 0) | ||
868 | return ret; | ||
869 | |||
870 | *offp += len; | ||
871 | return len; | ||
872 | } | ||
873 | |||
874 | const struct file_operations i915_display_crc_ctl_fops = { | ||
875 | .owner = THIS_MODULE, | ||
876 | .open = display_crc_ctl_open, | ||
877 | .read = seq_read, | ||
878 | .llseek = seq_lseek, | ||
879 | .release = single_release, | ||
880 | .write = display_crc_ctl_write | ||
881 | }; | ||
882 | |||
883 | void intel_display_crc_init(struct drm_i915_private *dev_priv) | 460 | void intel_display_crc_init(struct drm_i915_private *dev_priv) |
884 | { | 461 | { |
885 | enum pipe pipe; | 462 | enum pipe pipe; |
@@ -887,30 +464,8 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv) | |||
887 | for_each_pipe(dev_priv, pipe) { | 464 | for_each_pipe(dev_priv, pipe) { |
888 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | 465 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; |
889 | 466 | ||
890 | pipe_crc->opened = false; | ||
891 | spin_lock_init(&pipe_crc->lock); | 467 | spin_lock_init(&pipe_crc->lock); |
892 | init_waitqueue_head(&pipe_crc->wq); | ||
893 | } | ||
894 | } | ||
895 | |||
896 | int intel_pipe_crc_create(struct drm_minor *minor) | ||
897 | { | ||
898 | struct drm_i915_private *dev_priv = to_i915(minor->dev); | ||
899 | struct dentry *ent; | ||
900 | int i; | ||
901 | |||
902 | for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { | ||
903 | struct pipe_crc_info *info = &i915_pipe_crc_data[i]; | ||
904 | |||
905 | info->dev_priv = dev_priv; | ||
906 | ent = debugfs_create_file(info->name, S_IRUGO, | ||
907 | minor->debugfs_root, info, | ||
908 | &i915_pipe_crc_fops); | ||
909 | if (!ent) | ||
910 | return -ENOMEM; | ||
911 | } | 468 | } |
912 | |||
913 | return 0; | ||
914 | } | 469 | } |
915 | 470 | ||
916 | int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, | 471 | int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, |
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index d4cd19fea148..23acc9ac8d4d 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -56,43 +56,6 @@ | |||
56 | #include "intel_drv.h" | 56 | #include "intel_drv.h" |
57 | #include "i915_drv.h" | 57 | #include "i915_drv.h" |
58 | 58 | ||
59 | static inline enum intel_display_power_domain | ||
60 | psr_aux_domain(struct intel_dp *intel_dp) | ||
61 | { | ||
62 | /* CNL HW requires corresponding AUX IOs to be powered up for PSR. | ||
63 | * However, for non-A AUX ports the corresponding non-EDP transcoders | ||
64 | * would have already enabled power well 2 and DC_OFF. This means we can | ||
65 | * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a | ||
66 | * specific AUX_IO reference without powering up any extra wells. | ||
67 | * Note that PSR is enabled only on Port A even though this function | ||
68 | * returns the correct domain for other ports too. | ||
69 | */ | ||
70 | return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : | ||
71 | intel_dp->aux_power_domain; | ||
72 | } | ||
73 | |||
74 | static void psr_aux_io_power_get(struct intel_dp *intel_dp) | ||
75 | { | ||
76 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
77 | struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); | ||
78 | |||
79 | if (INTEL_GEN(dev_priv) < 10) | ||
80 | return; | ||
81 | |||
82 | intel_display_power_get(dev_priv, psr_aux_domain(intel_dp)); | ||
83 | } | ||
84 | |||
85 | static void psr_aux_io_power_put(struct intel_dp *intel_dp) | ||
86 | { | ||
87 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
88 | struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); | ||
89 | |||
90 | if (INTEL_GEN(dev_priv) < 10) | ||
91 | return; | ||
92 | |||
93 | intel_display_power_put(dev_priv, psr_aux_domain(intel_dp)); | ||
94 | } | ||
95 | |||
96 | void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) | 59 | void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) |
97 | { | 60 | { |
98 | u32 debug_mask, mask; | 61 | u32 debug_mask, mask; |
@@ -278,8 +241,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) | |||
278 | } | 241 | } |
279 | } | 242 | } |
280 | 243 | ||
281 | static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, | 244 | static void intel_psr_setup_vsc(struct intel_dp *intel_dp, |
282 | const struct intel_crtc_state *crtc_state) | 245 | const struct intel_crtc_state *crtc_state) |
283 | { | 246 | { |
284 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 247 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
285 | struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); | 248 | struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); |
@@ -336,7 +299,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) | |||
336 | aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); | 299 | aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); |
337 | 300 | ||
338 | /* Start with bits set for DDI_AUX_CTL register */ | 301 | /* Start with bits set for DDI_AUX_CTL register */ |
339 | aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), | 302 | aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), |
340 | aux_clock_divider); | 303 | aux_clock_divider); |
341 | 304 | ||
342 | /* Select only valid bits for SRD_AUX_CTL */ | 305 | /* Select only valid bits for SRD_AUX_CTL */ |
@@ -344,7 +307,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) | |||
344 | I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); | 307 | I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); |
345 | } | 308 | } |
346 | 309 | ||
347 | static void hsw_psr_enable_sink(struct intel_dp *intel_dp) | 310 | static void intel_psr_enable_sink(struct intel_dp *intel_dp) |
348 | { | 311 | { |
349 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 312 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
350 | struct drm_device *dev = dig_port->base.base.dev; | 313 | struct drm_device *dev = dig_port->base.base.dev; |
@@ -360,6 +323,8 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) | |||
360 | 323 | ||
361 | if (dev_priv->psr.link_standby) | 324 | if (dev_priv->psr.link_standby) |
362 | dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; | 325 | dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; |
326 | if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8) | ||
327 | dpcd_val |= DP_PSR_CRC_VERIFICATION; | ||
363 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); | 328 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); |
364 | 329 | ||
365 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); | 330 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); |
@@ -415,6 +380,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) | |||
415 | else | 380 | else |
416 | val |= EDP_PSR_TP1_TP2_SEL; | 381 | val |= EDP_PSR_TP1_TP2_SEL; |
417 | 382 | ||
383 | if (INTEL_GEN(dev_priv) >= 8) | ||
384 | val |= EDP_PSR_CRC_ENABLE; | ||
385 | |||
418 | val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK; | 386 | val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK; |
419 | I915_WRITE(EDP_PSR_CTL, val); | 387 | I915_WRITE(EDP_PSR_CTL, val); |
420 | } | 388 | } |
@@ -456,24 +424,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) | |||
456 | I915_WRITE(EDP_PSR2_CTL, val); | 424 | I915_WRITE(EDP_PSR2_CTL, val); |
457 | } | 425 | } |
458 | 426 | ||
459 | static void hsw_psr_activate(struct intel_dp *intel_dp) | ||
460 | { | ||
461 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
462 | struct drm_device *dev = dig_port->base.base.dev; | ||
463 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
464 | |||
465 | /* On HSW+ after we enable PSR on source it will activate it | ||
466 | * as soon as it matches the configured idle_frame count. So | ||
467 | * we just actually enable it here on activation time. | ||
468 | */ | ||
469 | |||
470 | /* psr1 and psr2 are mutually exclusive.*/ | ||
471 | if (dev_priv->psr.psr2_enabled) | ||
472 | hsw_activate_psr2(intel_dp); | ||
473 | else | ||
474 | hsw_activate_psr1(intel_dp); | ||
475 | } | ||
476 | |||
477 | static bool intel_psr2_config_valid(struct intel_dp *intel_dp, | 427 | static bool intel_psr2_config_valid(struct intel_dp *intel_dp, |
478 | struct intel_crtc_state *crtc_state) | 428 | struct intel_crtc_state *crtc_state) |
479 | { | 429 | { |
@@ -576,27 +526,29 @@ static void intel_psr_activate(struct intel_dp *intel_dp) | |||
576 | struct drm_device *dev = intel_dig_port->base.base.dev; | 526 | struct drm_device *dev = intel_dig_port->base.base.dev; |
577 | struct drm_i915_private *dev_priv = to_i915(dev); | 527 | struct drm_i915_private *dev_priv = to_i915(dev); |
578 | 528 | ||
579 | if (dev_priv->psr.psr2_enabled) | 529 | if (INTEL_GEN(dev_priv) >= 9) |
580 | WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); | 530 | WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); |
581 | else | 531 | WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); |
582 | WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); | ||
583 | WARN_ON(dev_priv->psr.active); | 532 | WARN_ON(dev_priv->psr.active); |
584 | lockdep_assert_held(&dev_priv->psr.lock); | 533 | lockdep_assert_held(&dev_priv->psr.lock); |
585 | 534 | ||
586 | dev_priv->psr.activate(intel_dp); | 535 | /* psr1 and psr2 are mutually exclusive.*/ |
536 | if (dev_priv->psr.psr2_enabled) | ||
537 | hsw_activate_psr2(intel_dp); | ||
538 | else | ||
539 | hsw_activate_psr1(intel_dp); | ||
540 | |||
587 | dev_priv->psr.active = true; | 541 | dev_priv->psr.active = true; |
588 | } | 542 | } |
589 | 543 | ||
590 | static void hsw_psr_enable_source(struct intel_dp *intel_dp, | 544 | static void intel_psr_enable_source(struct intel_dp *intel_dp, |
591 | const struct intel_crtc_state *crtc_state) | 545 | const struct intel_crtc_state *crtc_state) |
592 | { | 546 | { |
593 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 547 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
594 | struct drm_device *dev = dig_port->base.base.dev; | 548 | struct drm_device *dev = dig_port->base.base.dev; |
595 | struct drm_i915_private *dev_priv = to_i915(dev); | 549 | struct drm_i915_private *dev_priv = to_i915(dev); |
596 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; | 550 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
597 | 551 | ||
598 | psr_aux_io_power_get(intel_dp); | ||
599 | |||
600 | /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ | 552 | /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ |
601 | * use hardcoded values PSR AUX transactions | 553 | * use hardcoded values PSR AUX transactions |
602 | */ | 554 | */ |
@@ -632,7 +584,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp, | |||
632 | EDP_PSR_DEBUG_MASK_MEMUP | | 584 | EDP_PSR_DEBUG_MASK_MEMUP | |
633 | EDP_PSR_DEBUG_MASK_HPD | | 585 | EDP_PSR_DEBUG_MASK_HPD | |
634 | EDP_PSR_DEBUG_MASK_LPSP | | 586 | EDP_PSR_DEBUG_MASK_LPSP | |
635 | EDP_PSR_DEBUG_MASK_DISP_REG_WRITE); | 587 | EDP_PSR_DEBUG_MASK_DISP_REG_WRITE | |
588 | EDP_PSR_DEBUG_MASK_MAX_SLEEP); | ||
636 | } | 589 | } |
637 | } | 590 | } |
638 | 591 | ||
@@ -666,9 +619,9 @@ void intel_psr_enable(struct intel_dp *intel_dp, | |||
666 | dev_priv->psr.psr2_enabled = crtc_state->has_psr2; | 619 | dev_priv->psr.psr2_enabled = crtc_state->has_psr2; |
667 | dev_priv->psr.busy_frontbuffer_bits = 0; | 620 | dev_priv->psr.busy_frontbuffer_bits = 0; |
668 | 621 | ||
669 | dev_priv->psr.setup_vsc(intel_dp, crtc_state); | 622 | intel_psr_setup_vsc(intel_dp, crtc_state); |
670 | dev_priv->psr.enable_sink(intel_dp); | 623 | intel_psr_enable_sink(intel_dp); |
671 | dev_priv->psr.enable_source(intel_dp, crtc_state); | 624 | intel_psr_enable_source(intel_dp, crtc_state); |
672 | dev_priv->psr.enabled = intel_dp; | 625 | dev_priv->psr.enabled = intel_dp; |
673 | 626 | ||
674 | intel_psr_activate(intel_dp); | 627 | intel_psr_activate(intel_dp); |
@@ -677,8 +630,8 @@ unlock: | |||
677 | mutex_unlock(&dev_priv->psr.lock); | 630 | mutex_unlock(&dev_priv->psr.lock); |
678 | } | 631 | } |
679 | 632 | ||
680 | static void hsw_psr_disable(struct intel_dp *intel_dp, | 633 | static void |
681 | const struct intel_crtc_state *old_crtc_state) | 634 | intel_psr_disable_source(struct intel_dp *intel_dp) |
682 | { | 635 | { |
683 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 636 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
684 | struct drm_device *dev = intel_dig_port->base.base.dev; | 637 | struct drm_device *dev = intel_dig_port->base.base.dev; |
@@ -717,8 +670,25 @@ static void hsw_psr_disable(struct intel_dp *intel_dp, | |||
717 | else | 670 | else |
718 | WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); | 671 | WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); |
719 | } | 672 | } |
673 | } | ||
674 | |||
675 | static void intel_psr_disable_locked(struct intel_dp *intel_dp) | ||
676 | { | ||
677 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
678 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
679 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
720 | 680 | ||
721 | psr_aux_io_power_put(intel_dp); | 681 | lockdep_assert_held(&dev_priv->psr.lock); |
682 | |||
683 | if (!dev_priv->psr.enabled) | ||
684 | return; | ||
685 | |||
686 | intel_psr_disable_source(intel_dp); | ||
687 | |||
688 | /* Disable PSR on Sink */ | ||
689 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); | ||
690 | |||
691 | dev_priv->psr.enabled = NULL; | ||
722 | } | 692 | } |
723 | 693 | ||
724 | /** | 694 | /** |
@@ -742,22 +712,44 @@ void intel_psr_disable(struct intel_dp *intel_dp, | |||
742 | return; | 712 | return; |
743 | 713 | ||
744 | mutex_lock(&dev_priv->psr.lock); | 714 | mutex_lock(&dev_priv->psr.lock); |
745 | if (!dev_priv->psr.enabled) { | 715 | intel_psr_disable_locked(intel_dp); |
746 | mutex_unlock(&dev_priv->psr.lock); | 716 | mutex_unlock(&dev_priv->psr.lock); |
747 | return; | 717 | cancel_work_sync(&dev_priv->psr.work); |
748 | } | 718 | } |
749 | 719 | ||
750 | dev_priv->psr.disable_source(intel_dp, old_crtc_state); | 720 | int intel_psr_wait_for_idle(struct drm_i915_private *dev_priv) |
721 | { | ||
722 | i915_reg_t reg; | ||
723 | u32 mask; | ||
751 | 724 | ||
752 | /* Disable PSR on Sink */ | 725 | /* |
753 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); | 726 | * The sole user right now is intel_pipe_update_start(), |
727 | * which won't race with psr_enable/disable, which is | ||
728 | * where psr2_enabled is written to. So, we don't need | ||
729 | * to acquire the psr.lock. More importantly, we want the | ||
730 | * latency inside intel_pipe_update_start() to be as low | ||
731 | * as possible, so no need to acquire psr.lock when it is | ||
732 | * not needed and will induce latencies in the atomic | ||
733 | * update path. | ||
734 | */ | ||
735 | if (dev_priv->psr.psr2_enabled) { | ||
736 | reg = EDP_PSR2_STATUS; | ||
737 | mask = EDP_PSR2_STATUS_STATE_MASK; | ||
738 | } else { | ||
739 | reg = EDP_PSR_STATUS; | ||
740 | mask = EDP_PSR_STATUS_STATE_MASK; | ||
741 | } | ||
754 | 742 | ||
755 | dev_priv->psr.enabled = NULL; | 743 | /* |
756 | mutex_unlock(&dev_priv->psr.lock); | 744 | * Max time for PSR to idle = Inverse of the refresh rate + |
757 | cancel_work_sync(&dev_priv->psr.work); | 745 | * 6 ms of exit training time + 1.5 ms of aux channel |
746 | * handshake. 50 msec is defensive enough to cover everything. | ||
747 | */ | ||
748 | return intel_wait_for_register(dev_priv, reg, mask, | ||
749 | EDP_PSR_STATUS_STATE_IDLE, 50); | ||
758 | } | 750 | } |
759 | 751 | ||
760 | static bool psr_wait_for_idle(struct drm_i915_private *dev_priv) | 752 | static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) |
761 | { | 753 | { |
762 | struct intel_dp *intel_dp; | 754 | struct intel_dp *intel_dp; |
763 | i915_reg_t reg; | 755 | i915_reg_t reg; |
@@ -803,7 +795,7 @@ static void intel_psr_work(struct work_struct *work) | |||
803 | * PSR might take some time to get fully disabled | 795 | * PSR might take some time to get fully disabled |
804 | * and be ready for re-enable. | 796 | * and be ready for re-enable. |
805 | */ | 797 | */ |
806 | if (!psr_wait_for_idle(dev_priv)) | 798 | if (!__psr_wait_for_idle_locked(dev_priv)) |
807 | goto unlock; | 799 | goto unlock; |
808 | 800 | ||
809 | /* | 801 | /* |
@@ -811,7 +803,7 @@ static void intel_psr_work(struct work_struct *work) | |||
811 | * recheck. Since psr_flush first clears this and then reschedules we | 803 | * recheck. Since psr_flush first clears this and then reschedules we |
812 | * won't ever miss a flush when bailing out here. | 804 | * won't ever miss a flush when bailing out here. |
813 | */ | 805 | */ |
814 | if (dev_priv->psr.busy_frontbuffer_bits) | 806 | if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) |
815 | goto unlock; | 807 | goto unlock; |
816 | 808 | ||
817 | intel_psr_activate(dev_priv->psr.enabled); | 809 | intel_psr_activate(dev_priv->psr.enabled); |
@@ -986,11 +978,58 @@ void intel_psr_init(struct drm_i915_private *dev_priv) | |||
986 | 978 | ||
987 | INIT_WORK(&dev_priv->psr.work, intel_psr_work); | 979 | INIT_WORK(&dev_priv->psr.work, intel_psr_work); |
988 | mutex_init(&dev_priv->psr.lock); | 980 | mutex_init(&dev_priv->psr.lock); |
981 | } | ||
989 | 982 | ||
990 | dev_priv->psr.enable_source = hsw_psr_enable_source; | 983 | void intel_psr_short_pulse(struct intel_dp *intel_dp) |
991 | dev_priv->psr.disable_source = hsw_psr_disable; | 984 | { |
992 | dev_priv->psr.enable_sink = hsw_psr_enable_sink; | 985 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
993 | dev_priv->psr.activate = hsw_psr_activate; | 986 | struct drm_device *dev = intel_dig_port->base.base.dev; |
994 | dev_priv->psr.setup_vsc = hsw_psr_setup_vsc; | 987 | struct drm_i915_private *dev_priv = to_i915(dev); |
988 | struct i915_psr *psr = &dev_priv->psr; | ||
989 | u8 val; | ||
990 | const u8 errors = DP_PSR_RFB_STORAGE_ERROR | | ||
991 | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | | ||
992 | DP_PSR_LINK_CRC_ERROR; | ||
993 | |||
994 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) | ||
995 | return; | ||
996 | |||
997 | mutex_lock(&psr->lock); | ||
998 | |||
999 | if (psr->enabled != intel_dp) | ||
1000 | goto exit; | ||
1001 | |||
1002 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) { | ||
1003 | DRM_ERROR("PSR_STATUS dpcd read failed\n"); | ||
1004 | goto exit; | ||
1005 | } | ||
1006 | |||
1007 | if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) { | ||
1008 | DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); | ||
1009 | intel_psr_disable_locked(intel_dp); | ||
1010 | } | ||
1011 | |||
1012 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) { | ||
1013 | DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n"); | ||
1014 | goto exit; | ||
1015 | } | ||
995 | 1016 | ||
1017 | if (val & DP_PSR_RFB_STORAGE_ERROR) | ||
1018 | DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n"); | ||
1019 | if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) | ||
1020 | DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); | ||
1021 | if (val & DP_PSR_LINK_CRC_ERROR) | ||
1022 | DRM_ERROR("PSR Link CRC error, disabling PSR\n"); | ||
1023 | |||
1024 | if (val & ~errors) | ||
1025 | DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", | ||
1026 | val & ~errors); | ||
1027 | if (val & errors) | ||
1028 | intel_psr_disable_locked(intel_dp); | ||
1029 | /* clear status register */ | ||
1030 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); | ||
1031 | |||
1032 | /* TODO: handle PSR2 errors */ | ||
1033 | exit: | ||
1034 | mutex_unlock(&psr->lock); | ||
996 | } | 1035 | } |
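The new intel_psr_short_pulse() above follows a decode, disable, acknowledge pattern: read the sink's PSR error status, log each known error bit, disable PSR if any of them is set, then write the bits back to clear them. A hedged standalone sketch of that flow; the bit positions and helper names are illustrative stand-ins, not the real DPCD layout or driver API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; not the real DP_PSR_ERROR_STATUS layout. */
#define FAKE_RFB_STORAGE_ERROR    (1u << 0)
#define FAKE_VSC_SDP_UNCORR_ERROR (1u << 1)
#define FAKE_LINK_CRC_ERROR       (1u << 2)

/* Stubs standing in for the DPCD read/write and the locked disable path. */
static bool fake_read_error_status(uint8_t *val) { *val = FAKE_LINK_CRC_ERROR; return true; }
static void fake_ack_error_status(uint8_t val)   { (void)val; }
static void fake_psr_disable_locked(void)        { puts("PSR disabled"); }

static void handle_short_pulse(void)
{
	const uint8_t known = FAKE_RFB_STORAGE_ERROR |
			      FAKE_VSC_SDP_UNCORR_ERROR |
			      FAKE_LINK_CRC_ERROR;
	uint8_t val;

	if (!fake_read_error_status(&val)) {
		puts("error status read failed");
		return;
	}

	if (val & FAKE_RFB_STORAGE_ERROR)
		puts("RFB storage error");
	if (val & FAKE_VSC_SDP_UNCORR_ERROR)
		puts("VSC SDP uncorrectable error");
	if (val & FAKE_LINK_CRC_ERROR)
		puts("link CRC error");
	if (val & ~known)
		printf("unhandled error bits %#x\n", (unsigned int)(val & ~known));

	/* Any known error means PSR cannot stay on: disable, then ack by write-back. */
	if (val & known)
		fake_psr_disable_locked();
	fake_ack_error_status(val);
}

int main(void)
{
	handle_short_pulse();
	return 0;
}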
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e0448eff12bd..700f94c371b3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1169,8 +1169,11 @@ static void intel_ring_context_destroy(struct intel_context *ce) | |||
1169 | { | 1169 | { |
1170 | GEM_BUG_ON(ce->pin_count); | 1170 | GEM_BUG_ON(ce->pin_count); |
1171 | 1171 | ||
1172 | if (ce->state) | 1172 | if (!ce->state) |
1173 | __i915_gem_object_release_unless_active(ce->state->obj); | 1173 | return; |
1174 | |||
1175 | GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); | ||
1176 | i915_gem_object_put(ce->state->obj); | ||
1174 | } | 1177 | } |
1175 | 1178 | ||
1176 | static int __context_pin_ppgtt(struct i915_gem_context *ctx) | 1179 | static int __context_pin_ppgtt(struct i915_gem_context *ctx) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a0bc7a8222b4..ce6cc2a6cf7a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -300,24 +300,44 @@ struct intel_engine_execlists { | |||
300 | struct rb_node *first; | 300 | struct rb_node *first; |
301 | 301 | ||
302 | /** | 302 | /** |
303 | * @fw_domains: forcewake domains for irq tasklet | 303 | * @csb_read: control register for Context Switch buffer |
304 | * | ||
305 | * Note this register is always in mmio. | ||
304 | */ | 306 | */ |
305 | unsigned int fw_domains; | 307 | u32 __iomem *csb_read; |
306 | 308 | ||
307 | /** | 309 | /** |
308 | * @csb_head: context status buffer head | 310 | * @csb_write: control register for Context Switch buffer |
311 | * | ||
312 | * Note this register may be either mmio or HWSP shadow. | ||
309 | */ | 313 | */ |
310 | unsigned int csb_head; | 314 | u32 *csb_write; |
311 | 315 | ||
312 | /** | 316 | /** |
313 | * @csb_use_mmio: access csb through mmio, instead of hwsp | 317 | * @csb_status: status array for Context Switch buffer |
318 | * | ||
319 | * Note these registers may be either mmio or HWSP shadow. | ||
314 | */ | 320 | */ |
315 | bool csb_use_mmio; | 321 | u32 *csb_status; |
316 | 322 | ||
317 | /** | 323 | /** |
318 | * @preempt_complete_status: expected CSB upon completing preemption | 324 | * @preempt_complete_status: expected CSB upon completing preemption |
319 | */ | 325 | */ |
320 | u32 preempt_complete_status; | 326 | u32 preempt_complete_status; |
327 | |||
328 | /** | ||
329 | * @csb_write_reset: reset value for CSB write pointer | ||
330 | * | ||
331 | * As the CSB write pointer may be either in HWSP or a field | ||
332 | * inside an mmio register, we want to reprogram it slightly | ||
333 | * differently to avoid later confusion. | ||
334 | */ | ||
335 | u32 csb_write_reset; | ||
336 | |||
337 | /** | ||
338 | * @csb_head: context status buffer head | ||
339 | */ | ||
340 | u8 csb_head; | ||
321 | }; | 341 | }; |
322 | 342 | ||
323 | #define INTEL_ENGINE_CS_MAX_NAME 8 | 343 | #define INTEL_ENGINE_CS_MAX_NAME 8 |
@@ -345,10 +365,8 @@ struct intel_engine_cs { | |||
345 | struct drm_i915_gem_object *default_state; | 365 | struct drm_i915_gem_object *default_state; |
346 | void *pinned_default_state; | 366 | void *pinned_default_state; |
347 | 367 | ||
348 | atomic_t irq_count; | ||
349 | unsigned long irq_posted; | 368 | unsigned long irq_posted; |
350 | #define ENGINE_IRQ_BREADCRUMB 0 | 369 | #define ENGINE_IRQ_BREADCRUMB 0 |
351 | #define ENGINE_IRQ_EXECLIST 1 | ||
352 | 370 | ||
353 | /* Rather than have every client wait upon all user interrupts, | 371 | /* Rather than have every client wait upon all user interrupts, |
354 | * with the herd waking after every interrupt and each doing the | 372 | * with the herd waking after every interrupt and each doing the |
@@ -380,6 +398,7 @@ struct intel_engine_cs { | |||
380 | 398 | ||
381 | unsigned int hangcheck_interrupts; | 399 | unsigned int hangcheck_interrupts; |
382 | unsigned int irq_enabled; | 400 | unsigned int irq_enabled; |
401 | unsigned int irq_count; | ||
383 | 402 | ||
384 | bool irq_armed : 1; | 403 | bool irq_armed : 1; |
385 | I915_SELFTEST_DECLARE(bool mock : 1); | 404 | I915_SELFTEST_DECLARE(bool mock : 1); |
@@ -928,11 +947,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) | |||
928 | /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ | 947 | /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ |
929 | int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); | 948 | int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); |
930 | 949 | ||
931 | static inline void intel_wait_init(struct intel_wait *wait, | 950 | static inline void intel_wait_init(struct intel_wait *wait) |
932 | struct i915_request *rq) | ||
933 | { | 951 | { |
934 | wait->tsk = current; | 952 | wait->tsk = current; |
935 | wait->request = rq; | 953 | wait->request = NULL; |
936 | } | 954 | } |
937 | 955 | ||
938 | static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno) | 956 | static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno) |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index de3a81034f77..6b5aa3b074ec 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -134,6 +134,14 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) | |||
134 | return "AUX_F"; | 134 | return "AUX_F"; |
135 | case POWER_DOMAIN_AUX_IO_A: | 135 | case POWER_DOMAIN_AUX_IO_A: |
136 | return "AUX_IO_A"; | 136 | return "AUX_IO_A"; |
137 | case POWER_DOMAIN_AUX_TBT1: | ||
138 | return "AUX_TBT1"; | ||
139 | case POWER_DOMAIN_AUX_TBT2: | ||
140 | return "AUX_TBT2"; | ||
141 | case POWER_DOMAIN_AUX_TBT3: | ||
142 | return "AUX_TBT3"; | ||
143 | case POWER_DOMAIN_AUX_TBT4: | ||
144 | return "AUX_TBT4"; | ||
137 | case POWER_DOMAIN_GMBUS: | 145 | case POWER_DOMAIN_GMBUS: |
138 | return "GMBUS"; | 146 | return "GMBUS"; |
139 | case POWER_DOMAIN_INIT: | 147 | case POWER_DOMAIN_INIT: |
@@ -384,7 +392,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, | |||
384 | u32 val; | 392 | u32 val; |
385 | 393 | ||
386 | if (wait_fuses) { | 394 | if (wait_fuses) { |
387 | pg = SKL_PW_TO_PG(id); | 395 | pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) : |
396 | SKL_PW_TO_PG(id); | ||
388 | /* | 397 | /* |
389 | * For PW1 we have to wait both for the PW0/PG0 fuse state | 398 | * For PW1 we have to wait both for the PW0/PG0 fuse state |
390 | * before enabling the power well and PW1/PG1's own fuse | 399 | * before enabling the power well and PW1/PG1's own fuse |
@@ -430,6 +439,43 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, | |||
430 | hsw_wait_for_power_well_disable(dev_priv, power_well); | 439 | hsw_wait_for_power_well_disable(dev_priv, power_well); |
431 | } | 440 | } |
432 | 441 | ||
442 | #define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A) | ||
443 | |||
444 | static void | ||
445 | icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, | ||
446 | struct i915_power_well *power_well) | ||
447 | { | ||
448 | enum i915_power_well_id id = power_well->id; | ||
449 | enum port port = ICL_AUX_PW_TO_PORT(id); | ||
450 | u32 val; | ||
451 | |||
452 | val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); | ||
453 | I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id)); | ||
454 | |||
455 | val = I915_READ(ICL_PORT_CL_DW12(port)); | ||
456 | I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); | ||
457 | |||
458 | hsw_wait_for_power_well_enable(dev_priv, power_well); | ||
459 | } | ||
460 | |||
461 | static void | ||
462 | icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, | ||
463 | struct i915_power_well *power_well) | ||
464 | { | ||
465 | enum i915_power_well_id id = power_well->id; | ||
466 | enum port port = ICL_AUX_PW_TO_PORT(id); | ||
467 | u32 val; | ||
468 | |||
469 | val = I915_READ(ICL_PORT_CL_DW12(port)); | ||
470 | I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); | ||
471 | |||
472 | val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); | ||
473 | I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), | ||
474 | val & ~HSW_PWR_WELL_CTL_REQ(id)); | ||
475 | |||
476 | hsw_wait_for_power_well_disable(dev_priv, power_well); | ||
477 | } | ||
478 | |||
433 | /* | 479 | /* |
434 | * We should only use the power well if we explicitly asked the hardware to | 480 | * We should only use the power well if we explicitly asked the hardware to |
435 | * enable it, so check if it's enabled and also check if we've requested it to | 481 | * enable it, so check if it's enabled and also check if we've requested it to |
@@ -1824,6 +1870,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
1824 | BIT_ULL(POWER_DOMAIN_INIT)) | 1870 | BIT_ULL(POWER_DOMAIN_INIT)) |
1825 | #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ | 1871 | #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ |
1826 | BIT_ULL(POWER_DOMAIN_AUX_A) | \ | 1872 | BIT_ULL(POWER_DOMAIN_AUX_A) | \ |
1873 | BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ | ||
1827 | BIT_ULL(POWER_DOMAIN_INIT)) | 1874 | BIT_ULL(POWER_DOMAIN_INIT)) |
1828 | #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ | 1875 | #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ |
1829 | BIT_ULL(POWER_DOMAIN_AUX_B) | \ | 1876 | BIT_ULL(POWER_DOMAIN_AUX_B) | \ |
@@ -1896,6 +1943,105 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
1896 | BIT_ULL(POWER_DOMAIN_AUX_A) | \ | 1943 | BIT_ULL(POWER_DOMAIN_AUX_A) | \ |
1897 | BIT_ULL(POWER_DOMAIN_INIT)) | 1944 | BIT_ULL(POWER_DOMAIN_INIT)) |
1898 | 1945 | ||
1946 | /* | ||
1947 | * ICL PW_0/PG_0 domains (HW/DMC control): | ||
1948 | * - PCI | ||
1949 | * - clocks except port PLL | ||
1950 | * - central power except FBC | ||
1951 | * - shared functions except pipe interrupts, pipe MBUS, DBUF registers | ||
1952 | * ICL PW_1/PG_1 domains (HW/DMC control): | ||
1953 | * - DBUF function | ||
1954 | * - PIPE_A and its planes, except VGA | ||
1955 | * - transcoder EDP + PSR | ||
1956 | * - transcoder DSI | ||
1957 | * - DDI_A | ||
1958 | * - FBC | ||
1959 | */ | ||
1960 | #define ICL_PW_4_POWER_DOMAINS ( \ | ||
1961 | BIT_ULL(POWER_DOMAIN_PIPE_C) | \ | ||
1962 | BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ | ||
1963 | BIT_ULL(POWER_DOMAIN_INIT)) | ||
1964 | /* VDSC/joining */ | ||
1965 | #define ICL_PW_3_POWER_DOMAINS ( \ | ||
1966 | ICL_PW_4_POWER_DOMAINS | \ | ||
1967 | BIT_ULL(POWER_DOMAIN_PIPE_B) | \ | ||
1968 | BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ | ||
1969 | BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ | ||
1970 | BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ | ||
1971 | BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ | ||
1972 | BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
1973 | BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ | ||
1974 | BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
1975 | BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ | ||
1976 | BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ | ||
1977 | BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ | ||
1978 | BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ | ||
1979 | BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ | ||
1980 | BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ | ||
1981 | BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ | ||
1982 | BIT_ULL(POWER_DOMAIN_AUX_B) | \ | ||
1983 | BIT_ULL(POWER_DOMAIN_AUX_C) | \ | ||
1984 | BIT_ULL(POWER_DOMAIN_AUX_D) | \ | ||
1985 | BIT_ULL(POWER_DOMAIN_AUX_E) | \ | ||
1986 | BIT_ULL(POWER_DOMAIN_AUX_F) | \ | ||
1987 | BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ | ||
1988 | BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ | ||
1989 | BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ | ||
1990 | BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ | ||
1991 | BIT_ULL(POWER_DOMAIN_VGA) | \ | ||
1992 | BIT_ULL(POWER_DOMAIN_AUDIO) | \ | ||
1993 | BIT_ULL(POWER_DOMAIN_INIT)) | ||
1994 | /* | ||
1995 | * - transcoder WD | ||
1996 | * - KVMR (HW control) | ||
1997 | */ | ||
1998 | #define ICL_PW_2_POWER_DOMAINS ( \ | ||
1999 | ICL_PW_3_POWER_DOMAINS | \ | ||
2000 | BIT_ULL(POWER_DOMAIN_INIT)) | ||
2001 | /* | ||
2002 | * - eDP/DSI VDSC | ||
2003 | * - KVMR (HW control) | ||
2004 | */ | ||
2005 | #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ | ||
2006 | ICL_PW_2_POWER_DOMAINS | \ | ||
2007 | BIT_ULL(POWER_DOMAIN_MODESET) | \ | ||
2008 | BIT_ULL(POWER_DOMAIN_AUX_A) | \ | ||
2009 | BIT_ULL(POWER_DOMAIN_INIT)) | ||
2010 | |||
2011 | #define ICL_DDI_IO_A_POWER_DOMAINS ( \ | ||
2012 | BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) | ||
2013 | #define ICL_DDI_IO_B_POWER_DOMAINS ( \ | ||
2014 | BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) | ||
2015 | #define ICL_DDI_IO_C_POWER_DOMAINS ( \ | ||
2016 | BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) | ||
2017 | #define ICL_DDI_IO_D_POWER_DOMAINS ( \ | ||
2018 | BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) | ||
2019 | #define ICL_DDI_IO_E_POWER_DOMAINS ( \ | ||
2020 | BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) | ||
2021 | #define ICL_DDI_IO_F_POWER_DOMAINS ( \ | ||
2022 | BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) | ||
2023 | |||
2024 | #define ICL_AUX_A_IO_POWER_DOMAINS ( \ | ||
2025 | BIT_ULL(POWER_DOMAIN_AUX_A)) | ||
2026 | #define ICL_AUX_B_IO_POWER_DOMAINS ( \ | ||
2027 | BIT_ULL(POWER_DOMAIN_AUX_B)) | ||
2028 | #define ICL_AUX_C_IO_POWER_DOMAINS ( \ | ||
2029 | BIT_ULL(POWER_DOMAIN_AUX_C)) | ||
2030 | #define ICL_AUX_D_IO_POWER_DOMAINS ( \ | ||
2031 | BIT_ULL(POWER_DOMAIN_AUX_D)) | ||
2032 | #define ICL_AUX_E_IO_POWER_DOMAINS ( \ | ||
2033 | BIT_ULL(POWER_DOMAIN_AUX_E)) | ||
2034 | #define ICL_AUX_F_IO_POWER_DOMAINS ( \ | ||
2035 | BIT_ULL(POWER_DOMAIN_AUX_F)) | ||
2036 | #define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ | ||
2037 | BIT_ULL(POWER_DOMAIN_AUX_TBT1)) | ||
2038 | #define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ | ||
2039 | BIT_ULL(POWER_DOMAIN_AUX_TBT2)) | ||
2040 | #define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ | ||
2041 | BIT_ULL(POWER_DOMAIN_AUX_TBT3)) | ||
2042 | #define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ | ||
2043 | BIT_ULL(POWER_DOMAIN_AUX_TBT4)) | ||
2044 | |||
1899 | static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { | 2045 | static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { |
1900 | .sync_hw = i9xx_power_well_sync_hw_noop, | 2046 | .sync_hw = i9xx_power_well_sync_hw_noop, |
1901 | .enable = i9xx_always_on_power_well_noop, | 2047 | .enable = i9xx_always_on_power_well_noop, |
@@ -2453,6 +2599,157 @@ static struct i915_power_well cnl_power_wells[] = { | |||
2453 | }, | 2599 | }, |
2454 | }; | 2600 | }; |
2455 | 2601 | ||
2602 | static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { | ||
2603 | .sync_hw = hsw_power_well_sync_hw, | ||
2604 | .enable = icl_combo_phy_aux_power_well_enable, | ||
2605 | .disable = icl_combo_phy_aux_power_well_disable, | ||
2606 | .is_enabled = hsw_power_well_enabled, | ||
2607 | }; | ||
2608 | |||
2609 | static struct i915_power_well icl_power_wells[] = { | ||
2610 | { | ||
2611 | .name = "always-on", | ||
2612 | .always_on = 1, | ||
2613 | .domains = POWER_DOMAIN_MASK, | ||
2614 | .ops = &i9xx_always_on_power_well_ops, | ||
2615 | .id = I915_DISP_PW_ALWAYS_ON, | ||
2616 | }, | ||
2617 | { | ||
2618 | .name = "power well 1", | ||
2619 | /* Handled by the DMC firmware */ | ||
2620 | .domains = 0, | ||
2621 | .ops = &hsw_power_well_ops, | ||
2622 | .id = ICL_DISP_PW_1, | ||
2623 | .hsw.has_fuses = true, | ||
2624 | }, | ||
2625 | { | ||
2626 | .name = "power well 2", | ||
2627 | .domains = ICL_PW_2_POWER_DOMAINS, | ||
2628 | .ops = &hsw_power_well_ops, | ||
2629 | .id = ICL_DISP_PW_2, | ||
2630 | .hsw.has_fuses = true, | ||
2631 | }, | ||
2632 | { | ||
2633 | .name = "DC off", | ||
2634 | .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, | ||
2635 | .ops = &gen9_dc_off_power_well_ops, | ||
2636 | .id = SKL_DISP_PW_DC_OFF, | ||
2637 | }, | ||
2638 | { | ||
2639 | .name = "power well 3", | ||
2640 | .domains = ICL_PW_3_POWER_DOMAINS, | ||
2641 | .ops = &hsw_power_well_ops, | ||
2642 | .id = ICL_DISP_PW_3, | ||
2643 | .hsw.irq_pipe_mask = BIT(PIPE_B), | ||
2644 | .hsw.has_vga = true, | ||
2645 | .hsw.has_fuses = true, | ||
2646 | }, | ||
2647 | { | ||
2648 | .name = "DDI A IO", | ||
2649 | .domains = ICL_DDI_IO_A_POWER_DOMAINS, | ||
2650 | .ops = &hsw_power_well_ops, | ||
2651 | .id = ICL_DISP_PW_DDI_A, | ||
2652 | }, | ||
2653 | { | ||
2654 | .name = "DDI B IO", | ||
2655 | .domains = ICL_DDI_IO_B_POWER_DOMAINS, | ||
2656 | .ops = &hsw_power_well_ops, | ||
2657 | .id = ICL_DISP_PW_DDI_B, | ||
2658 | }, | ||
2659 | { | ||
2660 | .name = "DDI C IO", | ||
2661 | .domains = ICL_DDI_IO_C_POWER_DOMAINS, | ||
2662 | .ops = &hsw_power_well_ops, | ||
2663 | .id = ICL_DISP_PW_DDI_C, | ||
2664 | }, | ||
2665 | { | ||
2666 | .name = "DDI D IO", | ||
2667 | .domains = ICL_DDI_IO_D_POWER_DOMAINS, | ||
2668 | .ops = &hsw_power_well_ops, | ||
2669 | .id = ICL_DISP_PW_DDI_D, | ||
2670 | }, | ||
2671 | { | ||
2672 | .name = "DDI E IO", | ||
2673 | .domains = ICL_DDI_IO_E_POWER_DOMAINS, | ||
2674 | .ops = &hsw_power_well_ops, | ||
2675 | .id = ICL_DISP_PW_DDI_E, | ||
2676 | }, | ||
2677 | { | ||
2678 | .name = "DDI F IO", | ||
2679 | .domains = ICL_DDI_IO_F_POWER_DOMAINS, | ||
2680 | .ops = &hsw_power_well_ops, | ||
2681 | .id = ICL_DISP_PW_DDI_F, | ||
2682 | }, | ||
2683 | { | ||
2684 | .name = "AUX A", | ||
2685 | .domains = ICL_AUX_A_IO_POWER_DOMAINS, | ||
2686 | .ops = &icl_combo_phy_aux_power_well_ops, | ||
2687 | .id = ICL_DISP_PW_AUX_A, | ||
2688 | }, | ||
2689 | { | ||
2690 | .name = "AUX B", | ||
2691 | .domains = ICL_AUX_B_IO_POWER_DOMAINS, | ||
2692 | .ops = &icl_combo_phy_aux_power_well_ops, | ||
2693 | .id = ICL_DISP_PW_AUX_B, | ||
2694 | }, | ||
2695 | { | ||
2696 | .name = "AUX C", | ||
2697 | .domains = ICL_AUX_C_IO_POWER_DOMAINS, | ||
2698 | .ops = &hsw_power_well_ops, | ||
2699 | .id = ICL_DISP_PW_AUX_C, | ||
2700 | }, | ||
2701 | { | ||
2702 | .name = "AUX D", | ||
2703 | .domains = ICL_AUX_D_IO_POWER_DOMAINS, | ||
2704 | .ops = &hsw_power_well_ops, | ||
2705 | .id = ICL_DISP_PW_AUX_D, | ||
2706 | }, | ||
2707 | { | ||
2708 | .name = "AUX E", | ||
2709 | .domains = ICL_AUX_E_IO_POWER_DOMAINS, | ||
2710 | .ops = &hsw_power_well_ops, | ||
2711 | .id = ICL_DISP_PW_AUX_E, | ||
2712 | }, | ||
2713 | { | ||
2714 | .name = "AUX F", | ||
2715 | .domains = ICL_AUX_F_IO_POWER_DOMAINS, | ||
2716 | .ops = &hsw_power_well_ops, | ||
2717 | .id = ICL_DISP_PW_AUX_F, | ||
2718 | }, | ||
2719 | { | ||
2720 | .name = "AUX TBT1", | ||
2721 | .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, | ||
2722 | .ops = &hsw_power_well_ops, | ||
2723 | .id = ICL_DISP_PW_AUX_TBT1, | ||
2724 | }, | ||
2725 | { | ||
2726 | .name = "AUX TBT2", | ||
2727 | .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, | ||
2728 | .ops = &hsw_power_well_ops, | ||
2729 | .id = ICL_DISP_PW_AUX_TBT2, | ||
2730 | }, | ||
2731 | { | ||
2732 | .name = "AUX TBT3", | ||
2733 | .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, | ||
2734 | .ops = &hsw_power_well_ops, | ||
2735 | .id = ICL_DISP_PW_AUX_TBT3, | ||
2736 | }, | ||
2737 | { | ||
2738 | .name = "AUX TBT4", | ||
2739 | .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, | ||
2740 | .ops = &hsw_power_well_ops, | ||
2741 | .id = ICL_DISP_PW_AUX_TBT4, | ||
2742 | }, | ||
2743 | { | ||
2744 | .name = "power well 4", | ||
2745 | .domains = ICL_PW_4_POWER_DOMAINS, | ||
2746 | .ops = &hsw_power_well_ops, | ||
2747 | .id = ICL_DISP_PW_4, | ||
2748 | .hsw.has_fuses = true, | ||
2749 | .hsw.irq_pipe_mask = BIT(PIPE_C), | ||
2750 | }, | ||
2751 | }; | ||
2752 | |||
2456 | static int | 2753 | static int |
2457 | sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, | 2754 | sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, |
2458 | int disable_power_well) | 2755 | int disable_power_well) |
@@ -2470,7 +2767,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, | |||
2470 | int requested_dc; | 2767 | int requested_dc; |
2471 | int max_dc; | 2768 | int max_dc; |
2472 | 2769 | ||
2473 | if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { | 2770 | if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) { |
2474 | max_dc = 2; | 2771 | max_dc = 2; |
2475 | mask = 0; | 2772 | mask = 0; |
2476 | } else if (IS_GEN9_LP(dev_priv)) { | 2773 | } else if (IS_GEN9_LP(dev_priv)) { |
@@ -2558,7 +2855,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) | |||
2558 | * The enabling order will be from lower to higher indexed wells, | 2855 | * The enabling order will be from lower to higher indexed wells, |
2559 | * the disabling order is reversed. | 2856 | * the disabling order is reversed. |
2560 | */ | 2857 | */ |
2561 | if (IS_HASWELL(dev_priv)) { | 2858 | if (IS_ICELAKE(dev_priv)) { |
2859 | set_power_wells(power_domains, icl_power_wells); | ||
2860 | } else if (IS_HASWELL(dev_priv)) { | ||
2562 | set_power_wells(power_domains, hsw_power_wells); | 2861 | set_power_wells(power_domains, hsw_power_wells); |
2563 | } else if (IS_BROADWELL(dev_priv)) { | 2862 | } else if (IS_BROADWELL(dev_priv)) { |
2564 | set_power_wells(power_domains, bdw_power_wells); | 2863 | set_power_wells(power_domains, bdw_power_wells); |
@@ -2913,6 +3212,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, | |||
2913 | switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { | 3212 | switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { |
2914 | default: | 3213 | default: |
2915 | MISSING_CASE(val); | 3214 | MISSING_CASE(val); |
3215 | /* fall through */ | ||
2916 | case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: | 3216 | case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: |
2917 | procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; | 3217 | procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; |
2918 | break; | 3218 | break; |
@@ -3025,6 +3325,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) | |||
3025 | static void icl_display_core_init(struct drm_i915_private *dev_priv, | 3325 | static void icl_display_core_init(struct drm_i915_private *dev_priv, |
3026 | bool resume) | 3326 | bool resume) |
3027 | { | 3327 | { |
3328 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
3329 | struct i915_power_well *well; | ||
3028 | enum port port; | 3330 | enum port port; |
3029 | u32 val; | 3331 | u32 val; |
3030 | 3332 | ||
@@ -3053,8 +3355,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, | |||
3053 | I915_WRITE(ICL_PORT_CL_DW5(port), val); | 3355 | I915_WRITE(ICL_PORT_CL_DW5(port), val); |
3054 | } | 3356 | } |
3055 | 3357 | ||
3056 | /* 4. Enable power well 1 (PG1) and aux IO power. */ | 3358 | /* |
3057 | /* FIXME: ICL power wells code not here yet. */ | 3359 | * 4. Enable Power Well 1 (PG1). |
3360 | * The AUX IO power wells will be enabled on demand. | ||
3361 | */ | ||
3362 | mutex_lock(&power_domains->lock); | ||
3363 | well = lookup_power_well(dev_priv, ICL_DISP_PW_1); | ||
3364 | intel_power_well_enable(dev_priv, well); | ||
3365 | mutex_unlock(&power_domains->lock); | ||
3058 | 3366 | ||
3059 | /* 5. Enable CDCLK. */ | 3367 | /* 5. Enable CDCLK. */ |
3060 | icl_init_cdclk(dev_priv); | 3368 | icl_init_cdclk(dev_priv); |
@@ -3072,6 +3380,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, | |||
3072 | 3380 | ||
3073 | static void icl_display_core_uninit(struct drm_i915_private *dev_priv) | 3381 | static void icl_display_core_uninit(struct drm_i915_private *dev_priv) |
3074 | { | 3382 | { |
3383 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
3384 | struct i915_power_well *well; | ||
3075 | enum port port; | 3385 | enum port port; |
3076 | u32 val; | 3386 | u32 val; |
3077 | 3387 | ||
@@ -3085,8 +3395,15 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) | |||
3085 | /* 3. Disable CD clock */ | 3395 | /* 3. Disable CD clock */ |
3086 | icl_uninit_cdclk(dev_priv); | 3396 | icl_uninit_cdclk(dev_priv); |
3087 | 3397 | ||
3088 | /* 4. Disable Power Well 1 (PG1) and Aux IO Power */ | 3398 | /* |
3089 | /* FIXME: ICL power wells code not here yet. */ | 3399 | * 4. Disable Power Well 1 (PG1). |
3400 | * The AUX IO power wells are toggled on demand, so they are already | ||
3401 | * disabled at this point. | ||
3402 | */ | ||
3403 | mutex_lock(&power_domains->lock); | ||
3404 | well = lookup_power_well(dev_priv, ICL_DISP_PW_1); | ||
3405 | intel_power_well_disable(dev_priv, well); | ||
3406 | mutex_unlock(&power_domains->lock); | ||
3090 | 3407 | ||
3091 | /* 5. Disable Comp */ | 3408 | /* 5. Disable Comp */ |
3092 | for (port = PORT_A; port <= PORT_B; port++) { | 3409 | for (port = PORT_A; port <= PORT_B; port++) { |
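icl_display_core_init() and icl_display_core_uninit() now toggle power well 1 explicitly: they look the well up by its ICL_DISP_PW_1 id and call intel_power_well_enable()/intel_power_well_disable() under power_domains->lock, while the AUX IO wells are left to be enabled on demand. A hedged sketch of what a lookup-by-id over the power-well table typically looks like; the power_wells/power_well_count field names are assumptions for illustration and may not match the driver exactly:

/* sketch only -- assumes the table fields are named as shown */
static struct i915_power_well *
lookup_power_well_sketch(struct i915_power_domains *power_domains,
			 enum i915_power_well_id id)
{
	int i;

	for (i = 0; i < power_domains->power_well_count; i++)
		if (power_domains->power_wells[i].id == id)
			return &power_domains->power_wells[i];

	return NULL;
}
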
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e6a64b3ecd91..396cb59ca4b8 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1340,6 +1340,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, | |||
1340 | switch (crtc_state->pixel_multiplier) { | 1340 | switch (crtc_state->pixel_multiplier) { |
1341 | default: | 1341 | default: |
1342 | WARN(1, "unknown pixel multiplier specified\n"); | 1342 | WARN(1, "unknown pixel multiplier specified\n"); |
1343 | /* fall through */ | ||
1343 | case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; | 1344 | case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; |
1344 | case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; | 1345 | case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; |
1345 | case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; | 1346 | case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; |
@@ -1400,10 +1401,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) | |||
1400 | 1401 | ||
1401 | intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); | 1402 | intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); |
1402 | 1403 | ||
1403 | if (active_outputs & intel_sdvo_connector->output_flag) | 1404 | return active_outputs & intel_sdvo_connector->output_flag; |
1404 | return true; | ||
1405 | else | ||
1406 | return false; | ||
1407 | } | 1405 | } |
1408 | 1406 | ||
1409 | bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, | 1407 | bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, |
@@ -2316,14 +2314,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) | |||
2316 | switch (sdvo->controlled_output) { | 2314 | switch (sdvo->controlled_output) { |
2317 | case SDVO_OUTPUT_LVDS1: | 2315 | case SDVO_OUTPUT_LVDS1: |
2318 | mask |= SDVO_OUTPUT_LVDS1; | 2316 | mask |= SDVO_OUTPUT_LVDS1; |
2317 | /* fall through */ | ||
2319 | case SDVO_OUTPUT_LVDS0: | 2318 | case SDVO_OUTPUT_LVDS0: |
2320 | mask |= SDVO_OUTPUT_LVDS0; | 2319 | mask |= SDVO_OUTPUT_LVDS0; |
2320 | /* fall through */ | ||
2321 | case SDVO_OUTPUT_TMDS1: | 2321 | case SDVO_OUTPUT_TMDS1: |
2322 | mask |= SDVO_OUTPUT_TMDS1; | 2322 | mask |= SDVO_OUTPUT_TMDS1; |
2323 | /* fall through */ | ||
2323 | case SDVO_OUTPUT_TMDS0: | 2324 | case SDVO_OUTPUT_TMDS0: |
2324 | mask |= SDVO_OUTPUT_TMDS0; | 2325 | mask |= SDVO_OUTPUT_TMDS0; |
2326 | /* fall through */ | ||
2325 | case SDVO_OUTPUT_RGB1: | 2327 | case SDVO_OUTPUT_RGB1: |
2326 | mask |= SDVO_OUTPUT_RGB1; | 2328 | mask |= SDVO_OUTPUT_RGB1; |
2329 | /* fall through */ | ||
2327 | case SDVO_OUTPUT_RGB0: | 2330 | case SDVO_OUTPUT_RGB0: |
2328 | mask |= SDVO_OUTPUT_RGB0; | 2331 | mask |= SDVO_OUTPUT_RGB0; |
2329 | break; | 2332 | break; |
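The intel_sdvo.c hunks only annotate intent: intel_sdvo_guess_ddc_bus() deliberately cascades through its switch, each case OR-ing its own output bit into the mask before dropping into the next one, and the new /* fall through */ comments keep -Wimplicit-fallthrough style checkers from flagging it. A stand-alone illustration of the idiom with made-up values, not driver code:

static unsigned int cascade_mask(unsigned int top)
{
	unsigned int mask = 0;

	switch (top) {
	case 8: mask |= 8;	/* fall through */
	case 4: mask |= 4;	/* fall through */
	case 2: mask |= 2;	/* fall through */
	case 1: mask |= 1;
		break;
	default:
		break;
	}

	return mask;	/* top = 4 yields 4 | 2 | 1 = 7 */
}
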
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 344c0e709b19..4990d6e84ddf 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -107,13 +107,21 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) | |||
107 | VBLANK_EVASION_TIME_US); | 107 | VBLANK_EVASION_TIME_US); |
108 | max = vblank_start - 1; | 108 | max = vblank_start - 1; |
109 | 109 | ||
110 | local_irq_disable(); | ||
111 | |||
112 | if (min <= 0 || max <= 0) | 110 | if (min <= 0 || max <= 0) |
113 | return; | 111 | goto irq_disable; |
114 | 112 | ||
115 | if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) | 113 | if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) |
116 | return; | 114 | goto irq_disable; |
115 | |||
116 | /* | ||
117 | * Wait for PSR to idle out after enabling the VBL interrupts. | ||
118 | * VBL interrupts will start the PSR exit and prevent a PSR | ||
119 | * re-entry as well. | ||
120 | */ | ||
121 | if (CAN_PSR(dev_priv) && intel_psr_wait_for_idle(dev_priv)) | ||
122 | DRM_ERROR("PSR idle timed out, atomic update may fail\n"); | ||
123 | |||
124 | local_irq_disable(); | ||
117 | 125 | ||
118 | crtc->debug.min_vbl = min; | 126 | crtc->debug.min_vbl = min; |
119 | crtc->debug.max_vbl = max; | 127 | crtc->debug.max_vbl = max; |
@@ -171,6 +179,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) | |||
171 | crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); | 179 | crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); |
172 | 180 | ||
173 | trace_i915_pipe_update_vblank_evaded(crtc); | 181 | trace_i915_pipe_update_vblank_evaded(crtc); |
182 | return; | ||
183 | |||
184 | irq_disable: | ||
185 | local_irq_disable(); | ||
174 | } | 186 | } |
175 | 187 | ||
176 | /** | 188 | /** |
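The intel_pipe_update_start() change moves local_irq_disable() below the early-exit checks and the newly added PSR idle wait, and funnels every early return through an irq_disable label so the function still hands control back with interrupts off, as the matching intel_pipe_update_end() expects. Reduced to a hedged sketch with invented condition names, assuming the usual kernel headers:

static void pipe_update_start_shape(bool bad_range, bool no_vblank)
{
	if (bad_range)
		goto irq_disable;

	if (no_vblank)
		goto irq_disable;

	/* work that must not run with IRQs off (e.g. the PSR idle wait) goes here */

	local_irq_disable();
	/* vblank evasion proper runs with IRQs off */
	return;

irq_disable:
	local_irq_disable();
}
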
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 94e8863bd97c..7c95697e1a35 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c | |||
@@ -171,24 +171,11 @@ void intel_uc_init_early(struct drm_i915_private *i915) | |||
171 | intel_huc_init_early(huc); | 171 | intel_huc_init_early(huc); |
172 | 172 | ||
173 | sanitize_options_early(i915); | 173 | sanitize_options_early(i915); |
174 | |||
175 | if (USES_GUC(i915)) | ||
176 | intel_uc_fw_fetch(i915, &guc->fw); | ||
177 | |||
178 | if (USES_HUC(i915)) | ||
179 | intel_uc_fw_fetch(i915, &huc->fw); | ||
180 | } | 174 | } |
181 | 175 | ||
182 | void intel_uc_cleanup_early(struct drm_i915_private *i915) | 176 | void intel_uc_cleanup_early(struct drm_i915_private *i915) |
183 | { | 177 | { |
184 | struct intel_guc *guc = &i915->guc; | 178 | struct intel_guc *guc = &i915->guc; |
185 | struct intel_huc *huc = &i915->huc; | ||
186 | |||
187 | if (USES_HUC(i915)) | ||
188 | intel_uc_fw_fini(&huc->fw); | ||
189 | |||
190 | if (USES_GUC(i915)) | ||
191 | intel_uc_fw_fini(&guc->fw); | ||
192 | 179 | ||
193 | guc_free_load_err_log(guc); | 180 | guc_free_load_err_log(guc); |
194 | } | 181 | } |
@@ -252,28 +239,41 @@ static void guc_disable_communication(struct intel_guc *guc) | |||
252 | int intel_uc_init_misc(struct drm_i915_private *i915) | 239 | int intel_uc_init_misc(struct drm_i915_private *i915) |
253 | { | 240 | { |
254 | struct intel_guc *guc = &i915->guc; | 241 | struct intel_guc *guc = &i915->guc; |
242 | struct intel_huc *huc = &i915->huc; | ||
255 | int ret; | 243 | int ret; |
256 | 244 | ||
257 | if (!USES_GUC(i915)) | 245 | if (!USES_GUC(i915)) |
258 | return 0; | 246 | return 0; |
259 | 247 | ||
260 | intel_guc_init_ggtt_pin_bias(guc); | 248 | ret = intel_guc_init_misc(guc); |
261 | |||
262 | ret = intel_guc_init_wq(guc); | ||
263 | if (ret) | 249 | if (ret) |
264 | return ret; | 250 | return ret; |
265 | 251 | ||
252 | if (USES_HUC(i915)) { | ||
253 | ret = intel_huc_init_misc(huc); | ||
254 | if (ret) | ||
255 | goto err_guc; | ||
256 | } | ||
257 | |||
266 | return 0; | 258 | return 0; |
259 | |||
260 | err_guc: | ||
261 | intel_guc_fini_misc(guc); | ||
262 | return ret; | ||
267 | } | 263 | } |
268 | 264 | ||
269 | void intel_uc_fini_misc(struct drm_i915_private *i915) | 265 | void intel_uc_fini_misc(struct drm_i915_private *i915) |
270 | { | 266 | { |
271 | struct intel_guc *guc = &i915->guc; | 267 | struct intel_guc *guc = &i915->guc; |
268 | struct intel_huc *huc = &i915->huc; | ||
272 | 269 | ||
273 | if (!USES_GUC(i915)) | 270 | if (!USES_GUC(i915)) |
274 | return; | 271 | return; |
275 | 272 | ||
276 | intel_guc_fini_wq(guc); | 273 | if (USES_HUC(i915)) |
274 | intel_huc_fini_misc(huc); | ||
275 | |||
276 | intel_guc_fini_misc(guc); | ||
277 | } | 277 | } |
278 | 278 | ||
279 | int intel_uc_init(struct drm_i915_private *i915) | 279 | int intel_uc_init(struct drm_i915_private *i915) |
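intel_uc_init_misc() and intel_uc_fini_misc() now pair GuC and HuC misc setup and teardown, with a HuC failure unwinding the GuC step via the err_guc label. That is the standard kernel ladder-unwind idiom; a self-contained, purely hypothetical sketch (stage_* stand in for calls like intel_guc_init_misc(), they are not driver functions):

#include <linux/errno.h>

static int stage_a_init(void)  { return 0; }
static void stage_a_fini(void) { }
static int stage_b_init(void)  { return -ENOMEM; /* pretend this stage fails */ }

static int ladder_init_sketch(void)
{
	int ret;

	ret = stage_a_init();
	if (ret)
		return ret;

	ret = stage_b_init();
	if (ret)
		goto err_a;	/* unwind exactly what succeeded so far */

	return 0;

err_a:
	stage_a_fini();
	return ret;
}
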
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index fbe4324116d7..d9f439f6219f 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c | |||
@@ -919,12 +919,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) | |||
919 | *cmd++ = val; | 919 | *cmd++ = val; |
920 | } else if (gen >= 4) { | 920 | } else if (gen >= 4) { |
921 | *cmd++ = MI_STORE_DWORD_IMM_GEN4 | | 921 | *cmd++ = MI_STORE_DWORD_IMM_GEN4 | |
922 | (gen < 6 ? 1 << 22 : 0); | 922 | (gen < 6 ? MI_USE_GGTT : 0); |
923 | *cmd++ = 0; | 923 | *cmd++ = 0; |
924 | *cmd++ = offset; | 924 | *cmd++ = offset; |
925 | *cmd++ = val; | 925 | *cmd++ = val; |
926 | } else { | 926 | } else { |
927 | *cmd++ = MI_STORE_DWORD_IMM | 1 << 22; | 927 | *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; |
928 | *cmd++ = offset; | 928 | *cmd++ = offset; |
929 | *cmd++ = val; | 929 | *cmd++ = val; |
930 | } | 930 | } |
@@ -985,7 +985,10 @@ static int gpu_write(struct i915_vma *vma, | |||
985 | goto err_request; | 985 | goto err_request; |
986 | } | 986 | } |
987 | 987 | ||
988 | i915_vma_move_to_active(batch, rq, 0); | 988 | err = i915_vma_move_to_active(batch, rq, 0); |
989 | if (err) | ||
990 | goto err_request; | ||
991 | |||
989 | i915_gem_object_set_active_reference(batch->obj); | 992 | i915_gem_object_set_active_reference(batch->obj); |
990 | i915_vma_unpin(batch); | 993 | i915_vma_unpin(batch); |
991 | i915_vma_close(batch); | 994 | i915_vma_close(batch); |
@@ -996,11 +999,9 @@ static int gpu_write(struct i915_vma *vma, | |||
996 | if (err) | 999 | if (err) |
997 | goto err_request; | 1000 | goto err_request; |
998 | 1001 | ||
999 | i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); | 1002 | err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); |
1000 | 1003 | if (err) | |
1001 | reservation_object_lock(vma->resv, NULL); | 1004 | i915_request_skip(rq, err); |
1002 | reservation_object_add_excl_fence(vma->resv, &rq->fence); | ||
1003 | reservation_object_unlock(vma->resv); | ||
1004 | 1005 | ||
1005 | err_request: | 1006 | err_request: |
1006 | i915_request_add(rq); | 1007 | i915_request_add(rq); |
@@ -1694,7 +1695,7 @@ int i915_gem_huge_page_mock_selftests(void) | |||
1694 | dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); | 1695 | dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); |
1695 | 1696 | ||
1696 | mutex_lock(&dev_priv->drm.struct_mutex); | 1697 | mutex_lock(&dev_priv->drm.struct_mutex); |
1697 | ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock"); | 1698 | ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV)); |
1698 | if (IS_ERR(ppgtt)) { | 1699 | if (IS_ERR(ppgtt)) { |
1699 | err = PTR_ERR(ppgtt); | 1700 | err = PTR_ERR(ppgtt); |
1700 | goto out_unlock; | 1701 | goto out_unlock; |
@@ -1724,7 +1725,7 @@ out_unlock: | |||
1724 | 1725 | ||
1725 | i915_modparams.enable_ppgtt = saved_ppgtt; | 1726 | i915_modparams.enable_ppgtt = saved_ppgtt; |
1726 | 1727 | ||
1727 | drm_dev_unref(&dev_priv->drm); | 1728 | drm_dev_put(&dev_priv->drm); |
1728 | 1729 | ||
1729 | return err; | 1730 | return err; |
1730 | } | 1731 | } |
@@ -1748,6 +1749,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) | |||
1748 | return 0; | 1749 | return 0; |
1749 | } | 1750 | } |
1750 | 1751 | ||
1752 | if (i915_terminally_wedged(&dev_priv->gpu_error)) | ||
1753 | return 0; | ||
1754 | |||
1751 | file = mock_file(dev_priv); | 1755 | file = mock_file(dev_priv); |
1752 | if (IS_ERR(file)) | 1756 | if (IS_ERR(file)) |
1753 | return PTR_ERR(file); | 1757 | return PTR_ERR(file); |
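gpu_write_dw() here, and the matching emitters in the coherency, context and hangcheck selftests below, stop spelling the address-space bit as a bare 1 << 22 and use the named MI_USE_GGTT (gen4+) and MI_MEM_VIRTUAL (older) flags; the emitted dwords are unchanged. A hedged sketch of the shared pre-gen8 emission pattern, assuming the MI_* definitions from the driver's command headers (gen8+ additionally emits the upper address dword, omitted here):

/* illustrative helper, not shared driver code */
static u32 *emit_store_dw(u32 *cs, unsigned int gen, u32 gtt_offset, u32 val)
{
	if (gen >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | (gen < 6 ? MI_USE_GGTT : 0);
		*cs++ = 0;	/* second dword written as zero, matching the selftests */
		*cs++ = gtt_offset;
		*cs++ = val;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = gtt_offset;
		*cs++ = val;
	}

	return cs;
}
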
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index a4900091ae3d..3a095c37c120 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | |||
@@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj, | |||
42 | 42 | ||
43 | page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); | 43 | page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); |
44 | map = kmap_atomic(page); | 44 | map = kmap_atomic(page); |
45 | if (needs_clflush & CLFLUSH_BEFORE) | 45 | |
46 | if (needs_clflush & CLFLUSH_BEFORE) { | ||
47 | mb(); | ||
46 | clflush(map+offset_in_page(offset) / sizeof(*map)); | 48 | clflush(map+offset_in_page(offset) / sizeof(*map)); |
49 | mb(); | ||
50 | } | ||
51 | |||
47 | map[offset_in_page(offset) / sizeof(*map)] = v; | 52 | map[offset_in_page(offset) / sizeof(*map)] = v; |
48 | if (needs_clflush & CLFLUSH_AFTER) | 53 | |
54 | if (needs_clflush & CLFLUSH_AFTER) { | ||
55 | mb(); | ||
49 | clflush(map+offset_in_page(offset) / sizeof(*map)); | 56 | clflush(map+offset_in_page(offset) / sizeof(*map)); |
57 | mb(); | ||
58 | } | ||
59 | |||
50 | kunmap_atomic(map); | 60 | kunmap_atomic(map); |
51 | 61 | ||
52 | i915_gem_obj_finish_shmem_access(obj); | 62 | i915_gem_obj_finish_shmem_access(obj); |
@@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj, | |||
68 | 78 | ||
69 | page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); | 79 | page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); |
70 | map = kmap_atomic(page); | 80 | map = kmap_atomic(page); |
71 | if (needs_clflush & CLFLUSH_BEFORE) | 81 | |
82 | if (needs_clflush & CLFLUSH_BEFORE) { | ||
83 | mb(); | ||
72 | clflush(map+offset_in_page(offset) / sizeof(*map)); | 84 | clflush(map+offset_in_page(offset) / sizeof(*map)); |
85 | mb(); | ||
86 | } | ||
87 | |||
73 | *v = map[offset_in_page(offset) / sizeof(*map)]; | 88 | *v = map[offset_in_page(offset) / sizeof(*map)]; |
74 | kunmap_atomic(map); | 89 | kunmap_atomic(map); |
75 | 90 | ||
@@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj, | |||
210 | *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); | 225 | *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); |
211 | *cs++ = v; | 226 | *cs++ = v; |
212 | } else if (INTEL_GEN(i915) >= 4) { | 227 | } else if (INTEL_GEN(i915) >= 4) { |
213 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; | 228 | *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
214 | *cs++ = 0; | 229 | *cs++ = 0; |
215 | *cs++ = i915_ggtt_offset(vma) + offset; | 230 | *cs++ = i915_ggtt_offset(vma) + offset; |
216 | *cs++ = v; | 231 | *cs++ = v; |
217 | } else { | 232 | } else { |
218 | *cs++ = MI_STORE_DWORD_IMM | 1 << 22; | 233 | *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; |
219 | *cs++ = i915_ggtt_offset(vma) + offset; | 234 | *cs++ = i915_ggtt_offset(vma) + offset; |
220 | *cs++ = v; | 235 | *cs++ = v; |
221 | *cs++ = MI_NOOP; | 236 | *cs++ = MI_NOOP; |
222 | } | 237 | } |
223 | intel_ring_advance(rq, cs); | 238 | intel_ring_advance(rq, cs); |
224 | 239 | ||
225 | i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); | 240 | err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); |
226 | i915_vma_unpin(vma); | 241 | i915_vma_unpin(vma); |
227 | 242 | ||
228 | reservation_object_lock(obj->resv, NULL); | ||
229 | reservation_object_add_excl_fence(obj->resv, &rq->fence); | ||
230 | reservation_object_unlock(obj->resv); | ||
231 | |||
232 | i915_request_add(rq); | 243 | i915_request_add(rq); |
233 | 244 | ||
234 | return 0; | 245 | return err; |
235 | } | 246 | } |
236 | 247 | ||
237 | static bool always_valid(struct drm_i915_private *i915) | 248 | static bool always_valid(struct drm_i915_private *i915) |
@@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915) | |||
239 | return true; | 250 | return true; |
240 | } | 251 | } |
241 | 252 | ||
253 | static bool needs_fence_registers(struct drm_i915_private *i915) | ||
254 | { | ||
255 | return !i915_terminally_wedged(&i915->gpu_error); | ||
256 | } | ||
257 | |||
242 | static bool needs_mi_store_dword(struct drm_i915_private *i915) | 258 | static bool needs_mi_store_dword(struct drm_i915_private *i915) |
243 | { | 259 | { |
260 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
261 | return false; | ||
262 | |||
244 | return intel_engine_can_store_dword(i915->engine[RCS]); | 263 | return intel_engine_can_store_dword(i915->engine[RCS]); |
245 | } | 264 | } |
246 | 265 | ||
@@ -251,7 +270,7 @@ static const struct igt_coherency_mode { | |||
251 | bool (*valid)(struct drm_i915_private *i915); | 270 | bool (*valid)(struct drm_i915_private *i915); |
252 | } igt_coherency_mode[] = { | 271 | } igt_coherency_mode[] = { |
253 | { "cpu", cpu_set, cpu_get, always_valid }, | 272 | { "cpu", cpu_set, cpu_get, always_valid }, |
254 | { "gtt", gtt_set, gtt_get, always_valid }, | 273 | { "gtt", gtt_set, gtt_get, needs_fence_registers }, |
255 | { "wc", wc_set, wc_get, always_valid }, | 274 | { "wc", wc_set, wc_get, always_valid }, |
256 | { "gpu", gpu_set, NULL, needs_mi_store_dword }, | 275 | { "gpu", gpu_set, NULL, needs_mi_store_dword }, |
257 | { }, | 276 | { }, |
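The cpu_set()/cpu_get() coherency helpers now bracket each clflush() with mb(): clflush is not ordered against ordinary loads and stores, so without the barriers the flush could be reordered around the very access it is meant to push to (or pull from) memory. A hypothetical wrapper capturing the pattern, which the selftest open-codes around its kmap_atomic() access:

/* illustrative wrapper -- the driver does not define this */
static inline void clflush_ordered(volatile void *addr)
{
	mb();		/* order earlier accesses before the flush */
	clflush(addr);
	mb();		/* complete the flush before later accesses */
}
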
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 90c3c36173ba..ab2590242033 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c | |||
@@ -63,12 +63,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) | |||
63 | *cmd++ = value; | 63 | *cmd++ = value; |
64 | } else if (gen >= 4) { | 64 | } else if (gen >= 4) { |
65 | *cmd++ = MI_STORE_DWORD_IMM_GEN4 | | 65 | *cmd++ = MI_STORE_DWORD_IMM_GEN4 | |
66 | (gen < 6 ? 1 << 22 : 0); | 66 | (gen < 6 ? MI_USE_GGTT : 0); |
67 | *cmd++ = 0; | 67 | *cmd++ = 0; |
68 | *cmd++ = offset; | 68 | *cmd++ = offset; |
69 | *cmd++ = value; | 69 | *cmd++ = value; |
70 | } else { | 70 | } else { |
71 | *cmd++ = MI_STORE_DWORD_IMM | 1 << 22; | 71 | *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; |
72 | *cmd++ = offset; | 72 | *cmd++ = offset; |
73 | *cmd++ = value; | 73 | *cmd++ = value; |
74 | } | 74 | } |
@@ -170,22 +170,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj, | |||
170 | if (err) | 170 | if (err) |
171 | goto err_request; | 171 | goto err_request; |
172 | 172 | ||
173 | i915_vma_move_to_active(batch, rq, 0); | 173 | err = i915_vma_move_to_active(batch, rq, 0); |
174 | if (err) | ||
175 | goto skip_request; | ||
176 | |||
177 | err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); | ||
178 | if (err) | ||
179 | goto skip_request; | ||
180 | |||
174 | i915_gem_object_set_active_reference(batch->obj); | 181 | i915_gem_object_set_active_reference(batch->obj); |
175 | i915_vma_unpin(batch); | 182 | i915_vma_unpin(batch); |
176 | i915_vma_close(batch); | 183 | i915_vma_close(batch); |
177 | 184 | ||
178 | i915_vma_move_to_active(vma, rq, 0); | ||
179 | i915_vma_unpin(vma); | 185 | i915_vma_unpin(vma); |
180 | 186 | ||
181 | reservation_object_lock(obj->resv, NULL); | ||
182 | reservation_object_add_excl_fence(obj->resv, &rq->fence); | ||
183 | reservation_object_unlock(obj->resv); | ||
184 | |||
185 | i915_request_add(rq); | 187 | i915_request_add(rq); |
186 | 188 | ||
187 | return 0; | 189 | return 0; |
188 | 190 | ||
191 | skip_request: | ||
192 | i915_request_skip(rq, err); | ||
189 | err_request: | 193 | err_request: |
190 | i915_request_add(rq); | 194 | i915_request_add(rq); |
191 | err_batch: | 195 | err_batch: |
@@ -336,11 +340,15 @@ static int igt_ctx_exec(void *arg) | |||
336 | bool first_shared_gtt = true; | 340 | bool first_shared_gtt = true; |
337 | int err = -ENODEV; | 341 | int err = -ENODEV; |
338 | 342 | ||
339 | /* Create a few different contexts (with different mm) and write | 343 | /* |
344 | * Create a few different contexts (with different mm) and write | ||
340 | * through each ctx/mm using the GPU making sure those writes end | 345 | * through each ctx/mm using the GPU making sure those writes end |
341 | * up in the expected pages of our obj. | 346 | * up in the expected pages of our obj. |
342 | */ | 347 | */ |
343 | 348 | ||
349 | if (!DRIVER_CAPS(i915)->has_logical_contexts) | ||
350 | return 0; | ||
351 | |||
344 | file = mock_file(i915); | 352 | file = mock_file(i915); |
345 | if (IS_ERR(file)) | 353 | if (IS_ERR(file)) |
346 | return PTR_ERR(file); | 354 | return PTR_ERR(file); |
@@ -367,6 +375,9 @@ static int igt_ctx_exec(void *arg) | |||
367 | } | 375 | } |
368 | 376 | ||
369 | for_each_engine(engine, i915, id) { | 377 | for_each_engine(engine, i915, id) { |
378 | if (!engine->context_size) | ||
379 | continue; /* No logical context support in HW */ | ||
380 | |||
370 | if (!intel_engine_can_store_dword(engine)) | 381 | if (!intel_engine_can_store_dword(engine)) |
371 | continue; | 382 | continue; |
372 | 383 | ||
@@ -467,7 +478,9 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915, | |||
467 | } | 478 | } |
468 | } | 479 | } |
469 | 480 | ||
470 | err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); | 481 | err = i915_gem_wait_for_idle(i915, |
482 | I915_WAIT_LOCKED, | ||
483 | MAX_SCHEDULE_TIMEOUT); | ||
471 | if (err) | 484 | if (err) |
472 | return err; | 485 | return err; |
473 | 486 | ||
@@ -586,7 +599,7 @@ int i915_gem_context_mock_selftests(void) | |||
586 | 599 | ||
587 | err = i915_subtests(tests, i915); | 600 | err = i915_subtests(tests, i915); |
588 | 601 | ||
589 | drm_dev_unref(&i915->drm); | 602 | drm_dev_put(&i915->drm); |
590 | return err; | 603 | return err; |
591 | } | 604 | } |
592 | 605 | ||
@@ -599,6 +612,9 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) | |||
599 | bool fake_alias = false; | 612 | bool fake_alias = false; |
600 | int err; | 613 | int err; |
601 | 614 | ||
615 | if (i915_terminally_wedged(&dev_priv->gpu_error)) | ||
616 | return 0; | ||
617 | |||
602 | /* Install a fake aliasing gtt for exercise */ | 618 | /* Install a fake aliasing gtt for exercise */ |
603 | if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) { | 619 | if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) { |
604 | mutex_lock(&dev_priv->drm.struct_mutex); | 620 | mutex_lock(&dev_priv->drm.struct_mutex); |
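gpu_fill() now checks i915_vma_move_to_active() and, once a request exists, routes failures through the skip_request label: the request is poisoned with i915_request_skip() but still passed to i915_request_add(), since a constructed request must always be added. The convention as a hedged sketch (helper name invented):

static int finish_or_skip_request(struct i915_request *rq, int err)
{
	if (err)
		i915_request_skip(rq, err);	/* cancel the payload, keep the request */

	i915_request_add(rq);			/* a constructed request is always added */
	return err;
}
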
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index 89dc25a5a53b..a7055b12e53c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | |||
@@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void) | |||
389 | 389 | ||
390 | err = i915_subtests(tests, i915); | 390 | err = i915_subtests(tests, i915); |
391 | 391 | ||
392 | drm_dev_unref(&i915->drm); | 392 | drm_dev_put(&i915->drm); |
393 | return err; | 393 | return err; |
394 | } | 394 | } |
395 | 395 | ||
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 2dc72a984d45..128ad1cf0647 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c | |||
@@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void) | |||
490 | err = i915_subtests(tests, i915); | 490 | err = i915_subtests(tests, i915); |
491 | mutex_unlock(&i915->drm.struct_mutex); | 491 | mutex_unlock(&i915->drm.struct_mutex); |
492 | 492 | ||
493 | drm_dev_unref(&i915->drm); | 493 | drm_dev_put(&i915->drm); |
494 | return err; | 494 | return err; |
495 | } | 495 | } |
496 | 496 | ||
@@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) | |||
500 | SUBTEST(igt_evict_contexts), | 500 | SUBTEST(igt_evict_contexts), |
501 | }; | 501 | }; |
502 | 502 | ||
503 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
504 | return 0; | ||
505 | |||
503 | return i915_subtests(tests, i915); | 506 | return i915_subtests(tests, i915); |
504 | } | 507 | } |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index a4060238bef0..600a3bcbd3d6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | |||
@@ -32,6 +32,20 @@ | |||
32 | #include "mock_drm.h" | 32 | #include "mock_drm.h" |
33 | #include "mock_gem_device.h" | 33 | #include "mock_gem_device.h" |
34 | 34 | ||
35 | static void cleanup_freed_objects(struct drm_i915_private *i915) | ||
36 | { | ||
37 | /* | ||
38 | * As we may hold onto the struct_mutex for inordinate lengths of | ||
39 | * time, the NMI khungtaskd detector may fire for the free objects | ||
40 | * worker. | ||
41 | */ | ||
42 | mutex_unlock(&i915->drm.struct_mutex); | ||
43 | |||
44 | i915_gem_drain_freed_objects(i915); | ||
45 | |||
46 | mutex_lock(&i915->drm.struct_mutex); | ||
47 | } | ||
48 | |||
35 | static void fake_free_pages(struct drm_i915_gem_object *obj, | 49 | static void fake_free_pages(struct drm_i915_gem_object *obj, |
36 | struct sg_table *pages) | 50 | struct sg_table *pages) |
37 | { | 51 | { |
@@ -134,7 +148,7 @@ static int igt_ppgtt_alloc(void *arg) | |||
134 | { | 148 | { |
135 | struct drm_i915_private *dev_priv = arg; | 149 | struct drm_i915_private *dev_priv = arg; |
136 | struct i915_hw_ppgtt *ppgtt; | 150 | struct i915_hw_ppgtt *ppgtt; |
137 | u64 size, last; | 151 | u64 size, last, limit; |
138 | int err = 0; | 152 | int err = 0; |
139 | 153 | ||
140 | /* Allocate a ppgtt and try to fill the entire range */ | 154 | /* Allocate a ppgtt and try to fill the entire range */ |
@@ -142,20 +156,25 @@ static int igt_ppgtt_alloc(void *arg) | |||
142 | if (!USES_PPGTT(dev_priv)) | 156 | if (!USES_PPGTT(dev_priv)) |
143 | return 0; | 157 | return 0; |
144 | 158 | ||
145 | mutex_lock(&dev_priv->drm.struct_mutex); | ||
146 | ppgtt = __hw_ppgtt_create(dev_priv); | 159 | ppgtt = __hw_ppgtt_create(dev_priv); |
147 | if (IS_ERR(ppgtt)) { | 160 | if (IS_ERR(ppgtt)) |
148 | err = PTR_ERR(ppgtt); | 161 | return PTR_ERR(ppgtt); |
149 | goto err_unlock; | ||
150 | } | ||
151 | 162 | ||
152 | if (!ppgtt->vm.allocate_va_range) | 163 | if (!ppgtt->vm.allocate_va_range) |
153 | goto err_ppgtt_cleanup; | 164 | goto err_ppgtt_cleanup; |
154 | 165 | ||
166 | /* | ||
167 | * While we only allocate the page tables here and so we could | ||
168 | * address a much larger GTT than we could actually fit into | ||
169 | * RAM, a practical limit is the number of physical pages in the system. | ||
170 | * This should ensure that we do not run into the oomkiller during | ||
171 | * the test and take down the machine wilfully. | ||
172 | */ | ||
173 | limit = totalram_pages << PAGE_SHIFT; | ||
174 | limit = min(ppgtt->vm.total, limit); | ||
175 | |||
155 | /* Check we can allocate the entire range */ | 176 | /* Check we can allocate the entire range */ |
156 | for (size = 4096; | 177 | for (size = 4096; size <= limit; size <<= 2) { |
157 | size <= ppgtt->vm.total; | ||
158 | size <<= 2) { | ||
159 | err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size); | 178 | err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size); |
160 | if (err) { | 179 | if (err) { |
161 | if (err == -ENOMEM) { | 180 | if (err == -ENOMEM) { |
@@ -166,13 +185,13 @@ static int igt_ppgtt_alloc(void *arg) | |||
166 | goto err_ppgtt_cleanup; | 185 | goto err_ppgtt_cleanup; |
167 | } | 186 | } |
168 | 187 | ||
188 | cond_resched(); | ||
189 | |||
169 | ppgtt->vm.clear_range(&ppgtt->vm, 0, size); | 190 | ppgtt->vm.clear_range(&ppgtt->vm, 0, size); |
170 | } | 191 | } |
171 | 192 | ||
172 | /* Check we can incrementally allocate the entire range */ | 193 | /* Check we can incrementally allocate the entire range */ |
173 | for (last = 0, size = 4096; | 194 | for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) { |
174 | size <= ppgtt->vm.total; | ||
175 | last = size, size <<= 2) { | ||
176 | err = ppgtt->vm.allocate_va_range(&ppgtt->vm, | 195 | err = ppgtt->vm.allocate_va_range(&ppgtt->vm, |
177 | last, size - last); | 196 | last, size - last); |
178 | if (err) { | 197 | if (err) { |
@@ -183,12 +202,13 @@ static int igt_ppgtt_alloc(void *arg) | |||
183 | } | 202 | } |
184 | goto err_ppgtt_cleanup; | 203 | goto err_ppgtt_cleanup; |
185 | } | 204 | } |
205 | |||
206 | cond_resched(); | ||
186 | } | 207 | } |
187 | 208 | ||
188 | err_ppgtt_cleanup: | 209 | err_ppgtt_cleanup: |
189 | ppgtt->vm.cleanup(&ppgtt->vm); | 210 | mutex_lock(&dev_priv->drm.struct_mutex); |
190 | kfree(ppgtt); | 211 | i915_ppgtt_put(ppgtt); |
191 | err_unlock: | ||
192 | mutex_unlock(&dev_priv->drm.struct_mutex); | 212 | mutex_unlock(&dev_priv->drm.struct_mutex); |
193 | return err; | 213 | return err; |
194 | } | 214 | } |
@@ -291,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915, | |||
291 | i915_gem_object_put(obj); | 311 | i915_gem_object_put(obj); |
292 | 312 | ||
293 | kfree(order); | 313 | kfree(order); |
314 | |||
315 | cleanup_freed_objects(i915); | ||
294 | } | 316 | } |
295 | 317 | ||
296 | return 0; | 318 | return 0; |
@@ -519,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915, | |||
519 | } | 541 | } |
520 | 542 | ||
521 | close_object_list(&objects, vm); | 543 | close_object_list(&objects, vm); |
544 | cleanup_freed_objects(i915); | ||
522 | } | 545 | } |
523 | 546 | ||
524 | return 0; | 547 | return 0; |
@@ -605,6 +628,8 @@ err_put: | |||
605 | i915_gem_object_put(obj); | 628 | i915_gem_object_put(obj); |
606 | if (err) | 629 | if (err) |
607 | return err; | 630 | return err; |
631 | |||
632 | cleanup_freed_objects(i915); | ||
608 | } | 633 | } |
609 | 634 | ||
610 | return 0; | 635 | return 0; |
@@ -789,6 +814,8 @@ err_obj: | |||
789 | kfree(order); | 814 | kfree(order); |
790 | if (err) | 815 | if (err) |
791 | return err; | 816 | return err; |
817 | |||
818 | cleanup_freed_objects(i915); | ||
792 | } | 819 | } |
793 | 820 | ||
794 | return 0; | 821 | return 0; |
@@ -857,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915, | |||
857 | } | 884 | } |
858 | 885 | ||
859 | close_object_list(&objects, vm); | 886 | close_object_list(&objects, vm); |
887 | cleanup_freed_objects(i915); | ||
860 | return err; | 888 | return err; |
861 | } | 889 | } |
862 | 890 | ||
@@ -949,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915, | |||
949 | i915_gem_object_put(explode); | 977 | i915_gem_object_put(explode); |
950 | 978 | ||
951 | memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); | 979 | memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); |
980 | cleanup_freed_objects(i915); | ||
952 | } | 981 | } |
953 | 982 | ||
954 | return 0; | 983 | return 0; |
@@ -980,7 +1009,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, | |||
980 | return PTR_ERR(file); | 1009 | return PTR_ERR(file); |
981 | 1010 | ||
982 | mutex_lock(&dev_priv->drm.struct_mutex); | 1011 | mutex_lock(&dev_priv->drm.struct_mutex); |
983 | ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock"); | 1012 | ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv); |
984 | if (IS_ERR(ppgtt)) { | 1013 | if (IS_ERR(ppgtt)) { |
985 | err = PTR_ERR(ppgtt); | 1014 | err = PTR_ERR(ppgtt); |
986 | goto out_unlock; | 1015 | goto out_unlock; |
@@ -1644,7 +1673,7 @@ int i915_gem_gtt_mock_selftests(void) | |||
1644 | err = i915_subtests(tests, i915); | 1673 | err = i915_subtests(tests, i915); |
1645 | mutex_unlock(&i915->drm.struct_mutex); | 1674 | mutex_unlock(&i915->drm.struct_mutex); |
1646 | 1675 | ||
1647 | drm_dev_unref(&i915->drm); | 1676 | drm_dev_put(&i915->drm); |
1648 | return err; | 1677 | return err; |
1649 | } | 1678 | } |
1650 | 1679 | ||
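igt_ppgtt_alloc() now clamps the sizes it walks to the amount of physical memory (allocating page tables for the whole per-process GTT could itself consume more RAM than the machine has), takes struct_mutex only around the final i915_ppgtt_put(), and adds cond_resched() between iterations so the long loops do not trip the stall detectors. The clamp as a hedged helper, reusing the same totalram_pages expression as the hunk:

/* illustrative -- the selftest computes this inline */
static u64 ppgtt_walk_limit(u64 vm_total)
{
	u64 limit = (u64)totalram_pages << PAGE_SHIFT;	/* bytes of physical RAM */

	return min(vm_total, limit);
}
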
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 2b2dde94526f..d77acf4cc439 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c | |||
@@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v) | |||
169 | v += y * tile->width; | 169 | v += y * tile->width; |
170 | v += div64_u64_rem(x, tile->width, &x) << tile->size; | 170 | v += div64_u64_rem(x, tile->width, &x) << tile->size; |
171 | v += x; | 171 | v += x; |
172 | } else { | 172 | } else if (tile->width == 128) { |
173 | const unsigned int ytile_span = 16; | 173 | const unsigned int ytile_span = 16; |
174 | const unsigned int ytile_height = 32 * ytile_span; | 174 | const unsigned int ytile_height = 512; |
175 | |||
176 | v += y * ytile_span; | ||
177 | v += div64_u64_rem(x, ytile_span, &x) * ytile_height; | ||
178 | v += x; | ||
179 | } else { | ||
180 | const unsigned int ytile_span = 32; | ||
181 | const unsigned int ytile_height = 256; | ||
175 | 182 | ||
176 | v += y * ytile_span; | 183 | v += y * ytile_span; |
177 | v += div64_u64_rem(x, ytile_span, &x) * ytile_height; | 184 | v += div64_u64_rem(x, ytile_span, &x) * ytile_height; |
@@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, | |||
288 | kunmap(p); | 295 | kunmap(p); |
289 | if (err) | 296 | if (err) |
290 | return err; | 297 | return err; |
298 | |||
299 | i915_vma_destroy(vma); | ||
291 | } | 300 | } |
292 | 301 | ||
293 | return 0; | 302 | return 0; |
@@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg) | |||
347 | unsigned int pitch; | 356 | unsigned int pitch; |
348 | struct tile tile; | 357 | struct tile tile; |
349 | 358 | ||
359 | if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) | ||
360 | /* | ||
361 | * The swizzling pattern is actually unknown as it | ||
362 | * varies based on the physical address of each page. | ||
363 | * See i915_gem_detect_bit_6_swizzle(). | ||
364 | */ | ||
365 | break; | ||
366 | |||
350 | tile.tiling = tiling; | 367 | tile.tiling = tiling; |
351 | switch (tiling) { | 368 | switch (tiling) { |
352 | case I915_TILING_X: | 369 | case I915_TILING_X: |
@@ -357,8 +374,8 @@ static int igt_partial_tiling(void *arg) | |||
357 | break; | 374 | break; |
358 | } | 375 | } |
359 | 376 | ||
360 | if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN || | 377 | GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN); |
361 | tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) | 378 | if (tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) |
362 | continue; | 379 | continue; |
363 | 380 | ||
364 | if (INTEL_GEN(i915) <= 2) { | 381 | if (INTEL_GEN(i915) <= 2) { |
@@ -454,12 +471,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) | |||
454 | return PTR_ERR(rq); | 471 | return PTR_ERR(rq); |
455 | } | 472 | } |
456 | 473 | ||
457 | i915_vma_move_to_active(vma, rq, 0); | 474 | err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); |
475 | |||
458 | i915_request_add(rq); | 476 | i915_request_add(rq); |
459 | 477 | ||
460 | i915_gem_object_set_active_reference(obj); | 478 | __i915_gem_object_release_unless_active(obj); |
461 | i915_vma_unpin(vma); | 479 | i915_vma_unpin(vma); |
462 | return 0; | 480 | |
481 | return err; | ||
463 | } | 482 | } |
464 | 483 | ||
465 | static bool assert_mmap_offset(struct drm_i915_private *i915, | 484 | static bool assert_mmap_offset(struct drm_i915_private *i915, |
@@ -488,6 +507,15 @@ static int igt_mmap_offset_exhaustion(void *arg) | |||
488 | u64 hole_start, hole_end; | 507 | u64 hole_start, hole_end; |
489 | int loop, err; | 508 | int loop, err; |
490 | 509 | ||
510 | /* Disable background reaper */ | ||
511 | mutex_lock(&i915->drm.struct_mutex); | ||
512 | if (!i915->gt.active_requests++) | ||
513 | i915_gem_unpark(i915); | ||
514 | mutex_unlock(&i915->drm.struct_mutex); | ||
515 | cancel_delayed_work_sync(&i915->gt.retire_work); | ||
516 | cancel_delayed_work_sync(&i915->gt.idle_work); | ||
517 | GEM_BUG_ON(!i915->gt.awake); | ||
518 | |||
491 | /* Trim the device mmap space to only a page */ | 519 | /* Trim the device mmap space to only a page */ |
492 | memset(&resv, 0, sizeof(resv)); | 520 | memset(&resv, 0, sizeof(resv)); |
493 | drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { | 521 | drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { |
@@ -496,7 +524,7 @@ static int igt_mmap_offset_exhaustion(void *arg) | |||
496 | err = drm_mm_reserve_node(mm, &resv); | 524 | err = drm_mm_reserve_node(mm, &resv); |
497 | if (err) { | 525 | if (err) { |
498 | pr_err("Failed to trim VMA manager, err=%d\n", err); | 526 | pr_err("Failed to trim VMA manager, err=%d\n", err); |
499 | return err; | 527 | goto out_park; |
500 | } | 528 | } |
501 | break; | 529 | break; |
502 | } | 530 | } |
@@ -538,6 +566,9 @@ static int igt_mmap_offset_exhaustion(void *arg) | |||
538 | 566 | ||
539 | /* Now fill with busy dead objects that we expect to reap */ | 567 | /* Now fill with busy dead objects that we expect to reap */ |
540 | for (loop = 0; loop < 3; loop++) { | 568 | for (loop = 0; loop < 3; loop++) { |
569 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
570 | break; | ||
571 | |||
541 | obj = i915_gem_object_create_internal(i915, PAGE_SIZE); | 572 | obj = i915_gem_object_create_internal(i915, PAGE_SIZE); |
542 | if (IS_ERR(obj)) { | 573 | if (IS_ERR(obj)) { |
543 | err = PTR_ERR(obj); | 574 | err = PTR_ERR(obj); |
@@ -554,6 +585,7 @@ static int igt_mmap_offset_exhaustion(void *arg) | |||
554 | goto err_obj; | 585 | goto err_obj; |
555 | } | 586 | } |
556 | 587 | ||
588 | /* NB we rely on the _active_ reference to access obj now */ | ||
557 | GEM_BUG_ON(!i915_gem_object_is_active(obj)); | 589 | GEM_BUG_ON(!i915_gem_object_is_active(obj)); |
558 | err = i915_gem_object_create_mmap_offset(obj); | 590 | err = i915_gem_object_create_mmap_offset(obj); |
559 | if (err) { | 591 | if (err) { |
@@ -565,6 +597,13 @@ static int igt_mmap_offset_exhaustion(void *arg) | |||
565 | 597 | ||
566 | out: | 598 | out: |
567 | drm_mm_remove_node(&resv); | 599 | drm_mm_remove_node(&resv); |
600 | out_park: | ||
601 | mutex_lock(&i915->drm.struct_mutex); | ||
602 | if (--i915->gt.active_requests) | ||
603 | queue_delayed_work(i915->wq, &i915->gt.retire_work, 0); | ||
604 | else | ||
605 | queue_delayed_work(i915->wq, &i915->gt.idle_work, 0); | ||
606 | mutex_unlock(&i915->drm.struct_mutex); | ||
568 | return err; | 607 | return err; |
569 | err_obj: | 608 | err_obj: |
570 | i915_gem_object_put(obj); | 609 | i915_gem_object_put(obj); |
@@ -586,7 +625,7 @@ int i915_gem_object_mock_selftests(void) | |||
586 | 625 | ||
587 | err = i915_subtests(tests, i915); | 626 | err = i915_subtests(tests, i915); |
588 | 627 | ||
589 | drm_dev_unref(&i915->drm); | 628 | drm_dev_put(&i915->drm); |
590 | return err; | 629 | return err; |
591 | } | 630 | } |
592 | 631 | ||
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 63cd9486cc13..c4aac6141e04 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c | |||
@@ -262,7 +262,7 @@ int i915_request_mock_selftests(void) | |||
262 | return -ENOMEM; | 262 | return -ENOMEM; |
263 | 263 | ||
264 | err = i915_subtests(tests, i915); | 264 | err = i915_subtests(tests, i915); |
265 | drm_dev_unref(&i915->drm); | 265 | drm_dev_put(&i915->drm); |
266 | 266 | ||
267 | return err; | 267 | return err; |
268 | } | 268 | } |
@@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t, | |||
286 | t->func = func; | 286 | t->func = func; |
287 | t->name = name; | 287 | t->name = name; |
288 | 288 | ||
289 | err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); | 289 | err = i915_gem_wait_for_idle(i915, |
290 | I915_WAIT_LOCKED, | ||
291 | MAX_SCHEDULE_TIMEOUT); | ||
290 | if (err) { | 292 | if (err) { |
291 | pr_err("%s(%s): failed to idle before, with err=%d!", | 293 | pr_err("%s(%s): failed to idle before, with err=%d!", |
292 | func, name, err); | 294 | func, name, err); |
@@ -594,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) | |||
594 | } else if (gen >= 6) { | 596 | } else if (gen >= 6) { |
595 | *cmd++ = MI_BATCH_BUFFER_START | 1 << 8; | 597 | *cmd++ = MI_BATCH_BUFFER_START | 1 << 8; |
596 | *cmd++ = lower_32_bits(vma->node.start); | 598 | *cmd++ = lower_32_bits(vma->node.start); |
597 | } else if (gen >= 4) { | ||
598 | *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; | ||
599 | *cmd++ = lower_32_bits(vma->node.start); | ||
600 | } else { | 599 | } else { |
601 | *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1; | 600 | *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; |
602 | *cmd++ = lower_32_bits(vma->node.start); | 601 | *cmd++ = lower_32_bits(vma->node.start); |
603 | } | 602 | } |
604 | *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ | 603 | *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ |
@@ -678,7 +677,9 @@ static int live_all_engines(void *arg) | |||
678 | i915_gem_object_set_active_reference(batch->obj); | 677 | i915_gem_object_set_active_reference(batch->obj); |
679 | } | 678 | } |
680 | 679 | ||
681 | i915_vma_move_to_active(batch, request[id], 0); | 680 | err = i915_vma_move_to_active(batch, request[id], 0); |
681 | GEM_BUG_ON(err); | ||
682 | |||
682 | i915_request_get(request[id]); | 683 | i915_request_get(request[id]); |
683 | i915_request_add(request[id]); | 684 | i915_request_add(request[id]); |
684 | } | 685 | } |
@@ -788,7 +789,9 @@ static int live_sequential_engines(void *arg) | |||
788 | GEM_BUG_ON(err); | 789 | GEM_BUG_ON(err); |
789 | request[id]->batch = batch; | 790 | request[id]->batch = batch; |
790 | 791 | ||
791 | i915_vma_move_to_active(batch, request[id], 0); | 792 | err = i915_vma_move_to_active(batch, request[id], 0); |
793 | GEM_BUG_ON(err); | ||
794 | |||
792 | i915_gem_object_set_active_reference(batch->obj); | 795 | i915_gem_object_set_active_reference(batch->obj); |
793 | i915_vma_get(batch); | 796 | i915_vma_get(batch); |
794 | 797 | ||
@@ -862,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915) | |||
862 | SUBTEST(live_sequential_engines), | 865 | SUBTEST(live_sequential_engines), |
863 | SUBTEST(live_empty_request), | 866 | SUBTEST(live_empty_request), |
864 | }; | 867 | }; |
868 | |||
869 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
870 | return 0; | ||
871 | |||
865 | return i915_subtests(tests, i915); | 872 | return i915_subtests(tests, i915); |
866 | } | 873 | } |
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 8400a8cc5cf2..ffa74290e054 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c | |||
@@ -733,7 +733,7 @@ int i915_vma_mock_selftests(void) | |||
733 | err = i915_subtests(tests, i915); | 733 | err = i915_subtests(tests, i915); |
734 | mutex_unlock(&i915->drm.struct_mutex); | 734 | mutex_unlock(&i915->drm.struct_mutex); |
735 | 735 | ||
736 | drm_dev_unref(&i915->drm); | 736 | drm_dev_put(&i915->drm); |
737 | return err; | 737 | return err; |
738 | } | 738 | } |
739 | 739 | ||
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index 0d06f559243f..af66e3d4e23a 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c | |||
@@ -9,52 +9,8 @@ | |||
9 | #include "../i915_selftest.h" | 9 | #include "../i915_selftest.h" |
10 | #include "igt_flush_test.h" | 10 | #include "igt_flush_test.h" |
11 | 11 | ||
12 | struct wedge_me { | ||
13 | struct delayed_work work; | ||
14 | struct drm_i915_private *i915; | ||
15 | const void *symbol; | ||
16 | }; | ||
17 | |||
18 | static void wedge_me(struct work_struct *work) | ||
19 | { | ||
20 | struct wedge_me *w = container_of(work, typeof(*w), work.work); | ||
21 | |||
22 | pr_err("%pS timed out, cancelling all further testing.\n", w->symbol); | ||
23 | |||
24 | GEM_TRACE("%pS timed out.\n", w->symbol); | ||
25 | GEM_TRACE_DUMP(); | ||
26 | |||
27 | i915_gem_set_wedged(w->i915); | ||
28 | } | ||
29 | |||
30 | static void __init_wedge(struct wedge_me *w, | ||
31 | struct drm_i915_private *i915, | ||
32 | long timeout, | ||
33 | const void *symbol) | ||
34 | { | ||
35 | w->i915 = i915; | ||
36 | w->symbol = symbol; | ||
37 | |||
38 | INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); | ||
39 | schedule_delayed_work(&w->work, timeout); | ||
40 | } | ||
41 | |||
42 | static void __fini_wedge(struct wedge_me *w) | ||
43 | { | ||
44 | cancel_delayed_work_sync(&w->work); | ||
45 | destroy_delayed_work_on_stack(&w->work); | ||
46 | w->i915 = NULL; | ||
47 | } | ||
48 | |||
49 | #define wedge_on_timeout(W, DEV, TIMEOUT) \ | ||
50 | for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \ | ||
51 | (W)->i915; \ | ||
52 | __fini_wedge((W))) | ||
53 | |||
54 | int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) | 12 | int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) |
55 | { | 13 | { |
56 | struct wedge_me w; | ||
57 | |||
58 | cond_resched(); | 14 | cond_resched(); |
59 | 15 | ||
60 | if (flags & I915_WAIT_LOCKED && | 16 | if (flags & I915_WAIT_LOCKED && |
@@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) | |||
63 | i915_gem_set_wedged(i915); | 19 | i915_gem_set_wedged(i915); |
64 | } | 20 | } |
65 | 21 | ||
66 | wedge_on_timeout(&w, i915, HZ) | 22 | if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) { |
67 | i915_gem_wait_for_idle(i915, flags); | 23 | pr_err("%pS timed out, cancelling all further testing.\n", |
24 | __builtin_return_address(0)); | ||
25 | |||
26 | GEM_TRACE("%pS timed out.\n", __builtin_return_address(0)); | ||
27 | GEM_TRACE_DUMP(); | ||
28 | |||
29 | i915_gem_set_wedged(i915); | ||
30 | } | ||
68 | 31 | ||
69 | return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0; | 32 | return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0; |
70 | } | 33 | } |
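i915_gem_wait_for_idle() takes an explicit timeout from here on: begin_live_test() and the context selftest pass MAX_SCHEDULE_TIMEOUT to keep waiting indefinitely, while igt_flush_test() waits only HZ / 5 and declares the GPU wedged on -ETIME, which is what lets it drop the old wedge_me delayed-work watchdog entirely. Condensed into a hedged sketch of the new flush shape (logging trimmed):

static int flush_or_wedge(struct drm_i915_private *i915, unsigned int flags)
{
	if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);	/* give up and mark the device wedged */
	}

	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
}
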
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c index d6926e7820e5..f03b407fdbe2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | |||
@@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void) | |||
464 | return -ENOMEM; | 464 | return -ENOMEM; |
465 | 465 | ||
466 | err = i915_subtests(tests, i915->engine[RCS]); | 466 | err = i915_subtests(tests, i915->engine[RCS]); |
467 | drm_dev_unref(&i915->drm); | 467 | drm_dev_put(&i915->drm); |
468 | 468 | ||
469 | return err; | 469 | return err; |
470 | } | 470 | } |
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index fe7d3190ebfe..73462a65a330 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c | |||
@@ -130,13 +130,19 @@ static int emit_recurse_batch(struct hang *h, | |||
130 | if (err) | 130 | if (err) |
131 | goto unpin_vma; | 131 | goto unpin_vma; |
132 | 132 | ||
133 | i915_vma_move_to_active(vma, rq, 0); | 133 | err = i915_vma_move_to_active(vma, rq, 0); |
134 | if (err) | ||
135 | goto unpin_hws; | ||
136 | |||
134 | if (!i915_gem_object_has_active_reference(vma->obj)) { | 137 | if (!i915_gem_object_has_active_reference(vma->obj)) { |
135 | i915_gem_object_get(vma->obj); | 138 | i915_gem_object_get(vma->obj); |
136 | i915_gem_object_set_active_reference(vma->obj); | 139 | i915_gem_object_set_active_reference(vma->obj); |
137 | } | 140 | } |
138 | 141 | ||
139 | i915_vma_move_to_active(hws, rq, 0); | 142 | err = i915_vma_move_to_active(hws, rq, 0); |
143 | if (err) | ||
144 | goto unpin_hws; | ||
145 | |||
140 | if (!i915_gem_object_has_active_reference(hws->obj)) { | 146 | if (!i915_gem_object_has_active_reference(hws->obj)) { |
141 | i915_gem_object_get(hws->obj); | 147 | i915_gem_object_get(hws->obj); |
142 | i915_gem_object_set_active_reference(hws->obj); | 148 | i915_gem_object_set_active_reference(hws->obj); |
@@ -171,7 +177,7 @@ static int emit_recurse_batch(struct hang *h, | |||
171 | *batch++ = MI_BATCH_BUFFER_START | 1 << 8; | 177 | *batch++ = MI_BATCH_BUFFER_START | 1 << 8; |
172 | *batch++ = lower_32_bits(vma->node.start); | 178 | *batch++ = lower_32_bits(vma->node.start); |
173 | } else if (INTEL_GEN(i915) >= 4) { | 179 | } else if (INTEL_GEN(i915) >= 4) { |
174 | *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; | 180 | *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; |
175 | *batch++ = 0; | 181 | *batch++ = 0; |
176 | *batch++ = lower_32_bits(hws_address(hws, rq)); | 182 | *batch++ = lower_32_bits(hws_address(hws, rq)); |
177 | *batch++ = rq->fence.seqno; | 183 | *batch++ = rq->fence.seqno; |
@@ -184,7 +190,7 @@ static int emit_recurse_batch(struct hang *h, | |||
184 | *batch++ = MI_BATCH_BUFFER_START | 2 << 6; | 190 | *batch++ = MI_BATCH_BUFFER_START | 2 << 6; |
185 | *batch++ = lower_32_bits(vma->node.start); | 191 | *batch++ = lower_32_bits(vma->node.start); |
186 | } else { | 192 | } else { |
187 | *batch++ = MI_STORE_DWORD_IMM; | 193 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; |
188 | *batch++ = lower_32_bits(hws_address(hws, rq)); | 194 | *batch++ = lower_32_bits(hws_address(hws, rq)); |
189 | *batch++ = rq->fence.seqno; | 195 | *batch++ = rq->fence.seqno; |
190 | *batch++ = MI_ARB_CHECK; | 196 | *batch++ = MI_ARB_CHECK; |
@@ -193,7 +199,7 @@ static int emit_recurse_batch(struct hang *h, | |||
193 | batch += 1024 / sizeof(*batch); | 199 | batch += 1024 / sizeof(*batch); |
194 | 200 | ||
195 | *batch++ = MI_ARB_CHECK; | 201 | *batch++ = MI_ARB_CHECK; |
196 | *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1; | 202 | *batch++ = MI_BATCH_BUFFER_START | 2 << 6; |
197 | *batch++ = lower_32_bits(vma->node.start); | 203 | *batch++ = lower_32_bits(vma->node.start); |
198 | } | 204 | } |
199 | *batch++ = MI_BATCH_BUFFER_END; /* not reached */ | 205 | *batch++ = MI_BATCH_BUFFER_END; /* not reached */ |
@@ -205,6 +211,7 @@ static int emit_recurse_batch(struct hang *h, | |||
205 | 211 | ||
206 | err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); | 212 | err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); |
207 | 213 | ||
214 | unpin_hws: | ||
208 | i915_vma_unpin(hws); | 215 | i915_vma_unpin(hws); |
209 | unpin_vma: | 216 | unpin_vma: |
210 | i915_vma_unpin(vma); | 217 | i915_vma_unpin(vma); |
@@ -1243,6 +1250,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) | |||
1243 | if (!intel_has_gpu_reset(i915)) | 1250 | if (!intel_has_gpu_reset(i915)) |
1244 | return 0; | 1251 | return 0; |
1245 | 1252 | ||
1253 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
1254 | return -EIO; /* we're long past hope of a successful reset */ | ||
1255 | |||
1246 | intel_runtime_pm_get(i915); | 1256 | intel_runtime_pm_get(i915); |
1247 | saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); | 1257 | saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); |
1248 | 1258 | ||
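Two patterns from the intel_hangcheck hunks recur throughout this merge: i915_vma_move_to_active() now returns an error that must be checked (hence the new unpin_hws unwind label), and the live suites test for a terminally wedged GPU before doing any work. The unwind shape, reduced to a self-contained illustration (the function names here are placeholders, not i915 API):

	#include <stdio.h>

	/* Illustrative stand-ins for pin/activate steps that may fail. */
	static int pin(const char *what)      { printf("pin %s\n", what); return 0; }
	static void unpin(const char *what)   { printf("unpin %s\n", what); }
	static int activate(const char *what) { printf("activate %s\n", what); return 0; }

	static int emit_batch(void)
	{
		int err;

		err = pin("vma");
		if (err)
			return err;
		err = pin("hws");
		if (err)
			goto unpin_vma;

		err = activate("vma");	/* checked, like i915_vma_move_to_active() */
		if (err)
			goto unpin_hws;
		err = activate("hws");
		if (err)
			goto unpin_hws;

		/* ... emit the batch here; err carries its result ... */

	unpin_hws:
		unpin("hws");	/* reached on success and on late failure alike */
	unpin_vma:
		unpin("vma");
		return err;
	}

	int main(void) { return emit_batch(); }

As in the kernel code, the labels double as the success path: both buffers are unpinned whether or not emission failed. Note the differing bail-out semantics for a wedged device: intel_hangcheck_live_selftests() returns -EIO because a working reset is exactly what it is testing, while the execlists and workarounds suites below simply return 0 and skip.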
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index ea27c7cfbf96..636cb68191e3 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c | |||
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin, | |||
104 | if (err) | 104 | if (err) |
105 | goto unpin_vma; | 105 | goto unpin_vma; |
106 | 106 | ||
107 | i915_vma_move_to_active(vma, rq, 0); | 107 | err = i915_vma_move_to_active(vma, rq, 0); |
108 | if (err) | ||
109 | goto unpin_hws; | ||
110 | |||
108 | if (!i915_gem_object_has_active_reference(vma->obj)) { | 111 | if (!i915_gem_object_has_active_reference(vma->obj)) { |
109 | i915_gem_object_get(vma->obj); | 112 | i915_gem_object_get(vma->obj); |
110 | i915_gem_object_set_active_reference(vma->obj); | 113 | i915_gem_object_set_active_reference(vma->obj); |
111 | } | 114 | } |
112 | 115 | ||
113 | i915_vma_move_to_active(hws, rq, 0); | 116 | err = i915_vma_move_to_active(hws, rq, 0); |
117 | if (err) | ||
118 | goto unpin_hws; | ||
119 | |||
114 | if (!i915_gem_object_has_active_reference(hws->obj)) { | 120 | if (!i915_gem_object_has_active_reference(hws->obj)) { |
115 | i915_gem_object_get(hws->obj); | 121 | i915_gem_object_get(hws->obj); |
116 | i915_gem_object_set_active_reference(hws->obj); | 122 | i915_gem_object_set_active_reference(hws->obj); |
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin, | |||
134 | 140 | ||
135 | err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); | 141 | err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); |
136 | 142 | ||
143 | unpin_hws: | ||
137 | i915_vma_unpin(hws); | 144 | i915_vma_unpin(hws); |
138 | unpin_vma: | 145 | unpin_vma: |
139 | i915_vma_unpin(vma); | 146 | i915_vma_unpin(vma); |
@@ -455,5 +462,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) | |||
455 | if (!HAS_EXECLISTS(i915)) | 462 | if (!HAS_EXECLISTS(i915)) |
456 | return 0; | 463 | return 0; |
457 | 464 | ||
465 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
466 | return 0; | ||
467 | |||
458 | return i915_subtests(tests, i915); | 468 | return i915_subtests(tests, i915); |
459 | } | 469 | } |
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index e1ea2d2bedd2..fafdec3fe83e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c | |||
@@ -49,6 +49,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) | |||
49 | goto err_pin; | 49 | goto err_pin; |
50 | } | 50 | } |
51 | 51 | ||
52 | err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); | ||
53 | if (err) | ||
54 | goto err_req; | ||
55 | |||
52 | srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; | 56 | srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; |
53 | if (INTEL_GEN(ctx->i915) >= 8) | 57 | if (INTEL_GEN(ctx->i915) >= 8) |
54 | srm++; | 58 | srm++; |
@@ -67,11 +71,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) | |||
67 | } | 71 | } |
68 | intel_ring_advance(rq, cs); | 72 | intel_ring_advance(rq, cs); |
69 | 73 | ||
70 | i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); | ||
71 | reservation_object_lock(vma->resv, NULL); | ||
72 | reservation_object_add_excl_fence(vma->resv, &rq->fence); | ||
73 | reservation_object_unlock(vma->resv); | ||
74 | |||
75 | i915_gem_object_get(result); | 74 | i915_gem_object_get(result); |
76 | i915_gem_object_set_active_reference(result); | 75 | i915_gem_object_set_active_reference(result); |
77 | 76 | ||
@@ -283,6 +282,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) | |||
283 | }; | 282 | }; |
284 | int err; | 283 | int err; |
285 | 284 | ||
285 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
286 | return 0; | ||
287 | |||
286 | mutex_lock(&i915->drm.struct_mutex); | 288 | mutex_lock(&i915->drm.struct_mutex); |
287 | err = i915_subtests(tests, i915); | 289 | err = i915_subtests(tests, i915); |
288 | mutex_unlock(&i915->drm.struct_mutex); | 290 | mutex_unlock(&i915->drm.struct_mutex); |
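In the intel_workarounds hunks the write tracking also moves: read_nonprivs() now calls i915_vma_move_to_active() with EXEC_OBJECT_WRITE before any commands are emitted and checks the result, and the hand-rolled reservation_object lock / add_excl_fence / unlock sequence disappears, since in this series the move-to-active call itself takes care of attaching the request's fence for a write. Schematic of the new ordering (i915-internal API, not buildable standalone):

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);	/* marks result as written by rq */
	if (err)
		goto err_req;

	/* ... emit the MI_STORE_REGISTER_MEM commands into the ring ... */

	i915_gem_object_get(result);
	i915_gem_object_set_active_reference(result);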
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index c2a0451336cf..22a73da45ad5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c | |||
@@ -200,6 +200,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, | |||
200 | engine->base.submit_request = mock_submit_request; | 200 | engine->base.submit_request = mock_submit_request; |
201 | 201 | ||
202 | i915_timeline_init(i915, &engine->base.timeline, engine->base.name); | 202 | i915_timeline_init(i915, &engine->base.timeline, engine->base.name); |
203 | lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE); | ||
204 | |||
203 | intel_engine_init_breadcrumbs(&engine->base); | 205 | intel_engine_init_breadcrumbs(&engine->base); |
204 | engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ | 206 | engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ |
205 | 207 | ||
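The mock engine now tags its timeline lock with the TIMELINE_ENGINE lockdep subclass, matching the real engine setup, so lockdep can tell an engine timeline apart from other timeline locks that share the same class and does not flag a false self-deadlock when two of them are held together. The idiom, as used above (kernel API, not buildable standalone; the subclass name comes from the i915 timeline code):

	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
	/* All i915 timelines share one lock class; the subclass records
	 * that this one is an engine timeline for nesting purposes. */
	lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);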
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index c97075c5ccaf..43ed8b28aeaa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c | |||
@@ -157,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void) | |||
157 | dev_pm_domain_set(&pdev->dev, &pm_domain); | 157 | dev_pm_domain_set(&pdev->dev, &pm_domain); |
158 | pm_runtime_enable(&pdev->dev); | 158 | pm_runtime_enable(&pdev->dev); |
159 | pm_runtime_dont_use_autosuspend(&pdev->dev); | 159 | pm_runtime_dont_use_autosuspend(&pdev->dev); |
160 | WARN_ON(pm_runtime_get_sync(&pdev->dev)); | 160 | if (pm_runtime_enabled(&pdev->dev)) |
161 | WARN_ON(pm_runtime_get_sync(&pdev->dev)); | ||
161 | 162 | ||
162 | i915 = (struct drm_i915_private *)(pdev + 1); | 163 | i915 = (struct drm_i915_private *)(pdev + 1); |
163 | pci_set_drvdata(pdev, i915); | 164 | pci_set_drvdata(pdev, i915); |
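Guarding pm_runtime_get_sync() with pm_runtime_enabled() keeps the mock device from tripping the WARN on configurations where runtime PM is unavailable (for example CONFIG_PM=n), where the get call returns a non-zero value even though nothing is wrong. Schematic (kernel API, not buildable standalone):

	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (pm_runtime_enabled(&pdev->dev))	/* false when runtime PM is compiled out or disabled */
		WARN_ON(pm_runtime_get_sync(&pdev->dev));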
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 6a7f4da7b523..a140ea5c3a7c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c | |||
@@ -70,12 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915, | |||
70 | ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); | 70 | ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); |
71 | ppgtt->vm.file = ERR_PTR(-ENODEV); | 71 | ppgtt->vm.file = ERR_PTR(-ENODEV); |
72 | 72 | ||
73 | INIT_LIST_HEAD(&ppgtt->vm.active_list); | 73 | i915_address_space_init(&ppgtt->vm, i915); |
74 | INIT_LIST_HEAD(&ppgtt->vm.inactive_list); | ||
75 | INIT_LIST_HEAD(&ppgtt->vm.unbound_list); | ||
76 | |||
77 | INIT_LIST_HEAD(&ppgtt->vm.global_link); | ||
78 | drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total); | ||
79 | 74 | ||
80 | ppgtt->vm.clear_range = nop_clear_range; | 75 | ppgtt->vm.clear_range = nop_clear_range; |
81 | ppgtt->vm.insert_page = mock_insert_page; | 76 | ppgtt->vm.insert_page = mock_insert_page; |
@@ -106,8 +101,6 @@ void mock_init_ggtt(struct drm_i915_private *i915) | |||
106 | { | 101 | { |
107 | struct i915_ggtt *ggtt = &i915->ggtt; | 102 | struct i915_ggtt *ggtt = &i915->ggtt; |
108 | 103 | ||
109 | INIT_LIST_HEAD(&i915->vm_list); | ||
110 | |||
111 | ggtt->vm.i915 = i915; | 104 | ggtt->vm.i915 = i915; |
112 | 105 | ||
113 | ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); | 106 | ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); |
@@ -124,7 +117,7 @@ void mock_init_ggtt(struct drm_i915_private *i915) | |||
124 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; | 117 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; |
125 | ggtt->vm.vma_ops.clear_pages = clear_pages; | 118 | ggtt->vm.vma_ops.clear_pages = clear_pages; |
126 | 119 | ||
127 | i915_address_space_init(&ggtt->vm, i915, "global"); | 120 | i915_address_space_init(&ggtt->vm, i915); |
128 | } | 121 | } |
129 | 122 | ||
130 | void mock_fini_ggtt(struct drm_i915_private *i915) | 123 | void mock_fini_ggtt(struct drm_i915_private *i915) |
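The mock GTT/PPGTT setup stops open-coding the per-VM bookkeeping (the active/inactive/unbound list heads, global_link and drm_mm init) and instead relies on i915_address_space_init(), whose signature in this series also loses its name argument; keeping the mock path on the common helper means new per-VM state only has to be initialised in one place. Schematic (i915-internal API, not buildable standalone):

	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
	ppgtt->vm.file = ERR_PTR(-ENODEV);
	i915_address_space_init(&ppgtt->vm, i915);	/* list heads, drm_mm, etc. */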
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 3b7acb5a70b3..435a2c35ee8c 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c | |||
@@ -69,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) | |||
69 | } | 69 | } |
70 | } | 70 | } |
71 | 71 | ||
72 | void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) | 72 | void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) |
73 | { | 73 | { |
74 | struct drm_encoder *encoder = &intel_dsi->base.base; | 74 | struct drm_encoder *encoder = &intel_dsi->base.base; |
75 | struct drm_device *dev = encoder->dev; | 75 | struct drm_device *dev = encoder->dev; |
@@ -342,11 +342,15 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, | |||
342 | pipe_config->cpu_transcoder = TRANSCODER_DSI_C; | 342 | pipe_config->cpu_transcoder = TRANSCODER_DSI_C; |
343 | else | 343 | else |
344 | pipe_config->cpu_transcoder = TRANSCODER_DSI_A; | 344 | pipe_config->cpu_transcoder = TRANSCODER_DSI_A; |
345 | } | ||
346 | 345 | ||
347 | ret = intel_compute_dsi_pll(encoder, pipe_config); | 346 | ret = bxt_dsi_pll_compute(encoder, pipe_config); |
348 | if (ret) | 347 | if (ret) |
349 | return false; | 348 | return false; |
349 | } else { | ||
350 | ret = vlv_dsi_pll_compute(encoder, pipe_config); | ||
351 | if (ret) | ||
352 | return false; | ||
353 | } | ||
350 | 354 | ||
351 | pipe_config->clock_set = true; | 355 | pipe_config->clock_set = true; |
352 | 356 | ||
@@ -546,12 +550,12 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) | |||
546 | { | 550 | { |
547 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 551 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
548 | 552 | ||
549 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 553 | if (IS_GEMINILAKE(dev_priv)) |
550 | vlv_dsi_device_ready(encoder); | ||
551 | else if (IS_BROXTON(dev_priv)) | ||
552 | bxt_dsi_device_ready(encoder); | ||
553 | else if (IS_GEMINILAKE(dev_priv)) | ||
554 | glk_dsi_device_ready(encoder); | 554 | glk_dsi_device_ready(encoder); |
555 | else if (IS_GEN9_LP(dev_priv)) | ||
556 | bxt_dsi_device_ready(encoder); | ||
557 | else | ||
558 | vlv_dsi_device_ready(encoder); | ||
555 | } | 559 | } |
556 | 560 | ||
557 | static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) | 561 | static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) |
@@ -810,8 +814,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, | |||
810 | * The BIOS may leave the PLL in a wonky state where it doesn't | 814 | * The BIOS may leave the PLL in a wonky state where it doesn't |
811 | * lock. It needs to be fully powered down to fix it. | 815 | * lock. It needs to be fully powered down to fix it. |
812 | */ | 816 | */ |
813 | intel_disable_dsi_pll(encoder); | 817 | if (IS_GEN9_LP(dev_priv)) { |
814 | intel_enable_dsi_pll(encoder, pipe_config); | 818 | bxt_dsi_pll_disable(encoder); |
819 | bxt_dsi_pll_enable(encoder, pipe_config); | ||
820 | } else { | ||
821 | vlv_dsi_pll_disable(encoder); | ||
822 | vlv_dsi_pll_enable(encoder, pipe_config); | ||
823 | } | ||
815 | 824 | ||
816 | if (IS_BROXTON(dev_priv)) { | 825 | if (IS_BROXTON(dev_priv)) { |
817 | /* Add MIPI IO reset programming for modeset */ | 826 | /* Add MIPI IO reset programming for modeset */ |
@@ -929,11 +938,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) | |||
929 | { | 938 | { |
930 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 939 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
931 | 940 | ||
932 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || | 941 | if (IS_GEMINILAKE(dev_priv)) |
933 | IS_BROXTON(dev_priv)) | ||
934 | vlv_dsi_clear_device_ready(encoder); | ||
935 | else if (IS_GEMINILAKE(dev_priv)) | ||
936 | glk_dsi_clear_device_ready(encoder); | 942 | glk_dsi_clear_device_ready(encoder); |
943 | else | ||
944 | vlv_dsi_clear_device_ready(encoder); | ||
937 | } | 945 | } |
938 | 946 | ||
939 | static void intel_dsi_post_disable(struct intel_encoder *encoder, | 947 | static void intel_dsi_post_disable(struct intel_encoder *encoder, |
@@ -949,7 +957,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, | |||
949 | 957 | ||
950 | if (is_vid_mode(intel_dsi)) { | 958 | if (is_vid_mode(intel_dsi)) { |
951 | for_each_dsi_port(port, intel_dsi->ports) | 959 | for_each_dsi_port(port, intel_dsi->ports) |
952 | wait_for_dsi_fifo_empty(intel_dsi, port); | 960 | vlv_dsi_wait_for_fifo_empty(intel_dsi, port); |
953 | 961 | ||
954 | intel_dsi_port_disable(encoder); | 962 | intel_dsi_port_disable(encoder); |
955 | usleep_range(2000, 5000); | 963 | usleep_range(2000, 5000); |
@@ -979,11 +987,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, | |||
979 | val & ~MIPIO_RST_CTRL); | 987 | val & ~MIPIO_RST_CTRL); |
980 | } | 988 | } |
981 | 989 | ||
982 | intel_disable_dsi_pll(encoder); | 990 | if (IS_GEN9_LP(dev_priv)) { |
983 | 991 | bxt_dsi_pll_disable(encoder); | |
984 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 992 | } else { |
985 | u32 val; | 993 | u32 val; |
986 | 994 | ||
995 | vlv_dsi_pll_disable(encoder); | ||
996 | |||
987 | val = I915_READ(DSPCLK_GATE_D); | 997 | val = I915_READ(DSPCLK_GATE_D); |
988 | val &= ~DPOUNIT_CLOCK_GATE_DISABLE; | 998 | val &= ~DPOUNIT_CLOCK_GATE_DISABLE; |
989 | I915_WRITE(DSPCLK_GATE_D, val); | 999 | I915_WRITE(DSPCLK_GATE_D, val); |
@@ -1024,7 +1034,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | |||
1024 | * configuration, otherwise accessing DSI registers will hang the | 1034 | * configuration, otherwise accessing DSI registers will hang the |
1025 | * machine. See BSpec North Display Engine registers/MIPI[BXT]. | 1035 | * machine. See BSpec North Display Engine registers/MIPI[BXT]. |
1026 | */ | 1036 | */ |
1027 | if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) | 1037 | if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv)) |
1028 | goto out_put_power; | 1038 | goto out_put_power; |
1029 | 1039 | ||
1030 | /* XXX: this only works for one DSI output */ | 1040 | /* XXX: this only works for one DSI output */ |
@@ -1247,16 +1257,19 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, | |||
1247 | 1257 | ||
1248 | pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); | 1258 | pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); |
1249 | 1259 | ||
1250 | if (IS_GEN9_LP(dev_priv)) | 1260 | if (IS_GEN9_LP(dev_priv)) { |
1251 | bxt_dsi_get_pipe_config(encoder, pipe_config); | 1261 | bxt_dsi_get_pipe_config(encoder, pipe_config); |
1262 | pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp, | ||
1263 | pipe_config); | ||
1264 | } else { | ||
1265 | pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp, | ||
1266 | pipe_config); | ||
1267 | } | ||
1252 | 1268 | ||
1253 | pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp, | 1269 | if (pclk) { |
1254 | pipe_config); | 1270 | pipe_config->base.adjusted_mode.crtc_clock = pclk; |
1255 | if (!pclk) | 1271 | pipe_config->port_clock = pclk; |
1256 | return; | 1272 | } |
1257 | |||
1258 | pipe_config->base.adjusted_mode.crtc_clock = pclk; | ||
1259 | pipe_config->port_clock = pclk; | ||
1260 | } | 1273 | } |
1261 | 1274 | ||
1262 | static enum drm_mode_status | 1275 | static enum drm_mode_status |
@@ -1585,20 +1598,24 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) | |||
1585 | enum port port; | 1598 | enum port port; |
1586 | u32 val; | 1599 | u32 val; |
1587 | 1600 | ||
1588 | if (!IS_GEMINILAKE(dev_priv)) { | 1601 | if (IS_GEMINILAKE(dev_priv)) |
1589 | for_each_dsi_port(port, intel_dsi->ports) { | 1602 | return; |
1590 | /* Panel commands can be sent when clock is in LP11 */ | ||
1591 | I915_WRITE(MIPI_DEVICE_READY(port), 0x0); | ||
1592 | 1603 | ||
1593 | intel_dsi_reset_clocks(encoder, port); | 1604 | for_each_dsi_port(port, intel_dsi->ports) { |
1594 | I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); | 1605 | /* Panel commands can be sent when clock is in LP11 */ |
1606 | I915_WRITE(MIPI_DEVICE_READY(port), 0x0); | ||
1595 | 1607 | ||
1596 | val = I915_READ(MIPI_DSI_FUNC_PRG(port)); | 1608 | if (IS_GEN9_LP(dev_priv)) |
1597 | val &= ~VID_MODE_FORMAT_MASK; | 1609 | bxt_dsi_reset_clocks(encoder, port); |
1598 | I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); | 1610 | else |
1611 | vlv_dsi_reset_clocks(encoder, port); | ||
1612 | I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); | ||
1599 | 1613 | ||
1600 | I915_WRITE(MIPI_DEVICE_READY(port), 0x1); | 1614 | val = I915_READ(MIPI_DSI_FUNC_PRG(port)); |
1601 | } | 1615 | val &= ~VID_MODE_FORMAT_MASK; |
1616 | I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); | ||
1617 | |||
1618 | I915_WRITE(MIPI_DEVICE_READY(port), 0x1); | ||
1602 | } | 1619 | } |
1603 | } | 1620 | } |
1604 | 1621 | ||
@@ -1713,7 +1730,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector) | |||
1713 | } | 1730 | } |
1714 | } | 1731 | } |
1715 | 1732 | ||
1716 | void intel_dsi_init(struct drm_i915_private *dev_priv) | 1733 | void vlv_dsi_init(struct drm_i915_private *dev_priv) |
1717 | { | 1734 | { |
1718 | struct drm_device *dev = &dev_priv->drm; | 1735 | struct drm_device *dev = &dev_priv->drm; |
1719 | struct intel_dsi *intel_dsi; | 1736 | struct intel_dsi *intel_dsi; |
@@ -1730,14 +1747,10 @@ void intel_dsi_init(struct drm_i915_private *dev_priv) | |||
1730 | if (!intel_bios_is_dsi_present(dev_priv, &port)) | 1747 | if (!intel_bios_is_dsi_present(dev_priv, &port)) |
1731 | return; | 1748 | return; |
1732 | 1749 | ||
1733 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 1750 | if (IS_GEN9_LP(dev_priv)) |
1734 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; | ||
1735 | } else if (IS_GEN9_LP(dev_priv)) { | ||
1736 | dev_priv->mipi_mmio_base = BXT_MIPI_BASE; | 1751 | dev_priv->mipi_mmio_base = BXT_MIPI_BASE; |
1737 | } else { | 1752 | else |
1738 | DRM_ERROR("Unsupported Mipi device to reg base"); | 1753 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; |
1739 | return; | ||
1740 | } | ||
1741 | 1754 | ||
1742 | intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); | 1755 | intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); |
1743 | if (!intel_dsi) | 1756 | if (!intel_dsi) |
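With intel_dsi.c renamed to vlv_dsi.c, the per-platform indirection layer is gone: the intel_compute_dsi_pll() / intel_enable_dsi_pll() / intel_disable_dsi_pll() / intel_dsi_get_pclk() / intel_dsi_reset_clocks() wrappers are replaced by direct calls to the vlv_dsi_* or bxt_dsi_* variants, chosen at each call site with IS_GEN9_LP() (Broxton/Geminilake) versus the Valley View/Cherry View fall-through default, and the old error path for an unknown MIPI register base is dropped. The dispatch shape used in intel_dsi_compute_config(), simplified (i915-internal API, not buildable standalone):

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_dsi_pll_compute(encoder, pipe_config);
	else
		ret = vlv_dsi_pll_compute(encoder, pipe_config);
	if (ret)
		return false;

The explicit IS_VALLEYVIEW/IS_CHERRYVIEW tests likewise become the else branch in intel_dsi_device_ready() and intel_dsi_clear_device_ready().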
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c index 2ff2ee7f3b78..a132a8037ecc 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c | |||
@@ -111,8 +111,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, | |||
111 | * XXX: The muxing and gating is hard coded for now. Need to add support for | 111 | * XXX: The muxing and gating is hard coded for now. Need to add support for |
112 | * sharing PLLs with two DSI outputs. | 112 | * sharing PLLs with two DSI outputs. |
113 | */ | 113 | */ |
114 | static int vlv_compute_dsi_pll(struct intel_encoder *encoder, | 114 | int vlv_dsi_pll_compute(struct intel_encoder *encoder, |
115 | struct intel_crtc_state *config) | 115 | struct intel_crtc_state *config) |
116 | { | 116 | { |
117 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 117 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
118 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 118 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
@@ -142,8 +142,8 @@ static int vlv_compute_dsi_pll(struct intel_encoder *encoder, | |||
142 | return 0; | 142 | return 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | static void vlv_enable_dsi_pll(struct intel_encoder *encoder, | 145 | void vlv_dsi_pll_enable(struct intel_encoder *encoder, |
146 | const struct intel_crtc_state *config) | 146 | const struct intel_crtc_state *config) |
147 | { | 147 | { |
148 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 148 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
149 | 149 | ||
@@ -175,7 +175,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder, | |||
175 | DRM_DEBUG_KMS("DSI PLL locked\n"); | 175 | DRM_DEBUG_KMS("DSI PLL locked\n"); |
176 | } | 176 | } |
177 | 177 | ||
178 | static void vlv_disable_dsi_pll(struct intel_encoder *encoder) | 178 | void vlv_dsi_pll_disable(struct intel_encoder *encoder) |
179 | { | 179 | { |
180 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 180 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
181 | u32 tmp; | 181 | u32 tmp; |
@@ -192,7 +192,7 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder) | |||
192 | mutex_unlock(&dev_priv->sb_lock); | 192 | mutex_unlock(&dev_priv->sb_lock); |
193 | } | 193 | } |
194 | 194 | ||
195 | static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) | 195 | bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) |
196 | { | 196 | { |
197 | bool enabled; | 197 | bool enabled; |
198 | u32 val; | 198 | u32 val; |
@@ -229,7 +229,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) | |||
229 | return enabled; | 229 | return enabled; |
230 | } | 230 | } |
231 | 231 | ||
232 | static void bxt_disable_dsi_pll(struct intel_encoder *encoder) | 232 | void bxt_dsi_pll_disable(struct intel_encoder *encoder) |
233 | { | 233 | { |
234 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 234 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
235 | u32 val; | 235 | u32 val; |
@@ -261,8 +261,8 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp) | |||
261 | bpp, pipe_bpp); | 261 | bpp, pipe_bpp); |
262 | } | 262 | } |
263 | 263 | ||
264 | static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | 264 | u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, |
265 | struct intel_crtc_state *config) | 265 | struct intel_crtc_state *config) |
266 | { | 266 | { |
267 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 267 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
268 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 268 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
@@ -327,8 +327,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | |||
327 | return pclk; | 327 | return pclk; |
328 | } | 328 | } |
329 | 329 | ||
330 | static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | 330 | u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, |
331 | struct intel_crtc_state *config) | 331 | struct intel_crtc_state *config) |
332 | { | 332 | { |
333 | u32 pclk; | 333 | u32 pclk; |
334 | u32 dsi_clk; | 334 | u32 dsi_clk; |
@@ -357,16 +357,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | |||
357 | return pclk; | 357 | return pclk; |
358 | } | 358 | } |
359 | 359 | ||
360 | u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | 360 | void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) |
361 | struct intel_crtc_state *config) | ||
362 | { | ||
363 | if (IS_GEN9_LP(to_i915(encoder->base.dev))) | ||
364 | return bxt_dsi_get_pclk(encoder, pipe_bpp, config); | ||
365 | else | ||
366 | return vlv_dsi_get_pclk(encoder, pipe_bpp, config); | ||
367 | } | ||
368 | |||
369 | static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) | ||
370 | { | 361 | { |
371 | u32 temp; | 362 | u32 temp; |
372 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 363 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
@@ -480,8 +471,8 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, | |||
480 | I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); | 471 | I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); |
481 | } | 472 | } |
482 | 473 | ||
483 | static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder, | 474 | int bxt_dsi_pll_compute(struct intel_encoder *encoder, |
484 | struct intel_crtc_state *config) | 475 | struct intel_crtc_state *config) |
485 | { | 476 | { |
486 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 477 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
487 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 478 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
@@ -528,8 +519,8 @@ static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder, | |||
528 | return 0; | 519 | return 0; |
529 | } | 520 | } |
530 | 521 | ||
531 | static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder, | 522 | void bxt_dsi_pll_enable(struct intel_encoder *encoder, |
532 | const struct intel_crtc_state *config) | 523 | const struct intel_crtc_state *config) |
533 | { | 524 | { |
534 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 525 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
535 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 526 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
@@ -568,52 +559,7 @@ static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder, | |||
568 | DRM_DEBUG_KMS("DSI PLL locked\n"); | 559 | DRM_DEBUG_KMS("DSI PLL locked\n"); |
569 | } | 560 | } |
570 | 561 | ||
571 | bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) | 562 | void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) |
572 | { | ||
573 | if (IS_GEN9_LP(dev_priv)) | ||
574 | return bxt_dsi_pll_is_enabled(dev_priv); | ||
575 | |||
576 | MISSING_CASE(INTEL_DEVID(dev_priv)); | ||
577 | |||
578 | return false; | ||
579 | } | ||
580 | |||
581 | int intel_compute_dsi_pll(struct intel_encoder *encoder, | ||
582 | struct intel_crtc_state *config) | ||
583 | { | ||
584 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
585 | |||
586 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
587 | return vlv_compute_dsi_pll(encoder, config); | ||
588 | else if (IS_GEN9_LP(dev_priv)) | ||
589 | return gen9lp_compute_dsi_pll(encoder, config); | ||
590 | |||
591 | return -ENODEV; | ||
592 | } | ||
593 | |||
594 | void intel_enable_dsi_pll(struct intel_encoder *encoder, | ||
595 | const struct intel_crtc_state *config) | ||
596 | { | ||
597 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
598 | |||
599 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
600 | vlv_enable_dsi_pll(encoder, config); | ||
601 | else if (IS_GEN9_LP(dev_priv)) | ||
602 | gen9lp_enable_dsi_pll(encoder, config); | ||
603 | } | ||
604 | |||
605 | void intel_disable_dsi_pll(struct intel_encoder *encoder) | ||
606 | { | ||
607 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
608 | |||
609 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
610 | vlv_disable_dsi_pll(encoder); | ||
611 | else if (IS_GEN9_LP(dev_priv)) | ||
612 | bxt_disable_dsi_pll(encoder); | ||
613 | } | ||
614 | |||
615 | static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder, | ||
616 | enum port port) | ||
617 | { | 563 | { |
618 | u32 tmp; | 564 | u32 tmp; |
619 | struct drm_device *dev = encoder->base.dev; | 565 | struct drm_device *dev = encoder->base.dev; |
@@ -638,13 +584,3 @@ static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder, | |||
638 | } | 584 | } |
639 | I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); | 585 | I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); |
640 | } | 586 | } |
641 | |||
642 | void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) | ||
643 | { | ||
644 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
645 | |||
646 | if (IS_GEN9_LP(dev_priv)) | ||
647 | gen9lp_dsi_reset_clocks(encoder, port); | ||
648 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
649 | vlv_dsi_reset_clocks(encoder, port); | ||
650 | } | ||
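The PLL file gets the same flattening: vlv_dsi_pll_compute/enable/disable, bxt_dsi_pll_compute/enable/disable (previously gen9lp_*), bxt_dsi_pll_is_enabled, the two *_dsi_get_pclk() helpers and the *_dsi_reset_clocks() helpers drop their static qualifiers so vlv_dsi.c can call them directly, and the intel_* dispatch wrappers at the end of the old file are deleted. Callers now pick the variant themselves, for example in intel_dsi_get_hw_state() above (i915-internal API, not buildable standalone):

	/* formerly: if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) */
	if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
		goto out_put_power;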