Diffstat (limited to 'drivers/gpu/drm/i915/i915_debugfs.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	270
1 file changed, 203 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 063b44817e08..779a275eb1fd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -116,7 +116,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->has_global_gtt_mapping ? "g" : " ";
+	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 }
 
 static void
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 	struct intel_crtc *crtc;
 	int ret;
 
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 		const char plane = plane_name(crtc->plane);
 		struct intel_unpin_work *work;
 
-		spin_lock_irqsave(&dev->event_lock, flags);
+		spin_lock_irq(&dev->event_lock);
 		work = crtc->unpin_work;
 		if (work == NULL) {
 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
 			}
 		}
-		spin_unlock_irqrestore(&dev->event_lock, flags);
+		spin_unlock_irq(&dev->event_lock);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 
 	for_each_pipe(dev_priv, pipe) {
-		if (!intel_display_power_enabled(dev_priv,
+		if (!intel_display_power_is_enabled(dev_priv,
 						 POWER_DOMAIN_PIPE(pipe))) {
 			seq_printf(m, "Pipe %c power disabled\n",
 				   pipe_name(pipe));
@@ -1241,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rpmodectl1, rcctl1;
+	u32 rpmodectl1, rcctl1, pw_status;
 	unsigned fw_rendercount = 0, fw_mediacount = 0;
 
 	intel_runtime_pm_get(dev_priv);
 
+	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
 
@@ -1264,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
 					GEN6_RC_CTL_EI_MODE(1))));
 	seq_printf(m, "Render Power Well: %s\n",
-		   (I915_READ(VLV_GTLC_PW_STATUS) &
-		    VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
 	seq_printf(m, "Media Power Well: %s\n",
-		   (I915_READ(VLV_GTLC_PW_STATUS) &
-		    VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
 
 	seq_printf(m, "Render RC6 residency since boot: %u\n",
 		   I915_READ(VLV_GT_RENDER_RC6));
@@ -1774,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static void i915_dump_lrc_obj(struct seq_file *m,
+			      struct intel_engine_cs *ring,
+			      struct drm_i915_gem_object *ctx_obj)
+{
+	struct page *page;
+	uint32_t *reg_state;
+	int j;
+	unsigned long ggtt_offset = 0;
+
+	if (ctx_obj == NULL) {
+		seq_printf(m, "Context on %s with no gem object\n",
+			   ring->name);
+		return;
+	}
+
+	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+		   intel_execlists_ctx_id(ctx_obj));
+
+	if (!i915_gem_obj_ggtt_bound(ctx_obj))
+		seq_puts(m, "\tNot bound in GGTT\n");
+	else
+		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+
+	if (i915_gem_object_get_pages(ctx_obj)) {
+		seq_puts(m, "\tFailed to get pages for context object\n");
+		return;
+	}
+
+	page = i915_gem_object_get_page(ctx_obj, 1);
+	if (!WARN_ON(page == NULL)) {
+		reg_state = kmap_atomic(page);
+
+		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   ggtt_offset + 4096 + (j * 4),
+				   reg_state[j], reg_state[j + 1],
+				   reg_state[j + 2], reg_state[j + 3]);
+		}
+		kunmap_atomic(reg_state);
+	}
+
+	seq_putc(m, '\n');
+}
+
 static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1794,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		for_each_ring(ring, dev_priv, i) {
-			struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
-			if (ring->default_context == ctx)
-				continue;
-
-			if (ctx_obj) {
-				struct page *page = i915_gem_object_get_page(ctx_obj, 1);
-				uint32_t *reg_state = kmap_atomic(page);
-				int j;
-
-				seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-					   intel_execlists_ctx_id(ctx_obj));
-
-				for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-					seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-					i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
-					reg_state[j], reg_state[j + 1],
-					reg_state[j + 2], reg_state[j + 3]);
-				}
-				kunmap_atomic(reg_state);
-
-				seq_putc(m, '\n');
-			}
+			if (ring->default_context != ctx)
+				i915_dump_lrc_obj(m, ring,
+						  ctx->engine[i].state);
 		}
 	}
 
@@ -1849,6 +1871,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
+	intel_runtime_pm_get(dev_priv);
+
 	for_each_ring(ring, dev_priv, ring_id) {
 		struct intel_ctx_submit_request *head_req = NULL;
 		int count = 0;
@@ -1900,6 +1924,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 		seq_putc(m, '\n');
 	}
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1973,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	if (IS_GEN3(dev) || IS_GEN4(dev)) {
 		seq_printf(m, "DDC = 0x%08x\n",
 			   I915_READ(DCC));
+		seq_printf(m, "DDC2 = 0x%08x\n",
+			   I915_READ(DCC2));
 		seq_printf(m, "C0DRB3 = 0x%04x\n",
 			   I915_READ16(C0DRB3));
 		seq_printf(m, "C1DRB3 = 0x%04x\n",
@@ -1986,7 +2013,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 			   I915_READ(MAD_DIMM_C2));
 		seq_printf(m, "TILECTL = 0x%08x\n",
 			   I915_READ(TILECTL));
-		if (IS_GEN8(dev))
+		if (INTEL_INFO(dev)->gen >= 8)
 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
 				   I915_READ(GAMTARBMODE));
 		else
@@ -1995,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 			   I915_READ(DISP_ARB_CTL));
 	}
+
+	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		seq_puts(m, "L-shaped memory detected\n");
+
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2628,14 +2659,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
-		seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
-			   pll->active, yesno(pll->on));
+		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
+			   pll->config.crtc_mask, pll->active, yesno(pll->on));
 		seq_printf(m, " tracked hardware state:\n");
-		seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
-		seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
-		seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
-		seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
-		seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+		seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n",
+			   pll->config.hw_state.dpll_md);
+		seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
+		seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
+		seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
 	}
 	drm_modeset_unlock_all(dev);
 
@@ -2656,18 +2688,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
-	for (i = 0; i < dev_priv->num_wa_regs; ++i) {
-		u32 addr, mask;
-
-		addr = dev_priv->intel_wa_regs[i].addr;
-		mask = dev_priv->intel_wa_regs[i].mask;
-		dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
-		if (dev_priv->intel_wa_regs[i].addr)
-			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-				   dev_priv->intel_wa_regs[i].addr,
-				   dev_priv->intel_wa_regs[i].value,
-				   dev_priv->intel_wa_regs[i].mask);
+	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+	for (i = 0; i < dev_priv->workarounds.count; ++i) {
+		u32 addr, mask, value, read;
+		bool ok;
+
+		addr = dev_priv->workarounds.reg[i].addr;
+		mask = dev_priv->workarounds.reg[i].mask;
+		value = dev_priv->workarounds.reg[i].value;
+		read = I915_READ(addr);
+		ok = (value & mask) == (read & mask);
+		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+			   addr, value, mask, read, ok ? "OK" : "FAIL");
 	}
 
 	intel_runtime_pm_put(dev_priv);
@@ -2676,6 +2708,42 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_ddb_allocation *ddb;
+	struct skl_ddb_entry *entry;
+	enum pipe pipe;
+	int plane;
+
+	drm_modeset_lock_all(dev);
+
+	ddb = &dev_priv->wm.skl_hw.ddb;
+
+	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+	for_each_pipe(dev_priv, pipe) {
+		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+		for_each_plane(pipe, plane) {
+			entry = &ddb->plane[pipe][plane];
+			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+				   entry->start, entry->end,
+				   skl_ddb_entry_size(entry));
+		}
+
+		entry = &ddb->cursor[pipe];
+		seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+			   entry->end, skl_ddb_entry_size(entry));
+	}
+
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -2969,6 +3037,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
 				break;
 			}
 			break;
+		default:
+			break;
 		}
 	}
 	drm_modeset_unlock_all(dev);
@@ -3256,6 +3326,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+									pipe));
 	u32 val = 0; /* shut up gcc */
 	int ret;
 
@@ -3266,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	if (pipe_crc->source && source)
 		return -EINVAL;
 
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+		return -EIO;
+	}
+
 	if (IS_GEN2(dev))
 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
 	else if (INTEL_INFO(dev)->gen < 5)
@@ -3291,6 +3368,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	if (!pipe_crc->entries)
 		return -ENOMEM;
 
+	/*
+	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+	 * enabled and disabled dynamically based on package C states,
+	 * user space can't make reliable use of the CRCs, so let's just
+	 * completely disable it.
+	 */
+	hsw_disable_ips(crtc);
+
 	spin_lock_irq(&pipe_crc->lock);
 	pipe_crc->head = 0;
 	pipe_crc->tail = 0;
@@ -3329,6 +3414,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			vlv_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
 			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+
+		hsw_enable_ips(crtc);
 	}
 
 	return 0;
@@ -3506,7 +3593,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
 	.write = display_crc_ctl_write
 };
 
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
 	struct drm_device *dev = m->private;
 	int num_levels = ilk_wm_max_level(dev) + 1;
@@ -3517,13 +3604,17 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
 	for (level = 0; level < num_levels; level++) {
 		unsigned int latency = wm[level];
 
-		/* WM1+ latency values in 0.5us units */
-		if (level > 0)
+		/*
+		 * - WM1+ latency values in 0.5us units
+		 * - latencies are in us on gen9
+		 */
+		if (INTEL_INFO(dev)->gen >= 9)
+			latency *= 10;
+		else if (level > 0)
 			latency *= 5;
 
 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
-			   level, wm[level],
-			   latency / 10, latency % 10);
+			   level, wm[level], latency / 10, latency % 10);
 	}
 
 	drm_modeset_unlock_all(dev);
@@ -3532,8 +3623,15 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
 	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = to_i915(dev)->wm.pri_latency;
 
-	wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+	wm_latency_show(m, latencies);
 
 	return 0;
 }
@@ -3541,8 +3639,15 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
 	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = to_i915(dev)->wm.spr_latency;
 
-	wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+	wm_latency_show(m, latencies);
 
 	return 0;
 }
@@ -3550,8 +3655,15 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
 	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = to_i915(dev)->wm.cur_latency;
 
-	wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+	wm_latency_show(m, latencies);
 
 	return 0;
 }
@@ -3587,11 +3699,11 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
-				size_t len, loff_t *offp, uint16_t wm[5])
+				size_t len, loff_t *offp, uint16_t wm[8])
 {
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
-	uint16_t new[5] = { 0 };
+	uint16_t new[8] = { 0 };
 	int num_levels = ilk_wm_max_level(dev) + 1;
 	int level;
 	int ret;
@@ -3605,7 +3717,9 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 
 	tmp[len] = '\0';
 
-	ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+		     &new[0], &new[1], &new[2], &new[3],
+		     &new[4], &new[5], &new[6], &new[7]);
 	if (ret != num_levels)
 		return -EINVAL;
 
@@ -3625,8 +3739,15 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
 {
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint16_t *latencies;
 
-	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = to_i915(dev)->wm.pri_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
 
 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3634,8 +3755,15 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
 {
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint16_t *latencies;
 
-	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = to_i915(dev)->wm.spr_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
 
 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3643,8 +3771,15 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
 {
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint16_t *latencies;
+
+	if (INTEL_INFO(dev)->gen >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = to_i915(dev)->wm.cur_latency;
 
-	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
 
 static const struct file_operations i915_pri_wm_latency_fops = {
@@ -4187,6 +4322,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
4187 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4322 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4188 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4323 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4189 {"i915_wa_registers", i915_wa_registers, 0}, 4324 {"i915_wa_registers", i915_wa_registers, 0},
4325 {"i915_ddb_info", i915_ddb_info, 0},
4190}; 4326};
4191#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4327#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4192 4328