diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-11-14 09:17:41 -0500 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-11-15 03:44:29 -0500 |
commit | 96ab4c70396e4e5a4d623bc95e86484682bef78f (patch) | |
tree | b5f31a822c7b8c1872ce91ca2aff6506fcd26af9 | |
parent | 565ee3897f0cb1e9b09905747b3784e6605767e8 (diff) | |
parent | 596cc11e7a4a89bf6c45f955402d0bd0c7d51f13 (diff) |
Merge branch 'bdw-fixes' into backlight-rework
Merge the bdw changes into the backlight rework branch so that we can
adapt the new code for bdw, too. This is a bit of a mess, but doing this
another way would have delayed the merging of the backlight
refactoring. Mea culpa.
As discussed with Jani on irc only do bdw-specific callbacks for the
set/get methods and bake in the only other special-case into the pch
enable function.
Conflicts:
drivers/gpu/drm/i915/intel_panel.c
v2: Don't enable the PWM too early for bdw (Jani).
v3: Create new bdw_ functions for setup and enable - the rules change
sufficiently imo with the switch from controlling the pwm from the cpu
to controlling it completely from the pch to warrant this.
v4: Rip out unused pipe variable in bdw_enable_backlight (0-day
builder).
Tested-by: Ben Widawsky <ben@bwidawsk.net> (on bdw)
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r-- | drivers/gpu/drm/drm_edid.c | 11 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_gtt.c | 9 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_opregion.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_panel.c | 81 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 18 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_uncore.c | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_vm.c | 30 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 7 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 42 |
12 files changed, 182 insertions, 34 deletions
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 2f325bcd0708..fb7cf0e796f6 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -1329,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid) | |||
1329 | } | 1329 | } |
1330 | 1330 | ||
1331 | #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) | 1331 | #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) |
1332 | #define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh)) | 1332 | #define MODE_REFRESH_DIFF(c,t) (abs((c) - (t))) |
1333 | 1333 | ||
1334 | /** | 1334 | /** |
1335 | * edid_fixup_preferred - set preferred modes based on quirk list | 1335 | * edid_fixup_preferred - set preferred modes based on quirk list |
@@ -1344,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector, | |||
1344 | { | 1344 | { |
1345 | struct drm_display_mode *t, *cur_mode, *preferred_mode; | 1345 | struct drm_display_mode *t, *cur_mode, *preferred_mode; |
1346 | int target_refresh = 0; | 1346 | int target_refresh = 0; |
1347 | int cur_vrefresh, preferred_vrefresh; | ||
1347 | 1348 | ||
1348 | if (list_empty(&connector->probed_modes)) | 1349 | if (list_empty(&connector->probed_modes)) |
1349 | return; | 1350 | return; |
@@ -1366,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector, | |||
1366 | if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) | 1367 | if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) |
1367 | preferred_mode = cur_mode; | 1368 | preferred_mode = cur_mode; |
1368 | 1369 | ||
1370 | cur_vrefresh = cur_mode->vrefresh ? | ||
1371 | cur_mode->vrefresh : drm_mode_vrefresh(cur_mode); | ||
1372 | preferred_vrefresh = preferred_mode->vrefresh ? | ||
1373 | preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode); | ||
1369 | /* At a given size, try to get closest to target refresh */ | 1374 | /* At a given size, try to get closest to target refresh */ |
1370 | if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && | 1375 | if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && |
1371 | MODE_REFRESH_DIFF(cur_mode, target_refresh) < | 1376 | MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) < |
1372 | MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { | 1377 | MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) { |
1373 | preferred_mode = cur_mode; | 1378 | preferred_mode = cur_mode; |
1374 | } | 1379 | } |
1375 | } | 1380 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d069f6ba4286..2a83e14abea6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private { | |||
1755 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | 1755 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
1756 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ | 1756 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
1757 | ((dev)->pdev->device & 0xFF00) == 0x0C00) | 1757 | ((dev)->pdev->device & 0xFF00) == 0x0C00) |
1758 | #define IS_ULT(dev) (IS_HASWELL(dev) && \ | 1758 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
1759 | (((dev)->pdev->device & 0xf) == 0x2 || \ | ||
1760 | ((dev)->pdev->device & 0xf) == 0x6 || \ | ||
1761 | ((dev)->pdev->device & 0xf) == 0xe)) | ||
1762 | #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ | ||
1759 | ((dev)->pdev->device & 0xFF00) == 0x0A00) | 1763 | ((dev)->pdev->device & 0xFF00) == 0x0A00) |
1764 | #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) | ||
1760 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ | 1765 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ |
1761 | ((dev)->pdev->device & 0x00F0) == 0x0020) | 1766 | ((dev)->pdev->device & 0x00F0) == 0x0020) |
1762 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) | 1767 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3620a1b0a73c..f69bdc741b80 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -335,8 +335,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) | |||
335 | kfree(ppgtt->gen8_pt_dma_addr[i]); | 335 | kfree(ppgtt->gen8_pt_dma_addr[i]); |
336 | } | 336 | } |
337 | 337 | ||
338 | __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT); | 338 | __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); |
339 | __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT); | 339 | __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); |
340 | } | 340 | } |
341 | 341 | ||
342 | /** | 342 | /** |
@@ -1239,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) | |||
1239 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; | 1239 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; |
1240 | if (bdw_gmch_ctl) | 1240 | if (bdw_gmch_ctl) |
1241 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; | 1241 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; |
1242 | if (bdw_gmch_ctl > 4) { | ||
1243 | WARN_ON(!i915_preliminary_hw_support); | ||
1244 | return 4<<20; | ||
1245 | } | ||
1246 | |||
1242 | return bdw_gmch_ctl << 20; | 1247 | return bdw_gmch_ctl << 20; |
1243 | } | 1248 | } |
1244 | 1249 | ||
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index a0b5a99204a8..6506df26ac9e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -859,7 +859,9 @@ int intel_opregion_setup(struct drm_device *dev) | |||
859 | return -ENOTSUPP; | 859 | return -ENOTSUPP; |
860 | } | 860 | } |
861 | 861 | ||
862 | #ifdef CONFIG_ACPI | ||
862 | INIT_WORK(&opregion->asle_work, asle_work); | 863 | INIT_WORK(&opregion->asle_work, asle_work); |
864 | #endif | ||
863 | 865 | ||
864 | base = acpi_os_ioremap(asls, OPREGION_SIZE); | 866 | base = acpi_os_ioremap(asls, OPREGION_SIZE); |
865 | if (!base) | 867 | if (!base) |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index eadfe338dbeb..e480cf41c536 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -352,6 +352,14 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, | |||
352 | return val; | 352 | return val; |
353 | } | 353 | } |
354 | 354 | ||
355 | static u32 bdw_get_backlight(struct intel_connector *connector) | ||
356 | { | ||
357 | struct drm_device *dev = connector->base.dev; | ||
358 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
359 | |||
360 | return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
361 | } | ||
362 | |||
355 | static u32 pch_get_backlight(struct intel_connector *connector) | 363 | static u32 pch_get_backlight(struct intel_connector *connector) |
356 | { | 364 | { |
357 | struct drm_device *dev = connector->base.dev; | 365 | struct drm_device *dev = connector->base.dev; |
@@ -414,6 +422,14 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) | |||
414 | return val; | 422 | return val; |
415 | } | 423 | } |
416 | 424 | ||
425 | static void bdw_set_backlight(struct intel_connector *connector, u32 level) | ||
426 | { | ||
427 | struct drm_device *dev = connector->base.dev; | ||
428 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
429 | u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
430 | I915_WRITE(BLC_PWM_PCH_CTL2, val | level); | ||
431 | } | ||
432 | |||
417 | static void pch_set_backlight(struct intel_connector *connector, u32 level) | 433 | static void pch_set_backlight(struct intel_connector *connector, u32 level) |
418 | { | 434 | { |
419 | struct drm_device *dev = connector->base.dev; | 435 | struct drm_device *dev = connector->base.dev; |
@@ -585,6 +601,38 @@ void intel_panel_disable_backlight(struct intel_connector *connector) | |||
585 | spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); | 601 | spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); |
586 | } | 602 | } |
587 | 603 | ||
604 | static void bdw_enable_backlight(struct intel_connector *connector) | ||
605 | { | ||
606 | struct drm_device *dev = connector->base.dev; | ||
607 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
608 | struct intel_panel *panel = &connector->panel; | ||
609 | u32 pch_ctl1, pch_ctl2; | ||
610 | |||
611 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); | ||
612 | if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { | ||
613 | DRM_DEBUG_KMS("pch backlight already enabled\n"); | ||
614 | pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; | ||
615 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); | ||
616 | } | ||
617 | |||
618 | pch_ctl2 = panel->backlight.max << 16; | ||
619 | I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); | ||
620 | |||
621 | pch_ctl1 = 0; | ||
622 | if (panel->backlight.active_low_pwm) | ||
623 | pch_ctl1 |= BLM_PCH_POLARITY; | ||
624 | |||
625 | /* BDW always uses the pch pwm controls. */ | ||
626 | pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; | ||
627 | |||
628 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); | ||
629 | POSTING_READ(BLC_PWM_PCH_CTL1); | ||
630 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); | ||
631 | |||
632 | /* This won't stick until the above enable. */ | ||
633 | intel_panel_actually_set_backlight(connector, panel->backlight.level); | ||
634 | } | ||
635 | |||
588 | static void pch_enable_backlight(struct intel_connector *connector) | 636 | static void pch_enable_backlight(struct intel_connector *connector) |
589 | { | 637 | { |
590 | struct drm_device *dev = connector->base.dev; | 638 | struct drm_device *dev = connector->base.dev; |
@@ -626,6 +674,7 @@ static void pch_enable_backlight(struct intel_connector *connector) | |||
626 | pch_ctl1 = 0; | 674 | pch_ctl1 = 0; |
627 | if (panel->backlight.active_low_pwm) | 675 | if (panel->backlight.active_low_pwm) |
628 | pch_ctl1 |= BLM_PCH_POLARITY; | 676 | pch_ctl1 |= BLM_PCH_POLARITY; |
677 | |||
629 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); | 678 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); |
630 | POSTING_READ(BLC_PWM_PCH_CTL1); | 679 | POSTING_READ(BLC_PWM_PCH_CTL1); |
631 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); | 680 | I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); |
@@ -869,6 +918,30 @@ static void intel_backlight_device_unregister(struct intel_connector *connector) | |||
869 | * XXX: Query mode clock or hardware clock and program PWM modulation frequency | 918 | * XXX: Query mode clock or hardware clock and program PWM modulation frequency |
870 | * appropriately when it's 0. Use VBT and/or sane defaults. | 919 | * appropriately when it's 0. Use VBT and/or sane defaults. |
871 | */ | 920 | */ |
921 | static int bdw_setup_backlight(struct intel_connector *connector) | ||
922 | { | ||
923 | struct drm_device *dev = connector->base.dev; | ||
924 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
925 | struct intel_panel *panel = &connector->panel; | ||
926 | u32 pch_ctl1, pch_ctl2, val; | ||
927 | |||
928 | pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); | ||
929 | panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; | ||
930 | |||
931 | pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); | ||
932 | panel->backlight.max = pch_ctl2 >> 16; | ||
933 | if (!panel->backlight.max) | ||
934 | return -ENODEV; | ||
935 | |||
936 | val = bdw_get_backlight(connector); | ||
937 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | ||
938 | |||
939 | panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) && | ||
940 | panel->backlight.level != 0; | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
872 | static int pch_setup_backlight(struct intel_connector *connector) | 945 | static int pch_setup_backlight(struct intel_connector *connector) |
873 | { | 946 | { |
874 | struct drm_device *dev = connector->base.dev; | 947 | struct drm_device *dev = connector->base.dev; |
@@ -1036,7 +1109,13 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev) | |||
1036 | { | 1109 | { |
1037 | struct drm_i915_private *dev_priv = dev->dev_private; | 1110 | struct drm_i915_private *dev_priv = dev->dev_private; |
1038 | 1111 | ||
1039 | if (HAS_PCH_SPLIT(dev)) { | 1112 | if (IS_BROADWELL(dev)) { |
1113 | dev_priv->display.setup_backlight = bdw_setup_backlight; | ||
1114 | dev_priv->display.enable_backlight = bdw_enable_backlight; | ||
1115 | dev_priv->display.disable_backlight = pch_disable_backlight; | ||
1116 | dev_priv->display.set_backlight = bdw_set_backlight; | ||
1117 | dev_priv->display.get_backlight = bdw_get_backlight; | ||
1118 | } else if (HAS_PCH_SPLIT(dev)) { | ||
1040 | dev_priv->display.setup_backlight = pch_setup_backlight; | 1119 | dev_priv->display.setup_backlight = pch_setup_backlight; |
1041 | dev_priv->display.enable_backlight = pch_enable_backlight; | 1120 | dev_priv->display.enable_backlight = pch_enable_backlight; |
1042 | dev_priv->display.disable_backlight = pch_disable_backlight; | 1121 | dev_priv->display.disable_backlight = pch_disable_backlight; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a07d7c9cafc..33a8dbe64039 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -5684,6 +5684,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
5684 | { | 5684 | { |
5685 | struct drm_i915_private *dev_priv = dev->dev_private; | 5685 | struct drm_i915_private *dev_priv = dev->dev_private; |
5686 | bool is_enabled, enable_requested; | 5686 | bool is_enabled, enable_requested; |
5687 | unsigned long irqflags; | ||
5687 | uint32_t tmp; | 5688 | uint32_t tmp; |
5688 | 5689 | ||
5689 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); | 5690 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
@@ -5701,9 +5702,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
5701 | HSW_PWR_WELL_STATE_ENABLED), 20)) | 5702 | HSW_PWR_WELL_STATE_ENABLED), 20)) |
5702 | DRM_ERROR("Timeout enabling power well\n"); | 5703 | DRM_ERROR("Timeout enabling power well\n"); |
5703 | } | 5704 | } |
5705 | |||
5706 | if (IS_BROADWELL(dev)) { | ||
5707 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
5708 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), | ||
5709 | dev_priv->de_irq_mask[PIPE_B]); | ||
5710 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), | ||
5711 | ~dev_priv->de_irq_mask[PIPE_B] | | ||
5712 | GEN8_PIPE_VBLANK); | ||
5713 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), | ||
5714 | dev_priv->de_irq_mask[PIPE_C]); | ||
5715 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), | ||
5716 | ~dev_priv->de_irq_mask[PIPE_C] | | ||
5717 | GEN8_PIPE_VBLANK); | ||
5718 | POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); | ||
5719 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
5720 | } | ||
5704 | } else { | 5721 | } else { |
5705 | if (enable_requested) { | 5722 | if (enable_requested) { |
5706 | unsigned long irqflags; | ||
5707 | enum pipe p; | 5723 | enum pipe p; |
5708 | 5724 | ||
5709 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5725 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b620337e6d67..c2f09d456300 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
965 | } else if (IS_GEN6(ring->dev)) { | 965 | } else if (IS_GEN6(ring->dev)) { |
966 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); | 966 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); |
967 | } else { | 967 | } else { |
968 | /* XXX: gen8 returns to sanity */ | ||
968 | mmio = RING_HWS_PGA(ring->mmio_base); | 969 | mmio = RING_HWS_PGA(ring->mmio_base); |
969 | } | 970 | } |
970 | 971 | ||
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f9883ceff946..6a4f9b615de1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -782,6 +782,7 @@ static int gen6_do_reset(struct drm_device *dev) | |||
782 | int intel_gpu_reset(struct drm_device *dev) | 782 | int intel_gpu_reset(struct drm_device *dev) |
783 | { | 783 | { |
784 | switch (INTEL_INFO(dev)->gen) { | 784 | switch (INTEL_INFO(dev)->gen) { |
785 | case 8: | ||
785 | case 7: | 786 | case 7: |
786 | case 6: return gen6_do_reset(dev); | 787 | case 6: return gen6_do_reset(dev); |
787 | case 5: return ironlake_do_reset(dev); | 788 | case 5: return ironlake_do_reset(dev); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index c03514b93f9c..ac617f3ecd0c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -102,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
102 | int retval = VM_FAULT_NOPAGE; | 102 | int retval = VM_FAULT_NOPAGE; |
103 | struct ttm_mem_type_manager *man = | 103 | struct ttm_mem_type_manager *man = |
104 | &bdev->man[bo->mem.mem_type]; | 104 | &bdev->man[bo->mem.mem_type]; |
105 | struct vm_area_struct cvma; | ||
105 | 106 | ||
106 | /* | 107 | /* |
107 | * Work around locking order reversal in fault / nopfn | 108 | * Work around locking order reversal in fault / nopfn |
@@ -164,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
164 | } | 165 | } |
165 | 166 | ||
166 | /* | 167 | /* |
167 | * Strictly, we're not allowed to modify vma->vm_page_prot here, | 168 | * Make a local vma copy to modify the page_prot member |
168 | * since the mmap_sem is only held in read mode. However, we | 169 | * and vm_flags if necessary. The vma parameter is protected |
169 | * modify only the caching bits of vma->vm_page_prot and | 170 | * by mmap_sem in write mode. |
170 | * consider those bits protected by | ||
171 | * the bo->mutex, as we should be the only writers. | ||
172 | * There shouldn't really be any readers of these bits except | ||
173 | * within vm_insert_mixed()? fork? | ||
174 | * | ||
175 | * TODO: Add a list of vmas to the bo, and change the | ||
176 | * vma->vm_page_prot when the object changes caching policy, with | ||
177 | * the correct locks held. | ||
178 | */ | 171 | */ |
172 | cvma = *vma; | ||
173 | cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags); | ||
174 | |||
179 | if (bo->mem.bus.is_iomem) { | 175 | if (bo->mem.bus.is_iomem) { |
180 | vma->vm_page_prot = ttm_io_prot(bo->mem.placement, | 176 | cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, |
181 | vma->vm_page_prot); | 177 | cvma.vm_page_prot); |
182 | } else { | 178 | } else { |
183 | ttm = bo->ttm; | 179 | ttm = bo->ttm; |
184 | vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ? | 180 | if (!(bo->mem.placement & TTM_PL_FLAG_CACHED)) |
185 | vm_get_page_prot(vma->vm_flags) : | 181 | cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, |
186 | ttm_io_prot(bo->mem.placement, vma->vm_page_prot); | 182 | cvma.vm_page_prot); |
187 | 183 | ||
188 | /* Allocate all page at once, most common usage */ | 184 | /* Allocate all page at once, most common usage */ |
189 | if (ttm->bdev->driver->ttm_tt_populate(ttm)) { | 185 | if (ttm->bdev->driver->ttm_tt_populate(ttm)) { |
@@ -210,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
210 | pfn = page_to_pfn(page); | 206 | pfn = page_to_pfn(page); |
211 | } | 207 | } |
212 | 208 | ||
213 | ret = vm_insert_mixed(vma, address, pfn); | 209 | ret = vm_insert_mixed(&cvma, address, pfn); |
214 | /* | 210 | /* |
215 | * Somebody beat us to this PTE or prefaulting to | 211 | * Somebody beat us to this PTE or prefaulting to |
216 | * an already populated PTE, or prefaulting error. | 212 | * an already populated PTE, or prefaulting error. |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 814665b7a117..20d5485eaf98 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) | |||
453 | */ | 453 | */ |
454 | static int vmw_dma_select_mode(struct vmw_private *dev_priv) | 454 | static int vmw_dma_select_mode(struct vmw_private *dev_priv) |
455 | { | 455 | { |
456 | const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); | ||
457 | static const char *names[vmw_dma_map_max] = { | 456 | static const char *names[vmw_dma_map_max] = { |
458 | [vmw_dma_phys] = "Using physical TTM page addresses.", | 457 | [vmw_dma_phys] = "Using physical TTM page addresses.", |
459 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", | 458 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", |
460 | [vmw_dma_map_populate] = "Keeping DMA mappings.", | 459 | [vmw_dma_map_populate] = "Keeping DMA mappings.", |
461 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; | 460 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; |
461 | #ifdef CONFIG_X86 | ||
462 | const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); | ||
462 | 463 | ||
463 | #ifdef CONFIG_INTEL_IOMMU | 464 | #ifdef CONFIG_INTEL_IOMMU |
464 | if (intel_iommu_enabled) { | 465 | if (intel_iommu_enabled) { |
@@ -500,6 +501,10 @@ out_fixup: | |||
500 | return -EINVAL; | 501 | return -EINVAL; |
501 | #endif | 502 | #endif |
502 | 503 | ||
504 | #else /* CONFIG_X86 */ | ||
505 | dev_priv->map_mode = vmw_dma_map_populate; | ||
506 | #endif /* CONFIG_X86 */ | ||
507 | |||
503 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | 508 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); |
504 | 509 | ||
505 | return 0; | 510 | return 0; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 6d0952366f91..6ef0b035becb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma, | |||
145 | } | 145 | } |
146 | 146 | ||
147 | page_virtual = kmap_atomic(page); | 147 | page_virtual = kmap_atomic(page); |
148 | desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT; | 148 | desc_dma = (dma_addr_t) |
149 | le32_to_cpu(page_virtual[desc_per_page].ppn) << | ||
150 | PAGE_SHIFT; | ||
149 | kunmap_atomic(page_virtual); | 151 | kunmap_atomic(page_virtual); |
150 | 152 | ||
151 | __free_page(page); | 153 | __free_page(page); |
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev, | |||
217 | desc_dma = 0; | 219 | desc_dma = 0; |
218 | list_for_each_entry_reverse(page, desc_pages, lru) { | 220 | list_for_each_entry_reverse(page, desc_pages, lru) { |
219 | page_virtual = kmap_atomic(page); | 221 | page_virtual = kmap_atomic(page); |
220 | page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT; | 222 | page_virtual[desc_per_page].ppn = cpu_to_le32 |
223 | (desc_dma >> PAGE_SHIFT); | ||
221 | kunmap_atomic(page_virtual); | 224 | kunmap_atomic(page_virtual); |
222 | desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, | 225 | desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, |
223 | DMA_TO_DEVICE); | 226 | DMA_TO_DEVICE); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 37fb4befec82..252501a54def 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
33 | #include "vmwgfx_resource_priv.h" | 33 | #include "vmwgfx_resource_priv.h" |
34 | 34 | ||
35 | #define VMW_RES_EVICT_ERR_COUNT 10 | ||
36 | |||
35 | struct vmw_user_dma_buffer { | 37 | struct vmw_user_dma_buffer { |
36 | struct ttm_base_object base; | 38 | struct ttm_base_object base; |
37 | struct vmw_dma_buffer dma; | 39 | struct vmw_dma_buffer dma; |
@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, | |||
1091 | * to a backup buffer. | 1093 | * to a backup buffer. |
1092 | * | 1094 | * |
1093 | * @res: The resource to evict. | 1095 | * @res: The resource to evict. |
1096 | * @interruptible: Whether to wait interruptible. | ||
1094 | */ | 1097 | */ |
1095 | int vmw_resource_do_evict(struct vmw_resource *res) | 1098 | int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) |
1096 | { | 1099 | { |
1097 | struct ttm_validate_buffer val_buf; | 1100 | struct ttm_validate_buffer val_buf; |
1098 | const struct vmw_res_func *func = res->func; | 1101 | const struct vmw_res_func *func = res->func; |
@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res) | |||
1102 | BUG_ON(!func->may_evict); | 1105 | BUG_ON(!func->may_evict); |
1103 | 1106 | ||
1104 | val_buf.bo = NULL; | 1107 | val_buf.bo = NULL; |
1105 | ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf); | 1108 | ret = vmw_resource_check_buffer(res, &ticket, interruptible, |
1109 | &val_buf); | ||
1106 | if (unlikely(ret != 0)) | 1110 | if (unlikely(ret != 0)) |
1107 | return ret; | 1111 | return ret; |
1108 | 1112 | ||
@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res) | |||
1141 | struct vmw_private *dev_priv = res->dev_priv; | 1145 | struct vmw_private *dev_priv = res->dev_priv; |
1142 | struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; | 1146 | struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; |
1143 | struct ttm_validate_buffer val_buf; | 1147 | struct ttm_validate_buffer val_buf; |
1148 | unsigned err_count = 0; | ||
1144 | 1149 | ||
1145 | if (likely(!res->func->may_evict)) | 1150 | if (likely(!res->func->may_evict)) |
1146 | return 0; | 1151 | return 0; |
@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res) | |||
1155 | 1160 | ||
1156 | write_lock(&dev_priv->resource_lock); | 1161 | write_lock(&dev_priv->resource_lock); |
1157 | if (list_empty(lru_list) || !res->func->may_evict) { | 1162 | if (list_empty(lru_list) || !res->func->may_evict) { |
1158 | DRM_ERROR("Out of device device id entries " | 1163 | DRM_ERROR("Out of device device resources " |
1159 | "for %s.\n", res->func->type_name); | 1164 | "for %s.\n", res->func->type_name); |
1160 | ret = -EBUSY; | 1165 | ret = -EBUSY; |
1161 | write_unlock(&dev_priv->resource_lock); | 1166 | write_unlock(&dev_priv->resource_lock); |
@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res) | |||
1168 | list_del_init(&evict_res->lru_head); | 1173 | list_del_init(&evict_res->lru_head); |
1169 | 1174 | ||
1170 | write_unlock(&dev_priv->resource_lock); | 1175 | write_unlock(&dev_priv->resource_lock); |
1171 | vmw_resource_do_evict(evict_res); | 1176 | |
1177 | ret = vmw_resource_do_evict(evict_res, true); | ||
1178 | if (unlikely(ret != 0)) { | ||
1179 | write_lock(&dev_priv->resource_lock); | ||
1180 | list_add_tail(&evict_res->lru_head, lru_list); | ||
1181 | write_unlock(&dev_priv->resource_lock); | ||
1182 | if (ret == -ERESTARTSYS || | ||
1183 | ++err_count > VMW_RES_EVICT_ERR_COUNT) { | ||
1184 | vmw_resource_unreference(&evict_res); | ||
1185 | goto out_no_validate; | ||
1186 | } | ||
1187 | } | ||
1188 | |||
1172 | vmw_resource_unreference(&evict_res); | 1189 | vmw_resource_unreference(&evict_res); |
1173 | } while (1); | 1190 | } while (1); |
1174 | 1191 | ||
@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res) | |||
1253 | * @type: The resource type to evict | 1270 | * @type: The resource type to evict |
1254 | * | 1271 | * |
1255 | * To avoid thrashing starvation or as part of the hibernation sequence, | 1272 | * To avoid thrashing starvation or as part of the hibernation sequence, |
1256 | * evict all evictable resources of a specific type. | 1273 | * try to evict all evictable resources of a specific type. |
1257 | */ | 1274 | */ |
1258 | static void vmw_resource_evict_type(struct vmw_private *dev_priv, | 1275 | static void vmw_resource_evict_type(struct vmw_private *dev_priv, |
1259 | enum vmw_res_type type) | 1276 | enum vmw_res_type type) |
1260 | { | 1277 | { |
1261 | struct list_head *lru_list = &dev_priv->res_lru[type]; | 1278 | struct list_head *lru_list = &dev_priv->res_lru[type]; |
1262 | struct vmw_resource *evict_res; | 1279 | struct vmw_resource *evict_res; |
1280 | unsigned err_count = 0; | ||
1281 | int ret; | ||
1263 | 1282 | ||
1264 | do { | 1283 | do { |
1265 | write_lock(&dev_priv->resource_lock); | 1284 | write_lock(&dev_priv->resource_lock); |
@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv, | |||
1272 | lru_head)); | 1291 | lru_head)); |
1273 | list_del_init(&evict_res->lru_head); | 1292 | list_del_init(&evict_res->lru_head); |
1274 | write_unlock(&dev_priv->resource_lock); | 1293 | write_unlock(&dev_priv->resource_lock); |
1275 | vmw_resource_do_evict(evict_res); | 1294 | |
1295 | ret = vmw_resource_do_evict(evict_res, false); | ||
1296 | if (unlikely(ret != 0)) { | ||
1297 | write_lock(&dev_priv->resource_lock); | ||
1298 | list_add_tail(&evict_res->lru_head, lru_list); | ||
1299 | write_unlock(&dev_priv->resource_lock); | ||
1300 | if (++err_count > VMW_RES_EVICT_ERR_COUNT) { | ||
1301 | vmw_resource_unreference(&evict_res); | ||
1302 | return; | ||
1303 | } | ||
1304 | } | ||
1305 | |||
1276 | vmw_resource_unreference(&evict_res); | 1306 | vmw_resource_unreference(&evict_res); |
1277 | } while (1); | 1307 | } while (1); |
1278 | 1308 | ||