Diffstat (limited to 'drivers/gpu')
47 files changed, 1260 insertions(+), 766 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d820..6a647493ca7f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
677 | /* don't break so fail path works correct */ | 677 | /* don't break so fail path works correct */ |
678 | fail = 1; | 678 | fail = 1; |
679 | break; | 679 | break; |
680 | |||
681 | if (connector->dpms != DRM_MODE_DPMS_ON) { | ||
682 | DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); | ||
683 | mode_changed = true; | ||
684 | } | ||
680 | } | 685 | } |
681 | } | 686 | } |
682 | 687 | ||
@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
754 | ret = -EINVAL; | 759 | ret = -EINVAL; |
755 | goto fail; | 760 | goto fail; |
756 | } | 761 | } |
762 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
763 | for (i = 0; i < set->num_connectors; i++) { | ||
764 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
765 | drm_get_connector_name(set->connectors[i])); | ||
766 | set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); | ||
767 | } | ||
757 | } | 768 | } |
758 | drm_helper_disable_unused_functions(dev); | 769 | drm_helper_disable_unused_functions(dev); |
759 | } else if (fb_changed) { | 770 | } else if (fb_changed) { |
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
771 | } | 782 | } |
772 | } | 783 | } |
773 | 784 | ||
774 | /* | ||
775 | * crtc set_config helpers implicit set the crtc and all connected | ||
776 | * encoders to DPMS on for a full mode set. But for just an fb update it | ||
777 | * doesn't do that. To not confuse userspace, do an explicit DPMS_ON | ||
778 | * unconditionally. This will also ensure driver internal dpms state is | ||
779 | * consistent again. | ||
780 | */ | ||
781 | if (set->crtc->enabled) { | ||
782 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
783 | for (i = 0; i < set->num_connectors; i++) { | ||
784 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
785 | drm_get_connector_name(set->connectors[i])); | ||
786 | set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); | ||
787 | } | ||
788 | } | ||
789 | |||
790 | kfree(save_connectors); | 785 | kfree(save_connectors); |
791 | kfree(save_encoders); | 786 | kfree(save_encoders); |
792 | kfree(save_crtcs); | 787 | kfree(save_crtcs); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cf188ab7051a..66c63808fa35 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1495 | dev_priv->dev = dev; | 1495 | dev_priv->dev = dev; |
1496 | dev_priv->info = info; | 1496 | dev_priv->info = info; |
1497 | 1497 | ||
1498 | spin_lock_init(&dev_priv->irq_lock); | ||
1499 | spin_lock_init(&dev_priv->gpu_error.lock); | ||
1500 | spin_lock_init(&dev_priv->rps.lock); | ||
1501 | spin_lock_init(&dev_priv->gt_lock); | ||
1502 | spin_lock_init(&dev_priv->backlight.lock); | ||
1503 | mutex_init(&dev_priv->dpio_lock); | ||
1504 | mutex_init(&dev_priv->rps.hw_lock); | ||
1505 | mutex_init(&dev_priv->modeset_restore_lock); | ||
1506 | |||
1498 | i915_dump_device_info(dev_priv); | 1507 | i915_dump_device_info(dev_priv); |
1499 | 1508 | ||
1500 | if (i915_get_bridge_dev(dev)) { | 1509 | if (i915_get_bridge_dev(dev)) { |
@@ -1585,6 +1594,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1585 | intel_detect_pch(dev); | 1594 | intel_detect_pch(dev); |
1586 | 1595 | ||
1587 | intel_irq_init(dev); | 1596 | intel_irq_init(dev); |
1597 | intel_gt_sanitize(dev); | ||
1588 | intel_gt_init(dev); | 1598 | intel_gt_init(dev); |
1589 | 1599 | ||
1590 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1600 | /* Try to make sure MCHBAR is enabled before poking at it */ |
@@ -1610,15 +1620,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1610 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | 1620 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
1611 | pci_enable_msi(dev->pdev); | 1621 | pci_enable_msi(dev->pdev); |
1612 | 1622 | ||
1613 | spin_lock_init(&dev_priv->irq_lock); | ||
1614 | spin_lock_init(&dev_priv->gpu_error.lock); | ||
1615 | spin_lock_init(&dev_priv->rps.lock); | ||
1616 | spin_lock_init(&dev_priv->backlight.lock); | ||
1617 | mutex_init(&dev_priv->dpio_lock); | ||
1618 | |||
1619 | mutex_init(&dev_priv->rps.hw_lock); | ||
1620 | mutex_init(&dev_priv->modeset_restore_lock); | ||
1621 | |||
1622 | dev_priv->num_plane = 1; | 1623 | dev_priv->num_plane = 1; |
1623 | if (IS_VALLEYVIEW(dev)) | 1624 | if (IS_VALLEYVIEW(dev)) |
1624 | dev_priv->num_plane = 2; | 1625 | dev_priv->num_plane = 2; |
@@ -1648,7 +1649,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1648 | if (INTEL_INFO(dev)->num_pipes) { | 1649 | if (INTEL_INFO(dev)->num_pipes) { |
1649 | /* Must be done after probing outputs */ | 1650 | /* Must be done after probing outputs */ |
1650 | intel_opregion_init(dev); | 1651 | intel_opregion_init(dev); |
1651 | acpi_video_register_with_quirks(); | 1652 | acpi_video_register(); |
1652 | } | 1653 | } |
1653 | 1654 | ||
1654 | if (IS_GEN5(dev)) | 1655 | if (IS_GEN5(dev)) |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f4af1ca0fb62..45b3c030f483 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
706 | { | 706 | { |
707 | int error = 0; | 707 | int error = 0; |
708 | 708 | ||
709 | intel_gt_reset(dev); | 709 | intel_gt_sanitize(dev); |
710 | 710 | ||
711 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 711 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
712 | mutex_lock(&dev->struct_mutex); | 712 | mutex_lock(&dev->struct_mutex); |
@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)
732 | 732 | ||
733 | pci_set_master(dev->pdev); | 733 | pci_set_master(dev->pdev); |
734 | 734 | ||
735 | intel_gt_reset(dev); | 735 | intel_gt_sanitize(dev); |
736 | 736 | ||
737 | /* | 737 | /* |
738 | * Platforms with opregion should have sane BIOS, older ones (gen3 and | 738 | * Platforms with opregion should have sane BIOS, older ones (gen3 and |
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
1253 | 1253 | ||
1254 | #define __i915_read(x, y) \ | 1254 | #define __i915_read(x, y) \ |
1255 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1255 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
1256 | unsigned long irqflags; \ | ||
1256 | u##x val = 0; \ | 1257 | u##x val = 0; \ |
1258 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||
1257 | if (IS_GEN5(dev_priv->dev)) \ | 1259 | if (IS_GEN5(dev_priv->dev)) \ |
1258 | ilk_dummy_write(dev_priv); \ | 1260 | ilk_dummy_write(dev_priv); \ |
1259 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1261 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1260 | unsigned long irqflags; \ | ||
1261 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||
1262 | if (dev_priv->forcewake_count == 0) \ | 1262 | if (dev_priv->forcewake_count == 0) \ |
1263 | dev_priv->gt.force_wake_get(dev_priv); \ | 1263 | dev_priv->gt.force_wake_get(dev_priv); \ |
1264 | val = read##y(dev_priv->regs + reg); \ | 1264 | val = read##y(dev_priv->regs + reg); \ |
1265 | if (dev_priv->forcewake_count == 0) \ | 1265 | if (dev_priv->forcewake_count == 0) \ |
1266 | dev_priv->gt.force_wake_put(dev_priv); \ | 1266 | dev_priv->gt.force_wake_put(dev_priv); \ |
1267 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||
1268 | } else { \ | 1267 | } else { \ |
1269 | val = read##y(dev_priv->regs + reg); \ | 1268 | val = read##y(dev_priv->regs + reg); \ |
1270 | } \ | 1269 | } \ |
1270 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||
1271 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | 1271 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ |
1272 | return val; \ | 1272 | return val; \ |
1273 | } | 1273 | } |
@@ -1280,8 +1280,10 @@ __i915_read(64, q)
1280 | 1280 | ||
1281 | #define __i915_write(x, y) \ | 1281 | #define __i915_write(x, y) \ |
1282 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | 1282 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
1283 | unsigned long irqflags; \ | ||
1283 | u32 __fifo_ret = 0; \ | 1284 | u32 __fifo_ret = 0; \ |
1284 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | 1285 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ |
1286 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||
1285 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1287 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1286 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 1288 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
1287 | } \ | 1289 | } \ |
@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1293 | gen6_gt_check_fifodbg(dev_priv); \ | 1295 | gen6_gt_check_fifodbg(dev_priv); \ |
1294 | } \ | 1296 | } \ |
1295 | hsw_unclaimed_reg_check(dev_priv, reg); \ | 1297 | hsw_unclaimed_reg_check(dev_priv, reg); \ |
1298 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||
1296 | } | 1299 | } |
1297 | __i915_write(8, b) | 1300 | __i915_write(8, b) |
1298 | __i915_write(16, w) | 1301 | __i915_write(16, w) |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23..d2ee3343c943 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
555 | #define QUIRK_PIPEA_FORCE (1<<0) | 555 | #define QUIRK_PIPEA_FORCE (1<<0) |
556 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) | 556 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
557 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) | 557 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) |
558 | #define QUIRK_NO_PCH_PWM_ENABLE (1<<3) | ||
558 | 559 | ||
559 | struct intel_fbdev; | 560 | struct intel_fbdev; |
560 | struct intel_fbc_work; | 561 | struct intel_fbc_work; |
@@ -1583,7 +1584,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
1583 | extern void intel_irq_init(struct drm_device *dev); | 1584 | extern void intel_irq_init(struct drm_device *dev); |
1584 | extern void intel_hpd_init(struct drm_device *dev); | 1585 | extern void intel_hpd_init(struct drm_device *dev); |
1585 | extern void intel_gt_init(struct drm_device *dev); | 1586 | extern void intel_gt_init(struct drm_device *dev); |
1586 | extern void intel_gt_reset(struct drm_device *dev); | 1587 | extern void intel_gt_sanitize(struct drm_device *dev); |
1587 | 1588 | ||
1588 | void i915_error_state_free(struct kref *error_ref); | 1589 | void i915_error_state_free(struct kref *error_ref); |
1589 | 1590 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 97afd2639fb6..d9e2208cfe98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2258,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
2258 | 2258 | ||
2259 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 2259 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
2260 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 2260 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
2261 | i915_gem_write_fence(dev, i, reg->obj); | 2261 | |
2262 | /* | ||
2263 | * Commit delayed tiling changes if we have an object still | ||
2264 | * attached to the fence, otherwise just clear the fence. | ||
2265 | */ | ||
2266 | if (reg->obj) { | ||
2267 | i915_gem_object_update_fence(reg->obj, reg, | ||
2268 | reg->obj->tiling_mode); | ||
2269 | } else { | ||
2270 | i915_gem_write_fence(dev, i, NULL); | ||
2271 | } | ||
2262 | } | 2272 | } |
2263 | } | 2273 | } |
2264 | 2274 | ||
@@ -2795,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
2795 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) | 2805 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) |
2796 | mb(); | 2806 | mb(); |
2797 | 2807 | ||
2808 | WARN(obj && (!obj->stride || !obj->tiling_mode), | ||
2809 | "bogus fence setup with stride: 0x%x, tiling mode: %i\n", | ||
2810 | obj->stride, obj->tiling_mode); | ||
2811 | |||
2798 | switch (INTEL_INFO(dev)->gen) { | 2812 | switch (INTEL_INFO(dev)->gen) { |
2799 | case 7: | 2813 | case 7: |
2800 | case 6: | 2814 | case 6: |
@@ -2836,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2836 | fence->obj = NULL; | 2850 | fence->obj = NULL; |
2837 | list_del_init(&fence->lru_list); | 2851 | list_del_init(&fence->lru_list); |
2838 | } | 2852 | } |
2853 | obj->fence_dirty = false; | ||
2839 | } | 2854 | } |
2840 | 2855 | ||
2841 | static int | 2856 | static int |
@@ -2965,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2965 | return 0; | 2980 | return 0; |
2966 | 2981 | ||
2967 | i915_gem_object_update_fence(obj, reg, enable); | 2982 | i915_gem_object_update_fence(obj, reg, enable); |
2968 | obj->fence_dirty = false; | ||
2969 | 2983 | ||
2970 | return 0; | 2984 | return 0; |
2971 | } | 2985 | } |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211ac9c55..b042ee5c4070 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
301 | struct intel_digital_port *intel_dig_port = | 301 | struct intel_digital_port *intel_dig_port = |
302 | enc_to_dig_port(encoder); | 302 | enc_to_dig_port(encoder); |
303 | 303 | ||
304 | intel_dp->DP = intel_dig_port->port_reversal | | 304 | intel_dp->DP = intel_dig_port->saved_port_bits | |
305 | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; | 305 | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; |
306 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); | 306 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); |
307 | 307 | ||
@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1109 | * enabling the port. | 1109 | * enabling the port. |
1110 | */ | 1110 | */ |
1111 | I915_WRITE(DDI_BUF_CTL(port), | 1111 | I915_WRITE(DDI_BUF_CTL(port), |
1112 | intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); | 1112 | intel_dig_port->saved_port_bits | |
1113 | DDI_BUF_CTL_ENABLE); | ||
1113 | } else if (type == INTEL_OUTPUT_EDP) { | 1114 | } else if (type == INTEL_OUTPUT_EDP) { |
1114 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1115 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1115 | 1116 | ||
@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1347 | intel_encoder->get_config = intel_ddi_get_config; | 1348 | intel_encoder->get_config = intel_ddi_get_config; |
1348 | 1349 | ||
1349 | intel_dig_port->port = port; | 1350 | intel_dig_port->port = port; |
1350 | intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & | 1351 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
1351 | DDI_BUF_PORT_REVERSAL; | 1352 | (DDI_BUF_PORT_REVERSAL | |
1353 | DDI_A_4_LANES); | ||
1352 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); | 1354 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); |
1353 | 1355 | ||
1354 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; | 1356 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b7..5fb305840db8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
4913 | uint32_t tmp; | 4913 | uint32_t tmp; |
4914 | 4914 | ||
4915 | tmp = I915_READ(PFIT_CONTROL); | 4915 | tmp = I915_READ(PFIT_CONTROL); |
4916 | if (!(tmp & PFIT_ENABLE)) | ||
4917 | return; | ||
4916 | 4918 | ||
4919 | /* Check whether the pfit is attached to our pipe. */ | ||
4917 | if (INTEL_INFO(dev)->gen < 4) { | 4920 | if (INTEL_INFO(dev)->gen < 4) { |
4918 | if (crtc->pipe != PIPE_B) | 4921 | if (crtc->pipe != PIPE_B) |
4919 | return; | 4922 | return; |
4920 | |||
4921 | /* gen2/3 store dither state in pfit control, needs to match */ | ||
4922 | pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE; | ||
4923 | } else { | 4923 | } else { |
4924 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) | 4924 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) |
4925 | return; | 4925 | return; |
4926 | } | 4926 | } |
4927 | 4927 | ||
4928 | if (!(tmp & PFIT_ENABLE)) | 4928 | pipe_config->gmch_pfit.control = tmp; |
4929 | return; | ||
4930 | |||
4931 | pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL); | ||
4932 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); | 4929 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); |
4933 | if (INTEL_INFO(dev)->gen < 5) | 4930 | if (INTEL_INFO(dev)->gen < 5) |
4934 | pipe_config->gmch_pfit.lvds_border_bits = | 4931 | pipe_config->gmch_pfit.lvds_border_bits = |
@@ -8317,6 +8314,8 @@ check_shared_dpll_state(struct drm_device *dev)
8317 | pll->active, pll->refcount); | 8314 | pll->active, pll->refcount); |
8318 | WARN(pll->active && !pll->on, | 8315 | WARN(pll->active && !pll->on, |
8319 | "pll in active use but not on in sw tracking\n"); | 8316 | "pll in active use but not on in sw tracking\n"); |
8317 | WARN(pll->on && !pll->active, | ||
8318 | "pll in on but not on in use in sw tracking\n"); | ||
8320 | WARN(pll->on != active, | 8319 | WARN(pll->on != active, |
8321 | "pll on state mismatch (expected %i, found %i)\n", | 8320 | "pll on state mismatch (expected %i, found %i)\n", |
8322 | pll->on, active); | 8321 | pll->on, active); |
@@ -8541,15 +8540,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
8541 | } | 8540 | } |
8542 | 8541 | ||
8543 | static bool | 8542 | static bool |
8544 | is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors, | 8543 | is_crtc_connector_off(struct drm_mode_set *set) |
8545 | int num_connectors) | ||
8546 | { | 8544 | { |
8547 | int i; | 8545 | int i; |
8548 | 8546 | ||
8549 | for (i = 0; i < num_connectors; i++) | 8547 | if (set->num_connectors == 0) |
8550 | if (connectors[i].encoder && | 8548 | return false; |
8551 | connectors[i].encoder->crtc == crtc && | 8549 | |
8552 | connectors[i].dpms != DRM_MODE_DPMS_ON) | 8550 | if (WARN_ON(set->connectors == NULL)) |
8551 | return false; | ||
8552 | |||
8553 | for (i = 0; i < set->num_connectors; i++) | ||
8554 | if (set->connectors[i]->encoder && | ||
8555 | set->connectors[i]->encoder->crtc == set->crtc && | ||
8556 | set->connectors[i]->dpms != DRM_MODE_DPMS_ON) | ||
8553 | return true; | 8557 | return true; |
8554 | 8558 | ||
8555 | return false; | 8559 | return false; |
@@ -8562,10 +8566,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8562 | 8566 | ||
8563 | /* We should be able to check here if the fb has the same properties | 8567 | /* We should be able to check here if the fb has the same properties |
8564 | * and then just flip_or_move it */ | 8568 | * and then just flip_or_move it */ |
8565 | if (set->connectors != NULL && | 8569 | if (is_crtc_connector_off(set)) { |
8566 | is_crtc_connector_off(set->crtc, *set->connectors, | 8570 | config->mode_changed = true; |
8567 | set->num_connectors)) { | ||
8568 | config->mode_changed = true; | ||
8569 | } else if (set->crtc->fb != set->fb) { | 8571 | } else if (set->crtc->fb != set->fb) { |
8570 | /* If we have no fb then treat it as a full mode set */ | 8572 | /* If we have no fb then treat it as a full mode set */ |
8571 | if (set->crtc->fb == NULL) { | 8573 | if (set->crtc->fb == NULL) { |
@@ -9398,6 +9400,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
9398 | DRM_INFO("applying inverted panel brightness quirk\n"); | 9400 | DRM_INFO("applying inverted panel brightness quirk\n"); |
9399 | } | 9401 | } |
9400 | 9402 | ||
9403 | /* | ||
9404 | * Some machines (Dell XPS13) suffer broken backlight controls if | ||
9405 | * BLM_PCH_PWM_ENABLE is set. | ||
9406 | */ | ||
9407 | static void quirk_no_pcm_pwm_enable(struct drm_device *dev) | ||
9408 | { | ||
9409 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9410 | dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE; | ||
9411 | DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n"); | ||
9412 | } | ||
9413 | |||
9401 | struct intel_quirk { | 9414 | struct intel_quirk { |
9402 | int device; | 9415 | int device; |
9403 | int subsystem_vendor; | 9416 | int subsystem_vendor; |
@@ -9467,6 +9480,11 @@ static struct intel_quirk intel_quirks[] = {
9467 | 9480 | ||
9468 | /* Acer Aspire 4736Z */ | 9481 | /* Acer Aspire 4736Z */ |
9469 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | 9482 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
9483 | |||
9484 | /* Dell XPS13 HD Sandy Bridge */ | ||
9485 | { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, | ||
9486 | /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ | ||
9487 | { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable }, | ||
9470 | }; | 9488 | }; |
9471 | 9489 | ||
9472 | static void intel_init_quirks(struct drm_device *dev) | 9490 | static void intel_init_quirks(struct drm_device *dev) |
@@ -9817,8 +9835,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
9817 | } | 9835 | } |
9818 | pll->refcount = pll->active; | 9836 | pll->refcount = pll->active; |
9819 | 9837 | ||
9820 | DRM_DEBUG_KMS("%s hw state readout: refcount %i\n", | 9838 | DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", |
9821 | pll->name, pll->refcount); | 9839 | pll->name, pll->refcount, pll->on); |
9822 | } | 9840 | } |
9823 | 9841 | ||
9824 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9842 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
@@ -9869,6 +9887,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9869 | struct drm_plane *plane; | 9887 | struct drm_plane *plane; |
9870 | struct intel_crtc *crtc; | 9888 | struct intel_crtc *crtc; |
9871 | struct intel_encoder *encoder; | 9889 | struct intel_encoder *encoder; |
9890 | int i; | ||
9872 | 9891 | ||
9873 | intel_modeset_readout_hw_state(dev); | 9892 | intel_modeset_readout_hw_state(dev); |
9874 | 9893 | ||
@@ -9884,6 +9903,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9884 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); | 9903 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); |
9885 | } | 9904 | } |
9886 | 9905 | ||
9906 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | ||
9907 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | ||
9908 | |||
9909 | if (!pll->on || pll->active) | ||
9910 | continue; | ||
9911 | |||
9912 | DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); | ||
9913 | |||
9914 | pll->disable(dev_priv, pll); | ||
9915 | pll->on = false; | ||
9916 | } | ||
9917 | |||
9887 | if (force_restore) { | 9918 | if (force_restore) { |
9888 | /* | 9919 | /* |
9889 | * We need to use raw interfaces for restoring state to avoid | 9920 | * We need to use raw interfaces for restoring state to avoid |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f48230..b7d6e09456ce 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -504,7 +504,7 @@ struct intel_dp {
504 | struct intel_digital_port { | 504 | struct intel_digital_port { |
505 | struct intel_encoder base; | 505 | struct intel_encoder base; |
506 | enum port port; | 506 | enum port port; |
507 | u32 port_reversal; | 507 | u32 saved_port_bits; |
508 | struct intel_dp dp; | 508 | struct intel_dp dp; |
509 | struct intel_hdmi hdmi; | 509 | struct intel_hdmi hdmi; |
510 | }; | 510 | }; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85bd..2fd3fd5b943e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
785 | } | 785 | } |
786 | } | 786 | } |
787 | 787 | ||
788 | static int hdmi_portclock_limit(struct intel_hdmi *hdmi) | ||
789 | { | ||
790 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | ||
791 | |||
792 | if (IS_G4X(dev)) | ||
793 | return 165000; | ||
794 | else if (IS_HASWELL(dev)) | ||
795 | return 300000; | ||
796 | else | ||
797 | return 225000; | ||
798 | } | ||
799 | |||
788 | static int intel_hdmi_mode_valid(struct drm_connector *connector, | 800 | static int intel_hdmi_mode_valid(struct drm_connector *connector, |
789 | struct drm_display_mode *mode) | 801 | struct drm_display_mode *mode) |
790 | { | 802 | { |
791 | if (mode->clock > 165000) | 803 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) |
792 | return MODE_CLOCK_HIGH; | 804 | return MODE_CLOCK_HIGH; |
793 | if (mode->clock < 20000) | 805 | if (mode->clock < 20000) |
794 | return MODE_CLOCK_LOW; | 806 | return MODE_CLOCK_LOW; |
@@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
806 | struct drm_device *dev = encoder->base.dev; | 818 | struct drm_device *dev = encoder->base.dev; |
807 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; | 819 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; |
808 | int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; | 820 | int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2; |
821 | int portclock_limit = hdmi_portclock_limit(intel_hdmi); | ||
809 | int desired_bpp; | 822 | int desired_bpp; |
810 | 823 | ||
811 | if (intel_hdmi->color_range_auto) { | 824 | if (intel_hdmi->color_range_auto) { |
@@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
829 | * outputs. We also need to check that the higher clock still fits | 842 | * outputs. We also need to check that the higher clock still fits |
830 | * within limits. | 843 | * within limits. |
831 | */ | 844 | */ |
832 | if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000 | 845 | if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit |
833 | && HAS_PCH_SPLIT(dev)) { | 846 | && HAS_PCH_SPLIT(dev)) { |
834 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); | 847 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); |
835 | desired_bpp = 12*3; | 848 | desired_bpp = 12*3; |
@@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
846 | pipe_config->pipe_bpp = desired_bpp; | 859 | pipe_config->pipe_bpp = desired_bpp; |
847 | } | 860 | } |
848 | 861 | ||
849 | if (adjusted_mode->clock > 225000) { | 862 | if (adjusted_mode->clock > portclock_limit) { |
850 | DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); | 863 | DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); |
851 | return false; | 864 | return false; |
852 | } | 865 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022d..61348eae2f04 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
109 | flags |= DRM_MODE_FLAG_PVSYNC; | 109 | flags |= DRM_MODE_FLAG_PVSYNC; |
110 | 110 | ||
111 | pipe_config->adjusted_mode.flags |= flags; | 111 | pipe_config->adjusted_mode.flags |= flags; |
112 | |||
113 | /* gen2/3 store dither state in pfit control, needs to match */ | ||
114 | if (INTEL_INFO(dev)->gen < 4) { | ||
115 | tmp = I915_READ(PFIT_CONTROL); | ||
116 | |||
117 | pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; | ||
118 | } | ||
112 | } | 119 | } |
113 | 120 | ||
114 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 121 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
290 | 297 | ||
291 | intel_pch_panel_fitting(intel_crtc, pipe_config, | 298 | intel_pch_panel_fitting(intel_crtc, pipe_config, |
292 | intel_connector->panel.fitting_mode); | 299 | intel_connector->panel.fitting_mode); |
293 | return true; | ||
294 | } else { | 300 | } else { |
295 | intel_gmch_panel_fitting(intel_crtc, pipe_config, | 301 | intel_gmch_panel_fitting(intel_crtc, pipe_config, |
296 | intel_connector->panel.fitting_mode); | 302 | intel_connector->panel.fitting_mode); |
297 | } | ||
298 | 303 | ||
299 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 304 | } |
300 | pipe_config->timings_set = true; | ||
301 | 305 | ||
302 | /* | 306 | /* |
303 | * XXX: It would be nice to support lower refresh rates on the | 307 | * XXX: It would be nice to support lower refresh rates on the |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 80bea1d3209f..67e2c1f1c9a8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
194 | adjusted_mode->vdisplay == mode->vdisplay) | 194 | adjusted_mode->vdisplay == mode->vdisplay) |
195 | goto out; | 195 | goto out; |
196 | 196 | ||
197 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
198 | pipe_config->timings_set = true; | ||
199 | |||
197 | switch (fitting_mode) { | 200 | switch (fitting_mode) { |
198 | case DRM_MODE_SCALE_CENTER: | 201 | case DRM_MODE_SCALE_CENTER: |
199 | /* | 202 | /* |
@@ -580,7 +583,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
580 | POSTING_READ(reg); | 583 | POSTING_READ(reg); |
581 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | 584 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); |
582 | 585 | ||
583 | if (HAS_PCH_SPLIT(dev)) { | 586 | if (HAS_PCH_SPLIT(dev) && |
587 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { | ||
584 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | 588 | tmp = I915_READ(BLC_PWM_PCH_CTL1); |
585 | tmp |= BLM_PCH_PWM_ENABLE; | 589 | tmp |= BLM_PCH_PWM_ENABLE; |
586 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; | 590 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d10e6735771f..51a2a60f5bfc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5476,7 +5476,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5476 | gen6_gt_check_fifodbg(dev_priv); | 5476 | gen6_gt_check_fifodbg(dev_priv); |
5477 | } | 5477 | } |
5478 | 5478 | ||
5479 | void intel_gt_reset(struct drm_device *dev) | 5479 | void intel_gt_sanitize(struct drm_device *dev) |
5480 | { | 5480 | { |
5481 | struct drm_i915_private *dev_priv = dev->dev_private; | 5481 | struct drm_i915_private *dev_priv = dev->dev_private; |
5482 | 5482 | ||
@@ -5487,16 +5487,16 @@ void intel_gt_reset(struct drm_device *dev)
5487 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 5487 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
5488 | __gen6_gt_force_wake_mt_reset(dev_priv); | 5488 | __gen6_gt_force_wake_mt_reset(dev_priv); |
5489 | } | 5489 | } |
5490 | |||
5491 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ | ||
5492 | if (INTEL_INFO(dev)->gen >= 6) | ||
5493 | intel_disable_gt_powersave(dev); | ||
5490 | } | 5494 | } |
5491 | 5495 | ||
5492 | void intel_gt_init(struct drm_device *dev) | 5496 | void intel_gt_init(struct drm_device *dev) |
5493 | { | 5497 | { |
5494 | struct drm_i915_private *dev_priv = dev->dev_private; | 5498 | struct drm_i915_private *dev_priv = dev->dev_private; |
5495 | 5499 | ||
5496 | spin_lock_init(&dev_priv->gt_lock); | ||
5497 | |||
5498 | intel_gt_reset(dev); | ||
5499 | |||
5500 | if (IS_VALLEYVIEW(dev)) { | 5500 | if (IS_VALLEYVIEW(dev)) { |
5501 | dev_priv->gt.force_wake_get = vlv_force_wake_get; | 5501 | dev_priv->gt.force_wake_get = vlv_force_wake_get; |
5502 | dev_priv->gt.force_wake_put = vlv_force_wake_put; | 5502 | dev_priv->gt.force_wake_put = vlv_force_wake_put; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
index 262c9f5f5f60..ce860de43e61 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_bsp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
index c46882c83982..ba6aeca0285e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nve0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_bsp_cclass; |
94 | nv_engine(priv)->sclass = nve0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nve0_bsp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 3c7a31f7590e..e03fc8e4dc1d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -23,6 +23,25 @@
23 | #include <engine/falcon.h> | 23 | #include <engine/falcon.h> |
24 | #include <subdev/timer.h> | 24 | #include <subdev/timer.h> |
25 | 25 | ||
26 | void | ||
27 | nouveau_falcon_intr(struct nouveau_subdev *subdev) | ||
28 | { | ||
29 | struct nouveau_falcon *falcon = (void *)subdev; | ||
30 | u32 dispatch = nv_ro32(falcon, 0x01c); | ||
31 | u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); | ||
32 | |||
33 | if (intr & 0x00000010) { | ||
34 | nv_debug(falcon, "ucode halted\n"); | ||
35 | nv_wo32(falcon, 0x004, 0x00000010); | ||
36 | intr &= ~0x00000010; | ||
37 | } | ||
38 | |||
39 | if (intr) { | ||
40 | nv_error(falcon, "unhandled intr 0x%08x\n", intr); | ||
41 | nv_wo32(falcon, 0x004, intr); | ||
42 | } | ||
43 | } | ||
44 | |||
26 | u32 | 45 | u32 |
27 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) | 46 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) |
28 | { | 47 | { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
index 98072c1ff360..73719aaa62d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00000002; | 92 | nv_subdev(priv)->unit = 0x00000002; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_ppp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_ppp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
index 1879229b60eb..ac1f62aace72 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_vp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_vp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_vp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
index d28ecbf7bc49..d4c3108479c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nve0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_vp_cclass; |
94 | nv_engine(priv)->sclass = nve0_vp_sclass; | 95 | nv_engine(priv)->sclass = nve0_vp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
index 1edec386ab36..181aa7da524d 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
72 | struct nouveau_oclass *, u32, bool, const char *, | 72 | struct nouveau_oclass *, u32, bool, const char *, |
73 | const char *, int, void **); | 73 | const char *, int, void **); |
74 | 74 | ||
75 | void nouveau_falcon_intr(struct nouveau_subdev *subdev); | ||
76 | |||
75 | #define _nouveau_falcon_dtor _nouveau_engine_dtor | 77 | #define _nouveau_falcon_dtor _nouveau_engine_dtor |
76 | int _nouveau_falcon_init(struct nouveau_object *); | 78 | int _nouveau_falcon_init(struct nouveau_object *); |
77 | int _nouveau_falcon_fini(struct nouveau_object *, bool); | 79 | int _nouveau_falcon_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4b1afb131380..4e7ee5f4155c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
148 | 148 | ||
149 | if (unlikely(nvbo->gem)) | 149 | if (unlikely(nvbo->gem)) |
150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
151 | WARN_ON(nvbo->pin_refcnt > 0); | ||
151 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); | 152 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
152 | kfree(nvbo); | 153 | kfree(nvbo); |
153 | } | 154 | } |
@@ -197,6 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
197 | size_t acc_size; | 198 | size_t acc_size; |
198 | int ret; | 199 | int ret; |
199 | int type = ttm_bo_type_device; | 200 | int type = ttm_bo_type_device; |
201 | int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1); | ||
202 | |||
203 | if (size <= 0 || size > max_size) { | ||
204 | nv_warn(drm, "skipped size %x\n", (u32)size); | ||
205 | return -EINVAL; | ||
206 | } | ||
200 | 207 | ||
201 | if (sg) | 208 | if (sg) |
202 | type = ttm_bo_type_sg; | 209 | type = ttm_bo_type_sg; |
@@ -340,13 +347,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
340 | { | 347 | { |
341 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 348 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
342 | struct ttm_buffer_object *bo = &nvbo->bo; | 349 | struct ttm_buffer_object *bo = &nvbo->bo; |
343 | int ret; | 350 | int ret, ref; |
344 | 351 | ||
345 | ret = ttm_bo_reserve(bo, false, false, false, 0); | 352 | ret = ttm_bo_reserve(bo, false, false, false, 0); |
346 | if (ret) | 353 | if (ret) |
347 | return ret; | 354 | return ret; |
348 | 355 | ||
349 | if (--nvbo->pin_refcnt) | 356 | ref = --nvbo->pin_refcnt; |
357 | WARN_ON_ONCE(ref < 0); | ||
358 | if (ref) | ||
350 | goto out; | 359 | goto out; |
351 | 360 | ||
352 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); | 361 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
@@ -578,7 +587,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
578 | int ret = RING_SPACE(chan, 2); | 587 | int ret = RING_SPACE(chan, 2); |
579 | if (ret == 0) { | 588 | if (ret == 0) { |
580 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); | 589 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
581 | OUT_RING (chan, handle); | 590 | OUT_RING (chan, handle & 0x0000ffff); |
582 | FIRE_RING (chan); | 591 | FIRE_RING (chan); |
583 | } | 592 | } |
584 | return ret; | 593 | return ret; |
@@ -973,7 +982,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
973 | struct ttm_mem_reg *old_mem = &bo->mem; | 982 | struct ttm_mem_reg *old_mem = &bo->mem; |
974 | int ret; | 983 | int ret; |
975 | 984 | ||
976 | mutex_lock(&chan->cli->mutex); | 985 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); |
977 | 986 | ||
978 | /* create temporary vmas for the transfer and attach them to the | 987 | /* create temporary vmas for the transfer and attach them to the |
979 | * old nouveau_mem node, these will get cleaned up after ttm has | 988 | * old nouveau_mem node, these will get cleaned up after ttm has |
@@ -1014,7 +1023,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1014 | struct ttm_mem_reg *, struct ttm_mem_reg *); | 1023 | struct ttm_mem_reg *, struct ttm_mem_reg *); |
1015 | int (*init)(struct nouveau_channel *, u32 handle); | 1024 | int (*init)(struct nouveau_channel *, u32 handle); |
1016 | } _methods[] = { | 1025 | } _methods[] = { |
1017 | { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, | 1026 | { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, |
1018 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, | 1027 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
1019 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1028 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, |
1020 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1029 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, |
@@ -1034,7 +1043,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1034 | struct nouveau_channel *chan; | 1043 | struct nouveau_channel *chan; |
1035 | u32 handle = (mthd->engine << 16) | mthd->oclass; | 1044 | u32 handle = (mthd->engine << 16) | mthd->oclass; |
1036 | 1045 | ||
1037 | if (mthd->init == nve0_bo_move_init) | 1046 | if (mthd->engine) |
1038 | chan = drm->cechan; | 1047 | chan = drm->cechan; |
1039 | else | 1048 | else |
1040 | chan = drm->channel; | 1049 | chan = drm->channel; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 708b2d1c0037..907d20ef6d4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
138 | { | 138 | { |
139 | struct nouveau_framebuffer *nouveau_fb; | 139 | struct nouveau_framebuffer *nouveau_fb; |
140 | struct drm_gem_object *gem; | 140 | struct drm_gem_object *gem; |
141 | int ret; | 141 | int ret = -ENOMEM; |
142 | 142 | ||
143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | 143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); |
144 | if (!gem) | 144 | if (!gem) |
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
146 | 146 | ||
147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); | 147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); |
148 | if (!nouveau_fb) | 148 | if (!nouveau_fb) |
149 | return ERR_PTR(-ENOMEM); | 149 | goto err_unref; |
150 | 150 | ||
151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); | 151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); |
152 | if (ret) { | 152 | if (ret) |
153 | drm_gem_object_unreference(gem); | 153 | goto err; |
154 | return ERR_PTR(ret); | ||
155 | } | ||
156 | 154 | ||
157 | return &nouveau_fb->base; | 155 | return &nouveau_fb->base; |
156 | |||
157 | err: | ||
158 | kfree(nouveau_fb); | ||
159 | err_unref: | ||
160 | drm_gem_object_unreference(gem); | ||
161 | return ERR_PTR(ret); | ||
158 | } | 162 | } |
159 | 163 | ||
160 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | 164 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { |
@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
524 | struct nouveau_page_flip_state *s; | 528 | struct nouveau_page_flip_state *s; |
525 | struct nouveau_channel *chan = NULL; | 529 | struct nouveau_channel *chan = NULL; |
526 | struct nouveau_fence *fence; | 530 | struct nouveau_fence *fence; |
527 | struct list_head res; | 531 | struct ttm_validate_buffer resv[2] = { |
528 | struct ttm_validate_buffer res_val[2]; | 532 | { .bo = &old_bo->bo }, |
533 | { .bo = &new_bo->bo }, | ||
534 | }; | ||
529 | struct ww_acquire_ctx ticket; | 535 | struct ww_acquire_ctx ticket; |
536 | LIST_HEAD(res); | ||
530 | int ret; | 537 | int ret; |
531 | 538 | ||
532 | if (!drm->channel) | 539 | if (!drm->channel) |
@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
545 | chan = drm->channel; | 552 | chan = drm->channel; |
546 | spin_unlock(&old_bo->bo.bdev->fence_lock); | 553 | spin_unlock(&old_bo->bo.bdev->fence_lock); |
547 | 554 | ||
548 | mutex_lock(&chan->cli->mutex); | ||
549 | |||
550 | if (new_bo != old_bo) { | 555 | if (new_bo != old_bo) { |
551 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | 556 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); |
552 | if (likely(!ret)) { | 557 | if (ret) |
553 | res_val[0].bo = &old_bo->bo; | 558 | goto fail_free; |
554 | res_val[1].bo = &new_bo->bo; | ||
555 | INIT_LIST_HEAD(&res); | ||
556 | list_add_tail(&res_val[0].head, &res); | ||
557 | list_add_tail(&res_val[1].head, &res); | ||
558 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
559 | if (ret) | ||
560 | nouveau_bo_unpin(new_bo); | ||
561 | } | ||
562 | } else | ||
563 | ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); | ||
564 | 559 | ||
565 | if (ret) { | 560 | list_add(&resv[1].head, &res); |
566 | mutex_unlock(&chan->cli->mutex); | ||
567 | goto fail_free; | ||
568 | } | 561 | } |
562 | list_add(&resv[0].head, &res); | ||
563 | |||
564 | mutex_lock(&chan->cli->mutex); | ||
565 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
566 | if (ret) | ||
567 | goto fail_unpin; | ||
569 | 568 | ||
570 | /* Initialize a page flip struct */ | 569 | /* Initialize a page flip struct */ |
571 | *s = (struct nouveau_page_flip_state) | 570 | *s = (struct nouveau_page_flip_state) |
@@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
576 | /* Emit a page flip */ | 575 | /* Emit a page flip */ |
577 | if (nv_device(drm->device)->card_type >= NV_50) { | 576 | if (nv_device(drm->device)->card_type >= NV_50) { |
578 | ret = nv50_display_flip_next(crtc, fb, chan, 0); | 577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); |
579 | if (ret) { | 578 | if (ret) |
580 | mutex_unlock(&chan->cli->mutex); | ||
581 | goto fail_unreserve; | 579 | goto fail_unreserve; |
582 | } | ||
583 | } | 580 | } |
584 | 581 | ||
585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 582 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
@@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
590 | /* Update the crtc struct and cleanup */ | 587 | /* Update the crtc struct and cleanup */ |
591 | crtc->fb = fb; | 588 | crtc->fb = fb; |
592 | 589 | ||
593 | if (old_bo != new_bo) { | 590 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); |
594 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); | 591 | if (old_bo != new_bo) |
595 | nouveau_bo_unpin(old_bo); | 592 | nouveau_bo_unpin(old_bo); |
596 | } else { | ||
597 | nouveau_bo_fence(new_bo, fence); | ||
598 | ttm_bo_unreserve(&new_bo->bo); | ||
599 | } | ||
600 | nouveau_fence_unref(&fence); | 593 | nouveau_fence_unref(&fence); |
601 | return 0; | 594 | return 0; |
602 | 595 | ||
603 | fail_unreserve: | 596 | fail_unreserve: |
604 | if (old_bo != new_bo) { | 597 | ttm_eu_backoff_reservation(&ticket, &res); |
605 | ttm_eu_backoff_reservation(&ticket, &res); | 598 | fail_unpin: |
599 | mutex_unlock(&chan->cli->mutex); | ||
600 | if (old_bo != new_bo) | ||
606 | nouveau_bo_unpin(new_bo); | 601 | nouveau_bo_unpin(new_bo); |
607 | } else | ||
608 | ttm_bo_unreserve(&new_bo->bo); | ||
609 | fail_free: | 602 | fail_free: |
610 | kfree(s); | 603 | kfree(s); |
611 | return ret; | 604 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 218a4b522fe5..61972668fd05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm)
192 | 192 | ||
193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; | 193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; |
194 | arg1 = 1; | 194 | arg1 = 1; |
195 | } else | ||
196 | if (device->chipset >= 0xa3 && | ||
197 | device->chipset != 0xaa && | ||
198 | device->chipset != 0xac) { | ||
199 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, | ||
200 | NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, | ||
201 | &drm->cechan); | ||
202 | if (ret) | ||
203 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | ||
204 | |||
205 | arg0 = NvDmaFB; | ||
206 | arg1 = NvDmaTT; | ||
195 | } else { | 207 | } else { |
196 | arg0 = NvDmaFB; | 208 | arg0 = NvDmaFB; |
197 | arg1 = NvDmaTT; | 209 | arg1 = NvDmaTT; |
@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
284 | return 0; | 296 | return 0; |
285 | } | 297 | } |
286 | 298 | ||
287 | static struct lock_class_key drm_client_lock_class_key; | ||
288 | |||
289 | static int | 299 | static int |
290 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 300 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
291 | { | 301 | { |
@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
297 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 307 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
298 | if (ret) | 308 | if (ret) |
299 | return ret; | 309 | return ret; |
300 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
301 | 310 | ||
302 | dev->dev_private = drm; | 311 | dev->dev_private = drm; |
303 | drm->dev = dev; | 312 | drm->dev = dev; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9352010030e9..4c1bc061fae2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -385,6 +385,7 @@ out_unlock:
385 | mutex_unlock(&dev->struct_mutex); | 385 | mutex_unlock(&dev->struct_mutex); |
386 | if (chan) | 386 | if (chan) |
387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); | 387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); |
388 | nouveau_bo_unmap(nvbo); | ||
388 | out_unpin: | 389 | out_unpin: |
389 | nouveau_bo_unpin(nvbo); | 390 | nouveau_bo_unpin(nvbo); |
390 | out_unref: | 391 | out_unref: |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1680d9187bab..be3149932c2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
143 | int ret; | 143 | int ret; |
144 | 144 | ||
145 | fence->channel = chan; | 145 | fence->channel = chan; |
146 | fence->timeout = jiffies + (3 * DRM_HZ); | 146 | fence->timeout = jiffies + (15 * DRM_HZ); |
147 | fence->sequence = ++fctx->sequence; | 147 | fence->sequence = ++fctx->sequence; |
148 | 148 | ||
149 | ret = fctx->emit(fence); | 149 | ret = fctx->emit(fence); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e72d09c068a8..830cb7bad922 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
50 | return; | 50 | return; |
51 | nvbo->gem = NULL; | 51 | nvbo->gem = NULL; |
52 | 52 | ||
53 | /* Lockdep hates you for doing reserve with gem object lock held */ | ||
54 | if (WARN_ON_ONCE(nvbo->pin_refcnt)) { | ||
55 | nvbo->pin_refcnt = 1; | ||
56 | nouveau_bo_unpin(nvbo); | ||
57 | } | ||
58 | |||
59 | if (gem->import_attach) | 53 | if (gem->import_attach) |
60 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | 54 | drm_prime_gem_destroy(gem, nvbo->bo.sg); |
61 | 55 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 54dc6355b0c2..8b40a36c1b57 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -355,6 +355,7 @@ struct nv50_oimm {
355 | 355 | ||
356 | struct nv50_head { | 356 | struct nv50_head { |
357 | struct nouveau_crtc base; | 357 | struct nouveau_crtc base; |
358 | struct nouveau_bo *image; | ||
358 | struct nv50_curs curs; | 359 | struct nv50_curs curs; |
359 | struct nv50_sync sync; | 360 | struct nv50_sync sync; |
360 | struct nv50_ovly ovly; | 361 | struct nv50_ovly ovly; |
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
517 | { | 518 | { |
518 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 519 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
519 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 520 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
521 | struct nv50_head *head = nv50_head(crtc); | ||
520 | struct nv50_sync *sync = nv50_sync(crtc); | 522 | struct nv50_sync *sync = nv50_sync(crtc); |
521 | int head = nv_crtc->index, ret; | ||
522 | u32 *push; | 523 | u32 *push; |
524 | int ret; | ||
523 | 525 | ||
524 | swap_interval <<= 4; | 526 | swap_interval <<= 4; |
525 | if (swap_interval == 0) | 527 | if (swap_interval == 0) |
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
537 | return ret; | 539 | return ret; |
538 | 540 | ||
539 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); | 541 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); |
540 | OUT_RING (chan, NvEvoSema0 + head); | 542 | OUT_RING (chan, NvEvoSema0 + nv_crtc->index); |
541 | OUT_RING (chan, sync->addr ^ 0x10); | 543 | OUT_RING (chan, sync->addr ^ 0x10); |
542 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); | 544 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); |
543 | OUT_RING (chan, sync->data + 1); | 545 | OUT_RING (chan, sync->data + 1); |
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
546 | OUT_RING (chan, sync->data); | 548 | OUT_RING (chan, sync->data); |
547 | } else | 549 | } else |
548 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { | 550 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { |
549 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 551 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
550 | ret = RING_SPACE(chan, 12); | 552 | ret = RING_SPACE(chan, 12); |
551 | if (ret) | 553 | if (ret) |
552 | return ret; | 554 | return ret; |
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
565 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); | 567 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); |
566 | } else | 568 | } else |
567 | if (chan) { | 569 | if (chan) { |
568 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 570 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
569 | ret = RING_SPACE(chan, 10); | 571 | ret = RING_SPACE(chan, 10); |
570 | if (ret) | 572 | if (ret) |
571 | return ret; | 573 | return ret; |
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
630 | evo_mthd(push, 0x0080, 1); | 632 | evo_mthd(push, 0x0080, 1); |
631 | evo_data(push, 0x00000000); | 633 | evo_data(push, 0x00000000); |
632 | evo_kick(push, sync); | 634 | evo_kick(push, sync); |
635 | |||
636 | nouveau_bo_ref(nv_fb->nvbo, &head->image); | ||
633 | return 0; | 637 | return 0; |
634 | } | 638 | } |
635 | 639 | ||
@@ -1038,18 +1042,17 @@ static int | |||
1038 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | 1042 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) |
1039 | { | 1043 | { |
1040 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | 1044 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); |
1045 | struct nv50_head *head = nv50_head(crtc); | ||
1041 | int ret; | 1046 | int ret; |
1042 | 1047 | ||
1043 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | 1048 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); |
1044 | if (ret) | 1049 | if (ret == 0) { |
1045 | return ret; | 1050 | if (head->image) |
1046 | 1051 | nouveau_bo_unpin(head->image); | |
1047 | if (old_fb) { | 1052 | nouveau_bo_ref(nvfb->nvbo, &head->image); |
1048 | nvfb = nouveau_framebuffer(old_fb); | ||
1049 | nouveau_bo_unpin(nvfb->nvbo); | ||
1050 | } | 1053 | } |
1051 | 1054 | ||
1052 | return 0; | 1055 | return ret; |
1053 | } | 1056 | } |
1054 | 1057 | ||
1055 | static int | 1058 | static int |
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) | |||
1198 | } | 1201 | } |
1199 | } | 1202 | } |
1200 | 1203 | ||
1204 | static void | ||
1205 | nv50_crtc_disable(struct drm_crtc *crtc) | ||
1206 | { | ||
1207 | struct nv50_head *head = nv50_head(crtc); | ||
1208 | if (head->image) | ||
1209 | nouveau_bo_unpin(head->image); | ||
1210 | nouveau_bo_ref(NULL, &head->image); | ||
1211 | } | ||
1212 | |||
1201 | static int | 1213 | static int |
1202 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 1214 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, |
1203 | uint32_t handle, uint32_t width, uint32_t height) | 1215 | uint32_t handle, uint32_t width, uint32_t height) |
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
1271 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 1283 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
1272 | struct nv50_disp *disp = nv50_disp(crtc->dev); | 1284 | struct nv50_disp *disp = nv50_disp(crtc->dev); |
1273 | struct nv50_head *head = nv50_head(crtc); | 1285 | struct nv50_head *head = nv50_head(crtc); |
1286 | |||
1274 | nv50_dmac_destroy(disp->core, &head->ovly.base); | 1287 | nv50_dmac_destroy(disp->core, &head->ovly.base); |
1275 | nv50_pioc_destroy(disp->core, &head->oimm.base); | 1288 | nv50_pioc_destroy(disp->core, &head->oimm.base); |
1276 | nv50_dmac_destroy(disp->core, &head->sync.base); | 1289 | nv50_dmac_destroy(disp->core, &head->sync.base); |
1277 | nv50_pioc_destroy(disp->core, &head->curs.base); | 1290 | nv50_pioc_destroy(disp->core, &head->curs.base); |
1291 | |||
1292 | /*XXX: this shouldn't be necessary, but the core doesn't call | ||
1293 | * disconnect() during the cleanup paths | ||
1294 | */ | ||
1295 | if (head->image) | ||
1296 | nouveau_bo_unpin(head->image); | ||
1297 | nouveau_bo_ref(NULL, &head->image); | ||
1298 | |||
1278 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 1299 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
1279 | if (nv_crtc->cursor.nvbo) | 1300 | if (nv_crtc->cursor.nvbo) |
1280 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 1301 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
1281 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 1302 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
1303 | |||
1282 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | 1304 | nouveau_bo_unmap(nv_crtc->lut.nvbo); |
1283 | if (nv_crtc->lut.nvbo) | 1305 | if (nv_crtc->lut.nvbo) |
1284 | nouveau_bo_unpin(nv_crtc->lut.nvbo); | 1306 | nouveau_bo_unpin(nv_crtc->lut.nvbo); |
1285 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | 1307 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); |
1308 | |||
1286 | drm_crtc_cleanup(crtc); | 1309 | drm_crtc_cleanup(crtc); |
1287 | kfree(crtc); | 1310 | kfree(crtc); |
1288 | } | 1311 | } |
@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { | |||
1296 | .mode_set_base = nv50_crtc_mode_set_base, | 1319 | .mode_set_base = nv50_crtc_mode_set_base, |
1297 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, | 1320 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, |
1298 | .load_lut = nv50_crtc_lut_load, | 1321 | .load_lut = nv50_crtc_lut_load, |
1322 | .disable = nv50_crtc_disable, | ||
1299 | }; | 1323 | }; |
1300 | 1324 | ||
1301 | static const struct drm_crtc_funcs nv50_crtc_func = { | 1325 | static const struct drm_crtc_funcs nv50_crtc_func = { |
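
Taken together, the nv50_display.c hunks make each head remember which buffer it is scanning out: nv50_crtc_swap_fbs() pins the incoming framebuffer and, on success, unpins the previously tracked head->image and re-points it at the new buffer; nv50_display_flip_next() updates head->image to the buffer just flipped to; and the new nv50_crtc_disable() plus the destroy path release the last pin and reference. A rough userspace model of that pin-and-track lifecycle (the names are stand-ins, not the nouveau API):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct bo {
        int refs;
        int pins;
    };

    static struct bo *bo_new(void)
    {
        struct bo *b = calloc(1, sizeof(*b));

        b->refs = 1;
        return b;
    }

    static struct bo *bo_get(struct bo *b)
    {
        if (b)
            b->refs++;
        return b;
    }

    static void bo_put(struct bo *b)
    {
        if (b && --b->refs == 0) {
            assert(b->pins == 0);   /* must be unpinned before the last ref drops */
            free(b);
        }
    }

    /* rough analogue of nouveau_bo_ref(nbo, &slot) */
    static void bo_ref_swap(struct bo *nbo, struct bo **slot)
    {
        struct bo *old = *slot;

        *slot = bo_get(nbo);
        bo_put(old);
    }

    /* rough analogue of nv50_crtc_swap_fbs(): pin the new fb, then
     * release whatever the head was scanning out before */
    static void head_set_image(struct bo **image, struct bo *new_fb)
    {
        new_fb->pins++;             /* "nouveau_bo_pin" */
        if (*image)
            (*image)->pins--;       /* "nouveau_bo_unpin" on the old image */
        bo_ref_swap(new_fb, image);
    }

    /* rough analogue of nv50_crtc_disable()/destroy(): drop pin and ref */
    static void head_clear_image(struct bo **image)
    {
        if (*image)
            (*image)->pins--;
        bo_ref_swap(NULL, image);
    }

    int main(void)
    {
        struct bo *fb1 = bo_new(), *fb2 = bo_new();
        struct bo *image = NULL;

        head_set_image(&image, fb1);
        head_set_image(&image, fb2);    /* fb1 unpinned and released here */
        head_clear_image(&image);       /* fb2 released on disable */

        bo_put(fb1);                    /* callers drop their own refs */
        bo_put(fb2);
        printf("all pins and references balanced\n");
        return 0;
    }
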
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 93c2f2cceb51..eb89653a7a17 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c | |||
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea | |||
179 | uint32_t type, bool interruptible) | 179 | uint32_t type, bool interruptible) |
180 | { | 180 | { |
181 | struct qxl_command cmd; | 181 | struct qxl_command cmd; |
182 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
182 | 183 | ||
183 | cmd.type = type; | 184 | cmd.type = type; |
184 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 185 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
185 | 186 | ||
186 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); | 187 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); |
187 | } | 188 | } |
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas | |||
191 | uint32_t type, bool interruptible) | 192 | uint32_t type, bool interruptible) |
192 | { | 193 | { |
193 | struct qxl_command cmd; | 194 | struct qxl_command cmd; |
195 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
194 | 196 | ||
195 | cmd.type = type; | 197 | cmd.type = type; |
196 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 198 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
197 | 199 | ||
198 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); | 200 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); |
199 | } | 201 | } |
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
214 | struct qxl_release *release; | 216 | struct qxl_release *release; |
215 | uint64_t id, next_id; | 217 | uint64_t id, next_id; |
216 | int i = 0; | 218 | int i = 0; |
217 | int ret; | ||
218 | union qxl_release_info *info; | 219 | union qxl_release_info *info; |
219 | 220 | ||
220 | while (qxl_ring_pop(qdev->release_ring, &id)) { | 221 | while (qxl_ring_pop(qdev->release_ring, &id)) { |
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
224 | if (release == NULL) | 225 | if (release == NULL) |
225 | break; | 226 | break; |
226 | 227 | ||
227 | ret = qxl_release_reserve(qdev, release, false); | ||
228 | if (ret) { | ||
229 | qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id); | ||
230 | DRM_ERROR("failed to reserve release %lld\n", id); | ||
231 | } | ||
232 | |||
233 | info = qxl_release_map(qdev, release); | 228 | info = qxl_release_map(qdev, release); |
234 | next_id = info->next; | 229 | next_id = info->next; |
235 | qxl_release_unmap(qdev, release, info); | 230 | qxl_release_unmap(qdev, release, info); |
236 | 231 | ||
237 | qxl_release_unreserve(qdev, release); | ||
238 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, | 232 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, |
239 | next_id); | 233 | next_id); |
240 | 234 | ||
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
259 | return i; | 253 | return i; |
260 | } | 254 | } |
261 | 255 | ||
262 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 256 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
257 | struct qxl_release *release, | ||
258 | unsigned long size, | ||
263 | struct qxl_bo **_bo) | 259 | struct qxl_bo **_bo) |
264 | { | 260 | { |
265 | struct qxl_bo *bo; | 261 | struct qxl_bo *bo; |
266 | int ret; | 262 | int ret; |
267 | 263 | ||
268 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, | 264 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, |
269 | QXL_GEM_DOMAIN_VRAM, NULL, &bo); | 265 | false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); |
270 | if (ret) { | 266 | if (ret) { |
271 | DRM_ERROR("failed to allocate VRAM BO\n"); | 267 | DRM_ERROR("failed to allocate VRAM BO\n"); |
272 | return ret; | 268 | return ret; |
273 | } | 269 | } |
274 | ret = qxl_bo_reserve(bo, false); | 270 | ret = qxl_release_list_add(release, bo); |
275 | if (unlikely(ret != 0)) | 271 | if (ret) |
276 | goto out_unref; | 272 | goto out_unref; |
277 | 273 | ||
278 | *_bo = bo; | 274 | *_bo = bo; |
279 | return 0; | 275 | return 0; |
280 | out_unref: | 276 | out_unref: |
281 | qxl_bo_unref(&bo); | 277 | qxl_bo_unref(&bo); |
282 | return 0; | 278 | return ret; |
283 | } | 279 | } |
284 | 280 | ||
285 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) | 281 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) |
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
503 | if (ret) | 499 | if (ret) |
504 | return ret; | 500 | return ret; |
505 | 501 | ||
502 | ret = qxl_release_reserve_list(release, true); | ||
503 | if (ret) | ||
504 | return ret; | ||
505 | |||
506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); | 506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); |
507 | cmd->type = QXL_SURFACE_CMD_CREATE; | 507 | cmd->type = QXL_SURFACE_CMD_CREATE; |
508 | cmd->u.surface_create.format = surf->surf.format; | 508 | cmd->u.surface_create.format = surf->surf.format; |
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
524 | 524 | ||
525 | surf->surf_create = release; | 525 | surf->surf_create = release; |
526 | 526 | ||
527 | /* no need to add a release to the fence for this bo, | 527 | /* no need to add a release to the fence for this surface bo, |
528 | since it is only released when we ask to destroy the surface | 528 | since it is only released when we ask to destroy the surface |
529 | and it would never signal otherwise */ | 529 | and it would never signal otherwise */ |
530 | qxl_fence_releaseable(qdev, release); | ||
531 | |||
532 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 530 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
533 | 531 | qxl_release_fence_buffer_objects(release); | |
534 | qxl_release_unreserve(qdev, release); | ||
535 | 532 | ||
536 | surf->hw_surf_alloc = true; | 533 | surf->hw_surf_alloc = true; |
537 | spin_lock(&qdev->surf_id_idr_lock); | 534 | spin_lock(&qdev->surf_id_idr_lock); |
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, | |||
573 | cmd->surface_id = id; | 570 | cmd->surface_id = id; |
574 | qxl_release_unmap(qdev, release, &cmd->release_info); | 571 | qxl_release_unmap(qdev, release, &cmd->release_info); |
575 | 572 | ||
576 | qxl_fence_releaseable(qdev, release); | ||
577 | |||
578 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 573 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
579 | 574 | ||
580 | qxl_release_unreserve(qdev, release); | 575 | qxl_release_fence_buffer_objects(release); |
581 | |||
582 | 576 | ||
583 | return 0; | 577 | return 0; |
584 | } | 578 | } |
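
The qxl_cmd.c changes follow the switch (visible in the qxl_drv.h hunks further down) from a fixed release->bos[] array to a list of ttm_validate_buffer entries: the ring-push helpers recover the command BO with list_first_entry() and to_qxl_bo(), garbage collection no longer reserves releases at all, qxl_alloc_bo_reserved() now takes the release and records the new BO on its list via qxl_release_list_add(), and the surface paths reserve the whole list once, push, then fence the buffer objects instead of unreserving. A self-contained sketch of the container_of()-based lookup that list_first_entry() performs (simplified stand-in structs, not the real qxl/TTM types):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_first_entry(head, type, member) \
        container_of((head)->next, type, member)

    struct tv { struct list_head head; };           /* stand-in for ttm_validate_buffer */
    struct bo_list_entry { struct tv tv; int bo_id; }; /* stand-in for qxl_bo_list */

    int main(void)
    {
        struct list_head bos = { &bos, &bos };
        struct bo_list_entry e = { .bo_id = 42 };
        struct bo_list_entry *first;

        /* list_add_tail(&e.tv.head, &bos), done by hand */
        e.tv.head.next = &bos;
        e.tv.head.prev = bos.prev;
        bos.prev->next = &e.tv.head;
        bos.prev = &e.tv.head;

        first = list_first_entry(&bos, struct bo_list_entry, tv.head);
        printf("first bo id: %d\n", first->bo_id);  /* prints 42 */
        return 0;
    }
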
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index f76f5dd7bfc4..835caba026d3 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) | |||
179 | kfree(qxl_crtc); | 179 | kfree(qxl_crtc); |
180 | } | 180 | } |
181 | 181 | ||
182 | static void | 182 | static int |
183 | qxl_hide_cursor(struct qxl_device *qdev) | 183 | qxl_hide_cursor(struct qxl_device *qdev) |
184 | { | 184 | { |
185 | struct qxl_release *release; | 185 | struct qxl_release *release; |
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev) | |||
188 | 188 | ||
189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
190 | &release, NULL); | 190 | &release, NULL); |
191 | if (ret) | ||
192 | return ret; | ||
193 | |||
194 | ret = qxl_release_reserve_list(release, true); | ||
195 | if (ret) { | ||
196 | qxl_release_free(qdev, release); | ||
197 | return ret; | ||
198 | } | ||
191 | 199 | ||
192 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 200 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
193 | cmd->type = QXL_CURSOR_HIDE; | 201 | cmd->type = QXL_CURSOR_HIDE; |
194 | qxl_release_unmap(qdev, release, &cmd->release_info); | 202 | qxl_release_unmap(qdev, release, &cmd->release_info); |
195 | 203 | ||
196 | qxl_fence_releaseable(qdev, release); | ||
197 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 204 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
198 | qxl_release_unreserve(qdev, release); | 205 | qxl_release_fence_buffer_objects(release); |
206 | return 0; | ||
199 | } | 207 | } |
200 | 208 | ||
201 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | 209 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, |
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
216 | 224 | ||
217 | int size = 64*64*4; | 225 | int size = 64*64*4; |
218 | int ret = 0; | 226 | int ret = 0; |
219 | if (!handle) { | 227 | if (!handle) |
220 | qxl_hide_cursor(qdev); | 228 | return qxl_hide_cursor(qdev); |
221 | return 0; | ||
222 | } | ||
223 | 229 | ||
224 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); | 230 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); |
225 | if (!obj) { | 231 | if (!obj) { |
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
234 | goto out_unref; | 240 | goto out_unref; |
235 | 241 | ||
236 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); | 242 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); |
243 | qxl_bo_unreserve(user_bo); | ||
237 | if (ret) | 244 | if (ret) |
238 | goto out_unreserve; | 245 | goto out_unref; |
239 | 246 | ||
240 | ret = qxl_bo_kmap(user_bo, &user_ptr); | 247 | ret = qxl_bo_kmap(user_bo, &user_ptr); |
241 | if (ret) | 248 | if (ret) |
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
246 | &release, NULL); | 253 | &release, NULL); |
247 | if (ret) | 254 | if (ret) |
248 | goto out_kunmap; | 255 | goto out_kunmap; |
249 | ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, | 256 | |
250 | &cursor_bo); | 257 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, |
258 | &cursor_bo); | ||
251 | if (ret) | 259 | if (ret) |
252 | goto out_free_release; | 260 | goto out_free_release; |
253 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | 261 | |
262 | ret = qxl_release_reserve_list(release, false); | ||
254 | if (ret) | 263 | if (ret) |
255 | goto out_free_bo; | 264 | goto out_free_bo; |
256 | 265 | ||
266 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | ||
267 | if (ret) | ||
268 | goto out_backoff; | ||
269 | |||
257 | cursor->header.unique = 0; | 270 | cursor->header.unique = 0; |
258 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; | 271 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; |
259 | cursor->header.width = 64; | 272 | cursor->header.width = 64; |
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
269 | 282 | ||
270 | qxl_bo_kunmap(cursor_bo); | 283 | qxl_bo_kunmap(cursor_bo); |
271 | 284 | ||
272 | /* finish with the userspace bo */ | ||
273 | qxl_bo_kunmap(user_bo); | 285 | qxl_bo_kunmap(user_bo); |
274 | qxl_bo_unpin(user_bo); | ||
275 | qxl_bo_unreserve(user_bo); | ||
276 | drm_gem_object_unreference_unlocked(obj); | ||
277 | 286 | ||
278 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 287 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
279 | cmd->type = QXL_CURSOR_SET; | 288 | cmd->type = QXL_CURSOR_SET; |
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
281 | cmd->u.set.position.y = qcrtc->cur_y; | 290 | cmd->u.set.position.y = qcrtc->cur_y; |
282 | 291 | ||
283 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); | 292 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); |
284 | qxl_release_add_res(qdev, release, cursor_bo); | ||
285 | 293 | ||
286 | cmd->u.set.visible = 1; | 294 | cmd->u.set.visible = 1; |
287 | qxl_release_unmap(qdev, release, &cmd->release_info); | 295 | qxl_release_unmap(qdev, release, &cmd->release_info); |
288 | 296 | ||
289 | qxl_fence_releaseable(qdev, release); | ||
290 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 297 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
291 | qxl_release_unreserve(qdev, release); | 298 | qxl_release_fence_buffer_objects(release); |
299 | |||
300 | /* finish with the userspace bo */ | ||
301 | ret = qxl_bo_reserve(user_bo, false); | ||
302 | if (!ret) { | ||
303 | qxl_bo_unpin(user_bo); | ||
304 | qxl_bo_unreserve(user_bo); | ||
305 | } | ||
306 | drm_gem_object_unreference_unlocked(obj); | ||
292 | 307 | ||
293 | qxl_bo_unreserve(cursor_bo); | ||
294 | qxl_bo_unref(&cursor_bo); | 308 | qxl_bo_unref(&cursor_bo); |
295 | 309 | ||
296 | return ret; | 310 | return ret; |
311 | |||
312 | out_backoff: | ||
313 | qxl_release_backoff_reserve_list(release); | ||
297 | out_free_bo: | 314 | out_free_bo: |
298 | qxl_bo_unref(&cursor_bo); | 315 | qxl_bo_unref(&cursor_bo); |
299 | out_free_release: | 316 | out_free_release: |
300 | qxl_release_unreserve(qdev, release); | ||
301 | qxl_release_free(qdev, release); | 317 | qxl_release_free(qdev, release); |
302 | out_kunmap: | 318 | out_kunmap: |
303 | qxl_bo_kunmap(user_bo); | 319 | qxl_bo_kunmap(user_bo); |
304 | out_unpin: | 320 | out_unpin: |
305 | qxl_bo_unpin(user_bo); | 321 | qxl_bo_unpin(user_bo); |
306 | out_unreserve: | ||
307 | qxl_bo_unreserve(user_bo); | ||
308 | out_unref: | 322 | out_unref: |
309 | drm_gem_object_unreference_unlocked(obj); | 323 | drm_gem_object_unreference_unlocked(obj); |
310 | return ret; | 324 | return ret; |
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
322 | 336 | ||
323 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 337 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
324 | &release, NULL); | 338 | &release, NULL); |
339 | if (ret) | ||
340 | return ret; | ||
341 | |||
342 | ret = qxl_release_reserve_list(release, true); | ||
343 | if (ret) { | ||
344 | qxl_release_free(qdev, release); | ||
345 | return ret; | ||
346 | } | ||
325 | 347 | ||
326 | qcrtc->cur_x = x; | 348 | qcrtc->cur_x = x; |
327 | qcrtc->cur_y = y; | 349 | qcrtc->cur_y = y; |
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
332 | cmd->u.position.y = qcrtc->cur_y; | 354 | cmd->u.position.y = qcrtc->cur_y; |
333 | qxl_release_unmap(qdev, release, &cmd->release_info); | 355 | qxl_release_unmap(qdev, release, &cmd->release_info); |
334 | 356 | ||
335 | qxl_fence_releaseable(qdev, release); | ||
336 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 357 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
337 | qxl_release_unreserve(qdev, release); | 358 | qxl_release_fence_buffer_objects(release); |
359 | |||
338 | return 0; | 360 | return 0; |
339 | } | 361 | } |
340 | 362 | ||
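
In qxl_display.c the cursor paths become proper error-returning functions: qxl_hide_cursor() now propagates failures, each command first allocates its release, adds its BOs, reserves the full list, and only fences the buffer objects after pushing to the ring; the userspace BO is unpinned afterwards under a fresh reservation, and every failure point unwinds through its own label (out_backoff, out_free_bo, out_free_release, ...). A small standalone illustration of that goto-unwind ordering (placeholder resources, not the real qxl calls):

    #include <stdio.h>

    static int step(const char *what, int fail)
    {
        printf("%s %s\n", what, fail ? "failed" : "ok");
        return fail ? -1 : 0;
    }

    static int do_cursor(int fail_reserve)
    {
        int ret;

        ret = step("alloc release", 0);
        if (ret)
            return ret;

        ret = step("alloc cursor bo", 0);
        if (ret)
            goto out_free_release;

        ret = step("reserve bo list", fail_reserve);
        if (ret)
            goto out_free_bo;

        /* map, fill the command, push to the ring, fence the bos */
        step("push + fence", 0);
        return 0;

    out_free_bo:
        printf("undo: free cursor bo\n");
    out_free_release:
        printf("undo: free release\n");
        return ret;
    }

    int main(void)
    {
        do_cursor(0);   /* success path */
        do_cursor(1);   /* error path unwinds in reverse order */
        return 0;
    }
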
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 3c8c3dbf9378..56e1d633875e 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c | |||
@@ -23,25 +23,29 @@ | |||
23 | #include "qxl_drv.h" | 23 | #include "qxl_drv.h" |
24 | #include "qxl_object.h" | 24 | #include "qxl_object.h" |
25 | 25 | ||
26 | static int alloc_clips(struct qxl_device *qdev, | ||
27 | struct qxl_release *release, | ||
28 | unsigned num_clips, | ||
29 | struct qxl_bo **clips_bo) | ||
30 | { | ||
31 | int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips; | ||
32 | |||
33 | return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); | ||
34 | } | ||
35 | |||
26 | /* returns a pointer to the already allocated qxl_rect array inside | 36 | /* returns a pointer to the already allocated qxl_rect array inside |
27 | * the qxl_clip_rects. This is *not* the same as the memory allocated | 37 | * the qxl_clip_rects. This is *not* the same as the memory allocated |
28 | * on the device, it is offset to qxl_clip_rects.chunk.data */ | 38 | * on the device, it is offset to qxl_clip_rects.chunk.data */ |
29 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | 39 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, |
30 | struct qxl_drawable *drawable, | 40 | struct qxl_drawable *drawable, |
31 | unsigned num_clips, | 41 | unsigned num_clips, |
32 | struct qxl_bo **clips_bo, | 42 | struct qxl_bo *clips_bo) |
33 | struct qxl_release *release) | ||
34 | { | 43 | { |
35 | struct qxl_clip_rects *dev_clips; | 44 | struct qxl_clip_rects *dev_clips; |
36 | int ret; | 45 | int ret; |
37 | int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips; | ||
38 | ret = qxl_alloc_bo_reserved(qdev, size, clips_bo); | ||
39 | if (ret) | ||
40 | return NULL; | ||
41 | 46 | ||
42 | ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); | 47 | ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); |
43 | if (ret) { | 48 | if (ret) { |
44 | qxl_bo_unref(clips_bo); | ||
45 | return NULL; | 49 | return NULL; |
46 | } | 50 | } |
47 | dev_clips->num_rects = num_clips; | 51 | dev_clips->num_rects = num_clips; |
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | |||
52 | } | 56 | } |
53 | 57 | ||
54 | static int | 58 | static int |
59 | alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) | ||
60 | { | ||
61 | int ret; | ||
62 | ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), | ||
63 | QXL_RELEASE_DRAWABLE, release, | ||
64 | NULL); | ||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static void | ||
69 | free_drawable(struct qxl_device *qdev, struct qxl_release *release) | ||
70 | { | ||
71 | qxl_release_free(qdev, release); | ||
72 | } | ||
73 | |||
74 | /* release needs to be reserved at this point */ | ||
75 | static int | ||
55 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | 76 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, |
56 | const struct qxl_rect *rect, | 77 | const struct qxl_rect *rect, |
57 | struct qxl_release **release) | 78 | struct qxl_release *release) |
58 | { | 79 | { |
59 | struct qxl_drawable *drawable; | 80 | struct qxl_drawable *drawable; |
60 | int i, ret; | 81 | int i; |
61 | 82 | ||
62 | ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), | 83 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
63 | QXL_RELEASE_DRAWABLE, release, | 84 | if (!drawable) |
64 | NULL); | 85 | return -ENOMEM; |
65 | if (ret) | ||
66 | return ret; | ||
67 | 86 | ||
68 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release); | ||
69 | drawable->type = type; | 87 | drawable->type = type; |
70 | 88 | ||
71 | drawable->surface_id = surface; /* Only primary for now */ | 89 | drawable->surface_id = surface; /* Only primary for now */ |
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | |||
91 | drawable->bbox = *rect; | 109 | drawable->bbox = *rect; |
92 | 110 | ||
93 | drawable->mm_time = qdev->rom->mm_clock; | 111 | drawable->mm_time = qdev->rom->mm_clock; |
94 | qxl_release_unmap(qdev, *release, &drawable->release_info); | 112 | qxl_release_unmap(qdev, release, &drawable->release_info); |
95 | return 0; | 113 | return 0; |
96 | } | 114 | } |
97 | 115 | ||
98 | static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | 116 | static int alloc_palette_object(struct qxl_device *qdev, |
117 | struct qxl_release *release, | ||
118 | struct qxl_bo **palette_bo) | ||
119 | { | ||
120 | return qxl_alloc_bo_reserved(qdev, release, | ||
121 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
122 | palette_bo); | ||
123 | } | ||
124 | |||
125 | static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, | ||
126 | struct qxl_release *release, | ||
99 | const struct qxl_fb_image *qxl_fb_image) | 127 | const struct qxl_fb_image *qxl_fb_image) |
100 | { | 128 | { |
101 | struct qxl_device *qdev = qxl_fb_image->qdev; | ||
102 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; | 129 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; |
103 | uint32_t visual = qxl_fb_image->visual; | 130 | uint32_t visual = qxl_fb_image->visual; |
104 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; | 131 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; |
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
108 | static uint64_t unique; /* we make no attempt to actually set this | 135 | static uint64_t unique; /* we make no attempt to actually set this |
109 | * correctly globaly, since that would require | 136 | * correctly globaly, since that would require |
110 | * tracking all of our palettes. */ | 137 | * tracking all of our palettes. */ |
111 | 138 | ret = qxl_bo_kmap(palette_bo, (void **)&pal); | |
112 | ret = qxl_alloc_bo_reserved(qdev, | ||
113 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
114 | palette_bo); | ||
115 | |||
116 | ret = qxl_bo_kmap(*palette_bo, (void **)&pal); | ||
117 | pal->num_ents = 2; | 139 | pal->num_ents = 2; |
118 | pal->unique = unique++; | 140 | pal->unique = unique++; |
119 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { | 141 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { |
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
126 | } | 148 | } |
127 | pal->ents[0] = bgcolor; | 149 | pal->ents[0] = bgcolor; |
128 | pal->ents[1] = fgcolor; | 150 | pal->ents[1] = fgcolor; |
129 | qxl_bo_kunmap(*palette_bo); | 151 | qxl_bo_kunmap(palette_bo); |
130 | return 0; | 152 | return 0; |
131 | } | 153 | } |
132 | 154 | ||
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
144 | const char *src = fb_image->data; | 166 | const char *src = fb_image->data; |
145 | int depth = fb_image->depth; | 167 | int depth = fb_image->depth; |
146 | struct qxl_release *release; | 168 | struct qxl_release *release; |
147 | struct qxl_bo *image_bo; | ||
148 | struct qxl_image *image; | 169 | struct qxl_image *image; |
149 | int ret; | 170 | int ret; |
150 | 171 | struct qxl_drm_image *dimage; | |
172 | struct qxl_bo *palette_bo = NULL; | ||
151 | if (stride == 0) | 173 | if (stride == 0) |
152 | stride = depth * width / 8; | 174 | stride = depth * width / 8; |
153 | 175 | ||
176 | ret = alloc_drawable(qdev, &release); | ||
177 | if (ret) | ||
178 | return; | ||
179 | |||
180 | ret = qxl_image_alloc_objects(qdev, release, | ||
181 | &dimage, | ||
182 | height, stride); | ||
183 | if (ret) | ||
184 | goto out_free_drawable; | ||
185 | |||
186 | if (depth == 1) { | ||
187 | ret = alloc_palette_object(qdev, release, &palette_bo); | ||
188 | if (ret) | ||
189 | goto out_free_image; | ||
190 | } | ||
191 | |||
192 | /* do a reservation run over all the objects we just allocated */ | ||
193 | ret = qxl_release_reserve_list(release, true); | ||
194 | if (ret) | ||
195 | goto out_free_palette; | ||
196 | |||
154 | rect.left = x; | 197 | rect.left = x; |
155 | rect.right = x + width; | 198 | rect.right = x + width; |
156 | rect.top = y; | 199 | rect.top = y; |
157 | rect.bottom = y + height; | 200 | rect.bottom = y + height; |
158 | 201 | ||
159 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); | 202 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release); |
160 | if (ret) | 203 | if (ret) { |
161 | return; | 204 | qxl_release_backoff_reserve_list(release); |
205 | goto out_free_palette; | ||
206 | } | ||
162 | 207 | ||
163 | ret = qxl_image_create(qdev, release, &image_bo, | 208 | ret = qxl_image_init(qdev, release, dimage, |
164 | (const uint8_t *)src, 0, 0, | 209 | (const uint8_t *)src, 0, 0, |
165 | width, height, depth, stride); | 210 | width, height, depth, stride); |
166 | if (ret) { | 211 | if (ret) { |
167 | qxl_release_unreserve(qdev, release); | 212 | qxl_release_backoff_reserve_list(release); |
168 | qxl_release_free(qdev, release); | 213 | qxl_release_free(qdev, release); |
169 | return; | 214 | return; |
170 | } | 215 | } |
171 | 216 | ||
172 | if (depth == 1) { | 217 | if (depth == 1) { |
173 | struct qxl_bo *palette_bo; | ||
174 | void *ptr; | 218 | void *ptr; |
175 | ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); | 219 | ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image); |
176 | qxl_release_add_res(qdev, release, palette_bo); | ||
177 | 220 | ||
178 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); | 221 | ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0); |
179 | image = ptr; | 222 | image = ptr; |
180 | image->u.bitmap.palette = | 223 | image->u.bitmap.palette = |
181 | qxl_bo_physical_address(qdev, palette_bo, 0); | 224 | qxl_bo_physical_address(qdev, palette_bo, 0); |
182 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); | 225 | qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr); |
183 | qxl_bo_unreserve(palette_bo); | ||
184 | qxl_bo_unref(&palette_bo); | ||
185 | } | 226 | } |
186 | 227 | ||
187 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 228 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
199 | drawable->u.copy.mask.bitmap = 0; | 240 | drawable->u.copy.mask.bitmap = 0; |
200 | 241 | ||
201 | drawable->u.copy.src_bitmap = | 242 | drawable->u.copy.src_bitmap = |
202 | qxl_bo_physical_address(qdev, image_bo, 0); | 243 | qxl_bo_physical_address(qdev, dimage->bo, 0); |
203 | qxl_release_unmap(qdev, release, &drawable->release_info); | 244 | qxl_release_unmap(qdev, release, &drawable->release_info); |
204 | 245 | ||
205 | qxl_release_add_res(qdev, release, image_bo); | ||
206 | qxl_bo_unreserve(image_bo); | ||
207 | qxl_bo_unref(&image_bo); | ||
208 | |||
209 | qxl_fence_releaseable(qdev, release); | ||
210 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 246 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
211 | qxl_release_unreserve(qdev, release); | 247 | qxl_release_fence_buffer_objects(release); |
248 | |||
249 | out_free_palette: | ||
250 | if (palette_bo) | ||
251 | qxl_bo_unref(&palette_bo); | ||
252 | out_free_image: | ||
253 | qxl_image_free_objects(qdev, dimage); | ||
254 | out_free_drawable: | ||
255 | if (ret) | ||
256 | free_drawable(qdev, release); | ||
212 | } | 257 | } |
213 | 258 | ||
214 | /* push a draw command using the given clipping rectangles as | 259 | /* push a draw command using the given clipping rectangles as |
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
243 | int depth = qxl_fb->base.bits_per_pixel; | 288 | int depth = qxl_fb->base.bits_per_pixel; |
244 | uint8_t *surface_base; | 289 | uint8_t *surface_base; |
245 | struct qxl_release *release; | 290 | struct qxl_release *release; |
246 | struct qxl_bo *image_bo; | ||
247 | struct qxl_bo *clips_bo; | 291 | struct qxl_bo *clips_bo; |
292 | struct qxl_drm_image *dimage; | ||
248 | int ret; | 293 | int ret; |
249 | 294 | ||
295 | ret = alloc_drawable(qdev, &release); | ||
296 | if (ret) | ||
297 | return; | ||
298 | |||
250 | left = clips->x1; | 299 | left = clips->x1; |
251 | right = clips->x2; | 300 | right = clips->x2; |
252 | top = clips->y1; | 301 | top = clips->y1; |
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
263 | 312 | ||
264 | width = right - left; | 313 | width = right - left; |
265 | height = bottom - top; | 314 | height = bottom - top; |
315 | |||
316 | ret = alloc_clips(qdev, release, num_clips, &clips_bo); | ||
317 | if (ret) | ||
318 | goto out_free_drawable; | ||
319 | |||
320 | ret = qxl_image_alloc_objects(qdev, release, | ||
321 | &dimage, | ||
322 | height, stride); | ||
323 | if (ret) | ||
324 | goto out_free_clips; | ||
325 | |||
326 | /* do a reservation run over all the objects we just allocated */ | ||
327 | ret = qxl_release_reserve_list(release, true); | ||
328 | if (ret) | ||
329 | goto out_free_image; | ||
330 | |||
266 | drawable_rect.left = left; | 331 | drawable_rect.left = left; |
267 | drawable_rect.right = right; | 332 | drawable_rect.right = right; |
268 | drawable_rect.top = top; | 333 | drawable_rect.top = top; |
269 | drawable_rect.bottom = bottom; | 334 | drawable_rect.bottom = bottom; |
335 | |||
270 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, | 336 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, |
271 | &release); | 337 | release); |
272 | if (ret) | 338 | if (ret) |
273 | return; | 339 | goto out_release_backoff; |
274 | 340 | ||
275 | ret = qxl_bo_kmap(bo, (void **)&surface_base); | 341 | ret = qxl_bo_kmap(bo, (void **)&surface_base); |
276 | if (ret) | 342 | if (ret) |
277 | goto out_unref; | 343 | goto out_release_backoff; |
278 | 344 | ||
279 | ret = qxl_image_create(qdev, release, &image_bo, surface_base, | 345 | |
280 | left, top, width, height, depth, stride); | 346 | ret = qxl_image_init(qdev, release, dimage, surface_base, |
347 | left, top, width, height, depth, stride); | ||
281 | qxl_bo_kunmap(bo); | 348 | qxl_bo_kunmap(bo); |
282 | if (ret) | 349 | if (ret) |
283 | goto out_unref; | 350 | goto out_release_backoff; |
351 | |||
352 | rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); | ||
353 | if (!rects) | ||
354 | goto out_release_backoff; | ||
284 | 355 | ||
285 | rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release); | ||
286 | if (!rects) { | ||
287 | qxl_bo_unref(&image_bo); | ||
288 | goto out_unref; | ||
289 | } | ||
290 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 356 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
291 | 357 | ||
292 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; | 358 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; |
293 | drawable->clip.data = qxl_bo_physical_address(qdev, | 359 | drawable->clip.data = qxl_bo_physical_address(qdev, |
294 | clips_bo, 0); | 360 | clips_bo, 0); |
295 | qxl_release_add_res(qdev, release, clips_bo); | ||
296 | 361 | ||
297 | drawable->u.copy.src_area.top = 0; | 362 | drawable->u.copy.src_area.top = 0; |
298 | drawable->u.copy.src_area.bottom = height; | 363 | drawable->u.copy.src_area.bottom = height; |
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
306 | drawable->u.copy.mask.pos.y = 0; | 371 | drawable->u.copy.mask.pos.y = 0; |
307 | drawable->u.copy.mask.bitmap = 0; | 372 | drawable->u.copy.mask.bitmap = 0; |
308 | 373 | ||
309 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); | 374 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); |
310 | qxl_release_unmap(qdev, release, &drawable->release_info); | 375 | qxl_release_unmap(qdev, release, &drawable->release_info); |
311 | qxl_release_add_res(qdev, release, image_bo); | 376 | |
312 | qxl_bo_unreserve(image_bo); | ||
313 | qxl_bo_unref(&image_bo); | ||
314 | clips_ptr = clips; | 377 | clips_ptr = clips; |
315 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { | 378 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { |
316 | rects[i].left = clips_ptr->x1; | 379 | rects[i].left = clips_ptr->x1; |
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
319 | rects[i].bottom = clips_ptr->y2; | 382 | rects[i].bottom = clips_ptr->y2; |
320 | } | 383 | } |
321 | qxl_bo_kunmap(clips_bo); | 384 | qxl_bo_kunmap(clips_bo); |
322 | qxl_bo_unreserve(clips_bo); | ||
323 | qxl_bo_unref(&clips_bo); | ||
324 | 385 | ||
325 | qxl_fence_releaseable(qdev, release); | ||
326 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 386 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
327 | qxl_release_unreserve(qdev, release); | 387 | qxl_release_fence_buffer_objects(release); |
328 | return; | 388 | |
389 | out_release_backoff: | ||
390 | if (ret) | ||
391 | qxl_release_backoff_reserve_list(release); | ||
392 | out_free_image: | ||
393 | qxl_image_free_objects(qdev, dimage); | ||
394 | out_free_clips: | ||
395 | qxl_bo_unref(&clips_bo); | ||
396 | out_free_drawable: | ||
397 | /* only free drawable on error */ | ||
398 | if (ret) | ||
399 | free_drawable(qdev, release); | ||
329 | 400 | ||
330 | out_unref: | ||
331 | qxl_release_unreserve(qdev, release); | ||
332 | qxl_release_free(qdev, release); | ||
333 | } | 401 | } |
334 | 402 | ||
335 | void qxl_draw_copyarea(struct qxl_device *qdev, | 403 | void qxl_draw_copyarea(struct qxl_device *qdev, |
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
342 | struct qxl_release *release; | 410 | struct qxl_release *release; |
343 | int ret; | 411 | int ret; |
344 | 412 | ||
413 | ret = alloc_drawable(qdev, &release); | ||
414 | if (ret) | ||
415 | return; | ||
416 | |||
417 | /* do a reservation run over all the objects we just allocated */ | ||
418 | ret = qxl_release_reserve_list(release, true); | ||
419 | if (ret) | ||
420 | goto out_free_release; | ||
421 | |||
345 | rect.left = dx; | 422 | rect.left = dx; |
346 | rect.top = dy; | 423 | rect.top = dy; |
347 | rect.right = dx + width; | 424 | rect.right = dx + width; |
348 | rect.bottom = dy + height; | 425 | rect.bottom = dy + height; |
349 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); | 426 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release); |
350 | if (ret) | 427 | if (ret) { |
351 | return; | 428 | qxl_release_backoff_reserve_list(release); |
429 | goto out_free_release; | ||
430 | } | ||
352 | 431 | ||
353 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 432 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
354 | drawable->u.copy_bits.src_pos.x = sx; | 433 | drawable->u.copy_bits.src_pos.x = sx; |
355 | drawable->u.copy_bits.src_pos.y = sy; | 434 | drawable->u.copy_bits.src_pos.y = sy; |
356 | |||
357 | qxl_release_unmap(qdev, release, &drawable->release_info); | 435 | qxl_release_unmap(qdev, release, &drawable->release_info); |
358 | qxl_fence_releaseable(qdev, release); | 436 | |
359 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 437 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
360 | qxl_release_unreserve(qdev, release); | 438 | qxl_release_fence_buffer_objects(release); |
439 | |||
440 | out_free_release: | ||
441 | if (ret) | ||
442 | free_drawable(qdev, release); | ||
361 | } | 443 | } |
362 | 444 | ||
363 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | 445 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) |
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
370 | struct qxl_release *release; | 452 | struct qxl_release *release; |
371 | int ret; | 453 | int ret; |
372 | 454 | ||
373 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); | 455 | ret = alloc_drawable(qdev, &release); |
374 | if (ret) | 456 | if (ret) |
375 | return; | 457 | return; |
376 | 458 | ||
459 | /* do a reservation run over all the objects we just allocated */ | ||
460 | ret = qxl_release_reserve_list(release, true); | ||
461 | if (ret) | ||
462 | goto out_free_release; | ||
463 | |||
464 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release); | ||
465 | if (ret) { | ||
466 | qxl_release_backoff_reserve_list(release); | ||
467 | goto out_free_release; | ||
468 | } | ||
469 | |||
377 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 470 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
378 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; | 471 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; |
379 | drawable->u.fill.brush.u.color = color; | 472 | drawable->u.fill.brush.u.color = color; |
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
384 | drawable->u.fill.mask.bitmap = 0; | 477 | drawable->u.fill.mask.bitmap = 0; |
385 | 478 | ||
386 | qxl_release_unmap(qdev, release, &drawable->release_info); | 479 | qxl_release_unmap(qdev, release, &drawable->release_info); |
387 | qxl_fence_releaseable(qdev, release); | 480 | |
388 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 481 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
389 | qxl_release_unreserve(qdev, release); | 482 | qxl_release_fence_buffer_objects(release); |
483 | |||
484 | out_free_release: | ||
485 | if (ret) | ||
486 | free_drawable(qdev, release); | ||
390 | } | 487 | } |
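
qxl_draw.c now builds every drawable the same way: allocate the release and all of its backing objects up front (drawable, image chunks, clip rects, optional palette), do a single reservation run over the whole list with qxl_release_reserve_list(), fill the mapped structures, push the command, and fence the buffer objects; any failure after the reservation backs the whole list off again, and the release is only freed on error. A toy all-or-nothing reservation loop in the same spirit (plain mutexes standing in for TTM reservations; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    #define NOBJ 3

    static pthread_mutex_t obj[NOBJ] = {
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
    };

    /* like qxl_release_reserve_list(): take every reservation or none */
    static int reserve_list(int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (pthread_mutex_trylock(&obj[i]) != 0) {
                while (i--)
                    pthread_mutex_unlock(&obj[i]);  /* backoff */
                return -1;
            }
        }
        return 0;
    }

    /* like qxl_release_fence_buffer_objects(): publish, then drop them */
    static void fence_and_unreserve(int n)
    {
        int i;

        for (i = 0; i < n; i++)
            pthread_mutex_unlock(&obj[i]);
    }

    int main(void)
    {
        if (reserve_list(NOBJ) == 0) {
            printf("drawable built under reservation, pushing to ring\n");
            fence_and_unreserve(NOBJ);
        }
        return 0;
    }
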
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index aacb791464a3..7e96f4f11738 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
@@ -42,6 +42,9 @@ | |||
42 | #include <ttm/ttm_placement.h> | 42 | #include <ttm/ttm_placement.h> |
43 | #include <ttm/ttm_module.h> | 43 | #include <ttm/ttm_module.h> |
44 | 44 | ||
45 | /* just for ttm_validate_buffer */ | ||
46 | #include <ttm/ttm_execbuf_util.h> | ||
47 | |||
45 | #include <drm/qxl_drm.h> | 48 | #include <drm/qxl_drm.h> |
46 | #include "qxl_dev.h" | 49 | #include "qxl_dev.h" |
47 | 50 | ||
@@ -118,9 +121,9 @@ struct qxl_bo { | |||
118 | uint32_t surface_id; | 121 | uint32_t surface_id; |
119 | struct qxl_fence fence; /* per bo fence - list of releases */ | 122 | struct qxl_fence fence; /* per bo fence - list of releases */ |
120 | struct qxl_release *surf_create; | 123 | struct qxl_release *surf_create; |
121 | atomic_t reserve_count; | ||
122 | }; | 124 | }; |
123 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) | 125 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) |
126 | #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) | ||
124 | 127 | ||
125 | struct qxl_gem { | 128 | struct qxl_gem { |
126 | struct mutex mutex; | 129 | struct mutex mutex; |
@@ -128,12 +131,7 @@ struct qxl_gem { | |||
128 | }; | 131 | }; |
129 | 132 | ||
130 | struct qxl_bo_list { | 133 | struct qxl_bo_list { |
131 | struct list_head lhead; | 134 | struct ttm_validate_buffer tv; |
132 | struct qxl_bo *bo; | ||
133 | }; | ||
134 | |||
135 | struct qxl_reloc_list { | ||
136 | struct list_head bos; | ||
137 | }; | 135 | }; |
138 | 136 | ||
139 | struct qxl_crtc { | 137 | struct qxl_crtc { |
@@ -195,10 +193,20 @@ enum { | |||
195 | struct qxl_release { | 193 | struct qxl_release { |
196 | int id; | 194 | int id; |
197 | int type; | 195 | int type; |
198 | int bo_count; | ||
199 | uint32_t release_offset; | 196 | uint32_t release_offset; |
200 | uint32_t surface_release_id; | 197 | uint32_t surface_release_id; |
201 | struct qxl_bo *bos[QXL_MAX_RES]; | 198 | struct ww_acquire_ctx ticket; |
199 | struct list_head bos; | ||
200 | }; | ||
201 | |||
202 | struct qxl_drm_chunk { | ||
203 | struct list_head head; | ||
204 | struct qxl_bo *bo; | ||
205 | }; | ||
206 | |||
207 | struct qxl_drm_image { | ||
208 | struct qxl_bo *bo; | ||
209 | struct list_head chunk_list; | ||
202 | }; | 210 | }; |
203 | 211 | ||
204 | struct qxl_fb_image { | 212 | struct qxl_fb_image { |
@@ -314,6 +322,7 @@ struct qxl_device { | |||
314 | struct workqueue_struct *gc_queue; | 322 | struct workqueue_struct *gc_queue; |
315 | struct work_struct gc_work; | 323 | struct work_struct gc_work; |
316 | 324 | ||
325 | struct work_struct fb_work; | ||
317 | }; | 326 | }; |
318 | 327 | ||
319 | /* forward declaration for QXL_INFO_IO */ | 328 | /* forward declaration for QXL_INFO_IO */ |
@@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma); | |||
433 | 442 | ||
434 | /* qxl image */ | 443 | /* qxl image */ |
435 | 444 | ||
436 | int qxl_image_create(struct qxl_device *qdev, | 445 | int qxl_image_init(struct qxl_device *qdev, |
437 | struct qxl_release *release, | 446 | struct qxl_release *release, |
438 | struct qxl_bo **image_bo, | 447 | struct qxl_drm_image *dimage, |
439 | const uint8_t *data, | 448 | const uint8_t *data, |
440 | int x, int y, int width, int height, | 449 | int x, int y, int width, int height, |
441 | int depth, int stride); | 450 | int depth, int stride); |
451 | int | ||
452 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
453 | struct qxl_release *release, | ||
454 | struct qxl_drm_image **image_ptr, | ||
455 | int height, int stride); | ||
456 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); | ||
457 | |||
442 | void qxl_update_screen(struct qxl_device *qxl); | 458 | void qxl_update_screen(struct qxl_device *qxl); |
443 | 459 | ||
444 | /* qxl io operations (qxl_cmd.c) */ | 460 | /* qxl io operations (qxl_cmd.c) */ |
@@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible | |||
459 | void qxl_io_flush_release(struct qxl_device *qdev); | 475 | void qxl_io_flush_release(struct qxl_device *qdev); |
460 | void qxl_io_flush_surfaces(struct qxl_device *qdev); | 476 | void qxl_io_flush_surfaces(struct qxl_device *qdev); |
461 | 477 | ||
462 | int qxl_release_reserve(struct qxl_device *qdev, | ||
463 | struct qxl_release *release, bool no_wait); | ||
464 | void qxl_release_unreserve(struct qxl_device *qdev, | ||
465 | struct qxl_release *release); | ||
466 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | 478 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, |
467 | struct qxl_release *release); | 479 | struct qxl_release *release); |
468 | void qxl_release_unmap(struct qxl_device *qdev, | 480 | void qxl_release_unmap(struct qxl_device *qdev, |
469 | struct qxl_release *release, | 481 | struct qxl_release *release, |
470 | union qxl_release_info *info); | 482 | union qxl_release_info *info); |
471 | /* | 483 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo); |
472 | * qxl_bo_add_resource. | 484 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr); |
473 | * | 485 | void qxl_release_backoff_reserve_list(struct qxl_release *release); |
474 | */ | 486 | void qxl_release_fence_buffer_objects(struct qxl_release *release); |
475 | void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); | ||
476 | 487 | ||
477 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 488 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
478 | enum qxl_surface_cmd_type surface_cmd_type, | 489 | enum qxl_surface_cmd_type surface_cmd_type, |
@@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | |||
481 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | 492 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, |
482 | int type, struct qxl_release **release, | 493 | int type, struct qxl_release **release, |
483 | struct qxl_bo **rbo); | 494 | struct qxl_bo **rbo); |
484 | int qxl_fence_releaseable(struct qxl_device *qdev, | 495 | |
485 | struct qxl_release *release); | ||
486 | int | 496 | int |
487 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 497 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
488 | uint32_t type, bool interruptible); | 498 | uint32_t type, bool interruptible); |
489 | int | 499 | int |
490 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 500 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
491 | uint32_t type, bool interruptible); | 501 | uint32_t type, bool interruptible); |
492 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 502 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
503 | struct qxl_release *release, | ||
504 | unsigned long size, | ||
493 | struct qxl_bo **_bo); | 505 | struct qxl_bo **_bo); |
494 | /* qxl drawing commands */ | 506 | /* qxl drawing commands */ |
495 | 507 | ||
@@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
510 | u32 sx, u32 sy, | 522 | u32 sx, u32 sy, |
511 | u32 dx, u32 dy); | 523 | u32 dx, u32 dy); |
512 | 524 | ||
513 | uint64_t | ||
514 | qxl_release_alloc(struct qxl_device *qdev, int type, | ||
515 | struct qxl_release **ret); | ||
516 | |||
517 | void qxl_release_free(struct qxl_device *qdev, | 525 | void qxl_release_free(struct qxl_device *qdev, |
518 | struct qxl_release *release); | 526 | struct qxl_release *release); |
519 | void qxl_release_add_res(struct qxl_device *qdev, | 527 | |
520 | struct qxl_release *release, | ||
521 | struct qxl_bo *bo); | ||
522 | /* used by qxl_debugfs_release */ | 528 | /* used by qxl_debugfs_release */ |
523 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 529 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
524 | uint64_t id); | 530 | uint64_t id); |
@@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein | |||
561 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); | 567 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); |
562 | 568 | ||
563 | /* qxl_fence.c */ | 569 | /* qxl_fence.c */ |
564 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); | 570 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id); |
565 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); | 571 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); |
566 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); | 572 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); |
567 | void qxl_fence_fini(struct qxl_fence *qfence); | 573 | void qxl_fence_fini(struct qxl_fence *qfence); |
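
The header diff shows the new shape of the release API: struct qxl_release carries a ww_acquire_ctx ticket and a list of ttm_validate_buffer-backed entries instead of a fixed bos[] array; the old reserve/unreserve/add_res/fence_releaseable entry points give way to qxl_release_list_add(), qxl_release_reserve_list(), qxl_release_backoff_reserve_list() and qxl_release_fence_buffer_objects(); struct qxl_drm_image pairs an image BO with its chunk list for qxl_image_init(); and the device gains the fb_work item used by qxl_fb.c below. A stubbed, compilable sketch of the intended call order (the qxl functions are re-implemented here as trivial stand-ins that only mirror the declared signatures):

    #include <stdbool.h>
    #include <stdio.h>

    /* trivial stand-ins so the ordering can be shown standalone */
    struct qxl_release { int nbos; };
    struct qxl_bo { int id; };

    static int qxl_release_list_add(struct qxl_release *r, struct qxl_bo *bo)
    {
        (void)bo;
        r->nbos++;
        return 0;
    }

    static int qxl_release_reserve_list(struct qxl_release *r, bool no_intr)
    {
        (void)no_intr;
        printf("all %d bo(s) reserved\n", r->nbos);
        return 0;
    }

    static void qxl_release_backoff_reserve_list(struct qxl_release *r)
    {
        printf("backed off %d bo(s)\n", r->nbos);
    }

    static void qxl_release_fence_buffer_objects(struct qxl_release *r)
    {
        printf("fenced and unreserved %d bo(s)\n", r->nbos);
    }

    static int submit(bool fail_after_reserve)
    {
        struct qxl_release release = { 0 };
        struct qxl_bo bo = { 1 };
        int ret;

        ret = qxl_release_list_add(&release, &bo);   /* collect objects */
        if (ret)
            return ret;

        ret = qxl_release_reserve_list(&release, true);
        if (ret)
            return ret;

        if (fail_after_reserve) {                    /* e.g. map or init failed */
            qxl_release_backoff_reserve_list(&release);
            return -1;
        }

        /* build the command while every bo is reserved, push it ... */
        qxl_release_fence_buffer_objects(&release);
        return 0;
    }

    int main(void)
    {
        submit(false);
        submit(true);
        return 0;
    }
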
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 76f39d88d684..88722f233430 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
@@ -37,12 +37,29 @@ | |||
37 | 37 | ||
38 | #define QXL_DIRTY_DELAY (HZ / 30) | 38 | #define QXL_DIRTY_DELAY (HZ / 30) |
39 | 39 | ||
40 | #define QXL_FB_OP_FILLRECT 1 | ||
41 | #define QXL_FB_OP_COPYAREA 2 | ||
42 | #define QXL_FB_OP_IMAGEBLIT 3 | ||
43 | |||
44 | struct qxl_fb_op { | ||
45 | struct list_head head; | ||
46 | int op_type; | ||
47 | union { | ||
48 | struct fb_fillrect fr; | ||
49 | struct fb_copyarea ca; | ||
50 | struct fb_image ib; | ||
51 | } op; | ||
52 | void *img_data; | ||
53 | }; | ||
54 | |||
40 | struct qxl_fbdev { | 55 | struct qxl_fbdev { |
41 | struct drm_fb_helper helper; | 56 | struct drm_fb_helper helper; |
42 | struct qxl_framebuffer qfb; | 57 | struct qxl_framebuffer qfb; |
43 | struct list_head fbdev_list; | 58 | struct list_head fbdev_list; |
44 | struct qxl_device *qdev; | 59 | struct qxl_device *qdev; |
45 | 60 | ||
61 | spinlock_t delayed_ops_lock; | ||
62 | struct list_head delayed_ops; | ||
46 | void *shadow; | 63 | void *shadow; |
47 | int size; | 64 | int size; |
48 | 65 | ||
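
qxl_fb.c gains a small deferred-operation queue: struct qxl_fb_op records a fillrect, copyarea or imageblit (plus a private copy of the image bits for blits), and the fbdev hooks append these records to delayed_ops under a spinlock whenever they are called from a context that may not sleep, leaving the fb work item to replay them later. A compact userspace model of that queue (single-threaded, so the spinlock and kernel types are left out):

    /* Model of the delayed-op queue: records are appended when drawing
     * directly is not allowed, and replayed later in one batch. */
    #include <stdio.h>
    #include <stdlib.h>

    enum op_type { OP_FILLRECT = 1, OP_COPYAREA, OP_IMAGEBLIT };

    struct fb_op {
        struct fb_op *next;
        enum op_type type;
    };

    static struct fb_op *head;
    static struct fb_op **tail = &head;

    static void queue_op(enum op_type type)
    {
        struct fb_op *op = calloc(1, sizeof(*op));

        if (!op)
            return;             /* best effort, like GFP_ATOMIC | __GFP_NOWARN */
        op->type = type;
        *tail = op;
        tail = &op->next;
    }

    static void drain_ops(void)
    {
        while (head) {
            struct fb_op *op = head;

            head = op->next;
            printf("replaying op %d\n", op->type);
            free(op);
        }
        tail = &head;
    }

    int main(void)
    {
        queue_op(OP_FILLRECT);
        queue_op(OP_IMAGEBLIT);
        drain_ops();            /* what the fb work item does once it can sleep */
        return 0;
    }
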
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = { | |||
164 | .deferred_io = qxl_deferred_io, | 181 | .deferred_io = qxl_deferred_io, |
165 | }; | 182 | }; |
166 | 183 | ||
167 | static void qxl_fb_fillrect(struct fb_info *info, | 184 | static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev, |
168 | const struct fb_fillrect *fb_rect) | 185 | const struct fb_fillrect *fb_rect) |
186 | { | ||
187 | struct qxl_fb_op *op; | ||
188 | unsigned long flags; | ||
189 | |||
190 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
191 | if (!op) | ||
192 | return; | ||
193 | |||
194 | op->op.fr = *fb_rect; | ||
195 | op->img_data = NULL; | ||
196 | op->op_type = QXL_FB_OP_FILLRECT; | ||
197 | |||
198 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
199 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
200 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
201 | } | ||
202 | |||
203 | static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev, | ||
204 | const struct fb_copyarea *fb_copy) | ||
205 | { | ||
206 | struct qxl_fb_op *op; | ||
207 | unsigned long flags; | ||
208 | |||
209 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
210 | if (!op) | ||
211 | return; | ||
212 | |||
213 | op->op.ca = *fb_copy; | ||
214 | op->img_data = NULL; | ||
215 | op->op_type = QXL_FB_OP_COPYAREA; | ||
216 | |||
217 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
218 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
219 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
220 | } | ||
221 | |||
222 | static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev, | ||
223 | const struct fb_image *fb_image) | ||
224 | { | ||
225 | struct qxl_fb_op *op; | ||
226 | unsigned long flags; | ||
227 | uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1); | ||
228 | |||
229 | op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN); | ||
230 | if (!op) | ||
231 | return; | ||
232 | |||
233 | op->op.ib = *fb_image; | ||
234 | op->img_data = (void *)(op + 1); | ||
235 | op->op_type = QXL_FB_OP_IMAGEBLIT; | ||
236 | |||
237 | memcpy(op->img_data, fb_image->data, size); | ||
238 | |||
239 | op->op.ib.data = op->img_data; | ||
240 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
241 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
242 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
243 | } | ||
244 | |||
245 | static void qxl_fb_fillrect_internal(struct fb_info *info, | ||
246 | const struct fb_fillrect *fb_rect) | ||
169 | { | 247 | { |
170 | struct qxl_fbdev *qfbdev = info->par; | 248 | struct qxl_fbdev *qfbdev = info->par; |
171 | struct qxl_device *qdev = qfbdev->qdev; | 249 | struct qxl_device *qdev = qfbdev->qdev; |
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info, | |||
203 | qxl_draw_fill_rec.rect = rect; | 281 | qxl_draw_fill_rec.rect = rect; |
204 | qxl_draw_fill_rec.color = color; | 282 | qxl_draw_fill_rec.color = color; |
205 | qxl_draw_fill_rec.rop = rop; | 283 | qxl_draw_fill_rec.rop = rop; |
284 | |||
285 | qxl_draw_fill(&qxl_draw_fill_rec); | ||
286 | } | ||
287 | |||
288 | static void qxl_fb_fillrect(struct fb_info *info, | ||
289 | const struct fb_fillrect *fb_rect) | ||
290 | { | ||
291 | struct qxl_fbdev *qfbdev = info->par; | ||
292 | struct qxl_device *qdev = qfbdev->qdev; | ||
293 | |||
206 | if (!drm_can_sleep()) { | 294 | if (!drm_can_sleep()) { |
207 | qxl_io_log(qdev, | 295 | qxl_fb_delayed_fillrect(qfbdev, fb_rect); |
208 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | 296 | schedule_work(&qdev->fb_work); |
209 | __func__); | ||
210 | return; | 297 | return; |
211 | } | 298 | } |
212 | qxl_draw_fill(&qxl_draw_fill_rec); | 299 | /* make sure any previous work is done */ |
300 | flush_work(&qdev->fb_work); | ||
301 | qxl_fb_fillrect_internal(info, fb_rect); | ||
213 | } | 302 | } |
214 | 303 | ||
215 | static void qxl_fb_copyarea(struct fb_info *info, | 304 | static void qxl_fb_copyarea_internal(struct fb_info *info, |
216 | const struct fb_copyarea *region) | 305 | const struct fb_copyarea *region) |
217 | { | 306 | { |
218 | struct qxl_fbdev *qfbdev = info->par; | 307 | struct qxl_fbdev *qfbdev = info->par; |
219 | 308 | ||
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info, | |||
223 | region->dx, region->dy); | 312 | region->dx, region->dy); |
224 | } | 313 | } |
225 | 314 | ||
315 | static void qxl_fb_copyarea(struct fb_info *info, | ||
316 | const struct fb_copyarea *region) | ||
317 | { | ||
318 | struct qxl_fbdev *qfbdev = info->par; | ||
319 | struct qxl_device *qdev = qfbdev->qdev; | ||
320 | |||
321 | if (!drm_can_sleep()) { | ||
322 | qxl_fb_delayed_copyarea(qfbdev, region); | ||
323 | schedule_work(&qdev->fb_work); | ||
324 | return; | ||
325 | } | ||
326 | /* make sure any previous work is done */ | ||
327 | flush_work(&qdev->fb_work); | ||
328 | qxl_fb_copyarea_internal(info, region); | ||
329 | } | ||
330 | |||
226 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) | 331 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) |
227 | { | 332 | { |
228 | qxl_draw_opaque_fb(qxl_fb_image, 0); | 333 | qxl_draw_opaque_fb(qxl_fb_image, 0); |
229 | } | 334 | } |
230 | 335 | ||
336 | static void qxl_fb_imageblit_internal(struct fb_info *info, | ||
337 | const struct fb_image *image) | ||
338 | { | ||
339 | struct qxl_fbdev *qfbdev = info->par; | ||
340 | struct qxl_fb_image qxl_fb_image; | ||
341 | |||
342 | /* ensure proper order of rendering operations - TODO: must do this | ||
343 | * for everything. */ | ||
344 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | ||
345 | qxl_fb_imageblit_safe(&qxl_fb_image); | ||
346 | } | ||
347 | |||
231 | static void qxl_fb_imageblit(struct fb_info *info, | 348 | static void qxl_fb_imageblit(struct fb_info *info, |
232 | const struct fb_image *image) | 349 | const struct fb_image *image) |
233 | { | 350 | { |
234 | struct qxl_fbdev *qfbdev = info->par; | 351 | struct qxl_fbdev *qfbdev = info->par; |
235 | struct qxl_device *qdev = qfbdev->qdev; | 352 | struct qxl_device *qdev = qfbdev->qdev; |
236 | struct qxl_fb_image qxl_fb_image; | ||
237 | 353 | ||
238 | if (!drm_can_sleep()) { | 354 | if (!drm_can_sleep()) { |
239 | /* we cannot do any ttm_bo allocation since that will fail on | 355 | qxl_fb_delayed_imageblit(qfbdev, image); |
240 | * ioremap_wc..__get_vm_area_node, so queue the work item | 356 | schedule_work(&qdev->fb_work); |
241 | * instead This can happen from printk inside an interrupt | ||
242 | * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */ | ||
243 | qxl_io_log(qdev, | ||
244 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | ||
245 | __func__); | ||
246 | return; | 357 | return; |
247 | } | 358 | } |
359 | /* make sure any previous work is done */ | ||
360 | flush_work(&qdev->fb_work); | ||
361 | qxl_fb_imageblit_internal(info, image); | ||
362 | } | ||
248 | 363 | ||
249 | /* ensure proper order of rendering operations - TODO: must do this | 364 | static void qxl_fb_work(struct work_struct *work) |
250 | * for everything. */ | 365 | { |
251 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | 366 | struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); |
252 | qxl_fb_imageblit_safe(&qxl_fb_image); | 367 | unsigned long flags; |
368 | struct qxl_fb_op *entry, *tmp; | ||
369 | struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev; | ||
370 | |||
371 | /* since the irq context just adds entries to the end of the | ||
372 | list, dropping the lock should be fine, as entry isn't modified | ||
373 | in the operation code */ | ||
374 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
375 | list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) { | ||
376 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
377 | switch (entry->op_type) { | ||
378 | case QXL_FB_OP_FILLRECT: | ||
379 | qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr); | ||
380 | break; | ||
381 | case QXL_FB_OP_COPYAREA: | ||
382 | qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca); | ||
383 | break; | ||
384 | case QXL_FB_OP_IMAGEBLIT: | ||
385 | qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib); | ||
386 | break; | ||
387 | } | ||
388 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
389 | list_del(&entry->head); | ||
390 | kfree(entry); | ||
391 | } | ||
392 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
253 | } | 393 | } |
254 | 394 | ||
255 | int qxl_fb_init(struct qxl_device *qdev) | 395 | int qxl_fb_init(struct qxl_device *qdev) |
256 | { | 396 | { |
397 | INIT_WORK(&qdev->fb_work, qxl_fb_work); | ||
257 | return 0; | 398 | return 0; |
258 | } | 399 | } |
259 | 400 | ||
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev) | |||
536 | qfbdev->qdev = qdev; | 677 | qfbdev->qdev = qdev; |
537 | qdev->mode_info.qfbdev = qfbdev; | 678 | qdev->mode_info.qfbdev = qfbdev; |
538 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; | 679 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; |
539 | 680 | spin_lock_init(&qfbdev->delayed_ops_lock); | |
681 | INIT_LIST_HEAD(&qfbdev->delayed_ops); | ||
540 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, | 682 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, |
541 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, | 683 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, |
542 | QXLFB_CONN_LIMIT); | 684 | QXLFB_CONN_LIMIT); |
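
The qxl_fb.c changes above replace the old "give up when we cannot sleep" paths with a deferral scheme: fbcon calls that arrive in atomic context are queued on qfbdev->delayed_ops and replayed later from qxl_fb_work(). A minimal sketch of the pattern, reconstructed from the fields this diff uses (struct qxl_fb_op itself is defined in a header not shown here, so the field order and the qxl_fb_queue_op() helper below are illustrative only):

struct qxl_fb_op {
	struct list_head head;
	int op_type;                    /* QXL_FB_OP_FILLRECT / _COPYAREA / _IMAGEBLIT */
	union {
		struct fb_fillrect fr;
		struct fb_copyarea ca;
		struct fb_image ib;
	} op;
	void *img_data;                 /* imageblit pixels, stored right after the struct */
};

static void qxl_fb_queue_op(struct qxl_fbdev *qfbdev, struct qxl_fb_op *op)
{
	unsigned long flags;

	/* irq-safe: this can run from a printk in interrupt context */
	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_add_tail(&op->head, &qfbdev->delayed_ops);
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
	/* the caller then schedules qdev->fb_work to drain the list */
}

The allocations use GFP_ATOMIC | __GFP_NOWARN and the operation is simply dropped on failure, trading a possibly stale console region for never sleeping in atomic context.
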
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c index 63c6715ad385..ae59e91cfb9a 100644 --- a/drivers/gpu/drm/qxl/qxl_fence.c +++ b/drivers/gpu/drm/qxl/qxl_fence.c | |||
@@ -49,17 +49,11 @@ | |||
49 | 49 | ||
50 | For some reason every so often qxl hw fails to release, things go wrong. | 50 | For some reason every so often qxl hw fails to release, things go wrong. |
51 | */ | 51 | */ |
52 | 52 | /* must be called with the fence lock held */ | |
53 | 53 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id) | |
54 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id) | ||
55 | { | 54 | { |
56 | struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); | ||
57 | |||
58 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
59 | radix_tree_insert(&qfence->tree, rel_id, qfence); | 55 | radix_tree_insert(&qfence->tree, rel_id, qfence); |
60 | qfence->num_active_releases++; | 56 | qfence->num_active_releases++; |
61 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
62 | return 0; | ||
63 | } | 57 | } |
64 | 58 | ||
65 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) | 59 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) |
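
qxl_fence_add_release() loses its internal locking and becomes qxl_fence_add_release_locked(); the caller is now responsible for holding the TTM fence lock. A sketch of the expected calling convention (the real caller is qxl_release_fence_buffer_objects() in the qxl_release.c hunk further down, which takes the lock once for a whole list of buffers):

	spin_lock(&bo->tbo.bdev->fence_lock);
	qxl_fence_add_release_locked(&bo->fence, release->id);
	spin_unlock(&bo->tbo.bdev->fence_lock);
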
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a235693aabba..25e1777fb0a2 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c | |||
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, | |||
55 | /* At least align on page size */ | 55 | /* At least align on page size */ |
56 | if (alignment < PAGE_SIZE) | 56 | if (alignment < PAGE_SIZE) |
57 | alignment = PAGE_SIZE; | 57 | alignment = PAGE_SIZE; |
58 | r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); | 58 | r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); |
59 | if (r) { | 59 | if (r) { |
60 | if (r != -ERESTARTSYS) | 60 | if (r != -ERESTARTSYS) |
61 | DRM_ERROR( | 61 | DRM_ERROR( |
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index cf856206996b..7fbcc35e8ad3 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c | |||
@@ -30,31 +30,100 @@ | |||
30 | #include "qxl_object.h" | 30 | #include "qxl_object.h" |
31 | 31 | ||
32 | static int | 32 | static int |
33 | qxl_image_create_helper(struct qxl_device *qdev, | 33 | qxl_allocate_chunk(struct qxl_device *qdev, |
34 | struct qxl_release *release, | ||
35 | struct qxl_drm_image *image, | ||
36 | unsigned int chunk_size) | ||
37 | { | ||
38 | struct qxl_drm_chunk *chunk; | ||
39 | int ret; | ||
40 | |||
41 | chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); | ||
42 | if (!chunk) | ||
43 | return -ENOMEM; | ||
44 | |||
45 | ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); | ||
46 | if (ret) { | ||
47 | kfree(chunk); | ||
48 | return ret; | ||
49 | } | ||
50 | |||
51 | list_add_tail(&chunk->head, &image->chunk_list); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | int | ||
56 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
34 | struct qxl_release *release, | 57 | struct qxl_release *release, |
35 | struct qxl_bo **image_bo, | 58 | struct qxl_drm_image **image_ptr, |
36 | const uint8_t *data, | 59 | int height, int stride) |
37 | int width, int height, | 60 | { |
38 | int depth, unsigned int hash, | 61 | struct qxl_drm_image *image; |
39 | int stride) | 62 | int ret; |
63 | |||
64 | image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL); | ||
65 | if (!image) | ||
66 | return -ENOMEM; | ||
67 | |||
68 | INIT_LIST_HEAD(&image->chunk_list); | ||
69 | |||
70 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); | ||
71 | if (ret) { | ||
72 | kfree(image); | ||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); | ||
77 | if (ret) { | ||
78 | qxl_bo_unref(&image->bo); | ||
79 | kfree(image); | ||
80 | return ret; | ||
81 | } | ||
82 | *image_ptr = image; | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) | ||
40 | { | 87 | { |
88 | struct qxl_drm_chunk *chunk, *tmp; | ||
89 | |||
90 | list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { | ||
91 | qxl_bo_unref(&chunk->bo); | ||
92 | kfree(chunk); | ||
93 | } | ||
94 | |||
95 | qxl_bo_unref(&dimage->bo); | ||
96 | kfree(dimage); | ||
97 | } | ||
98 | |||
99 | static int | ||
100 | qxl_image_init_helper(struct qxl_device *qdev, | ||
101 | struct qxl_release *release, | ||
102 | struct qxl_drm_image *dimage, | ||
103 | const uint8_t *data, | ||
104 | int width, int height, | ||
105 | int depth, unsigned int hash, | ||
106 | int stride) | ||
107 | { | ||
108 | struct qxl_drm_chunk *drv_chunk; | ||
41 | struct qxl_image *image; | 109 | struct qxl_image *image; |
42 | struct qxl_data_chunk *chunk; | 110 | struct qxl_data_chunk *chunk; |
43 | int i; | 111 | int i; |
44 | int chunk_stride; | 112 | int chunk_stride; |
45 | int linesize = width * depth / 8; | 113 | int linesize = width * depth / 8; |
46 | struct qxl_bo *chunk_bo; | 114 | struct qxl_bo *chunk_bo, *image_bo; |
47 | int ret; | ||
48 | void *ptr; | 115 | void *ptr; |
49 | /* Chunk */ | 116 | /* Chunk */ |
50 | /* FIXME: Check integer overflow */ | 117 | /* FIXME: Check integer overflow */ |
51 | /* TODO: variable number of chunks */ | 118 | /* TODO: variable number of chunks */ |
119 | |||
120 | drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head); | ||
121 | |||
122 | chunk_bo = drv_chunk->bo; | ||
52 | chunk_stride = stride; /* TODO: should use linesize, but it renders | 123 | chunk_stride = stride; /* TODO: should use linesize, but it renders |
53 | wrong (check the bitmaps are sent correctly | 124 | wrong (check the bitmaps are sent correctly |
54 | first) */ | 125 | first) */ |
55 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, | 126 | |
56 | &chunk_bo); | ||
57 | |||
58 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); | 127 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); |
59 | chunk = ptr; | 128 | chunk = ptr; |
60 | chunk->data_size = height * chunk_stride; | 129 | chunk->data_size = height * chunk_stride; |
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
102 | while (remain > 0) { | 171 | while (remain > 0) { |
103 | page_base = out_offset & PAGE_MASK; | 172 | page_base = out_offset & PAGE_MASK; |
104 | page_offset = offset_in_page(out_offset); | 173 | page_offset = offset_in_page(out_offset); |
105 | |||
106 | size = min((int)(PAGE_SIZE - page_offset), remain); | 174 | size = min((int)(PAGE_SIZE - page_offset), remain); |
107 | 175 | ||
108 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); | 176 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); |
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
116 | } | 184 | } |
117 | } | 185 | } |
118 | } | 186 | } |
119 | |||
120 | |||
121 | qxl_bo_kunmap(chunk_bo); | 187 | qxl_bo_kunmap(chunk_bo); |
122 | 188 | ||
123 | /* Image */ | 189 | image_bo = dimage->bo; |
124 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); | 190 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); |
125 | |||
126 | ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0); | ||
127 | image = ptr; | 191 | image = ptr; |
128 | 192 | ||
129 | image->descriptor.id = 0; | 193 | image->descriptor.id = 0; |
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
154 | image->u.bitmap.stride = chunk_stride; | 218 | image->u.bitmap.stride = chunk_stride; |
155 | image->u.bitmap.palette = 0; | 219 | image->u.bitmap.palette = 0; |
156 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); | 220 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); |
157 | qxl_release_add_res(qdev, release, chunk_bo); | ||
158 | qxl_bo_unreserve(chunk_bo); | ||
159 | qxl_bo_unref(&chunk_bo); | ||
160 | 221 | ||
161 | qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); | 222 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); |
162 | 223 | ||
163 | return 0; | 224 | return 0; |
164 | } | 225 | } |
165 | 226 | ||
166 | int qxl_image_create(struct qxl_device *qdev, | 227 | int qxl_image_init(struct qxl_device *qdev, |
167 | struct qxl_release *release, | 228 | struct qxl_release *release, |
168 | struct qxl_bo **image_bo, | 229 | struct qxl_drm_image *dimage, |
169 | const uint8_t *data, | 230 | const uint8_t *data, |
170 | int x, int y, int width, int height, | 231 | int x, int y, int width, int height, |
171 | int depth, int stride) | 232 | int depth, int stride) |
172 | { | 233 | { |
173 | data += y * stride + x * (depth / 8); | 234 | data += y * stride + x * (depth / 8); |
174 | return qxl_image_create_helper(qdev, release, image_bo, data, | 235 | return qxl_image_init_helper(qdev, release, dimage, data, |
175 | width, height, depth, 0, stride); | 236 | width, height, depth, 0, stride); |
176 | } | 237 | } |
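
qxl_image_create() is split into an allocation phase and an initialisation phase so that the image and chunk BOs can be reserved together with everything else on the release before any data is written. A sketch of the intended lifecycle (the actual caller lives in qxl_draw.c, which this diff does not show, so the surrounding steps are abbreviated):

	struct qxl_drm_image *dimage;
	int ret;

	ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
	if (ret)
		return ret;

	/* ...reserve and validate the release's BO list... */

	ret = qxl_image_init(qdev, release, dimage, data, 0, 0,
			     width, height, depth, stride);

	/* ...push the drawable and fence the buffers... */

	qxl_image_free_objects(qdev, dimage);
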
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 27f45e49250d..6de33563d6f1 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c | |||
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data, | |||
68 | &qxl_map->offset); | 68 | &qxl_map->offset); |
69 | } | 69 | } |
70 | 70 | ||
71 | struct qxl_reloc_info { | ||
72 | int type; | ||
73 | struct qxl_bo *dst_bo; | ||
74 | uint32_t dst_offset; | ||
75 | struct qxl_bo *src_bo; | ||
76 | int src_offset; | ||
77 | }; | ||
78 | |||
71 | /* | 79 | /* |
72 | * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's | 80 | * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's |
73 | * are on vram). | 81 | * are on vram). |
74 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) | 82 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) |
75 | */ | 83 | */ |
76 | static void | 84 | static void |
77 | apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 85 | apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
78 | struct qxl_bo *src, uint64_t src_off) | ||
79 | { | 86 | { |
80 | void *reloc_page; | 87 | void *reloc_page; |
81 | 88 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); | |
82 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 89 | *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, |
83 | *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, | 90 | info->src_bo, |
84 | src, src_off); | 91 | info->src_offset); |
85 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 92 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
86 | } | 93 | } |
87 | 94 | ||
88 | static void | 95 | static void |
89 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 96 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
90 | struct qxl_bo *src) | ||
91 | { | 97 | { |
92 | uint32_t id = 0; | 98 | uint32_t id = 0; |
93 | void *reloc_page; | 99 | void *reloc_page; |
94 | 100 | ||
95 | if (src && !src->is_primary) | 101 | if (info->src_bo && !info->src_bo->is_primary) |
96 | id = src->surface_id; | 102 | id = info->src_bo->surface_id; |
97 | 103 | ||
98 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 104 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); |
99 | *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; | 105 | *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; |
100 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 106 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
101 | } | 107 | } |
102 | 108 | ||
103 | /* return holding the reference to this object */ | 109 | /* return holding the reference to this object */ |
104 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | 110 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, |
105 | struct drm_file *file_priv, uint64_t handle, | 111 | struct drm_file *file_priv, uint64_t handle, |
106 | struct qxl_reloc_list *reloc_list) | 112 | struct qxl_release *release) |
107 | { | 113 | { |
108 | struct drm_gem_object *gobj; | 114 | struct drm_gem_object *gobj; |
109 | struct qxl_bo *qobj; | 115 | struct qxl_bo *qobj; |
110 | int ret; | 116 | int ret; |
111 | 117 | ||
112 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); | 118 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); |
113 | if (!gobj) { | 119 | if (!gobj) |
114 | DRM_ERROR("bad bo handle %lld\n", handle); | ||
115 | return NULL; | 120 | return NULL; |
116 | } | 121 | |
117 | qobj = gem_to_qxl_bo(gobj); | 122 | qobj = gem_to_qxl_bo(gobj); |
118 | 123 | ||
119 | ret = qxl_bo_list_add(reloc_list, qobj); | 124 | ret = qxl_release_list_add(release, qobj); |
120 | if (ret) | 125 | if (ret) |
121 | return NULL; | 126 | return NULL; |
122 | 127 | ||
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | |||
129 | * However, the command as passed from user space must *not* contain the initial | 134 | * However, the command as passed from user space must *not* contain the initial |
130 | * QXLReleaseInfo struct (first XXX bytes) | 135 | * QXLReleaseInfo struct (first XXX bytes) |
131 | */ | 136 | */ |
132 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | 137 | static int qxl_process_single_command(struct qxl_device *qdev, |
133 | struct drm_file *file_priv) | 138 | struct drm_qxl_command *cmd, |
139 | struct drm_file *file_priv) | ||
134 | { | 140 | { |
135 | struct qxl_device *qdev = dev->dev_private; | 141 | struct qxl_reloc_info *reloc_info; |
136 | struct drm_qxl_execbuffer *execbuffer = data; | 142 | int release_type; |
137 | struct drm_qxl_command user_cmd; | 143 | struct qxl_release *release; |
138 | int cmd_num; | 144 | struct qxl_bo *cmd_bo; |
139 | struct qxl_bo *reloc_src_bo; | ||
140 | struct qxl_bo *reloc_dst_bo; | ||
141 | struct drm_qxl_reloc reloc; | ||
142 | void *fb_cmd; | 145 | void *fb_cmd; |
143 | int i, ret; | 146 | int i, j, ret, num_relocs; |
144 | struct qxl_reloc_list reloc_list; | ||
145 | int unwritten; | 147 | int unwritten; |
146 | uint32_t reloc_dst_offset; | ||
147 | INIT_LIST_HEAD(&reloc_list.bos); | ||
148 | 148 | ||
149 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | 149 | switch (cmd->type) { |
150 | struct qxl_release *release; | 150 | case QXL_CMD_DRAW: |
151 | struct qxl_bo *cmd_bo; | 151 | release_type = QXL_RELEASE_DRAWABLE; |
152 | int release_type; | 152 | break; |
153 | struct drm_qxl_command *commands = | 153 | case QXL_CMD_SURFACE: |
154 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | 154 | case QXL_CMD_CURSOR: |
155 | default: | ||
156 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
157 | return -EINVAL; | ||
158 | break; | ||
159 | } | ||
155 | 160 | ||
156 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | 161 | if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) |
157 | sizeof(user_cmd))) | 162 | return -EINVAL; |
158 | return -EFAULT; | ||
159 | switch (user_cmd.type) { | ||
160 | case QXL_CMD_DRAW: | ||
161 | release_type = QXL_RELEASE_DRAWABLE; | ||
162 | break; | ||
163 | case QXL_CMD_SURFACE: | ||
164 | case QXL_CMD_CURSOR: | ||
165 | default: | ||
166 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
167 | return -EINVAL; | ||
168 | break; | ||
169 | } | ||
170 | 163 | ||
171 | if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) | 164 | if (!access_ok(VERIFY_READ, |
172 | return -EINVAL; | 165 | (void *)(unsigned long)cmd->command, |
166 | cmd->command_size)) | ||
167 | return -EFAULT; | ||
173 | 168 | ||
174 | if (!access_ok(VERIFY_READ, | 169 | reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); |
175 | (void *)(unsigned long)user_cmd.command, | 170 | if (!reloc_info) |
176 | user_cmd.command_size)) | 171 | return -ENOMEM; |
177 | return -EFAULT; | ||
178 | 172 | ||
179 | ret = qxl_alloc_release_reserved(qdev, | 173 | ret = qxl_alloc_release_reserved(qdev, |
180 | sizeof(union qxl_release_info) + | 174 | sizeof(union qxl_release_info) + |
181 | user_cmd.command_size, | 175 | cmd->command_size, |
182 | release_type, | 176 | release_type, |
183 | &release, | 177 | &release, |
184 | &cmd_bo); | 178 | &cmd_bo); |
185 | if (ret) | 179 | if (ret) |
186 | return ret; | 180 | goto out_free_reloc; |
187 | 181 | ||
188 | /* TODO copy slow path code from i915 */ | 182 | /* TODO copy slow path code from i915 */ |
189 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); | 183 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); |
190 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); | 184 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); |
191 | 185 | ||
192 | { | 186 | { |
193 | struct qxl_drawable *draw = fb_cmd; | 187 | struct qxl_drawable *draw = fb_cmd; |
188 | draw->mm_time = qdev->rom->mm_clock; | ||
189 | } | ||
194 | 190 | ||
195 | draw->mm_time = qdev->rom->mm_clock; | 191 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); |
196 | } | 192 | if (unwritten) { |
197 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); | 193 | DRM_ERROR("got unwritten %d\n", unwritten); |
198 | if (unwritten) { | 194 | ret = -EFAULT; |
199 | DRM_ERROR("got unwritten %d\n", unwritten); | 195 | goto out_free_release; |
200 | qxl_release_unreserve(qdev, release); | 196 | } |
201 | qxl_release_free(qdev, release); | 197 | |
202 | return -EFAULT; | 198 | /* fill out reloc info structs */ |
199 | num_relocs = 0; | ||
200 | for (i = 0; i < cmd->relocs_num; ++i) { | ||
201 | struct drm_qxl_reloc reloc; | ||
202 | |||
203 | if (DRM_COPY_FROM_USER(&reloc, | ||
204 | &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], | ||
205 | sizeof(reloc))) { | ||
206 | ret = -EFAULT; | ||
207 | goto out_free_bos; | ||
203 | } | 208 | } |
204 | 209 | ||
205 | for (i = 0 ; i < user_cmd.relocs_num; ++i) { | 210 | /* add the bos to the list of bos to validate - |
206 | if (DRM_COPY_FROM_USER(&reloc, | 211 | need to validate first then process relocs? */ |
207 | &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], | 212 | if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) { |
208 | sizeof(reloc))) { | 213 | DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type); |
209 | qxl_bo_list_unreserve(&reloc_list, true); | ||
210 | qxl_release_unreserve(qdev, release); | ||
211 | qxl_release_free(qdev, release); | ||
212 | return -EFAULT; | ||
213 | } | ||
214 | 214 | ||
215 | /* add the bos to the list of bos to validate - | 215 | ret = -EINVAL; |
216 | need to validate first then process relocs? */ | 216 | goto out_free_bos; |
217 | if (reloc.dst_handle) { | 217 | } |
218 | reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, | 218 | reloc_info[i].type = reloc.reloc_type; |
219 | reloc.dst_handle, &reloc_list); | 219 | |
220 | if (!reloc_dst_bo) { | 220 | if (reloc.dst_handle) { |
221 | qxl_bo_list_unreserve(&reloc_list, true); | 221 | reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv, |
222 | qxl_release_unreserve(qdev, release); | 222 | reloc.dst_handle, release); |
223 | qxl_release_free(qdev, release); | 223 | if (!reloc_info[i].dst_bo) { |
224 | return -EINVAL; | 224 | ret = -EINVAL; |
225 | } | 225 | reloc_info[i].src_bo = NULL; |
226 | reloc_dst_offset = 0; | 226 | goto out_free_bos; |
227 | } else { | ||
228 | reloc_dst_bo = cmd_bo; | ||
229 | reloc_dst_offset = release->release_offset; | ||
230 | } | 227 | } |
231 | 228 | reloc_info[i].dst_offset = reloc.dst_offset; | |
232 | /* reserve and validate the reloc dst bo */ | 229 | } else { |
233 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { | 230 | reloc_info[i].dst_bo = cmd_bo; |
234 | reloc_src_bo = | 231 | reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset; |
235 | qxlhw_handle_to_bo(qdev, file_priv, | 232 | } |
236 | reloc.src_handle, &reloc_list); | 233 | num_relocs++; |
237 | if (!reloc_src_bo) { | 234 | |
238 | if (reloc_dst_bo != cmd_bo) | 235 | /* reserve and validate the reloc dst bo */ |
239 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 236 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { |
240 | qxl_bo_list_unreserve(&reloc_list, true); | 237 | reloc_info[i].src_bo = |
241 | qxl_release_unreserve(qdev, release); | 238 | qxlhw_handle_to_bo(qdev, file_priv, |
242 | qxl_release_free(qdev, release); | 239 | reloc.src_handle, release); |
243 | return -EINVAL; | 240 | if (!reloc_info[i].src_bo) { |
244 | } | 241 | if (reloc_info[i].dst_bo != cmd_bo) |
245 | } else | 242 | drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base); |
246 | reloc_src_bo = NULL; | 243 | ret = -EINVAL; |
247 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { | 244 | goto out_free_bos; |
248 | apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, | ||
249 | reloc_src_bo, reloc.src_offset); | ||
250 | } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) { | ||
251 | apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo); | ||
252 | } else { | ||
253 | DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type); | ||
254 | return -EINVAL; | ||
255 | } | 245 | } |
246 | reloc_info[i].src_offset = reloc.src_offset; | ||
247 | } else { | ||
248 | reloc_info[i].src_bo = NULL; | ||
249 | reloc_info[i].src_offset = 0; | ||
250 | } | ||
251 | } | ||
256 | 252 | ||
257 | if (reloc_src_bo && reloc_src_bo != cmd_bo) { | 253 | /* validate all buffers */ |
258 | qxl_release_add_res(qdev, release, reloc_src_bo); | 254 | ret = qxl_release_reserve_list(release, false); |
259 | drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); | 255 | if (ret) |
260 | } | 256 | goto out_free_bos; |
261 | 257 | ||
262 | if (reloc_dst_bo != cmd_bo) | 258 | for (i = 0; i < cmd->relocs_num; ++i) { |
263 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 259 | if (reloc_info[i].type == QXL_RELOC_TYPE_BO) |
264 | } | 260 | apply_reloc(qdev, &reloc_info[i]); |
265 | qxl_fence_releaseable(qdev, release); | 261 | else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF) |
262 | apply_surf_reloc(qdev, &reloc_info[i]); | ||
263 | } | ||
266 | 264 | ||
267 | ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); | 265 | ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); |
268 | if (ret == -ERESTARTSYS) { | 266 | if (ret) |
269 | qxl_release_unreserve(qdev, release); | 267 | qxl_release_backoff_reserve_list(release); |
270 | qxl_release_free(qdev, release); | 268 | else |
271 | qxl_bo_list_unreserve(&reloc_list, true); | 269 | qxl_release_fence_buffer_objects(release); |
270 | |||
271 | out_free_bos: | ||
272 | for (j = 0; j < num_relocs; j++) { | ||
273 | if (reloc_info[j].dst_bo != cmd_bo) | ||
274 | drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base); | ||
275 | if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo) | ||
276 | drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base); | ||
277 | } | ||
278 | out_free_release: | ||
279 | if (ret) | ||
280 | qxl_release_free(qdev, release); | ||
281 | out_free_reloc: | ||
282 | kfree(reloc_info); | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | ||
287 | struct drm_file *file_priv) | ||
288 | { | ||
289 | struct qxl_device *qdev = dev->dev_private; | ||
290 | struct drm_qxl_execbuffer *execbuffer = data; | ||
291 | struct drm_qxl_command user_cmd; | ||
292 | int cmd_num; | ||
293 | int ret; | ||
294 | |||
295 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | ||
296 | |||
297 | struct drm_qxl_command *commands = | ||
298 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | ||
299 | |||
300 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | ||
301 | sizeof(user_cmd))) | ||
302 | return -EFAULT; | ||
303 | |||
304 | ret = qxl_process_single_command(qdev, &user_cmd, file_priv); | ||
305 | if (ret) | ||
272 | return ret; | 306 | return ret; |
273 | } | ||
274 | qxl_release_unreserve(qdev, release); | ||
275 | } | 307 | } |
276 | qxl_bo_list_unreserve(&reloc_list, 0); | ||
277 | return 0; | 308 | return 0; |
278 | } | 309 | } |
279 | 310 | ||
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, | |||
305 | goto out; | 336 | goto out; |
306 | 337 | ||
307 | if (!qobj->pin_count) { | 338 | if (!qobj->pin_count) { |
308 | qxl_ttm_placement_from_domain(qobj, qobj->type); | 339 | qxl_ttm_placement_from_domain(qobj, qobj->type, false); |
309 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, | 340 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, |
310 | true, false); | 341 | true, false); |
311 | if (unlikely(ret)) | 342 | if (unlikely(ret)) |
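
The execbuffer rewrite separates gathering relocation information from applying it: the per-command loop fills a reloc_info[] array first, qxl_release_reserve_list() then reserves and validates every BO on the release in a single ttm_eu pass, and only afterwards are the relocations applied. Condensed into one hypothetical helper (fill_reloc_info() does not exist in the patch; unwinding of the dst BO reference on error is trimmed), the per-reloc step looks like:

static int fill_reloc_info(struct qxl_device *qdev, struct drm_file *file_priv,
			   struct qxl_release *release, struct qxl_bo *cmd_bo,
			   const struct drm_qxl_reloc *reloc,
			   struct qxl_reloc_info *info)
{
	info->type = reloc->reloc_type;

	if (reloc->dst_handle) {
		info->dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
						  reloc->dst_handle, release);
		if (!info->dst_bo)
			return -EINVAL;
		info->dst_offset = reloc->dst_offset;
	} else {
		/* no handle: the reloc patches the command BO itself */
		info->dst_bo = cmd_bo;
		info->dst_offset = reloc->dst_offset + release->release_offset;
	}

	if (reloc->reloc_type == QXL_RELOC_TYPE_BO || reloc->src_handle > 0) {
		info->src_bo = qxlhw_handle_to_bo(qdev, file_priv,
						  reloc->src_handle, release);
		info->src_offset = reloc->src_offset;
		return info->src_bo ? 0 : -EINVAL;
	}

	info->src_bo = NULL;
	info->src_offset = 0;
	return 0;
}
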
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 1191fe7788c9..aa161cddd87e 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c | |||
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) | |||
51 | return false; | 51 | return false; |
52 | } | 52 | } |
53 | 53 | ||
54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | 54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) |
55 | { | 55 | { |
56 | u32 c = 0; | 56 | u32 c = 0; |
57 | u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0; | ||
57 | 58 | ||
58 | qbo->placement.fpfn = 0; | 59 | qbo->placement.fpfn = 0; |
59 | qbo->placement.lpfn = 0; | 60 | qbo->placement.lpfn = 0; |
60 | qbo->placement.placement = qbo->placements; | 61 | qbo->placement.placement = qbo->placements; |
61 | qbo->placement.busy_placement = qbo->placements; | 62 | qbo->placement.busy_placement = qbo->placements; |
62 | if (domain == QXL_GEM_DOMAIN_VRAM) | 63 | if (domain == QXL_GEM_DOMAIN_VRAM) |
63 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; | 64 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; |
64 | if (domain == QXL_GEM_DOMAIN_SURFACE) | 65 | if (domain == QXL_GEM_DOMAIN_SURFACE) |
65 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; | 66 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; |
66 | if (domain == QXL_GEM_DOMAIN_CPU) | 67 | if (domain == QXL_GEM_DOMAIN_CPU) |
67 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 68 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; |
68 | if (!c) | 69 | if (!c) |
69 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 70 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
70 | qbo->placement.num_placement = c; | 71 | qbo->placement.num_placement = c; |
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | |||
73 | 74 | ||
74 | 75 | ||
75 | int qxl_bo_create(struct qxl_device *qdev, | 76 | int qxl_bo_create(struct qxl_device *qdev, |
76 | unsigned long size, bool kernel, u32 domain, | 77 | unsigned long size, bool kernel, bool pinned, u32 domain, |
77 | struct qxl_surface *surf, | 78 | struct qxl_surface *surf, |
78 | struct qxl_bo **bo_ptr) | 79 | struct qxl_bo **bo_ptr) |
79 | { | 80 | { |
@@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev, | |||
99 | } | 100 | } |
100 | bo->gem_base.driver_private = NULL; | 101 | bo->gem_base.driver_private = NULL; |
101 | bo->type = domain; | 102 | bo->type = domain; |
102 | bo->pin_count = 0; | 103 | bo->pin_count = pinned ? 1 : 0; |
103 | bo->surface_id = 0; | 104 | bo->surface_id = 0; |
104 | qxl_fence_init(qdev, &bo->fence); | 105 | qxl_fence_init(qdev, &bo->fence); |
105 | INIT_LIST_HEAD(&bo->list); | 106 | INIT_LIST_HEAD(&bo->list); |
106 | atomic_set(&bo->reserve_count, 0); | 107 | |
107 | if (surf) | 108 | if (surf) |
108 | bo->surf = *surf; | 109 | bo->surf = *surf; |
109 | 110 | ||
110 | qxl_ttm_placement_from_domain(bo, domain); | 111 | qxl_ttm_placement_from_domain(bo, domain, pinned); |
111 | 112 | ||
112 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, | 113 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, |
113 | &bo->placement, 0, !kernel, NULL, size, | 114 | &bo->placement, 0, !kernel, NULL, size, |
@@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) | |||
228 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | 229 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) |
229 | { | 230 | { |
230 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; | 231 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; |
231 | int r, i; | 232 | int r; |
232 | 233 | ||
233 | if (bo->pin_count) { | 234 | if (bo->pin_count) { |
234 | bo->pin_count++; | 235 | bo->pin_count++; |
@@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | |||
236 | *gpu_addr = qxl_bo_gpu_offset(bo); | 237 | *gpu_addr = qxl_bo_gpu_offset(bo); |
237 | return 0; | 238 | return 0; |
238 | } | 239 | } |
239 | qxl_ttm_placement_from_domain(bo, domain); | 240 | qxl_ttm_placement_from_domain(bo, domain, true); |
240 | for (i = 0; i < bo->placement.num_placement; i++) | ||
241 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | ||
242 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 241 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
243 | if (likely(r == 0)) { | 242 | if (likely(r == 0)) { |
244 | bo->pin_count = 1; | 243 | bo->pin_count = 1; |
@@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) | |||
317 | return 0; | 316 | return 0; |
318 | } | 317 | } |
319 | 318 | ||
320 | void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed) | ||
321 | { | ||
322 | struct qxl_bo_list *entry, *sf; | ||
323 | |||
324 | list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) { | ||
325 | qxl_bo_unreserve(entry->bo); | ||
326 | list_del(&entry->lhead); | ||
327 | kfree(entry); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo) | ||
332 | { | ||
333 | struct qxl_bo_list *entry; | ||
334 | int ret; | ||
335 | |||
336 | list_for_each_entry(entry, &reloc_list->bos, lhead) { | ||
337 | if (entry->bo == bo) | ||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
342 | if (!entry) | ||
343 | return -ENOMEM; | ||
344 | |||
345 | entry->bo = bo; | ||
346 | list_add(&entry->lhead, &reloc_list->bos); | ||
347 | |||
348 | ret = qxl_bo_reserve(bo, false); | ||
349 | if (ret) | ||
350 | return ret; | ||
351 | |||
352 | if (!bo->pin_count) { | ||
353 | qxl_ttm_placement_from_domain(bo, bo->type); | ||
354 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
355 | true, false); | ||
356 | if (ret) | ||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | /* allocate a surface for reserved + validated buffers */ | ||
361 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
362 | if (ret) | ||
363 | return ret; | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | int qxl_surf_evict(struct qxl_device *qdev) | 319 | int qxl_surf_evict(struct qxl_device *qdev) |
368 | { | 320 | { |
369 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); | 321 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); |
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index ee7ad79ce781..8cb6167038e5 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h | |||
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, | |||
88 | 88 | ||
89 | extern int qxl_bo_create(struct qxl_device *qdev, | 89 | extern int qxl_bo_create(struct qxl_device *qdev, |
90 | unsigned long size, | 90 | unsigned long size, |
91 | bool kernel, u32 domain, | 91 | bool kernel, bool pinned, u32 domain, |
92 | struct qxl_surface *surf, | 92 | struct qxl_surface *surf, |
93 | struct qxl_bo **bo_ptr); | 93 | struct qxl_bo **bo_ptr); |
94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); | 94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); |
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); | |||
99 | extern void qxl_bo_unref(struct qxl_bo **bo); | 99 | extern void qxl_bo_unref(struct qxl_bo **bo); |
100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); | 100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); |
101 | extern int qxl_bo_unpin(struct qxl_bo *bo); | 101 | extern int qxl_bo_unpin(struct qxl_bo *bo); |
102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); | 102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); |
103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); | 103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); |
104 | 104 | ||
105 | extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo); | ||
106 | extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed); | ||
107 | #endif | 105 | #endif |
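
With the new pinned argument, qxl_ttm_placement_from_domain() ORs TTM_PL_FLAG_NO_EVICT into each placement and qxl_bo_create() starts the object with pin_count = 1, so the old reserve/pin/unreserve dance disappears. A caller that wants a pinned VRAM BO now simply does (mirroring qxl_release_bo_alloc() in the qxl_release.c hunk below; error handling kept minimal):

	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, PAGE_SIZE, false /* kernel */, true /* pinned */,
			    QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret)
		return ret;
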
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b443d6751d5f..b61449e52cd5 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
@@ -38,7 +38,8 @@ | |||
38 | 38 | ||
39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; | 39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; |
40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; | 40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; |
41 | uint64_t | 41 | |
42 | static uint64_t | ||
42 | qxl_release_alloc(struct qxl_device *qdev, int type, | 43 | qxl_release_alloc(struct qxl_device *qdev, int type, |
43 | struct qxl_release **ret) | 44 | struct qxl_release **ret) |
44 | { | 45 | { |
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type, | |||
53 | return 0; | 54 | return 0; |
54 | } | 55 | } |
55 | release->type = type; | 56 | release->type = type; |
56 | release->bo_count = 0; | ||
57 | release->release_offset = 0; | 57 | release->release_offset = 0; |
58 | release->surface_release_id = 0; | 58 | release->surface_release_id = 0; |
59 | INIT_LIST_HEAD(&release->bos); | ||
59 | 60 | ||
60 | idr_preload(GFP_KERNEL); | 61 | idr_preload(GFP_KERNEL); |
61 | spin_lock(&qdev->release_idr_lock); | 62 | spin_lock(&qdev->release_idr_lock); |
@@ -77,20 +78,20 @@ void | |||
77 | qxl_release_free(struct qxl_device *qdev, | 78 | qxl_release_free(struct qxl_device *qdev, |
78 | struct qxl_release *release) | 79 | struct qxl_release *release) |
79 | { | 80 | { |
80 | int i; | 81 | struct qxl_bo_list *entry, *tmp; |
81 | 82 | QXL_INFO(qdev, "release %d, type %d\n", release->id, | |
82 | QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, | 83 | release->type); |
83 | release->type, release->bo_count); | ||
84 | 84 | ||
85 | if (release->surface_release_id) | 85 | if (release->surface_release_id) |
86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); | 86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); |
87 | 87 | ||
88 | for (i = 0 ; i < release->bo_count; ++i) { | 88 | list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) { |
89 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
89 | QXL_INFO(qdev, "release %llx\n", | 90 | QXL_INFO(qdev, "release %llx\n", |
90 | release->bos[i]->tbo.addr_space_offset | 91 | entry->tv.bo->addr_space_offset |
91 | - DRM_FILE_OFFSET); | 92 | - DRM_FILE_OFFSET); |
92 | qxl_fence_remove_release(&release->bos[i]->fence, release->id); | 93 | qxl_fence_remove_release(&bo->fence, release->id); |
93 | qxl_bo_unref(&release->bos[i]); | 94 | qxl_bo_unref(&bo); |
94 | } | 95 | } |
95 | spin_lock(&qdev->release_idr_lock); | 96 | spin_lock(&qdev->release_idr_lock); |
96 | idr_remove(&qdev->release_idr, release->id); | 97 | idr_remove(&qdev->release_idr, release->id); |
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev, | |||
98 | kfree(release); | 99 | kfree(release); |
99 | } | 100 | } |
100 | 101 | ||
101 | void | ||
102 | qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release, | ||
103 | struct qxl_bo *bo) | ||
104 | { | ||
105 | int i; | ||
106 | for (i = 0; i < release->bo_count; i++) | ||
107 | if (release->bos[i] == bo) | ||
108 | return; | ||
109 | |||
110 | if (release->bo_count >= QXL_MAX_RES) { | ||
111 | DRM_ERROR("exceeded max resource on a qxl_release item\n"); | ||
112 | return; | ||
113 | } | ||
114 | release->bos[release->bo_count++] = qxl_bo_ref(bo); | ||
115 | } | ||
116 | |||
117 | static int qxl_release_bo_alloc(struct qxl_device *qdev, | 102 | static int qxl_release_bo_alloc(struct qxl_device *qdev, |
118 | struct qxl_bo **bo) | 103 | struct qxl_bo **bo) |
119 | { | 104 | { |
120 | int ret; | 105 | int ret; |
121 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, | 106 | /* pin release bo's, they are too messy to evict */ |
107 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, true, | ||
108 | QXL_GEM_DOMAIN_VRAM, NULL, | ||
122 | bo); | 109 | bo); |
123 | return ret; | 110 | return ret; |
124 | } | 111 | } |
125 | 112 | ||
126 | int qxl_release_reserve(struct qxl_device *qdev, | 113 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) |
127 | struct qxl_release *release, bool no_wait) | 114 | { |
115 | struct qxl_bo_list *entry; | ||
116 | |||
117 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
118 | if (entry->tv.bo == &bo->tbo) | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
123 | if (!entry) | ||
124 | return -ENOMEM; | ||
125 | |||
126 | qxl_bo_ref(bo); | ||
127 | entry->tv.bo = &bo->tbo; | ||
128 | list_add_tail(&entry->tv.head, &release->bos); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static int qxl_release_validate_bo(struct qxl_bo *bo) | ||
128 | { | 133 | { |
129 | int ret; | 134 | int ret; |
130 | if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { | 135 | |
131 | ret = qxl_bo_reserve(release->bos[0], no_wait); | 136 | if (!bo->pin_count) { |
137 | qxl_ttm_placement_from_domain(bo, bo->type, false); | ||
138 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
139 | true, false); | ||
132 | if (ret) | 140 | if (ret) |
133 | return ret; | 141 | return ret; |
134 | } | 142 | } |
143 | |||
144 | /* allocate a surface for reserved + validated buffers */ | ||
145 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
146 | if (ret) | ||
147 | return ret; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) | ||
152 | { | ||
153 | int ret; | ||
154 | struct qxl_bo_list *entry; | ||
155 | |||
156 | /* if there is only one object on the release it's the release itself; | ||
157 | since these objects are pinned there is no need to reserve */ | ||
158 | if (list_is_singular(&release->bos)) | ||
159 | return 0; | ||
160 | |||
161 | ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); | ||
162 | if (ret) | ||
163 | return ret; | ||
164 | |||
165 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
166 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
167 | |||
168 | ret = qxl_release_validate_bo(bo); | ||
169 | if (ret) { | ||
170 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
171 | return ret; | ||
172 | } | ||
173 | } | ||
135 | return 0; | 174 | return 0; |
136 | } | 175 | } |
137 | 176 | ||
138 | void qxl_release_unreserve(struct qxl_device *qdev, | 177 | void qxl_release_backoff_reserve_list(struct qxl_release *release) |
139 | struct qxl_release *release) | ||
140 | { | 178 | { |
141 | if (atomic_dec_and_test(&release->bos[0]->reserve_count)) | 179 | /* if there is only one object on the release it's the release itself; |
142 | qxl_bo_unreserve(release->bos[0]); | 180 | since these objects are pinned there is no need to reserve */ |
181 | if (list_is_singular(&release->bos)) | ||
182 | return; | ||
183 | |||
184 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
143 | } | 185 | } |
144 | 186 | ||
187 | |||
145 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 188 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
146 | enum qxl_surface_cmd_type surface_cmd_type, | 189 | enum qxl_surface_cmd_type surface_cmd_type, |
147 | struct qxl_release *create_rel, | 190 | struct qxl_release *create_rel, |
148 | struct qxl_release **release) | 191 | struct qxl_release **release) |
149 | { | 192 | { |
150 | int ret; | ||
151 | |||
152 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { | 193 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { |
153 | int idr_ret; | 194 | int idr_ret; |
195 | struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); | ||
154 | struct qxl_bo *bo; | 196 | struct qxl_bo *bo; |
155 | union qxl_release_info *info; | 197 | union qxl_release_info *info; |
156 | 198 | ||
157 | /* stash the release after the create command */ | 199 | /* stash the release after the create command */ |
158 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); | 200 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); |
159 | bo = qxl_bo_ref(create_rel->bos[0]); | 201 | bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); |
160 | 202 | ||
161 | (*release)->release_offset = create_rel->release_offset + 64; | 203 | (*release)->release_offset = create_rel->release_offset + 64; |
162 | 204 | ||
163 | qxl_release_add_res(qdev, *release, bo); | 205 | qxl_release_list_add(*release, bo); |
164 | 206 | ||
165 | ret = qxl_release_reserve(qdev, *release, false); | ||
166 | if (ret) { | ||
167 | DRM_ERROR("release reserve failed\n"); | ||
168 | goto out_unref; | ||
169 | } | ||
170 | info = qxl_release_map(qdev, *release); | 207 | info = qxl_release_map(qdev, *release); |
171 | info->id = idr_ret; | 208 | info->id = idr_ret; |
172 | qxl_release_unmap(qdev, *release, info); | 209 | qxl_release_unmap(qdev, *release, info); |
173 | 210 | ||
174 | |||
175 | out_unref: | ||
176 | qxl_bo_unref(&bo); | 211 | qxl_bo_unref(&bo); |
177 | return ret; | 212 | return 0; |
178 | } | 213 | } |
179 | 214 | ||
180 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), | 215 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), |
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
187 | { | 222 | { |
188 | struct qxl_bo *bo; | 223 | struct qxl_bo *bo; |
189 | int idr_ret; | 224 | int idr_ret; |
190 | int ret; | 225 | int ret = 0; |
191 | union qxl_release_info *info; | 226 | union qxl_release_info *info; |
192 | int cur_idx; | 227 | int cur_idx; |
193 | 228 | ||
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
216 | mutex_unlock(&qdev->release_mutex); | 251 | mutex_unlock(&qdev->release_mutex); |
217 | return ret; | 252 | return ret; |
218 | } | 253 | } |
219 | |||
220 | /* pin releases bo's they are too messy to evict */ | ||
221 | ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false); | ||
222 | qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL); | ||
223 | qxl_bo_unreserve(qdev->current_release_bo[cur_idx]); | ||
224 | } | 254 | } |
225 | 255 | ||
226 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); | 256 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); |
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
231 | if (rbo) | 261 | if (rbo) |
232 | *rbo = bo; | 262 | *rbo = bo; |
233 | 263 | ||
234 | qxl_release_add_res(qdev, *release, bo); | ||
235 | |||
236 | ret = qxl_release_reserve(qdev, *release, false); | ||
237 | mutex_unlock(&qdev->release_mutex); | 264 | mutex_unlock(&qdev->release_mutex); |
238 | if (ret) | 265 | |
239 | goto out_unref; | 266 | qxl_release_list_add(*release, bo); |
240 | 267 | ||
241 | info = qxl_release_map(qdev, *release); | 268 | info = qxl_release_map(qdev, *release); |
242 | info->id = idr_ret; | 269 | info->id = idr_ret; |
243 | qxl_release_unmap(qdev, *release, info); | 270 | qxl_release_unmap(qdev, *release, info); |
244 | 271 | ||
245 | out_unref: | ||
246 | qxl_bo_unref(&bo); | 272 | qxl_bo_unref(&bo); |
247 | return ret; | 273 | return ret; |
248 | } | 274 | } |
249 | 275 | ||
250 | int qxl_fence_releaseable(struct qxl_device *qdev, | ||
251 | struct qxl_release *release) | ||
252 | { | ||
253 | int i, ret; | ||
254 | for (i = 0; i < release->bo_count; i++) { | ||
255 | if (!release->bos[i]->tbo.sync_obj) | ||
256 | release->bos[i]->tbo.sync_obj = &release->bos[i]->fence; | ||
257 | ret = qxl_fence_add_release(&release->bos[i]->fence, release->id); | ||
258 | if (ret) | ||
259 | return ret; | ||
260 | } | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 276 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
265 | uint64_t id) | 277 | uint64_t id) |
266 | { | 278 | { |
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | |||
273 | DRM_ERROR("failed to find id in release_idr\n"); | 285 | DRM_ERROR("failed to find id in release_idr\n"); |
274 | return NULL; | 286 | return NULL; |
275 | } | 287 | } |
276 | if (release->bo_count < 1) { | 288 | |
277 | DRM_ERROR("read a released resource with 0 bos\n"); | ||
278 | return NULL; | ||
279 | } | ||
280 | return release; | 289 | return release; |
281 | } | 290 | } |
282 | 291 | ||
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | |||
285 | { | 294 | { |
286 | void *ptr; | 295 | void *ptr; |
287 | union qxl_release_info *info; | 296 | union qxl_release_info *info; |
288 | struct qxl_bo *bo = release->bos[0]; | 297 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
298 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
289 | 299 | ||
290 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); | 300 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); |
301 | if (!ptr) | ||
302 | return NULL; | ||
291 | info = ptr + (release->release_offset & ~PAGE_SIZE); | 303 | info = ptr + (release->release_offset & ~PAGE_SIZE); |
292 | return info; | 304 | return info; |
293 | } | 305 | } |
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev, | |||
296 | struct qxl_release *release, | 308 | struct qxl_release *release, |
297 | union qxl_release_info *info) | 309 | union qxl_release_info *info) |
298 | { | 310 | { |
299 | struct qxl_bo *bo = release->bos[0]; | 311 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
312 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
300 | void *ptr; | 313 | void *ptr; |
301 | 314 | ||
302 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); | 315 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); |
303 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); | 316 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); |
304 | } | 317 | } |
318 | |||
319 | void qxl_release_fence_buffer_objects(struct qxl_release *release) | ||
320 | { | ||
321 | struct ttm_validate_buffer *entry; | ||
322 | struct ttm_buffer_object *bo; | ||
323 | struct ttm_bo_global *glob; | ||
324 | struct ttm_bo_device *bdev; | ||
325 | struct ttm_bo_driver *driver; | ||
326 | struct qxl_bo *qbo; | ||
327 | |||
328 | /* if there is only one object on the release it's the release itself; | ||
329 | since these objects are pinned there is no need to reserve */ | ||
330 | if (list_is_singular(&release->bos)) | ||
331 | return; | ||
332 | |||
333 | bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; | ||
334 | bdev = bo->bdev; | ||
335 | driver = bdev->driver; | ||
336 | glob = bo->glob; | ||
337 | |||
338 | spin_lock(&glob->lru_lock); | ||
339 | spin_lock(&bdev->fence_lock); | ||
340 | |||
341 | list_for_each_entry(entry, &release->bos, head) { | ||
342 | bo = entry->bo; | ||
343 | qbo = to_qxl_bo(bo); | ||
344 | |||
345 | if (!entry->bo->sync_obj) | ||
346 | entry->bo->sync_obj = &qbo->fence; | ||
347 | |||
348 | qxl_fence_add_release_locked(&qbo->fence, release->id); | ||
349 | |||
350 | ttm_bo_add_to_lru(bo); | ||
351 | ww_mutex_unlock(&bo->resv->lock); | ||
352 | entry->reserved = false; | ||
353 | } | ||
354 | spin_unlock(&bdev->fence_lock); | ||
355 | spin_unlock(&glob->lru_lock); | ||
356 | ww_acquire_fini(&release->ticket); | ||
357 | } | ||
358 | |||
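
Taken together, a release now carries its own list of ttm_validate_buffer entries plus a ww_acquire ticket, and submission becomes reserve-list / push / fence-or-backoff. A sketch of a caller using the new helpers (qxl_submit_draw() and some_bo are illustrative names; filling the command body through qxl_release_map() is omitted):

static int qxl_submit_draw(struct qxl_device *qdev, struct qxl_bo *some_bo,
			   unsigned long cmd_size)
{
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	int ret;

	ret = qxl_alloc_release_reserved(qdev, cmd_size, QXL_RELEASE_DRAWABLE,
					 &release, &cmd_bo);
	if (ret)
		return ret;

	/* every BO the command references goes on the release's list */
	qxl_release_list_add(release, some_bo);

	/* one ttm_eu pass reserves and validates the whole list */
	ret = qxl_release_reserve_list(release, false);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}

	ret = qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, true);
	if (ret) {
		qxl_release_backoff_reserve_list(release);
		qxl_release_free(qdev, release);
	} else {
		qxl_release_fence_buffer_objects(release); /* fences + unreserves */
	}

	return ret;
}
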
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 489cb8cece4d..1dfd84cda2a1 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, | |||
206 | return; | 206 | return; |
207 | } | 207 | } |
208 | qbo = container_of(bo, struct qxl_bo, tbo); | 208 | qbo = container_of(bo, struct qxl_bo, tbo); |
209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); | 209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); |
210 | *placement = qbo->placement; | 210 | *placement = qbo->placement; |
211 | } | 211 | } |
212 | 212 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed480..32501f6ec991 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | /***** radeon AUX functions *****/ | 46 | /***** radeon AUX functions *****/ |
47 | |||
48 | /* Atom needs data in little endian format | ||
49 | * so swap as appropriate when copying data to | ||
50 | * or from atom. Note that atom operates on | ||
51 | * dw units. | ||
52 | */ | ||
53 | static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) | ||
54 | { | ||
55 | #ifdef __BIG_ENDIAN | ||
56 | u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ | ||
57 | u32 *dst32, *src32; | ||
58 | int i; | ||
59 | |||
60 | memcpy(src_tmp, src, num_bytes); | ||
61 | src32 = (u32 *)src_tmp; | ||
62 | dst32 = (u32 *)dst_tmp; | ||
63 | if (to_le) { | ||
64 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
65 | dst32[i] = cpu_to_le32(src32[i]); | ||
66 | memcpy(dst, dst_tmp, num_bytes); | ||
67 | } else { | ||
68 | u8 dws = num_bytes & ~3; | ||
69 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
70 | dst32[i] = le32_to_cpu(src32[i]); | ||
71 | memcpy(dst, dst_tmp, dws); | ||
72 | if (num_bytes % 4) { | ||
73 | for (i = 0; i < (num_bytes % 4); i++) | ||
74 | dst[dws+i] = dst_tmp[dws+i]; | ||
75 | } | ||
76 | } | ||
77 | #else | ||
78 | memcpy(dst, src, num_bytes); | ||
79 | #endif | ||
80 | } | ||
81 | |||
47 | union aux_channel_transaction { | 82 | union aux_channel_transaction { |
48 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; | 83 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; |
49 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; | 84 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; |
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
65 | 100 | ||
66 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); | 101 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); |
67 | 102 | ||
68 | memcpy(base, send, send_bytes); | 103 | radeon_copy_swap(base, send, send_bytes, true); |
69 | 104 | ||
70 | args.v1.lpAuxRequest = 0 + 4; | 105 | args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); |
71 | args.v1.lpDataOut = 16 + 4; | 106 | args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); |
72 | args.v1.ucDataOutLen = 0; | 107 | args.v1.ucDataOutLen = 0; |
73 | args.v1.ucChannelID = chan->rec.i2c_id; | 108 | args.v1.ucChannelID = chan->rec.i2c_id; |
74 | args.v1.ucDelay = delay / 10; | 109 | args.v1.ucDelay = delay / 10; |
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
102 | recv_bytes = recv_size; | 137 | recv_bytes = recv_size; |
103 | 138 | ||
104 | if (recv && recv_size) | 139 | if (recv && recv_size) |
105 | memcpy(recv, base + 16, recv_bytes); | 140 | radeon_copy_swap(recv, base + 16, recv_bytes, false); |
106 | 141 | ||
107 | return recv_bytes; | 142 | return recv_bytes; |
108 | } | 143 | } |
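The endian handling above only matters on big-endian hosts, where atom's scratch area holds little-endian dwords. A self-contained user-space sketch of the same dword-wise swap (done unconditionally here, and assuming a length that is a multiple of four so the round trip is exact) can be checked on any machine; it is an illustration, not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

/* copy num_bytes (a multiple of 4 in this sketch) while byteswapping each dword */
static void copy_swap(uint8_t *dst, const uint8_t *src, size_t num_bytes)
{
	for (size_t i = 0; i < num_bytes; i += 4) {
		uint32_t w;

		memcpy(&w, src + i, 4);
		w = swap32(w);
		memcpy(dst + i, &w, 4);
	}
}

int main(void)
{
	uint8_t in[8]  = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint8_t mid[8], out[8];

	copy_swap(mid, in, sizeof(in));    /* CPU order -> "atom" order */
	copy_swap(out, mid, sizeof(out));  /* and back again            */
	printf("round trip %s\n", memcmp(in, out, sizeof(in)) ? "failed" : "ok");
	return 0;
}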
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 393880a09412..10f712e37003 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3166,7 +3166,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
3166 | 3166 | ||
3167 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); | 3167 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
3168 | num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); | 3168 | num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
3169 | r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21); | 3169 | r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); |
3170 | if (r) { | 3170 | if (r) { |
3171 | DRM_ERROR("radeon: moving bo (%d).\n", r); | 3171 | DRM_ERROR("radeon: moving bo (%d).\n", r); |
3172 | radeon_semaphore_free(rdev, &sem, NULL); | 3172 | radeon_semaphore_free(rdev, &sem, NULL); |
@@ -3181,6 +3181,9 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
3181 | radeon_semaphore_free(rdev, &sem, NULL); | 3181 | radeon_semaphore_free(rdev, &sem, NULL); |
3182 | } | 3182 | } |
3183 | 3183 | ||
3184 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
3185 | radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | ||
3186 | radeon_ring_write(ring, WAIT_3D_IDLE_bit); | ||
3184 | for (i = 0; i < num_loops; i++) { | 3187 | for (i = 0; i < num_loops; i++) { |
3185 | cur_size_in_bytes = size_in_bytes; | 3188 | cur_size_in_bytes = size_in_bytes; |
3186 | if (cur_size_in_bytes > 0x1fffff) | 3189 | if (cur_size_in_bytes > 0x1fffff) |
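The reservation bump from 21 to 24 dwords pays for the three-dword SET_CONFIG_REG/WAIT_UNTIL packet inserted before the copy loop. A rough user-space sketch of the sizing arithmetic, with the chunk size and dword counts taken from the hunk above (nothing here is driver code):

#include <stdio.h>

#define CHUNK_BYTES      0x1fffffu   /* max bytes per CP DMA packet         */
#define DWORDS_PER_CHUNK 6           /* ring dwords emitted per chunk        */
#define FIXED_OVERHEAD   24          /* was 21 before the wait-for-idle packet */

static unsigned int ring_dwords(unsigned long long size_in_bytes)
{
	/* DIV_ROUND_UP(size_in_bytes, CHUNK_BYTES) */
	unsigned long long num_loops =
		(size_in_bytes + CHUNK_BYTES - 1) / CHUNK_BYTES;

	return (unsigned int)(num_loops * DWORDS_PER_CHUNK + FIXED_OVERHEAD);
}

int main(void)
{
	printf("4 MiB copy reserves %u dwords\n", ring_dwords(4ull << 20));
	return 0;
}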
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index b88f54b134ab..e5c860f4ccbe 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev) | |||
278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) | 278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) |
279 | { | 279 | { |
280 | if (enable) | 280 | if (enable) |
281 | WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF); | 281 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); |
282 | else | 282 | else |
283 | WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); | 283 | WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); |
284 | } | 284 | } |
285 | 285 | ||
286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) | 286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) |
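The fix above points the masked write at SCLK_PWRMGT_CNTL instead of GENERAL_PWRMGT; the WREG32_P() pattern itself is a keep-mask read-modify-write. A user-space sketch of that pattern, as I read the radeon macro, with register I/O replaced by a plain variable and the bit position assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define SCLK_PWRMGT_OFF (1u << 0)    /* bit position assumed for illustration */

static uint32_t fake_reg = 0x12345678;

/* keep the bits selected by mask, OR in the bits of val outside the mask */
static void wreg32_p(uint32_t *reg, uint32_t val, uint32_t mask)
{
	uint32_t tmp = *reg;

	tmp &= mask;
	tmp |= (val & ~mask);
	*reg = tmp;
}

int main(void)
{
	wreg32_p(&fake_reg, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);   /* set bit   */
	printf("after set:   %#010x\n", fake_reg);
	wreg32_p(&fake_reg, 0, ~SCLK_PWRMGT_OFF);                  /* clear bit */
	printf("after clear: %#010x\n", fake_reg);
	return 0;
}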
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 78bec1a58ed1..f8f8b3113ddd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = { | |||
1161 | .get_mclk = &rv6xx_dpm_get_mclk, | 1161 | .get_mclk = &rv6xx_dpm_get_mclk, |
1162 | .print_power_state = &rv6xx_dpm_print_power_state, | 1162 | .print_power_state = &rv6xx_dpm_print_power_state, |
1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, | 1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, |
1164 | .force_performance_level = &rv6xx_dpm_force_performance_level, | ||
1164 | }, | 1165 | }, |
1165 | .pflip = { | 1166 | .pflip = { |
1166 | .pre_page_flip = &rs600_pre_page_flip, | 1167 | .pre_page_flip = &rs600_pre_page_flip, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index ca1895709908..902479fa737f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -421,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev, | |||
421 | struct radeon_ps *ps); | 421 | struct radeon_ps *ps); |
422 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 422 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
423 | struct seq_file *m); | 423 | struct seq_file *m); |
424 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
425 | enum radeon_dpm_forced_level level); | ||
424 | /* rs780 dpm */ | 426 | /* rs780 dpm */ |
425 | int rs780_dpm_init(struct radeon_device *rdev); | 427 | int rs780_dpm_init(struct radeon_device *rdev); |
426 | int rs780_dpm_enable(struct radeon_device *rdev); | 428 | int rs780_dpm_enable(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 78edadc9e86b..68ce36056019 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
147 | enum radeon_combios_table_offset table) | 147 | enum radeon_combios_table_offset table) |
148 | { | 148 | { |
149 | struct radeon_device *rdev = dev->dev_private; | 149 | struct radeon_device *rdev = dev->dev_private; |
150 | int rev; | 150 | int rev, size; |
151 | uint16_t offset = 0, check_offset; | 151 | uint16_t offset = 0, check_offset; |
152 | 152 | ||
153 | if (!rdev->bios) | 153 | if (!rdev->bios) |
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
156 | switch (table) { | 156 | switch (table) { |
157 | /* absolute offset tables */ | 157 | /* absolute offset tables */ |
158 | case COMBIOS_ASIC_INIT_1_TABLE: | 158 | case COMBIOS_ASIC_INIT_1_TABLE: |
159 | check_offset = RBIOS16(rdev->bios_header_start + 0xc); | 159 | check_offset = 0xc; |
160 | if (check_offset) | ||
161 | offset = check_offset; | ||
162 | break; | 160 | break; |
163 | case COMBIOS_BIOS_SUPPORT_TABLE: | 161 | case COMBIOS_BIOS_SUPPORT_TABLE: |
164 | check_offset = RBIOS16(rdev->bios_header_start + 0x14); | 162 | check_offset = 0x14; |
165 | if (check_offset) | ||
166 | offset = check_offset; | ||
167 | break; | 163 | break; |
168 | case COMBIOS_DAC_PROGRAMMING_TABLE: | 164 | case COMBIOS_DAC_PROGRAMMING_TABLE: |
169 | check_offset = RBIOS16(rdev->bios_header_start + 0x2a); | 165 | check_offset = 0x2a; |
170 | if (check_offset) | ||
171 | offset = check_offset; | ||
172 | break; | 166 | break; |
173 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: | 167 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: |
174 | check_offset = RBIOS16(rdev->bios_header_start + 0x2c); | 168 | check_offset = 0x2c; |
175 | if (check_offset) | ||
176 | offset = check_offset; | ||
177 | break; | 169 | break; |
178 | case COMBIOS_CRTC_INFO_TABLE: | 170 | case COMBIOS_CRTC_INFO_TABLE: |
179 | check_offset = RBIOS16(rdev->bios_header_start + 0x2e); | 171 | check_offset = 0x2e; |
180 | if (check_offset) | ||
181 | offset = check_offset; | ||
182 | break; | 172 | break; |
183 | case COMBIOS_PLL_INFO_TABLE: | 173 | case COMBIOS_PLL_INFO_TABLE: |
184 | check_offset = RBIOS16(rdev->bios_header_start + 0x30); | 174 | check_offset = 0x30; |
185 | if (check_offset) | ||
186 | offset = check_offset; | ||
187 | break; | 175 | break; |
188 | case COMBIOS_TV_INFO_TABLE: | 176 | case COMBIOS_TV_INFO_TABLE: |
189 | check_offset = RBIOS16(rdev->bios_header_start + 0x32); | 177 | check_offset = 0x32; |
190 | if (check_offset) | ||
191 | offset = check_offset; | ||
192 | break; | 178 | break; |
193 | case COMBIOS_DFP_INFO_TABLE: | 179 | case COMBIOS_DFP_INFO_TABLE: |
194 | check_offset = RBIOS16(rdev->bios_header_start + 0x34); | 180 | check_offset = 0x34; |
195 | if (check_offset) | ||
196 | offset = check_offset; | ||
197 | break; | 181 | break; |
198 | case COMBIOS_HW_CONFIG_INFO_TABLE: | 182 | case COMBIOS_HW_CONFIG_INFO_TABLE: |
199 | check_offset = RBIOS16(rdev->bios_header_start + 0x36); | 183 | check_offset = 0x36; |
200 | if (check_offset) | ||
201 | offset = check_offset; | ||
202 | break; | 184 | break; |
203 | case COMBIOS_MULTIMEDIA_INFO_TABLE: | 185 | case COMBIOS_MULTIMEDIA_INFO_TABLE: |
204 | check_offset = RBIOS16(rdev->bios_header_start + 0x38); | 186 | check_offset = 0x38; |
205 | if (check_offset) | ||
206 | offset = check_offset; | ||
207 | break; | 187 | break; |
208 | case COMBIOS_TV_STD_PATCH_TABLE: | 188 | case COMBIOS_TV_STD_PATCH_TABLE: |
209 | check_offset = RBIOS16(rdev->bios_header_start + 0x3e); | 189 | check_offset = 0x3e; |
210 | if (check_offset) | ||
211 | offset = check_offset; | ||
212 | break; | 190 | break; |
213 | case COMBIOS_LCD_INFO_TABLE: | 191 | case COMBIOS_LCD_INFO_TABLE: |
214 | check_offset = RBIOS16(rdev->bios_header_start + 0x40); | 192 | check_offset = 0x40; |
215 | if (check_offset) | ||
216 | offset = check_offset; | ||
217 | break; | 193 | break; |
218 | case COMBIOS_MOBILE_INFO_TABLE: | 194 | case COMBIOS_MOBILE_INFO_TABLE: |
219 | check_offset = RBIOS16(rdev->bios_header_start + 0x42); | 195 | check_offset = 0x42; |
220 | if (check_offset) | ||
221 | offset = check_offset; | ||
222 | break; | 196 | break; |
223 | case COMBIOS_PLL_INIT_TABLE: | 197 | case COMBIOS_PLL_INIT_TABLE: |
224 | check_offset = RBIOS16(rdev->bios_header_start + 0x46); | 198 | check_offset = 0x46; |
225 | if (check_offset) | ||
226 | offset = check_offset; | ||
227 | break; | 199 | break; |
228 | case COMBIOS_MEM_CONFIG_TABLE: | 200 | case COMBIOS_MEM_CONFIG_TABLE: |
229 | check_offset = RBIOS16(rdev->bios_header_start + 0x48); | 201 | check_offset = 0x48; |
230 | if (check_offset) | ||
231 | offset = check_offset; | ||
232 | break; | 202 | break; |
233 | case COMBIOS_SAVE_MASK_TABLE: | 203 | case COMBIOS_SAVE_MASK_TABLE: |
234 | check_offset = RBIOS16(rdev->bios_header_start + 0x4a); | 204 | check_offset = 0x4a; |
235 | if (check_offset) | ||
236 | offset = check_offset; | ||
237 | break; | 205 | break; |
238 | case COMBIOS_HARDCODED_EDID_TABLE: | 206 | case COMBIOS_HARDCODED_EDID_TABLE: |
239 | check_offset = RBIOS16(rdev->bios_header_start + 0x4c); | 207 | check_offset = 0x4c; |
240 | if (check_offset) | ||
241 | offset = check_offset; | ||
242 | break; | 208 | break; |
243 | case COMBIOS_ASIC_INIT_2_TABLE: | 209 | case COMBIOS_ASIC_INIT_2_TABLE: |
244 | check_offset = RBIOS16(rdev->bios_header_start + 0x4e); | 210 | check_offset = 0x4e; |
245 | if (check_offset) | ||
246 | offset = check_offset; | ||
247 | break; | 211 | break; |
248 | case COMBIOS_CONNECTOR_INFO_TABLE: | 212 | case COMBIOS_CONNECTOR_INFO_TABLE: |
249 | check_offset = RBIOS16(rdev->bios_header_start + 0x50); | 213 | check_offset = 0x50; |
250 | if (check_offset) | ||
251 | offset = check_offset; | ||
252 | break; | 214 | break; |
253 | case COMBIOS_DYN_CLK_1_TABLE: | 215 | case COMBIOS_DYN_CLK_1_TABLE: |
254 | check_offset = RBIOS16(rdev->bios_header_start + 0x52); | 216 | check_offset = 0x52; |
255 | if (check_offset) | ||
256 | offset = check_offset; | ||
257 | break; | 217 | break; |
258 | case COMBIOS_RESERVED_MEM_TABLE: | 218 | case COMBIOS_RESERVED_MEM_TABLE: |
259 | check_offset = RBIOS16(rdev->bios_header_start + 0x54); | 219 | check_offset = 0x54; |
260 | if (check_offset) | ||
261 | offset = check_offset; | ||
262 | break; | 220 | break; |
263 | case COMBIOS_EXT_TMDS_INFO_TABLE: | 221 | case COMBIOS_EXT_TMDS_INFO_TABLE: |
264 | check_offset = RBIOS16(rdev->bios_header_start + 0x58); | 222 | check_offset = 0x58; |
265 | if (check_offset) | ||
266 | offset = check_offset; | ||
267 | break; | 223 | break; |
268 | case COMBIOS_MEM_CLK_INFO_TABLE: | 224 | case COMBIOS_MEM_CLK_INFO_TABLE: |
269 | check_offset = RBIOS16(rdev->bios_header_start + 0x5a); | 225 | check_offset = 0x5a; |
270 | if (check_offset) | ||
271 | offset = check_offset; | ||
272 | break; | 226 | break; |
273 | case COMBIOS_EXT_DAC_INFO_TABLE: | 227 | case COMBIOS_EXT_DAC_INFO_TABLE: |
274 | check_offset = RBIOS16(rdev->bios_header_start + 0x5c); | 228 | check_offset = 0x5c; |
275 | if (check_offset) | ||
276 | offset = check_offset; | ||
277 | break; | 229 | break; |
278 | case COMBIOS_MISC_INFO_TABLE: | 230 | case COMBIOS_MISC_INFO_TABLE: |
279 | check_offset = RBIOS16(rdev->bios_header_start + 0x5e); | 231 | check_offset = 0x5e; |
280 | if (check_offset) | ||
281 | offset = check_offset; | ||
282 | break; | 232 | break; |
283 | case COMBIOS_CRT_INFO_TABLE: | 233 | case COMBIOS_CRT_INFO_TABLE: |
284 | check_offset = RBIOS16(rdev->bios_header_start + 0x60); | 234 | check_offset = 0x60; |
285 | if (check_offset) | ||
286 | offset = check_offset; | ||
287 | break; | 235 | break; |
288 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: | 236 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: |
289 | check_offset = RBIOS16(rdev->bios_header_start + 0x62); | 237 | check_offset = 0x62; |
290 | if (check_offset) | ||
291 | offset = check_offset; | ||
292 | break; | 238 | break; |
293 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: | 239 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: |
294 | check_offset = RBIOS16(rdev->bios_header_start + 0x64); | 240 | check_offset = 0x64; |
295 | if (check_offset) | ||
296 | offset = check_offset; | ||
297 | break; | 241 | break; |
298 | case COMBIOS_FAN_SPEED_INFO_TABLE: | 242 | case COMBIOS_FAN_SPEED_INFO_TABLE: |
299 | check_offset = RBIOS16(rdev->bios_header_start + 0x66); | 243 | check_offset = 0x66; |
300 | if (check_offset) | ||
301 | offset = check_offset; | ||
302 | break; | 244 | break; |
303 | case COMBIOS_OVERDRIVE_INFO_TABLE: | 245 | case COMBIOS_OVERDRIVE_INFO_TABLE: |
304 | check_offset = RBIOS16(rdev->bios_header_start + 0x68); | 246 | check_offset = 0x68; |
305 | if (check_offset) | ||
306 | offset = check_offset; | ||
307 | break; | 247 | break; |
308 | case COMBIOS_OEM_INFO_TABLE: | 248 | case COMBIOS_OEM_INFO_TABLE: |
309 | check_offset = RBIOS16(rdev->bios_header_start + 0x6a); | 249 | check_offset = 0x6a; |
310 | if (check_offset) | ||
311 | offset = check_offset; | ||
312 | break; | 250 | break; |
313 | case COMBIOS_DYN_CLK_2_TABLE: | 251 | case COMBIOS_DYN_CLK_2_TABLE: |
314 | check_offset = RBIOS16(rdev->bios_header_start + 0x6c); | 252 | check_offset = 0x6c; |
315 | if (check_offset) | ||
316 | offset = check_offset; | ||
317 | break; | 253 | break; |
318 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: | 254 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: |
319 | check_offset = RBIOS16(rdev->bios_header_start + 0x6e); | 255 | check_offset = 0x6e; |
320 | if (check_offset) | ||
321 | offset = check_offset; | ||
322 | break; | 256 | break; |
323 | case COMBIOS_I2C_INFO_TABLE: | 257 | case COMBIOS_I2C_INFO_TABLE: |
324 | check_offset = RBIOS16(rdev->bios_header_start + 0x70); | 258 | check_offset = 0x70; |
325 | if (check_offset) | ||
326 | offset = check_offset; | ||
327 | break; | 259 | break; |
328 | /* relative offset tables */ | 260 | /* relative offset tables */ |
329 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ | 261 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ |
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
439 | } | 371 | } |
440 | break; | 372 | break; |
441 | default: | 373 | default: |
374 | check_offset = 0; | ||
442 | break; | 375 | break; |
443 | } | 376 | } |
444 | 377 | ||
445 | return offset; | 378 | size = RBIOS8(rdev->bios_header_start + 0x6); |
379 | /* check absolute offset tables */ | ||
380 | if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size) | ||
381 | offset = RBIOS16(rdev->bios_header_start + check_offset); | ||
446 | 382 | ||
383 | return offset; | ||
447 | } | 384 | } |
448 | 385 | ||
449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | 386 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) |
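The rework above collapses the per-table boilerplate: each absolute table now just records its slot in the COMBIOS header, and a single bounds check against the header size decides whether the 16-bit offset may be read at all. A small user-space sketch of that shape, with a made-up header blob standing in for rdev->bios and the header-start offset folded away for brevity:

#include <stdint.h>
#include <stdio.h>

/* toy header: byte 0x6 is the header size, 16-bit table offsets follow */
static const uint8_t bios_header[] = {
	0, 0, 0, 0, 0, 0, 0x10, 0,         /* size = 0x10              */
	0, 0, 0, 0, 0x34, 0x12, 0, 0,      /* offset 0x1234 at slot 0xc */
};

static uint16_t rbios16(const uint8_t *p, unsigned int off)
{
	return p[off] | (p[off + 1] << 8);
}

static uint16_t table_offset(unsigned int check_offset)
{
	uint8_t size = bios_header[0x6];

	/* only dereference the slot if it lies inside the header */
	if (check_offset && check_offset < size)
		return rbios16(bios_header, check_offset);
	return 0;
}

int main(void)
{
	printf("slot 0xc  -> %#x\n", table_offset(0xc));   /* inside the header  */
	printf("slot 0x70 -> %#x\n", table_offset(0x70));  /* outside, rejected  */
	return 0;
}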
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
965 | dac = RBIOS8(dac_info + 0x3) & 0xf; | 902 | dac = RBIOS8(dac_info + 0x3) & 0xf; |
966 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); | 903 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); |
967 | } | 904 | } |
968 | /* if the values are all zeros, use the table */ | 905 | /* if either value is zero, use the table */ |
969 | if (p_dac->ps2_pdac_adj) | 906 | if ((dac == 0) || (bg == 0)) |
907 | found = 0; | ||
908 | else | ||
970 | found = 1; | 909 | found = 1; |
971 | } | 910 | } |
972 | 911 | ||
973 | /* quirks */ | 912 | /* quirks */ |
913 | /* Radeon 7000 (RV100) */ | ||
914 | if (((dev->pdev->device == 0x5159) && | ||
915 | (dev->pdev->subsystem_vendor == 0x174B) && | ||
916 | (dev->pdev->subsystem_device == 0x7c28)) || | ||
974 | /* Radeon 9100 (R200) */ | 917 | /* Radeon 9100 (R200) */ |
975 | if ((dev->pdev->device == 0x514D) && | 918 | ((dev->pdev->device == 0x514D) && |
976 | (dev->pdev->subsystem_vendor == 0x174B) && | 919 | (dev->pdev->subsystem_vendor == 0x174B) && |
977 | (dev->pdev->subsystem_device == 0x7149)) { | 920 | (dev->pdev->subsystem_device == 0x7149))) { |
978 | /* vbios value is bad, use the default */ | 921 | /* vbios value is bad, use the default */ |
979 | found = 0; | 922 | found = 0; |
980 | } | 923 | } |
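With a second board joining the PS2 DAC quirk, the condition is now a chain of (device, subsystem vendor, subsystem device) matches. If the list keeps growing, a lookup table keeps the intent readable; a hypothetical user-space sketch of that alternative, using only the two IDs from the hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dac_quirk {
	uint16_t device, sub_vendor, sub_device;
};

/* boards whose VBIOS PS2 DAC adjustment is known to be bad */
static const struct dac_quirk bad_ps2_pdac[] = {
	{ 0x5159, 0x174B, 0x7c28 },   /* Radeon 7000 (RV100) */
	{ 0x514D, 0x174B, 0x7149 },   /* Radeon 9100 (R200)  */
};

static bool ps2_pdac_quirk(uint16_t dev, uint16_t sv, uint16_t sd)
{
	for (size_t i = 0; i < sizeof(bad_ps2_pdac) / sizeof(bad_ps2_pdac[0]); i++)
		if (bad_ps2_pdac[i].device == dev &&
		    bad_ps2_pdac[i].sub_vendor == sv &&
		    bad_ps2_pdac[i].sub_device == sd)
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", ps2_pdac_quirk(0x5159, 0x174B, 0x7c28));  /* 1 */
	printf("%d\n", ps2_pdac_quirk(0x5159, 0x174B, 0x0000));  /* 0 */
	return 0;
}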
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d9d31a383276..6a51d943ccf4 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -466,7 +466,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) | |||
466 | size += rdev->vm_manager.max_pfn * 8; | 466 | size += rdev->vm_manager.max_pfn * 8; |
467 | size *= 2; | 467 | size *= 2; |
468 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, | 468 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, |
469 | RADEON_VM_PTB_ALIGN(size), | 469 | RADEON_GPU_PAGE_ALIGN(size), |
470 | RADEON_VM_PTB_ALIGN_SIZE, | 470 | RADEON_VM_PTB_ALIGN_SIZE, |
471 | RADEON_GEM_DOMAIN_VRAM); | 471 | RADEON_GEM_DOMAIN_VRAM); |
472 | if (r) { | 472 | if (r) { |
@@ -621,7 +621,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) | |||
621 | } | 621 | } |
622 | 622 | ||
623 | retry: | 623 | retry: |
624 | pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev)); | 624 | pd_size = radeon_vm_directory_size(rdev); |
625 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | 625 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
626 | &vm->page_directory, pd_size, | 626 | &vm->page_directory, pd_size, |
627 | RADEON_VM_PTB_ALIGN_SIZE, false); | 627 | RADEON_VM_PTB_ALIGN_SIZE, false); |
@@ -953,8 +953,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev, | |||
953 | retry: | 953 | retry: |
954 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | 954 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
955 | &vm->page_tables[pt_idx], | 955 | &vm->page_tables[pt_idx], |
956 | RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8), | 956 | RADEON_VM_PTE_COUNT * 8, |
957 | RADEON_VM_PTB_ALIGN_SIZE, false); | 957 | RADEON_GPU_PAGE_SIZE, false); |
958 | 958 | ||
959 | if (r == -ENOMEM) { | 959 | if (r == -ENOMEM) { |
960 | r = radeon_vm_evict(rdev, vm); | 960 | r = radeon_vm_evict(rdev, vm); |
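The gart changes shrink two allocations: the SA manager backing store is now rounded up to the GPU page rather than the larger PTB alignment, and each page table asks for RADEON_VM_PTE_COUNT * 8 bytes at GPU-page alignment. The rounding itself is the usual power-of-two align-up; a quick user-space sketch, where the 4 KiB GPU page and 32 KiB PTB alignment are assumptions for illustration only:

#include <stdio.h>

/* values assumed for illustration: 4 KiB GPU pages, 32 KiB PTB alignment */
#define GPU_PAGE_SIZE   4096u
#define PTB_ALIGN_SIZE  32768u

/* round x up to the next multiple of the power-of-two a */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int size = 9000;   /* some directory size in bytes */

	printf("GPU-page aligned: %u\n", ALIGN_UP(size, GPU_PAGE_SIZE));   /* 12288 */
	printf("PTB aligned:      %u\n", ALIGN_UP(size, PTB_ALIGN_SIZE));  /* 32768 */
	return 0;
}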
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 65e33f387341..363018c60412 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev) | |||
819 | POWERMODE1(calculate_memory_refresh_rate(rdev, | 819 | POWERMODE1(calculate_memory_refresh_rate(rdev, |
820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | |
821 | POWERMODE2(calculate_memory_refresh_rate(rdev, | 821 | POWERMODE2(calculate_memory_refresh_rate(rdev, |
822 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 822 | pi->hw.sclks[R600_POWER_LEVEL_HIGH])) | |
823 | POWERMODE3(calculate_memory_refresh_rate(rdev, | 823 | POWERMODE3(calculate_memory_refresh_rate(rdev, |
824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); | 824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); |
825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); | 825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); |
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev) | |||
1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); | 1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
1183 | 1183 | ||
1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); | 1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
1185 | if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1185 | if (rdev->pm.dpm.new_active_crtcs & 1) { |
1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1188 | } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1188 | } else if (rdev->pm.dpm.new_active_crtcs & 2) { |
1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
1191 | } else { | 1191 | } else { |
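The display-gap fix keys off the bitmask of CRTCs the new power state will drive rather than polling the CRTC enable registers. A tiny sketch of how such a mask is tested, with the layout assumed as bit 0 for CRTC0 and bit 1 for CRTC1:

#include <stdio.h>

/* decide which display gap gets vblank treatment from an active-CRTC mask */
static void pick_gap(unsigned int new_active_crtcs)
{
	if (new_active_crtcs & 1)
		printf("DISP1 waits for vblank, DISP2 ignored\n");
	else if (new_active_crtcs & 2)
		printf("DISP1 ignored, DISP2 waits for vblank\n");
	else
		printf("both gaps ignored\n");
}

int main(void)
{
	pick_gap(0x1);
	pick_gap(0x2);
	pick_gap(0x0);
	return 0;
}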
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; | 1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; |
1671 | int ret; | 1671 | int ret; |
1672 | 1672 | ||
1673 | pi->restricted_levels = 0; | ||
1674 | |||
1673 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | 1675 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
1674 | 1676 | ||
1675 | rv6xx_clear_vc(rdev); | 1677 | rv6xx_clear_vc(rdev); |
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
1756 | 1758 | ||
1757 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); | 1759 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
1758 | 1760 | ||
1761 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
1762 | |||
1759 | return 0; | 1763 | return 0; |
1760 | } | 1764 | } |
1761 | 1765 | ||
@@ -2085,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low) | |||
2085 | else | 2089 | else |
2086 | return requested_state->high.mclk; | 2090 | return requested_state->high.mclk; |
2087 | } | 2091 | } |
2092 | |||
2093 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
2094 | enum radeon_dpm_forced_level level) | ||
2095 | { | ||
2096 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); | ||
2097 | |||
2098 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | ||
2099 | pi->restricted_levels = 3; | ||
2100 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { | ||
2101 | pi->restricted_levels = 2; | ||
2102 | } else { | ||
2103 | pi->restricted_levels = 0; | ||
2104 | } | ||
2105 | |||
2106 | rv6xx_clear_vc(rdev); | ||
2107 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true); | ||
2108 | r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF); | ||
2109 | r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW); | ||
2110 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false); | ||
2111 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false); | ||
2112 | rv6xx_enable_medium(rdev); | ||
2113 | rv6xx_enable_high(rdev); | ||
2114 | if (pi->restricted_levels == 3) | ||
2115 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false); | ||
2116 | rv6xx_program_vc(rdev); | ||
2117 | rv6xx_program_at(rdev); | ||
2118 | |||
2119 | rdev->pm.dpm.forced_level = level; | ||
2120 | |||
2121 | return 0; | ||
2122 | } | ||
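The new rv6xx_dpm_force_performance_level() (now wired up through .force_performance_level in radeon_asic.c) encodes the forced level as a restriction count: 3 for HIGH, 2 for LOW, 0 for AUTO, with the low level disabled afterwards in the HIGH case. A user-space sketch of just that mapping, mirroring the switch in the hunk above:

#include <stdio.h>

enum forced_level { LEVEL_AUTO, LEVEL_LOW, LEVEL_HIGH };

/* mirror of the restricted_levels assignment in rv6xx_dpm_force_performance_level() */
static int restricted_levels(enum forced_level level)
{
	switch (level) {
	case LEVEL_HIGH:
		return 3;
	case LEVEL_LOW:
		return 2;
	default:
		return 0;
	}
}

int main(void)
{
	printf("auto -> %d, low -> %d, high -> %d\n",
	       restricted_levels(LEVEL_AUTO),
	       restricted_levels(LEVEL_LOW),
	       restricted_levels(LEVEL_HIGH));
	return 0;
}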