Diffstat (limited to 'drivers/gpu')

 drivers/gpu/drm/drm_crtc.c              | 35
 drivers/gpu/drm/drm_dp_mst_topology.c   | 11
 drivers/gpu/drm/drm_mm.c                |  2
 drivers/gpu/drm/i915/i915_gem.c         | 25
 drivers/gpu/drm/i915/intel_display.c    |  2
 drivers/gpu/drm/i915/intel_uncore.c     |  8
 drivers/gpu/drm/radeon/radeon_fence.c   | 68
 drivers/gpu/drm/radeon/si.c             |  6
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c     | 78
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 18
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     | 14
 11 files changed, 156 insertions(+), 111 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b6b07ff720b..f6d04c7b5115 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -43,9 +43,10 @@
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-                                                        struct drm_mode_fb_cmd2 *r,
-                                                        struct drm_file *file_priv);
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+                            struct drm_mode_fb_cmd2 *r,
+                            struct drm_file *file_priv);
 
 /* Avoid boilerplate. I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list) \
@@ -2908,13 +2909,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
         */
        if (req->flags & DRM_MODE_CURSOR_BO) {
                if (req->handle) {
-                       fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+                       fb = internal_framebuffer_create(dev, &fbreq, file_priv);
                        if (IS_ERR(fb)) {
                                DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
                                return PTR_ERR(fb);
                        }
-
-                       drm_framebuffer_reference(fb);
                } else {
                        fb = NULL;
                }
@@ -3267,9 +3266,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
        return 0;
 }
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-                                                        struct drm_mode_fb_cmd2 *r,
-                                                        struct drm_file *file_priv)
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+                            struct drm_mode_fb_cmd2 *r,
+                            struct drm_file *file_priv)
 {
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_framebuffer *fb;
@@ -3301,12 +3301,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
                return fb;
        }
 
-       mutex_lock(&file_priv->fbs_lock);
-       r->fb_id = fb->base.id;
-       list_add(&fb->filp_head, &file_priv->fbs);
-       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-       mutex_unlock(&file_priv->fbs_lock);
-
        return fb;
 }
 
@@ -3328,15 +3322,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
 int drm_mode_addfb2(struct drm_device *dev,
                    void *data, struct drm_file *file_priv)
 {
+       struct drm_mode_fb_cmd2 *r = data;
        struct drm_framebuffer *fb;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       fb = add_framebuffer_internal(dev, data, file_priv);
+       fb = internal_framebuffer_create(dev, r, file_priv);
        if (IS_ERR(fb))
                return PTR_ERR(fb);
 
+       /* Transfer ownership to the filp for reaping on close */
+
+       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+       mutex_lock(&file_priv->fbs_lock);
+       r->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+       mutex_unlock(&file_priv->fbs_lock);
+
        return 0;
 }
 
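Editor's note: as read from these hunks, the renamed helper now returns a framebuffer holding exactly one reference owned by the caller, and the filp bookkeeping (fb_id, filp_head list) happens only in the addfb2 ioctl; the cursor path previously both registered the fb with the filp and took an extra reference. A minimal sketch of the resulting contract at the cursor call site (the final unreference placement is assumed, it is not shown in these hunks):

	fb = internal_framebuffer_create(dev, &fbreq, file_priv);
	if (IS_ERR(fb))
		return PTR_ERR(fb);	/* no reference to clean up on error */

	/* ... point the cursor plane at fb ... */

	drm_framebuffer_unreference(fb);	/* drop the creation reference when done */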
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9a5b68717ec8..379ab4555756 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
                              struct drm_dp_sideband_msg_tx *txmsg)
 {
        bool ret;
-       mutex_lock(&mgr->qlock);
+
+       /*
+        * All updates to txmsg->state are protected by mgr->qlock, and the two
+        * cases we check here are terminal states. For those the barriers
+        * provided by the wake_up/wait_event pair are enough.
+        */
        ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
               txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-       mutex_unlock(&mgr->qlock);
        return ret;
 }
 
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
        return 0;
 }
 
-/* must be called holding qlock */
 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 {
        struct drm_dp_sideband_msg_tx *txmsg;
        int ret;
 
+       WARN_ON(!mutex_is_locked(&mgr->qlock));
+
        /* construct a chunk from the first msg in the tx_msg queue */
        if (list_empty(&mgr->tx_msg_downq)) {
                mgr->tx_down_in_progress = false;
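Editor's note: the second hunk turns a comment-only locking rule into a runtime check. Where lockdep is in use, the usual alternative is the annotation below (a sketch, not part of this patch); unlike mutex_is_locked(), which only reports that someone holds the lock, it verifies the current task holds it and compiles away on non-lockdep builds:

	/* Equivalent lockdep annotation; checks lock ownership, not just state: */
	lockdep_assert_held(&mgr->qlock);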
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 7fc6f8bd4821..1134526286c8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
                unsigned rem;
 
                rem = do_div(tmp, alignment);
-               if (tmp)
+               if (rem)
                        start += alignment - rem;
        }
 
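Editor's note: do_div(n, base) stores the quotient back into n and returns the remainder, so the old code branched on the quotient and bumped `start` by a full `alignment` even when it was already aligned. A user-space sketch of the corrected rounding (illustrative only, not kernel code):

	#include <stdint.h>

	/* Round 'start' up to the next multiple of 'alignment'. */
	static uint64_t align_up(uint64_t start, unsigned int alignment)
	{
		uint64_t tmp = start;
		unsigned int rem = tmp % alignment;	/* what do_div() returns */

		tmp /= alignment;			/* what do_div() leaves in tmp */
		if (rem)				/* not "if (tmp)" */
			start += alignment - rem;
		return start;
	}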
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5daad5f75fb..5b205863b659 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2936,9 +2936,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        req = obj->last_read_req;
 
        /* Do this after OLR check to make sure we make forward progress polling
-        * on this IOCTL with a timeout <=0 (like busy ioctl)
+        * on this IOCTL with a timeout == 0 (like busy ioctl)
         */
-       if (args->timeout_ns <= 0) {
+       if (args->timeout_ns == 0) {
                ret = -ETIME;
                goto out;
        }
@@ -2948,7 +2948,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
 
-       ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+       ret = __i915_wait_request(req, reset_counter, true,
+                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                  file->driver_priv);
        mutex_lock(&dev->struct_mutex);
        i915_gem_request_unreference(req);
@@ -4792,6 +4793,9 @@ i915_gem_init_hw(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
 
+       /* Double layer security blanket, see i915_gem_init() */
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
@@ -4824,7 +4828,7 @@ i915_gem_init_hw(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i) {
                ret = ring->init_hw(ring);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4841,9 +4845,11 @@ i915_gem_init_hw(struct drm_device *dev)
                DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
 
-               return ret;
+               goto out;
        }
 
+out:
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return ret;
 }
 
@@ -4877,6 +4883,14 @@ int i915_gem_init(struct drm_device *dev)
                dev_priv->gt.stop_ring = intel_logical_ring_stop;
        }
 
+       /* This is just a security blanket to placate dragons.
+        * On some systems, we very sporadically observe that the first TLBs
+        * used by the CS may be stale, despite us poking the TLB reset. If
+        * we hold the forcewake during initialisation these problems
+        * just magically go away.
+        */
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
        ret = i915_gem_init_userptr(dev);
        if (ret)
                goto out_unlock;
@@ -4903,6 +4917,7 @@ int i915_gem_init(struct drm_device *dev)
        }
 
 out_unlock:
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
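Editor's note: the first two hunks change what a non-positive timeout means to the wait ioctl. Inferred from the diff itself (not from the uAPI header), the resulting convention is:

	/*
	 * args->timeout_ns == 0  ->  pure poll: -ETIME immediately if not complete
	 * args->timeout_ns  > 0  ->  bounded wait; remaining time written back
	 * args->timeout_ns  < 0  ->  wait indefinitely (NULL timeout pointer below)
	 */
	ret = __i915_wait_request(req, reset_counter, true,
				  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
				  file->driver_priv);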
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e730789b53b7..9943c20a741d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9716,7 +9716,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       WARN_ON(!in_irq());
+       WARN_ON(!in_interrupt());
 
        if (crtc == NULL)
                return;
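Editor's note: the two predicates differ in scope; the relaxed assertion presumably allows this check to also run from softirq context, not just from the hard interrupt handler:

	/*
	 * in_irq()       - true only while servicing a hard interrupt
	 * in_interrupt() - also true in softirq/NMI context (and with BH disabled)
	 */
	WARN_ON(!in_interrupt());	/* accepts any interrupt-like context */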
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c47a3baa53d5..4e8fb891d4ea 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 
                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case of MT access is
-                * not working
+                * not working. In this stage we don't know which flavour this
+                * ivb is, so it is better to reset also the gen6 fw registers
+                * before the ecobus check.
                 */
+
+               __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+               __raw_posting_read(dev_priv, ECOBUS);
+
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
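Editor's note: the write/posting-read pair is the standard MMIO flush idiom: reading any register on the same device forces the preceding posted write to reach the hardware before execution continues. A generic sketch, with `regs` as a hypothetical ioremap'd base and the register offsets reused from the hunk purely for illustration:

	writel(0, regs + FORCEWAKE);	/* posted write may still be in flight */
	(void)readl(regs + ECOBUS);	/* posting read: forces the write out */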
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d13d1b5a859f..df09ca7c4889 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+struct radeon_wait_cb {
+       struct fence_cb base;
+       struct task_struct *task;
+};
+
+static void
+radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+       struct radeon_wait_cb *wait =
+               container_of(cb, struct radeon_wait_cb, base);
+
+       wake_up_process(wait->task);
+}
+
 static signed long radeon_fence_default_wait(struct fence *f, bool intr,
                                             signed long t)
 {
        struct radeon_fence *fence = to_radeon_fence(f);
        struct radeon_device *rdev = fence->rdev;
-       bool signaled;
+       struct radeon_wait_cb cb;
 
-       fence_enable_sw_signaling(&fence->base);
+       cb.task = current;
 
-       /*
-        * This function has to return -EDEADLK, but cannot hold
-        * exclusive_lock during the wait because some callers
-        * may already hold it. This means checking needs_reset without
-        * lock, and not fiddling with any gpu internals.
-        *
-        * The callback installed with fence_enable_sw_signaling will
-        * run before our wait_event_*timeout call, so we will see
-        * both the signaled fence and the changes to needs_reset.
-        */
+       if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+               return t;
+
+       while (t > 0) {
+               if (intr)
+                       set_current_state(TASK_INTERRUPTIBLE);
+               else
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+
+               /*
+                * radeon_test_signaled must be called after
+                * set_current_state to prevent a race with wake_up_process
+                */
+               if (radeon_test_signaled(fence))
+                       break;
+
+               if (rdev->needs_reset) {
+                       t = -EDEADLK;
+                       break;
+               }
+
+               t = schedule_timeout(t);
+
+               if (t > 0 && intr && signal_pending(current))
+                       t = -ERESTARTSYS;
+       }
+
+       __set_current_state(TASK_RUNNING);
+       fence_remove_callback(f, &cb.base);
 
-       if (intr)
-               t = wait_event_interruptible_timeout(rdev->fence_queue,
-                       ((signaled = radeon_test_signaled(fence)) ||
-                        rdev->needs_reset), t);
-       else
-               t = wait_event_timeout(rdev->fence_queue,
-                       ((signaled = radeon_test_signaled(fence)) ||
-                        rdev->needs_reset), t);
-
-       if (t > 0 && !signaled)
-               return -EDEADLK;
        return t;
 }
 
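Editor's note: the open-coded loop follows the canonical prepare-to-wait pattern: the task state must change before the wakeup condition is tested, so a wake_up_process() racing with the check simply resets the task to TASK_RUNNING and schedule_timeout() returns immediately instead of sleeping through the event. Stripped to its skeleton (a sketch only; `done` stands in for the fence test):

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (done)			/* test only AFTER changing state */
			break;
		timeout = schedule_timeout(timeout);
		if (timeout <= 0)
			break;
	}
	__set_current_state(TASK_RUNNING);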
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e088e5558da0..a7fb2735d4a9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7130,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
        WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
 
        if (!vclk || !dclk) {
-               /* keep the Bypass mode, put PLL to sleep */
-               WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+               /* keep the Bypass mode */
                return 0;
        }
 
@@ -7147,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
        /* set VCO_MODE to 1 */
        WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
 
-       /* toggle UPLL_SLEEP to 1 then back to 0 */
-       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+       /* disable sleep mode */
        WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
 
        /* deassert UPLL_RESET */
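Editor's note: both hunks stop asserting UPLL_SLEEP before clearing it. For reading them, radeon's read-modify-write helper has roughly this shape (a sketch of the driver's convention, where `mask` selects the bits to preserve and `val` supplies the new bits; check radeon.h for the authoritative definition):

	#define WREG32_P(reg, val, mask)			\
		do {						\
			uint32_t tmp_ = RREG32(reg);		\
			tmp_ &= (mask);				\
			tmp_ |= ((val) & ~(mask));		\
			WREG32(reg, tmp_);			\
		} while (0)

So WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK) clears the sleep bits while leaving the rest of the register intact.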
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6c6b655defcf..e13b9cbc304e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_err1;
        }
 
-       ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-                            (dev_priv->vram_size >> PAGE_SHIFT));
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed initializing memory manager for VRAM.\n");
-               goto out_err2;
-       }
-
-       dev_priv->has_gmr = true;
-       if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-           refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-                                        VMW_PL_GMR) != 0) {
-               DRM_INFO("No GMR memory available. "
-                        "Graphics memory resources are very limited.\n");
-               dev_priv->has_gmr = false;
-       }
-
-       if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-               dev_priv->has_mob = true;
-               if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-                                  VMW_PL_MOB) != 0) {
-                       DRM_INFO("No MOB memory available. "
-                                "3D will be disabled.\n");
-                       dev_priv->has_mob = false;
-               }
-       }
-
        dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
                                               dev_priv->mmio_size);
 
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_no_fman;
        }
 
+
+       ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+                            (dev_priv->vram_size >> PAGE_SHIFT));
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+               goto out_no_vram;
+       }
+
+       dev_priv->has_gmr = true;
+       if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+           refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+                                        VMW_PL_GMR) != 0) {
+               DRM_INFO("No GMR memory available. "
+                        "Graphics memory resources are very limited.\n");
+               dev_priv->has_gmr = false;
+       }
+
+       if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+               dev_priv->has_mob = true;
+               if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+                                  VMW_PL_MOB) != 0) {
+                       DRM_INFO("No MOB memory available. "
+                                "3D will be disabled.\n");
+                       dev_priv->has_mob = false;
+               }
+       }
+
        vmw_kms_save_vga(dev_priv);
 
        /* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ out_no_fifo:
        vmw_kms_close(dev_priv);
 out_no_kms:
        vmw_kms_restore_vga(dev_priv);
+       if (dev_priv->has_mob)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+       if (dev_priv->has_gmr)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_no_vram:
        vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ out_err4:
        iounmap(dev_priv->mmio_virt);
 out_err3:
        arch_phys_wc_del(dev_priv->mmio_mtrr);
-       if (dev_priv->has_mob)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-       if (dev_priv->has_gmr)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
-out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
        vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
+
+       if (dev_priv->has_mob)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+       if (dev_priv->has_gmr)
+               (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        arch_phys_wc_del(dev_priv->mmio_mtrr);
-       if (dev_priv->has_mob)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-       if (dev_priv->has_gmr)
-               (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
 
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
 
+       pci_disable_device(pdev);
        drm_put_dev(dev);
 }
 
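Editor's note: the pattern worth noting here is that when an init step moves later in vmw_driver_load(), its cleanup must move in two places: down the goto error ladder (the new out_no_vram label) and correspondingly earlier in vmw_driver_unload(), so teardown remains the mirror image of init. The skeleton of the idiom, with illustrative names:

	ret = init_a();
	if (ret)
		goto out_no_a;

	ret = init_b();				/* the step this patch moved */
	if (ret)
		goto out_no_b;

	return 0;

out_no_b:
	teardown_a();				/* undo only what succeeded */
out_no_a:
	return ret;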
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33176d05db35..654c8daeb5ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_no_reloc;
        }
        bo = &vmw_bo->base;
 
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
-       vmw_bo_p = NULL;
+       *vmw_bo_p = NULL;
        return ret;
 }
 
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_no_reloc;
        }
        bo = &vmw_bo->base;
 
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
-       vmw_bo_p = NULL;
+       *vmw_bo_p = NULL;
        return ret;
 }
 
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                              NULL, arg->command_size, arg->throttle_us,
                              (void __user *)(unsigned long)arg->fence_rep,
                              NULL);
-
+       ttm_read_unlock(&dev_priv->reservation_sem);
        if (unlikely(ret != 0))
-               goto out_unlock;
+               return ret;
 
        vmw_kms_cursor_post_execbuf(dev_priv);
 
-out_unlock:
-       ttm_read_unlock(&dev_priv->reservation_sem);
-       return ret;
+       return 0;
 }
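Editor's note: the `*vmw_bo_p = NULL` fix is the classic pointer-parameter pitfall: assigning to the parameter itself only updates the callee's local copy, leaving the caller with a dangling pointer to the just-unreferenced buffer. In miniature (illustrative helpers, not vmwgfx code):

	static void broken_clear(struct vmw_dma_buffer **out)
	{
		out = NULL;	/* no-op for the caller: only the local copy changes */
	}

	static void fixed_clear(struct vmw_dma_buffer **out)
	{
		*out = NULL;	/* clears what the caller's pointer refers to */
	}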
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8725b79e7847..07cda8cbbddb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        int i;
        struct drm_mode_config *mode_config = &dev->mode_config;
 
-       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-       if (unlikely(ret != 0))
-               return ret;
-
        if (!arg->num_outputs) {
                struct drm_vmw_rect def_rect = {0, 0, 800, 600};
                vmw_du_update_layout(dev_priv, 1, &def_rect);
-               goto out_unlock;
+               return 0;
        }
 
        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
        rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
                        GFP_KERNEL);
-       if (unlikely(!rects)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (unlikely(!rects))
+               return -ENOMEM;
 
        user_rects = (void __user *)(unsigned long)arg->rects;
        ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 
 out_free:
        kfree(rects);
-out_unlock:
-       ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
 }