diff options
author | Dave Airlie <airlied@redhat.com> | 2015-12-23 17:08:47 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2015-12-23 17:08:47 -0500 |
commit | 20f8e032e6dc7053ab803f488e2a8839cd2f69a6 (patch) | |
tree | 406b662934bffa205d90e9435bf96af7dc1a0cb0 /drivers/gpu/drm | |
parent | ade1ba7346070709856d7e38f8d1a77b7aa710aa (diff) | |
parent | 5b726e06d6e8309e5c9ef4109a32caf27c71dfc8 (diff) |
Backmerge drm-fixes merge into Linus's tree into drm-next.
This merges '5b726e06d6e8309e5c9ef4109a32caf27c71dfc8' into drm-next
Just to resolve some merges to make Daniel's life easier.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm')
23 files changed, 201 insertions, 143 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index fca4ef78589c..003959f99251 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -1266,7 +1266,8 @@ struct amdgpu_cs_parser { | |||
1266 | struct ww_acquire_ctx ticket; | 1266 | struct ww_acquire_ctx ticket; |
1267 | 1267 | ||
1268 | /* user fence */ | 1268 | /* user fence */ |
1269 | struct amdgpu_user_fence uf; | 1269 | struct amdgpu_user_fence uf; |
1270 | struct amdgpu_bo_list_entry uf_entry; | ||
1270 | }; | 1271 | }; |
1271 | 1272 | ||
1272 | struct amdgpu_job { | 1273 | struct amdgpu_job { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index fa0e3276e8da..ce0254d4dcd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, | ||
131 | struct drm_amdgpu_cs_chunk_fence *fence_data) | ||
132 | { | ||
133 | struct drm_gem_object *gobj; | ||
134 | uint32_t handle; | ||
135 | |||
136 | handle = fence_data->handle; | ||
137 | gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, | ||
138 | fence_data->handle); | ||
139 | if (gobj == NULL) | ||
140 | return -EINVAL; | ||
141 | |||
142 | p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); | ||
143 | p->uf.offset = fence_data->offset; | ||
144 | |||
145 | if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { | ||
146 | drm_gem_object_unreference_unlocked(gobj); | ||
147 | return -EINVAL; | ||
148 | } | ||
149 | |||
150 | p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); | ||
151 | p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; | ||
152 | p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; | ||
153 | p->uf_entry.priority = 0; | ||
154 | p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; | ||
155 | p->uf_entry.tv.shared = true; | ||
156 | |||
157 | drm_gem_object_unreference_unlocked(gobj); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
130 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | 161 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) |
131 | { | 162 | { |
132 | union drm_amdgpu_cs *cs = data; | 163 | union drm_amdgpu_cs *cs = data; |
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
207 | 238 | ||
208 | case AMDGPU_CHUNK_ID_FENCE: | 239 | case AMDGPU_CHUNK_ID_FENCE: |
209 | size = sizeof(struct drm_amdgpu_cs_chunk_fence); | 240 | size = sizeof(struct drm_amdgpu_cs_chunk_fence); |
210 | if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { | 241 | if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { |
211 | uint32_t handle; | ||
212 | struct drm_gem_object *gobj; | ||
213 | struct drm_amdgpu_cs_chunk_fence *fence_data; | ||
214 | |||
215 | fence_data = (void *)p->chunks[i].kdata; | ||
216 | handle = fence_data->handle; | ||
217 | gobj = drm_gem_object_lookup(p->adev->ddev, | ||
218 | p->filp, handle); | ||
219 | if (gobj == NULL) { | ||
220 | ret = -EINVAL; | ||
221 | goto free_partial_kdata; | ||
222 | } | ||
223 | |||
224 | p->uf.bo = gem_to_amdgpu_bo(gobj); | ||
225 | amdgpu_bo_ref(p->uf.bo); | ||
226 | drm_gem_object_unreference_unlocked(gobj); | ||
227 | p->uf.offset = fence_data->offset; | ||
228 | } else { | ||
229 | ret = -EINVAL; | 242 | ret = -EINVAL; |
230 | goto free_partial_kdata; | 243 | goto free_partial_kdata; |
231 | } | 244 | } |
245 | |||
246 | ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); | ||
247 | if (ret) | ||
248 | goto free_partial_kdata; | ||
249 | |||
232 | break; | 250 | break; |
233 | 251 | ||
234 | case AMDGPU_CHUNK_ID_DEPENDENCIES: | 252 | case AMDGPU_CHUNK_ID_DEPENDENCIES: |
@@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) | |||
391 | INIT_LIST_HEAD(&duplicates); | 409 | INIT_LIST_HEAD(&duplicates); |
392 | amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); | 410 | amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); |
393 | 411 | ||
412 | if (p->uf.bo) | ||
413 | list_add(&p->uf_entry.tv.head, &p->validated); | ||
414 | |||
394 | if (need_mmap_lock) | 415 | if (need_mmap_lock) |
395 | down_read(¤t->mm->mmap_sem); | 416 | down_read(¤t->mm->mmap_sem); |
396 | 417 | ||
@@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo | |||
488 | for (i = 0; i < parser->num_ibs; i++) | 509 | for (i = 0; i < parser->num_ibs; i++) |
489 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); | 510 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); |
490 | kfree(parser->ibs); | 511 | kfree(parser->ibs); |
491 | if (parser->uf.bo) | 512 | amdgpu_bo_unref(&parser->uf.bo); |
492 | amdgpu_bo_unref(&parser->uf.bo); | 513 | amdgpu_bo_unref(&parser->uf_entry.robj); |
493 | } | 514 | } |
494 | 515 | ||
495 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | 516 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 6d136b260bb3..7380f782cd14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -476,6 +476,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
476 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | 476 | if (domain == AMDGPU_GEM_DOMAIN_CPU) |
477 | goto error_unreserve; | 477 | goto error_unreserve; |
478 | } | 478 | } |
479 | list_for_each_entry(entry, &duplicates, head) { | ||
480 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | ||
481 | /* if anything is swapped out don't swap it in here, | ||
482 | just abort and wait for the next CS */ | ||
483 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | ||
484 | goto error_unreserve; | ||
485 | } | ||
486 | |||
479 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); | 487 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); |
480 | if (r) | 488 | if (r) |
481 | goto error_unreserve; | 489 | goto error_unreserve; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 80f797414236..8f5ac535d809 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc, | |||
55 | { | 55 | { |
56 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | 56 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); |
57 | 57 | ||
58 | if (!state->enable) | ||
59 | return 0; | ||
60 | |||
58 | if (exynos_crtc->ops->atomic_check) | 61 | if (exynos_crtc->ops->atomic_check) |
59 | return exynos_crtc->ops->atomic_check(exynos_crtc, state); | 62 | return exynos_crtc->ops->atomic_check(exynos_crtc, state); |
60 | 63 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index abd2d2944022..02f6ccb848a9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -14059,6 +14059,7 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
14059 | struct drm_crtc *crtc = crtc_state->base.crtc; | 14059 | struct drm_crtc *crtc = crtc_state->base.crtc; |
14060 | struct drm_framebuffer *fb = state->base.fb; | 14060 | struct drm_framebuffer *fb = state->base.fb; |
14061 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 14061 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
14062 | enum pipe pipe = to_intel_plane(plane)->pipe; | ||
14062 | unsigned stride; | 14063 | unsigned stride; |
14063 | int ret; | 14064 | int ret; |
14064 | 14065 | ||
@@ -14092,6 +14093,22 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
14092 | return -EINVAL; | 14093 | return -EINVAL; |
14093 | } | 14094 | } |
14094 | 14095 | ||
14096 | /* | ||
14097 | * There's something wrong with the cursor on CHV pipe C. | ||
14098 | * If it straddles the left edge of the screen then | ||
14099 | * moving it away from the edge or disabling it often | ||
14100 | * results in a pipe underrun, and often that can lead to | ||
14101 | * dead pipe (constant underrun reported, and it scans | ||
14102 | * out just a solid color). To recover from that, the | ||
14103 | * display power well must be turned off and on again. | ||
14104 | * Refuse the put the cursor into that compromised position. | ||
14105 | */ | ||
14106 | if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && | ||
14107 | state->visible && state->base.crtc_x < 0) { | ||
14108 | DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); | ||
14109 | return -EINVAL; | ||
14110 | } | ||
14111 | |||
14095 | return 0; | 14112 | return 0; |
14096 | } | 14113 | } |
14097 | 14114 | ||
@@ -14124,7 +14141,8 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
14124 | 14141 | ||
14125 | intel_crtc->cursor_addr = addr; | 14142 | intel_crtc->cursor_addr = addr; |
14126 | 14143 | ||
14127 | intel_crtc_update_cursor(crtc, state->visible); | 14144 | if (crtc->state->active) |
14145 | intel_crtc_update_cursor(crtc, state->visible); | ||
14128 | } | 14146 | } |
14129 | 14147 | ||
14130 | static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, | 14148 | static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a372cc392510..62141751c2f0 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -1385,17 +1385,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
1385 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 1385 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
1386 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | 1386 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
1387 | bool live_status = false; | 1387 | bool live_status = false; |
1388 | unsigned int retry = 3; | 1388 | unsigned int try; |
1389 | 1389 | ||
1390 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 1390 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
1391 | connector->base.id, connector->name); | 1391 | connector->base.id, connector->name); |
1392 | 1392 | ||
1393 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); | 1393 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
1394 | 1394 | ||
1395 | while (!live_status && --retry) { | 1395 | for (try = 0; !live_status && try < 4; try++) { |
1396 | if (try) | ||
1397 | msleep(10); | ||
1396 | live_status = intel_digital_port_connected(dev_priv, | 1398 | live_status = intel_digital_port_connected(dev_priv, |
1397 | hdmi_to_dig_port(intel_hdmi)); | 1399 | hdmi_to_dig_port(intel_hdmi)); |
1398 | msleep(10); | ||
1399 | } | 1400 | } |
1400 | 1401 | ||
1401 | if (!live_status) | 1402 | if (!live_status) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8d0d6f59a72b..eb5fa05cf476 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -4713,9 +4713,8 @@ static void gen9_enable_rc6(struct drm_device *dev) | |||
4713 | * 3b: Enable Coarse Power Gating only when RC6 is enabled. | 4713 | * 3b: Enable Coarse Power Gating only when RC6 is enabled. |
4714 | * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. | 4714 | * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. |
4715 | */ | 4715 | */ |
4716 | if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || | 4716 | if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || |
4717 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && | 4717 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) |
4718 | IS_SKL_REVID(dev, 0, SKL_REVID_F0))) | ||
4719 | I915_WRITE(GEN9_PG_ENABLE, 0); | 4718 | I915_WRITE(GEN9_PG_ENABLE, 0); |
4720 | else | 4719 | else |
4721 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? | 4720 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 8f760002e401..913192c94876 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h | |||
@@ -159,7 +159,6 @@ struct nvkm_device_func { | |||
159 | struct nvkm_device_quirk { | 159 | struct nvkm_device_quirk { |
160 | u8 tv_pin_mask; | 160 | u8 tv_pin_mask; |
161 | u8 tv_gpio; | 161 | u8 tv_gpio; |
162 | bool War00C800_0; | ||
163 | }; | 162 | }; |
164 | 163 | ||
165 | struct nvkm_device_chip { | 164 | struct nvkm_device_chip { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index caf22b589edc..62ad0300cfa5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | |||
@@ -259,12 +259,6 @@ nvkm_device_pci_10de_0df4[] = { | |||
259 | }; | 259 | }; |
260 | 260 | ||
261 | static const struct nvkm_device_pci_vendor | 261 | static const struct nvkm_device_pci_vendor |
262 | nvkm_device_pci_10de_0fcd[] = { | ||
263 | { 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */ | ||
264 | {} | ||
265 | }; | ||
266 | |||
267 | static const struct nvkm_device_pci_vendor | ||
268 | nvkm_device_pci_10de_0fd2[] = { | 262 | nvkm_device_pci_10de_0fd2[] = { |
269 | { 0x1028, 0x0595, "GeForce GT 640M LE" }, | 263 | { 0x1028, 0x0595, "GeForce GT 640M LE" }, |
270 | { 0x1028, 0x05b2, "GeForce GT 640M LE" }, | 264 | { 0x1028, 0x05b2, "GeForce GT 640M LE" }, |
@@ -279,12 +273,6 @@ nvkm_device_pci_10de_0fe3[] = { | |||
279 | }; | 273 | }; |
280 | 274 | ||
281 | static const struct nvkm_device_pci_vendor | 275 | static const struct nvkm_device_pci_vendor |
282 | nvkm_device_pci_10de_0fe4[] = { | ||
283 | { 0x144d, 0xc740, NULL, { .War00C800_0 = true } }, | ||
284 | {} | ||
285 | }; | ||
286 | |||
287 | static const struct nvkm_device_pci_vendor | ||
288 | nvkm_device_pci_10de_104b[] = { | 276 | nvkm_device_pci_10de_104b[] = { |
289 | { 0x1043, 0x844c, "GeForce GT 625" }, | 277 | { 0x1043, 0x844c, "GeForce GT 625" }, |
290 | { 0x1043, 0x846b, "GeForce GT 625" }, | 278 | { 0x1043, 0x846b, "GeForce GT 625" }, |
@@ -690,13 +678,6 @@ nvkm_device_pci_10de_1189[] = { | |||
690 | static const struct nvkm_device_pci_vendor | 678 | static const struct nvkm_device_pci_vendor |
691 | nvkm_device_pci_10de_1199[] = { | 679 | nvkm_device_pci_10de_1199[] = { |
692 | { 0x1458, 0xd001, "GeForce GTX 760" }, | 680 | { 0x1458, 0xd001, "GeForce GTX 760" }, |
693 | { 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */ | ||
694 | {} | ||
695 | }; | ||
696 | |||
697 | static const struct nvkm_device_pci_vendor | ||
698 | nvkm_device_pci_10de_11e0[] = { | ||
699 | { 0x1558, 0x5106, NULL, { .War00C800_0 = true } }, | ||
700 | {} | 681 | {} |
701 | }; | 682 | }; |
702 | 683 | ||
@@ -707,14 +688,6 @@ nvkm_device_pci_10de_11e3[] = { | |||
707 | }; | 688 | }; |
708 | 689 | ||
709 | static const struct nvkm_device_pci_vendor | 690 | static const struct nvkm_device_pci_vendor |
710 | nvkm_device_pci_10de_11fc[] = { | ||
711 | { 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */ | ||
712 | { 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */ | ||
713 | { 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */ | ||
714 | {} | ||
715 | }; | ||
716 | |||
717 | static const struct nvkm_device_pci_vendor | ||
718 | nvkm_device_pci_10de_1247[] = { | 691 | nvkm_device_pci_10de_1247[] = { |
719 | { 0x1043, 0x212a, "GeForce GT 635M" }, | 692 | { 0x1043, 0x212a, "GeForce GT 635M" }, |
720 | { 0x1043, 0x212b, "GeForce GT 635M" }, | 693 | { 0x1043, 0x212b, "GeForce GT 635M" }, |
@@ -1368,7 +1341,7 @@ nvkm_device_pci_10de[] = { | |||
1368 | { 0x0fc6, "GeForce GTX 650" }, | 1341 | { 0x0fc6, "GeForce GTX 650" }, |
1369 | { 0x0fc8, "GeForce GT 740" }, | 1342 | { 0x0fc8, "GeForce GT 740" }, |
1370 | { 0x0fc9, "GeForce GT 730" }, | 1343 | { 0x0fc9, "GeForce GT 730" }, |
1371 | { 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd }, | 1344 | { 0x0fcd, "GeForce GT 755M" }, |
1372 | { 0x0fce, "GeForce GT 640M LE" }, | 1345 | { 0x0fce, "GeForce GT 640M LE" }, |
1373 | { 0x0fd1, "GeForce GT 650M" }, | 1346 | { 0x0fd1, "GeForce GT 650M" }, |
1374 | { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 }, | 1347 | { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 }, |
@@ -1382,7 +1355,7 @@ nvkm_device_pci_10de[] = { | |||
1382 | { 0x0fe1, "GeForce GT 730M" }, | 1355 | { 0x0fe1, "GeForce GT 730M" }, |
1383 | { 0x0fe2, "GeForce GT 745M" }, | 1356 | { 0x0fe2, "GeForce GT 745M" }, |
1384 | { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, | 1357 | { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 }, |
1385 | { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 }, | 1358 | { 0x0fe4, "GeForce GT 750M" }, |
1386 | { 0x0fe9, "GeForce GT 750M" }, | 1359 | { 0x0fe9, "GeForce GT 750M" }, |
1387 | { 0x0fea, "GeForce GT 755M" }, | 1360 | { 0x0fea, "GeForce GT 755M" }, |
1388 | { 0x0fec, "GeForce 710A" }, | 1361 | { 0x0fec, "GeForce 710A" }, |
@@ -1497,12 +1470,12 @@ nvkm_device_pci_10de[] = { | |||
1497 | { 0x11c6, "GeForce GTX 650 Ti" }, | 1470 | { 0x11c6, "GeForce GTX 650 Ti" }, |
1498 | { 0x11c8, "GeForce GTX 650" }, | 1471 | { 0x11c8, "GeForce GTX 650" }, |
1499 | { 0x11cb, "GeForce GT 740" }, | 1472 | { 0x11cb, "GeForce GT 740" }, |
1500 | { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 }, | 1473 | { 0x11e0, "GeForce GTX 770M" }, |
1501 | { 0x11e1, "GeForce GTX 765M" }, | 1474 | { 0x11e1, "GeForce GTX 765M" }, |
1502 | { 0x11e2, "GeForce GTX 765M" }, | 1475 | { 0x11e2, "GeForce GTX 765M" }, |
1503 | { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, | 1476 | { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, |
1504 | { 0x11fa, "Quadro K4000" }, | 1477 | { 0x11fa, "Quadro K4000" }, |
1505 | { 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc }, | 1478 | { 0x11fc, "Quadro K2100M" }, |
1506 | { 0x1200, "GeForce GTX 560 Ti" }, | 1479 | { 0x1200, "GeForce GTX 560 Ti" }, |
1507 | { 0x1201, "GeForce GTX 560" }, | 1480 | { 0x1201, "GeForce GTX 560" }, |
1508 | { 0x1203, "GeForce GTX 460 SE v2" }, | 1481 | { 0x1203, "GeForce GTX 460 SE v2" }, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c index 43006db6fd58..80fed7e78dcb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | |||
@@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan) | |||
83 | fan->type = NVBIOS_THERM_FAN_UNK; | 83 | fan->type = NVBIOS_THERM_FAN_UNK; |
84 | } | 84 | } |
85 | 85 | ||
86 | fan->fan_mode = NVBIOS_THERM_FAN_LINEAR; | ||
86 | fan->min_duty = nvbios_rd08(bios, data + 0x02); | 87 | fan->min_duty = nvbios_rd08(bios, data + 0x02); |
87 | fan->max_duty = nvbios_rd08(bios, data + 0x03); | 88 | fan->max_duty = nvbios_rd08(bios, data + 0x03); |
88 | 89 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c index d942fa7b9f18..86f9f3b13f71 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c | |||
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable) | |||
81 | nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); | 81 | nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); |
82 | nvkm_rd32(device, 0x000200); | 82 | nvkm_rd32(device, 0x000200); |
83 | 83 | ||
84 | if ( nvkm_boolopt(device->cfgopt, "War00C800_0", | 84 | if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) { |
85 | device->quirk ? device->quirk->War00C800_0 : false)) { | ||
86 | nvkm_info(&pmu->subdev, "hw bug workaround enabled\n"); | ||
87 | switch (device->chipset) { | 85 | switch (device->chipset) { |
88 | case 0xe4: | 86 | case 0xe4: |
89 | magic(device, 0x04000000); | 87 | magic(device, 0x04000000); |
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index b8e4cdec28c3..24f92bea39c7 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c | |||
@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, | |||
112 | dma_addr_t paddr; | 112 | dma_addr_t paddr; |
113 | int ret; | 113 | int ret; |
114 | 114 | ||
115 | /* only doing ARGB32 since this is what is needed to alpha-blend | ||
116 | * with video overlays: | ||
117 | */ | ||
118 | sizes->surface_bpp = 32; | 115 | sizes->surface_bpp = 32; |
119 | sizes->surface_depth = 32; | 116 | sizes->surface_depth = 24; |
120 | 117 | ||
121 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, | 118 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, |
122 | sizes->surface_height, sizes->surface_bpp, | 119 | sizes->surface_height, sizes->surface_bpp, |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 1e3a80165309..4c30d8c65558 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -4173,11 +4173,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
4173 | control |= ib->length_dw | (vm_id << 24); | 4173 | control |= ib->length_dw | (vm_id << 24); |
4174 | 4174 | ||
4175 | radeon_ring_write(ring, header); | 4175 | radeon_ring_write(ring, header); |
4176 | radeon_ring_write(ring, | 4176 | radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC)); |
4177 | #ifdef __BIG_ENDIAN | ||
4178 | (2 << 0) | | ||
4179 | #endif | ||
4180 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
4181 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); | 4177 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); |
4182 | radeon_ring_write(ring, control); | 4178 | radeon_ring_write(ring, control); |
4183 | } | 4179 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 574f62bbd215..7eb1ae758906 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
@@ -361,31 +361,31 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, | |||
361 | 361 | ||
362 | /* stitch together an VCE create msg */ | 362 | /* stitch together an VCE create msg */ |
363 | ib.length_dw = 0; | 363 | ib.length_dw = 0; |
364 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | 364 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */ |
365 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | 365 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */ |
366 | ib.ptr[ib.length_dw++] = handle; | 366 | ib.ptr[ib.length_dw++] = cpu_to_le32(handle); |
367 | 367 | ||
368 | ib.ptr[ib.length_dw++] = 0x00000030; /* len */ | 368 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */ |
369 | ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ | 369 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */ |
370 | ib.ptr[ib.length_dw++] = 0x00000000; | 370 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000); |
371 | ib.ptr[ib.length_dw++] = 0x00000042; | 371 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042); |
372 | ib.ptr[ib.length_dw++] = 0x0000000a; | 372 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a); |
373 | ib.ptr[ib.length_dw++] = 0x00000001; | 373 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
374 | ib.ptr[ib.length_dw++] = 0x00000080; | 374 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080); |
375 | ib.ptr[ib.length_dw++] = 0x00000060; | 375 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060); |
376 | ib.ptr[ib.length_dw++] = 0x00000100; | 376 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100); |
377 | ib.ptr[ib.length_dw++] = 0x00000100; | 377 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100); |
378 | ib.ptr[ib.length_dw++] = 0x0000000c; | 378 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); |
379 | ib.ptr[ib.length_dw++] = 0x00000000; | 379 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000); |
380 | 380 | ||
381 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | 381 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */ |
382 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | 382 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */ |
383 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | 383 | ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy)); |
384 | ib.ptr[ib.length_dw++] = dummy; | 384 | ib.ptr[ib.length_dw++] = cpu_to_le32(dummy); |
385 | ib.ptr[ib.length_dw++] = 0x00000001; | 385 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
386 | 386 | ||
387 | for (i = ib.length_dw; i < ib_size_dw; ++i) | 387 | for (i = ib.length_dw; i < ib_size_dw; ++i) |
388 | ib.ptr[i] = 0x0; | 388 | ib.ptr[i] = cpu_to_le32(0x0); |
389 | 389 | ||
390 | r = radeon_ib_schedule(rdev, &ib, NULL, false); | 390 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
391 | if (r) { | 391 | if (r) { |
@@ -428,21 +428,21 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
428 | 428 | ||
429 | /* stitch together an VCE destroy msg */ | 429 | /* stitch together an VCE destroy msg */ |
430 | ib.length_dw = 0; | 430 | ib.length_dw = 0; |
431 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | 431 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */ |
432 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | 432 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */ |
433 | ib.ptr[ib.length_dw++] = handle; | 433 | ib.ptr[ib.length_dw++] = cpu_to_le32(handle); |
434 | 434 | ||
435 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | 435 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */ |
436 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | 436 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */ |
437 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | 437 | ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy)); |
438 | ib.ptr[ib.length_dw++] = dummy; | 438 | ib.ptr[ib.length_dw++] = cpu_to_le32(dummy); |
439 | ib.ptr[ib.length_dw++] = 0x00000001; | 439 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
440 | 440 | ||
441 | ib.ptr[ib.length_dw++] = 0x00000008; /* len */ | 441 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */ |
442 | ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ | 442 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */ |
443 | 443 | ||
444 | for (i = ib.length_dw; i < ib_size_dw; ++i) | 444 | for (i = ib.length_dw; i < ib_size_dw; ++i) |
445 | ib.ptr[i] = 0x0; | 445 | ib.ptr[i] = cpu_to_le32(0x0); |
446 | 446 | ||
447 | r = radeon_ib_schedule(rdev, &ib, NULL, false); | 447 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
448 | if (r) { | 448 | if (r) { |
@@ -699,12 +699,12 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | |||
699 | { | 699 | { |
700 | uint64_t addr = semaphore->gpu_addr; | 700 | uint64_t addr = semaphore->gpu_addr; |
701 | 701 | ||
702 | radeon_ring_write(ring, VCE_CMD_SEMAPHORE); | 702 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE)); |
703 | radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); | 703 | radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF)); |
704 | radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); | 704 | radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF)); |
705 | radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); | 705 | radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0))); |
706 | if (!emit_wait) | 706 | if (!emit_wait) |
707 | radeon_ring_write(ring, VCE_CMD_END); | 707 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
708 | 708 | ||
709 | return true; | 709 | return true; |
710 | } | 710 | } |
@@ -719,10 +719,10 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | |||
719 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | 719 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
720 | { | 720 | { |
721 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | 721 | struct radeon_ring *ring = &rdev->ring[ib->ring]; |
722 | radeon_ring_write(ring, VCE_CMD_IB); | 722 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB)); |
723 | radeon_ring_write(ring, ib->gpu_addr); | 723 | radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr)); |
724 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); | 724 | radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr))); |
725 | radeon_ring_write(ring, ib->length_dw); | 725 | radeon_ring_write(ring, cpu_to_le32(ib->length_dw)); |
726 | } | 726 | } |
727 | 727 | ||
728 | /** | 728 | /** |
@@ -738,12 +738,12 @@ void radeon_vce_fence_emit(struct radeon_device *rdev, | |||
738 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | 738 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
739 | uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; | 739 | uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; |
740 | 740 | ||
741 | radeon_ring_write(ring, VCE_CMD_FENCE); | 741 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE)); |
742 | radeon_ring_write(ring, addr); | 742 | radeon_ring_write(ring, cpu_to_le32(addr)); |
743 | radeon_ring_write(ring, upper_32_bits(addr)); | 743 | radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr))); |
744 | radeon_ring_write(ring, fence->seq); | 744 | radeon_ring_write(ring, cpu_to_le32(fence->seq)); |
745 | radeon_ring_write(ring, VCE_CMD_TRAP); | 745 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP)); |
746 | radeon_ring_write(ring, VCE_CMD_END); | 746 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
747 | } | 747 | } |
748 | 748 | ||
749 | /** | 749 | /** |
@@ -765,7 +765,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
765 | ring->idx, r); | 765 | ring->idx, r); |
766 | return r; | 766 | return r; |
767 | } | 767 | } |
768 | radeon_ring_write(ring, VCE_CMD_END); | 768 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
769 | radeon_ring_unlock_commit(rdev, ring, false); | 769 | radeon_ring_unlock_commit(rdev, ring, false); |
770 | 770 | ||
771 | for (i = 0; i < rdev->usec_timeout; i++) { | 771 | for (i = 0; i < rdev->usec_timeout; i++) { |
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index 6a954544727f..f154fb1929bd 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -180,7 +180,7 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | |||
180 | spin_unlock(&lock->lock); | 180 | spin_unlock(&lock->lock); |
181 | } | 181 | } |
182 | } else | 182 | } else |
183 | wait_event(lock->queue, __ttm_read_lock(lock)); | 183 | wait_event(lock->queue, __ttm_write_lock(lock)); |
184 | 184 | ||
185 | return ret; | 185 | return ret; |
186 | } | 186 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a09cf8529b9f..c49812b80dd0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -1233,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
1233 | 1233 | ||
1234 | vmw_fp->locked_master = drm_master_get(file_priv->master); | 1234 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
1235 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | 1235 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
1236 | vmw_kms_legacy_hotspot_clear(dev_priv); | ||
1236 | if (unlikely((ret != 0))) { | 1237 | if (unlikely((ret != 0))) { |
1237 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | 1238 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
1238 | drm_master_put(&vmw_fp->locked_master); | 1239 | drm_master_put(&vmw_fp->locked_master); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index a8ae9dfb83b7..469cdd520615 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -925,6 +925,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, | |||
925 | uint32_t num_clips); | 925 | uint32_t num_clips); |
926 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 926 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
927 | struct drm_file *file_priv); | 927 | struct drm_file *file_priv); |
928 | void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); | ||
928 | 929 | ||
929 | int vmw_dumb_create(struct drm_file *file_priv, | 930 | int vmw_dumb_create(struct drm_file *file_priv, |
930 | struct drm_device *dev, | 931 | struct drm_device *dev, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index a8baf5f5e765..b6a0806b06bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -390,7 +390,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, | |||
390 | else if (ctx_id == SVGA3D_INVALID_ID) | 390 | else if (ctx_id == SVGA3D_INVALID_ID) |
391 | ret = vmw_local_fifo_reserve(dev_priv, bytes); | 391 | ret = vmw_local_fifo_reserve(dev_priv, bytes); |
392 | else { | 392 | else { |
393 | WARN_ON("Command buffer has not been allocated.\n"); | 393 | WARN(1, "Command buffer has not been allocated.\n"); |
394 | ret = NULL; | 394 | ret = NULL; |
395 | } | 395 | } |
396 | if (IS_ERR_OR_NULL(ret)) { | 396 | if (IS_ERR_OR_NULL(ret)) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 162f188969a7..274c90da44a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -133,13 +133,19 @@ void vmw_cursor_update_position(struct vmw_private *dev_priv, | |||
133 | vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); | 133 | vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); |
134 | } | 134 | } |
135 | 135 | ||
136 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 136 | |
137 | uint32_t handle, uint32_t width, uint32_t height) | 137 | /* |
138 | * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback. | ||
139 | */ | ||
140 | int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
141 | uint32_t handle, uint32_t width, uint32_t height, | ||
142 | int32_t hot_x, int32_t hot_y) | ||
138 | { | 143 | { |
139 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | 144 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); |
140 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); | 145 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); |
141 | struct vmw_surface *surface = NULL; | 146 | struct vmw_surface *surface = NULL; |
142 | struct vmw_dma_buffer *dmabuf = NULL; | 147 | struct vmw_dma_buffer *dmabuf = NULL; |
148 | s32 hotspot_x, hotspot_y; | ||
143 | int ret; | 149 | int ret; |
144 | 150 | ||
145 | /* | 151 | /* |
@@ -151,6 +157,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
151 | */ | 157 | */ |
152 | drm_modeset_unlock_crtc(crtc); | 158 | drm_modeset_unlock_crtc(crtc); |
153 | drm_modeset_lock_all(dev_priv->dev); | 159 | drm_modeset_lock_all(dev_priv->dev); |
160 | hotspot_x = hot_x + du->hotspot_x; | ||
161 | hotspot_y = hot_y + du->hotspot_y; | ||
154 | 162 | ||
155 | /* A lot of the code assumes this */ | 163 | /* A lot of the code assumes this */ |
156 | if (handle && (width != 64 || height != 64)) { | 164 | if (handle && (width != 64 || height != 64)) { |
@@ -187,31 +195,34 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
187 | vmw_dmabuf_unreference(&du->cursor_dmabuf); | 195 | vmw_dmabuf_unreference(&du->cursor_dmabuf); |
188 | 196 | ||
189 | /* setup new image */ | 197 | /* setup new image */ |
198 | ret = 0; | ||
190 | if (surface) { | 199 | if (surface) { |
191 | /* vmw_user_surface_lookup takes one reference */ | 200 | /* vmw_user_surface_lookup takes one reference */ |
192 | du->cursor_surface = surface; | 201 | du->cursor_surface = surface; |
193 | 202 | ||
194 | du->cursor_surface->snooper.crtc = crtc; | 203 | du->cursor_surface->snooper.crtc = crtc; |
195 | du->cursor_age = du->cursor_surface->snooper.age; | 204 | du->cursor_age = du->cursor_surface->snooper.age; |
196 | vmw_cursor_update_image(dev_priv, surface->snooper.image, | 205 | ret = vmw_cursor_update_image(dev_priv, surface->snooper.image, |
197 | 64, 64, du->hotspot_x, du->hotspot_y); | 206 | 64, 64, hotspot_x, hotspot_y); |
198 | } else if (dmabuf) { | 207 | } else if (dmabuf) { |
199 | /* vmw_user_surface_lookup takes one reference */ | 208 | /* vmw_user_surface_lookup takes one reference */ |
200 | du->cursor_dmabuf = dmabuf; | 209 | du->cursor_dmabuf = dmabuf; |
201 | 210 | ||
202 | ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, | 211 | ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, |
203 | du->hotspot_x, du->hotspot_y); | 212 | hotspot_x, hotspot_y); |
204 | } else { | 213 | } else { |
205 | vmw_cursor_update_position(dev_priv, false, 0, 0); | 214 | vmw_cursor_update_position(dev_priv, false, 0, 0); |
206 | ret = 0; | ||
207 | goto out; | 215 | goto out; |
208 | } | 216 | } |
209 | 217 | ||
210 | vmw_cursor_update_position(dev_priv, true, | 218 | if (!ret) { |
211 | du->cursor_x + du->hotspot_x, | 219 | vmw_cursor_update_position(dev_priv, true, |
212 | du->cursor_y + du->hotspot_y); | 220 | du->cursor_x + hotspot_x, |
221 | du->cursor_y + hotspot_y); | ||
222 | du->core_hotspot_x = hot_x; | ||
223 | du->core_hotspot_y = hot_y; | ||
224 | } | ||
213 | 225 | ||
214 | ret = 0; | ||
215 | out: | 226 | out: |
216 | drm_modeset_unlock_all(dev_priv->dev); | 227 | drm_modeset_unlock_all(dev_priv->dev); |
217 | drm_modeset_lock_crtc(crtc, crtc->cursor); | 228 | drm_modeset_lock_crtc(crtc, crtc->cursor); |
@@ -239,8 +250,10 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
239 | drm_modeset_lock_all(dev_priv->dev); | 250 | drm_modeset_lock_all(dev_priv->dev); |
240 | 251 | ||
241 | vmw_cursor_update_position(dev_priv, shown, | 252 | vmw_cursor_update_position(dev_priv, shown, |
242 | du->cursor_x + du->hotspot_x, | 253 | du->cursor_x + du->hotspot_x + |
243 | du->cursor_y + du->hotspot_y); | 254 | du->core_hotspot_x, |
255 | du->cursor_y + du->hotspot_y + | ||
256 | du->core_hotspot_y); | ||
244 | 257 | ||
245 | drm_modeset_unlock_all(dev_priv->dev); | 258 | drm_modeset_unlock_all(dev_priv->dev); |
246 | drm_modeset_lock_crtc(crtc, crtc->cursor); | 259 | drm_modeset_lock_crtc(crtc, crtc->cursor); |
@@ -334,6 +347,29 @@ err_unreserve: | |||
334 | ttm_bo_unreserve(bo); | 347 | ttm_bo_unreserve(bo); |
335 | } | 348 | } |
336 | 349 | ||
350 | /** | ||
351 | * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots | ||
352 | * | ||
353 | * @dev_priv: Pointer to the device private struct. | ||
354 | * | ||
355 | * Clears all legacy hotspots. | ||
356 | */ | ||
357 | void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) | ||
358 | { | ||
359 | struct drm_device *dev = dev_priv->dev; | ||
360 | struct vmw_display_unit *du; | ||
361 | struct drm_crtc *crtc; | ||
362 | |||
363 | drm_modeset_lock_all(dev); | ||
364 | drm_for_each_crtc(crtc, dev) { | ||
365 | du = vmw_crtc_to_du(crtc); | ||
366 | |||
367 | du->hotspot_x = 0; | ||
368 | du->hotspot_y = 0; | ||
369 | } | ||
370 | drm_modeset_unlock_all(dev); | ||
371 | } | ||
372 | |||
337 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) | 373 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) |
338 | { | 374 | { |
339 | struct drm_device *dev = dev_priv->dev; | 375 | struct drm_device *dev = dev_priv->dev; |
@@ -351,7 +387,9 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) | |||
351 | du->cursor_age = du->cursor_surface->snooper.age; | 387 | du->cursor_age = du->cursor_surface->snooper.age; |
352 | vmw_cursor_update_image(dev_priv, | 388 | vmw_cursor_update_image(dev_priv, |
353 | du->cursor_surface->snooper.image, | 389 | du->cursor_surface->snooper.image, |
354 | 64, 64, du->hotspot_x, du->hotspot_y); | 390 | 64, 64, |
391 | du->hotspot_x + du->core_hotspot_x, | ||
392 | du->hotspot_y + du->core_hotspot_y); | ||
355 | } | 393 | } |
356 | 394 | ||
357 | mutex_unlock(&dev->mode_config.mutex); | 395 | mutex_unlock(&dev->mode_config.mutex); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 782df7ca9794..edd81503516d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -159,6 +159,8 @@ struct vmw_display_unit { | |||
159 | 159 | ||
160 | int hotspot_x; | 160 | int hotspot_x; |
161 | int hotspot_y; | 161 | int hotspot_y; |
162 | s32 core_hotspot_x; | ||
163 | s32 core_hotspot_y; | ||
162 | 164 | ||
163 | unsigned unit; | 165 | unsigned unit; |
164 | 166 | ||
@@ -193,8 +195,9 @@ void vmw_du_crtc_restore(struct drm_crtc *crtc); | |||
193 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | 195 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, |
194 | u16 *r, u16 *g, u16 *b, | 196 | u16 *r, u16 *g, u16 *b, |
195 | uint32_t start, uint32_t size); | 197 | uint32_t start, uint32_t size); |
196 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 198 | int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, |
197 | uint32_t handle, uint32_t width, uint32_t height); | 199 | uint32_t handle, uint32_t width, uint32_t height, |
200 | int32_t hot_x, int32_t hot_y); | ||
198 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | 201 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); |
199 | int vmw_du_connector_dpms(struct drm_connector *connector, int mode); | 202 | int vmw_du_connector_dpms(struct drm_connector *connector, int mode); |
200 | void vmw_du_connector_save(struct drm_connector *connector); | 203 | void vmw_du_connector_save(struct drm_connector *connector); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 2def684e61a4..b6fa44fe8929 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -295,7 +295,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
295 | } | 295 | } |
296 | 296 | ||
297 | static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = { | 297 | static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = { |
298 | .cursor_set = vmw_du_crtc_cursor_set, | 298 | .cursor_set2 = vmw_du_crtc_cursor_set2, |
299 | .cursor_move = vmw_du_crtc_cursor_move, | 299 | .cursor_move = vmw_du_crtc_cursor_move, |
300 | .gamma_set = vmw_du_crtc_gamma_set, | 300 | .gamma_set = vmw_du_crtc_gamma_set, |
301 | .destroy = vmw_ldu_crtc_destroy, | 301 | .destroy = vmw_ldu_crtc_destroy, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index ecac70af032a..db082bea8daf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -531,7 +531,7 @@ out_no_fence: | |||
531 | } | 531 | } |
532 | 532 | ||
533 | static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { | 533 | static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { |
534 | .cursor_set = vmw_du_crtc_cursor_set, | 534 | .cursor_set2 = vmw_du_crtc_cursor_set2, |
535 | .cursor_move = vmw_du_crtc_cursor_move, | 535 | .cursor_move = vmw_du_crtc_cursor_move, |
536 | .gamma_set = vmw_du_crtc_gamma_set, | 536 | .gamma_set = vmw_du_crtc_gamma_set, |
537 | .destroy = vmw_sou_crtc_destroy, | 537 | .destroy = vmw_sou_crtc_destroy, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 87fc00af8d28..4ef5ffd7189d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
@@ -1041,7 +1041,7 @@ out_finish: | |||
1041 | * Screen Target CRTC dispatch table | 1041 | * Screen Target CRTC dispatch table |
1042 | */ | 1042 | */ |
1043 | static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = { | 1043 | static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = { |
1044 | .cursor_set = vmw_du_crtc_cursor_set, | 1044 | .cursor_set2 = vmw_du_crtc_cursor_set2, |
1045 | .cursor_move = vmw_du_crtc_cursor_move, | 1045 | .cursor_move = vmw_du_crtc_cursor_move, |
1046 | .gamma_set = vmw_du_crtc_gamma_set, | 1046 | .gamma_set = vmw_du_crtc_gamma_set, |
1047 | .destroy = vmw_stdu_crtc_destroy, | 1047 | .destroy = vmw_stdu_crtc_destroy, |