author		Chris Wilson <chris@chris-wilson.co.uk>	2010-11-15 01:33:11 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-11-15 01:33:11 -0500
commit		1bb95834bbcdc969e477a9284cf96c17a4c2616f (patch)
tree		9cf66b22a611bb6bc78778c05dac72263bb45a23 /drivers/gpu/drm
parent		85345517fe6d4de27b0d6ca19fef9d28ac947c4a (diff)
parent		a41c73e04673b47730df682446f0d52f95e32a5b (diff)
Merge remote branch 'airlied/drm-fixes' into drm-intel-fixes
Diffstat (limited to 'drivers/gpu/drm')
33 files changed, 445 insertions, 296 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195a..f7af91cb273d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;
 
-	WARN(!crtc, "checking null crtc?");
+	WARN(!crtc, "checking null crtc?\n");
 
 	dev = crtc->dev;
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a530..a245d17165ae 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 			.addr	= DDC_ADDR,
 			.flags	= I2C_M_RD,
 			.len	= len,
-			.buf	= buf + start,
+			.buf	= buf,
 		}
 	};
 
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	int i, j = 0;
+	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
 	for (j = 1; j <= block[0x7e]; j++) {
 		for (i = 0; i < 4; i++) {
-			if (drm_do_probe_ddc_edid(adapter, block, j,
-						  EDID_LENGTH))
+			if (drm_do_probe_ddc_edid(adapter,
+				  block + (valid_extensions + 1) * EDID_LENGTH,
+				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+				valid_extensions++;
 				break;
+			}
 		}
 		if (i == 4)
-			goto carp;
+			dev_warn(connector->dev->dev,
+				 "%s: Ignoring invalid EDID block %d.\n",
+				 drm_get_connector_name(connector), j);
+	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
 	}
 
 	return block;
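The drm_edid.c change makes EDID reading salvage what it can: instead of bailing out (the old goto carp) on the first bad extension block, it reads each extension into the next free slot, keeps only blocks that pass drm_edid_block_valid(), then patches the base block's extension count (byte 0x7e) and checksum (byte 127) and shrinks the allocation. Every 128-byte EDID block must sum to 0 mod 256, so lowering the count byte by some delta requires raising the checksum byte by the same delta. A compact userspace sketch of that fix-up step (not the kernel code; EDID_LENGTH is 128):

#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH 128

/* Sketch of the count/checksum patch-up in drm_do_get_edid(): after
 * dropping invalid extension blocks, byte 0x7e (extension count) goes
 * down by delta, so byte 127 (checksum) must go up by delta to keep
 * the block summing to 0 mod 256. */
static void edid_set_extension_count(uint8_t base[EDID_LENGTH], uint8_t valid_extensions)
{
	uint8_t delta = base[0x7e] - valid_extensions;

	base[EDID_LENGTH - 1] += delta;	/* checksum compensates the count change */
	base[0x7e] = valid_extensions;
}

int main(void)
{
	uint8_t base[EDID_LENGTH] = { 0x00, 0xff };
	unsigned sum = 0;
	int i;

	base[0x7e] = 3;			/* claims 3 extensions; say only 1 is valid */
	for (i = 0; i < EDID_LENGTH - 1; i++)
		sum += base[i];
	base[EDID_LENGTH - 1] = (uint8_t)(0x100 - (sum & 0xff));	/* block sums to 0 */

	edid_set_extension_count(base, 1);
	for (sum = 0, i = 0; i < EDID_LENGTH; i++)
		sum += base[i];
	printf("count=%u sum%%256=%u\n", base[0x7e], sum & 0xff);	/* count=1 sum%256=0 */
	return 0;
}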
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 951e3d463113..781c26c37b38 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4079,8 +4079,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		alignment = i915_gem_get_gtt_alignment(obj);
 	if (obj_priv->gtt_offset & (alignment - 1)) {
 		WARN(obj_priv->pin_count,
-		     "bo is already pinned with incorrect alignment:"
-		     " offset=%x, req.alignment=%x\n",
+		     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
 		     obj_priv->gtt_offset, alignment);
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec050..488c36c8f5e6 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@ restart_ih:
 		case 0: /* D1 vblank */
 			if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@ restart_ih:
 		case 0: /* D2 vblank */
 			if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@ restart_ih:
 		case 0: /* D3 vblank */
 			if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 2);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@ restart_ih:
 		case 0: /* D4 vblank */
 			if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 3);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@ restart_ih:
 		case 0: /* D5 vblank */
 			if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 4);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@ restart_ih:
 		case 0: /* D6 vblank */
 			if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 5);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D6 vblank\n");
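All six evergreen display-controller vblank paths now raise rdev->pm.vblank_sync before waking vblank_queue, so a waiter can tell a genuine vblank edge from a spurious wakeup. The consumer side is outside this diff (in the radeon power-management code), but the flag implies a wait pattern roughly like the following sketch; treat the function name and timeout as assumptions:

/* Sketch (assumption): reclocking waits for a real vblank before
 * touching clocks, using the flag set by the IRQ handler above. */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->irq.installed) {
		rdev->pm.vblank_sync = false;		/* armed by the waiter */
		wait_event_timeout(rdev->irq.vblank_queue,
				   rdev->pm.vblank_sync,	/* set in the IRQ paths above */
				   msecs_to_jiffies(200));	/* don't hang if displays are off */
	}
}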
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 086b9b0416c4..ac3b6dde23db 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -495,6 +495,7 @@ done:
 		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 		return r;
 	}
+	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
 	return 0;
 }
 
@@ -502,6 +503,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
 {
 	int r;
 
+	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	if (rdev->r600_blit.shader_obj == NULL)
 		return;
 	/* If we can't reserve the bo, unref should be enough to destroy
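The blit-KMS hunks widen rdev->mc.active_vram_size to the full VRAM size once the blit shader object is pinned, and narrow it back to the CPU-visible range in evergreen_blit_fini(). A hedged reading of the rationale: with the GPU blitter available, buffers can live anywhere in VRAM because moves are GPU copies, while without it only CPU-visible VRAM is safe to hand out. A toy illustration of that invariant (helper name and sizes invented for the demo):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of the invariant toggled above: allocations must
 * end below active_vram_size, which is real_vram_size while the blitter
 * works and visible_vram_size otherwise. */
struct mc { uint64_t real_vram_size, visible_vram_size, active_vram_size; };

static bool vram_alloc_ok(const struct mc *mc, uint64_t end_of_alloc)
{
	return end_of_alloc <= mc->active_vram_size;
}

int main(void)
{
	struct mc mc = { .real_vram_size = 1024ull << 20, .visible_vram_size = 256ull << 20 };

	mc.active_vram_size = mc.visible_vram_size;	/* blitter down (fini) */
	assert(!vram_alloc_ok(&mc, 512ull << 20));
	mc.active_vram_size = mc.real_vram_size;	/* blitter up (init done) */
	assert(vram_alloc_ok(&mc, 512ull << 20));
	return 0;
}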
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 6d1540c0bfed..8e10aa9f74b0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "R100 PCI GART already initialized.\n");
+		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
 	uint32_t tmp = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
@@ -3180,6 +3180,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
 	for (u = 0; u < track->num_texture; u++) {
 		if (!track->textures[u].enabled)
 			continue;
+		if (track->textures[u].lookup_disable)
+			continue;
 		robj = track->textures[u].robj;
 		if (robj == NULL) {
 			DRM_ERROR("No texture bound to unit %u\n", u);
@@ -3414,6 +3416,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 		track->textures[i].robj = NULL;
 		/* CS IB emission code makes sure texture unit are disabled */
 		track->textures[i].enabled = false;
+		track->textures[i].lookup_disable = false;
 		track->textures[i].roundup_w = true;
 		track->textures[i].roundup_h = true;
 		if (track->separate_cube)
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index f47cdca1c004..af65600e6564 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -46,6 +46,7 @@ struct r100_cs_track_texture {
 	unsigned		height_11;
 	bool			use_pitch;
 	bool			enabled;
+	bool			lookup_disable;
 	bool			roundup_w;
 	bool			roundup_h;
 	unsigned		compress_format;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 0266d72e0a4c..d2408c395619 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -447,6 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
 			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
 		}
+		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+			track->textures[i].lookup_disable = true;
 		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
 		case R200_TXFORMAT_I8:
 		case R200_TXFORMAT_RGB332:
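Together, the r100.c, r100_track.h, and r200.c hunks teach the CS checker about R200_TXFORMAT_LOOKUP_DISABLE: a texture unit can be enabled yet have its lookups disabled, in which case userspace never binds a backing buffer object and the checker must not demand one. A compact restatement of the new skip condition (plain C, field names as in r100_track.h above):

#include <stdbool.h>

/* Restates the check added in r100_cs_track_texture_check(): a unit is
 * only validated when it is enabled AND actually performs lookups. */
struct tex_state { bool enabled; bool lookup_disable; };

static bool needs_texture_bo(const struct tex_state *t)
{
	return t->enabled && !t->lookup_disable;
}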
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe9..cde1d3480d93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RV370 PCIE GART already initialized.\n");
+		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a3..0f806cc7dc75 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
 
-	if ((temp >> 7) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
-
-	return actual_temp * 1000;
+	return temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "R600 PCIE GART already initialized.\n");
+		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hdmi1, hdmi2;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
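The rv6xx_get_temp() rewrite drops the old post-processing, which zeroed the reading whenever bit 7 of the ASIC_T field was set and otherwise kept only bits 8:1; the new code reports the raw field directly in millidegrees Celsius. A worked example under that reading (register value invented for the demo):

#include <stdio.h>
#include <stdint.h>

/* New decode, as in the hunk above: raw ASIC_T field times 1000. */
int main(void)
{
	/* pretend (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> ASIC_T_SHIFT */
	uint32_t temp = 49;

	printf("%u mC\n", temp * 1000);	/* -> 49000, i.e. 49 degrees C */
	return 0;
}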
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7b294c127c5f..0f90fc3482ce 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -50,6 +50,7 @@ struct r600_cs_track {
 	u32			nsamples;
 	u32			cb_color_base_last[8];
 	struct radeon_bo	*cb_color_bo[8];
+	u64			cb_color_bo_mc[8];
 	u32			cb_color_bo_offset[8];
 	struct radeon_bo	*cb_color_frag_bo[8];
 	struct radeon_bo	*cb_color_tile_bo[8];
@@ -67,6 +68,7 @@ struct r600_cs_track {
 	u32			db_depth_size;
 	u32			db_offset;
 	struct radeon_bo	*db_bo;
+	u64			db_bo_mc;
 };
 
 static inline int r600_bpe_from_format(u32 *bpe, u32 format)
@@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format)
 	return 0;
 }
 
+struct array_mode_checker {
+	int array_mode;
+	u32 group_size;
+	u32 nbanks;
+	u32 npipes;
+	u32 nsamples;
+	u32 bpe;
+};
+
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
+						u32 *pitch_align,
+						u32 *height_align,
+						u32 *depth_align,
+						u64 *base_align)
+{
+	u32 tile_width = 8;
+	u32 tile_height = 8;
+	u32 macro_tile_width = values->nbanks;
+	u32 macro_tile_height = values->npipes;
+	u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+	switch (values->array_mode) {
+	case ARRAY_LINEAR_GENERAL:
+		/* technically tile_width/_height for pitch/height */
+		*pitch_align = 1; /* tile_width */
+		*height_align = 1; /* tile_height */
+		*depth_align = 1;
+		*base_align = 1;
+		break;
+	case ARRAY_LINEAR_ALIGNED:
+		*pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+		*height_align = tile_height;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_1D_TILED_THIN1:
+		*pitch_align = max((u32)tile_width,
+				   (u32)(values->group_size /
+					 (tile_height * values->bpe * values->nsamples)));
+		*height_align = tile_height;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_2D_TILED_THIN1:
+		*pitch_align = max((u32)macro_tile_width,
+				   (u32)(((values->group_size / tile_height) /
+					  (values->bpe * values->nsamples)) *
+					 values->nbanks)) * tile_width;
+		*height_align = macro_tile_height * tile_height;
+		*depth_align = 1;
+		*base_align = max(macro_tile_bytes,
+				  (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void r600_cs_track_init(struct r600_cs_track *track)
 {
 	int i;
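r600_get_array_mode_alignment() centralizes the per-array-mode alignment rules that were previously duplicated (and expressed in 8-pixel tile units, hence all the stray "/ 8") at each call site; per its comment it returns pitch/height/depth alignment in pixels and base alignment in bytes. A standalone worked example of the 2D-tiled case, using the same formulas with illustrative GPU parameters (group_size, nbanks, npipes are per-ASIC values; the numbers below are assumptions for the demo):

#include <stdio.h>
#include <stdint.h>

#define MAX_U32(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Assumed ASIC parameters, for illustration only. */
	uint32_t group_size = 256, nbanks = 4, npipes = 2, nsamples = 1, bpe = 4;
	uint32_t tile_width = 8, tile_height = 8;

	uint32_t tile_bytes = tile_width * tile_height * bpe * nsamples;	/* 256 */
	uint32_t macro_tile_bytes = nbanks * npipes * tile_bytes;		/* 2048 */

	/* ARRAY_2D_TILED_THIN1 rules from r600_get_array_mode_alignment() */
	uint32_t pitch_align = MAX_U32(nbanks,
			((group_size / tile_height) / (bpe * nsamples)) * nbanks) * tile_width;
	uint32_t height_align = npipes * tile_height;
	uint64_t base_align = MAX_U32(macro_tile_bytes,
			(uint64_t)pitch_align * bpe * height_align * nsamples);

	/* pitch_align = max(4, (256/8/4)*4) * 8 = 256 pixels,
	 * height_align = 16 rows, base_align = 16384 bytes */
	printf("pitch_align=%u height_align=%u base_align=%llu\n",
	       pitch_align, height_align, (unsigned long long)base_align);
	return 0;
}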
@@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track)
 		track->cb_color_info[i] = 0;
 		track->cb_color_bo[i] = NULL;
 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
 	}
 	track->cb_target_mask = 0xFFFFFFFF;
 	track->cb_shader_mask = 0xFFFFFFFF;
 	track->db_bo = NULL;
+	track->db_bo_mc = 0xFFFFFFFF;
 	/* assume the biggest format and that htile is enabled */
 	track->db_depth_info = 7 | (1 << 25);
 	track->db_depth_view = 0xFFFFC000;
@@ -168,7 +234,10 @@ static void r600_cs_track_init(struct r600_cs_track *track)
 static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 {
 	struct r600_cs_track *track = p->track;
-	u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
+	u32 bpe = 0, slice_tile_max, size, tmp;
+	u32 height, height_align, pitch, pitch_align, depth_align;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
 	volatile u32 *ib = p->ib->ptr;
 	unsigned array_mode;
 
@@ -183,60 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			 i, track->cb_color_info[i]);
 		return -EINVAL;
 	}
-	/* pitch is the number of 8x8 tiles per row */
-	pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
+	/* pitch in pixels */
+	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
 	slice_tile_max *= 64;
-	height = slice_tile_max / (pitch * 8);
+	height = slice_tile_max / pitch;
 	if (height > 8192)
 		height = 8192;
 	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+	array_check.array_mode = array_mode;
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = track->nsamples;
+	array_check.bpe = bpe;
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			 track->cb_color_info[i]);
+		return -EINVAL;
+	}
 	switch (array_mode) {
 	case V_0280A0_ARRAY_LINEAR_GENERAL:
-		/* technically height & 0x7 */
 		break;
 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
-		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		if (!IS_ALIGNED(height, 8)) {
-			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-				 __func__, __LINE__, height);
-			return -EINVAL;
-		}
 		break;
 	case V_0280A0_ARRAY_1D_TILED_THIN1:
-		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
 		/* avoid breaking userspace */
 		if (height > 7)
 			height &= ~0x7;
-		if (!IS_ALIGNED(height, 8)) {
-			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-				 __func__, __LINE__, height);
-			return -EINVAL;
-		}
 		break;
 	case V_0280A0_ARRAY_2D_TILED_THIN1:
-		pitch_align = max((u32)track->nbanks,
-				  (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		if (!IS_ALIGNED((height / 8), track->npipes)) {
-			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-				 __func__, __LINE__, height);
-			return -EINVAL;
-		}
 		break;
 	default:
 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -244,8 +293,24 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			 track->cb_color_info[i]);
 		return -EINVAL;
 	}
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+			 __func__, __LINE__, pitch);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(height, height_align)) {
+		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+			 __func__, __LINE__, height);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+		return -EINVAL;
+	}
+
 	/* check offset */
-	tmp = height * pitch * 8 * bpe;
+	tmp = height * pitch * bpe;
 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
 			/* the initial DDX does bad things with the CB size occasionally */
@@ -260,15 +325,11 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			return -EINVAL;
 		}
 	}
-	if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
-		dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
-		return -EINVAL;
-	}
 	/* limit max tile */
-	tmp = (height * pitch * 8) >> 6;
+	tmp = (height * pitch) >> 6;
 	if (tmp < slice_tile_max)
 		slice_tile_max = tmp;
-	tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
+	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
 	ib[track->cb_color_size_idx[i]] = tmp;
 	return 0;
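Note what the new base check validates: base_offset = cb_color_bo_mc[i] + cb_color_bo_offset[i], i.e. the final GPU (memory-controller) address, where the removed code only checked the offset within the buffer object. An offset that is itself aligned can still yield a misaligned GPU address if the BO's placement is not. A minimal illustration (all values invented):

#include <assert.h>
#include <stdint.h>

#define IS_ALIGNED_U64(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
	uint64_t bo_mc = 0x10000080;	/* BO placed on a 128-byte boundary */
	uint64_t offset = 0x200;	/* surface offset inside the BO */
	uint32_t base_align = 0x100;	/* alignment demanded by the array mode */

	assert(IS_ALIGNED_U64(offset, base_align));		/* old check: passes */
	assert(!IS_ALIGNED_U64(bo_mc + offset, base_align));	/* new check: rejects */
	return 0;
}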
@@ -310,7 +371,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 	/* Check depth buffer */
 	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
 	    G_028800_Z_ENABLE(track->db_depth_control)) {
-		u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
+		u32 nviews, bpe, ntiles, size, slice_tile_max;
+		u32 height, height_align, pitch, pitch_align, depth_align;
+		u64 base_offset, base_align;
+		struct array_mode_checker array_check;
+		int array_mode;
+
 		if (track->db_bo == NULL) {
 			dev_warn(p->dev, "z/stencil with no depth buffer\n");
 			return -EINVAL;
@@ -353,39 +419,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
 		} else {
 			size = radeon_bo_size(track->db_bo);
-			pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
-			height = size / (pitch * 8 * bpe);
-			height &= ~0x7;
-			if (!height)
-				height = 8;
-
-			switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
+			/* pitch in pixels */
+			pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+			slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+			slice_tile_max *= 64;
+			height = slice_tile_max / pitch;
+			if (height > 8192)
+				height = 8192;
+			base_offset = track->db_bo_mc + track->db_offset;
+			array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+			array_check.array_mode = array_mode;
+			array_check.group_size = track->group_size;
+			array_check.nbanks = track->nbanks;
+			array_check.npipes = track->npipes;
+			array_check.nsamples = track->nsamples;
+			array_check.bpe = bpe;
+			if (r600_get_array_mode_alignment(&array_check,
+							  &pitch_align, &height_align, &depth_align, &base_align)) {
+				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					 G_028010_ARRAY_MODE(track->db_depth_info),
+					 track->db_depth_info);
+				return -EINVAL;
+			}
+			switch (array_mode) {
 			case V_028010_ARRAY_1D_TILED_THIN1:
-				pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
-				if (!IS_ALIGNED(pitch, pitch_align)) {
-					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-						 __func__, __LINE__, pitch);
-					return -EINVAL;
-				}
-				if (!IS_ALIGNED(height, 8)) {
-					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-						 __func__, __LINE__, height);
-					return -EINVAL;
-				}
+				/* don't break userspace */
+				height &= ~0x7;
 				break;
 			case V_028010_ARRAY_2D_TILED_THIN1:
-				pitch_align = max((u32)track->nbanks,
-						  (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
-				if (!IS_ALIGNED(pitch, pitch_align)) {
-					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-						 __func__, __LINE__, pitch);
-					return -EINVAL;
-				}
-				if (!IS_ALIGNED((height / 8), track->npipes)) {
-					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-						 __func__, __LINE__, height);
-					return -EINVAL;
-				}
 				break;
 			default:
 				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -393,15 +454,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 					 track->db_depth_info);
 				return -EINVAL;
 			}
-			if (!IS_ALIGNED(track->db_offset, track->group_size)) {
-				dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
+
+			if (!IS_ALIGNED(pitch, pitch_align)) {
+				dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+					 __func__, __LINE__, pitch);
+				return -EINVAL;
+			}
+			if (!IS_ALIGNED(height, height_align)) {
+				dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+					 __func__, __LINE__, height);
 				return -EINVAL;
 			}
+			if (!IS_ALIGNED(base_offset, base_align)) {
+				dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+				return -EINVAL;
+			}
+
 			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
 			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
 			tmp = ntiles * bpe * 64 * nviews;
 			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
-				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
+				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					 radeon_bo_size(track->db_bo));
 				return -EINVAL;
@@ -952,6 +1025,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		track->cb_color_base_last[tmp] = ib[idx];
 		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
 		break;
 	case DB_DEPTH_BASE:
 		r = r600_cs_packet_next_reloc(p, &reloc);
@@ -963,6 +1037,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		track->db_bo = reloc->robj;
+		track->db_bo_mc = reloc->lobj.gpu_offset;
 		break;
 	case DB_HTILE_DATA_BASE:
 	case SQ_PGM_START_FS:
@@ -1084,16 +1159,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
 static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
 					      struct radeon_bo *texture,
 					      struct radeon_bo *mipmap,
+					      u64 base_offset,
+					      u64 mip_offset,
 					      u32 tiling_flags)
 {
 	struct r600_cs_track *track = p->track;
 	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
-	u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
+	u32 word0, word1, l0_size, mipmap_size;
+	u32 height_align, pitch, pitch_align, depth_align;
+	u64 base_align;
+	struct array_mode_checker array_check;
 
 	/* on legacy kernel we don't perform advanced check */
 	if (p->rdev == NULL)
 		return 0;
 
+	/* convert to bytes */
+	base_offset <<= 8;
+	mip_offset <<= 8;
+
 	word0 = radeon_get_ib_value(p, idx + 0);
 	if (tiling_flags & RADEON_TILING_MACRO)
 		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
@@ -1126,46 +1210,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
 		return -EINVAL;
 	}
 
-	pitch = G_038000_PITCH(word0) + 1;
-	switch (G_038000_TILE_MODE(word0)) {
-	case V_038000_ARRAY_LINEAR_GENERAL:
-		pitch_align = 1;
-		/* XXX check height align */
-		break;
-	case V_038000_ARRAY_LINEAR_ALIGNED:
-		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		/* XXX check height align */
-		break;
-	case V_038000_ARRAY_1D_TILED_THIN1:
-		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		/* XXX check height align */
-		break;
-	case V_038000_ARRAY_2D_TILED_THIN1:
-		pitch_align = max((u32)track->nbanks,
-				  (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
-		if (!IS_ALIGNED(pitch, pitch_align)) {
-			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-				 __func__, __LINE__, pitch);
-			return -EINVAL;
-		}
-		/* XXX check height align */
-		break;
-	default:
-		dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
-			 G_038000_TILE_MODE(word0), word0);
+	/* pitch in texels */
+	pitch = (G_038000_PITCH(word0) + 1) * 8;
+	array_check.array_mode = G_038000_TILE_MODE(word0);
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = 1;
+	array_check.bpe = bpe;
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+			 __func__, __LINE__, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	/* XXX check height as well... */
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+			 __func__, __LINE__, pitch);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
+			 __func__, __LINE__, base_offset);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(mip_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
+			 __func__, __LINE__, mip_offset);
 		return -EINVAL;
 	}
-	/* XXX check offset align */
 
 	word0 = radeon_get_ib_value(p, idx + 4);
 	word1 = radeon_get_ib_value(p, idx + 5);
@@ -1400,7 +1476,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 			mipmap = reloc->robj;
 			r = r600_check_texture_resource(p,  idx+(i*7)+1,
-							texture, mipmap, reloc->lobj.tiling_flags);
+							texture, mipmap,
+							base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+							mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+							reloc->lobj.tiling_flags);
 			if (r)
 				return r;
 			ib[idx+1+(i*7)+2] += base_offset;
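The packet3 path now hands the texture and mipmap base addresses (relocation offset plus the offset already in the IB word) down to r600_check_texture_resource(), which shifts them left by 8 before the alignment checks because those resource words carry addresses in 256-byte units. A small numeric illustration (values invented):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Values as carried in the IB / relocation, i.e. address >> 8. */
	uint64_t base_offset = 0x00123400;	/* reloc gpu_offset >> 8 */
	uint64_t ib_word     = 0x00000010;	/* offset stored by userspace */
	uint64_t base_align  = 0x100;

	uint64_t addr = (base_offset + ib_word) << 8;	/* bytes, as in the hunk */
	assert(addr == 0x12341000ull);
	assert((addr & (base_align - 1)) == 0);		/* what gets checked */
	return 0;
}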
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 966a793e225b..bff4dc4f410f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -51,6 +51,12 @@
 #define	PTE_READABLE				(1 << 5)
 #define	PTE_WRITEABLE				(1 << 6)
 
+/* tiling bits */
+#define	ARRAY_LINEAR_GENERAL			0x00000000
+#define	ARRAY_LINEAR_ALIGNED			0x00000001
+#define	ARRAY_1D_TILED_THIN1			0x00000002
+#define	ARRAY_2D_TILED_THIN1			0x00000004
+
 /* Registers */
 #define	ARB_POP						0x2418
 #define		ENABLE_TC128					(1 << 30)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec9039..87ead090c7d5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	if (crev < 2)
 		return false;
 
-	router.valid = false;
-
 	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
 	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
 	    (ctx->bios + data_offset +
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 			continue;
 
+		router.ddc_valid = false;
+		router.cd_valid = false;
 		for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
 			uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							       usDeviceTag));
 
 			} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-				router.valid = false;
 				for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-					u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+					u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
 					if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
 						ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
 							(ctx->bios + data_offset +
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						ATOM_I2C_RECORD *i2c_record;
 						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 						ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+						ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
 						ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
 							(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
 							(ctx->bios + data_offset +
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
 							ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
 								record;
-							router.valid = true;
-							router.mux_type = ddc_path->ucMuxType;
-							router.mux_control_pin = ddc_path->ucMuxControlPin;
-							router.mux_state = ddc_path->ucMuxState[enum_id];
+							router.ddc_valid = true;
+							router.ddc_mux_type = ddc_path->ucMuxType;
+							router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+							router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+							break;
+						case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+							cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+								record;
+							router.cd_valid = true;
+							router.cd_mux_type = cd_path->ucMuxType;
+							router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+							router.cd_mux_state = cd_path->ucMuxState[enum_id];
 							break;
 						}
 						record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 	struct radeon_router router;
 
-	router.valid = false;
+	router.ddc_valid = false;
+	router.cd_valid = false;
 
 	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
 	if (!bios_connectors)
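These hunks split the single router.valid flag into independent DDC and clock/data validity, reset the router state once per display path instead of once per table, and fix a copy-paste bug in the router-object scan (the inner loop indexed asObjects[] with the outer counter j instead of k, so it kept re-reading the first path's object). The fields used above imply a struct radeon_router roughly like the following sketch; radeon_mode.h itself is not part of this diff, so the member types are assumptions based on usage:

/* Sketch of the implied struct radeon_router (types assumed). */
struct radeon_router {
	u32 router_id;
	struct radeon_i2c_bus_rec i2c_info;
	/* DDC mux */
	bool ddc_valid;
	u8 ddc_mux_type;
	u8 ddc_mux_control_pin;
	u8 ddc_mux_state;
	/* clock/data mux */
	bool cd_valid;
	u8 cd_mux_type;
	u8 cd_mux_control_pin;
	u8 cd_mux_state;
};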
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02ee..fe6c74780f18 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
 			continue;
 
 		if (priority == true) {
-			DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-			DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+			DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+			DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
 			conflict->status = connector_status_disconnected;
 			radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
 		} else {
-			DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-			DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+			DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+			DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
 			current_status = connector_status_disconnected;
 		}
 		break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
 		    mode->vdisplay == native_mode->vdisplay) {
 			*native_mode = *mode;
 			drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-			DRM_INFO("Determined LVDS native mode details from EDID\n");
+			DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
 			break;
 		}
 	}
 	if (!native_mode->clock) {
-		DRM_INFO("No LVDS native mode details, disabling RMX\n");
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
 		radeon_encoder->rmx_type = RMX_OFF;
 	}
 }
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			radeon_connector->shared_ddc = true;
 			shared_ddc = true;
 		}
-		if (radeon_connector->router_bus && router->valid &&
+		if (radeon_connector->router_bus && router->ddc_valid &&
 		    (radeon_connector->router.router_id == router->router_id)) {
 			radeon_connector->shared_ddc = false;
 			shared_ddc = false;
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
 	radeon_connector->router = *router;
-	if (router->valid) {
+	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
 		if (!radeon_connector->router_bus)
 			goto failed;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69c..1df4dc6c063c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
-			if (radeon_connector->router_bus)
+			if (radeon_connector->router.ddc_valid)
 				DRM_INFO("  DDC Router 0x%x/0x%x\n",
-					 radeon_connector->router.mux_control_pin,
-					 radeon_connector->router.mux_state);
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
 		} else {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
 			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index ae58b6849a2e..f678257c42e6 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec | |||
1520 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | 1520 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) |
1521 | { | 1521 | { |
1522 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1522 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1523 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1523 | 1524 | ||
1524 | if (radeon_encoder->active_device & | 1525 | if (radeon_encoder->active_device & |
1525 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | 1526 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { |
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
1531 | radeon_atom_output_lock(encoder, true); | 1532 | radeon_atom_output_lock(encoder, true); |
1532 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1533 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1533 | 1534 | ||
1535 | /* select the clock/data port if it uses a router */ | ||
1536 | if (connector) { | ||
1537 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1538 | if (radeon_connector->router.cd_valid) | ||
1539 | radeon_router_select_cd_port(radeon_connector); | ||
1540 | } | ||
1541 | |||
1534 | /* this is needed for the pll/ss setup to work correctly in some cases */ | 1542 | /* this is needed for the pll/ss setup to work correctly in some cases */ |
1535 | atombios_set_encoder_crtc_source(encoder); | 1543 | atombios_set_encoder_crtc_source(encoder); |
1536 | } | 1544 | } |
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1547 | struct radeon_device *rdev = dev->dev_private; | 1555 | struct radeon_device *rdev = dev->dev_private; |
1548 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1556 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1549 | struct radeon_encoder_atom_dig *dig; | 1557 | struct radeon_encoder_atom_dig *dig; |
1558 | |||
1559 | /* check for pre-DCE3 cards with shared encoders; | ||
1560 | * can't really use the links individually, so don't disable | ||
1561 | * the encoder if it's in use by another connector | ||
1562 | */ | ||
1563 | if (!ASIC_IS_DCE3(rdev)) { | ||
1564 | struct drm_encoder *other_encoder; | ||
1565 | struct radeon_encoder *other_radeon_encoder; | ||
1566 | |||
1567 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
1568 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
1569 | if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && | ||
1570 | drm_helper_encoder_in_use(other_encoder)) | ||
1571 | goto disable_done; | ||
1572 | } | ||
1573 | } | ||
1574 | |||
1550 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1575 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1551 | 1576 | ||
1552 | switch (radeon_encoder->encoder_id) { | 1577 | switch (radeon_encoder->encoder_id) { |
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1586 | break; | 1611 | break; |
1587 | } | 1612 | } |
1588 | 1613 | ||
1614 | disable_done: | ||
1589 | if (radeon_encoder_is_digital(encoder)) { | 1615 | if (radeon_encoder_is_digital(encoder)) { |
1590 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | 1616 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) |
1591 | r600_hdmi_disable(encoder); | 1617 | r600_hdmi_disable(encoder); |
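
The new radeon_atom_encoder_disable() logic above handles pre-DCE3 hardware where two connectors can share one physical encoder: before tearing the encoder down, it scans the encoder list and jumps to disable_done if another in-use encoder carries the same id. A simplified standalone sketch of that check; an array stands in for the kernel's encoder list, and the explicit self-exclusion here is an illustrative simplification.

/* Skip hardware teardown when a shared encoder is still in use. */
#include <stdbool.h>
#include <stdio.h>

struct encoder { int encoder_id; bool in_use; };

static void disable_encoder(struct encoder *enc, struct encoder *all, int n)
{
    for (int i = 0; i < n; i++) {
        if (&all[i] != enc && all[i].encoder_id == enc->encoder_id &&
            all[i].in_use) {
            printf("encoder %d shared and busy, skipping hw disable\n",
                   enc->encoder_id);
            goto disable_done;
        }
    }
    printf("encoder %d: hw disable\n", enc->encoder_id);
disable_done:
    ; /* common cleanup (HDMI teardown etc.) still runs here */
}

int main(void)
{
    struct encoder encs[] = { { 5, false }, { 5, true } };
    disable_encoder(&encs[0], encs, 2);
    return 0;
}
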
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 216392d0353b..daacb281dfaf 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -240,7 +240,8 @@ retry: | |||
240 | */ | 240 | */ |
241 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { | 241 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { |
242 | /* good news we believe it's a lockup */ | 242 | /* good news we believe it's a lockup */ |
243 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); | 243 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", |
244 | fence->seq, seq); | ||
244 | /* FIXME: what should we do ? marking everyone | 245 | /* FIXME: what should we do ? marking everyone |
245 | * as signaled for now | 246 | * as signaled for now |
246 | */ | 247 | */ |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6a13ee38a5b9..0cfbba02c4d0 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* on hw with routers, select right port */ | 55 | /* on hw with routers, select right port */ |
56 | if (radeon_connector->router.valid) | 56 | if (radeon_connector->router.ddc_valid) |
57 | radeon_router_select_port(radeon_connector); | 57 | radeon_router_select_ddc_port(radeon_connector); |
58 | 58 | ||
59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); | 59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); |
60 | if (ret == 2) | 60 | if (ret == 2) |
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, | |||
1084 | addr, val); | 1084 | addr, val); |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | /* router switching */ | 1087 | /* ddc router switching */ |
1088 | void radeon_router_select_port(struct radeon_connector *radeon_connector) | 1088 | void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector) |
1089 | { | 1089 | { |
1090 | u8 val; | 1090 | u8 val; |
1091 | 1091 | ||
1092 | if (!radeon_connector->router.valid) | 1092 | if (!radeon_connector->router.ddc_valid) |
1093 | return; | 1093 | return; |
1094 | 1094 | ||
1095 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1095 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1096 | radeon_connector->router.i2c_addr, | 1096 | radeon_connector->router.i2c_addr, |
1097 | 0x3, &val); | 1097 | 0x3, &val); |
1098 | val &= radeon_connector->router.mux_control_pin; | 1098 | val &= ~radeon_connector->router.ddc_mux_control_pin; |
1099 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1099 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1100 | radeon_connector->router.i2c_addr, | 1100 | radeon_connector->router.i2c_addr, |
1101 | 0x3, val); | 1101 | 0x3, val); |
1102 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1102 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1103 | radeon_connector->router.i2c_addr, | 1103 | radeon_connector->router.i2c_addr, |
1104 | 0x1, &val); | 1104 | 0x1, &val); |
1105 | val &= radeon_connector->router.mux_control_pin; | 1105 | val &= ~radeon_connector->router.ddc_mux_control_pin; |
1106 | val |= radeon_connector->router.mux_state; | 1106 | val |= radeon_connector->router.ddc_mux_state; |
1107 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1108 | radeon_connector->router.i2c_addr, | ||
1109 | 0x1, val); | ||
1110 | } | ||
1111 | |||
1112 | /* clock/data router switching */ | ||
1113 | void radeon_router_select_cd_port(struct radeon_connector *radeon_connector) | ||
1114 | { | ||
1115 | u8 val; | ||
1116 | |||
1117 | if (!radeon_connector->router.cd_valid) | ||
1118 | return; | ||
1119 | |||
1120 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1121 | radeon_connector->router.i2c_addr, | ||
1122 | 0x3, &val); | ||
1123 | val &= ~radeon_connector->router.cd_mux_control_pin; | ||
1124 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1125 | radeon_connector->router.i2c_addr, | ||
1126 | 0x3, val); | ||
1127 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1128 | radeon_connector->router.i2c_addr, | ||
1129 | 0x1, &val); | ||
1130 | val &= ~radeon_connector->router.cd_mux_control_pin; | ||
1131 | val |= radeon_connector->router.cd_mux_state; | ||
1107 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1132 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1108 | radeon_connector->router.i2c_addr, | 1133 | radeon_connector->router.i2c_addr, |
1109 | 0x1, val); | 1134 | 0x1, val); |
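
The core fix in radeon_i2c.c is the mask polarity: val &= mux_control_pin kept only the mux bits, while the intent is val &= ~pin, i.e. clear the mux-owned bits and then OR in the new state without disturbing the rest of the register. A small runnable demonstration with made-up values:

/* Clearing the mux bits needs &= ~mask, not &= mask. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t reg = 0xA5;          /* current register contents */
    uint8_t mux_pin = 0x0C;      /* bits owned by the mux */
    uint8_t mux_state = 0x08;    /* desired routing */

    uint8_t wrong = (reg & mux_pin) | mux_state;    /* loses bits 0xA1 */
    uint8_t right = (reg & ~mux_pin) | mux_state;   /* keeps bits 0xA1 */

    printf("wrong: 0x%02X  right: 0x%02X\n", wrong, right);  /* 0x0C vs 0xA9 */
    return 0;
}
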
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 2f349a300195..465746bd51b7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
@@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
76 | default: | 76 | default: |
77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 77 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
78 | crtc); | 78 | crtc); |
79 | return EINVAL; | 79 | return -EINVAL; |
80 | } | 80 | } |
81 | } else { | 81 | } else { |
82 | switch (crtc) { | 82 | switch (crtc) { |
@@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) | |||
89 | default: | 89 | default: |
90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", | 90 | DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", |
91 | crtc); | 91 | crtc); |
92 | return EINVAL; | 92 | return -EINVAL; |
93 | } | 93 | } |
94 | } | 94 | } |
95 | 95 | ||
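
The radeon_irq.c change restores the kernel convention that errors are returned as negative errno values; a bare EINVAL is positive 22, which a caller testing ret < 0 would silently treat as success. Illustration:

/* Why the sign matters: error checks look for ret < 0. */
#include <errno.h>
#include <stdio.h>

static int enable_vblank(int crtc)
{
    if (crtc > 1)
        return -EINVAL;   /* kernel convention: negative errno */
    return 0;
}

int main(void)
{
    int ret = enable_vblank(7);
    if (ret < 0)
        printf("failed: %d\n", ret);   /* only fires with -EINVAL */
    return 0;
}
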
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 92457163d070..680f57644e86 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -401,13 +401,19 @@ struct radeon_hpd { | |||
401 | }; | 401 | }; |
402 | 402 | ||
403 | struct radeon_router { | 403 | struct radeon_router { |
404 | bool valid; | ||
405 | u32 router_id; | 404 | u32 router_id; |
406 | struct radeon_i2c_bus_rec i2c_info; | 405 | struct radeon_i2c_bus_rec i2c_info; |
407 | u8 i2c_addr; | 406 | u8 i2c_addr; |
408 | u8 mux_type; | 407 | /* i2c mux */ |
409 | u8 mux_control_pin; | 408 | bool ddc_valid; |
410 | u8 mux_state; | 409 | u8 ddc_mux_type; |
410 | u8 ddc_mux_control_pin; | ||
411 | u8 ddc_mux_state; | ||
412 | /* clock/data mux */ | ||
413 | bool cd_valid; | ||
414 | u8 cd_mux_type; | ||
415 | u8 cd_mux_control_pin; | ||
416 | u8 cd_mux_state; | ||
411 | }; | 417 | }; |
412 | 418 | ||
413 | struct radeon_connector { | 419 | struct radeon_connector { |
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, | |||
488 | u8 slave_addr, | 494 | u8 slave_addr, |
489 | u8 addr, | 495 | u8 addr, |
490 | u8 val); | 496 | u8 val); |
491 | extern void radeon_router_select_port(struct radeon_connector *radeon_connector); | 497 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
498 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); | ||
492 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 499 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
493 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 500 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
494 | 501 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d7ab91416410..8eb183466015 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
102 | type = ttm_bo_type_device; | 102 | type = ttm_bo_type_device; |
103 | } | 103 | } |
104 | *bo_ptr = NULL; | 104 | *bo_ptr = NULL; |
105 | |||
106 | retry: | ||
105 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); | 107 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
106 | if (bo == NULL) | 108 | if (bo == NULL) |
107 | return -ENOMEM; | 109 | return -ENOMEM; |
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
109 | bo->gobj = gobj; | 111 | bo->gobj = gobj; |
110 | bo->surface_reg = -1; | 112 | bo->surface_reg = -1; |
111 | INIT_LIST_HEAD(&bo->list); | 113 | INIT_LIST_HEAD(&bo->list); |
112 | |||
113 | retry: | ||
114 | radeon_ttm_placement_from_domain(bo, domain); | 114 | radeon_ttm_placement_from_domain(bo, domain); |
115 | /* Kernel allocation are uninterruptible */ | 115 | /* Kernel allocation are uninterruptible */ |
116 | mutex_lock(&rdev->vram_mutex); | 116 | mutex_lock(&rdev->vram_mutex); |
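
Moving the retry label above the allocation matters because ttm_bo_init() now disposes of the object on failure (see the ttm_bo.c hunk later in this diff), so retrying with the old pointer would be a use-after-free; each retry must start from a fresh allocation. A standalone sketch of the corrected shape, with try_init() as an illustrative stand-in that consumes its argument on error the way ttm_bo_init() does:

/* Retry must reallocate, because init consumes the object on error. */
#include <stdlib.h>
#include <stdio.h>

struct bo { int domain; };

static int try_init(struct bo *bo)
{
    free(bo);           /* like ttm_bo_init(): frees the object on error */
    return -1;
}

static int bo_create(int domain, struct bo **out)
{
    struct bo *bo;
    int tries = 0;
retry:                               /* allocation is inside the loop */
    bo = calloc(1, sizeof(*bo));
    if (!bo)
        return -1;
    bo->domain = domain;
    if (try_init(bo) != 0) {
        if (++tries < 2) {
            domain = 0;              /* fall back to another domain */
            goto retry;              /* bo already freed; realloc above */
        }
        return -1;
    }
    *out = bo;
    return 0;
}

int main(void)
{
    struct bo *bo = NULL;
    printf("create: %d\n", bo_create(1, &bo));
    return 0;
}
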
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index c332f46340d5..64928814de53 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -2836,6 +2836,7 @@ | |||
2836 | # define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24) | 2836 | # define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24) |
2837 | # define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24) | 2837 | # define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24) |
2838 | # define R200_TXFORMAT_ST_ROUTE_SHIFT 24 | 2838 | # define R200_TXFORMAT_ST_ROUTE_SHIFT 24 |
2839 | # define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27) | ||
2839 | # define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) | 2840 | # define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) |
2840 | # define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) | 2841 | # define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) |
2841 | # define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) | 2842 | # define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index fe95bb35317e..01c2c736a1da 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, | |||
689 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 689 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
690 | gtt->offset = bo_mem->start << PAGE_SHIFT; | 690 | gtt->offset = bo_mem->start << PAGE_SHIFT; |
691 | if (!gtt->num_pages) { | 691 | if (!gtt->num_pages) { |
692 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); | 692 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
693 | gtt->num_pages, bo_mem, backend); | ||
693 | } | 694 | } |
694 | r = radeon_gart_bind(gtt->rdev, gtt->offset, | 695 | r = radeon_gart_bind(gtt->rdev, gtt->offset, |
695 | gtt->num_pages, gtt->pages); | 696 | gtt->num_pages, gtt->pages); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index f683e51a2a06..5512e4e5e636 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev) | |||
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.ram.ptr) { | 80 | if (rdev->gart.table.ram.ptr) { |
81 | WARN(1, "RS400 GART already initialized.\n"); | 81 | WARN(1, "RS400 GART already initialized\n"); |
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | /* Check gart size */ | 84 | /* Check gart size */ |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index b091a1f6fa4e..f1c6e02c2e6b 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
375 | int r; | 375 | int r; |
376 | 376 | ||
377 | if (rdev->gart.table.vram.robj) { | 377 | if (rdev->gart.table.vram.robj) { |
378 | WARN(1, "RS600 GART already initialized.\n"); | 378 | WARN(1, "RS600 GART already initialized\n"); |
379 | return 0; | 379 | return 0; |
380 | } | 380 | } |
381 | /* Initialize common gart structure */ | 381 | /* Initialize common gart structure */ |
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
505 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | 505 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); |
506 | 506 | ||
507 | if (!rdev->irq.installed) { | 507 | if (!rdev->irq.installed) { |
508 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 508 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
509 | WREG32(R_000040_GEN_INT_CNTL, 0); | 509 | WREG32(R_000040_GEN_INT_CNTL, 0); |
510 | return -EINVAL; | 510 | return -EINVAL; |
511 | } | 511 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a1cb783c7131..3ca77dc03915 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -27,14 +27,6 @@ | |||
27 | /* | 27 | /* |
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | /* Notes: | ||
31 | * | ||
32 | * We store bo pointer in drm_mm_node struct so we know which bo own a | ||
33 | * specific node. There is no protection on the pointer, thus to make | ||
34 | * sure things don't go berserk you have to access this pointer while | ||
35 | * holding the global lru lock and make sure anytime you free a node you | ||
36 | * reset the pointer to NULL. | ||
37 | */ | ||
38 | 30 | ||
39 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
40 | #include "ttm/ttm_bo_driver.h" | 32 | #include "ttm/ttm_bo_driver.h" |
@@ -45,6 +37,7 @@ | |||
45 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
46 | #include <linux/file.h> | 38 | #include <linux/file.h> |
47 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <asm/atomic.h> | ||
48 | 41 | ||
49 | #define TTM_ASSERT_LOCKED(param) | 42 | #define TTM_ASSERT_LOCKED(param) |
50 | #define TTM_DEBUG(fmt, arg...) | 43 | #define TTM_DEBUG(fmt, arg...) |
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | |||
452 | ttm_bo_mem_put(bo, &bo->mem); | 445 | ttm_bo_mem_put(bo, &bo->mem); |
453 | 446 | ||
454 | atomic_set(&bo->reserved, 0); | 447 | atomic_set(&bo->reserved, 0); |
448 | |||
449 | /* | ||
450 | * Make processes trying to reserve really pick it up. | ||
451 | */ | ||
452 | smp_mb__after_atomic_dec(); | ||
455 | wake_up_all(&bo->event_queue); | 453 | wake_up_all(&bo->event_queue); |
456 | } | 454 | } |
457 | 455 | ||
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
460 | struct ttm_bo_device *bdev = bo->bdev; | 458 | struct ttm_bo_device *bdev = bo->bdev; |
461 | struct ttm_bo_global *glob = bo->glob; | 459 | struct ttm_bo_global *glob = bo->glob; |
462 | struct ttm_bo_driver *driver; | 460 | struct ttm_bo_driver *driver; |
463 | void *sync_obj; | 461 | void *sync_obj = NULL; |
464 | void *sync_obj_arg; | 462 | void *sync_obj_arg; |
465 | int put_count; | 463 | int put_count; |
466 | int ret; | 464 | int ret; |
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
495 | spin_lock(&glob->lru_lock); | 493 | spin_lock(&glob->lru_lock); |
496 | } | 494 | } |
497 | queue: | 495 | queue: |
498 | sync_obj = bo->sync_obj; | ||
499 | sync_obj_arg = bo->sync_obj_arg; | ||
500 | driver = bdev->driver; | 496 | driver = bdev->driver; |
497 | if (bo->sync_obj) | ||
498 | sync_obj = driver->sync_obj_ref(bo->sync_obj); | ||
499 | sync_obj_arg = bo->sync_obj_arg; | ||
501 | 500 | ||
502 | kref_get(&bo->list_kref); | 501 | kref_get(&bo->list_kref); |
503 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); | 502 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
504 | spin_unlock(&glob->lru_lock); | 503 | spin_unlock(&glob->lru_lock); |
505 | spin_unlock(&bo->lock); | 504 | spin_unlock(&bo->lock); |
506 | 505 | ||
507 | if (sync_obj) | 506 | if (sync_obj) { |
508 | driver->sync_obj_flush(sync_obj, sync_obj_arg); | 507 | driver->sync_obj_flush(sync_obj, sync_obj_arg); |
508 | driver->sync_obj_unref(&sync_obj); | ||
509 | } | ||
509 | schedule_delayed_work(&bdev->wq, | 510 | schedule_delayed_work(&bdev->wq, |
510 | ((HZ / 100) < 1) ? 1 : HZ / 100); | 511 | ((HZ / 100) < 1) ? 1 : HZ / 100); |
511 | } | 512 | } |
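
The sync-object hunk fixes a lifetime race: the old code copied bo->sync_obj without taking a reference, so the object could be signalled and freed between dropping the locks and calling sync_obj_flush(). The fix pins it with sync_obj_ref() while still under the lock and balances with sync_obj_unref() after the flush. A minimal refcount sketch of the pattern; the counter here is not atomic, so it is only illustrative:

/* Take a reference under the lock, use after unlock, then drop it. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sync_obj { int refs; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct sync_obj *sync_ref(struct sync_obj *s) { s->refs++; return s; }

static void sync_unref(struct sync_obj **s)
{
    if (--(*s)->refs == 0)
        free(*s);
    *s = NULL;
}

int main(void)
{
    struct sync_obj *bo_sync = calloc(1, sizeof(*bo_sync));
    struct sync_obj *sync = NULL;
    bo_sync->refs = 1;

    pthread_mutex_lock(&lock);
    if (bo_sync)
        sync = sync_ref(bo_sync);     /* pin it while still locked */
    pthread_mutex_unlock(&lock);

    if (sync) {
        printf("flush with a guaranteed-live sync object\n");
        sync_unref(&sync);            /* balance the reference */
    }
    sync_unref(&bo_sync);
    return 0;
}
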
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
822 | bool no_wait_gpu) | 823 | bool no_wait_gpu) |
823 | { | 824 | { |
824 | struct ttm_bo_device *bdev = bo->bdev; | 825 | struct ttm_bo_device *bdev = bo->bdev; |
825 | struct ttm_bo_global *glob = bdev->glob; | ||
826 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 826 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
827 | int ret; | 827 | int ret; |
828 | 828 | ||
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
832 | return ret; | 832 | return ret; |
833 | if (mem->mm_node) | 833 | if (mem->mm_node) |
834 | break; | 834 | break; |
835 | spin_lock(&glob->lru_lock); | ||
836 | if (list_empty(&man->lru)) { | ||
837 | spin_unlock(&glob->lru_lock); | ||
838 | break; | ||
839 | } | ||
840 | spin_unlock(&glob->lru_lock); | ||
841 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, | 835 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, |
842 | no_wait_reserve, no_wait_gpu); | 836 | no_wait_reserve, no_wait_gpu); |
843 | if (unlikely(ret != 0)) | 837 | if (unlikely(ret != 0)) |
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate); | |||
1125 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, | 1119 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
1126 | struct ttm_placement *placement) | 1120 | struct ttm_placement *placement) |
1127 | { | 1121 | { |
1128 | int i; | 1122 | BUG_ON((placement->fpfn || placement->lpfn) && |
1123 | (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); | ||
1129 | 1124 | ||
1130 | if (placement->fpfn || placement->lpfn) { | ||
1131 | if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) { | ||
1132 | printk(KERN_ERR TTM_PFX "Page number range to small " | ||
1133 | "Need %lu pages, range is [%u, %u]\n", | ||
1134 | bo->mem.num_pages, placement->fpfn, | ||
1135 | placement->lpfn); | ||
1136 | return -EINVAL; | ||
1137 | } | ||
1138 | } | ||
1139 | for (i = 0; i < placement->num_placement; i++) { | ||
1140 | if (!capable(CAP_SYS_ADMIN)) { | ||
1141 | if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1142 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1143 | "modify NO_EVICT status.\n"); | ||
1144 | return -EINVAL; | ||
1145 | } | ||
1146 | } | ||
1147 | } | ||
1148 | for (i = 0; i < placement->num_busy_placement; i++) { | ||
1149 | if (!capable(CAP_SYS_ADMIN)) { | ||
1150 | if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1151 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1152 | "modify NO_EVICT status.\n"); | ||
1153 | return -EINVAL; | ||
1154 | } | ||
1155 | } | ||
1156 | } | ||
1157 | return 0; | 1125 | return 0; |
1158 | } | 1126 | } |
1159 | 1127 | ||
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1176 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1144 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1177 | if (num_pages == 0) { | 1145 | if (num_pages == 0) { |
1178 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); | 1146 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); |
1147 | if (destroy) | ||
1148 | (*destroy)(bo); | ||
1149 | else | ||
1150 | kfree(bo); | ||
1179 | return -EINVAL; | 1151 | return -EINVAL; |
1180 | } | 1152 | } |
1181 | bo->destroy = destroy; | 1153 | bo->destroy = destroy; |
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1369 | int ret = -EINVAL; | 1341 | int ret = -EINVAL; |
1370 | struct ttm_mem_type_manager *man; | 1342 | struct ttm_mem_type_manager *man; |
1371 | 1343 | ||
1372 | if (type >= TTM_NUM_MEM_TYPES) { | 1344 | BUG_ON(type >= TTM_NUM_MEM_TYPES); |
1373 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type); | ||
1374 | return ret; | ||
1375 | } | ||
1376 | |||
1377 | man = &bdev->man[type]; | 1345 | man = &bdev->man[type]; |
1378 | if (man->has_type) { | 1346 | BUG_ON(man->has_type); |
1379 | printk(KERN_ERR TTM_PFX | ||
1380 | "Memory manager already initialized for type %d\n", | ||
1381 | type); | ||
1382 | return ret; | ||
1383 | } | ||
1384 | 1347 | ||
1385 | ret = bdev->driver->init_mem_type(bdev, type, man); | 1348 | ret = bdev->driver->init_mem_type(bdev, type, man); |
1386 | if (ret) | 1349 | if (ret) |
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1389 | 1352 | ||
1390 | ret = 0; | 1353 | ret = 0; |
1391 | if (type != TTM_PL_SYSTEM) { | 1354 | if (type != TTM_PL_SYSTEM) { |
1392 | if (!p_size) { | ||
1393 | printk(KERN_ERR TTM_PFX | ||
1394 | "Zero size memory manager type %d\n", | ||
1395 | type); | ||
1396 | return ret; | ||
1397 | } | ||
1398 | |||
1399 | ret = (*man->func->init)(man, p_size); | 1355 | ret = (*man->func->init)(man, p_size); |
1400 | if (ret) | 1356 | if (ret) |
1401 | return ret; | 1357 | return ret; |
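
The remaining ttm_bo.c hunks convert checks that can only fire on driver programming errors (an out-of-range memory type, double initialisation, an impossible placement) from recoverable -EINVAL returns into BUG_ON() assertions, since no runtime input can trigger them. A userspace sketch of the distinction, with assert() standing in for BUG_ON():

/* Programmer errors become assertions, not error returns. */
#include <assert.h>

#define NUM_MEM_TYPES 8

static int init_mm(unsigned type, int has_type[])
{
    assert(type < NUM_MEM_TYPES);   /* API misuse, not runtime input */
    assert(!has_type[type]);        /* double initialisation is a bug */
    has_type[type] = 1;
    return 0;
}

int main(void)
{
    int has_type[NUM_MEM_TYPES] = { 0 };
    return init_mm(3, has_type);
}
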
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 7410c190c891..038e947d00f9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /************************************************************************** | 1 | /************************************************************************** |
2 | * | 2 | * |
3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | 3 | * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
@@ -31,20 +31,29 @@ | |||
31 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
32 | #include "ttm/ttm_bo_driver.h" | 32 | #include "ttm/ttm_bo_driver.h" |
33 | #include "ttm/ttm_placement.h" | 33 | #include "ttm/ttm_placement.h" |
34 | #include <linux/jiffies.h> | 34 | #include "drm_mm.h" |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/mm.h> | ||
38 | #include <linux/file.h> | ||
39 | #include <linux/module.h> | 37 | #include <linux/module.h> |
40 | 38 | ||
39 | /** | ||
40 | * Currently we use a spinlock for the lock, but a mutex *may* be | ||
41 | * more appropriate to reduce scheduling latency if the range manager | ||
42 | * ends up with very fragmented allocation patterns. | ||
43 | */ | ||
44 | |||
45 | struct ttm_range_manager { | ||
46 | struct drm_mm mm; | ||
47 | spinlock_t lock; | ||
48 | }; | ||
49 | |||
41 | static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | 50 | static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, |
42 | struct ttm_buffer_object *bo, | 51 | struct ttm_buffer_object *bo, |
43 | struct ttm_placement *placement, | 52 | struct ttm_placement *placement, |
44 | struct ttm_mem_reg *mem) | 53 | struct ttm_mem_reg *mem) |
45 | { | 54 | { |
46 | struct ttm_bo_global *glob = man->bdev->glob; | 55 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
47 | struct drm_mm *mm = man->priv; | 56 | struct drm_mm *mm = &rman->mm; |
48 | struct drm_mm_node *node = NULL; | 57 | struct drm_mm_node *node = NULL; |
49 | unsigned long lpfn; | 58 | unsigned long lpfn; |
50 | int ret; | 59 | int ret; |
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | |||
57 | if (unlikely(ret)) | 66 | if (unlikely(ret)) |
58 | return ret; | 67 | return ret; |
59 | 68 | ||
60 | spin_lock(&glob->lru_lock); | 69 | spin_lock(&rman->lock); |
61 | node = drm_mm_search_free_in_range(mm, | 70 | node = drm_mm_search_free_in_range(mm, |
62 | mem->num_pages, mem->page_alignment, | 71 | mem->num_pages, mem->page_alignment, |
63 | placement->fpfn, lpfn, 1); | 72 | placement->fpfn, lpfn, 1); |
64 | if (unlikely(node == NULL)) { | 73 | if (unlikely(node == NULL)) { |
65 | spin_unlock(&glob->lru_lock); | 74 | spin_unlock(&rman->lock); |
66 | return 0; | 75 | return 0; |
67 | } | 76 | } |
68 | node = drm_mm_get_block_atomic_range(node, mem->num_pages, | 77 | node = drm_mm_get_block_atomic_range(node, mem->num_pages, |
69 | mem->page_alignment, | 78 | mem->page_alignment, |
70 | placement->fpfn, | 79 | placement->fpfn, |
71 | lpfn); | 80 | lpfn); |
72 | spin_unlock(&glob->lru_lock); | 81 | spin_unlock(&rman->lock); |
73 | } while (node == NULL); | 82 | } while (node == NULL); |
74 | 83 | ||
75 | mem->mm_node = node; | 84 | mem->mm_node = node; |
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | |||
80 | static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, | 89 | static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, |
81 | struct ttm_mem_reg *mem) | 90 | struct ttm_mem_reg *mem) |
82 | { | 91 | { |
83 | struct ttm_bo_global *glob = man->bdev->glob; | 92 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
84 | 93 | ||
85 | if (mem->mm_node) { | 94 | if (mem->mm_node) { |
86 | spin_lock(&glob->lru_lock); | 95 | spin_lock(&rman->lock); |
87 | drm_mm_put_block(mem->mm_node); | 96 | drm_mm_put_block(mem->mm_node); |
88 | spin_unlock(&glob->lru_lock); | 97 | spin_unlock(&rman->lock); |
89 | mem->mm_node = NULL; | 98 | mem->mm_node = NULL; |
90 | } | 99 | } |
91 | } | 100 | } |
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, | |||
93 | static int ttm_bo_man_init(struct ttm_mem_type_manager *man, | 102 | static int ttm_bo_man_init(struct ttm_mem_type_manager *man, |
94 | unsigned long p_size) | 103 | unsigned long p_size) |
95 | { | 104 | { |
96 | struct drm_mm *mm; | 105 | struct ttm_range_manager *rman; |
97 | int ret; | 106 | int ret; |
98 | 107 | ||
99 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); | 108 | rman = kzalloc(sizeof(*rman), GFP_KERNEL); |
100 | if (!mm) | 109 | if (!rman) |
101 | return -ENOMEM; | 110 | return -ENOMEM; |
102 | 111 | ||
103 | ret = drm_mm_init(mm, 0, p_size); | 112 | ret = drm_mm_init(&rman->mm, 0, p_size); |
104 | if (ret) { | 113 | if (ret) { |
105 | kfree(mm); | 114 | kfree(rman); |
106 | return ret; | 115 | return ret; |
107 | } | 116 | } |
108 | 117 | ||
109 | man->priv = mm; | 118 | spin_lock_init(&rman->lock); |
119 | man->priv = rman; | ||
110 | return 0; | 120 | return 0; |
111 | } | 121 | } |
112 | 122 | ||
113 | static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) | 123 | static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) |
114 | { | 124 | { |
115 | struct ttm_bo_global *glob = man->bdev->glob; | 125 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
116 | struct drm_mm *mm = man->priv; | 126 | struct drm_mm *mm = &rman->mm; |
117 | int ret = 0; | ||
118 | 127 | ||
119 | spin_lock(&glob->lru_lock); | 128 | spin_lock(&rman->lock); |
120 | if (drm_mm_clean(mm)) { | 129 | if (drm_mm_clean(mm)) { |
121 | drm_mm_takedown(mm); | 130 | drm_mm_takedown(mm); |
122 | kfree(mm); | 131 | spin_unlock(&rman->lock); |
132 | kfree(rman); | ||
123 | man->priv = NULL; | 133 | man->priv = NULL; |
124 | } else | 134 | return 0; |
125 | ret = -EBUSY; | 135 | } |
126 | spin_unlock(&glob->lru_lock); | 136 | spin_unlock(&rman->lock); |
127 | return ret; | 137 | return -EBUSY; |
128 | } | 138 | } |
129 | 139 | ||
130 | static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, | 140 | static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, |
131 | const char *prefix) | 141 | const char *prefix) |
132 | { | 142 | { |
133 | struct ttm_bo_global *glob = man->bdev->glob; | 143 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
134 | struct drm_mm *mm = man->priv; | ||
135 | 144 | ||
136 | spin_lock(&glob->lru_lock); | 145 | spin_lock(&rman->lock); |
137 | drm_mm_debug_table(mm, prefix); | 146 | drm_mm_debug_table(&rman->mm, prefix); |
138 | spin_unlock(&glob->lru_lock); | 147 | spin_unlock(&rman->lock); |
139 | } | 148 | } |
140 | 149 | ||
141 | const struct ttm_mem_type_manager_func ttm_bo_manager_func = { | 150 | const struct ttm_mem_type_manager_func ttm_bo_manager_func = { |
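
The ttm_bo_manager.c rewrite gives the range manager its own struct that pairs the drm_mm allocator with a private spinlock, instead of borrowing the global LRU lock for allocator internals. A standalone sketch of that encapsulation, with a pthread mutex standing in for the kernel spinlock and a simple counter for drm_mm:

/* The allocator and the lock that guards it live in one struct. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct range_manager {
    long free_pages;        /* stand-in for struct drm_mm */
    pthread_mutex_t lock;   /* guards only this manager's state */
};

static struct range_manager *rman_init(long p_size)
{
    struct range_manager *rman = calloc(1, sizeof(*rman));
    if (!rman)
        return NULL;
    rman->free_pages = p_size;
    pthread_mutex_init(&rman->lock, NULL);
    return rman;
}

static int rman_get(struct range_manager *rman, long pages)
{
    int ret = -1;
    pthread_mutex_lock(&rman->lock);
    if (rman->free_pages >= pages) {
        rman->free_pages -= pages;
        ret = 0;
    }
    pthread_mutex_unlock(&rman->lock);
    return ret;
}

int main(void)
{
    struct range_manager *rman = rman_init(1024);
    printf("get 256 pages: %d\n", rman_get(rman, 256));
    pthread_mutex_destroy(&rman->lock);
    free(rman);
    return 0;
}
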
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a7bab87a548b..af789dc869b9 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | |||
440 | return ret; | 440 | return ret; |
441 | 441 | ||
442 | ret = be->func->bind(be, bo_mem); | 442 | ret = be->func->bind(be, bo_mem); |
443 | if (ret) { | 443 | if (unlikely(ret != 0)) |
444 | printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n"); | ||
445 | return ret; | 444 | return ret; |
446 | } | ||
447 | 445 | ||
448 | ttm->state = tt_bound; | 446 | ttm->state = tt_bound; |
449 | 447 | ||
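
The ttm_tt.c cleanup drops the redundant printk and marks the failure branch with unlikely(); a short sketch of that branch-prediction hint via the GCC/Clang __builtin_expect extension, with an illustrative stub in place of be->func->bind:

/* unlikely() tells the compiler the error path is cold. */
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

static int backend_bind(void) { return 0; }   /* stub for be->func->bind */

static int tt_bind(void)
{
    int ret = backend_bind();
    if (unlikely(ret != 0))   /* hint: error path is rarely taken */
        return ret;
    return 0;                 /* ttm->state = tt_bound in the driver */
}

int main(void)
{
    printf("bind: %d\n", tt_bind());
    return 0;
}
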
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 9b5b4d9dd62c..3e038a394c51 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c | |||
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | |||
235 | vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - | 235 | vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - |
236 | first_pfn + 1; | 236 | first_pfn + 1; |
237 | 237 | ||
238 | if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) | 238 | vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages); |
239 | if (NULL == vsg->pages) | ||
239 | return -ENOMEM; | 240 | return -ENOMEM; |
240 | memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); | ||
241 | down_read(¤t->mm->mmap_sem); | 241 | down_read(¤t->mm->mmap_sem); |
242 | ret = get_user_pages(current, current->mm, | 242 | ret = get_user_pages(current, current->mm, |
243 | (unsigned long)xfer->mem_addr, | 243 | (unsigned long)xfer->mem_addr, |
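
The via_dmablit.c change folds vmalloc() plus memset() into a single vzalloc(), so the allocation and the zeroing can never disagree about the size. A userspace analogue using calloc():

/* One zeroing allocation call instead of malloc-then-memset. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t num_pages = 16;
    void **pages = calloc(num_pages, sizeof(*pages));   /* like vzalloc */
    if (pages == NULL)
        return -1;
    printf("allocated %zu zeroed slots\n", num_pages);
    free(pages);
    return 0;
}
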
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 51d9f9f1d7f2..76954e3528c1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
691 | 691 | ||
692 | fence_rep.error = ret; | 692 | fence_rep.error = ret; |
693 | fence_rep.fence_seq = (uint64_t) sequence; | 693 | fence_rep.fence_seq = (uint64_t) sequence; |
694 | fence_rep.pad64 = 0; | ||
694 | 695 | ||
695 | user_fence_rep = (struct drm_vmw_fence_rep __user *) | 696 | user_fence_rep = (struct drm_vmw_fence_rep __user *) |
696 | (unsigned long)arg->fence_rep; | 697 | (unsigned long)arg->fence_rep; |
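
Zeroing fence_rep.pad64 matters because the struct lives on the kernel stack and is copied to userspace wholesale; any member or padding left unset would leak stale kernel bytes. A userspace demonstration of the idiom; the struct layout here is illustrative, not the real drm_vmw_fence_rep:

/* Unset members of a stack struct hold stale bytes; zero them all. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fence_rep {
    int32_t  error;
    uint32_t pad64;     /* explicit padding up to the 64-bit member */
    uint64_t fence_seq;
};

int main(void)
{
    struct fence_rep rep;
    memset(&rep, 0, sizeof(rep));   /* belt-and-braces: zero everything */
    rep.error = 0;
    rep.fence_seq = 42;
    rep.pad64 = 0;                  /* or zero each member explicitly */
    printf("pad64=%u\n", (unsigned)rep.pad64);
    return 0;
}
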
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 87c6e6156d7d..cceeb42789b6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
720 | &vmw_vram_ne_placement, | 720 | &vmw_vram_ne_placement, |
721 | false, &vmw_dmabuf_bo_free); | 721 | false, &vmw_dmabuf_bo_free); |
722 | vmw_overlay_resume_all(dev_priv); | 722 | vmw_overlay_resume_all(dev_priv); |
723 | if (unlikely(ret != 0)) | ||
724 | vfbs->buffer = NULL; | ||
723 | 725 | ||
724 | return ret; | 726 | return ret; |
725 | } | 727 | } |
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | |||
730 | struct vmw_framebuffer_surface *vfbs = | 732 | struct vmw_framebuffer_surface *vfbs = |
731 | vmw_framebuffer_to_vfbs(&vfb->base); | 733 | vmw_framebuffer_to_vfbs(&vfb->base); |
732 | 734 | ||
735 | if (unlikely(vfbs->buffer == NULL)) | ||
736 | return 0; | ||
737 | |||
733 | bo = &vfbs->buffer->base; | 738 | bo = &vfbs->buffer->base; |
734 | ttm_bo_unref(&bo); | 739 | ttm_bo_unref(&bo); |
735 | vfbs->buffer = NULL; | 740 | vfbs->buffer = NULL; |
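
The pair of vmwgfx_kms.c hunks makes pin/unpin robust against a failed pin: on error the buffer pointer is reset to NULL, and the unpin path now tolerates that state instead of dereferencing it. A standalone sketch of the pattern:

/* Record failure by NULLing the pointer; release tolerates it. */
#include <stdio.h>
#include <stdlib.h>

struct fb { void *buffer; };

static int fb_pin(struct fb *fb, int fail)
{
    fb->buffer = malloc(64);
    if (fail) {                /* pin failed: don't keep a junk pointer */
        free(fb->buffer);
        fb->buffer = NULL;
        return -1;
    }
    return 0;
}

static int fb_unpin(struct fb *fb)
{
    if (fb->buffer == NULL)    /* nothing was pinned; nothing to undo */
        return 0;
    free(fb->buffer);
    fb->buffer = NULL;
    return 0;
}

int main(void)
{
    struct fb fb = { 0 };
    fb_pin(&fb, 1);
    printf("unpin after failed pin: %d\n", fb_unpin(&fb));
    return 0;
}
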
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index a01c47ddb5bc..29113c9b26a8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
557 | return -EINVAL; | 557 | return -EINVAL; |
558 | } | 558 | } |
559 | 559 | ||
560 | dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); | 560 | dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); |
561 | 561 | ||
562 | if (!dev_priv->ldu_priv) | 562 | if (!dev_priv->ldu_priv) |
563 | return -ENOMEM; | 563 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index df2036ed18d5..f1a52f9e7298 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv) | |||
585 | return -ENOSYS; | 585 | return -ENOSYS; |
586 | } | 586 | } |
587 | 587 | ||
588 | overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); | 588 | overlay = kmalloc(sizeof(*overlay), GFP_KERNEL); |
589 | if (!overlay) | 589 | if (!overlay) |
590 | return -ENOMEM; | 590 | return -ENOMEM; |
591 | 591 | ||
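
Both vmwgfx allocation fixes above correct the same swapped-argument bug: kmalloc() takes (size, gfp_flags) in that order, so kmalloc(GFP_KERNEL, sizeof(*x)) allocated GFP_KERNEL-many bytes under nonsense flags, and the compiler stayed silent because both parameters are plain integers. A userspace demonstration with a mocked kmalloc() and an illustrative GFP_KERNEL value:

/* Both args are integers, so swapping them compiles silently. */
#include <stdio.h>
#include <stdlib.h>

#define GFP_KERNEL 0xd0u   /* illustrative value only */

static void *kmalloc(size_t size, unsigned flags)
{
    (void)flags;
    printf("allocating %zu bytes\n", size);
    return malloc(size);
}

struct overlay { char state[4096]; };

int main(void)
{
    /* swapped: allocates 0xd0 bytes, far too small for the struct */
    void *bad = kmalloc(GFP_KERNEL, sizeof(struct overlay));
    /* fixed: allocates sizeof(struct overlay) bytes */
    void *good = kmalloc(sizeof(struct overlay), GFP_KERNEL);
    free(bad);
    free(good);
    return 0;
}
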