Diffstat (limited to 'drivers/gpu')
36 files changed, 403 insertions, 117 deletions
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1981861bbbd..f87bf104df7a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
 		mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
 				    false);
 		mode->hdisplay = 1366;
-		mode->vsync_start = mode->vsync_start - 1;
-		mode->vsync_end = mode->vsync_end - 1;
+		mode->hsync_start = mode->hsync_start - 1;
+		mode->hsync_end = mode->hsync_end - 1;
 		return mode;
 	}
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1f2cc6b09623..719662034bbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -315,8 +315,9 @@ static void drm_fb_helper_on(struct fb_info *info)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	int i;
+	int i, j;
 
 	/*
 	 * For each CRTC in this fb, turn the crtc on then,
@@ -332,7 +333,14 @@ static void drm_fb_helper_on(struct fb_info *info)
 
 		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
-
+		/* Walk the connectors & encoders on this fb turning them on */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			connector = fb_helper->connector_info[j]->connector;
+			connector->dpms = DRM_MODE_DPMS_ON;
+			drm_connector_property_set_value(connector,
+							 dev->mode_config.dpms_property,
+							 DRM_MODE_DPMS_ON);
+		}
 		/* Found a CRTC on this fb, now find encoders */
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			if (encoder->crtc == crtc) {
@@ -352,8 +360,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	int i;
+	int i, j;
 
 	/*
 	 * For each CRTC in this fb, find all associated encoders
@@ -367,6 +376,14 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 		if (!crtc->enabled)
 			continue;
 
+		/* Walk the connectors on this fb and mark them off */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			connector = fb_helper->connector_info[j]->connector;
+			connector->dpms = dpms_mode;
+			drm_connector_property_set_value(connector,
+							 dev->mode_config.dpms_property,
+							 dpms_mode);
+		}
 		/* Found a CRTC on this fb, now find encoders */
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			if (encoder->crtc == crtc) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aee83fa178f6..9214119c0154 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		case FBC_NOT_TILED:
 			seq_printf(m, "scanout buffer not tiled");
 			break;
+		case FBC_MULTIPLE_PIPES:
+			seq_printf(m, "multiple pipes are enabled");
+			break;
 		default:
 			seq_printf(m, "unknown reason");
 		}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f00c5ae9556c..2305a1234f1e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1300,7 +1300,7 @@ static void i915_cleanup_compression(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	drm_mm_put_block(dev_priv->compressed_fb);
-	if (!IS_GM45(dev))
+	if (dev_priv->compressed_llb)
 		drm_mm_put_block(dev_priv->compressed_llb);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d147ab2f5bfc..2e1744d37ad5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -215,6 +215,7 @@ enum no_fbc_reason {
 	FBC_MODE_TOO_LARGE, /* mode too large for compression */
 	FBC_BAD_PLANE, /* fbc not supported on plane */
 	FBC_NOT_TILED, /* buffer not tiled */
+	FBC_MULTIPLE_PIPES, /* more than one pipe active */
 };
 
 enum intel_pch {
@@ -222,6 +223,8 @@ enum intel_pch {
 	PCH_CPT,	/* Cougarpoint PCH */
 };
 
+#define QUIRK_PIPEA_FORCE (1<<0)
+
 struct intel_fbdev;
 
 typedef struct drm_i915_private {
@@ -337,6 +340,8 @@ typedef struct drm_i915_private {
 	/* PCH chipset type */
 	enum intel_pch pch_type;
 
+	unsigned long quirks;
+
 	/* Register state */
 	bool modeset_on_lid;
 	u8 saveLBB;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 074385882ccf..5aa747fc25a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
 		page = read_cache_page_gfp(mapping, i,
 					   GFP_HIGHUSER |
 					   __GFP_COLD |
+					   __GFP_RECLAIMABLE |
 					   gfpmask);
 		if (IS_ERR(page))
 			goto err_pages;
@@ -3646,6 +3647,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 	return ret;
 }
 
+
 int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv,
@@ -3793,7 +3795,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		unsigned long long total_size = 0;
 		int num_fences = 0;
 		for (i = 0; i < args->buffer_count; i++) {
-			obj_priv = object_list[i]->driver_private;
+			obj_priv = to_intel_bo(object_list[i]);
 
 			total_size += object_list[i]->size;
 			num_fences +=
@@ -4741,6 +4743,16 @@ i915_gem_load(struct drm_device *dev)
 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
 	spin_unlock(&shrink_list_lock);
 
+	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+	if (IS_GEN3(dev)) {
+		u32 tmp = I915_READ(MI_ARB_STATE);
+		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
+			/* arb state is a masked write, so set bit + bit in mask */
+			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
+			I915_WRITE(MI_ARB_STATE, tmp);
+		}
+	}
+
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->fence_reg_start = 3;
@@ -4977,7 +4989,7 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
-i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	drm_i915_private_t *dev_priv, *next_dev;
 	struct drm_i915_gem_object *obj_priv, *next_obj;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 150400f40534..cf41c672defe 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,70 @@
 #define   LM_BURST_LENGTH	0x00000700
 #define   LM_FIFO_WATERMARK	0x0000001F
 #define MI_ARB_STATE	0x020e4 /* 915+ only */
+#define   MI_ARB_MASK_SHIFT	16	/* shift for enable bits */
+
+/* Make render/texture TLB fetches lower priorty than associated data
+ * fetches. This is not turned on by default
+ */
+#define   MI_ARB_RENDER_TLB_LOW_PRIORITY	(1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define   MI_ARB_ISOCH_WAIT_GTT	(1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define   MI_ARB_BLOCK_GRANT_MASK	(3 << 12)
+#define   MI_ARB_BLOCK_GRANT_8		(0 << 12)	/* for 3 display planes */
+#define   MI_ARB_BLOCK_GRANT_4		(1 << 12)	/* for 2 display planes */
+#define   MI_ARB_BLOCK_GRANT_2		(2 << 12)	/* for 1 display plane */
+#define   MI_ARB_BLOCK_GRANT_0		(3 << 12)	/* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define   MI_ARB_C3_LP_WRITE_ENABLE	(1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2TLB fetches.
+ */
+#define   MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE	(1 << 10)
+
+/* Enables non-sequential data reads through arbiter
+ */
+#define   MI_ARB_DUAL_DATA_PHASE_DISABLE	(1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define   MI_ARB_CACHE_SNOOP_DISABLE	(1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define   MI_ARB_TIME_SLICE_MASK	(7 << 5)
+#define   MI_ARB_TIME_SLICE_1		(0 << 5)
+#define   MI_ARB_TIME_SLICE_2		(1 << 5)
+#define   MI_ARB_TIME_SLICE_4		(2 << 5)
+#define   MI_ARB_TIME_SLICE_6		(3 << 5)
+#define   MI_ARB_TIME_SLICE_8		(4 << 5)
+#define   MI_ARB_TIME_SLICE_10		(5 << 5)
+#define   MI_ARB_TIME_SLICE_14		(6 << 5)
+#define   MI_ARB_TIME_SLICE_16		(7 << 5)
+
+/* Low priority grace period page size */
+#define   MI_ARB_LOW_PRIORITY_GRACE_4KB	(0 << 4)	/* default */
+#define   MI_ARB_LOW_PRIORITY_GRACE_8KB	(1 << 4)
+
+/* Disable display A/B trickle feed */
+#define   MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE	(1 << 2)
+
+/* Set display plane priority */
+#define   MI_ARB_DISPLAY_PRIORITY_A_B	(0 << 0)	/* display A > display B */
+#define   MI_ARB_DISPLAY_PRIORITY_B_A	(1 << 0)	/* display B > display A */
+
 #define CACHE_MODE_0	0x02120 /* 915+ only */
 #define   CM0_MASK_SHIFT	16
 #define   CM0_IZ_OPT_DISABLE	(1<<6)
@@ -2805,6 +2869,7 @@
 
 #define PCH_PP_STATUS		0xc7200
 #define PCH_PP_CONTROL		0xc7204
+#define  PANEL_UNLOCK_REGS	(0xabcd << 16)
 #define  EDP_FORCE_VDD		(1 << 3)
 #define  EDP_BLC_ENABLE		(1 << 2)
 #define  PANEL_POWER_RESET	(1 << 1)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 68dcf36e2793..5e21b3119824 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -862,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	intel_clock_t clock;
 	int max_n;
 	bool found;
-	/* approximately equals target * 0.00488 */
-	int err_most = (target >> 8) + (target >> 10);
+	/* approximately equals target * 0.00585 */
+	int err_most = (target >> 8) + (target >> 9);
 	found = false;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -1180,8 +1180,12 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj_priv;
+	struct drm_crtc *tmp_crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane;
+	int crtcs_enabled = 0;
+
+	DRM_DEBUG_KMS("\n");
 
 	if (!i915_powersave)
 		return;
@@ -1199,10 +1203,21 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 	 * If FBC is already on, we just have to verify that we can
 	 * keep it that way...
 	 * Need to disable if:
+	 *   - more than one pipe is active
 	 *   - changing FBC params (stride, fence, mode)
 	 *   - new fb is too large to fit in compressed buffer
 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
+	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+		if (tmp_crtc->enabled)
+			crtcs_enabled++;
+	}
+	DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
+	if (crtcs_enabled > 1) {
+		DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+		dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+		goto out_disable;
+	}
 	if (intel_fb->obj->size > dev_priv->cfb_size) {
 		DRM_DEBUG_KMS("framebuffer too large, disabling "
 			      "compression\n");
@@ -1255,7 +1270,7 @@ out_disable:
 	}
 }
 
-static int
+int
 intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -2255,6 +2270,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 			intel_wait_for_vblank(dev);
 		}
 
+		/* Don't disable pipe A or pipe A PLLs if needed */
+		if (pipeconf_reg == PIPEACONF &&
+		    (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+			goto skip_pipe_off;
+
 		/* Next, disable display pipes */
 		temp = I915_READ(pipeconf_reg);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -2270,7 +2290,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 			I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
 			I915_READ(dpll_reg);
 		}
-
+	skip_pipe_off:
 		/* Wait for the clocks to turn off. */
 		udelay(150);
 		break;
@@ -2356,8 +2376,6 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 		if (mode->clock * 3 > 27000 * 4)
 			return MODE_CLOCK_HIGH;
 	}
-
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
 	return true;
 }
 
@@ -3736,6 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	if (dev_priv->lvds_dither) {
 		if (HAS_PCH_SPLIT(dev)) {
 			pipeconf |= PIPE_ENABLE_DITHER;
+			pipeconf &= ~PIPE_DITHER_TYPE_MASK;
 			pipeconf |= PIPE_DITHER_TYPE_ST01;
 		} else
 			lvds |= LVDS_ENABLE_DITHER;
@@ -4412,7 +4431,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
 		DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
 		/* Unlock panel regs */
-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+			   PANEL_UNLOCK_REGS);
 
 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
@@ -4455,7 +4475,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 		DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
 		/* Unlock panel regs */
-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+			   PANEL_UNLOCK_REGS);
 
 		dpll |= DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
@@ -4695,7 +4716,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
-	unsigned long flags;
+	unsigned long flags, offset;
 	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
 	int ret, pipesrc;
 	u32 flip_mask;
@@ -4762,19 +4783,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	while (I915_READ(ISR) & flip_mask)
 		;
 
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = obj_priv->gtt_offset;
+	offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+
 	BEGIN_LP_RING(4);
 	if (IS_I965G(dev)) {
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+		OUT_RING(offset | obj_priv->tiling_mode);
 		pipesrc = I915_READ(pipesrc_reg);
 		OUT_RING(pipesrc & 0x0fff0fff);
 	} else {
 		OUT_RING(MI_DISPLAY_FLIP_I915 |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset);
+		OUT_RING(offset);
 		OUT_RING(MI_NOOP);
 	}
 	ADVANCE_LP_RING();
@@ -5506,6 +5531,66 @@ static void intel_init_display(struct drm_device *dev)
 	}
 }
 
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times. This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force (struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+}
+
+struct intel_quirk {
+	int device;
+	int subsystem_vendor;
+	int subsystem_device;
+	void (*hook)(struct drm_device *dev);
+};
+
+struct intel_quirk intel_quirks[] = {
+	/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+	{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+	/* HP Mini needs pipe A force quirk (LP: #322104) */
+	{ 0x27ae,0x103c, 0x361a, quirk_pipea_force },
+
+	/* Thinkpad R31 needs pipe A force quirk */
+	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
+	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
+	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
+	/* ThinkPad X40 needs pipe A force quirk */
+
+	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+	/* 855 & before need to leave pipe A & dpll A up */
+	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+	struct pci_dev *d = dev->pdev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+		struct intel_quirk *q = &intel_quirks[i];
+
+		if (d->device == q->device &&
+		    (d->subsystem_vendor == q->subsystem_vendor ||
+		     q->subsystem_vendor == PCI_ANY_ID) &&
+		    (d->subsystem_device == q->subsystem_device ||
+		     q->subsystem_device == PCI_ANY_ID))
+			q->hook(dev);
+	}
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5518,6 +5603,8 @@ void intel_modeset_init(struct drm_device *dev)
 
 	dev->mode_config.funcs = (void *)&intel_mode_funcs;
 
+	intel_init_quirks(dev);
+
 	intel_init_display(dev);
 
 	if (IS_I965G(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1aac59e83bff..5dde80f9e652 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -717,6 +717,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	}
 }
 
+static void ironlake_edp_panel_on (struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+	u32 pp, pp_status;
+
+	pp_status = I915_READ(PCH_PP_STATUS);
+	if (pp_status & PP_ON)
+		return;
+
+	pp = I915_READ(PCH_PP_CONTROL);
+	pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	do {
+		pp_status = I915_READ(PCH_PP_STATUS);
+	} while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
+
+	if (time_after(jiffies, timeout))
+		DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+
+	pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+	I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
+static void ironlake_edp_panel_off (struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+	u32 pp, pp_status;
+
+	pp = I915_READ(PCH_PP_CONTROL);
+	pp &= ~POWER_TARGET_ON;
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	do {
+		pp_status = I915_READ(PCH_PP_STATUS);
+	} while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
+
+	if (time_after(jiffies, timeout))
+		DRM_DEBUG_KMS("panel off wait timed out\n");
+
+	/* Make sure VDD is enabled so DP AUX will work */
+	pp |= EDP_FORCE_VDD;
+	I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
 static void ironlake_edp_backlight_on (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -751,14 +796,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 	if (mode != DRM_MODE_DPMS_ON) {
 		if (dp_reg & DP_PORT_EN) {
 			intel_dp_link_down(intel_encoder, dp_priv->DP);
-			if (IS_eDP(intel_encoder))
+			if (IS_eDP(intel_encoder)) {
 				ironlake_edp_backlight_off(dev);
+				ironlake_edp_panel_off(dev);
+			}
 		}
 	} else {
 		if (!(dp_reg & DP_PORT_EN)) {
 			intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
-			if (IS_eDP(intel_encoder))
+			if (IS_eDP(intel_encoder)) {
+				ironlake_edp_panel_on(dev);
 				ironlake_edp_backlight_on(dev);
+			}
 		}
 	}
 	dp_priv->dpms_mode = mode;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 72206f37c4fb..2f7970be9051 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -215,6 +215,9 @@ extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+				      struct drm_gem_object *obj);
+
 extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index c3c505244e07..3e18c9e7729b 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_pin(fbo, 64*1024);
+	ret = intel_pin_and_fence_fb_obj(dev, fbo);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
@@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev,
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj)
-		drm_gem_object_unreference_unlocked(ifb->obj);
+		drm_gem_object_unreference(ifb->obj);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 31df55f0a0a7..0eab8df5bf7e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -599,6 +599,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+	return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+	{
+		.callback = intel_no_modeset_on_lid_dmi_callback,
+		.ident = "Toshiba Tecra A11",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+		},
+	},
+
+	{ }	/* terminating entry */
+};
+
 /*
  * Lid events. Note the use of 'modeset_on_lid':
  * - we set it on lid close, and reset it on open
@@ -622,6 +642,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	 */
 	if (connector)
 		connector->status = connector->funcs->detect(connector);
+	/* Don't force modeset on machines where it causes a GPU lockup */
+	if (dmi_check_system(intel_no_modeset_on_lid))
+		return NOTIFY_OK;
 	if (!acpi_lid_open()) {
 		dev_priv->modeset_on_lid = 1;
 		return NOTIFY_OK;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index fc924b649195..e492919faf44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -203,36 +203,26 @@ struct methods {
 	const bool rw;
 };
 
-static struct methods nv04_methods[] = {
-	{ "PROM", load_vbios_prom, false },
-	{ "PRAMIN", load_vbios_pramin, true },
-	{ "PCIROM", load_vbios_pci, true },
-};
-
-static struct methods nv50_methods[] = {
-	{ "ACPI", load_vbios_acpi, true },
+static struct methods shadow_methods[] = {
 	{ "PRAMIN", load_vbios_pramin, true },
 	{ "PROM", load_vbios_prom, false },
 	{ "PCIROM", load_vbios_pci, true },
+	{ "ACPI", load_vbios_acpi, true },
 };
 
-#define METHODCNT 3
-
 static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct methods *methods;
-	int i;
+	const int nr_methods = ARRAY_SIZE(shadow_methods);
+	struct methods *methods = shadow_methods;
 	int testscore = 3;
-	int scores[METHODCNT];
+	int scores[nr_methods], i;
 
 	if (nouveau_vbios) {
-		methods = nv04_methods;
-		for (i = 0; i < METHODCNT; i++)
+		for (i = 0; i < nr_methods; i++)
 			if (!strcasecmp(nouveau_vbios, methods[i].desc))
 				break;
 
-		if (i < METHODCNT) {
+		if (i < nr_methods) {
 			NV_INFO(dev, "Attempting to use BIOS image from %s\n",
 				methods[i].desc);
 
@@ -244,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
 		NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
 	}
 
-	if (dev_priv->card_type < NV_50)
-		methods = nv04_methods;
-	else
-		methods = nv50_methods;
-
-	for (i = 0; i < METHODCNT; i++) {
+	for (i = 0; i < nr_methods; i++) {
 		NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
 			 methods[i].desc);
 		data[0] = data[1] = 0;	/* avoid reuse of previous image */
@@ -260,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
 	}
 
 	while (--testscore > 0) {
-		for (i = 0; i < METHODCNT; i++) {
+		for (i = 0; i < nr_methods; i++) {
 			if (scores[i] == testscore) {
 				NV_TRACE(dev, "Using BIOS image from %s\n",
 					 methods[i].desc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index c9a4a0d2a115..257ea130ae13 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -387,7 +387,8 @@ int nouveau_fbcon_init(struct drm_device *dev)
 	dev_priv->nfbdev = nfbdev;
 	nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
 
-	ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+	ret = drm_fb_helper_init(dev, &nfbdev->helper,
+				 nv_two_heads(dev) ? 2 : 1, 4);
 	if (ret) {
 		kfree(nfbdev);
 		return ret;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 010963d4570f..345a75a03c96 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -333,7 +333,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	header = radeon_get_ib_value(p, h_idx);
 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
 	reg = CP_PACKET0_GET_REG(header);
-	mutex_lock(&p->rdev->ddev->mode_config.mutex);
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -368,7 +367,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
 		}
 	}
 out:
-	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 3970e62eaab8..a89a15ab524d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1230,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	header = radeon_get_ib_value(p, h_idx);
 	crtc_id = radeon_get_ib_value(p, h_idx + 5);
 	reg = CP_PACKET0_GET_REG(header);
-	mutex_lock(&p->rdev->ddev->mode_config.mutex);
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1264,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
 	}
 out:
-	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
 	return r;
 }
 
@@ -2354,6 +2352,7 @@ void r100_mc_init(struct radeon_device *rdev)
 	if (rdev->flags & RADEON_IS_IGP)
 		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
 	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
 	if (!(rdev->flags & RADEON_IS_AGP))
 		radeon_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7e81db5eb804..19a7ef7ee344 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -481,6 +481,7 @@ void r300_mc_init(struct radeon_device *rdev)
 	if (rdev->flags & RADEON_IS_IGP)
 		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
 	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
 	if (!(rdev->flags & RADEON_IS_AGP))
 		radeon_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
@@ -1176,6 +1177,8 @@ int r300_cs_parse(struct radeon_cs_parser *p)
 	int r;
 
 	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (track == NULL)
+		return -ENOMEM;
 	r100_cs_track_clear(p->rdev, track);
 	p->track = track;
 	do {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 34330df28483..694af7cc23ac 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -125,6 +125,7 @@ void r520_mc_init(struct radeon_device *rdev)
 	r520_vram_get_type(rdev);
 	r100_vram_init_sizes(rdev);
 	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
 	if (!(rdev->flags & RADEON_IS_AGP))
 		radeon_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 3d6645ce2151..e100f69faeec 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1179,6 +1179,7 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 		if (rdev->flags & RADEON_IS_IGP)
 			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
 		radeon_vram_location(rdev, &rdev->mc, base);
+		rdev->mc.gtt_base_align = 0;
 		radeon_gtt_location(rdev, mc);
 	}
 }
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index f4fb88ece2bb..ca5c29f70779 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -538,9 +538,12 @@ int
 r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int ret;
 	DRM_DEBUG("\n");
 
-	r600_nomm_get_vb(dev);
+	ret = r600_nomm_get_vb(dev);
+	if (ret)
+		return ret;
 
 	dev_priv->blit_vb->file_priv = file_priv;
 
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c39c1bc13016..144c32d37136 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -585,7 +585,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	header = radeon_get_ib_value(p, h_idx);
 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
 	reg = CP_PACKET0_GET_REG(header);
-	mutex_lock(&p->rdev->ddev->mode_config.mutex);
+
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -620,7 +620,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
 		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
 	}
 out:
-	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ab61aaa887bb..2f94dc66c183 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -351,6 +351,7 @@ struct radeon_mc {
 	int vram_mtrr;
 	bool vram_is_ddr;
 	bool igp_sideport_enabled;
+	u64 gtt_base_align;
 };
 
 bool radeon_combios_sideport_present(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 99bd8a9c56b3..10673ae59cfa 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -280,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* ASUS HD 3600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x9598) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x01e4)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
 	/* ASUS HD 3450 board lists the DVI port as HDMI */
 	if ((dev->pdev->device == 0x95C5) &&
 	    (dev->pdev->subsystem_vendor == 0x1043) &&
@@ -1029,8 +1038,15 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 				   data_offset);
 	switch (crev) {
 	case 1:
-		if (igp_info->info.ucMemoryType & 0xf0)
-			return true;
+		/* AMD IGPS */
+		if ((rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740)) {
+			if (igp_info->info.ulBootUpMemoryClock)
+				return true;
+		} else {
+			if (igp_info->info.ucMemoryType & 0xf0)
+				return true;
+		}
 		break;
 	case 2:
 		if (igp_info->info_2.ucMemoryType & 0x0f)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index d1c1d8dd93ce..2417d7b06fdb 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3050,6 +3050,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
 	    rdev->pdev->subsystem_device == 0x308b)
 		return;
 
+	/* quirk for rs4xx HP dv5000 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x30a4)
+		return;
+
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 	if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 0c7ccc6961a3..adccbc2c202c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -771,30 +771,27 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 	} else
 		ret = connector_status_connected;
 
-	/* multiple connectors on the same encoder with the same ddc line
-	 * This tends to be HDMI and DVI on the same encoder with the
-	 * same ddc line. If the edid says HDMI, consider the HDMI port
-	 * connected and the DVI port disconnected. If the edid doesn't
-	 * say HDMI, vice versa.
+	/* This gets complicated. We have boards with VGA + HDMI with a
+	 * shared DDC line and we have boards with DVI-D + HDMI with a shared
+	 * DDC line. The latter is more complex because with DVI<->HDMI adapters
+	 * you don't really know what's connected to which port as both are digital.
 	 */
 	if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
 		struct drm_device *dev = connector->dev;
+		struct radeon_device *rdev = dev->dev_private;
 		struct drm_connector *list_connector;
 		struct radeon_connector *list_radeon_connector;
 		list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
 			if (connector == list_connector)
 				continue;
 			list_radeon_connector = to_radeon_connector(list_connector);
-			if (radeon_connector->devices == list_radeon_connector->devices) {
-				if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
-					if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
-						kfree(radeon_connector->edid);
-						radeon_connector->edid = NULL;
-						ret = connector_status_disconnected;
-					}
-				} else {
-					if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
-					    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+			if (list_radeon_connector->shared_ddc &&
+			    (list_radeon_connector->ddc_bus->rec.i2c_id ==
+			     radeon_connector->ddc_bus->rec.i2c_id)) {
+				/* cases where both connectors are digital */
+				if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+					/* hpd is our only option in this case */
+					if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
 						kfree(radeon_connector->edid);
 						radeon_connector->edid = NULL;
 						ret = connector_status_disconnected;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5f317317aba2..dd279da90546 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -226,20 +226,20 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
 	u64 size_af, size_bf;
 
-	size_af = 0xFFFFFFFF - mc->vram_end;
-	size_bf = mc->vram_start;
+	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+	size_bf = mc->vram_start & ~mc->gtt_base_align;
 	if (size_bf > size_af) {
 		if (mc->gtt_size > size_bf) {
 			dev_warn(rdev->dev, "limiting GTT\n");
 			mc->gtt_size = size_bf;
 		}
-		mc->gtt_start = mc->vram_start - mc->gtt_size;
+		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
 	} else {
 		if (mc->gtt_size > size_af) {
 			dev_warn(rdev->dev, "limiting GTT\n");
 			mc->gtt_size = size_af;
 		}
-		mc->gtt_start = mc->vram_end + 1;
+		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 	}
 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
 	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 6a70c0dc7f92..ab389f89fa8d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -128,7 +128,8 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
 			crtc = (struct drm_crtc *)minfo->crtcs[i];
 			if (crtc && crtc->base.id == value) {
-				value = i;
+				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+				value = radeon_crtc->crtc_id;
 				found = 1;
 				break;
 			}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index bad77f40a9da..5688a0cf6bbe 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
108 | udelay(panel_pwr_delay * 1000); | 108 | udelay(panel_pwr_delay * 1000); |
109 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); | 109 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); |
110 | WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); | 110 | WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); |
111 | udelay(panel_pwr_delay * 1000); | ||
111 | break; | 112 | break; |
112 | } | 113 | } |
113 | 114 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index f2ed27c8055b..032040397743 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
@@ -642,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, | |||
642 | } | 642 | } |
643 | flicker_removal = (tmp + 500) / 1000; | 643 | flicker_removal = (tmp + 500) / 1000; |
644 | 644 | ||
645 | if (flicker_removal < 2) | 645 | if (flicker_removal < 3) |
646 | flicker_removal = 2; | 646 | flicker_removal = 3; |
647 | for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { | 647 | for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { |
648 | if (flicker_removal == SLOPE_limit[i]) | 648 | if (flicker_removal == SLOPE_limit[i]) |
649 | break; | 649 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 115d26b762cc..3fa6984d9896 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -333,6 +333,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev, | |||
333 | return snprintf(buf, PAGE_SIZE, "%s\n", | 333 | return snprintf(buf, PAGE_SIZE, "%s\n", |
334 | (cp == PM_PROFILE_AUTO) ? "auto" : | 334 | (cp == PM_PROFILE_AUTO) ? "auto" : |
335 | (cp == PM_PROFILE_LOW) ? "low" : | 335 | (cp == PM_PROFILE_LOW) ? "low" : |
336 | (cp == PM_PROFILE_MID) ? "mid" : | ||
336 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); | 337 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); |
337 | } | 338 | } |
338 | 339 | ||
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9e4240b3bf0b..f454c9a5e7f2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -57,7 +57,9 @@ void rs400_gart_adjust_size(struct radeon_device *rdev) | |||
57 | } | 57 | } |
58 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { | 58 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { |
59 | /* FIXME: RS400 & RS480 seems to have issue with GART size | 59 | /* FIXME: RS400 & RS480 seems to have issue with GART size |
60 | * if 4G of system memory (needs more testing) */ | 60 | * if 4G of system memory (needs more testing) |
61 | */ | ||
62 | /* XXX is this still an issue with proper alignment? */ | ||
61 | rdev->mc.gtt_size = 32 * 1024 * 1024; | 63 | rdev->mc.gtt_size = 32 * 1024 * 1024; |
62 | DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); | 64 | DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); |
63 | } | 65 | } |
@@ -263,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev) | |||
263 | r100_vram_init_sizes(rdev); | 265 | r100_vram_init_sizes(rdev); |
264 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 266 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
265 | radeon_vram_location(rdev, &rdev->mc, base); | 267 | radeon_vram_location(rdev, &rdev->mc, base); |
268 | rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; | ||
266 | radeon_gtt_location(rdev, &rdev->mc); | 269 | radeon_gtt_location(rdev, &rdev->mc); |
267 | radeon_update_bandwidth_info(rdev); | 270 | radeon_update_bandwidth_info(rdev); |
268 | } | 271 | } |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 7bb4c3e52f3b..6dc15ea8ba33 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -698,6 +698,7 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
698 | base = G_000004_MC_FB_START(base) << 16; | 698 | base = G_000004_MC_FB_START(base) << 16; |
699 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 699 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
700 | radeon_vram_location(rdev, &rdev->mc, base); | 700 | radeon_vram_location(rdev, &rdev->mc, base); |
701 | rdev->mc.gtt_base_align = 0; | ||
701 | radeon_gtt_location(rdev, &rdev->mc); | 702 | radeon_gtt_location(rdev, &rdev->mc); |
702 | radeon_update_bandwidth_info(rdev); | 703 | radeon_update_bandwidth_info(rdev); |
703 | } | 704 | } |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index f4f0a61bcdce..ce4ecbe10816 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -162,6 +162,7 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
162 | rs690_pm_info(rdev); | 162 | rs690_pm_info(rdev); |
163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
164 | radeon_vram_location(rdev, &rdev->mc, base); | 164 | radeon_vram_location(rdev, &rdev->mc, base); |
165 | rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; | ||
165 | radeon_gtt_location(rdev, &rdev->mc); | 166 | radeon_gtt_location(rdev, &rdev->mc); |
166 | radeon_update_bandwidth_info(rdev); | 167 | radeon_update_bandwidth_info(rdev); |
167 | } | 168 | } |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 7d9a7b0a180a..0c9c169a6852 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -195,6 +195,7 @@ void rv515_mc_init(struct radeon_device *rdev) | |||
195 | rv515_vram_get_type(rdev); | 195 | rv515_vram_get_type(rdev); |
196 | r100_vram_init_sizes(rdev); | 196 | r100_vram_init_sizes(rdev); |
197 | radeon_vram_location(rdev, &rdev->mc, 0); | 197 | radeon_vram_location(rdev, &rdev->mc, 0); |
198 | rdev->mc.gtt_base_align = 0; | ||
198 | if (!(rdev->flags & RADEON_IS_AGP)) | 199 | if (!(rdev->flags & RADEON_IS_AGP)) |
199 | radeon_gtt_location(rdev, &rdev->mc); | 200 | radeon_gtt_location(rdev, &rdev->mc); |
200 | radeon_update_bandwidth_info(rdev); | 201 | radeon_update_bandwidth_info(rdev); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 2f047577b1e3..ca904799f018 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -40,11 +40,13 @@ | |||
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | 41 | ||
42 | #include <asm/atomic.h> | 42 | #include <asm/atomic.h> |
43 | #include <asm/agp.h> | ||
44 | 43 | ||
45 | #include "ttm/ttm_bo_driver.h" | 44 | #include "ttm/ttm_bo_driver.h" |
46 | #include "ttm/ttm_page_alloc.h" | 45 | #include "ttm/ttm_page_alloc.h" |
47 | 46 | ||
47 | #ifdef TTM_HAS_AGP | ||
48 | #include <asm/agp.h> | ||
49 | #endif | ||
48 | 50 | ||
49 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) | 51 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) |
50 | #define SMALL_ALLOCATION 16 | 52 | #define SMALL_ALLOCATION 16 |
@@ -104,7 +106,6 @@ struct ttm_pool_opts { | |||
104 | struct ttm_pool_manager { | 106 | struct ttm_pool_manager { |
105 | struct kobject kobj; | 107 | struct kobject kobj; |
106 | struct shrinker mm_shrink; | 108 | struct shrinker mm_shrink; |
107 | atomic_t page_alloc_inited; | ||
108 | struct ttm_pool_opts options; | 109 | struct ttm_pool_opts options; |
109 | 110 | ||
110 | union { | 111 | union { |
@@ -142,7 +143,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj) | |||
142 | { | 143 | { |
143 | struct ttm_pool_manager *m = | 144 | struct ttm_pool_manager *m = |
144 | container_of(kobj, struct ttm_pool_manager, kobj); | 145 | container_of(kobj, struct ttm_pool_manager, kobj); |
145 | (void)m; | 146 | kfree(m); |
146 | } | 147 | } |
147 | 148 | ||
148 | static ssize_t ttm_pool_store(struct kobject *kobj, | 149 | static ssize_t ttm_pool_store(struct kobject *kobj, |
@@ -214,9 +215,7 @@ static struct kobj_type ttm_pool_kobj_type = { | |||
214 | .default_attrs = ttm_pool_attrs, | 215 | .default_attrs = ttm_pool_attrs, |
215 | }; | 216 | }; |
216 | 217 | ||
217 | static struct ttm_pool_manager _manager = { | 218 | static struct ttm_pool_manager *_manager; |
218 | .page_alloc_inited = ATOMIC_INIT(0) | ||
219 | }; | ||
220 | 219 | ||
221 | #ifndef CONFIG_X86 | 220 | #ifndef CONFIG_X86 |
222 | static int set_pages_array_wb(struct page **pages, int addrinarray) | 221 | static int set_pages_array_wb(struct page **pages, int addrinarray) |
@@ -271,7 +270,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags, | |||
271 | if (flags & TTM_PAGE_FLAG_DMA32) | 270 | if (flags & TTM_PAGE_FLAG_DMA32) |
272 | pool_index |= 0x2; | 271 | pool_index |= 0x2; |
273 | 272 | ||
274 | return &_manager.pools[pool_index]; | 273 | return &_manager->pools[pool_index]; |
275 | } | 274 | } |
276 | 275 | ||
277 | /* set memory back to wb and free the pages. */ | 276 | /* set memory back to wb and free the pages. */ |
@@ -387,7 +386,7 @@ static int ttm_pool_get_num_unused_pages(void) | |||
387 | unsigned i; | 386 | unsigned i; |
388 | int total = 0; | 387 | int total = 0; |
389 | for (i = 0; i < NUM_POOLS; ++i) | 388 | for (i = 0; i < NUM_POOLS; ++i) |
390 | total += _manager.pools[i].npages; | 389 | total += _manager->pools[i].npages; |
391 | 390 | ||
392 | return total; | 391 | return total; |
393 | } | 392 | } |
@@ -395,7 +394,7 @@ static int ttm_pool_get_num_unused_pages(void) | |||
395 | /** | 394 | /** |
396 | * Callback for mm to request pool to reduce number of page held. | 395 | * Callback for mm to request pool to reduce number of page held. |
397 | */ | 396 | */ |
398 | static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) | 397 | static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) |
399 | { | 398 | { |
400 | static atomic_t start_pool = ATOMIC_INIT(0); | 399 | static atomic_t start_pool = ATOMIC_INIT(0); |
401 | unsigned i; | 400 | unsigned i; |
@@ -408,7 +407,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) | |||
408 | unsigned nr_free = shrink_pages; | 407 | unsigned nr_free = shrink_pages; |
409 | if (shrink_pages == 0) | 408 | if (shrink_pages == 0) |
410 | break; | 409 | break; |
411 | pool = &_manager.pools[(i + pool_offset)%NUM_POOLS]; | 410 | pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; |
412 | shrink_pages = ttm_page_pool_free(pool, nr_free); | 411 | shrink_pages = ttm_page_pool_free(pool, nr_free); |
413 | } | 412 | } |
414 | /* return estimated number of unused pages in pool */ | 413 | /* return estimated number of unused pages in pool */ |
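ttm_pool_mm_shrink() above gains a struct shrinker * first parameter, matching the shrinker callback signature of this kernel generation, where the core passes the registered shrinker back to the callback. Below is a hedged kernel-style sketch of that interface, assuming the 2.6.35-era register_shrinker()/unregister_shrinker() API; the my_pool_* names are invented, not TTM functions.

/* Hedged sketch of the shrinker interface, not TTM code. */
#include <linux/mm.h>           /* struct shrinker, register_shrinker() */
#include <asm/atomic.h>

static atomic_t my_pool_pages = ATOMIC_INIT(0);   /* stand-in for pool fill */

/* nr_to_scan == 0 means "just report how much could be freed". */
static int my_pool_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
        while (nr_to_scan-- > 0 && atomic_read(&my_pool_pages) > 0)
                atomic_dec(&my_pool_pages);       /* pretend to free one page */
        return atomic_read(&my_pool_pages);       /* estimate left in the pool */
}

static struct shrinker my_pool_shrinker = {
        .shrink = my_pool_shrink,
        .seeks  = DEFAULT_SEEKS,
};

static void my_pool_shrink_init(void) { register_shrinker(&my_pool_shrinker); }
static void my_pool_shrink_fini(void) { unregister_shrinker(&my_pool_shrinker); }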
@@ -576,10 +575,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
576 | 575 | ||
577 | /* If allocation request is small and there is not enough | 576 | /* If allocation request is small and there is not enough |
578 | * pages in pool we fill the pool first */ | 577 | * pages in pool we fill the pool first */ |
579 | if (count < _manager.options.small | 578 | if (count < _manager->options.small |
580 | && count > pool->npages) { | 579 | && count > pool->npages) { |
581 | struct list_head new_pages; | 580 | struct list_head new_pages; |
582 | unsigned alloc_size = _manager.options.alloc_size; | 581 | unsigned alloc_size = _manager->options.alloc_size; |
583 | 582 | ||
584 | /** | 583 | /** |
585 | * Can't change page caching if in irqsave context. We have to | 584 | * Can't change page caching if in irqsave context. We have to |
@@ -759,8 +758,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, | |||
759 | pool->npages += page_count; | 758 | pool->npages += page_count; |
760 | /* Check that we don't go over the pool limit */ | 759 | /* Check that we don't go over the pool limit */ |
761 | page_count = 0; | 760 | page_count = 0; |
762 | if (pool->npages > _manager.options.max_size) { | 761 | if (pool->npages > _manager->options.max_size) { |
763 | page_count = pool->npages - _manager.options.max_size; | 762 | page_count = pool->npages - _manager->options.max_size; |
764 | /* free at least NUM_PAGES_TO_ALLOC number of pages | 763 | /* free at least NUM_PAGES_TO_ALLOC number of pages |
765 | * to reduce calls to set_memory_wb */ | 764 | * to reduce calls to set_memory_wb */ |
766 | if (page_count < NUM_PAGES_TO_ALLOC) | 765 | if (page_count < NUM_PAGES_TO_ALLOC) |
@@ -785,33 +784,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, | |||
785 | int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | 784 | int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) |
786 | { | 785 | { |
787 | int ret; | 786 | int ret; |
788 | if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) | 787 | |
789 | return 0; | 788 | WARN_ON(_manager); |
790 | 789 | ||
791 | printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); | 790 | printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); |
792 | 791 | ||
793 | ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); | 792 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); |
793 | |||
794 | ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); | ||
794 | 795 | ||
795 | ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc"); | 796 | ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc"); |
796 | 797 | ||
797 | ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32, | 798 | ttm_page_pool_init_locked(&_manager->wc_pool_dma32, |
798 | "wc dma"); | 799 | GFP_USER | GFP_DMA32, "wc dma"); |
799 | 800 | ||
800 | ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32, | 801 | ttm_page_pool_init_locked(&_manager->uc_pool_dma32, |
801 | "uc dma"); | 802 | GFP_USER | GFP_DMA32, "uc dma"); |
802 | 803 | ||
803 | _manager.options.max_size = max_pages; | 804 | _manager->options.max_size = max_pages; |
804 | _manager.options.small = SMALL_ALLOCATION; | 805 | _manager->options.small = SMALL_ALLOCATION; |
805 | _manager.options.alloc_size = NUM_PAGES_TO_ALLOC; | 806 | _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; |
806 | 807 | ||
807 | kobject_init(&_manager.kobj, &ttm_pool_kobj_type); | 808 | ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, |
808 | ret = kobject_add(&_manager.kobj, &glob->kobj, "pool"); | 809 | &glob->kobj, "pool"); |
809 | if (unlikely(ret != 0)) { | 810 | if (unlikely(ret != 0)) { |
810 | kobject_put(&_manager.kobj); | 811 | kobject_put(&_manager->kobj); |
812 | _manager = NULL; | ||
811 | return ret; | 813 | return ret; |
812 | } | 814 | } |
813 | 815 | ||
814 | ttm_pool_mm_shrink_init(&_manager); | 816 | ttm_pool_mm_shrink_init(_manager); |
815 | 817 | ||
816 | return 0; | 818 | return 0; |
817 | } | 819 | } |
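The init path above switches the pool manager from a static object to a kzalloc()'d one, uses kobject_init_and_add(), and drops the reference with kobject_put() on failure; together with the earlier hunk that makes ttm_pool_kobj_release() kfree() the manager, this is the standard kobject lifecycle: the embedding object lives on the heap and is freed only from its ktype's ->release(). A hedged kernel-style sketch of that lifecycle with an invented my_manager type (the sketch also adds a NULL check on the allocation as a plain defensive step):

/* Hedged sketch of the kobject lifecycle, not TTM code. */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct my_manager {
        struct kobject kobj;
        /* ... pool state ... */
};

static void my_manager_release(struct kobject *kobj)
{
        struct my_manager *m = container_of(kobj, struct my_manager, kobj);

        kfree(m);                       /* last reference is gone */
}

static struct kobj_type my_manager_ktype = {
        .release = my_manager_release,
};

static struct my_manager *my_manager_create(struct kobject *parent)
{
        struct my_manager *m = kzalloc(sizeof(*m), GFP_KERNEL);
        int ret;

        if (!m)
                return NULL;            /* defensive check, sketch only */
        ret = kobject_init_and_add(&m->kobj, &my_manager_ktype, parent, "pool");
        if (ret) {
                kobject_put(&m->kobj);  /* drops the ref; ->release() kfrees m */
                return NULL;
        }
        return m;
}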
@@ -820,16 +822,14 @@ void ttm_page_alloc_fini() | |||
820 | { | 822 | { |
821 | int i; | 823 | int i; |
822 | 824 | ||
823 | if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) | ||
824 | return; | ||
825 | |||
826 | printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); | 825 | printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); |
827 | ttm_pool_mm_shrink_fini(&_manager); | 826 | ttm_pool_mm_shrink_fini(_manager); |
828 | 827 | ||
829 | for (i = 0; i < NUM_POOLS; ++i) | 828 | for (i = 0; i < NUM_POOLS; ++i) |
830 | ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES); | 829 | ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); |
831 | 830 | ||
832 | kobject_put(&_manager.kobj); | 831 | kobject_put(&_manager->kobj); |
832 | _manager = NULL; | ||
833 | } | 833 | } |
834 | 834 | ||
835 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) | 835 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) |
@@ -837,14 +837,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data) | |||
837 | struct ttm_page_pool *p; | 837 | struct ttm_page_pool *p; |
838 | unsigned i; | 838 | unsigned i; |
839 | char *h[] = {"pool", "refills", "pages freed", "size"}; | 839 | char *h[] = {"pool", "refills", "pages freed", "size"}; |
840 | if (atomic_read(&_manager.page_alloc_inited) == 0) { | 840 | if (!_manager) { |
841 | seq_printf(m, "No pool allocator running.\n"); | 841 | seq_printf(m, "No pool allocator running.\n"); |
842 | return 0; | 842 | return 0; |
843 | } | 843 | } |
844 | seq_printf(m, "%6s %12s %13s %8s\n", | 844 | seq_printf(m, "%6s %12s %13s %8s\n", |
845 | h[0], h[1], h[2], h[3]); | 845 | h[0], h[1], h[2], h[3]); |
846 | for (i = 0; i < NUM_POOLS; ++i) { | 846 | for (i = 0; i < NUM_POOLS; ++i) { |
847 | p = &_manager.pools[i]; | 847 | p = &_manager->pools[i]; |
848 | 848 | ||
849 | seq_printf(m, "%6s %12ld %13ld %8d\n", | 849 | seq_printf(m, "%6s %12ld %13ld %8d\n", |
850 | p->name, p->nrefills, | 850 | p->name, p->nrefills, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f1d626112415..437ac786277a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -972,6 +972,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
972 | ret = copy_from_user(rects, user_rects, rects_size); | 972 | ret = copy_from_user(rects, user_rects, rects_size); |
973 | if (unlikely(ret != 0)) { | 973 | if (unlikely(ret != 0)) { |
974 | DRM_ERROR("Failed to get rects.\n"); | 974 | DRM_ERROR("Failed to get rects.\n"); |
975 | ret = -EFAULT; | ||
975 | goto out_free; | 976 | goto out_free; |
976 | } | 977 | } |
977 | 978 | ||
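The vmwgfx hunk above is the usual copy_from_user() fix-up: the helper returns the number of bytes it failed to copy, not an errno, so the caller must turn any nonzero result into -EFAULT before jumping to the error path. A minimal hedged sketch with invented names:

/* Hedged sketch of the copy_from_user() error convention, not vmwgfx code. */
#include <linux/uaccess.h>
#include <linux/errno.h>

struct my_args { unsigned int width, height; };

static int my_ioctl_handler(void __user *user_ptr)
{
        struct my_args args;

        if (copy_from_user(&args, user_ptr, sizeof(args)))
                return -EFAULT;         /* bytes left uncopied -> bad user pointer */

        /* ... use args ... */
        return 0;
}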