diff options
| author | Jiri Kosina <jkosina@suse.cz> | 2010-08-04 09:14:38 -0400 |
|---|---|---|
| committer | Jiri Kosina <jkosina@suse.cz> | 2010-08-04 09:14:38 -0400 |
| commit | d790d4d583aeaed9fc6f8a9f4d9f8ce6b1c15c7f (patch) | |
| tree | 854ab394486288d40fa8179cbfaf66e8bdc44b0f /drivers/gpu | |
| parent | 73b2c7165b76b20eb1290e7efebc33cfd21db1ca (diff) | |
| parent | 3a09b1be53d23df780a0cd0e4087a05e2ca4a00c (diff) | |
Merge branch 'master' into for-next
Diffstat (limited to 'drivers/gpu')
51 files changed, 797 insertions, 296 deletions
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index da06476f2df4..9585e531ac6b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid, | |||
| 864 | mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, | 864 | mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, |
| 865 | false); | 865 | false); |
| 866 | mode->hdisplay = 1366; | 866 | mode->hdisplay = 1366; |
| 867 | mode->vsync_start = mode->vsync_start - 1; | 867 | mode->hsync_start = mode->hsync_start - 1; |
| 868 | mode->vsync_end = mode->vsync_end - 1; | 868 | mode->hsync_end = mode->hsync_end - 1; |
| 869 | return mode; | 869 | return mode; |
| 870 | } | 870 | } |
| 871 | 871 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 08c4c926e65f..719662034bbf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -146,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn | |||
| 146 | cvt = 1; | 146 | cvt = 1; |
| 147 | break; | 147 | break; |
| 148 | case 'R': | 148 | case 'R': |
| 149 | if (!cvt) | 149 | if (cvt) |
| 150 | rb = 1; | 150 | rb = 1; |
| 151 | break; | 151 | break; |
| 152 | case 'm': | 152 | case 'm': |
| @@ -315,8 +315,9 @@ static void drm_fb_helper_on(struct fb_info *info) | |||
| 315 | struct drm_device *dev = fb_helper->dev; | 315 | struct drm_device *dev = fb_helper->dev; |
| 316 | struct drm_crtc *crtc; | 316 | struct drm_crtc *crtc; |
| 317 | struct drm_crtc_helper_funcs *crtc_funcs; | 317 | struct drm_crtc_helper_funcs *crtc_funcs; |
| 318 | struct drm_connector *connector; | ||
| 318 | struct drm_encoder *encoder; | 319 | struct drm_encoder *encoder; |
| 319 | int i; | 320 | int i, j; |
| 320 | 321 | ||
| 321 | /* | 322 | /* |
| 322 | * For each CRTC in this fb, turn the crtc on then, | 323 | * For each CRTC in this fb, turn the crtc on then, |
| @@ -332,7 +333,14 @@ static void drm_fb_helper_on(struct fb_info *info) | |||
| 332 | 333 | ||
| 333 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | 334 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); |
| 334 | 335 | ||
| 335 | 336 | /* Walk the connectors & encoders on this fb turning them on */ | |
| 337 | for (j = 0; j < fb_helper->connector_count; j++) { | ||
| 338 | connector = fb_helper->connector_info[j]->connector; | ||
| 339 | connector->dpms = DRM_MODE_DPMS_ON; | ||
| 340 | drm_connector_property_set_value(connector, | ||
| 341 | dev->mode_config.dpms_property, | ||
| 342 | DRM_MODE_DPMS_ON); | ||
| 343 | } | ||
| 336 | /* Found a CRTC on this fb, now find encoders */ | 344 | /* Found a CRTC on this fb, now find encoders */ |
| 337 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 345 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| 338 | if (encoder->crtc == crtc) { | 346 | if (encoder->crtc == crtc) { |
| @@ -352,8 +360,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
| 352 | struct drm_device *dev = fb_helper->dev; | 360 | struct drm_device *dev = fb_helper->dev; |
| 353 | struct drm_crtc *crtc; | 361 | struct drm_crtc *crtc; |
| 354 | struct drm_crtc_helper_funcs *crtc_funcs; | 362 | struct drm_crtc_helper_funcs *crtc_funcs; |
| 363 | struct drm_connector *connector; | ||
| 355 | struct drm_encoder *encoder; | 364 | struct drm_encoder *encoder; |
| 356 | int i; | 365 | int i, j; |
| 357 | 366 | ||
| 358 | /* | 367 | /* |
| 359 | * For each CRTC in this fb, find all associated encoders | 368 | * For each CRTC in this fb, find all associated encoders |
| @@ -367,6 +376,14 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
| 367 | if (!crtc->enabled) | 376 | if (!crtc->enabled) |
| 368 | continue; | 377 | continue; |
| 369 | 378 | ||
| 379 | /* Walk the connectors on this fb and mark them off */ | ||
| 380 | for (j = 0; j < fb_helper->connector_count; j++) { | ||
| 381 | connector = fb_helper->connector_info[j]->connector; | ||
| 382 | connector->dpms = dpms_mode; | ||
| 383 | drm_connector_property_set_value(connector, | ||
| 384 | dev->mode_config.dpms_property, | ||
| 385 | dpms_mode); | ||
| 386 | } | ||
| 370 | /* Found a CRTC on this fb, now find encoders */ | 387 | /* Found a CRTC on this fb, now find encoders */ |
| 371 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 388 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| 372 | if (encoder->crtc == crtc) { | 389 | if (encoder->crtc == crtc) { |
| @@ -1024,11 +1041,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne | |||
| 1024 | } | 1041 | } |
| 1025 | 1042 | ||
| 1026 | create_mode: | 1043 | create_mode: |
| 1027 | mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, | 1044 | if (cmdline_mode->cvt) |
| 1028 | cmdline_mode->yres, | 1045 | mode = drm_cvt_mode(fb_helper_conn->connector->dev, |
| 1029 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, | 1046 | cmdline_mode->xres, cmdline_mode->yres, |
| 1030 | cmdline_mode->rb, cmdline_mode->interlace, | 1047 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, |
| 1031 | cmdline_mode->margins); | 1048 | cmdline_mode->rb, cmdline_mode->interlace, |
| 1049 | cmdline_mode->margins); | ||
| 1050 | else | ||
| 1051 | mode = drm_gtf_mode(fb_helper_conn->connector->dev, | ||
| 1052 | cmdline_mode->xres, cmdline_mode->yres, | ||
| 1053 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, | ||
| 1054 | cmdline_mode->interlace, | ||
| 1055 | cmdline_mode->margins); | ||
| 1032 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 1056 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
| 1033 | list_add(&mode->head, &fb_helper_conn->connector->modes); | 1057 | list_add(&mode->head, &fb_helper_conn->connector->modes); |
| 1034 | return mode; | 1058 | return mode; |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 66c697bc9b22..56f66426207f 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c | |||
| @@ -208,7 +208,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) | |||
| 208 | uint8_t ctl2; | 208 | uint8_t ctl2; |
| 209 | 209 | ||
| 210 | if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { | 210 | if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { |
| 211 | if (ctl2 & TFP410_CTL_2_HTPLG) | 211 | if (ctl2 & TFP410_CTL_2_RSEN) |
| 212 | ret = connector_status_connected; | 212 | ret = connector_status_connected; |
| 213 | else | 213 | else |
| 214 | ret = connector_status_disconnected; | 214 | ret = connector_status_disconnected; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 52510ad8b25d..9214119c0154 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) | |||
| 605 | case FBC_NOT_TILED: | 605 | case FBC_NOT_TILED: |
| 606 | seq_printf(m, "scanout buffer not tiled"); | 606 | seq_printf(m, "scanout buffer not tiled"); |
| 607 | break; | 607 | break; |
| 608 | case FBC_MULTIPLE_PIPES: | ||
| 609 | seq_printf(m, "multiple pipes are enabled"); | ||
| 610 | break; | ||
| 608 | default: | 611 | default: |
| 609 | seq_printf(m, "unknown reason"); | 612 | seq_printf(m, "unknown reason"); |
| 610 | } | 613 | } |
| @@ -620,7 +623,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) | |||
| 620 | drm_i915_private_t *dev_priv = dev->dev_private; | 623 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 621 | bool sr_enabled = false; | 624 | bool sr_enabled = false; |
| 622 | 625 | ||
| 623 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev)) | 626 | if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) |
| 624 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; | 627 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; |
| 625 | else if (IS_I915GM(dev)) | 628 | else if (IS_I915GM(dev)) |
| 626 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; | 629 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 59a2bf8592ec..2305a1234f1e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -128,9 +128,11 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
| 128 | if (dev->irq_enabled) | 128 | if (dev->irq_enabled) |
| 129 | drm_irq_uninstall(dev); | 129 | drm_irq_uninstall(dev); |
| 130 | 130 | ||
| 131 | mutex_lock(&dev->struct_mutex); | ||
| 131 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 132 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); |
| 132 | if (HAS_BSD(dev)) | 133 | if (HAS_BSD(dev)) |
| 133 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | 134 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); |
| 135 | mutex_unlock(&dev->struct_mutex); | ||
| 134 | 136 | ||
| 135 | /* Clear the HWS virtual address at teardown */ | 137 | /* Clear the HWS virtual address at teardown */ |
| 136 | if (I915_NEED_GFX_HWS(dev)) | 138 | if (I915_NEED_GFX_HWS(dev)) |
| @@ -1229,7 +1231,7 @@ static void i915_warn_stolen(struct drm_device *dev) | |||
| 1229 | static void i915_setup_compression(struct drm_device *dev, int size) | 1231 | static void i915_setup_compression(struct drm_device *dev, int size) |
| 1230 | { | 1232 | { |
| 1231 | struct drm_i915_private *dev_priv = dev->dev_private; | 1233 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1232 | struct drm_mm_node *compressed_fb, *compressed_llb; | 1234 | struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); |
| 1233 | unsigned long cfb_base; | 1235 | unsigned long cfb_base; |
| 1234 | unsigned long ll_base = 0; | 1236 | unsigned long ll_base = 0; |
| 1235 | 1237 | ||
| @@ -1298,7 +1300,7 @@ static void i915_cleanup_compression(struct drm_device *dev) | |||
| 1298 | struct drm_i915_private *dev_priv = dev->dev_private; | 1300 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1299 | 1301 | ||
| 1300 | drm_mm_put_block(dev_priv->compressed_fb); | 1302 | drm_mm_put_block(dev_priv->compressed_fb); |
| 1301 | if (!IS_GM45(dev)) | 1303 | if (dev_priv->compressed_llb) |
| 1302 | drm_mm_put_block(dev_priv->compressed_llb); | 1304 | drm_mm_put_block(dev_priv->compressed_llb); |
| 1303 | } | 1305 | } |
| 1304 | 1306 | ||
| @@ -1410,6 +1412,10 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
| 1410 | if (ret) | 1412 | if (ret) |
| 1411 | goto cleanup_vga_client; | 1413 | goto cleanup_vga_client; |
| 1412 | 1414 | ||
| 1415 | /* IIR "flip pending" bit means done if this bit is set */ | ||
| 1416 | if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) | ||
| 1417 | dev_priv->flip_pending_is_done = true; | ||
| 1418 | |||
| 1413 | intel_modeset_init(dev); | 1419 | intel_modeset_init(dev); |
| 1414 | 1420 | ||
| 1415 | ret = drm_irq_install(dev); | 1421 | ret = drm_irq_install(dev); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 276583159847..2e1744d37ad5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -215,6 +215,7 @@ enum no_fbc_reason { | |||
| 215 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ | 215 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ |
| 216 | FBC_BAD_PLANE, /* fbc not supported on plane */ | 216 | FBC_BAD_PLANE, /* fbc not supported on plane */ |
| 217 | FBC_NOT_TILED, /* buffer not tiled */ | 217 | FBC_NOT_TILED, /* buffer not tiled */ |
| 218 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ | ||
| 218 | }; | 219 | }; |
| 219 | 220 | ||
| 220 | enum intel_pch { | 221 | enum intel_pch { |
| @@ -222,6 +223,8 @@ enum intel_pch { | |||
| 222 | PCH_CPT, /* Cougarpoint PCH */ | 223 | PCH_CPT, /* Cougarpoint PCH */ |
| 223 | }; | 224 | }; |
| 224 | 225 | ||
| 226 | #define QUIRK_PIPEA_FORCE (1<<0) | ||
| 227 | |||
| 225 | struct intel_fbdev; | 228 | struct intel_fbdev; |
| 226 | 229 | ||
| 227 | typedef struct drm_i915_private { | 230 | typedef struct drm_i915_private { |
| @@ -337,6 +340,8 @@ typedef struct drm_i915_private { | |||
| 337 | /* PCH chipset type */ | 340 | /* PCH chipset type */ |
| 338 | enum intel_pch pch_type; | 341 | enum intel_pch pch_type; |
| 339 | 342 | ||
| 343 | unsigned long quirks; | ||
| 344 | |||
| 340 | /* Register state */ | 345 | /* Register state */ |
| 341 | bool modeset_on_lid; | 346 | bool modeset_on_lid; |
| 342 | u8 saveLBB; | 347 | u8 saveLBB; |
| @@ -596,6 +601,7 @@ typedef struct drm_i915_private { | |||
| 596 | struct drm_crtc *plane_to_crtc_mapping[2]; | 601 | struct drm_crtc *plane_to_crtc_mapping[2]; |
| 597 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 602 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
| 598 | wait_queue_head_t pending_flip_queue; | 603 | wait_queue_head_t pending_flip_queue; |
| 604 | bool flip_pending_is_done; | ||
| 599 | 605 | ||
| 600 | /* Reclocking support */ | 606 | /* Reclocking support */ |
| 601 | bool render_reclock_avail; | 607 | bool render_reclock_avail; |
| @@ -1076,7 +1082,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | |||
| 1076 | drm_i915_private_t *dev_priv = dev->dev_private; \ | 1082 | drm_i915_private_t *dev_priv = dev->dev_private; \ |
| 1077 | if (I915_VERBOSE) \ | 1083 | if (I915_VERBOSE) \ |
| 1078 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ | 1084 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ |
| 1079 | intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \ | 1085 | intel_ring_begin(dev, &dev_priv->render_ring, (n)); \ |
| 1080 | } while (0) | 1086 | } while (0) |
| 1081 | 1087 | ||
| 1082 | 1088 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9ded3dae6c87..5aa747fc25a9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2239,8 +2239,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | |||
| 2239 | mapping = inode->i_mapping; | 2239 | mapping = inode->i_mapping; |
| 2240 | for (i = 0; i < page_count; i++) { | 2240 | for (i = 0; i < page_count; i++) { |
| 2241 | page = read_cache_page_gfp(mapping, i, | 2241 | page = read_cache_page_gfp(mapping, i, |
| 2242 | mapping_gfp_mask (mapping) | | 2242 | GFP_HIGHUSER | |
| 2243 | __GFP_COLD | | 2243 | __GFP_COLD | |
| 2244 | __GFP_RECLAIMABLE | | ||
| 2244 | gfpmask); | 2245 | gfpmask); |
| 2245 | if (IS_ERR(page)) | 2246 | if (IS_ERR(page)) |
| 2246 | goto err_pages; | 2247 | goto err_pages; |
| @@ -3646,6 +3647,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
| 3646 | return ret; | 3647 | return ret; |
| 3647 | } | 3648 | } |
| 3648 | 3649 | ||
| 3650 | |||
| 3649 | int | 3651 | int |
| 3650 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 3652 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
| 3651 | struct drm_file *file_priv, | 3653 | struct drm_file *file_priv, |
| @@ -3793,7 +3795,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 3793 | unsigned long long total_size = 0; | 3795 | unsigned long long total_size = 0; |
| 3794 | int num_fences = 0; | 3796 | int num_fences = 0; |
| 3795 | for (i = 0; i < args->buffer_count; i++) { | 3797 | for (i = 0; i < args->buffer_count; i++) { |
| 3796 | obj_priv = object_list[i]->driver_private; | 3798 | obj_priv = to_intel_bo(object_list[i]); |
| 3797 | 3799 | ||
| 3798 | total_size += object_list[i]->size; | 3800 | total_size += object_list[i]->size; |
| 3799 | num_fences += | 3801 | num_fences += |
| @@ -4741,6 +4743,16 @@ i915_gem_load(struct drm_device *dev) | |||
| 4741 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | 4743 | list_add(&dev_priv->mm.shrink_list, &shrink_list); |
| 4742 | spin_unlock(&shrink_list_lock); | 4744 | spin_unlock(&shrink_list_lock); |
| 4743 | 4745 | ||
| 4746 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | ||
| 4747 | if (IS_GEN3(dev)) { | ||
| 4748 | u32 tmp = I915_READ(MI_ARB_STATE); | ||
| 4749 | if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { | ||
| 4750 | /* arb state is a masked write, so set bit + bit in mask */ | ||
| 4751 | tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); | ||
| 4752 | I915_WRITE(MI_ARB_STATE, tmp); | ||
| 4753 | } | ||
| 4754 | } | ||
| 4755 | |||
| 4744 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 4756 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
| 4745 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4757 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 4746 | dev_priv->fence_reg_start = 3; | 4758 | dev_priv->fence_reg_start = 3; |
| @@ -4977,7 +4989,7 @@ i915_gpu_is_active(struct drm_device *dev) | |||
| 4977 | } | 4989 | } |
| 4978 | 4990 | ||
| 4979 | static int | 4991 | static int |
| 4980 | i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) | 4992 | i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) |
| 4981 | { | 4993 | { |
| 4982 | drm_i915_private_t *dev_priv, *next_dev; | 4994 | drm_i915_private_t *dev_priv, *next_dev; |
| 4983 | struct drm_i915_gem_object *obj_priv, *next_obj; | 4995 | struct drm_i915_gem_object *obj_priv, *next_obj; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2479be001e40..dba53d4b9fb3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -940,22 +940,30 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
| 940 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) | 940 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) |
| 941 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | 941 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); |
| 942 | 942 | ||
| 943 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) | 943 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { |
| 944 | intel_prepare_page_flip(dev, 0); | 944 | intel_prepare_page_flip(dev, 0); |
| 945 | if (dev_priv->flip_pending_is_done) | ||
| 946 | intel_finish_page_flip_plane(dev, 0); | ||
| 947 | } | ||
| 945 | 948 | ||
| 946 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) | 949 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { |
| 947 | intel_prepare_page_flip(dev, 1); | 950 | intel_prepare_page_flip(dev, 1); |
| 951 | if (dev_priv->flip_pending_is_done) | ||
| 952 | intel_finish_page_flip_plane(dev, 1); | ||
| 953 | } | ||
| 948 | 954 | ||
| 949 | if (pipea_stats & vblank_status) { | 955 | if (pipea_stats & vblank_status) { |
| 950 | vblank++; | 956 | vblank++; |
| 951 | drm_handle_vblank(dev, 0); | 957 | drm_handle_vblank(dev, 0); |
| 952 | intel_finish_page_flip(dev, 0); | 958 | if (!dev_priv->flip_pending_is_done) |
| 959 | intel_finish_page_flip(dev, 0); | ||
| 953 | } | 960 | } |
| 954 | 961 | ||
| 955 | if (pipeb_stats & vblank_status) { | 962 | if (pipeb_stats & vblank_status) { |
| 956 | vblank++; | 963 | vblank++; |
| 957 | drm_handle_vblank(dev, 1); | 964 | drm_handle_vblank(dev, 1); |
| 958 | intel_finish_page_flip(dev, 1); | 965 | if (!dev_priv->flip_pending_is_done) |
| 966 | intel_finish_page_flip(dev, 1); | ||
| 959 | } | 967 | } |
| 960 | 968 | ||
| 961 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || | 969 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || |
| @@ -1387,29 +1395,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
| 1387 | dev_priv->pipestat[1] = 0; | 1395 | dev_priv->pipestat[1] = 0; |
| 1388 | 1396 | ||
| 1389 | if (I915_HAS_HOTPLUG(dev)) { | 1397 | if (I915_HAS_HOTPLUG(dev)) { |
| 1390 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||
| 1391 | |||
| 1392 | /* Note HDMI and DP share bits */ | ||
| 1393 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||
| 1394 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
| 1395 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
| 1396 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
| 1397 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
| 1398 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
| 1399 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
| 1400 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
| 1401 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
| 1402 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
| 1403 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
| 1404 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
| 1405 | /* Ignore TV since it's buggy */ | ||
| 1406 | |||
| 1407 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
| 1408 | |||
| 1409 | /* Enable in IER... */ | 1398 | /* Enable in IER... */ |
| 1410 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1399 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
| 1411 | /* and unmask in IMR */ | 1400 | /* and unmask in IMR */ |
| 1412 | i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); | 1401 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; |
| 1413 | } | 1402 | } |
| 1414 | 1403 | ||
| 1415 | /* | 1404 | /* |
| @@ -1427,16 +1416,41 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
| 1427 | } | 1416 | } |
| 1428 | I915_WRITE(EMR, error_mask); | 1417 | I915_WRITE(EMR, error_mask); |
| 1429 | 1418 | ||
| 1430 | /* Disable pipe interrupt enables, clear pending pipe status */ | ||
| 1431 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | ||
| 1432 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | ||
| 1433 | /* Clear pending interrupt status */ | ||
| 1434 | I915_WRITE(IIR, I915_READ(IIR)); | ||
| 1435 | |||
| 1436 | I915_WRITE(IER, enable_mask); | ||
| 1437 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 1419 | I915_WRITE(IMR, dev_priv->irq_mask_reg); |
| 1420 | I915_WRITE(IER, enable_mask); | ||
| 1438 | (void) I915_READ(IER); | 1421 | (void) I915_READ(IER); |
| 1439 | 1422 | ||
| 1423 | if (I915_HAS_HOTPLUG(dev)) { | ||
| 1424 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||
| 1425 | |||
| 1426 | /* Note HDMI and DP share bits */ | ||
| 1427 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||
| 1428 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
| 1429 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
| 1430 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
| 1431 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
| 1432 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
| 1433 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
| 1434 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
| 1435 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
| 1436 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
| 1437 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | ||
| 1438 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
| 1439 | |||
| 1440 | /* Programming the CRT detection parameters tends | ||
| 1441 | to generate a spurious hotplug event about three | ||
| 1442 | seconds later. So just do it once. | ||
| 1443 | */ | ||
| 1444 | if (IS_G4X(dev)) | ||
| 1445 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
| 1446 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
| 1447 | } | ||
| 1448 | |||
| 1449 | /* Ignore TV since it's buggy */ | ||
| 1450 | |||
| 1451 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
| 1452 | } | ||
| 1453 | |||
| 1440 | opregion_enable_asle(dev); | 1454 | opregion_enable_asle(dev); |
| 1441 | 1455 | ||
| 1442 | return 0; | 1456 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 64b0a3afd92b..cf41c672defe 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -178,6 +178,7 @@ | |||
| 178 | #define MI_OVERLAY_OFF (0x2<<21) | 178 | #define MI_OVERLAY_OFF (0x2<<21) |
| 179 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) | 179 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) |
| 180 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | 180 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) |
| 181 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) | ||
| 181 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | 182 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) |
| 182 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 183 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
| 183 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 184 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
| @@ -358,6 +359,70 @@ | |||
| 358 | #define LM_BURST_LENGTH 0x00000700 | 359 | #define LM_BURST_LENGTH 0x00000700 |
| 359 | #define LM_FIFO_WATERMARK 0x0000001F | 360 | #define LM_FIFO_WATERMARK 0x0000001F |
| 360 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ | 361 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ |
| 362 | #define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */ | ||
| 363 | |||
| 364 | /* Make render/texture TLB fetches lower priority than associated data | ||
| 365 | * fetches. This is not turned on by default | ||
| 366 | */ | ||
| 367 | #define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) | ||
| 368 | |||
| 369 | /* Isoch request wait on GTT enable (Display A/B/C streams). | ||
| 370 | * Make isoch requests stall on the TLB update. May cause | ||
| 371 | * display underruns (test mode only) | ||
| 372 | */ | ||
| 373 | #define MI_ARB_ISOCH_WAIT_GTT (1 << 14) | ||
| 374 | |||
| 375 | /* Block grant count for isoch requests when block count is | ||
| 376 | * set to a finite value. | ||
| 377 | */ | ||
| 378 | #define MI_ARB_BLOCK_GRANT_MASK (3 << 12) | ||
| 379 | #define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ | ||
| 380 | #define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ | ||
| 381 | #define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ | ||
| 382 | #define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ | ||
| 383 | |||
| 384 | /* Enable render writes to complete in C2/C3/C4 power states. | ||
| 385 | * If this isn't enabled, render writes are prevented in low | ||
| 386 | * power states. That seems bad to me. | ||
| 387 | */ | ||
| 388 | #define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) | ||
| 389 | |||
| 390 | /* This acknowledges an async flip immediately instead | ||
| 391 | * of waiting for 2TLB fetches. | ||
| 392 | */ | ||
| 393 | #define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) | ||
| 394 | |||
| 395 | /* Enables non-sequential data reads through arbiter | ||
| 396 | */ | ||
| 397 | #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) | ||
| 398 | |||
| 399 | /* Disable FSB snooping of cacheable write cycles from binner/render | ||
| 400 | * command stream | ||
| 401 | */ | ||
| 402 | #define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) | ||
| 403 | |||
| 404 | /* Arbiter time slice for non-isoch streams */ | ||
| 405 | #define MI_ARB_TIME_SLICE_MASK (7 << 5) | ||
| 406 | #define MI_ARB_TIME_SLICE_1 (0 << 5) | ||
| 407 | #define MI_ARB_TIME_SLICE_2 (1 << 5) | ||
| 408 | #define MI_ARB_TIME_SLICE_4 (2 << 5) | ||
| 409 | #define MI_ARB_TIME_SLICE_6 (3 << 5) | ||
| 410 | #define MI_ARB_TIME_SLICE_8 (4 << 5) | ||
| 411 | #define MI_ARB_TIME_SLICE_10 (5 << 5) | ||
| 412 | #define MI_ARB_TIME_SLICE_14 (6 << 5) | ||
| 413 | #define MI_ARB_TIME_SLICE_16 (7 << 5) | ||
| 414 | |||
| 415 | /* Low priority grace period page size */ | ||
| 416 | #define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ | ||
| 417 | #define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) | ||
| 418 | |||
| 419 | /* Disable display A/B trickle feed */ | ||
| 420 | #define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) | ||
| 421 | |||
| 422 | /* Set display plane priority */ | ||
| 423 | #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ | ||
| 424 | #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ | ||
| 425 | |||
| 361 | #define CACHE_MODE_0 0x02120 /* 915+ only */ | 426 | #define CACHE_MODE_0 0x02120 /* 915+ only */ |
| 362 | #define CM0_MASK_SHIFT 16 | 427 | #define CM0_MASK_SHIFT 16 |
| 363 | #define CM0_IZ_OPT_DISABLE (1<<6) | 428 | #define CM0_IZ_OPT_DISABLE (1<<6) |
| @@ -368,6 +433,9 @@ | |||
| 368 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 433 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
| 369 | #define BB_ADDR 0x02140 /* 8 bytes */ | 434 | #define BB_ADDR 0x02140 /* 8 bytes */ |
| 370 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 435 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
| 436 | #define ECOSKPD 0x021d0 | ||
| 437 | #define ECO_GATING_CX_ONLY (1<<3) | ||
| 438 | #define ECO_FLIP_DONE (1<<0) | ||
| 371 | 439 | ||
| 372 | /* GEN6 interrupt control */ | 440 | /* GEN6 interrupt control */ |
| 373 | #define GEN6_RENDER_HWSTAM 0x2098 | 441 | #define GEN6_RENDER_HWSTAM 0x2098 |
| @@ -1130,7 +1198,6 @@ | |||
| 1130 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) | 1198 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) |
| 1131 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) | 1199 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) |
| 1132 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 1200 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
| 1133 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | ||
| 1134 | 1201 | ||
| 1135 | #define PORT_HOTPLUG_STAT 0x61114 | 1202 | #define PORT_HOTPLUG_STAT 0x61114 |
| 1136 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 1203 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
| @@ -2802,6 +2869,7 @@ | |||
| 2802 | 2869 | ||
| 2803 | #define PCH_PP_STATUS 0xc7200 | 2870 | #define PCH_PP_STATUS 0xc7200 |
| 2804 | #define PCH_PP_CONTROL 0xc7204 | 2871 | #define PCH_PP_CONTROL 0xc7204 |
| 2872 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | ||
| 2805 | #define EDP_FORCE_VDD (1 << 3) | 2873 | #define EDP_FORCE_VDD (1 << 3) |
| 2806 | #define EDP_BLC_ENABLE (1 << 2) | 2874 | #define EDP_BLC_ENABLE (1 << 2) |
| 2807 | #define PANEL_POWER_RESET (1 << 1) | 2875 | #define PANEL_POWER_RESET (1 << 1) |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 22ff38455731..ee0732b222a1 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -234,14 +234,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
| 234 | else | 234 | else |
| 235 | tries = 1; | 235 | tries = 1; |
| 236 | hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); | 236 | hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); |
| 237 | hotplug_en &= CRT_HOTPLUG_MASK; | ||
| 238 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; | 237 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; |
| 239 | 238 | ||
| 240 | if (IS_G4X(dev)) | ||
| 241 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
| 242 | |||
| 243 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
| 244 | |||
| 245 | for (i = 0; i < tries ; i++) { | 239 | for (i = 0; i < tries ; i++) { |
| 246 | unsigned long timeout; | 240 | unsigned long timeout; |
| 247 | /* turn on the FORCE_DETECT */ | 241 | /* turn on the FORCE_DETECT */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index cc8131ff319f..5e21b3119824 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -862,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
| 862 | intel_clock_t clock; | 862 | intel_clock_t clock; |
| 863 | int max_n; | 863 | int max_n; |
| 864 | bool found; | 864 | bool found; |
| 865 | /* approximately equals target * 0.00488 */ | 865 | /* approximately equals target * 0.00585 */ |
| 866 | int err_most = (target >> 8) + (target >> 10); | 866 | int err_most = (target >> 8) + (target >> 9); |
| 867 | found = false; | 867 | found = false; |
| 868 | 868 | ||
| 869 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 869 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
| @@ -1180,8 +1180,12 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
| 1180 | struct drm_framebuffer *fb = crtc->fb; | 1180 | struct drm_framebuffer *fb = crtc->fb; |
| 1181 | struct intel_framebuffer *intel_fb; | 1181 | struct intel_framebuffer *intel_fb; |
| 1182 | struct drm_i915_gem_object *obj_priv; | 1182 | struct drm_i915_gem_object *obj_priv; |
| 1183 | struct drm_crtc *tmp_crtc; | ||
| 1183 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1184 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1184 | int plane = intel_crtc->plane; | 1185 | int plane = intel_crtc->plane; |
| 1186 | int crtcs_enabled = 0; | ||
| 1187 | |||
| 1188 | DRM_DEBUG_KMS("\n"); | ||
| 1185 | 1189 | ||
| 1186 | if (!i915_powersave) | 1190 | if (!i915_powersave) |
| 1187 | return; | 1191 | return; |
| @@ -1199,10 +1203,21 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
| 1199 | * If FBC is already on, we just have to verify that we can | 1203 | * If FBC is already on, we just have to verify that we can |
| 1200 | * keep it that way... | 1204 | * keep it that way... |
| 1201 | * Need to disable if: | 1205 | * Need to disable if: |
| 1206 | * - more than one pipe is active | ||
| 1202 | * - changing FBC params (stride, fence, mode) | 1207 | * - changing FBC params (stride, fence, mode) |
| 1203 | * - new fb is too large to fit in compressed buffer | 1208 | * - new fb is too large to fit in compressed buffer |
| 1204 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1209 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
| 1205 | */ | 1210 | */ |
| 1211 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | ||
| 1212 | if (tmp_crtc->enabled) | ||
| 1213 | crtcs_enabled++; | ||
| 1214 | } | ||
| 1215 | DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); | ||
| 1216 | if (crtcs_enabled > 1) { | ||
| 1217 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
| 1218 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | ||
| 1219 | goto out_disable; | ||
| 1220 | } | ||
| 1206 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1221 | if (intel_fb->obj->size > dev_priv->cfb_size) { |
| 1207 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1222 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
| 1208 | "compression\n"); | 1223 | "compression\n"); |
| @@ -1255,7 +1270,7 @@ out_disable: | |||
| 1255 | } | 1270 | } |
| 1256 | } | 1271 | } |
| 1257 | 1272 | ||
| 1258 | static int | 1273 | int |
| 1259 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1274 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) |
| 1260 | { | 1275 | { |
| 1261 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1276 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| @@ -2255,6 +2270,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 2255 | intel_wait_for_vblank(dev); | 2270 | intel_wait_for_vblank(dev); |
| 2256 | } | 2271 | } |
| 2257 | 2272 | ||
| 2273 | /* Don't disable pipe A or pipe A PLLs if needed */ | ||
| 2274 | if (pipeconf_reg == PIPEACONF && | ||
| 2275 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
| 2276 | goto skip_pipe_off; | ||
| 2277 | |||
| 2258 | /* Next, disable display pipes */ | 2278 | /* Next, disable display pipes */ |
| 2259 | temp = I915_READ(pipeconf_reg); | 2279 | temp = I915_READ(pipeconf_reg); |
| 2260 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2280 | if ((temp & PIPEACONF_ENABLE) != 0) { |
| @@ -2270,7 +2290,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 2270 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2290 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); |
| 2271 | I915_READ(dpll_reg); | 2291 | I915_READ(dpll_reg); |
| 2272 | } | 2292 | } |
| 2273 | 2293 | skip_pipe_off: | |
| 2274 | /* Wait for the clocks to turn off. */ | 2294 | /* Wait for the clocks to turn off. */ |
| 2275 | udelay(150); | 2295 | udelay(150); |
| 2276 | break; | 2296 | break; |
| @@ -2356,8 +2376,6 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
| 2356 | if (mode->clock * 3 > 27000 * 4) | 2376 | if (mode->clock * 3 > 27000 * 4) |
| 2357 | return MODE_CLOCK_HIGH; | 2377 | return MODE_CLOCK_HIGH; |
| 2358 | } | 2378 | } |
| 2359 | |||
| 2360 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 2361 | return true; | 2379 | return true; |
| 2362 | } | 2380 | } |
| 2363 | 2381 | ||
| @@ -2970,11 +2988,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
| 2970 | if (srwm < 0) | 2988 | if (srwm < 0) |
| 2971 | srwm = 1; | 2989 | srwm = 1; |
| 2972 | srwm &= 0x3f; | 2990 | srwm &= 0x3f; |
| 2973 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2991 | if (IS_I965GM(dev)) |
| 2992 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
| 2974 | } else { | 2993 | } else { |
| 2975 | /* Turn off self refresh if both pipes are enabled */ | 2994 | /* Turn off self refresh if both pipes are enabled */ |
| 2976 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | 2995 | if (IS_I965GM(dev)) |
| 2977 | & ~FW_BLC_SELF_EN); | 2996 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) |
| 2997 | & ~FW_BLC_SELF_EN); | ||
| 2978 | } | 2998 | } |
| 2979 | 2999 | ||
| 2980 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | 3000 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
| @@ -3734,6 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 3734 | if (dev_priv->lvds_dither) { | 3754 | if (dev_priv->lvds_dither) { |
| 3735 | if (HAS_PCH_SPLIT(dev)) { | 3755 | if (HAS_PCH_SPLIT(dev)) { |
| 3736 | pipeconf |= PIPE_ENABLE_DITHER; | 3756 | pipeconf |= PIPE_ENABLE_DITHER; |
| 3757 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
| 3737 | pipeconf |= PIPE_DITHER_TYPE_ST01; | 3758 | pipeconf |= PIPE_DITHER_TYPE_ST01; |
| 3738 | } else | 3759 | } else |
| 3739 | lvds |= LVDS_ENABLE_DITHER; | 3760 | lvds |= LVDS_ENABLE_DITHER; |
| @@ -4410,7 +4431,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
| 4410 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 4431 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
| 4411 | 4432 | ||
| 4412 | /* Unlock panel regs */ | 4433 | /* Unlock panel regs */ |
| 4413 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4434 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
| 4435 | PANEL_UNLOCK_REGS); | ||
| 4414 | 4436 | ||
| 4415 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 4437 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
| 4416 | I915_WRITE(dpll_reg, dpll); | 4438 | I915_WRITE(dpll_reg, dpll); |
| @@ -4453,7 +4475,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
| 4453 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); | 4475 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
| 4454 | 4476 | ||
| 4455 | /* Unlock panel regs */ | 4477 | /* Unlock panel regs */ |
| 4456 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4478 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
| 4479 | PANEL_UNLOCK_REGS); | ||
| 4457 | 4480 | ||
| 4458 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 4481 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
| 4459 | I915_WRITE(dpll_reg, dpll); | 4482 | I915_WRITE(dpll_reg, dpll); |
| @@ -4483,6 +4506,7 @@ static void intel_idle_update(struct work_struct *work) | |||
| 4483 | struct drm_device *dev = dev_priv->dev; | 4506 | struct drm_device *dev = dev_priv->dev; |
| 4484 | struct drm_crtc *crtc; | 4507 | struct drm_crtc *crtc; |
| 4485 | struct intel_crtc *intel_crtc; | 4508 | struct intel_crtc *intel_crtc; |
| 4509 | int enabled = 0; | ||
| 4486 | 4510 | ||
| 4487 | if (!i915_powersave) | 4511 | if (!i915_powersave) |
| 4488 | return; | 4512 | return; |
| @@ -4491,21 +4515,22 @@ static void intel_idle_update(struct work_struct *work) | |||
| 4491 | 4515 | ||
| 4492 | i915_update_gfx_val(dev_priv); | 4516 | i915_update_gfx_val(dev_priv); |
| 4493 | 4517 | ||
| 4494 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
| 4495 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
| 4496 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
| 4497 | } | ||
| 4498 | |||
| 4499 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4518 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 4500 | /* Skip inactive CRTCs */ | 4519 | /* Skip inactive CRTCs */ |
| 4501 | if (!crtc->fb) | 4520 | if (!crtc->fb) |
| 4502 | continue; | 4521 | continue; |
| 4503 | 4522 | ||
| 4523 | enabled++; | ||
| 4504 | intel_crtc = to_intel_crtc(crtc); | 4524 | intel_crtc = to_intel_crtc(crtc); |
| 4505 | if (!intel_crtc->busy) | 4525 | if (!intel_crtc->busy) |
| 4506 | intel_decrease_pllclock(crtc); | 4526 | intel_decrease_pllclock(crtc); |
| 4507 | } | 4527 | } |
| 4508 | 4528 | ||
| 4529 | if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { | ||
| 4530 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
| 4531 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
| 4532 | } | ||
| 4533 | |||
| 4509 | mutex_unlock(&dev->struct_mutex); | 4534 | mutex_unlock(&dev->struct_mutex); |
| 4510 | } | 4535 | } |
| 4511 | 4536 | ||
| @@ -4601,10 +4626,10 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
| 4601 | kfree(work); | 4626 | kfree(work); |
| 4602 | } | 4627 | } |
| 4603 | 4628 | ||
| 4604 | void intel_finish_page_flip(struct drm_device *dev, int pipe) | 4629 | static void do_intel_finish_page_flip(struct drm_device *dev, |
| 4630 | struct drm_crtc *crtc) | ||
| 4605 | { | 4631 | { |
| 4606 | drm_i915_private_t *dev_priv = dev->dev_private; | 4632 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 4607 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 4608 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4633 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 4609 | struct intel_unpin_work *work; | 4634 | struct intel_unpin_work *work; |
| 4610 | struct drm_i915_gem_object *obj_priv; | 4635 | struct drm_i915_gem_object *obj_priv; |
| @@ -4648,6 +4673,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
| 4648 | schedule_work(&work->work); | 4673 | schedule_work(&work->work); |
| 4649 | } | 4674 | } |
| 4650 | 4675 | ||
| 4676 | void intel_finish_page_flip(struct drm_device *dev, int pipe) | ||
| 4677 | { | ||
| 4678 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4679 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 4680 | |||
| 4681 | do_intel_finish_page_flip(dev, crtc); | ||
| 4682 | } | ||
| 4683 | |||
| 4684 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane) | ||
| 4685 | { | ||
| 4686 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4687 | struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; | ||
| 4688 | |||
| 4689 | do_intel_finish_page_flip(dev, crtc); | ||
| 4690 | } | ||
| 4691 | |||
| 4651 | void intel_prepare_page_flip(struct drm_device *dev, int plane) | 4692 | void intel_prepare_page_flip(struct drm_device *dev, int plane) |
| 4652 | { | 4693 | { |
| 4653 | drm_i915_private_t *dev_priv = dev->dev_private; | 4694 | drm_i915_private_t *dev_priv = dev->dev_private; |
| @@ -4675,9 +4716,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4675 | struct drm_gem_object *obj; | 4716 | struct drm_gem_object *obj; |
| 4676 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4717 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 4677 | struct intel_unpin_work *work; | 4718 | struct intel_unpin_work *work; |
| 4678 | unsigned long flags; | 4719 | unsigned long flags, offset; |
| 4679 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | 4720 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
| 4680 | int ret, pipesrc; | 4721 | int ret, pipesrc; |
| 4722 | u32 flip_mask; | ||
| 4681 | 4723 | ||
| 4682 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4724 | work = kzalloc(sizeof *work, GFP_KERNEL); |
| 4683 | if (work == NULL) | 4725 | if (work == NULL) |
| @@ -4731,16 +4773,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4731 | atomic_inc(&obj_priv->pending_flip); | 4773 | atomic_inc(&obj_priv->pending_flip); |
| 4732 | work->pending_flip_obj = obj; | 4774 | work->pending_flip_obj = obj; |
| 4733 | 4775 | ||
| 4776 | if (intel_crtc->plane) | ||
| 4777 | flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | ||
| 4778 | else | ||
| 4779 | flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; | ||
| 4780 | |||
| 4781 | /* Wait for any previous flip to finish */ | ||
| 4782 | if (IS_GEN3(dev)) | ||
| 4783 | while (I915_READ(ISR) & flip_mask) | ||
| 4784 | ; | ||
| 4785 | |||
| 4786 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
| 4787 | offset = obj_priv->gtt_offset; | ||
| 4788 | offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); | ||
| 4789 | |||
| 4734 | BEGIN_LP_RING(4); | 4790 | BEGIN_LP_RING(4); |
| 4735 | OUT_RING(MI_DISPLAY_FLIP | | ||
| 4736 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
| 4737 | OUT_RING(fb->pitch); | ||
| 4738 | if (IS_I965G(dev)) { | 4791 | if (IS_I965G(dev)) { |
| 4739 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 4792 | OUT_RING(MI_DISPLAY_FLIP | |
| 4793 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
| 4794 | OUT_RING(fb->pitch); | ||
| 4795 | OUT_RING(offset | obj_priv->tiling_mode); | ||
| 4740 | pipesrc = I915_READ(pipesrc_reg); | 4796 | pipesrc = I915_READ(pipesrc_reg); |
| 4741 | OUT_RING(pipesrc & 0x0fff0fff); | 4797 | OUT_RING(pipesrc & 0x0fff0fff); |
| 4742 | } else { | 4798 | } else { |
| 4743 | OUT_RING(obj_priv->gtt_offset); | 4799 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
| 4800 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
| 4801 | OUT_RING(fb->pitch); | ||
| 4802 | OUT_RING(offset); | ||
| 4744 | OUT_RING(MI_NOOP); | 4803 | OUT_RING(MI_NOOP); |
| 4745 | } | 4804 | } |
| 4746 | ADVANCE_LP_RING(); | 4805 | ADVANCE_LP_RING(); |
| @@ -5472,6 +5531,66 @@ static void intel_init_display(struct drm_device *dev) | |||
| 5472 | } | 5531 | } |
| 5473 | } | 5532 | } |
| 5474 | 5533 | ||
| 5534 | /* | ||
| 5535 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, | ||
| 5536 | * resume, or other times. This quirk makes sure that's the case for | ||
| 5537 | * affected systems. | ||
| 5538 | */ | ||
| 5539 | static void quirk_pipea_force (struct drm_device *dev) | ||
| 5540 | { | ||
| 5541 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5542 | |||
| 5543 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; | ||
| 5544 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); | ||
| 5545 | } | ||
| 5546 | |||
| 5547 | struct intel_quirk { | ||
| 5548 | int device; | ||
| 5549 | int subsystem_vendor; | ||
| 5550 | int subsystem_device; | ||
| 5551 | void (*hook)(struct drm_device *dev); | ||
| 5552 | }; | ||
| 5553 | |||
| 5554 | struct intel_quirk intel_quirks[] = { | ||
| 5555 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ | ||
| 5556 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, | ||
| 5557 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | ||
| 5558 | { 0x27ae,0x103c, 0x361a, quirk_pipea_force }, | ||
| 5559 | |||
| 5560 | /* Thinkpad R31 needs pipe A force quirk */ | ||
| 5561 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, | ||
| 5562 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ | ||
| 5563 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, | ||
| 5564 | |||
| 5565 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ | ||
| 5566 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, | ||
| 5567 | /* ThinkPad X40 needs pipe A force quirk */ | ||
| 5568 | |||
| 5569 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ | ||
| 5570 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, | ||
| 5571 | |||
| 5572 | /* 855 & before need to leave pipe A & dpll A up */ | ||
| 5573 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
| 5574 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
| 5575 | }; | ||
| 5576 | |||
| 5577 | static void intel_init_quirks(struct drm_device *dev) | ||
| 5578 | { | ||
| 5579 | struct pci_dev *d = dev->pdev; | ||
| 5580 | int i; | ||
| 5581 | |||
| 5582 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | ||
| 5583 | struct intel_quirk *q = &intel_quirks[i]; | ||
| 5584 | |||
| 5585 | if (d->device == q->device && | ||
| 5586 | (d->subsystem_vendor == q->subsystem_vendor || | ||
| 5587 | q->subsystem_vendor == PCI_ANY_ID) && | ||
| 5588 | (d->subsystem_device == q->subsystem_device || | ||
| 5589 | q->subsystem_device == PCI_ANY_ID)) | ||
| 5590 | q->hook(dev); | ||
| 5591 | } | ||
| 5592 | } | ||
| 5593 | |||
| 5475 | void intel_modeset_init(struct drm_device *dev) | 5594 | void intel_modeset_init(struct drm_device *dev) |
| 5476 | { | 5595 | { |
| 5477 | struct drm_i915_private *dev_priv = dev->dev_private; | 5596 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -5484,6 +5603,8 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 5484 | 5603 | ||
| 5485 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | 5604 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
| 5486 | 5605 | ||
| 5606 | intel_init_quirks(dev); | ||
| 5607 | |||
| 5487 | intel_init_display(dev); | 5608 | intel_init_display(dev); |
| 5488 | 5609 | ||
| 5489 | if (IS_I965G(dev)) { | 5610 | if (IS_I965G(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 49b54f05d3cf..5dde80f9e652 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -136,6 +136,12 @@ intel_dp_link_required(struct drm_device *dev, | |||
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | static int | 138 | static int |
| 139 | intel_dp_max_data_rate(int max_link_clock, int max_lanes) | ||
| 140 | { | ||
| 141 | return (max_link_clock * max_lanes * 8) / 10; | ||
| 142 | } | ||
| 143 | |||
| 144 | static int | ||
| 139 | intel_dp_mode_valid(struct drm_connector *connector, | 145 | intel_dp_mode_valid(struct drm_connector *connector, |
| 140 | struct drm_display_mode *mode) | 146 | struct drm_display_mode *mode) |
| 141 | { | 147 | { |
| @@ -144,8 +150,11 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
| 144 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); | 150 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
| 145 | int max_lanes = intel_dp_max_lane_count(intel_encoder); | 151 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
| 146 | 152 | ||
| 147 | if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) | 153 | /* only refuse the mode on non eDP since we have seen some wierd eDP panels |
| 148 | > max_link_clock * max_lanes) | 154 | which are outside spec tolerances but somehow work by magic */ |
| 155 | if (!IS_eDP(intel_encoder) && | ||
| 156 | (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) | ||
| 157 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | ||
| 149 | return MODE_CLOCK_HIGH; | 158 | return MODE_CLOCK_HIGH; |
| 150 | 159 | ||
| 151 | if (mode->clock < 10000) | 160 | if (mode->clock < 10000) |
| @@ -506,7 +515,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 506 | 515 | ||
| 507 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 516 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
| 508 | for (clock = 0; clock <= max_clock; clock++) { | 517 | for (clock = 0; clock <= max_clock; clock++) { |
| 509 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 518 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
| 510 | 519 | ||
| 511 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) | 520 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) |
| 512 | <= link_avail) { | 521 | <= link_avail) { |
| @@ -521,6 +530,18 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 521 | } | 530 | } |
| 522 | } | 531 | } |
| 523 | } | 532 | } |
| 533 | |||
| 534 | if (IS_eDP(intel_encoder)) { | ||
| 535 | /* okay we failed just pick the highest */ | ||
| 536 | dp_priv->lane_count = max_lane_count; | ||
| 537 | dp_priv->link_bw = bws[max_clock]; | ||
| 538 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | ||
| 539 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
| 540 | "count %d clock %d\n", | ||
| 541 | dp_priv->link_bw, dp_priv->lane_count, | ||
| 542 | adjusted_mode->clock); | ||
| 543 | return true; | ||
| 544 | } | ||
| 524 | return false; | 545 | return false; |
| 525 | } | 546 | } |
| 526 | 547 | ||
| @@ -696,6 +717,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 696 | } | 717 | } |
| 697 | } | 718 | } |
| 698 | 719 | ||
| 720 | static void ironlake_edp_panel_on (struct drm_device *dev) | ||
| 721 | { | ||
| 722 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 723 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
| 724 | u32 pp, pp_status; | ||
| 725 | |||
| 726 | pp_status = I915_READ(PCH_PP_STATUS); | ||
| 727 | if (pp_status & PP_ON) | ||
| 728 | return; | ||
| 729 | |||
| 730 | pp = I915_READ(PCH_PP_CONTROL); | ||
| 731 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | ||
| 732 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 733 | do { | ||
| 734 | pp_status = I915_READ(PCH_PP_STATUS); | ||
| 735 | } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout)); | ||
| 736 | |||
| 737 | if (time_after(jiffies, timeout)) | ||
| 738 | DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); | ||
| 739 | |||
| 740 | pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); | ||
| 741 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 742 | } | ||
| 743 | |||
| 744 | static void ironlake_edp_panel_off (struct drm_device *dev) | ||
| 745 | { | ||
| 746 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 747 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
| 748 | u32 pp, pp_status; | ||
| 749 | |||
| 750 | pp = I915_READ(PCH_PP_CONTROL); | ||
| 751 | pp &= ~POWER_TARGET_ON; | ||
| 752 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 753 | do { | ||
| 754 | pp_status = I915_READ(PCH_PP_STATUS); | ||
| 755 | } while ((pp_status & PP_ON) && !time_after(jiffies, timeout)); | ||
| 756 | |||
| 757 | if (time_after(jiffies, timeout)) | ||
| 758 | DRM_DEBUG_KMS("panel off wait timed out\n"); | ||
| 759 | |||
| 760 | /* Make sure VDD is enabled so DP AUX will work */ | ||
| 761 | pp |= EDP_FORCE_VDD; | ||
| 762 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 763 | } | ||
| 764 | |||
| 699 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 765 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
| 700 | { | 766 | { |
| 701 | struct drm_i915_private *dev_priv = dev->dev_private; | 767 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -730,14 +796,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
| 730 | if (mode != DRM_MODE_DPMS_ON) { | 796 | if (mode != DRM_MODE_DPMS_ON) { |
| 731 | if (dp_reg & DP_PORT_EN) { | 797 | if (dp_reg & DP_PORT_EN) { |
| 732 | intel_dp_link_down(intel_encoder, dp_priv->DP); | 798 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
| 733 | if (IS_eDP(intel_encoder)) | 799 | if (IS_eDP(intel_encoder)) { |
| 734 | ironlake_edp_backlight_off(dev); | 800 | ironlake_edp_backlight_off(dev); |
| 801 | ironlake_edp_panel_off(dev); | ||
| 802 | } | ||
| 735 | } | 803 | } |
| 736 | } else { | 804 | } else { |
| 737 | if (!(dp_reg & DP_PORT_EN)) { | 805 | if (!(dp_reg & DP_PORT_EN)) { |
| 738 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); | 806 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
| 739 | if (IS_eDP(intel_encoder)) | 807 | if (IS_eDP(intel_encoder)) { |
| 808 | ironlake_edp_panel_on(dev); | ||
| 740 | ironlake_edp_backlight_on(dev); | 809 | ironlake_edp_backlight_on(dev); |
| 810 | } | ||
| 741 | } | 811 | } |
| 742 | } | 812 | } |
| 743 | dp_priv->dpms_mode = mode; | 813 | dp_priv->dpms_mode = mode; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index df931f787665..2f7970be9051 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -215,6 +215,9 @@ extern void intel_init_clock_gating(struct drm_device *dev); | |||
| 215 | extern void ironlake_enable_drps(struct drm_device *dev); | 215 | extern void ironlake_enable_drps(struct drm_device *dev); |
| 216 | extern void ironlake_disable_drps(struct drm_device *dev); | 216 | extern void ironlake_disable_drps(struct drm_device *dev); |
| 217 | 217 | ||
| 218 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | ||
| 219 | struct drm_gem_object *obj); | ||
| 220 | |||
| 218 | extern int intel_framebuffer_init(struct drm_device *dev, | 221 | extern int intel_framebuffer_init(struct drm_device *dev, |
| 219 | struct intel_framebuffer *ifb, | 222 | struct intel_framebuffer *ifb, |
| 220 | struct drm_mode_fb_cmd *mode_cmd, | 223 | struct drm_mode_fb_cmd *mode_cmd, |
| @@ -224,6 +227,7 @@ extern void intel_fbdev_fini(struct drm_device *dev); | |||
| 224 | 227 | ||
| 225 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); | 228 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
| 226 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); | 229 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); |
| 230 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | ||
| 227 | 231 | ||
| 228 | extern void intel_setup_overlay(struct drm_device *dev); | 232 | extern void intel_setup_overlay(struct drm_device *dev); |
| 229 | extern void intel_cleanup_overlay(struct drm_device *dev); | 233 | extern void intel_cleanup_overlay(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index c3c505244e07..3e18c9e7729b 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
| 98 | 98 | ||
| 99 | mutex_lock(&dev->struct_mutex); | 99 | mutex_lock(&dev->struct_mutex); |
| 100 | 100 | ||
| 101 | ret = i915_gem_object_pin(fbo, 64*1024); | 101 | ret = intel_pin_and_fence_fb_obj(dev, fbo); |
| 102 | if (ret) { | 102 | if (ret) { |
| 103 | DRM_ERROR("failed to pin fb: %d\n", ret); | 103 | DRM_ERROR("failed to pin fb: %d\n", ret); |
| 104 | goto out_unref; | 104 | goto out_unref; |
| @@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
| 236 | 236 | ||
| 237 | drm_framebuffer_cleanup(&ifb->base); | 237 | drm_framebuffer_cleanup(&ifb->base); |
| 238 | if (ifb->obj) | 238 | if (ifb->obj) |
| 239 | drm_gem_object_unreference_unlocked(ifb->obj); | 239 | drm_gem_object_unreference(ifb->obj); |
| 240 | 240 | ||
| 241 | return 0; | 241 | return 0; |
| 242 | } | 242 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6a1accd83aec..0eab8df5bf7e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -599,6 +599,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
| 599 | return 0; | 599 | return 0; |
| 600 | } | 600 | } |
| 601 | 601 | ||
| 602 | static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) | ||
| 603 | { | ||
| 604 | DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); | ||
| 605 | return 1; | ||
| 606 | } | ||
| 607 | |||
| 608 | /* The GPU hangs up on these systems if modeset is performed on LID open */ | ||
| 609 | static const struct dmi_system_id intel_no_modeset_on_lid[] = { | ||
| 610 | { | ||
| 611 | .callback = intel_no_modeset_on_lid_dmi_callback, | ||
| 612 | .ident = "Toshiba Tecra A11", | ||
| 613 | .matches = { | ||
| 614 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
| 615 | DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), | ||
| 616 | }, | ||
| 617 | }, | ||
| 618 | |||
| 619 | { } /* terminating entry */ | ||
| 620 | }; | ||
| 621 | |||
| 602 | /* | 622 | /* |
| 603 | * Lid events. Note the use of 'modeset_on_lid': | 623 | * Lid events. Note the use of 'modeset_on_lid': |
| 604 | * - we set it on lid close, and reset it on open | 624 | * - we set it on lid close, and reset it on open |
| @@ -622,6 +642,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
| 622 | */ | 642 | */ |
| 623 | if (connector) | 643 | if (connector) |
| 624 | connector->status = connector->funcs->detect(connector); | 644 | connector->status = connector->funcs->detect(connector); |
| 645 | /* Don't force modeset on machines where it causes a GPU lockup */ | ||
| 646 | if (dmi_check_system(intel_no_modeset_on_lid)) | ||
| 647 | return NOTIFY_OK; | ||
| 625 | if (!acpi_lid_open()) { | 648 | if (!acpi_lid_open()) { |
| 626 | dev_priv->modeset_on_lid = 1; | 649 | dev_priv->modeset_on_lid = 1; |
| 627 | return NOTIFY_OK; | 650 | return NOTIFY_OK; |
| @@ -983,8 +1006,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 983 | 1006 | ||
| 984 | drm_connector_attach_property(&intel_connector->base, | 1007 | drm_connector_attach_property(&intel_connector->base, |
| 985 | dev->mode_config.scaling_mode_property, | 1008 | dev->mode_config.scaling_mode_property, |
| 986 | DRM_MODE_SCALE_FULLSCREEN); | 1009 | DRM_MODE_SCALE_ASPECT); |
| 987 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | 1010 | lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; |
| 988 | /* | 1011 | /* |
| 989 | * LVDS discovery: | 1012 | * LVDS discovery: |
| 990 | * 1) check for EDID on DDC | 1013 | * 1) check for EDID on DDC |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cea4f1a8709e..26362f8495a8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -94,7 +94,7 @@ render_ring_flush(struct drm_device *dev, | |||
| 94 | #if WATCH_EXEC | 94 | #if WATCH_EXEC |
| 95 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | 95 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); |
| 96 | #endif | 96 | #endif |
| 97 | intel_ring_begin(dev, ring, 8); | 97 | intel_ring_begin(dev, ring, 2); |
| 98 | intel_ring_emit(dev, ring, cmd); | 98 | intel_ring_emit(dev, ring, cmd); |
| 99 | intel_ring_emit(dev, ring, MI_NOOP); | 99 | intel_ring_emit(dev, ring, MI_NOOP); |
| 100 | intel_ring_advance(dev, ring); | 100 | intel_ring_advance(dev, ring); |
| @@ -358,7 +358,7 @@ bsd_ring_flush(struct drm_device *dev, | |||
| 358 | u32 invalidate_domains, | 358 | u32 invalidate_domains, |
| 359 | u32 flush_domains) | 359 | u32 flush_domains) |
| 360 | { | 360 | { |
| 361 | intel_ring_begin(dev, ring, 8); | 361 | intel_ring_begin(dev, ring, 2); |
| 362 | intel_ring_emit(dev, ring, MI_FLUSH); | 362 | intel_ring_emit(dev, ring, MI_FLUSH); |
| 363 | intel_ring_emit(dev, ring, MI_NOOP); | 363 | intel_ring_emit(dev, ring, MI_NOOP); |
| 364 | intel_ring_advance(dev, ring); | 364 | intel_ring_advance(dev, ring); |
| @@ -687,6 +687,7 @@ int intel_wrap_ring_buffer(struct drm_device *dev, | |||
| 687 | *virt++ = MI_NOOP; | 687 | *virt++ = MI_NOOP; |
| 688 | 688 | ||
| 689 | ring->tail = 0; | 689 | ring->tail = 0; |
| 690 | ring->space = ring->head - 8; | ||
| 690 | 691 | ||
| 691 | return 0; | 692 | return 0; |
| 692 | } | 693 | } |
| @@ -721,8 +722,9 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
| 721 | } | 722 | } |
| 722 | 723 | ||
| 723 | void intel_ring_begin(struct drm_device *dev, | 724 | void intel_ring_begin(struct drm_device *dev, |
| 724 | struct intel_ring_buffer *ring, int n) | 725 | struct intel_ring_buffer *ring, int num_dwords) |
| 725 | { | 726 | { |
| 727 | int n = 4*num_dwords; | ||
| 726 | if (unlikely(ring->tail + n > ring->size)) | 728 | if (unlikely(ring->tail + n > ring->size)) |
| 727 | intel_wrap_ring_buffer(dev, ring); | 729 | intel_wrap_ring_buffer(dev, ring); |
| 728 | if (unlikely(ring->space < n)) | 730 | if (unlikely(ring->space < n)) |
| @@ -752,7 +754,7 @@ void intel_fill_struct(struct drm_device *dev, | |||
| 752 | { | 754 | { |
| 753 | unsigned int *virt = ring->virtual_start + ring->tail; | 755 | unsigned int *virt = ring->virtual_start + ring->tail; |
| 754 | BUG_ON((len&~(4-1)) != 0); | 756 | BUG_ON((len&~(4-1)) != 0); |
| 755 | intel_ring_begin(dev, ring, len); | 757 | intel_ring_begin(dev, ring, len/4); |
| 756 | memcpy(virt, data, len); | 758 | memcpy(virt, data, len); |
| 757 | ring->tail += len; | 759 | ring->tail += len; |
| 758 | ring->tail &= ring->size - 1; | 760 | ring->tail &= ring->size - 1; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index fc924b649195..e492919faf44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -203,36 +203,26 @@ struct methods { | |||
| 203 | const bool rw; | 203 | const bool rw; |
| 204 | }; | 204 | }; |
| 205 | 205 | ||
| 206 | static struct methods nv04_methods[] = { | 206 | static struct methods shadow_methods[] = { |
| 207 | { "PROM", load_vbios_prom, false }, | ||
| 208 | { "PRAMIN", load_vbios_pramin, true }, | ||
| 209 | { "PCIROM", load_vbios_pci, true }, | ||
| 210 | }; | ||
| 211 | |||
| 212 | static struct methods nv50_methods[] = { | ||
| 213 | { "ACPI", load_vbios_acpi, true }, | ||
| 214 | { "PRAMIN", load_vbios_pramin, true }, | 207 | { "PRAMIN", load_vbios_pramin, true }, |
| 215 | { "PROM", load_vbios_prom, false }, | 208 | { "PROM", load_vbios_prom, false }, |
| 216 | { "PCIROM", load_vbios_pci, true }, | 209 | { "PCIROM", load_vbios_pci, true }, |
| 210 | { "ACPI", load_vbios_acpi, true }, | ||
| 217 | }; | 211 | }; |
| 218 | 212 | ||
| 219 | #define METHODCNT 3 | ||
| 220 | |||
| 221 | static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | 213 | static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) |
| 222 | { | 214 | { |
| 223 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 215 | const int nr_methods = ARRAY_SIZE(shadow_methods); |
| 224 | struct methods *methods; | 216 | struct methods *methods = shadow_methods; |
| 225 | int i; | ||
| 226 | int testscore = 3; | 217 | int testscore = 3; |
| 227 | int scores[METHODCNT]; | 218 | int scores[nr_methods], i; |
| 228 | 219 | ||
| 229 | if (nouveau_vbios) { | 220 | if (nouveau_vbios) { |
| 230 | methods = nv04_methods; | 221 | for (i = 0; i < nr_methods; i++) |
| 231 | for (i = 0; i < METHODCNT; i++) | ||
| 232 | if (!strcasecmp(nouveau_vbios, methods[i].desc)) | 222 | if (!strcasecmp(nouveau_vbios, methods[i].desc)) |
| 233 | break; | 223 | break; |
| 234 | 224 | ||
| 235 | if (i < METHODCNT) { | 225 | if (i < nr_methods) { |
| 236 | NV_INFO(dev, "Attempting to use BIOS image from %s\n", | 226 | NV_INFO(dev, "Attempting to use BIOS image from %s\n", |
| 237 | methods[i].desc); | 227 | methods[i].desc); |
| 238 | 228 | ||
| @@ -244,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | |||
| 244 | NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); | 234 | NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); |
| 245 | } | 235 | } |
| 246 | 236 | ||
| 247 | if (dev_priv->card_type < NV_50) | 237 | for (i = 0; i < nr_methods; i++) { |
| 248 | methods = nv04_methods; | ||
| 249 | else | ||
| 250 | methods = nv50_methods; | ||
| 251 | |||
| 252 | for (i = 0; i < METHODCNT; i++) { | ||
| 253 | NV_TRACE(dev, "Attempting to load BIOS image from %s\n", | 238 | NV_TRACE(dev, "Attempting to load BIOS image from %s\n", |
| 254 | methods[i].desc); | 239 | methods[i].desc); |
| 255 | data[0] = data[1] = 0; /* avoid reuse of previous image */ | 240 | data[0] = data[1] = 0; /* avoid reuse of previous image */ |
| @@ -260,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | |||
| 260 | } | 245 | } |
| 261 | 246 | ||
| 262 | while (--testscore > 0) { | 247 | while (--testscore > 0) { |
| 263 | for (i = 0; i < METHODCNT; i++) { | 248 | for (i = 0; i < nr_methods; i++) { |
| 264 | if (scores[i] == testscore) { | 249 | if (scores[i] == testscore) { |
| 265 | NV_TRACE(dev, "Using BIOS image from %s\n", | 250 | NV_TRACE(dev, "Using BIOS image from %s\n", |
| 266 | methods[i].desc); | 251 | methods[i].desc); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index c9a4a0d2a115..257ea130ae13 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -387,7 +387,8 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
| 387 | dev_priv->nfbdev = nfbdev; | 387 | dev_priv->nfbdev = nfbdev; |
| 388 | nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; | 388 | nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; |
| 389 | 389 | ||
| 390 | ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4); | 390 | ret = drm_fb_helper_init(dev, &nfbdev->helper, |
| 391 | nv_two_heads(dev) ? 2 : 1, 4); | ||
| 391 | if (ret) { | 392 | if (ret) { |
| 392 | kfree(nfbdev); | 393 | kfree(nfbdev); |
| 393 | return ret; | 394 | return ret; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index f3f2827017ef..8c2d6478a221 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -498,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 498 | if ((rdev->family == CHIP_RS600) || | 498 | if ((rdev->family == CHIP_RS600) || |
| 499 | (rdev->family == CHIP_RS690) || | 499 | (rdev->family == CHIP_RS690) || |
| 500 | (rdev->family == CHIP_RS740)) | 500 | (rdev->family == CHIP_RS740)) |
| 501 | pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | 501 | pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ |
| 502 | RADEON_PLL_PREFER_CLOSEST_LOWER); | 502 | RADEON_PLL_PREFER_CLOSEST_LOWER); |
| 503 | 503 | ||
| 504 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 504 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4b6623df3b96..1caf625e472b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -607,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
| 607 | WREG32(MC_VM_FB_LOCATION, tmp); | 607 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 608 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 608 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 609 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 609 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 610 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 610 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
| 611 | if (rdev->flags & RADEON_IS_AGP) { | 611 | if (rdev->flags & RADEON_IS_AGP) { |
| 612 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 612 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
| 613 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 613 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
| @@ -1222,11 +1222,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1222 | ps_thread_count = 128; | 1222 | ps_thread_count = 128; |
| 1223 | 1223 | ||
| 1224 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); | 1224 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); |
| 1225 | sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1225 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1226 | sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1226 | sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1227 | sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1227 | sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1228 | sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1228 | sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1229 | sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1229 | sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1230 | 1230 | ||
| 1231 | sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | 1231 | sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); |
| 1232 | sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | 1232 | sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); |
| @@ -1260,6 +1260,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1260 | WREG32(VGT_GS_VERTEX_REUSE, 16); | 1260 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
| 1261 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | 1261 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
| 1262 | 1262 | ||
| 1263 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); | ||
| 1264 | WREG32(VGT_OUT_DEALLOC_CNTL, 16); | ||
| 1265 | |||
| 1263 | WREG32(CB_PERF_CTR0_SEL_0, 0); | 1266 | WREG32(CB_PERF_CTR0_SEL_0, 0); |
| 1264 | WREG32(CB_PERF_CTR0_SEL_1, 0); | 1267 | WREG32(CB_PERF_CTR0_SEL_1, 0); |
| 1265 | WREG32(CB_PERF_CTR1_SEL_0, 0); | 1268 | WREG32(CB_PERF_CTR1_SEL_0, 0); |
| @@ -1269,6 +1272,26 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1269 | WREG32(CB_PERF_CTR3_SEL_0, 0); | 1272 | WREG32(CB_PERF_CTR3_SEL_0, 0); |
| 1270 | WREG32(CB_PERF_CTR3_SEL_1, 0); | 1273 | WREG32(CB_PERF_CTR3_SEL_1, 0); |
| 1271 | 1274 | ||
| 1275 | /* clear render buffer base addresses */ | ||
| 1276 | WREG32(CB_COLOR0_BASE, 0); | ||
| 1277 | WREG32(CB_COLOR1_BASE, 0); | ||
| 1278 | WREG32(CB_COLOR2_BASE, 0); | ||
| 1279 | WREG32(CB_COLOR3_BASE, 0); | ||
| 1280 | WREG32(CB_COLOR4_BASE, 0); | ||
| 1281 | WREG32(CB_COLOR5_BASE, 0); | ||
| 1282 | WREG32(CB_COLOR6_BASE, 0); | ||
| 1283 | WREG32(CB_COLOR7_BASE, 0); | ||
| 1284 | WREG32(CB_COLOR8_BASE, 0); | ||
| 1285 | WREG32(CB_COLOR9_BASE, 0); | ||
| 1286 | WREG32(CB_COLOR10_BASE, 0); | ||
| 1287 | WREG32(CB_COLOR11_BASE, 0); | ||
| 1288 | |||
| 1289 | /* set the shader const cache sizes to 0 */ | ||
| 1290 | for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) | ||
| 1291 | WREG32(i, 0); | ||
| 1292 | for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) | ||
| 1293 | WREG32(i, 0); | ||
| 1294 | |||
| 1272 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); | 1295 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); |
| 1273 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); | 1296 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); |
| 1274 | 1297 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 64516b950891..345a75a03c96 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
| @@ -333,7 +333,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 333 | header = radeon_get_ib_value(p, h_idx); | 333 | header = radeon_get_ib_value(p, h_idx); |
| 334 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | 334 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); |
| 335 | reg = CP_PACKET0_GET_REG(header); | 335 | reg = CP_PACKET0_GET_REG(header); |
| 336 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
| 337 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 336 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 338 | if (!obj) { | 337 | if (!obj) { |
| 339 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 338 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
| @@ -368,7 +367,6 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 368 | } | 367 | } |
| 369 | } | 368 | } |
| 370 | out: | 369 | out: |
| 371 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
| 372 | return r; | 370 | return r; |
| 373 | } | 371 | } |
| 374 | 372 | ||
| @@ -1197,7 +1195,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
| 1197 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | 1195 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); |
| 1198 | return -EINVAL; | 1196 | return -EINVAL; |
| 1199 | } | 1197 | } |
| 1200 | ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1198 | ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
| 1201 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1199 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
| 1202 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 1200 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); |
| 1203 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1201 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
| @@ -1209,7 +1207,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
| 1209 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | 1207 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); |
| 1210 | return -EINVAL; | 1208 | return -EINVAL; |
| 1211 | } | 1209 | } |
| 1212 | ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1210 | ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
| 1213 | mipmap = reloc->robj; | 1211 | mipmap = reloc->robj; |
| 1214 | r = evergreen_check_texture_resource(p, idx+1+(i*8), | 1212 | r = evergreen_check_texture_resource(p, idx+1+(i*8), |
| 1215 | texture, mipmap); | 1213 | texture, mipmap); |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 79683f6b4452..a1cd621780e2 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
| @@ -713,6 +713,9 @@ | |||
| 713 | #define SQ_GSVS_RING_OFFSET_2 0x28930 | 713 | #define SQ_GSVS_RING_OFFSET_2 0x28930 |
| 714 | #define SQ_GSVS_RING_OFFSET_3 0x28934 | 714 | #define SQ_GSVS_RING_OFFSET_3 0x28934 |
| 715 | 715 | ||
| 716 | #define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140 | ||
| 717 | #define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80 | ||
| 718 | |||
| 716 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 | 719 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 |
| 717 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 | 720 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 |
| 718 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 | 721 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index cf89aa2eb28c..a89a15ab524d 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -1230,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 1230 | header = radeon_get_ib_value(p, h_idx); | 1230 | header = radeon_get_ib_value(p, h_idx); |
| 1231 | crtc_id = radeon_get_ib_value(p, h_idx + 5); | 1231 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
| 1232 | reg = CP_PACKET0_GET_REG(header); | 1232 | reg = CP_PACKET0_GET_REG(header); |
| 1233 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
| 1234 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 1233 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 1235 | if (!obj) { | 1234 | if (!obj) { |
| 1236 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 1235 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
| @@ -1264,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 1264 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | 1263 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
| 1265 | } | 1264 | } |
| 1266 | out: | 1265 | out: |
| 1267 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
| 1268 | return r; | 1266 | return r; |
| 1269 | } | 1267 | } |
| 1270 | 1268 | ||
| @@ -1628,6 +1626,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
| 1628 | case RADEON_TXFORMAT_RGB332: | 1626 | case RADEON_TXFORMAT_RGB332: |
| 1629 | case RADEON_TXFORMAT_Y8: | 1627 | case RADEON_TXFORMAT_Y8: |
| 1630 | track->textures[i].cpp = 1; | 1628 | track->textures[i].cpp = 1; |
| 1629 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 1631 | break; | 1630 | break; |
| 1632 | case RADEON_TXFORMAT_AI88: | 1631 | case RADEON_TXFORMAT_AI88: |
| 1633 | case RADEON_TXFORMAT_ARGB1555: | 1632 | case RADEON_TXFORMAT_ARGB1555: |
| @@ -1639,12 +1638,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
| 1639 | case RADEON_TXFORMAT_LDUDV655: | 1638 | case RADEON_TXFORMAT_LDUDV655: |
| 1640 | case RADEON_TXFORMAT_DUDV88: | 1639 | case RADEON_TXFORMAT_DUDV88: |
| 1641 | track->textures[i].cpp = 2; | 1640 | track->textures[i].cpp = 2; |
| 1641 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 1642 | break; | 1642 | break; |
| 1643 | case RADEON_TXFORMAT_ARGB8888: | 1643 | case RADEON_TXFORMAT_ARGB8888: |
| 1644 | case RADEON_TXFORMAT_RGBA8888: | 1644 | case RADEON_TXFORMAT_RGBA8888: |
| 1645 | case RADEON_TXFORMAT_SHADOW32: | 1645 | case RADEON_TXFORMAT_SHADOW32: |
| 1646 | case RADEON_TXFORMAT_LDUDUV8888: | 1646 | case RADEON_TXFORMAT_LDUDUV8888: |
| 1647 | track->textures[i].cpp = 4; | 1647 | track->textures[i].cpp = 4; |
| 1648 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 1648 | break; | 1649 | break; |
| 1649 | case RADEON_TXFORMAT_DXT1: | 1650 | case RADEON_TXFORMAT_DXT1: |
| 1650 | track->textures[i].cpp = 1; | 1651 | track->textures[i].cpp = 1; |
| @@ -2351,6 +2352,7 @@ void r100_mc_init(struct radeon_device *rdev) | |||
| 2351 | if (rdev->flags & RADEON_IS_IGP) | 2352 | if (rdev->flags & RADEON_IS_IGP) |
| 2352 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 2353 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
| 2353 | radeon_vram_location(rdev, &rdev->mc, base); | 2354 | radeon_vram_location(rdev, &rdev->mc, base); |
| 2355 | rdev->mc.gtt_base_align = 0; | ||
| 2354 | if (!(rdev->flags & RADEON_IS_AGP)) | 2356 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 2355 | radeon_gtt_location(rdev, &rdev->mc); | 2357 | radeon_gtt_location(rdev, &rdev->mc); |
| 2356 | radeon_update_bandwidth_info(rdev); | 2358 | radeon_update_bandwidth_info(rdev); |
| @@ -2604,12 +2606,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 2604 | int surf_index = reg * 16; | 2606 | int surf_index = reg * 16; |
| 2605 | int flags = 0; | 2607 | int flags = 0; |
| 2606 | 2608 | ||
| 2607 | /* r100/r200 divide by 16 */ | ||
| 2608 | if (rdev->family < CHIP_R300) | ||
| 2609 | flags = pitch / 16; | ||
| 2610 | else | ||
| 2611 | flags = pitch / 8; | ||
| 2612 | |||
| 2613 | if (rdev->family <= CHIP_RS200) { | 2609 | if (rdev->family <= CHIP_RS200) { |
| 2614 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | 2610 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
| 2615 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | 2611 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
| @@ -2633,6 +2629,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 2633 | if (tiling_flags & RADEON_TILING_SWAP_32BIT) | 2629 | if (tiling_flags & RADEON_TILING_SWAP_32BIT) |
| 2634 | flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; | 2630 | flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; |
| 2635 | 2631 | ||
| 2632 | /* when we aren't tiling the pitch seems to needs to be furtherdivided down. - tested on power5 + rn50 server */ | ||
| 2633 | if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { | ||
| 2634 | if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) | ||
| 2635 | if (ASIC_IS_RN50(rdev)) | ||
| 2636 | pitch /= 16; | ||
| 2637 | } | ||
| 2638 | |||
| 2639 | /* r100/r200 divide by 16 */ | ||
| 2640 | if (rdev->family < CHIP_R300) | ||
| 2641 | flags |= pitch / 16; | ||
| 2642 | else | ||
| 2643 | flags |= pitch / 8; | ||
| 2644 | |||
| 2645 | |||
| 2636 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); | 2646 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); |
| 2637 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); | 2647 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); |
| 2638 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); | 2648 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); |
| @@ -3147,33 +3157,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) | |||
| 3147 | DRM_ERROR("compress format %d\n", t->compress_format); | 3157 | DRM_ERROR("compress format %d\n", t->compress_format); |
| 3148 | } | 3158 | } |
| 3149 | 3159 | ||
| 3150 | static int r100_cs_track_cube(struct radeon_device *rdev, | ||
| 3151 | struct r100_cs_track *track, unsigned idx) | ||
| 3152 | { | ||
| 3153 | unsigned face, w, h; | ||
| 3154 | struct radeon_bo *cube_robj; | ||
| 3155 | unsigned long size; | ||
| 3156 | |||
| 3157 | for (face = 0; face < 5; face++) { | ||
| 3158 | cube_robj = track->textures[idx].cube_info[face].robj; | ||
| 3159 | w = track->textures[idx].cube_info[face].width; | ||
| 3160 | h = track->textures[idx].cube_info[face].height; | ||
| 3161 | |||
| 3162 | size = w * h; | ||
| 3163 | size *= track->textures[idx].cpp; | ||
| 3164 | |||
| 3165 | size += track->textures[idx].cube_info[face].offset; | ||
| 3166 | |||
| 3167 | if (size > radeon_bo_size(cube_robj)) { | ||
| 3168 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | ||
| 3169 | size, radeon_bo_size(cube_robj)); | ||
| 3170 | r100_cs_track_texture_print(&track->textures[idx]); | ||
| 3171 | return -1; | ||
| 3172 | } | ||
| 3173 | } | ||
| 3174 | return 0; | ||
| 3175 | } | ||
| 3176 | |||
| 3177 | static int r100_track_compress_size(int compress_format, int w, int h) | 3160 | static int r100_track_compress_size(int compress_format, int w, int h) |
| 3178 | { | 3161 | { |
| 3179 | int block_width, block_height, block_bytes; | 3162 | int block_width, block_height, block_bytes; |
| @@ -3204,6 +3187,37 @@ static int r100_track_compress_size(int compress_format, int w, int h) | |||
| 3204 | return sz; | 3187 | return sz; |
| 3205 | } | 3188 | } |
| 3206 | 3189 | ||
| 3190 | static int r100_cs_track_cube(struct radeon_device *rdev, | ||
| 3191 | struct r100_cs_track *track, unsigned idx) | ||
| 3192 | { | ||
| 3193 | unsigned face, w, h; | ||
| 3194 | struct radeon_bo *cube_robj; | ||
| 3195 | unsigned long size; | ||
| 3196 | unsigned compress_format = track->textures[idx].compress_format; | ||
| 3197 | |||
| 3198 | for (face = 0; face < 5; face++) { | ||
| 3199 | cube_robj = track->textures[idx].cube_info[face].robj; | ||
| 3200 | w = track->textures[idx].cube_info[face].width; | ||
| 3201 | h = track->textures[idx].cube_info[face].height; | ||
| 3202 | |||
| 3203 | if (compress_format) { | ||
| 3204 | size = r100_track_compress_size(compress_format, w, h); | ||
| 3205 | } else | ||
| 3206 | size = w * h; | ||
| 3207 | size *= track->textures[idx].cpp; | ||
| 3208 | |||
| 3209 | size += track->textures[idx].cube_info[face].offset; | ||
| 3210 | |||
| 3211 | if (size > radeon_bo_size(cube_robj)) { | ||
| 3212 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | ||
| 3213 | size, radeon_bo_size(cube_robj)); | ||
| 3214 | r100_cs_track_texture_print(&track->textures[idx]); | ||
| 3215 | return -1; | ||
| 3216 | } | ||
| 3217 | } | ||
| 3218 | return 0; | ||
| 3219 | } | ||
| 3220 | |||
| 3207 | static int r100_cs_track_texture_check(struct radeon_device *rdev, | 3221 | static int r100_cs_track_texture_check(struct radeon_device *rdev, |
| 3208 | struct r100_cs_track *track) | 3222 | struct r100_cs_track *track) |
| 3209 | { | 3223 | { |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 85617c311212..0266d72e0a4c 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
| @@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 415 | /* 2D, 3D, CUBE */ | 415 | /* 2D, 3D, CUBE */ |
| 416 | switch (tmp) { | 416 | switch (tmp) { |
| 417 | case 0: | 417 | case 0: |
| 418 | case 3: | ||
| 419 | case 4: | ||
| 418 | case 5: | 420 | case 5: |
| 419 | case 6: | 421 | case 6: |
| 420 | case 7: | 422 | case 7: |
| @@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 450 | case R200_TXFORMAT_RGB332: | 452 | case R200_TXFORMAT_RGB332: |
| 451 | case R200_TXFORMAT_Y8: | 453 | case R200_TXFORMAT_Y8: |
| 452 | track->textures[i].cpp = 1; | 454 | track->textures[i].cpp = 1; |
| 455 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 453 | break; | 456 | break; |
| 454 | case R200_TXFORMAT_AI88: | 457 | case R200_TXFORMAT_AI88: |
| 455 | case R200_TXFORMAT_ARGB1555: | 458 | case R200_TXFORMAT_ARGB1555: |
| @@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 461 | case R200_TXFORMAT_DVDU88: | 464 | case R200_TXFORMAT_DVDU88: |
| 462 | case R200_TXFORMAT_AVYU4444: | 465 | case R200_TXFORMAT_AVYU4444: |
| 463 | track->textures[i].cpp = 2; | 466 | track->textures[i].cpp = 2; |
| 467 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 464 | break; | 468 | break; |
| 465 | case R200_TXFORMAT_ARGB8888: | 469 | case R200_TXFORMAT_ARGB8888: |
| 466 | case R200_TXFORMAT_RGBA8888: | 470 | case R200_TXFORMAT_RGBA8888: |
| @@ -468,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 468 | case R200_TXFORMAT_BGR111110: | 472 | case R200_TXFORMAT_BGR111110: |
| 469 | case R200_TXFORMAT_LDVDU8888: | 473 | case R200_TXFORMAT_LDVDU8888: |
| 470 | track->textures[i].cpp = 4; | 474 | track->textures[i].cpp = 4; |
| 475 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 471 | break; | 476 | break; |
| 472 | case R200_TXFORMAT_DXT1: | 477 | case R200_TXFORMAT_DXT1: |
| 473 | track->textures[i].cpp = 1; | 478 | track->textures[i].cpp = 1; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index b2f9efe2897c..19a7ef7ee344 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -481,6 +481,7 @@ void r300_mc_init(struct radeon_device *rdev) | |||
| 481 | if (rdev->flags & RADEON_IS_IGP) | 481 | if (rdev->flags & RADEON_IS_IGP) |
| 482 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 482 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
| 483 | radeon_vram_location(rdev, &rdev->mc, base); | 483 | radeon_vram_location(rdev, &rdev->mc, base); |
| 484 | rdev->mc.gtt_base_align = 0; | ||
| 484 | if (!(rdev->flags & RADEON_IS_AGP)) | 485 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 485 | radeon_gtt_location(rdev, &rdev->mc); | 486 | radeon_gtt_location(rdev, &rdev->mc); |
| 486 | radeon_update_bandwidth_info(rdev); | 487 | radeon_update_bandwidth_info(rdev); |
| @@ -881,6 +882,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 881 | case R300_TX_FORMAT_Y4X4: | 882 | case R300_TX_FORMAT_Y4X4: |
| 882 | case R300_TX_FORMAT_Z3Y3X2: | 883 | case R300_TX_FORMAT_Z3Y3X2: |
| 883 | track->textures[i].cpp = 1; | 884 | track->textures[i].cpp = 1; |
| 885 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 884 | break; | 886 | break; |
| 885 | case R300_TX_FORMAT_X16: | 887 | case R300_TX_FORMAT_X16: |
| 886 | case R300_TX_FORMAT_Y8X8: | 888 | case R300_TX_FORMAT_Y8X8: |
| @@ -892,6 +894,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 892 | case R300_TX_FORMAT_B8G8_B8G8: | 894 | case R300_TX_FORMAT_B8G8_B8G8: |
| 893 | case R300_TX_FORMAT_G8R8_G8B8: | 895 | case R300_TX_FORMAT_G8R8_G8B8: |
| 894 | track->textures[i].cpp = 2; | 896 | track->textures[i].cpp = 2; |
| 897 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 895 | break; | 898 | break; |
| 896 | case R300_TX_FORMAT_Y16X16: | 899 | case R300_TX_FORMAT_Y16X16: |
| 897 | case R300_TX_FORMAT_Z11Y11X10: | 900 | case R300_TX_FORMAT_Z11Y11X10: |
| @@ -902,14 +905,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 902 | case R300_TX_FORMAT_FL_I32: | 905 | case R300_TX_FORMAT_FL_I32: |
| 903 | case 0x1e: | 906 | case 0x1e: |
| 904 | track->textures[i].cpp = 4; | 907 | track->textures[i].cpp = 4; |
| 908 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 905 | break; | 909 | break; |
| 906 | case R300_TX_FORMAT_W16Z16Y16X16: | 910 | case R300_TX_FORMAT_W16Z16Y16X16: |
| 907 | case R300_TX_FORMAT_FL_R16G16B16A16: | 911 | case R300_TX_FORMAT_FL_R16G16B16A16: |
| 908 | case R300_TX_FORMAT_FL_I32A32: | 912 | case R300_TX_FORMAT_FL_I32A32: |
| 909 | track->textures[i].cpp = 8; | 913 | track->textures[i].cpp = 8; |
| 914 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 910 | break; | 915 | break; |
| 911 | case R300_TX_FORMAT_FL_R32G32B32A32: | 916 | case R300_TX_FORMAT_FL_R32G32B32A32: |
| 912 | track->textures[i].cpp = 16; | 917 | track->textures[i].cpp = 16; |
| 918 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 913 | break; | 919 | break; |
| 914 | case R300_TX_FORMAT_DXT1: | 920 | case R300_TX_FORMAT_DXT1: |
| 915 | track->textures[i].cpp = 1; | 921 | track->textures[i].cpp = 1; |
| @@ -1171,6 +1177,8 @@ int r300_cs_parse(struct radeon_cs_parser *p) | |||
| 1171 | int r; | 1177 | int r; |
| 1172 | 1178 | ||
| 1173 | track = kzalloc(sizeof(*track), GFP_KERNEL); | 1179 | track = kzalloc(sizeof(*track), GFP_KERNEL); |
| 1180 | if (track == NULL) | ||
| 1181 | return -ENOMEM; | ||
| 1174 | r100_cs_track_clear(p->rdev, track); | 1182 | r100_cs_track_clear(p->rdev, track); |
| 1175 | p->track = track; | 1183 | p->track = track; |
| 1176 | do { | 1184 | do { |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 34330df28483..694af7cc23ac 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -125,6 +125,7 @@ void r520_mc_init(struct radeon_device *rdev) | |||
| 125 | r520_vram_get_type(rdev); | 125 | r520_vram_get_type(rdev); |
| 126 | r100_vram_init_sizes(rdev); | 126 | r100_vram_init_sizes(rdev); |
| 127 | radeon_vram_location(rdev, &rdev->mc, 0); | 127 | radeon_vram_location(rdev, &rdev->mc, 0); |
| 128 | rdev->mc.gtt_base_align = 0; | ||
| 128 | if (!(rdev->flags & RADEON_IS_AGP)) | 129 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 129 | radeon_gtt_location(rdev, &rdev->mc); | 130 | radeon_gtt_location(rdev, &rdev->mc); |
| 130 | radeon_update_bandwidth_info(rdev); | 131 | radeon_update_bandwidth_info(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0e91871f45be..e100f69faeec 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -130,9 +130,14 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) | |||
| 130 | break; | 130 | break; |
| 131 | } | 131 | } |
| 132 | } | 132 | } |
| 133 | } else | 133 | } else { |
| 134 | rdev->pm.requested_power_state_index = | 134 | if (rdev->pm.current_power_state_index == 0) |
| 135 | rdev->pm.current_power_state_index - 1; | 135 | rdev->pm.requested_power_state_index = |
| 136 | rdev->pm.num_power_states - 1; | ||
| 137 | else | ||
| 138 | rdev->pm.requested_power_state_index = | ||
| 139 | rdev->pm.current_power_state_index - 1; | ||
| 140 | } | ||
| 136 | } | 141 | } |
| 137 | rdev->pm.requested_clock_mode_index = 0; | 142 | rdev->pm.requested_clock_mode_index = 0; |
| 138 | /* don't use the power state if crtcs are active and no display flag is set */ | 143 | /* don't use the power state if crtcs are active and no display flag is set */ |
| @@ -1097,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev) | |||
| 1097 | WREG32(MC_VM_FB_LOCATION, tmp); | 1102 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 1098 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 1103 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 1099 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 1104 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 1100 | WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); | 1105 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
| 1101 | if (rdev->flags & RADEON_IS_AGP) { | 1106 | if (rdev->flags & RADEON_IS_AGP) { |
| 1102 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); | 1107 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); |
| 1103 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); | 1108 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); |
| @@ -1174,6 +1179,7 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
| 1174 | if (rdev->flags & RADEON_IS_IGP) | 1179 | if (rdev->flags & RADEON_IS_IGP) |
| 1175 | base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | 1180 | base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; |
| 1176 | radeon_vram_location(rdev, &rdev->mc, base); | 1181 | radeon_vram_location(rdev, &rdev->mc, base); |
| 1182 | rdev->mc.gtt_base_align = 0; | ||
| 1177 | radeon_gtt_location(rdev, mc); | 1183 | radeon_gtt_location(rdev, mc); |
| 1178 | } | 1184 | } |
| 1179 | } | 1185 | } |
| @@ -1219,8 +1225,10 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 1219 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1225 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 1220 | r600_vram_gtt_location(rdev, &rdev->mc); | 1226 | r600_vram_gtt_location(rdev, &rdev->mc); |
| 1221 | 1227 | ||
| 1222 | if (rdev->flags & RADEON_IS_IGP) | 1228 | if (rdev->flags & RADEON_IS_IGP) { |
| 1229 | rs690_pm_info(rdev); | ||
| 1223 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 1230 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 1231 | } | ||
| 1224 | radeon_update_bandwidth_info(rdev); | 1232 | radeon_update_bandwidth_info(rdev); |
| 1225 | return 0; | 1233 | return 0; |
| 1226 | } | 1234 | } |
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index f4fb88ece2bb..ca5c29f70779 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
| @@ -538,9 +538,12 @@ int | |||
| 538 | r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv) | 538 | r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv) |
| 539 | { | 539 | { |
| 540 | drm_radeon_private_t *dev_priv = dev->dev_private; | 540 | drm_radeon_private_t *dev_priv = dev->dev_private; |
| 541 | int ret; | ||
| 541 | DRM_DEBUG("\n"); | 542 | DRM_DEBUG("\n"); |
| 542 | 543 | ||
| 543 | r600_nomm_get_vb(dev); | 544 | ret = r600_nomm_get_vb(dev); |
| 545 | if (ret) | ||
| 546 | return ret; | ||
| 544 | 547 | ||
| 545 | dev_priv->blit_vb->file_priv = file_priv; | 548 | dev_priv->blit_vb->file_priv = file_priv; |
| 546 | 549 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index c39c1bc13016..144c32d37136 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -585,7 +585,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 585 | header = radeon_get_ib_value(p, h_idx); | 585 | header = radeon_get_ib_value(p, h_idx); |
| 586 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | 586 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); |
| 587 | reg = CP_PACKET0_GET_REG(header); | 587 | reg = CP_PACKET0_GET_REG(header); |
| 588 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 588 | |
| 589 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 589 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 590 | if (!obj) { | 590 | if (!obj) { |
| 591 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 591 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
| @@ -620,7 +620,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 620 | ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; | 620 | ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; |
| 621 | } | 621 | } |
| 622 | out: | 622 | out: |
| 623 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
| 624 | return r; | 623 | return r; |
| 625 | } | 624 | } |
| 626 | 625 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8e1d44ca26ec..2f94dc66c183 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -177,6 +177,7 @@ void radeon_pm_resume(struct radeon_device *rdev); | |||
| 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
| 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
| 179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | 179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); |
| 180 | void rs690_pm_info(struct radeon_device *rdev); | ||
| 180 | 181 | ||
| 181 | /* | 182 | /* |
| 182 | * Fences. | 183 | * Fences. |
| @@ -350,6 +351,7 @@ struct radeon_mc { | |||
| 350 | int vram_mtrr; | 351 | int vram_mtrr; |
| 351 | bool vram_is_ddr; | 352 | bool vram_is_ddr; |
| 352 | bool igp_sideport_enabled; | 353 | bool igp_sideport_enabled; |
| 354 | u64 gtt_base_align; | ||
| 353 | }; | 355 | }; |
| 354 | 356 | ||
| 355 | bool radeon_combios_sideport_present(struct radeon_device *rdev); | 357 | bool radeon_combios_sideport_present(struct radeon_device *rdev); |
| @@ -619,7 +621,8 @@ enum radeon_dynpm_state { | |||
| 619 | DYNPM_STATE_DISABLED, | 621 | DYNPM_STATE_DISABLED, |
| 620 | DYNPM_STATE_MINIMUM, | 622 | DYNPM_STATE_MINIMUM, |
| 621 | DYNPM_STATE_PAUSED, | 623 | DYNPM_STATE_PAUSED, |
| 622 | DYNPM_STATE_ACTIVE | 624 | DYNPM_STATE_ACTIVE, |
| 625 | DYNPM_STATE_SUSPENDED, | ||
| 623 | }; | 626 | }; |
| 624 | enum radeon_dynpm_action { | 627 | enum radeon_dynpm_action { |
| 625 | DYNPM_ACTION_NONE, | 628 | DYNPM_ACTION_NONE, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 87f7e2cc52d4..646f96f97c77 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -780,6 +780,13 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 780 | case CHIP_R423: | 780 | case CHIP_R423: |
| 781 | case CHIP_RV410: | 781 | case CHIP_RV410: |
| 782 | rdev->asic = &r420_asic; | 782 | rdev->asic = &r420_asic; |
| 783 | /* handle macs */ | ||
| 784 | if (rdev->bios == NULL) { | ||
| 785 | rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock; | ||
| 786 | rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock; | ||
| 787 | rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock; | ||
| 788 | rdev->asic->set_memory_clock = NULL; | ||
| 789 | } | ||
| 783 | break; | 790 | break; |
| 784 | case CHIP_RS400: | 791 | case CHIP_RS400: |
| 785 | case CHIP_RS480: | 792 | case CHIP_RS480: |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 99bd8a9c56b3..10673ae59cfa 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -280,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
| 280 | } | 280 | } |
| 281 | } | 281 | } |
| 282 | 282 | ||
| 283 | /* ASUS HD 3600 board lists the DVI port as HDMI */ | ||
| 284 | if ((dev->pdev->device == 0x9598) && | ||
| 285 | (dev->pdev->subsystem_vendor == 0x1043) && | ||
| 286 | (dev->pdev->subsystem_device == 0x01e4)) { | ||
| 287 | if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { | ||
| 288 | *connector_type = DRM_MODE_CONNECTOR_DVII; | ||
| 289 | } | ||
| 290 | } | ||
| 291 | |||
| 283 | /* ASUS HD 3450 board lists the DVI port as HDMI */ | 292 | /* ASUS HD 3450 board lists the DVI port as HDMI */ |
| 284 | if ((dev->pdev->device == 0x95C5) && | 293 | if ((dev->pdev->device == 0x95C5) && |
| 285 | (dev->pdev->subsystem_vendor == 0x1043) && | 294 | (dev->pdev->subsystem_vendor == 0x1043) && |
| @@ -1029,8 +1038,15 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
| 1029 | data_offset); | 1038 | data_offset); |
| 1030 | switch (crev) { | 1039 | switch (crev) { |
| 1031 | case 1: | 1040 | case 1: |
| 1032 | if (igp_info->info.ucMemoryType & 0xf0) | 1041 | /* AMD IGPS */ |
| 1033 | return true; | 1042 | if ((rdev->family == CHIP_RS690) || |
| 1043 | (rdev->family == CHIP_RS740)) { | ||
| 1044 | if (igp_info->info.ulBootUpMemoryClock) | ||
| 1045 | return true; | ||
| 1046 | } else { | ||
| 1047 | if (igp_info->info.ucMemoryType & 0xf0) | ||
| 1048 | return true; | ||
| 1049 | } | ||
| 1034 | break; | 1050 | break; |
| 1035 | case 2: | 1051 | case 2: |
| 1036 | if (igp_info->info_2.ucMemoryType & 0x0f) | 1052 | if (igp_info->info_2.ucMemoryType & 0x0f) |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index fbba938f8048..2c9213739999 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -48,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) | |||
| 48 | resource_size_t vram_base; | 48 | resource_size_t vram_base; |
| 49 | resource_size_t size = 256 * 1024; /* ??? */ | 49 | resource_size_t size = 256 * 1024; /* ??? */ |
| 50 | 50 | ||
| 51 | if (!(rdev->flags & RADEON_IS_IGP)) | ||
| 52 | if (!radeon_card_posted(rdev)) | ||
| 53 | return false; | ||
| 54 | |||
| 51 | rdev->bios = NULL; | 55 | rdev->bios = NULL; |
| 52 | vram_base = drm_get_resource_start(rdev->ddev, 0); | 56 | vram_base = drm_get_resource_start(rdev->ddev, 0); |
| 53 | bios = ioremap(vram_base, size); | 57 | bios = ioremap(vram_base, size); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 1bee2f9e24a5..2417d7b06fdb 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -1411,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1411 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; | 1411 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; |
| 1412 | } else | 1412 | } else |
| 1413 | #endif /* CONFIG_PPC_PMAC */ | 1413 | #endif /* CONFIG_PPC_PMAC */ |
| 1414 | #ifdef CONFIG_PPC64 | ||
| 1415 | if (ASIC_IS_RN50(rdev)) | ||
| 1416 | rdev->mode_info.connector_table = CT_RN50_POWER; | ||
| 1417 | else | ||
| 1418 | #endif | ||
| 1414 | rdev->mode_info.connector_table = CT_GENERIC; | 1419 | rdev->mode_info.connector_table = CT_GENERIC; |
| 1415 | } | 1420 | } |
| 1416 | 1421 | ||
| @@ -1853,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1853 | CONNECTOR_OBJECT_ID_SVIDEO, | 1858 | CONNECTOR_OBJECT_ID_SVIDEO, |
| 1854 | &hpd); | 1859 | &hpd); |
| 1855 | break; | 1860 | break; |
| 1861 | case CT_RN50_POWER: | ||
| 1862 | DRM_INFO("Connector Table: %d (rn50-power)\n", | ||
| 1863 | rdev->mode_info.connector_table); | ||
| 1864 | /* VGA - primary dac */ | ||
| 1865 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); | ||
| 1866 | hpd.hpd = RADEON_HPD_NONE; | ||
| 1867 | radeon_add_legacy_encoder(dev, | ||
| 1868 | radeon_get_encoder_id(dev, | ||
| 1869 | ATOM_DEVICE_CRT1_SUPPORT, | ||
| 1870 | 1), | ||
| 1871 | ATOM_DEVICE_CRT1_SUPPORT); | ||
| 1872 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, | ||
| 1873 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | ||
| 1874 | CONNECTOR_OBJECT_ID_VGA, | ||
| 1875 | &hpd); | ||
| 1876 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); | ||
| 1877 | hpd.hpd = RADEON_HPD_NONE; | ||
| 1878 | radeon_add_legacy_encoder(dev, | ||
| 1879 | radeon_get_encoder_id(dev, | ||
| 1880 | ATOM_DEVICE_CRT2_SUPPORT, | ||
| 1881 | 2), | ||
| 1882 | ATOM_DEVICE_CRT2_SUPPORT); | ||
| 1883 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | ||
| 1884 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | ||
| 1885 | CONNECTOR_OBJECT_ID_VGA, | ||
| 1886 | &hpd); | ||
| 1887 | break; | ||
| 1856 | default: | 1888 | default: |
| 1857 | DRM_INFO("Connector table: %d (invalid)\n", | 1889 | DRM_INFO("Connector table: %d (invalid)\n", |
| 1858 | rdev->mode_info.connector_table); | 1890 | rdev->mode_info.connector_table); |
| @@ -1906,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, | |||
| 1906 | return false; | 1938 | return false; |
| 1907 | } | 1939 | } |
| 1908 | 1940 | ||
| 1909 | /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */ | ||
| 1910 | if (dev->pdev->device == 0x5159 && | ||
| 1911 | dev->pdev->subsystem_vendor == 0x1002 && | ||
| 1912 | dev->pdev->subsystem_device == 0x013a) { | ||
| 1913 | if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) | ||
| 1914 | *legacy_connector = CONNECTOR_CRT_LEGACY; | ||
| 1915 | |||
| 1916 | } | ||
| 1917 | |||
| 1918 | /* X300 card with extra non-existent DVI port */ | 1941 | /* X300 card with extra non-existent DVI port */ |
| 1919 | if (dev->pdev->device == 0x5B60 && | 1942 | if (dev->pdev->device == 0x5B60 && |
| 1920 | dev->pdev->subsystem_vendor == 0x17af && | 1943 | dev->pdev->subsystem_vendor == 0x17af && |
| @@ -3019,6 +3042,22 @@ void radeon_combios_asic_init(struct drm_device *dev) | |||
| 3019 | combios_write_ram_size(dev); | 3042 | combios_write_ram_size(dev); |
| 3020 | } | 3043 | } |
| 3021 | 3044 | ||
| 3045 | /* quirk for rs4xx HP nx6125 laptop to make it resume | ||
| 3046 | * - it hangs on resume inside the dynclk 1 table. | ||
| 3047 | */ | ||
| 3048 | if (rdev->family == CHIP_RS480 && | ||
| 3049 | rdev->pdev->subsystem_vendor == 0x103c && | ||
| 3050 | rdev->pdev->subsystem_device == 0x308b) | ||
| 3051 | return; | ||
| 3052 | |||
| 3053 | /* quirk for rs4xx HP dv5000 laptop to make it resume | ||
| 3054 | * - it hangs on resume inside the dynclk 1 table. | ||
| 3055 | */ | ||
| 3056 | if (rdev->family == CHIP_RS480 && | ||
| 3057 | rdev->pdev->subsystem_vendor == 0x103c && | ||
| 3058 | rdev->pdev->subsystem_device == 0x30a4) | ||
| 3059 | return; | ||
| 3060 | |||
| 3022 | /* DYN CLK 1 */ | 3061 | /* DYN CLK 1 */ |
| 3023 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); | 3062 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
| 3024 | if (table) | 3063 | if (table) |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 0c7ccc6961a3..adccbc2c202c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -771,30 +771,27 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
| 771 | } else | 771 | } else |
| 772 | ret = connector_status_connected; | 772 | ret = connector_status_connected; |
| 773 | 773 | ||
| 774 | /* multiple connectors on the same encoder with the same ddc line | 774 | /* This gets complicated. We have boards with VGA + HDMI with a |
| 775 | * This tends to be HDMI and DVI on the same encoder with the | 775 | * shared DDC line and we have boards with DVI-D + HDMI with a shared |
| 776 | * same ddc line. If the edid says HDMI, consider the HDMI port | 776 | * DDC line. The latter is more complex because with DVI<->HDMI adapters |
| 777 | * connected and the DVI port disconnected. If the edid doesn't | 777 | * you don't really know what's connected to which port as both are digital. |
| 778 | * say HDMI, vice versa. | ||
| 779 | */ | 778 | */ |
| 780 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { | 779 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { |
| 781 | struct drm_device *dev = connector->dev; | 780 | struct drm_device *dev = connector->dev; |
| 781 | struct radeon_device *rdev = dev->dev_private; | ||
| 782 | struct drm_connector *list_connector; | 782 | struct drm_connector *list_connector; |
| 783 | struct radeon_connector *list_radeon_connector; | 783 | struct radeon_connector *list_radeon_connector; |
| 784 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { | 784 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { |
| 785 | if (connector == list_connector) | 785 | if (connector == list_connector) |
| 786 | continue; | 786 | continue; |
| 787 | list_radeon_connector = to_radeon_connector(list_connector); | 787 | list_radeon_connector = to_radeon_connector(list_connector); |
| 788 | if (radeon_connector->devices == list_radeon_connector->devices) { | 788 | if (list_radeon_connector->shared_ddc && |
| 789 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 789 | (list_radeon_connector->ddc_bus->rec.i2c_id == |
| 790 | if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { | 790 | radeon_connector->ddc_bus->rec.i2c_id)) { |
| 791 | kfree(radeon_connector->edid); | 791 | /* cases where both connectors are digital */ |
| 792 | radeon_connector->edid = NULL; | 792 | if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { |
| 793 | ret = connector_status_disconnected; | 793 | /* hpd is our only option in this case */ |
| 794 | } | 794 | if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { |
| 795 | } else { | ||
| 796 | if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) || | ||
| 797 | (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) { | ||
| 798 | kfree(radeon_connector->edid); | 795 | kfree(radeon_connector->edid); |
| 799 | radeon_connector->edid = NULL; | 796 | radeon_connector->edid = NULL; |
| 800 | ret = connector_status_disconnected; | 797 | ret = connector_status_disconnected; |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index b7023fff89eb..4eb67c0e0996 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
| @@ -194,7 +194,7 @@ unpin: | |||
| 194 | fail: | 194 | fail: |
| 195 | drm_gem_object_unreference_unlocked(obj); | 195 | drm_gem_object_unreference_unlocked(obj); |
| 196 | 196 | ||
| 197 | return 0; | 197 | return ret; |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | int radeon_crtc_cursor_move(struct drm_crtc *crtc, | 200 | int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f10faed21567..dd279da90546 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -226,20 +226,20 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
| 226 | { | 226 | { |
| 227 | u64 size_af, size_bf; | 227 | u64 size_af, size_bf; |
| 228 | 228 | ||
| 229 | size_af = 0xFFFFFFFF - mc->vram_end; | 229 | size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; |
| 230 | size_bf = mc->vram_start; | 230 | size_bf = mc->vram_start & ~mc->gtt_base_align; |
| 231 | if (size_bf > size_af) { | 231 | if (size_bf > size_af) { |
| 232 | if (mc->gtt_size > size_bf) { | 232 | if (mc->gtt_size > size_bf) { |
| 233 | dev_warn(rdev->dev, "limiting GTT\n"); | 233 | dev_warn(rdev->dev, "limiting GTT\n"); |
| 234 | mc->gtt_size = size_bf; | 234 | mc->gtt_size = size_bf; |
| 235 | } | 235 | } |
| 236 | mc->gtt_start = mc->vram_start - mc->gtt_size; | 236 | mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; |
| 237 | } else { | 237 | } else { |
| 238 | if (mc->gtt_size > size_af) { | 238 | if (mc->gtt_size > size_af) { |
| 239 | dev_warn(rdev->dev, "limiting GTT\n"); | 239 | dev_warn(rdev->dev, "limiting GTT\n"); |
| 240 | mc->gtt_size = size_af; | 240 | mc->gtt_size = size_af; |
| 241 | } | 241 | } |
| 242 | mc->gtt_start = mc->vram_end + 1; | 242 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; |
| 243 | } | 243 | } |
| 244 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; | 244 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; |
| 245 | dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", | 245 | dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", |
| @@ -779,6 +779,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 779 | 779 | ||
| 780 | int radeon_resume_kms(struct drm_device *dev) | 780 | int radeon_resume_kms(struct drm_device *dev) |
| 781 | { | 781 | { |
| 782 | struct drm_connector *connector; | ||
| 782 | struct radeon_device *rdev = dev->dev_private; | 783 | struct radeon_device *rdev = dev->dev_private; |
| 783 | 784 | ||
| 784 | if (rdev->powered_down) | 785 | if (rdev->powered_down) |
| @@ -797,6 +798,12 @@ int radeon_resume_kms(struct drm_device *dev) | |||
| 797 | radeon_resume(rdev); | 798 | radeon_resume(rdev); |
| 798 | radeon_pm_resume(rdev); | 799 | radeon_pm_resume(rdev); |
| 799 | radeon_restore_bios_scratch_regs(rdev); | 800 | radeon_restore_bios_scratch_regs(rdev); |
| 801 | |||
| 802 | /* turn on display hw */ | ||
| 803 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 804 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
| 805 | } | ||
| 806 | |||
| 800 | radeon_fbdev_set_suspend(rdev, 0); | 807 | radeon_fbdev_set_suspend(rdev, 0); |
| 801 | release_console_sem(); | 808 | release_console_sem(); |
| 802 | 809 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 1ebb100015b7..e0b30b264c28 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -1072,6 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1072 | if (is_dig) { | 1072 | if (is_dig) { |
| 1073 | switch (mode) { | 1073 | switch (mode) { |
| 1074 | case DRM_MODE_DPMS_ON: | 1074 | case DRM_MODE_DPMS_ON: |
| 1075 | if (!ASIC_IS_DCE4(rdev)) | ||
| 1076 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
| 1075 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1077 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
| 1076 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1078 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 1077 | 1079 | ||
| @@ -1079,8 +1081,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1079 | if (ASIC_IS_DCE4(rdev)) | 1081 | if (ASIC_IS_DCE4(rdev)) |
| 1080 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | 1082 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); |
| 1081 | } | 1083 | } |
| 1082 | if (!ASIC_IS_DCE4(rdev)) | ||
| 1083 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
| 1084 | break; | 1084 | break; |
| 1085 | case DRM_MODE_DPMS_STANDBY: | 1085 | case DRM_MODE_DPMS_STANDBY: |
| 1086 | case DRM_MODE_DPMS_SUSPEND: | 1086 | case DRM_MODE_DPMS_SUSPEND: |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 6a70c0dc7f92..ab389f89fa8d 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -128,7 +128,8 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 128 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { | 128 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { |
| 129 | crtc = (struct drm_crtc *)minfo->crtcs[i]; | 129 | crtc = (struct drm_crtc *)minfo->crtcs[i]; |
| 130 | if (crtc && crtc->base.id == value) { | 130 | if (crtc && crtc->base.id == value) { |
| 131 | value = i; | 131 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 132 | value = radeon_crtc->crtc_id; | ||
| 132 | found = 1; | 133 | found = 1; |
| 133 | break; | 134 | break; |
| 134 | } | 135 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5b07b8848e09..5688a0cf6bbe 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
| 108 | udelay(panel_pwr_delay * 1000); | 108 | udelay(panel_pwr_delay * 1000); |
| 109 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); | 109 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); |
| 110 | WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); | 110 | WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); |
| 111 | udelay(panel_pwr_delay * 1000); | ||
| 111 | break; | 112 | break; |
| 112 | } | 113 | } |
| 113 | 114 | ||
| @@ -928,16 +929,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 928 | if (ASIC_IS_R300(rdev)) { | 929 | if (ASIC_IS_R300(rdev)) { |
| 929 | gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; | 930 | gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; |
| 930 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); | 931 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); |
| 931 | } | 932 | } else if (rdev->family != CHIP_R200) |
| 932 | |||
| 933 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) | ||
| 934 | disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); | ||
| 935 | else | ||
| 936 | disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); | 933 | disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); |
| 937 | 934 | else if (rdev->family == CHIP_R200) | |
| 938 | if (rdev->family == CHIP_R200) | ||
| 939 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 935 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
| 940 | 936 | ||
| 937 | if (rdev->family >= CHIP_R200) | ||
| 938 | disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); | ||
| 939 | |||
| 941 | if (is_tv) { | 940 | if (is_tv) { |
| 942 | uint32_t dac_cntl; | 941 | uint32_t dac_cntl; |
| 943 | 942 | ||
| @@ -1002,15 +1001,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 1002 | if (ASIC_IS_R300(rdev)) { | 1001 | if (ASIC_IS_R300(rdev)) { |
| 1003 | WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); | 1002 | WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); |
| 1004 | WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); | 1003 | WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); |
| 1005 | } | 1004 | } else if (rdev->family != CHIP_R200) |
| 1005 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | ||
| 1006 | else if (rdev->family == CHIP_R200) | ||
| 1007 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | ||
| 1006 | 1008 | ||
| 1007 | if (rdev->family >= CHIP_R200) | 1009 | if (rdev->family >= CHIP_R200) |
| 1008 | WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); | 1010 | WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); |
| 1009 | else | ||
| 1010 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | ||
| 1011 | |||
| 1012 | if (rdev->family == CHIP_R200) | ||
| 1013 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | ||
| 1014 | 1011 | ||
| 1015 | if (is_tv) | 1012 | if (is_tv) |
| 1016 | radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); | 1013 | radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index f2ed27c8055b..032040397743 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
| @@ -642,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, | |||
| 642 | } | 642 | } |
| 643 | flicker_removal = (tmp + 500) / 1000; | 643 | flicker_removal = (tmp + 500) / 1000; |
| 644 | 644 | ||
| 645 | if (flicker_removal < 2) | 645 | if (flicker_removal < 3) |
| 646 | flicker_removal = 2; | 646 | flicker_removal = 3; |
| 647 | for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { | 647 | for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { |
| 648 | if (flicker_removal == SLOPE_limit[i]) | 648 | if (flicker_removal == SLOPE_limit[i]) |
| 649 | break; | 649 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 67358baf28b2..95696aa57ac8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -206,6 +206,7 @@ enum radeon_connector_table { | |||
| 206 | CT_MINI_INTERNAL, | 206 | CT_MINI_INTERNAL, |
| 207 | CT_IMAC_G5_ISIGHT, | 207 | CT_IMAC_G5_ISIGHT, |
| 208 | CT_EMAC, | 208 | CT_EMAC, |
| 209 | CT_RN50_POWER, | ||
| 209 | }; | 210 | }; |
| 210 | 211 | ||
| 211 | enum radeon_dvo_chip { | 212 | enum radeon_dvo_chip { |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 63f679a04b25..3fa6984d9896 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -333,6 +333,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev, | |||
| 333 | return snprintf(buf, PAGE_SIZE, "%s\n", | 333 | return snprintf(buf, PAGE_SIZE, "%s\n", |
| 334 | (cp == PM_PROFILE_AUTO) ? "auto" : | 334 | (cp == PM_PROFILE_AUTO) ? "auto" : |
| 335 | (cp == PM_PROFILE_LOW) ? "low" : | 335 | (cp == PM_PROFILE_LOW) ? "low" : |
| 336 | (cp == PM_PROFILE_MID) ? "mid" : | ||
| 336 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); | 337 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); |
| 337 | } | 338 | } |
| 338 | 339 | ||
| @@ -397,13 +398,20 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
| 397 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 398 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
| 398 | mutex_unlock(&rdev->pm.mutex); | 399 | mutex_unlock(&rdev->pm.mutex); |
| 399 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { | 400 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { |
| 401 | bool flush_wq = false; | ||
| 402 | |||
| 400 | mutex_lock(&rdev->pm.mutex); | 403 | mutex_lock(&rdev->pm.mutex); |
| 401 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 404 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
| 405 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
| 406 | flush_wq = true; | ||
| 407 | } | ||
| 402 | /* disable dynpm */ | 408 | /* disable dynpm */ |
| 403 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 409 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
| 404 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 410 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
| 405 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | 411 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| 406 | mutex_unlock(&rdev->pm.mutex); | 412 | mutex_unlock(&rdev->pm.mutex); |
| 413 | if (flush_wq) | ||
| 414 | flush_workqueue(rdev->wq); | ||
| 407 | } else { | 415 | } else { |
| 408 | DRM_ERROR("invalid power method!\n"); | 416 | DRM_ERROR("invalid power method!\n"); |
| 409 | goto fail; | 417 | goto fail; |
| @@ -418,9 +426,18 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon | |||
| 418 | 426 | ||
| 419 | void radeon_pm_suspend(struct radeon_device *rdev) | 427 | void radeon_pm_suspend(struct radeon_device *rdev) |
| 420 | { | 428 | { |
| 429 | bool flush_wq = false; | ||
| 430 | |||
| 421 | mutex_lock(&rdev->pm.mutex); | 431 | mutex_lock(&rdev->pm.mutex); |
| 422 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | 432 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
| 433 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
| 434 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) | ||
| 435 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; | ||
| 436 | flush_wq = true; | ||
| 437 | } | ||
| 423 | mutex_unlock(&rdev->pm.mutex); | 438 | mutex_unlock(&rdev->pm.mutex); |
| 439 | if (flush_wq) | ||
| 440 | flush_workqueue(rdev->wq); | ||
| 424 | } | 441 | } |
| 425 | 442 | ||
| 426 | void radeon_pm_resume(struct radeon_device *rdev) | 443 | void radeon_pm_resume(struct radeon_device *rdev) |
| @@ -432,6 +449,12 @@ void radeon_pm_resume(struct radeon_device *rdev) | |||
| 432 | rdev->pm.current_sclk = rdev->clock.default_sclk; | 449 | rdev->pm.current_sclk = rdev->clock.default_sclk; |
| 433 | rdev->pm.current_mclk = rdev->clock.default_mclk; | 450 | rdev->pm.current_mclk = rdev->clock.default_mclk; |
| 434 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 451 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
| 452 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | ||
| 453 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | ||
| 454 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | ||
| 455 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
| 456 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
| 457 | } | ||
| 435 | mutex_unlock(&rdev->pm.mutex); | 458 | mutex_unlock(&rdev->pm.mutex); |
| 436 | radeon_pm_compute_clocks(rdev); | 459 | radeon_pm_compute_clocks(rdev); |
| 437 | } | 460 | } |
| @@ -486,6 +509,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 486 | void radeon_pm_fini(struct radeon_device *rdev) | 509 | void radeon_pm_fini(struct radeon_device *rdev) |
| 487 | { | 510 | { |
| 488 | if (rdev->pm.num_power_states > 1) { | 511 | if (rdev->pm.num_power_states > 1) { |
| 512 | bool flush_wq = false; | ||
| 513 | |||
| 489 | mutex_lock(&rdev->pm.mutex); | 514 | mutex_lock(&rdev->pm.mutex); |
| 490 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 515 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
| 491 | rdev->pm.profile = PM_PROFILE_DEFAULT; | 516 | rdev->pm.profile = PM_PROFILE_DEFAULT; |
| @@ -493,13 +518,16 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
| 493 | radeon_pm_set_clocks(rdev); | 518 | radeon_pm_set_clocks(rdev); |
| 494 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 519 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
| 495 | /* cancel work */ | 520 | /* cancel work */ |
| 496 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); | 521 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
| 522 | flush_wq = true; | ||
| 497 | /* reset default clocks */ | 523 | /* reset default clocks */ |
| 498 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 524 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
| 499 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 525 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
| 500 | radeon_pm_set_clocks(rdev); | 526 | radeon_pm_set_clocks(rdev); |
| 501 | } | 527 | } |
| 502 | mutex_unlock(&rdev->pm.mutex); | 528 | mutex_unlock(&rdev->pm.mutex); |
| 529 | if (flush_wq) | ||
| 530 | flush_workqueue(rdev->wq); | ||
| 503 | 531 | ||
| 504 | device_remove_file(rdev->dev, &dev_attr_power_profile); | 532 | device_remove_file(rdev->dev, &dev_attr_power_profile); |
| 505 | device_remove_file(rdev->dev, &dev_attr_power_method); | 533 | device_remove_file(rdev->dev, &dev_attr_power_method); |
| @@ -720,12 +748,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) | |||
| 720 | radeon_pm_get_dynpm_state(rdev); | 748 | radeon_pm_get_dynpm_state(rdev); |
| 721 | radeon_pm_set_clocks(rdev); | 749 | radeon_pm_set_clocks(rdev); |
| 722 | } | 750 | } |
| 751 | |||
| 752 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
| 753 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
| 723 | } | 754 | } |
| 724 | mutex_unlock(&rdev->pm.mutex); | 755 | mutex_unlock(&rdev->pm.mutex); |
| 725 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 756 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
| 726 | |||
| 727 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
| 728 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
| 729 | } | 757 | } |
| 730 | 758 | ||
| 731 | /* | 759 | /* |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index b5c757f68d3c..f78fd592544d 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
| @@ -80,8 +80,8 @@ evergreen 0x9400 | |||
| 80 | 0x00028010 DB_RENDER_OVERRIDE2 | 80 | 0x00028010 DB_RENDER_OVERRIDE2 |
| 81 | 0x00028028 DB_STENCIL_CLEAR | 81 | 0x00028028 DB_STENCIL_CLEAR |
| 82 | 0x0002802C DB_DEPTH_CLEAR | 82 | 0x0002802C DB_DEPTH_CLEAR |
| 83 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | ||
| 84 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL | 83 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL |
| 84 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | ||
| 85 | 0x0002805C DB_DEPTH_SLICE | 85 | 0x0002805C DB_DEPTH_SLICE |
| 86 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 | 86 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 |
| 87 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 | 87 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 |
| @@ -460,8 +460,8 @@ evergreen 0x9400 | |||
| 460 | 0x00028844 SQ_PGM_RESOURCES_PS | 460 | 0x00028844 SQ_PGM_RESOURCES_PS |
| 461 | 0x00028848 SQ_PGM_RESOURCES_2_PS | 461 | 0x00028848 SQ_PGM_RESOURCES_2_PS |
| 462 | 0x0002884C SQ_PGM_EXPORTS_PS | 462 | 0x0002884C SQ_PGM_EXPORTS_PS |
| 463 | 0x0002885C SQ_PGM_RESOURCES_VS | 463 | 0x00028860 SQ_PGM_RESOURCES_VS |
| 464 | 0x00028860 SQ_PGM_RESOURCES_2_VS | 464 | 0x00028864 SQ_PGM_RESOURCES_2_VS |
| 465 | 0x00028878 SQ_PGM_RESOURCES_GS | 465 | 0x00028878 SQ_PGM_RESOURCES_GS |
| 466 | 0x0002887C SQ_PGM_RESOURCES_2_GS | 466 | 0x0002887C SQ_PGM_RESOURCES_2_GS |
| 467 | 0x00028890 SQ_PGM_RESOURCES_ES | 467 | 0x00028890 SQ_PGM_RESOURCES_ES |
| @@ -469,8 +469,8 @@ evergreen 0x9400 | |||
| 469 | 0x000288A8 SQ_PGM_RESOURCES_FS | 469 | 0x000288A8 SQ_PGM_RESOURCES_FS |
| 470 | 0x000288BC SQ_PGM_RESOURCES_HS | 470 | 0x000288BC SQ_PGM_RESOURCES_HS |
| 471 | 0x000288C0 SQ_PGM_RESOURCES_2_HS | 471 | 0x000288C0 SQ_PGM_RESOURCES_2_HS |
| 472 | 0x000288D0 SQ_PGM_RESOURCES_LS | 472 | 0x000288D4 SQ_PGM_RESOURCES_LS |
| 473 | 0x000288D4 SQ_PGM_RESOURCES_2_LS | 473 | 0x000288D8 SQ_PGM_RESOURCES_2_LS |
| 474 | 0x000288E8 SQ_LDS_ALLOC | 474 | 0x000288E8 SQ_LDS_ALLOC |
| 475 | 0x000288EC SQ_LDS_ALLOC_PS | 475 | 0x000288EC SQ_LDS_ALLOC_PS |
| 476 | 0x000288F0 SQ_VTX_SEMANTIC_CLEAR | 476 | 0x000288F0 SQ_VTX_SEMANTIC_CLEAR |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9e4240b3bf0b..f454c9a5e7f2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -57,7 +57,9 @@ void rs400_gart_adjust_size(struct radeon_device *rdev) | |||
| 57 | } | 57 | } |
| 58 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { | 58 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { |
| 59 | /* FIXME: RS400 & RS480 seems to have issue with GART size | 59 | /* FIXME: RS400 & RS480 seems to have issue with GART size |
| 60 | * if 4G of system memory (needs more testing) */ | 60 | * if 4G of system memory (needs more testing) |
| 61 | */ | ||
| 62 | /* XXX is this still an issue with proper alignment? */ | ||
| 61 | rdev->mc.gtt_size = 32 * 1024 * 1024; | 63 | rdev->mc.gtt_size = 32 * 1024 * 1024; |
| 62 | DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); | 64 | DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); |
| 63 | } | 65 | } |
| @@ -263,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev) | |||
| 263 | r100_vram_init_sizes(rdev); | 265 | r100_vram_init_sizes(rdev); |
| 264 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 266 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
| 265 | radeon_vram_location(rdev, &rdev->mc, base); | 267 | radeon_vram_location(rdev, &rdev->mc, base); |
| 268 | rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; | ||
| 266 | radeon_gtt_location(rdev, &rdev->mc); | 269 | radeon_gtt_location(rdev, &rdev->mc); |
| 267 | radeon_update_bandwidth_info(rdev); | 270 | radeon_update_bandwidth_info(rdev); |
| 268 | } | 271 | } |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 7bb4c3e52f3b..6dc15ea8ba33 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -698,6 +698,7 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
| 698 | base = G_000004_MC_FB_START(base) << 16; | 698 | base = G_000004_MC_FB_START(base) << 16; |
| 699 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 699 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 700 | radeon_vram_location(rdev, &rdev->mc, base); | 700 | radeon_vram_location(rdev, &rdev->mc, base); |
| 701 | rdev->mc.gtt_base_align = 0; | ||
| 701 | radeon_gtt_location(rdev, &rdev->mc); | 702 | radeon_gtt_location(rdev, &rdev->mc); |
| 702 | radeon_update_bandwidth_info(rdev); | 703 | radeon_update_bandwidth_info(rdev); |
| 703 | } | 704 | } |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index bcc33195ebc2..ce4ecbe10816 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -79,7 +79,13 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
| 79 | tmp.full = dfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
| 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); |
| 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
| 82 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 82 | if (info->info.usK8MemoryClock) |
| 83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | ||
| 84 | else if (rdev->clock.default_mclk) { | ||
| 85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | ||
| 86 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
| 87 | } else | ||
| 88 | rdev->pm.igp_system_mclk.full = dfixed_const(400); | ||
| 83 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); | 89 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); |
| 84 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); | 90 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); |
| 85 | break; | 91 | break; |
| @@ -87,34 +93,31 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
| 87 | tmp.full = dfixed_const(100); | 93 | tmp.full = dfixed_const(100); |
| 88 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); | 94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); |
| 89 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
| 90 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | 96 | if (info->info_v2.ulBootUpUMAClock) |
| 97 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | ||
| 98 | else if (rdev->clock.default_mclk) | ||
| 99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | ||
| 100 | else | ||
| 101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); | ||
| 91 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | 102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
| 92 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); | 103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); |
| 93 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
| 94 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
| 95 | break; | 106 | break; |
| 96 | default: | 107 | default: |
| 97 | tmp.full = dfixed_const(100); | ||
| 98 | /* We assume the slower possible clock ie worst case */ | 108 | /* We assume the slower possible clock ie worst case */ |
| 99 | /* DDR 333Mhz */ | 109 | rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
| 100 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); | 110 | rdev->pm.igp_system_mclk.full = dfixed_const(200); |
| 101 | /* FIXME: system clock ? */ | 111 | rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
| 102 | rdev->pm.igp_system_mclk.full = dfixed_const(100); | ||
| 103 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
| 104 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); | ||
| 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); | 112 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
| 106 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 113 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
| 107 | break; | 114 | break; |
| 108 | } | 115 | } |
| 109 | } else { | 116 | } else { |
| 110 | tmp.full = dfixed_const(100); | ||
| 111 | /* We assume the slower possible clock ie worst case */ | 117 | /* We assume the slower possible clock ie worst case */ |
| 112 | /* DDR 333Mhz */ | 118 | rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
| 113 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); | 119 | rdev->pm.igp_system_mclk.full = dfixed_const(200); |
| 114 | /* FIXME: system clock ? */ | 120 | rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
| 115 | rdev->pm.igp_system_mclk.full = dfixed_const(100); | ||
| 116 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
| 117 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); | ||
| 118 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); | 121 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
| 119 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 122 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
| 120 | } | 123 | } |
| @@ -159,6 +162,7 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
| 159 | rs690_pm_info(rdev); | 162 | rs690_pm_info(rdev); |
| 160 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 161 | radeon_vram_location(rdev, &rdev->mc, base); | 164 | radeon_vram_location(rdev, &rdev->mc, base); |
| 165 | rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; | ||
| 162 | radeon_gtt_location(rdev, &rdev->mc); | 166 | radeon_gtt_location(rdev, &rdev->mc); |
| 163 | radeon_update_bandwidth_info(rdev); | 167 | radeon_update_bandwidth_info(rdev); |
| 164 | } | 168 | } |
| @@ -228,10 +232,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 228 | fixed20_12 a, b, c; | 232 | fixed20_12 a, b, c; |
| 229 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | 233 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; |
| 230 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | 234 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; |
| 231 | /* FIXME: detect IGP with sideport memory, i don't think there is any | ||
| 232 | * such product available | ||
| 233 | */ | ||
| 234 | bool sideport = false; | ||
| 235 | 235 | ||
| 236 | if (!crtc->base.enabled) { | 236 | if (!crtc->base.enabled) { |
| 237 | /* FIXME: wouldn't it better to set priority mark to maximum */ | 237 | /* FIXME: wouldn't it better to set priority mark to maximum */ |
| @@ -300,7 +300,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 300 | 300 | ||
| 301 | /* Maximun bandwidth is the minimun bandwidth of all component */ | 301 | /* Maximun bandwidth is the minimun bandwidth of all component */ |
| 302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | 302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; |
| 303 | if (sideport) { | 303 | if (rdev->mc.igp_sideport_enabled) { |
| 304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | 304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && |
| 305 | rdev->pm.sideport_bandwidth.full) | 305 | rdev->pm.sideport_bandwidth.full) |
| 306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | 306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 7d9a7b0a180a..0c9c169a6852 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -195,6 +195,7 @@ void rv515_mc_init(struct radeon_device *rdev) | |||
| 195 | rv515_vram_get_type(rdev); | 195 | rv515_vram_get_type(rdev); |
| 196 | r100_vram_init_sizes(rdev); | 196 | r100_vram_init_sizes(rdev); |
| 197 | radeon_vram_location(rdev, &rdev->mc, 0); | 197 | radeon_vram_location(rdev, &rdev->mc, 0); |
| 198 | rdev->mc.gtt_base_align = 0; | ||
| 198 | if (!(rdev->flags & RADEON_IS_AGP)) | 199 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 199 | radeon_gtt_location(rdev, &rdev->mc); | 200 | radeon_gtt_location(rdev, &rdev->mc); |
| 200 | radeon_update_bandwidth_info(rdev); | 201 | radeon_update_bandwidth_info(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index cec536c222c5..b7fd82064922 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -224,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
| 224 | WREG32(MC_VM_FB_LOCATION, tmp); | 224 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 225 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 225 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 226 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 226 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 227 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 227 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
| 228 | if (rdev->flags & RADEON_IS_AGP) { | 228 | if (rdev->flags & RADEON_IS_AGP) { |
| 229 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 229 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
| 230 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 230 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ef910694bd63..ca904799f018 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
| @@ -40,11 +40,13 @@ | |||
| 40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
| 41 | 41 | ||
| 42 | #include <asm/atomic.h> | 42 | #include <asm/atomic.h> |
| 43 | #include <asm/agp.h> | ||
| 44 | 43 | ||
| 45 | #include "ttm/ttm_bo_driver.h" | 44 | #include "ttm/ttm_bo_driver.h" |
| 46 | #include "ttm/ttm_page_alloc.h" | 45 | #include "ttm/ttm_page_alloc.h" |
| 47 | 46 | ||
| 47 | #ifdef TTM_HAS_AGP | ||
| 48 | #include <asm/agp.h> | ||
| 49 | #endif | ||
| 48 | 50 | ||
| 49 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) | 51 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) |
| 50 | #define SMALL_ALLOCATION 16 | 52 | #define SMALL_ALLOCATION 16 |
| @@ -104,7 +106,6 @@ struct ttm_pool_opts { | |||
| 104 | struct ttm_pool_manager { | 106 | struct ttm_pool_manager { |
| 105 | struct kobject kobj; | 107 | struct kobject kobj; |
| 106 | struct shrinker mm_shrink; | 108 | struct shrinker mm_shrink; |
| 107 | atomic_t page_alloc_inited; | ||
| 108 | struct ttm_pool_opts options; | 109 | struct ttm_pool_opts options; |
| 109 | 110 | ||
| 110 | union { | 111 | union { |
| @@ -142,7 +143,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj) | |||
| 142 | { | 143 | { |
| 143 | struct ttm_pool_manager *m = | 144 | struct ttm_pool_manager *m = |
| 144 | container_of(kobj, struct ttm_pool_manager, kobj); | 145 | container_of(kobj, struct ttm_pool_manager, kobj); |
| 145 | (void)m; | 146 | kfree(m); |
| 146 | } | 147 | } |
| 147 | 148 | ||
| 148 | static ssize_t ttm_pool_store(struct kobject *kobj, | 149 | static ssize_t ttm_pool_store(struct kobject *kobj, |
| @@ -214,9 +215,7 @@ static struct kobj_type ttm_pool_kobj_type = { | |||
| 214 | .default_attrs = ttm_pool_attrs, | 215 | .default_attrs = ttm_pool_attrs, |
| 215 | }; | 216 | }; |
| 216 | 217 | ||
| 217 | static struct ttm_pool_manager _manager = { | 218 | static struct ttm_pool_manager *_manager; |
| 218 | .page_alloc_inited = ATOMIC_INIT(0) | ||
| 219 | }; | ||
| 220 | 219 | ||
| 221 | #ifndef CONFIG_X86 | 220 | #ifndef CONFIG_X86 |
| 222 | static int set_pages_array_wb(struct page **pages, int addrinarray) | 221 | static int set_pages_array_wb(struct page **pages, int addrinarray) |
| @@ -271,7 +270,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags, | |||
| 271 | if (flags & TTM_PAGE_FLAG_DMA32) | 270 | if (flags & TTM_PAGE_FLAG_DMA32) |
| 272 | pool_index |= 0x2; | 271 | pool_index |= 0x2; |
| 273 | 272 | ||
| 274 | return &_manager.pools[pool_index]; | 273 | return &_manager->pools[pool_index]; |
| 275 | } | 274 | } |
| 276 | 275 | ||
| 277 | /* set memory back to wb and free the pages. */ | 276 | /* set memory back to wb and free the pages. */ |
| @@ -387,7 +386,7 @@ static int ttm_pool_get_num_unused_pages(void) | |||
| 387 | unsigned i; | 386 | unsigned i; |
| 388 | int total = 0; | 387 | int total = 0; |
| 389 | for (i = 0; i < NUM_POOLS; ++i) | 388 | for (i = 0; i < NUM_POOLS; ++i) |
| 390 | total += _manager.pools[i].npages; | 389 | total += _manager->pools[i].npages; |
| 391 | 390 | ||
| 392 | return total; | 391 | return total; |
| 393 | } | 392 | } |
| @@ -395,7 +394,7 @@ static int ttm_pool_get_num_unused_pages(void) | |||
| 395 | /** | 394 | /** |
| 396 | * Callback for mm to request pool to reduce number of page held. | 395 | * Callback for mm to request pool to reduce number of page held. |
| 397 | */ | 396 | */ |
| 398 | static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) | 397 | static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) |
| 399 | { | 398 | { |
| 400 | static atomic_t start_pool = ATOMIC_INIT(0); | 399 | static atomic_t start_pool = ATOMIC_INIT(0); |
| 401 | unsigned i; | 400 | unsigned i; |
| @@ -408,7 +407,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) | |||
| 408 | unsigned nr_free = shrink_pages; | 407 | unsigned nr_free = shrink_pages; |
| 409 | if (shrink_pages == 0) | 408 | if (shrink_pages == 0) |
| 410 | break; | 409 | break; |
| 411 | pool = &_manager.pools[(i + pool_offset)%NUM_POOLS]; | 410 | pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; |
| 412 | shrink_pages = ttm_page_pool_free(pool, nr_free); | 411 | shrink_pages = ttm_page_pool_free(pool, nr_free); |
| 413 | } | 412 | } |
| 414 | /* return estimated number of unused pages in pool */ | 413 | /* return estimated number of unused pages in pool */ |
| @@ -576,10 +575,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
| 576 | 575 | ||
| 577 | /* If allocation request is small and there is not enough | 576 | /* If allocation request is small and there is not enough |
| 578 | * pages in pool we fill the pool first */ | 577 | * pages in pool we fill the pool first */ |
| 579 | if (count < _manager.options.small | 578 | if (count < _manager->options.small |
| 580 | && count > pool->npages) { | 579 | && count > pool->npages) { |
| 581 | struct list_head new_pages; | 580 | struct list_head new_pages; |
| 582 | unsigned alloc_size = _manager.options.alloc_size; | 581 | unsigned alloc_size = _manager->options.alloc_size; |
| 583 | 582 | ||
| 584 | /** | 583 | /** |
| 585 | * Can't change page caching if in irqsave context. We have to | 584 | * Can't change page caching if in irqsave context. We have to |
| @@ -667,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 667 | { | 666 | { |
| 668 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
| 669 | struct page *p = NULL; | 668 | struct page *p = NULL; |
| 670 | int gfp_flags = 0; | 669 | int gfp_flags = GFP_USER; |
| 671 | int r; | 670 | int r; |
| 672 | 671 | ||
| 673 | /* set zero flag for page allocation if required */ | 672 | /* set zero flag for page allocation if required */ |
| @@ -759,8 +758,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, | |||
| 759 | pool->npages += page_count; | 758 | pool->npages += page_count; |
| 760 | /* Check that we don't go over the pool limit */ | 759 | /* Check that we don't go over the pool limit */ |
| 761 | page_count = 0; | 760 | page_count = 0; |
| 762 | if (pool->npages > _manager.options.max_size) { | 761 | if (pool->npages > _manager->options.max_size) { |
| 763 | page_count = pool->npages - _manager.options.max_size; | 762 | page_count = pool->npages - _manager->options.max_size; |
| 764 | /* free at least NUM_PAGES_TO_ALLOC number of pages | 763 | /* free at least NUM_PAGES_TO_ALLOC number of pages |
| 765 | * to reduce calls to set_memory_wb */ | 764 | * to reduce calls to set_memory_wb */ |
| 766 | if (page_count < NUM_PAGES_TO_ALLOC) | 765 | if (page_count < NUM_PAGES_TO_ALLOC) |
| @@ -785,33 +784,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, | |||
| 785 | int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | 784 | int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) |
| 786 | { | 785 | { |
| 787 | int ret; | 786 | int ret; |
| 788 | if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) | 787 | |
| 789 | return 0; | 788 | WARN_ON(_manager); |
| 790 | 789 | ||
| 791 | printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); | 790 | printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); |
| 792 | 791 | ||
| 793 | ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); | 792 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); |
| 793 | |||
| 794 | ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); | ||
| 794 | 795 | ||
| 795 | ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc"); | 796 | ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc"); |
| 796 | 797 | ||
| 797 | ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32, | 798 | ttm_page_pool_init_locked(&_manager->wc_pool_dma32, |
| 798 | "wc dma"); | 799 | GFP_USER | GFP_DMA32, "wc dma"); |
| 799 | 800 | ||
| 800 | ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32, | 801 | ttm_page_pool_init_locked(&_manager->uc_pool_dma32, |
| 801 | "uc dma"); | 802 | GFP_USER | GFP_DMA32, "uc dma"); |
| 802 | 803 | ||
| 803 | _manager.options.max_size = max_pages; | 804 | _manager->options.max_size = max_pages; |
| 804 | _manager.options.small = SMALL_ALLOCATION; | 805 | _manager->options.small = SMALL_ALLOCATION; |
| 805 | _manager.options.alloc_size = NUM_PAGES_TO_ALLOC; | 806 | _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; |
| 806 | 807 | ||
| 807 | kobject_init(&_manager.kobj, &ttm_pool_kobj_type); | 808 | ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, |
| 808 | ret = kobject_add(&_manager.kobj, &glob->kobj, "pool"); | 809 | &glob->kobj, "pool"); |
| 809 | if (unlikely(ret != 0)) { | 810 | if (unlikely(ret != 0)) { |
| 810 | kobject_put(&_manager.kobj); | 811 | kobject_put(&_manager->kobj); |
| 812 | _manager = NULL; | ||
| 811 | return ret; | 813 | return ret; |
| 812 | } | 814 | } |
| 813 | 815 | ||
| 814 | ttm_pool_mm_shrink_init(&_manager); | 816 | ttm_pool_mm_shrink_init(_manager); |
| 815 | 817 | ||
| 816 | return 0; | 818 | return 0; |
| 817 | } | 819 | } |
| @@ -820,16 +822,14 @@ void ttm_page_alloc_fini() | |||
| 820 | { | 822 | { |
| 821 | int i; | 823 | int i; |
| 822 | 824 | ||
| 823 | if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) | ||
| 824 | return; | ||
| 825 | |||
| 826 | printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); | 825 | printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); |
| 827 | ttm_pool_mm_shrink_fini(&_manager); | 826 | ttm_pool_mm_shrink_fini(_manager); |
| 828 | 827 | ||
| 829 | for (i = 0; i < NUM_POOLS; ++i) | 828 | for (i = 0; i < NUM_POOLS; ++i) |
| 830 | ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES); | 829 | ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); |
| 831 | 830 | ||
| 832 | kobject_put(&_manager.kobj); | 831 | kobject_put(&_manager->kobj); |
| 832 | _manager = NULL; | ||
| 833 | } | 833 | } |
| 834 | 834 | ||
| 835 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) | 835 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) |
| @@ -837,14 +837,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data) | |||
| 837 | struct ttm_page_pool *p; | 837 | struct ttm_page_pool *p; |
| 838 | unsigned i; | 838 | unsigned i; |
| 839 | char *h[] = {"pool", "refills", "pages freed", "size"}; | 839 | char *h[] = {"pool", "refills", "pages freed", "size"}; |
| 840 | if (atomic_read(&_manager.page_alloc_inited) == 0) { | 840 | if (!_manager) { |
| 841 | seq_printf(m, "No pool allocator running.\n"); | 841 | seq_printf(m, "No pool allocator running.\n"); |
| 842 | return 0; | 842 | return 0; |
| 843 | } | 843 | } |
| 844 | seq_printf(m, "%6s %12s %13s %8s\n", | 844 | seq_printf(m, "%6s %12s %13s %8s\n", |
| 845 | h[0], h[1], h[2], h[3]); | 845 | h[0], h[1], h[2], h[3]); |
| 846 | for (i = 0; i < NUM_POOLS; ++i) { | 846 | for (i = 0; i < NUM_POOLS; ++i) { |
| 847 | p = &_manager.pools[i]; | 847 | p = &_manager->pools[i]; |
| 848 | 848 | ||
| 849 | seq_printf(m, "%6s %12ld %13ld %8d\n", | 849 | seq_printf(m, "%6s %12ld %13ld %8d\n", |
| 850 | p->name, p->nrefills, | 850 | p->name, p->nrefills, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f1d626112415..437ac786277a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -972,6 +972,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
| 972 | ret = copy_from_user(rects, user_rects, rects_size); | 972 | ret = copy_from_user(rects, user_rects, rects_size); |
| 973 | if (unlikely(ret != 0)) { | 973 | if (unlikely(ret != 0)) { |
| 974 | DRM_ERROR("Failed to get rects.\n"); | 974 | DRM_ERROR("Failed to get rects.\n"); |
| 975 | ret = -EFAULT; | ||
| 975 | goto out_free; | 976 | goto out_free; |
| 976 | } | 977 | } |
| 977 | 978 | ||
