Diffstat (limited to 'drivers/gpu')
93 files changed, 7140 insertions, 1752 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 994d23beeb1d..57cea01c4ffb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1840,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 
 		ret = copy_from_user(clips, clips_ptr,
 				     num_clips * sizeof(*clips));
-		if (ret)
+		if (ret) {
+			ret = -EFAULT;
 			goto out_err2;
+		}
 	}
 
 	if (fb->funcs->dirty) {
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 764401951041..9b2a54117c91 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work)
 	}
 }
 
-void drm_kms_helper_poll_init(struct drm_device *dev)
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+	if (!dev->mode_config.poll_enabled)
+		return;
+	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-	struct drm_connector *connector;
 	bool poll = false;
+	struct drm_connector *connector;
 	int ret;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->polled)
 			poll = true;
 	}
-	slow_work_register_user(THIS_MODULE);
-	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
-			       &output_poll_ops);
 
 	if (poll) {
 		ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
 			DRM_ERROR("delayed enqueue failed %d\n", ret);
 	}
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+	slow_work_register_user(THIS_MODULE);
+	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+			       &output_poll_ops);
+	dev->mode_config.poll_enabled = true;
+
+	drm_kms_helper_poll_enable(dev);
+}
 EXPORT_SYMBOL(drm_kms_helper_poll_init);
 
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
-	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+	drm_kms_helper_poll_disable(dev);
 	slow_work_unregister_user(THIS_MODULE);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
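The drm_crtc_helper.c change above splits output polling into separate enable/disable entry points; the new mode_config.poll_enabled flag makes drm_kms_helper_poll_disable() safe to call even when polling was never initialized. A hypothetical driver's power-transition path using the new helpers might look like this (a sketch; foo_suspend/foo_resume are made-up names):

	static int foo_suspend(struct drm_device *dev)
	{
		drm_kms_helper_poll_disable(dev);	/* stop connector polling while asleep */
		/* ... power the hardware down ... */
		return 0;
	}

	static int foo_resume(struct drm_device *dev)
	{
		/* ... power the hardware back up ... */
		drm_kms_helper_poll_enable(dev);	/* re-arm polling if any connector wants it */
		return 0;
	}

The i915 vga_switcheroo hook later in this series brackets its suspend/resume calls in exactly this way.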
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1981861bbbd..f87bf104df7a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
 		mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
 				    false);
 		mode->hdisplay = 1366;
-		mode->vsync_start = mode->vsync_start - 1;
-		mode->vsync_end = mode->vsync_end - 1;
+		mode->hsync_start = mode->hsync_start - 1;
+		mode->hsync_end = mode->hsync_end - 1;
 		return mode;
 	}
 
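The drm_edid.c fix corrects which sync fields the special-case 1366x768 fixup touches: CVT timings are computed in 8-pixel character cells, so a 1366-pixel-wide mode cannot come straight out of drm_cvt_mode(); the code forces hdisplay back to 1366 and must then nudge the horizontal sync values, but the old code adjusted vsync_start/vsync_end by mistake. The intended net effect, sketched outside the diff:

	mode = drm_cvt_mode(dev, 1366, 768, 60, 0, 0, false);
	mode->hdisplay = 1366;	/* undo CVT's 8-pixel rounding of the active width */
	mode->hsync_start -= 1;	/* keep the *horizontal* sync window consistent */
	mode->hsync_end -= 1;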
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b3779d243aef..719662034bbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -146,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
 			cvt = 1;
 			break;
 		case 'R':
-			if (!cvt)
+			if (cvt)
 				rb = 1;
 			break;
 		case 'm':
@@ -264,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void)
 int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
 			void *panic_str)
 {
-	DRM_ERROR("panic occurred, switching back to text console\n");
+	printk(KERN_ERR "panic occurred, switching back to text console\n");
 	return drm_fb_helper_force_kernel_mode();
 	return 0;
 }
@@ -315,8 +315,9 @@ static void drm_fb_helper_on(struct fb_info *info)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	int i;
+	int i, j;
 
 	/*
 	 * For each CRTC in this fb, turn the crtc on then,
@@ -332,7 +333,14 @@ static void drm_fb_helper_on(struct fb_info *info)
 
 		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
-
+		/* Walk the connectors & encoders on this fb turning them on */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			connector = fb_helper->connector_info[j]->connector;
+			connector->dpms = DRM_MODE_DPMS_ON;
+			drm_connector_property_set_value(connector,
+							 dev->mode_config.dpms_property,
+							 DRM_MODE_DPMS_ON);
+		}
 		/* Found a CRTC on this fb, now find encoders */
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			if (encoder->crtc == crtc) {
@@ -352,8 +360,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	int i;
+	int i, j;
 
 	/*
 	 * For each CRTC in this fb, find all associated encoders
@@ -367,6 +376,14 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 		if (!crtc->enabled)
 			continue;
 
+		/* Walk the connectors on this fb and mark them off */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			connector = fb_helper->connector_info[j]->connector;
+			connector->dpms = dpms_mode;
+			drm_connector_property_set_value(connector,
+							 dev->mode_config.dpms_property,
+							 dpms_mode);
+		}
 		/* Found a CRTC on this fb, now find encoders */
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			if (encoder->crtc == crtc) {
@@ -1024,11 +1041,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
 	}
 
 create_mode:
-	mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
-			    cmdline_mode->yres,
-			    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-			    cmdline_mode->rb, cmdline_mode->interlace,
-			    cmdline_mode->margins);
+	if (cmdline_mode->cvt)
+		mode = drm_cvt_mode(fb_helper_conn->connector->dev,
+				    cmdline_mode->xres, cmdline_mode->yres,
+				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+				    cmdline_mode->rb, cmdline_mode->interlace,
+				    cmdline_mode->margins);
+	else
+		mode = drm_gtf_mode(fb_helper_conn->connector->dev,
+				    cmdline_mode->xres, cmdline_mode->yres,
+				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+				    cmdline_mode->interlace,
+				    cmdline_mode->margins);
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	list_add(&mode->head, &fb_helper_conn->connector->modes);
 	return mode;
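Two behavior changes in drm_fb_helper.c are worth spelling out: 'R' (reduced blanking) is a CVT concept, so the parser now honors it only when CVT was requested, and a command-line mode without the CVT flag is now generated with drm_gtf_mode() instead of always using CVT. Assuming the usual video= mode option syntax (<xres>x<yres>[M][R][-<bpp>][@<refresh>], connector-name prefix optional), the difference looks like:

	video=1920x1200MR@60	# 'M' selects CVT; 'R' now correctly adds reduced blanking
	video=1920x1200@60	# no 'M': the mode is now computed via drm_gtf_mode()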
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 95639017bdbe..da78f2c0d909 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
+	  intel_ringbuffer.o \
 	  intel_overlay.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 66c697bc9b22..56f66426207f 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -208,7 +208,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
 	uint8_t ctl2;
 
 	if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
-		if (ctl2 & TFP410_CTL_2_HTPLG)
+		if (ctl2 & TFP410_CTL_2_RSEN)
 			ret = connector_status_connected;
 		else
 			ret = connector_status_disconnected;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 322070c0c631..9214119c0154 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+			list) {
 		seq_printf(m, " %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
@@ -143,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -195,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -251,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	int i;
 	volatile u32 *hws;
 
-	hws = (volatile u32 *)dev_priv->hw_status_page;
+	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -287,7 +288,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			ret = i915_gem_object_get_pages(obj, 0);
@@ -317,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	u8 *virt;
 	uint32_t *ptr, off;
 
-	if (!dev_priv->ring.ring_obj) {
+	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
 		return 0;
 	}
 
-	virt = dev_priv->ring.virtual_start;
+	virt = dev_priv->render_ring.virtual_start;
 
-	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
 		ptr = (uint32_t *)(virt + off);
 		seq_printf(m, "%08x : %08x\n", off, *ptr);
 	}
@@ -344,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "RingHead : %08x\n", head);
 	seq_printf(m, "RingTail : %08x\n", tail);
-	seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
 	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
 	return 0;
@@ -489,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
-	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
-	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
-		   rgvswctl & 0x3f);
+	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+		   MEMSTAT_VID_SHIFT);
+	seq_printf(m, "Current P-state: %d\n",
+		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 
 	return 0;
 }
@@ -508,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
 	return 0;
@@ -541,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
@@ -555,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
 	seq_printf(m, "Starting frequency: P%d\n",
 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
-	seq_printf(m, "Max frequency: P%d\n",
+	seq_printf(m, "Max P-state: P%d\n",
 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
-	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
 
 	return 0;
 }
@@ -593,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	case FBC_NOT_TILED:
 		seq_printf(m, "scanout buffer not tiled");
 		break;
+	case FBC_MULTIPLE_PIPES:
+		seq_printf(m, "multiple pipes are enabled");
+		break;
 	default:
 		seq_printf(m, "unknown reason");
 	}
@@ -608,7 +623,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
+	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 	else if (IS_I915GM(dev))
 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
@@ -621,6 +636,36 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+
+	seq_printf(m, "GMCH temp: %ld\n", temp);
+	seq_printf(m, "Chipset power: %ld\n", chipset);
+	seq_printf(m, "GFX power: %ld\n", gfx);
+	seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -743,6 +788,8 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
 };
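The two new debugfs files expose the power instrumentation (i915_mch_val(), i915_chipset_val(), i915_gfx_val()) introduced elsewhere in this series. Assuming debugfs is mounted in the usual place, they appear under the DRM minor's directory, e.g.:

	# cat /sys/kernel/debug/dri/0/i915_emon_status	# GMCH temp, Chipset/GFX/Total power
	# cat /sys/kernel/debug/dri/0/i915_gfxec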
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2a6b5de5ae5d..2305a1234f1e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -40,84 +40,6 @@ | |||
| 40 | #include <linux/vga_switcheroo.h> | 40 | #include <linux/vga_switcheroo.h> |
| 41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
| 42 | 42 | ||
| 43 | /* Really want an OS-independent resettable timer. Would like to have | ||
| 44 | * this loop run for (eg) 3 sec, but have the timer reset every time | ||
| 45 | * the head pointer changes, so that EBUSY only happens if the ring | ||
| 46 | * actually stalls for (eg) 3 seconds. | ||
| 47 | */ | ||
| 48 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | ||
| 49 | { | ||
| 50 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 51 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | ||
| 52 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; | ||
| 53 | u32 last_acthd = I915_READ(acthd_reg); | ||
| 54 | u32 acthd; | ||
| 55 | u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
| 56 | int i; | ||
| 57 | |||
| 58 | trace_i915_ring_wait_begin (dev); | ||
| 59 | |||
| 60 | for (i = 0; i < 100000; i++) { | ||
| 61 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
| 62 | acthd = I915_READ(acthd_reg); | ||
| 63 | ring->space = ring->head - (ring->tail + 8); | ||
| 64 | if (ring->space < 0) | ||
| 65 | ring->space += ring->Size; | ||
| 66 | if (ring->space >= n) { | ||
| 67 | trace_i915_ring_wait_end (dev); | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 71 | if (dev->primary->master) { | ||
| 72 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
| 73 | if (master_priv->sarea_priv) | ||
| 74 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | ||
| 75 | } | ||
| 76 | |||
| 77 | |||
| 78 | if (ring->head != last_head) | ||
| 79 | i = 0; | ||
| 80 | if (acthd != last_acthd) | ||
| 81 | i = 0; | ||
| 82 | |||
| 83 | last_head = ring->head; | ||
| 84 | last_acthd = acthd; | ||
| 85 | msleep_interruptible(10); | ||
| 86 | |||
| 87 | } | ||
| 88 | |||
| 89 | trace_i915_ring_wait_end (dev); | ||
| 90 | return -EBUSY; | ||
| 91 | } | ||
| 92 | |||
| 93 | /* As a ringbuffer is only allowed to wrap between instructions, fill | ||
| 94 | * the tail with NOOPs. | ||
| 95 | */ | ||
| 96 | int i915_wrap_ring(struct drm_device *dev) | ||
| 97 | { | ||
| 98 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 99 | volatile unsigned int *virt; | ||
| 100 | int rem; | ||
| 101 | |||
| 102 | rem = dev_priv->ring.Size - dev_priv->ring.tail; | ||
| 103 | if (dev_priv->ring.space < rem) { | ||
| 104 | int ret = i915_wait_ring(dev, rem, __func__); | ||
| 105 | if (ret) | ||
| 106 | return ret; | ||
| 107 | } | ||
| 108 | dev_priv->ring.space -= rem; | ||
| 109 | |||
| 110 | virt = (unsigned int *) | ||
| 111 | (dev_priv->ring.virtual_start + dev_priv->ring.tail); | ||
| 112 | rem /= 4; | ||
| 113 | while (rem--) | ||
| 114 | *virt++ = MI_NOOP; | ||
| 115 | |||
| 116 | dev_priv->ring.tail = 0; | ||
| 117 | |||
| 118 | return 0; | ||
| 119 | } | ||
| 120 | |||
| 121 | /** | 43 | /** |
| 122 | * Sets up the hardware status page for devices that need a physical address | 44 | * Sets up the hardware status page for devices that need a physical address |
| 123 | * in the register. | 45 | * in the register. |
| @@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
| 133 | DRM_ERROR("Can not allocate hardware status page\n"); | 55 | DRM_ERROR("Can not allocate hardware status page\n"); |
| 134 | return -ENOMEM; | 56 | return -ENOMEM; |
| 135 | } | 57 | } |
| 136 | dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; | 58 | dev_priv->render_ring.status_page.page_addr |
| 59 | = dev_priv->status_page_dmah->vaddr; | ||
| 137 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | 60 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; |
| 138 | 61 | ||
| 139 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 62 | memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); |
| 140 | 63 | ||
| 141 | if (IS_I965G(dev)) | 64 | if (IS_I965G(dev)) |
| 142 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & | 65 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & |
| @@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev) | |||
| 159 | dev_priv->status_page_dmah = NULL; | 82 | dev_priv->status_page_dmah = NULL; |
| 160 | } | 83 | } |
| 161 | 84 | ||
| 162 | if (dev_priv->status_gfx_addr) { | 85 | if (dev_priv->render_ring.status_page.gfx_addr) { |
| 163 | dev_priv->status_gfx_addr = 0; | 86 | dev_priv->render_ring.status_page.gfx_addr = 0; |
| 164 | drm_core_ioremapfree(&dev_priv->hws_map, dev); | 87 | drm_core_ioremapfree(&dev_priv->hws_map, dev); |
| 165 | } | 88 | } |
| 166 | 89 | ||
| @@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
| 172 | { | 95 | { |
| 173 | drm_i915_private_t *dev_priv = dev->dev_private; | 96 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 174 | struct drm_i915_master_private *master_priv; | 97 | struct drm_i915_master_private *master_priv; |
| 175 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | 98 | struct intel_ring_buffer *ring = &dev_priv->render_ring; |
| 176 | 99 | ||
| 177 | /* | 100 | /* |
| 178 | * We should never lose context on the ring with modesetting | 101 | * We should never lose context on the ring with modesetting |
| @@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
| 185 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | 108 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; |
| 186 | ring->space = ring->head - (ring->tail + 8); | 109 | ring->space = ring->head - (ring->tail + 8); |
| 187 | if (ring->space < 0) | 110 | if (ring->space < 0) |
| 188 | ring->space += ring->Size; | 111 | ring->space += ring->size; |
| 189 | 112 | ||
| 190 | if (!dev->primary->master) | 113 | if (!dev->primary->master) |
| 191 | return; | 114 | return; |
| @@ -205,12 +128,11 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
| 205 | if (dev->irq_enabled) | 128 | if (dev->irq_enabled) |
| 206 | drm_irq_uninstall(dev); | 129 | drm_irq_uninstall(dev); |
| 207 | 130 | ||
| 208 | if (dev_priv->ring.virtual_start) { | 131 | mutex_lock(&dev->struct_mutex); |
| 209 | drm_core_ioremapfree(&dev_priv->ring.map, dev); | 132 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); |
| 210 | dev_priv->ring.virtual_start = NULL; | 133 | if (HAS_BSD(dev)) |
| 211 | dev_priv->ring.map.handle = NULL; | 134 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); |
| 212 | dev_priv->ring.map.size = 0; | 135 | mutex_unlock(&dev->struct_mutex); |
| 213 | } | ||
| 214 | 136 | ||
| 215 | /* Clear the HWS virtual address at teardown */ | 137 | /* Clear the HWS virtual address at teardown */ |
| 216 | if (I915_NEED_GFX_HWS(dev)) | 138 | if (I915_NEED_GFX_HWS(dev)) |
| @@ -233,24 +155,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
| 233 | } | 155 | } |
| 234 | 156 | ||
| 235 | if (init->ring_size != 0) { | 157 | if (init->ring_size != 0) { |
| 236 | if (dev_priv->ring.ring_obj != NULL) { | 158 | if (dev_priv->render_ring.gem_object != NULL) { |
| 237 | i915_dma_cleanup(dev); | 159 | i915_dma_cleanup(dev); |
| 238 | DRM_ERROR("Client tried to initialize ringbuffer in " | 160 | DRM_ERROR("Client tried to initialize ringbuffer in " |
| 239 | "GEM mode\n"); | 161 | "GEM mode\n"); |
| 240 | return -EINVAL; | 162 | return -EINVAL; |
| 241 | } | 163 | } |
| 242 | 164 | ||
| 243 | dev_priv->ring.Size = init->ring_size; | 165 | dev_priv->render_ring.size = init->ring_size; |
| 244 | 166 | ||
| 245 | dev_priv->ring.map.offset = init->ring_start; | 167 | dev_priv->render_ring.map.offset = init->ring_start; |
| 246 | dev_priv->ring.map.size = init->ring_size; | 168 | dev_priv->render_ring.map.size = init->ring_size; |
| 247 | dev_priv->ring.map.type = 0; | 169 | dev_priv->render_ring.map.type = 0; |
| 248 | dev_priv->ring.map.flags = 0; | 170 | dev_priv->render_ring.map.flags = 0; |
| 249 | dev_priv->ring.map.mtrr = 0; | 171 | dev_priv->render_ring.map.mtrr = 0; |
| 250 | 172 | ||
| 251 | drm_core_ioremap_wc(&dev_priv->ring.map, dev); | 173 | drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); |
| 252 | 174 | ||
| 253 | if (dev_priv->ring.map.handle == NULL) { | 175 | if (dev_priv->render_ring.map.handle == NULL) { |
| 254 | i915_dma_cleanup(dev); | 176 | i915_dma_cleanup(dev); |
| 255 | DRM_ERROR("can not ioremap virtual address for" | 177 | DRM_ERROR("can not ioremap virtual address for" |
| 256 | " ring buffer\n"); | 178 | " ring buffer\n"); |
| @@ -258,7 +180,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
| 258 | } | 180 | } |
| 259 | } | 181 | } |
| 260 | 182 | ||
| 261 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | 183 | dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; |
| 262 | 184 | ||
| 263 | dev_priv->cpp = init->cpp; | 185 | dev_priv->cpp = init->cpp; |
| 264 | dev_priv->back_offset = init->back_offset; | 186 | dev_priv->back_offset = init->back_offset; |
| @@ -278,26 +200,29 @@ static int i915_dma_resume(struct drm_device * dev) | |||
| 278 | { | 200 | { |
| 279 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 201 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 280 | 202 | ||
| 203 | struct intel_ring_buffer *ring; | ||
| 281 | DRM_DEBUG_DRIVER("%s\n", __func__); | 204 | DRM_DEBUG_DRIVER("%s\n", __func__); |
| 282 | 205 | ||
| 283 | if (dev_priv->ring.map.handle == NULL) { | 206 | ring = &dev_priv->render_ring; |
| 207 | |||
| 208 | if (ring->map.handle == NULL) { | ||
| 284 | DRM_ERROR("can not ioremap virtual address for" | 209 | DRM_ERROR("can not ioremap virtual address for" |
| 285 | " ring buffer\n"); | 210 | " ring buffer\n"); |
| 286 | return -ENOMEM; | 211 | return -ENOMEM; |
| 287 | } | 212 | } |
| 288 | 213 | ||
| 289 | /* Program Hardware Status Page */ | 214 | /* Program Hardware Status Page */ |
| 290 | if (!dev_priv->hw_status_page) { | 215 | if (!ring->status_page.page_addr) { |
| 291 | DRM_ERROR("Can not find hardware status page\n"); | 216 | DRM_ERROR("Can not find hardware status page\n"); |
| 292 | return -EINVAL; | 217 | return -EINVAL; |
| 293 | } | 218 | } |
| 294 | DRM_DEBUG_DRIVER("hw status page @ %p\n", | 219 | DRM_DEBUG_DRIVER("hw status page @ %p\n", |
| 295 | dev_priv->hw_status_page); | 220 | ring->status_page.page_addr); |
| 296 | 221 | if (ring->status_page.gfx_addr != 0) | |
| 297 | if (dev_priv->status_gfx_addr != 0) | 222 | ring->setup_status_page(dev, ring); |
| 298 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | ||
| 299 | else | 223 | else |
| 300 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | 224 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); |
| 225 | |||
| 301 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | 226 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
| 302 | 227 | ||
| 303 | return 0; | 228 | return 0; |
| @@ -407,9 +332,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | |||
| 407 | { | 332 | { |
| 408 | drm_i915_private_t *dev_priv = dev->dev_private; | 333 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 409 | int i; | 334 | int i; |
| 410 | RING_LOCALS; | ||
| 411 | 335 | ||
| 412 | if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) | 336 | if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) |
| 413 | return -EINVAL; | 337 | return -EINVAL; |
| 414 | 338 | ||
| 415 | BEGIN_LP_RING((dwords+1)&~1); | 339 | BEGIN_LP_RING((dwords+1)&~1); |
| @@ -442,9 +366,7 @@ i915_emit_box(struct drm_device *dev, | |||
| 442 | struct drm_clip_rect *boxes, | 366 | struct drm_clip_rect *boxes, |
| 443 | int i, int DR1, int DR4) | 367 | int i, int DR1, int DR4) |
| 444 | { | 368 | { |
| 445 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 446 | struct drm_clip_rect box = boxes[i]; | 369 | struct drm_clip_rect box = boxes[i]; |
| 447 | RING_LOCALS; | ||
| 448 | 370 | ||
| 449 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | 371 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { |
| 450 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | 372 | DRM_ERROR("Bad box %d,%d..%d,%d\n", |
| @@ -481,7 +403,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev) | |||
| 481 | { | 403 | { |
| 482 | drm_i915_private_t *dev_priv = dev->dev_private; | 404 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 483 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 405 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
| 484 | RING_LOCALS; | ||
| 485 | 406 | ||
| 486 | dev_priv->counter++; | 407 | dev_priv->counter++; |
| 487 | if (dev_priv->counter > 0x7FFFFFFFUL) | 408 | if (dev_priv->counter > 0x7FFFFFFFUL) |
| @@ -535,10 +456,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
| 535 | drm_i915_batchbuffer_t * batch, | 456 | drm_i915_batchbuffer_t * batch, |
| 536 | struct drm_clip_rect *cliprects) | 457 | struct drm_clip_rect *cliprects) |
| 537 | { | 458 | { |
| 538 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 539 | int nbox = batch->num_cliprects; | 459 | int nbox = batch->num_cliprects; |
| 540 | int i = 0, count; | 460 | int i = 0, count; |
| 541 | RING_LOCALS; | ||
| 542 | 461 | ||
| 543 | if ((batch->start | batch->used) & 0x7) { | 462 | if ((batch->start | batch->used) & 0x7) { |
| 544 | DRM_ERROR("alignment"); | 463 | DRM_ERROR("alignment"); |
| @@ -587,7 +506,6 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
| 587 | drm_i915_private_t *dev_priv = dev->dev_private; | 506 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 588 | struct drm_i915_master_private *master_priv = | 507 | struct drm_i915_master_private *master_priv = |
| 589 | dev->primary->master->driver_priv; | 508 | dev->primary->master->driver_priv; |
| 590 | RING_LOCALS; | ||
| 591 | 509 | ||
| 592 | if (!master_priv->sarea_priv) | 510 | if (!master_priv->sarea_priv) |
| 593 | return -EINVAL; | 511 | return -EINVAL; |
| @@ -640,7 +558,8 @@ static int i915_quiescent(struct drm_device * dev) | |||
| 640 | drm_i915_private_t *dev_priv = dev->dev_private; | 558 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 641 | 559 | ||
| 642 | i915_kernel_lost_context(dev); | 560 | i915_kernel_lost_context(dev); |
| 643 | return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); | 561 | return intel_wait_ring_buffer(dev, &dev_priv->render_ring, |
| 562 | dev_priv->render_ring.size - 8); | ||
| 644 | } | 563 | } |
| 645 | 564 | ||
| 646 | static int i915_flush_ioctl(struct drm_device *dev, void *data, | 565 | static int i915_flush_ioctl(struct drm_device *dev, void *data, |
| @@ -827,6 +746,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
| 827 | /* depends on GEM */ | 746 | /* depends on GEM */ |
| 828 | value = dev_priv->has_gem; | 747 | value = dev_priv->has_gem; |
| 829 | break; | 748 | break; |
| 749 | case I915_PARAM_HAS_BSD: | ||
| 750 | value = HAS_BSD(dev); | ||
| 751 | break; | ||
| 830 | default: | 752 | default: |
| 831 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 753 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
| 832 | param->param); | 754 | param->param); |
| @@ -882,6 +804,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
| 882 | { | 804 | { |
| 883 | drm_i915_private_t *dev_priv = dev->dev_private; | 805 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 884 | drm_i915_hws_addr_t *hws = data; | 806 | drm_i915_hws_addr_t *hws = data; |
| 807 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | ||
| 885 | 808 | ||
| 886 | if (!I915_NEED_GFX_HWS(dev)) | 809 | if (!I915_NEED_GFX_HWS(dev)) |
| 887 | return -EINVAL; | 810 | return -EINVAL; |
| @@ -898,7 +821,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
| 898 | 821 | ||
| 899 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); | 822 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); |
| 900 | 823 | ||
| 901 | dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); | 824 | ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); |
| 902 | 825 | ||
| 903 | dev_priv->hws_map.offset = dev->agp->base + hws->addr; | 826 | dev_priv->hws_map.offset = dev->agp->base + hws->addr; |
| 904 | dev_priv->hws_map.size = 4*1024; | 827 | dev_priv->hws_map.size = 4*1024; |
| @@ -909,19 +832,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
| 909 | drm_core_ioremap_wc(&dev_priv->hws_map, dev); | 832 | drm_core_ioremap_wc(&dev_priv->hws_map, dev); |
| 910 | if (dev_priv->hws_map.handle == NULL) { | 833 | if (dev_priv->hws_map.handle == NULL) { |
| 911 | i915_dma_cleanup(dev); | 834 | i915_dma_cleanup(dev); |
| 912 | dev_priv->status_gfx_addr = 0; | 835 | ring->status_page.gfx_addr = 0; |
| 913 | DRM_ERROR("can not ioremap virtual address for" | 836 | DRM_ERROR("can not ioremap virtual address for" |
| 914 | " G33 hw status page\n"); | 837 | " G33 hw status page\n"); |
| 915 | return -ENOMEM; | 838 | return -ENOMEM; |
| 916 | } | 839 | } |
| 917 | dev_priv->hw_status_page = dev_priv->hws_map.handle; | 840 | ring->status_page.page_addr = dev_priv->hws_map.handle; |
| 841 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
| 842 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | ||
| 918 | 843 | ||
| 919 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | ||
| 920 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | ||
| 921 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | 844 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", |
| 922 | dev_priv->status_gfx_addr); | 845 | ring->status_page.gfx_addr); |
| 923 | DRM_DEBUG_DRIVER("load hws at %p\n", | 846 | DRM_DEBUG_DRIVER("load hws at %p\n", |
| 924 | dev_priv->hw_status_page); | 847 | ring->status_page.page_addr); |
| 925 | return 0; | 848 | return 0; |
| 926 | } | 849 | } |
| 927 | 850 | ||
| @@ -1308,7 +1231,7 @@ static void i915_warn_stolen(struct drm_device *dev) | |||
| 1308 | static void i915_setup_compression(struct drm_device *dev, int size) | 1231 | static void i915_setup_compression(struct drm_device *dev, int size) |
| 1309 | { | 1232 | { |
| 1310 | struct drm_i915_private *dev_priv = dev->dev_private; | 1233 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1311 | struct drm_mm_node *compressed_fb, *compressed_llb; | 1234 | struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); |
| 1312 | unsigned long cfb_base; | 1235 | unsigned long cfb_base; |
| 1313 | unsigned long ll_base = 0; | 1236 | unsigned long ll_base = 0; |
| 1314 | 1237 | ||
| @@ -1377,7 +1300,7 @@ static void i915_cleanup_compression(struct drm_device *dev) | |||
| 1377 | struct drm_i915_private *dev_priv = dev->dev_private; | 1300 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1378 | 1301 | ||
| 1379 | drm_mm_put_block(dev_priv->compressed_fb); | 1302 | drm_mm_put_block(dev_priv->compressed_fb); |
| 1380 | if (!IS_GM45(dev)) | 1303 | if (dev_priv->compressed_llb) |
| 1381 | drm_mm_put_block(dev_priv->compressed_llb); | 1304 | drm_mm_put_block(dev_priv->compressed_llb); |
| 1382 | } | 1305 | } |
| 1383 | 1306 | ||
| @@ -1399,12 +1322,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ | |||
| 1399 | struct drm_device *dev = pci_get_drvdata(pdev); | 1322 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1400 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 1323 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
| 1401 | if (state == VGA_SWITCHEROO_ON) { | 1324 | if (state == VGA_SWITCHEROO_ON) { |
| 1402 | printk(KERN_INFO "i915: switched off\n"); | 1325 | printk(KERN_INFO "i915: switched on\n"); |
| 1403 | /* i915 resume handler doesn't set to D0 */ | 1326 | /* i915 resume handler doesn't set to D0 */ |
| 1404 | pci_set_power_state(dev->pdev, PCI_D0); | 1327 | pci_set_power_state(dev->pdev, PCI_D0); |
| 1405 | i915_resume(dev); | 1328 | i915_resume(dev); |
| 1329 | drm_kms_helper_poll_enable(dev); | ||
| 1406 | } else { | 1330 | } else { |
| 1407 | printk(KERN_ERR "i915: switched off\n"); | 1331 | printk(KERN_ERR "i915: switched off\n"); |
| 1332 | drm_kms_helper_poll_disable(dev); | ||
| 1408 | i915_suspend(dev, pmm); | 1333 | i915_suspend(dev, pmm); |
| 1409 | } | 1334 | } |
| 1410 | } | 1335 | } |
| @@ -1479,19 +1404,23 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
| 1479 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 1404 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ |
| 1480 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | 1405 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); |
| 1481 | if (ret) | 1406 | if (ret) |
| 1482 | goto destroy_ringbuffer; | 1407 | goto cleanup_ringbuffer; |
| 1483 | 1408 | ||
| 1484 | ret = vga_switcheroo_register_client(dev->pdev, | 1409 | ret = vga_switcheroo_register_client(dev->pdev, |
| 1485 | i915_switcheroo_set_state, | 1410 | i915_switcheroo_set_state, |
| 1486 | i915_switcheroo_can_switch); | 1411 | i915_switcheroo_can_switch); |
| 1487 | if (ret) | 1412 | if (ret) |
| 1488 | goto destroy_ringbuffer; | 1413 | goto cleanup_vga_client; |
| 1414 | |||
| 1415 | /* IIR "flip pending" bit means done if this bit is set */ | ||
| 1416 | if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) | ||
| 1417 | dev_priv->flip_pending_is_done = true; | ||
| 1489 | 1418 | ||
| 1490 | intel_modeset_init(dev); | 1419 | intel_modeset_init(dev); |
| 1491 | 1420 | ||
| 1492 | ret = drm_irq_install(dev); | 1421 | ret = drm_irq_install(dev); |
| 1493 | if (ret) | 1422 | if (ret) |
| 1494 | goto destroy_ringbuffer; | 1423 | goto cleanup_vga_switcheroo; |
| 1495 | 1424 | ||
| 1496 | /* Always safe in the mode setting case. */ | 1425 | /* Always safe in the mode setting case. */ |
| 1497 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | 1426 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
| @@ -1503,11 +1432,20 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
| 1503 | 1432 | ||
| 1504 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | 1433 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); |
| 1505 | 1434 | ||
| 1506 | intel_fbdev_init(dev); | 1435 | ret = intel_fbdev_init(dev); |
| 1436 | if (ret) | ||
| 1437 | goto cleanup_irq; | ||
| 1438 | |||
| 1507 | drm_kms_helper_poll_init(dev); | 1439 | drm_kms_helper_poll_init(dev); |
| 1508 | return 0; | 1440 | return 0; |
| 1509 | 1441 | ||
| 1510 | destroy_ringbuffer: | 1442 | cleanup_irq: |
| 1443 | drm_irq_uninstall(dev); | ||
| 1444 | cleanup_vga_switcheroo: | ||
| 1445 | vga_switcheroo_unregister_client(dev->pdev); | ||
| 1446 | cleanup_vga_client: | ||
| 1447 | vga_client_register(dev->pdev, NULL, NULL, NULL); | ||
| 1448 | cleanup_ringbuffer: | ||
| 1511 | mutex_lock(&dev->struct_mutex); | 1449 | mutex_lock(&dev->struct_mutex); |
| 1512 | i915_gem_cleanup_ringbuffer(dev); | 1450 | i915_gem_cleanup_ringbuffer(dev); |
| 1513 | mutex_unlock(&dev->struct_mutex); | 1451 | mutex_unlock(&dev->struct_mutex); |
| @@ -1539,14 +1477,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | |||
| 1539 | master->driver_priv = NULL; | 1477 | master->driver_priv = NULL; |
| 1540 | } | 1478 | } |
| 1541 | 1479 | ||
| 1542 | static void i915_get_mem_freq(struct drm_device *dev) | 1480 | static void i915_pineview_get_mem_freq(struct drm_device *dev) |
| 1543 | { | 1481 | { |
| 1544 | drm_i915_private_t *dev_priv = dev->dev_private; | 1482 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1545 | u32 tmp; | 1483 | u32 tmp; |
| 1546 | 1484 | ||
| 1547 | if (!IS_PINEVIEW(dev)) | ||
| 1548 | return; | ||
| 1549 | |||
| 1550 | tmp = I915_READ(CLKCFG); | 1485 | tmp = I915_READ(CLKCFG); |
| 1551 | 1486 | ||
| 1552 | switch (tmp & CLKCFG_FSB_MASK) { | 1487 | switch (tmp & CLKCFG_FSB_MASK) { |
| @@ -1575,8 +1510,525 @@ static void i915_get_mem_freq(struct drm_device *dev) | |||
| 1575 | dev_priv->mem_freq = 800; | 1510 | dev_priv->mem_freq = 800; |
| 1576 | break; | 1511 | break; |
| 1577 | } | 1512 | } |
| 1513 | |||
| 1514 | /* detect pineview DDR3 setting */ | ||
| 1515 | tmp = I915_READ(CSHRDDR3CTL); | ||
| 1516 | dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | static void i915_ironlake_get_mem_freq(struct drm_device *dev) | ||
| 1520 | { | ||
| 1521 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 1522 | u16 ddrpll, csipll; | ||
| 1523 | |||
| 1524 | ddrpll = I915_READ16(DDRMPLL1); | ||
| 1525 | csipll = I915_READ16(CSIPLL0); | ||
| 1526 | |||
| 1527 | switch (ddrpll & 0xff) { | ||
| 1528 | case 0xc: | ||
| 1529 | dev_priv->mem_freq = 800; | ||
| 1530 | break; | ||
| 1531 | case 0x10: | ||
| 1532 | dev_priv->mem_freq = 1066; | ||
| 1533 | break; | ||
| 1534 | case 0x14: | ||
| 1535 | dev_priv->mem_freq = 1333; | ||
| 1536 | break; | ||
| 1537 | case 0x18: | ||
| 1538 | dev_priv->mem_freq = 1600; | ||
| 1539 | break; | ||
| 1540 | default: | ||
| 1541 | DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", | ||
| 1542 | ddrpll & 0xff); | ||
| 1543 | dev_priv->mem_freq = 0; | ||
| 1544 | break; | ||
| 1545 | } | ||
| 1546 | |||
| 1547 | dev_priv->r_t = dev_priv->mem_freq; | ||
| 1548 | |||
| 1549 | switch (csipll & 0x3ff) { | ||
| 1550 | case 0x00c: | ||
| 1551 | dev_priv->fsb_freq = 3200; | ||
| 1552 | break; | ||
| 1553 | case 0x00e: | ||
| 1554 | dev_priv->fsb_freq = 3733; | ||
| 1555 | break; | ||
| 1556 | case 0x010: | ||
| 1557 | dev_priv->fsb_freq = 4266; | ||
| 1558 | break; | ||
| 1559 | case 0x012: | ||
| 1560 | dev_priv->fsb_freq = 4800; | ||
| 1561 | break; | ||
| 1562 | case 0x014: | ||
| 1563 | dev_priv->fsb_freq = 5333; | ||
| 1564 | break; | ||
| 1565 | case 0x016: | ||
| 1566 | dev_priv->fsb_freq = 5866; | ||
| 1567 | break; | ||
| 1568 | case 0x018: | ||
| 1569 | dev_priv->fsb_freq = 6400; | ||
| 1570 | break; | ||
| 1571 | default: | ||
| 1572 | DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", | ||
| 1573 | csipll & 0x3ff); | ||
| 1574 | dev_priv->fsb_freq = 0; | ||
| 1575 | break; | ||
| 1576 | } | ||
| 1577 | |||
| 1578 | if (dev_priv->fsb_freq == 3200) { | ||
| 1579 | dev_priv->c_m = 0; | ||
| 1580 | } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { | ||
| 1581 | dev_priv->c_m = 1; | ||
| 1582 | } else { | ||
| 1583 | dev_priv->c_m = 2; | ||
| 1584 | } | ||
| 1585 | } | ||
| 1586 | |||
| 1587 | struct v_table { | ||
| 1588 | u8 vid; | ||
| 1589 | unsigned long vd; /* in .1 mil */ | ||
| 1590 | unsigned long vm; /* in .1 mil */ | ||
| 1591 | u8 pvid; | ||
| 1592 | }; | ||
| 1593 | |||
| 1594 | static struct v_table v_table[] = { | ||
| 1595 | { 0, 16125, 15000, 0x7f, }, | ||
| 1596 | { 1, 16000, 14875, 0x7e, }, | ||
| 1597 | { 2, 15875, 14750, 0x7d, }, | ||
| 1598 | { 3, 15750, 14625, 0x7c, }, | ||
| 1599 | { 4, 15625, 14500, 0x7b, }, | ||
| 1600 | { 5, 15500, 14375, 0x7a, }, | ||
| 1601 | { 6, 15375, 14250, 0x79, }, | ||
| 1602 | { 7, 15250, 14125, 0x78, }, | ||
| 1603 | { 8, 15125, 14000, 0x77, }, | ||
| 1604 | { 9, 15000, 13875, 0x76, }, | ||
| 1605 | { 10, 14875, 13750, 0x75, }, | ||
| 1606 | { 11, 14750, 13625, 0x74, }, | ||
| 1607 | { 12, 14625, 13500, 0x73, }, | ||
| 1608 | { 13, 14500, 13375, 0x72, }, | ||
| 1609 | { 14, 14375, 13250, 0x71, }, | ||
| 1610 | { 15, 14250, 13125, 0x70, }, | ||
| 1611 | { 16, 14125, 13000, 0x6f, }, | ||
| 1612 | { 17, 14000, 12875, 0x6e, }, | ||
| 1613 | { 18, 13875, 12750, 0x6d, }, | ||
| 1614 | { 19, 13750, 12625, 0x6c, }, | ||
| 1615 | { 20, 13625, 12500, 0x6b, }, | ||
| 1616 | { 21, 13500, 12375, 0x6a, }, | ||
| 1617 | { 22, 13375, 12250, 0x69, }, | ||
| 1618 | { 23, 13250, 12125, 0x68, }, | ||
| 1619 | { 24, 13125, 12000, 0x67, }, | ||
| 1620 | { 25, 13000, 11875, 0x66, }, | ||
| 1621 | { 26, 12875, 11750, 0x65, }, | ||
| 1622 | { 27, 12750, 11625, 0x64, }, | ||
| 1623 | { 28, 12625, 11500, 0x63, }, | ||
| 1624 | { 29, 12500, 11375, 0x62, }, | ||
| 1625 | { 30, 12375, 11250, 0x61, }, | ||
| 1626 | { 31, 12250, 11125, 0x60, }, | ||
| 1627 | { 32, 12125, 11000, 0x5f, }, | ||
| 1628 | { 33, 12000, 10875, 0x5e, }, | ||
| 1629 | { 34, 11875, 10750, 0x5d, }, | ||
| 1630 | { 35, 11750, 10625, 0x5c, }, | ||
| 1631 | { 36, 11625, 10500, 0x5b, }, | ||
| 1632 | { 37, 11500, 10375, 0x5a, }, | ||
| 1633 | { 38, 11375, 10250, 0x59, }, | ||
| 1634 | { 39, 11250, 10125, 0x58, }, | ||
| 1635 | { 40, 11125, 10000, 0x57, }, | ||
| 1636 | { 41, 11000, 9875, 0x56, }, | ||
| 1637 | { 42, 10875, 9750, 0x55, }, | ||
| 1638 | { 43, 10750, 9625, 0x54, }, | ||
| 1639 | { 44, 10625, 9500, 0x53, }, | ||
| 1640 | { 45, 10500, 9375, 0x52, }, | ||
| 1641 | { 46, 10375, 9250, 0x51, }, | ||
| 1642 | { 47, 10250, 9125, 0x50, }, | ||
| 1643 | { 48, 10125, 9000, 0x4f, }, | ||
| 1644 | { 49, 10000, 8875, 0x4e, }, | ||
| 1645 | { 50, 9875, 8750, 0x4d, }, | ||
| 1646 | { 51, 9750, 8625, 0x4c, }, | ||
| 1647 | { 52, 9625, 8500, 0x4b, }, | ||
| 1648 | { 53, 9500, 8375, 0x4a, }, | ||
| 1649 | { 54, 9375, 8250, 0x49, }, | ||
| 1650 | { 55, 9250, 8125, 0x48, }, | ||
| 1651 | { 56, 9125, 8000, 0x47, }, | ||
| 1652 | { 57, 9000, 7875, 0x46, }, | ||
| 1653 | { 58, 8875, 7750, 0x45, }, | ||
| 1654 | { 59, 8750, 7625, 0x44, }, | ||
| 1655 | { 60, 8625, 7500, 0x43, }, | ||
| 1656 | { 61, 8500, 7375, 0x42, }, | ||
| 1657 | { 62, 8375, 7250, 0x41, }, | ||
| 1658 | { 63, 8250, 7125, 0x40, }, | ||
| 1659 | { 64, 8125, 7000, 0x3f, }, | ||
| 1660 | { 65, 8000, 6875, 0x3e, }, | ||
| 1661 | { 66, 7875, 6750, 0x3d, }, | ||
| 1662 | { 67, 7750, 6625, 0x3c, }, | ||
| 1663 | { 68, 7625, 6500, 0x3b, }, | ||
| 1664 | { 69, 7500, 6375, 0x3a, }, | ||
| 1665 | { 70, 7375, 6250, 0x39, }, | ||
| 1666 | { 71, 7250, 6125, 0x38, }, | ||
| 1667 | { 72, 7125, 6000, 0x37, }, | ||
| 1668 | { 73, 7000, 5875, 0x36, }, | ||
| 1669 | { 74, 6875, 5750, 0x35, }, | ||
| 1670 | { 75, 6750, 5625, 0x34, }, | ||
| 1671 | { 76, 6625, 5500, 0x33, }, | ||
| 1672 | { 77, 6500, 5375, 0x32, }, | ||
| 1673 | { 78, 6375, 5250, 0x31, }, | ||
| 1674 | { 79, 6250, 5125, 0x30, }, | ||
| 1675 | { 80, 6125, 5000, 0x2f, }, | ||
| 1676 | { 81, 6000, 4875, 0x2e, }, | ||
| 1677 | { 82, 5875, 4750, 0x2d, }, | ||
| 1678 | { 83, 5750, 4625, 0x2c, }, | ||
| 1679 | { 84, 5625, 4500, 0x2b, }, | ||
| 1680 | { 85, 5500, 4375, 0x2a, }, | ||
| 1681 | { 86, 5375, 4250, 0x29, }, | ||
| 1682 | { 87, 5250, 4125, 0x28, }, | ||
| 1683 | { 88, 5125, 4000, 0x27, }, | ||
| 1684 | { 89, 5000, 3875, 0x26, }, | ||
| 1685 | { 90, 4875, 3750, 0x25, }, | ||
| 1686 | { 91, 4750, 3625, 0x24, }, | ||
| 1687 | { 92, 4625, 3500, 0x23, }, | ||
| 1688 | { 93, 4500, 3375, 0x22, }, | ||
| 1689 | { 94, 4375, 3250, 0x21, }, | ||
| 1690 | { 95, 4250, 3125, 0x20, }, | ||
| 1691 | { 96, 4125, 3000, 0x1f, }, | ||
| 1692 | { 97, 4125, 3000, 0x1e, }, | ||
| 1693 | { 98, 4125, 3000, 0x1d, }, | ||
| 1694 | { 99, 4125, 3000, 0x1c, }, | ||
| 1695 | { 100, 4125, 3000, 0x1b, }, | ||
| 1696 | { 101, 4125, 3000, 0x1a, }, | ||
| 1697 | { 102, 4125, 3000, 0x19, }, | ||
| 1698 | { 103, 4125, 3000, 0x18, }, | ||
| 1699 | { 104, 4125, 3000, 0x17, }, | ||
| 1700 | { 105, 4125, 3000, 0x16, }, | ||
| 1701 | { 106, 4125, 3000, 0x15, }, | ||
| 1702 | { 107, 4125, 3000, 0x14, }, | ||
| 1703 | { 108, 4125, 3000, 0x13, }, | ||
| 1704 | { 109, 4125, 3000, 0x12, }, | ||
| 1705 | { 110, 4125, 3000, 0x11, }, | ||
| 1706 | { 111, 4125, 3000, 0x10, }, | ||
| 1707 | { 112, 4125, 3000, 0x0f, }, | ||
| 1708 | { 113, 4125, 3000, 0x0e, }, | ||
| 1709 | { 114, 4125, 3000, 0x0d, }, | ||
| 1710 | { 115, 4125, 3000, 0x0c, }, | ||
| 1711 | { 116, 4125, 3000, 0x0b, }, | ||
| 1712 | { 117, 4125, 3000, 0x0a, }, | ||
| 1713 | { 118, 4125, 3000, 0x09, }, | ||
| 1714 | { 119, 4125, 3000, 0x08, }, | ||
| 1715 | { 120, 1125, 0, 0x07, }, | ||
| 1716 | { 121, 1000, 0, 0x06, }, | ||
| 1717 | { 122, 875, 0, 0x05, }, | ||
| 1718 | { 123, 750, 0, 0x04, }, | ||
| 1719 | { 124, 625, 0, 0x03, }, | ||
| 1720 | { 125, 500, 0, 0x02, }, | ||
| 1721 | { 126, 375, 0, 0x01, }, | ||
| 1722 | { 127, 0, 0, 0x00, }, | ||
| 1723 | }; | ||
| 1724 | |||
| 1725 | struct cparams { | ||
	| 1726 | int i; /* config selector, matched against dev_priv->c_m */	| ||
	| 1727 | int t; /* clock in MHz, matched against dev_priv->r_t */	| ||
	| 1728 | int m; /* slope */	| ||
	| 1729 | int c; /* intercept */	| ||
| 1730 | }; | ||
| 1731 | |||
| 1732 | static struct cparams cparams[] = { | ||
| 1733 | { 1, 1333, 301, 28664 }, | ||
| 1734 | { 1, 1066, 294, 24460 }, | ||
| 1735 | { 1, 800, 294, 25192 }, | ||
| 1736 | { 0, 1333, 276, 27605 }, | ||
| 1737 | { 0, 1066, 276, 27605 }, | ||
| 1738 | { 0, 800, 231, 23784 }, | ||
| 1739 | }; | ||
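A hedged note on how this table is keyed: the lookup in i915_chipset_val() below matches .i against dev_priv->c_m and .t against dev_priv->r_t, both assumed to be filled in by the memory-frequency probe at load time. For example:

    /* Illustrative lookup: c_m == 1 && r_t == 1066 matches cparams[1],
     * selecting slope m = 294 and intercept c = 24460.
     */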
| 1740 | |||
| 1741 | unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | ||
| 1742 | { | ||
| 1743 | u64 total_count, diff, ret; | ||
| 1744 | u32 count1, count2, count3, m = 0, c = 0; | ||
| 1745 | unsigned long now = jiffies_to_msecs(jiffies), diff1; | ||
| 1746 | int i; | ||
| 1747 | |||
| 1748 | diff1 = now - dev_priv->last_time1; | ||
| 1749 | |||
| 1750 | count1 = I915_READ(DMIEC); | ||
| 1751 | count2 = I915_READ(DDREC); | ||
| 1752 | count3 = I915_READ(CSIEC); | ||
| 1753 | |||
| 1754 | total_count = count1 + count2 + count3; | ||
| 1755 | |||
| 1756 | /* FIXME: handle per-counter overflow */ | ||
| 1757 | if (total_count < dev_priv->last_count1) { | ||
| 1758 | diff = ~0UL - dev_priv->last_count1; | ||
| 1759 | diff += total_count; | ||
| 1760 | } else { | ||
| 1761 | diff = total_count - dev_priv->last_count1; | ||
| 1762 | } | ||
| 1763 | |||
| 1764 | for (i = 0; i < ARRAY_SIZE(cparams); i++) { | ||
| 1765 | if (cparams[i].i == dev_priv->c_m && | ||
| 1766 | cparams[i].t == dev_priv->r_t) { | ||
| 1767 | m = cparams[i].m; | ||
| 1768 | c = cparams[i].c; | ||
| 1769 | break; | ||
| 1770 | } | ||
| 1771 | } | ||
| 1772 | |||
	| 1773 | diff = div_u64(diff, diff1);	| ||
	| 1774 | ret = ((m * diff) + c);	| ||
	| 1775 | ret = div_u64(ret, 10);	| ||
| 1776 | |||
| 1777 | dev_priv->last_count1 = total_count; | ||
| 1778 | dev_priv->last_time1 = now; | ||
| 1779 | |||
| 1780 | return ret; | ||
| 1578 | } | 1781 | } |
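Note that div_u64() returns the 64-bit quotient rather than dividing its first argument in place, so each result above must be captured by assignment. A minimal sketch with illustrative numbers (3000 extra counter ticks over 200 ms, m = 294, c = 24460):

    u64 rate = div_u64(3000, 200);            /* 15 events per ms */
    u64 val  = div_u64(294 * 15 + 24460, 10); /* (4410 + 24460) / 10 = 2887 */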
| 1579 | 1782 | ||
| 1783 | unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | ||
| 1784 | { | ||
| 1785 | unsigned long m, x, b; | ||
| 1786 | u32 tsfs; | ||
| 1787 | |||
| 1788 | tsfs = I915_READ(TSFS); | ||
| 1789 | |||
| 1790 | m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); | ||
| 1791 | x = I915_READ8(TR1); | ||
| 1792 | |||
| 1793 | b = tsfs & TSFS_INTR_MASK; | ||
| 1794 | |||
| 1795 | return ((m * x) / 127) - b; | ||
| 1796 | } | ||
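Worked example for the slope/intercept decode above (numbers are illustrative only): with m = 100 from TSFS, an 8-bit reading x = 64 from TR1, and intercept b = 10, the function returns (100 * 64) / 127 - 10 = 40.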
| 1797 | |||
| 1798 | static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | ||
| 1799 | { | ||
| 1800 | unsigned long val = 0; | ||
| 1801 | int i; | ||
| 1802 | |||
| 1803 | for (i = 0; i < ARRAY_SIZE(v_table); i++) { | ||
| 1804 | if (v_table[i].pvid == pxvid) { | ||
| 1805 | if (IS_MOBILE(dev_priv->dev)) | ||
| 1806 | val = v_table[i].vm; | ||
| 1807 | else | ||
| 1808 | val = v_table[i].vd; | ||
| 1809 | } | ||
| 1810 | } | ||
| 1811 | |||
| 1812 | return val; | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | ||
| 1816 | { | ||
| 1817 | struct timespec now, diff1; | ||
| 1818 | u64 diff; | ||
| 1819 | unsigned long diffms; | ||
| 1820 | u32 count; | ||
| 1821 | |||
| 1822 | getrawmonotonic(&now); | ||
| 1823 | diff1 = timespec_sub(now, dev_priv->last_time2); | ||
| 1824 | |||
| 1825 | /* Don't divide by 0 */ | ||
| 1826 | diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; | ||
| 1827 | if (!diffms) | ||
| 1828 | return; | ||
| 1829 | |||
| 1830 | count = I915_READ(GFXEC); | ||
| 1831 | |||
| 1832 | if (count < dev_priv->last_count2) { | ||
| 1833 | diff = ~0UL - dev_priv->last_count2; | ||
| 1834 | diff += count; | ||
| 1835 | } else { | ||
| 1836 | diff = count - dev_priv->last_count2; | ||
| 1837 | } | ||
| 1838 | |||
| 1839 | dev_priv->last_count2 = count; | ||
| 1840 | dev_priv->last_time2 = now; | ||
| 1841 | |||
| 1842 | /* More magic constants... */ | ||
| 1843 | diff = diff * 1181; | ||
| 1844 | div_u64(diff, diffms * 10); | ||
| 1845 | dev_priv->gfx_power = diff; | ||
| 1846 | } | ||
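Illustrative arithmetic for the scaling above: a GFXEC delta of 1000 counts over 100 ms gives gfx_power = 1000 * 1181 / (100 * 10) = 1181.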
| 1847 | |||
| 1848 | unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) | ||
| 1849 | { | ||
| 1850 | unsigned long t, corr, state1, corr2, state2; | ||
| 1851 | u32 pxvid, ext_v; | ||
| 1852 | |||
| 1853 | pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); | ||
| 1854 | pxvid = (pxvid >> 24) & 0x7f; | ||
| 1855 | ext_v = pvid_to_extvid(dev_priv, pxvid); | ||
| 1856 | |||
| 1857 | state1 = ext_v; | ||
| 1858 | |||
| 1859 | t = i915_mch_val(dev_priv); | ||
| 1860 | |||
| 1861 | /* Revel in the empirically derived constants */ | ||
| 1862 | |||
| 1863 | /* Correction factor in 1/100000 units */ | ||
| 1864 | if (t > 80) | ||
| 1865 | corr = ((t * 2349) + 135940); | ||
| 1866 | else if (t >= 50) | ||
| 1867 | corr = ((t * 964) + 29317); | ||
| 1868 | else /* < 50 */ | ||
| 1869 | corr = ((t * 301) + 1004); | ||
| 1870 | |||
| 1871 | corr = corr * ((150142 * state1) / 10000 - 78642); | ||
| 1872 | corr /= 100000; | ||
| 1873 | corr2 = (corr * dev_priv->corr); | ||
| 1874 | |||
| 1875 | state2 = (corr2 * state1) / 10000; | ||
| 1876 | state2 /= 100; /* convert to mW */ | ||
| 1877 | |||
| 1878 | i915_update_gfx_val(dev_priv); | ||
| 1879 | |||
| 1880 | return dev_priv->gfx_power + state2; | ||
| 1881 | } | ||
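The correction factor is piecewise linear in the thermal reading t, in 1/100000 units. For example, t = 60 falls in the middle band, giving 60 * 964 + 29317 = 87157 before the voltage-dependent scaling and the final division down to mW.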
| 1882 | |||
| 1883 | /* Global for IPS driver to get at the current i915 device */ | ||
| 1884 | static struct drm_i915_private *i915_mch_dev; | ||
| 1885 | /* | ||
| 1886 | * Lock protecting IPS related data structures | ||
| 1887 | * - i915_mch_dev | ||
| 1888 | * - dev_priv->max_delay | ||
| 1889 | * - dev_priv->min_delay | ||
| 1890 | * - dev_priv->fmax | ||
	| 1891 | * - dev_priv->busy	| ||
| 1892 | */ | ||
| 1893 | DEFINE_SPINLOCK(mchdev_lock); | ||
| 1894 | |||
| 1895 | /** | ||
| 1896 | * i915_read_mch_val - return value for IPS use | ||
| 1897 | * | ||
| 1898 | * Calculate and return a value for the IPS driver to use when deciding whether | ||
| 1899 | * we have thermal and power headroom to increase CPU or GPU power budget. | ||
| 1900 | */ | ||
| 1901 | unsigned long i915_read_mch_val(void) | ||
| 1902 | { | ||
| 1903 | struct drm_i915_private *dev_priv; | ||
| 1904 | unsigned long chipset_val, graphics_val, ret = 0; | ||
| 1905 | |||
| 1906 | spin_lock(&mchdev_lock); | ||
| 1907 | if (!i915_mch_dev) | ||
| 1908 | goto out_unlock; | ||
| 1909 | dev_priv = i915_mch_dev; | ||
| 1910 | |||
| 1911 | chipset_val = i915_chipset_val(dev_priv); | ||
| 1912 | graphics_val = i915_gfx_val(dev_priv); | ||
| 1913 | |||
| 1914 | ret = chipset_val + graphics_val; | ||
| 1915 | |||
| 1916 | out_unlock: | ||
| 1917 | spin_unlock(&mchdev_lock); | ||
| 1918 | |||
| 1919 | return ret; | ||
| 1920 | } | ||
| 1921 | EXPORT_SYMBOL_GPL(i915_read_mch_val); | ||
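A hedged sketch of how an out-of-module consumer such as an IPS driver might resolve this export at runtime without a hard link-time dependency (the function and its comments are illustrative, not the actual IPS code):

    static unsigned long ips_sample_mch(void)
    {
    	unsigned long (*read_mch)(void);
    	unsigned long val = 0;

    	read_mch = symbol_get(i915_read_mch_val); /* NULL if i915 is not loaded */
    	if (read_mch) {
    		val = read_mch();                 /* chipset + graphics value */
    		symbol_put(i915_read_mch_val);    /* drop the module reference */
    	}
    	return val;
    }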
| 1922 | |||
| 1923 | /** | ||
| 1924 | * i915_gpu_raise - raise GPU frequency limit | ||
| 1925 | * | ||
| 1926 | * Raise the limit; IPS indicates we have thermal headroom. | ||
| 1927 | */ | ||
| 1928 | bool i915_gpu_raise(void) | ||
| 1929 | { | ||
| 1930 | struct drm_i915_private *dev_priv; | ||
| 1931 | bool ret = true; | ||
| 1932 | |||
| 1933 | spin_lock(&mchdev_lock); | ||
| 1934 | if (!i915_mch_dev) { | ||
| 1935 | ret = false; | ||
| 1936 | goto out_unlock; | ||
| 1937 | } | ||
| 1938 | dev_priv = i915_mch_dev; | ||
| 1939 | |||
| 1940 | if (dev_priv->max_delay > dev_priv->fmax) | ||
| 1941 | dev_priv->max_delay--; | ||
| 1942 | |||
| 1943 | out_unlock: | ||
| 1944 | spin_unlock(&mchdev_lock); | ||
| 1945 | |||
| 1946 | return ret; | ||
| 1947 | } | ||
| 1948 | EXPORT_SYMBOL_GPL(i915_gpu_raise); | ||
| 1949 | |||
| 1950 | /** | ||
| 1951 | * i915_gpu_lower - lower GPU frequency limit | ||
| 1952 | * | ||
| 1953 | * IPS indicates we're close to a thermal limit, so throttle back the GPU | ||
| 1954 | * frequency maximum. | ||
| 1955 | */ | ||
| 1956 | bool i915_gpu_lower(void) | ||
| 1957 | { | ||
| 1958 | struct drm_i915_private *dev_priv; | ||
| 1959 | bool ret = true; | ||
| 1960 | |||
| 1961 | spin_lock(&mchdev_lock); | ||
| 1962 | if (!i915_mch_dev) { | ||
| 1963 | ret = false; | ||
| 1964 | goto out_unlock; | ||
| 1965 | } | ||
| 1966 | dev_priv = i915_mch_dev; | ||
| 1967 | |||
| 1968 | if (dev_priv->max_delay < dev_priv->min_delay) | ||
| 1969 | dev_priv->max_delay++; | ||
| 1970 | |||
| 1971 | out_unlock: | ||
| 1972 | spin_unlock(&mchdev_lock); | ||
| 1973 | |||
| 1974 | return ret; | ||
| 1975 | } | ||
| 1976 | EXPORT_SYMBOL_GPL(i915_gpu_lower); | ||
| 1977 | |||
| 1978 | /** | ||
	| 1979 | * i915_gpu_busy - indicate GPU busyness to IPS	| ||
| 1980 | * | ||
| 1981 | * Tell the IPS driver whether or not the GPU is busy. | ||
| 1982 | */ | ||
| 1983 | bool i915_gpu_busy(void) | ||
| 1984 | { | ||
| 1985 | struct drm_i915_private *dev_priv; | ||
| 1986 | bool ret = false; | ||
| 1987 | |||
| 1988 | spin_lock(&mchdev_lock); | ||
| 1989 | if (!i915_mch_dev) | ||
| 1990 | goto out_unlock; | ||
| 1991 | dev_priv = i915_mch_dev; | ||
| 1992 | |||
| 1993 | ret = dev_priv->busy; | ||
| 1994 | |||
| 1995 | out_unlock: | ||
| 1996 | spin_unlock(&mchdev_lock); | ||
| 1997 | |||
| 1998 | return ret; | ||
| 1999 | } | ||
| 2000 | EXPORT_SYMBOL_GPL(i915_gpu_busy); | ||
| 2001 | |||
| 2002 | /** | ||
| 2003 | * i915_gpu_turbo_disable - disable graphics turbo | ||
| 2004 | * | ||
| 2005 | * Disable graphics turbo by resetting the max frequency and setting the | ||
| 2006 | * current frequency to the default. | ||
| 2007 | */ | ||
| 2008 | bool i915_gpu_turbo_disable(void) | ||
| 2009 | { | ||
| 2010 | struct drm_i915_private *dev_priv; | ||
| 2011 | bool ret = true; | ||
| 2012 | |||
| 2013 | spin_lock(&mchdev_lock); | ||
| 2014 | if (!i915_mch_dev) { | ||
| 2015 | ret = false; | ||
| 2016 | goto out_unlock; | ||
| 2017 | } | ||
| 2018 | dev_priv = i915_mch_dev; | ||
| 2019 | |||
| 2020 | dev_priv->max_delay = dev_priv->fstart; | ||
| 2021 | |||
| 2022 | if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) | ||
| 2023 | ret = false; | ||
| 2024 | |||
| 2025 | out_unlock: | ||
| 2026 | spin_unlock(&mchdev_lock); | ||
| 2027 | |||
| 2028 | return ret; | ||
| 2029 | } | ||
| 2030 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | ||
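Taken together, these exports suggest a simple polling loop on the IPS side; a hypothetical adjustment step (the threshold comparison and idle check are illustrative only):

    static void ips_adjust_gpu(unsigned long sample, unsigned long budget)
    {
    	if (!i915_gpu_busy())
    		return;              /* GPU idle: leave the limit alone */

    	if (sample < budget)
    		i915_gpu_raise();    /* headroom left: allow a higher max */
    	else
    		i915_gpu_lower();    /* close to the limit: throttle back */
    }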
| 2031 | |||
| 1580 | /** | 2032 | /** |
| 1581 | * i915_driver_load - setup chip and create an initial config | 2033 | * i915_driver_load - setup chip and create an initial config |
| 1582 | * @dev: DRM device | 2034 | * @dev: DRM device |
| @@ -1594,7 +2046,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1594 | resource_size_t base, size; | 2046 | resource_size_t base, size; |
| 1595 | int ret = 0, mmio_bar; | 2047 | int ret = 0, mmio_bar; |
| 1596 | uint32_t agp_size, prealloc_size, prealloc_start; | 2048 | uint32_t agp_size, prealloc_size, prealloc_start; |
| 1597 | |||
| 1598 | /* i915 has 4 more counters */ | 2049 | /* i915 has 4 more counters */ |
| 1599 | dev->counters += 4; | 2050 | dev->counters += 4; |
| 1600 | dev->types[6] = _DRM_STAT_IRQ; | 2051 | dev->types[6] = _DRM_STAT_IRQ; |
| @@ -1672,6 +2123,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1672 | dev_priv->has_gem = 0; | 2123 | dev_priv->has_gem = 0; |
| 1673 | } | 2124 | } |
| 1674 | 2125 | ||
| 2126 | if (dev_priv->has_gem == 0 && | ||
| 2127 | drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
| 2128 | DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); | ||
| 2129 | ret = -ENODEV; | ||
| 2130 | goto out_iomapfree; | ||
| 2131 | } | ||
| 2132 | |||
| 1675 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 2133 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
| 1676 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 2134 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
| 1677 | if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { | 2135 | if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { |
| @@ -1691,7 +2149,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1691 | goto out_workqueue_free; | 2149 | goto out_workqueue_free; |
| 1692 | } | 2150 | } |
| 1693 | 2151 | ||
| 1694 | i915_get_mem_freq(dev); | 2152 | if (IS_PINEVIEW(dev)) |
| 2153 | i915_pineview_get_mem_freq(dev); | ||
| 2154 | else if (IS_IRONLAKE(dev)) | ||
| 2155 | i915_ironlake_get_mem_freq(dev); | ||
| 1695 | 2156 | ||
| 1696 | /* On the 945G/GM, the chipset reports the MSI capability on the | 2157 | /* On the 945G/GM, the chipset reports the MSI capability on the |
| 1697 | * integrated graphics even though the support isn't actually there | 2158 | * integrated graphics even though the support isn't actually there |
| @@ -1709,7 +2170,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1709 | 2170 | ||
| 1710 | spin_lock_init(&dev_priv->user_irq_lock); | 2171 | spin_lock_init(&dev_priv->user_irq_lock); |
| 1711 | spin_lock_init(&dev_priv->error_lock); | 2172 | spin_lock_init(&dev_priv->error_lock); |
| 1712 | dev_priv->user_irq_refcount = 0; | ||
| 1713 | dev_priv->trace_irq_seqno = 0; | 2173 | dev_priv->trace_irq_seqno = 0; |
| 1714 | 2174 | ||
| 1715 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | 2175 | ret = drm_vblank_init(dev, I915_NUM_PIPE); |
| @@ -1738,6 +2198,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1738 | 2198 | ||
| 1739 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | 2199 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
| 1740 | (unsigned long) dev); | 2200 | (unsigned long) dev); |
| 2201 | |||
| 2202 | spin_lock(&mchdev_lock); | ||
| 2203 | i915_mch_dev = dev_priv; | ||
| 2204 | dev_priv->mchdev_lock = &mchdev_lock; | ||
| 2205 | spin_unlock(&mchdev_lock); | ||
| 2206 | |||
| 1741 | return 0; | 2207 | return 0; |
| 1742 | 2208 | ||
| 1743 | out_workqueue_free: | 2209 | out_workqueue_free: |
| @@ -1759,6 +2225,10 @@ int i915_driver_unload(struct drm_device *dev) | |||
| 1759 | 2225 | ||
| 1760 | i915_destroy_error_state(dev); | 2226 | i915_destroy_error_state(dev); |
| 1761 | 2227 | ||
| 2228 | spin_lock(&mchdev_lock); | ||
| 2229 | i915_mch_dev = NULL; | ||
| 2230 | spin_unlock(&mchdev_lock); | ||
| 2231 | |||
| 1762 | destroy_workqueue(dev_priv->wq); | 2232 | destroy_workqueue(dev_priv->wq); |
| 1763 | del_timer_sync(&dev_priv->hangcheck_timer); | 2233 | del_timer_sync(&dev_priv->hangcheck_timer); |
| 1764 | 2234 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5c51e45ab68d..423dc90c1e20 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -60,95 +60,95 @@ extern int intel_agp_enabled; | |||
| 60 | .subdevice = PCI_ANY_ID, \ | 60 | .subdevice = PCI_ANY_ID, \ |
| 61 | .driver_data = (unsigned long) info } | 61 | .driver_data = (unsigned long) info } |
| 62 | 62 | ||
| 63 | const static struct intel_device_info intel_i830_info = { | 63 | static const struct intel_device_info intel_i830_info = { |
| 64 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | 64 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, |
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | const static struct intel_device_info intel_845g_info = { | 67 | static const struct intel_device_info intel_845g_info = { |
| 68 | .is_i8xx = 1, | 68 | .is_i8xx = 1, |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | const static struct intel_device_info intel_i85x_info = { | 71 | static const struct intel_device_info intel_i85x_info = { |
| 72 | .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, | 72 | .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, |
| 73 | .cursor_needs_physical = 1, | 73 | .cursor_needs_physical = 1, |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | const static struct intel_device_info intel_i865g_info = { | 76 | static const struct intel_device_info intel_i865g_info = { |
| 77 | .is_i8xx = 1, | 77 | .is_i8xx = 1, |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | const static struct intel_device_info intel_i915g_info = { | 80 | static const struct intel_device_info intel_i915g_info = { |
| 81 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, | 81 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, |
| 82 | }; | 82 | }; |
| 83 | const static struct intel_device_info intel_i915gm_info = { | 83 | static const struct intel_device_info intel_i915gm_info = { |
| 84 | .is_i9xx = 1, .is_mobile = 1, | 84 | .is_i9xx = 1, .is_mobile = 1, |
| 85 | .cursor_needs_physical = 1, | 85 | .cursor_needs_physical = 1, |
| 86 | }; | 86 | }; |
| 87 | const static struct intel_device_info intel_i945g_info = { | 87 | static const struct intel_device_info intel_i945g_info = { |
| 88 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, | 88 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, |
| 89 | }; | 89 | }; |
| 90 | const static struct intel_device_info intel_i945gm_info = { | 90 | static const struct intel_device_info intel_i945gm_info = { |
| 91 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, | 91 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, |
| 92 | .has_hotplug = 1, .cursor_needs_physical = 1, | 92 | .has_hotplug = 1, .cursor_needs_physical = 1, |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | const static struct intel_device_info intel_i965g_info = { | 95 | static const struct intel_device_info intel_i965g_info = { |
| 96 | .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, | 96 | .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, |
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | const static struct intel_device_info intel_i965gm_info = { | 99 | static const struct intel_device_info intel_i965gm_info = { |
| 100 | .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, | 100 | .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, |
| 101 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, | 101 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, |
| 102 | .has_hotplug = 1, | 102 | .has_hotplug = 1, |
| 103 | }; | 103 | }; |
| 104 | 104 | ||
| 105 | const static struct intel_device_info intel_g33_info = { | 105 | static const struct intel_device_info intel_g33_info = { |
| 106 | .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 106 | .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
| 107 | .has_hotplug = 1, | 107 | .has_hotplug = 1, |
| 108 | }; | 108 | }; |
| 109 | 109 | ||
| 110 | const static struct intel_device_info intel_g45_info = { | 110 | static const struct intel_device_info intel_g45_info = { |
| 111 | .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 111 | .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
| 112 | .has_pipe_cxsr = 1, | 112 | .has_pipe_cxsr = 1, |
| 113 | .has_hotplug = 1, | 113 | .has_hotplug = 1, |
| 114 | }; | 114 | }; |
| 115 | 115 | ||
| 116 | const static struct intel_device_info intel_gm45_info = { | 116 | static const struct intel_device_info intel_gm45_info = { |
| 117 | .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, | 117 | .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, |
| 118 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, | 118 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, |
| 119 | .has_pipe_cxsr = 1, | 119 | .has_pipe_cxsr = 1, |
| 120 | .has_hotplug = 1, | 120 | .has_hotplug = 1, |
| 121 | }; | 121 | }; |
| 122 | 122 | ||
| 123 | const static struct intel_device_info intel_pineview_info = { | 123 | static const struct intel_device_info intel_pineview_info = { |
| 124 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, | 124 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, |
| 125 | .need_gfx_hws = 1, | 125 | .need_gfx_hws = 1, |
| 126 | .has_hotplug = 1, | 126 | .has_hotplug = 1, |
| 127 | }; | 127 | }; |
| 128 | 128 | ||
| 129 | const static struct intel_device_info intel_ironlake_d_info = { | 129 | static const struct intel_device_info intel_ironlake_d_info = { |
| 130 | .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 130 | .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
| 131 | .has_pipe_cxsr = 1, | 131 | .has_pipe_cxsr = 1, |
| 132 | .has_hotplug = 1, | 132 | .has_hotplug = 1, |
| 133 | }; | 133 | }; |
| 134 | 134 | ||
| 135 | const static struct intel_device_info intel_ironlake_m_info = { | 135 | static const struct intel_device_info intel_ironlake_m_info = { |
| 136 | .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, | 136 | .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, |
| 137 | .need_gfx_hws = 1, .has_rc6 = 1, | 137 | .need_gfx_hws = 1, .has_rc6 = 1, |
| 138 | .has_hotplug = 1, | 138 | .has_hotplug = 1, |
| 139 | }; | 139 | }; |
| 140 | 140 | ||
| 141 | const static struct intel_device_info intel_sandybridge_d_info = { | 141 | static const struct intel_device_info intel_sandybridge_d_info = { |
| 142 | .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 142 | .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
| 143 | .has_hotplug = 1, .is_gen6 = 1, | 143 | .has_hotplug = 1, .is_gen6 = 1, |
| 144 | }; | 144 | }; |
| 145 | 145 | ||
| 146 | const static struct intel_device_info intel_sandybridge_m_info = { | 146 | static const struct intel_device_info intel_sandybridge_m_info = { |
| 147 | .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 147 | .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, |
| 148 | .has_hotplug = 1, .is_gen6 = 1, | 148 | .has_hotplug = 1, .is_gen6 = 1, |
| 149 | }; | 149 | }; |
| 150 | 150 | ||
| 151 | const static struct pci_device_id pciidlist[] = { | 151 | static const struct pci_device_id pciidlist[] = { |
| 152 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), | 152 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), |
| 153 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), | 153 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), |
| 154 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), | 154 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), |
| @@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags) | |||
| 340 | /* | 340 | /* |
| 341 | * Clear request list | 341 | * Clear request list |
| 342 | */ | 342 | */ |
| 343 | i915_gem_retire_requests(dev); | 343 | i915_gem_retire_requests(dev, &dev_priv->render_ring); |
| 344 | 344 | ||
| 345 | if (need_display) | 345 | if (need_display) |
| 346 | i915_save_display(dev); | 346 | i915_save_display(dev); |
| @@ -370,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags) | |||
| 370 | } | 370 | } |
| 371 | } else { | 371 | } else { |
| 372 | DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); | 372 | DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); |
| 373 | mutex_unlock(&dev->struct_mutex); | ||
| 373 | return -ENODEV; | 374 | return -ENODEV; |
| 374 | } | 375 | } |
| 375 | 376 | ||
| @@ -388,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags) | |||
| 388 | * switched away). | 389 | * switched away). |
| 389 | */ | 390 | */ |
| 390 | if (drm_core_check_feature(dev, DRIVER_MODESET) || | 391 | if (drm_core_check_feature(dev, DRIVER_MODESET) || |
| 391 | !dev_priv->mm.suspended) { | 392 | !dev_priv->mm.suspended) { |
| 392 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; | 393 | struct intel_ring_buffer *ring = &dev_priv->render_ring; |
| 393 | struct drm_gem_object *obj = ring->ring_obj; | ||
| 394 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
| 395 | dev_priv->mm.suspended = 0; | 394 | dev_priv->mm.suspended = 0; |
| 396 | 395 | ring->init(dev, ring); | |
| 397 | /* Stop the ring if it's running. */ | ||
| 398 | I915_WRITE(PRB0_CTL, 0); | ||
| 399 | I915_WRITE(PRB0_TAIL, 0); | ||
| 400 | I915_WRITE(PRB0_HEAD, 0); | ||
| 401 | |||
| 402 | /* Initialize the ring. */ | ||
| 403 | I915_WRITE(PRB0_START, obj_priv->gtt_offset); | ||
| 404 | I915_WRITE(PRB0_CTL, | ||
| 405 | ((obj->size - 4096) & RING_NR_PAGES) | | ||
| 406 | RING_NO_REPORT | | ||
| 407 | RING_VALID); | ||
| 408 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 409 | i915_kernel_lost_context(dev); | ||
| 410 | else { | ||
| 411 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
| 412 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | ||
| 413 | ring->space = ring->head - (ring->tail + 8); | ||
| 414 | if (ring->space < 0) | ||
| 415 | ring->space += ring->Size; | ||
| 416 | } | ||
| 417 | |||
| 418 | mutex_unlock(&dev->struct_mutex); | 396 | mutex_unlock(&dev->struct_mutex); |
| 419 | drm_irq_uninstall(dev); | 397 | drm_irq_uninstall(dev); |
| 420 | drm_irq_install(dev); | 398 | drm_irq_install(dev); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7f797ef1ab39..2e1744d37ad5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | #include "i915_reg.h" | 33 | #include "i915_reg.h" |
| 34 | #include "intel_bios.h" | 34 | #include "intel_bios.h" |
| 35 | #include "intel_ringbuffer.h" | ||
| 35 | #include <linux/io-mapping.h> | 36 | #include <linux/io-mapping.h> |
| 36 | 37 | ||
| 37 | /* General customization: | 38 | /* General customization: |
| @@ -55,6 +56,8 @@ enum plane { | |||
| 55 | 56 | ||
| 56 | #define I915_NUM_PIPE 2 | 57 | #define I915_NUM_PIPE 2 |
| 57 | 58 | ||
| 59 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) | ||
| 60 | |||
| 58 | /* Interface history: | 61 | /* Interface history: |
| 59 | * | 62 | * |
| 60 | * 1.1: Original. | 63 | * 1.1: Original. |
| @@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object { | |||
| 89 | struct drm_gem_object *cur_obj; | 92 | struct drm_gem_object *cur_obj; |
| 90 | }; | 93 | }; |
| 91 | 94 | ||
| 92 | typedef struct _drm_i915_ring_buffer { | ||
| 93 | unsigned long Size; | ||
| 94 | u8 *virtual_start; | ||
| 95 | int head; | ||
| 96 | int tail; | ||
| 97 | int space; | ||
| 98 | drm_local_map_t map; | ||
| 99 | struct drm_gem_object *ring_obj; | ||
| 100 | } drm_i915_ring_buffer_t; | ||
| 101 | |||
| 102 | struct mem_block { | 95 | struct mem_block { |
| 103 | struct mem_block *next; | 96 | struct mem_block *next; |
| 104 | struct mem_block *prev; | 97 | struct mem_block *prev; |
| @@ -222,6 +215,7 @@ enum no_fbc_reason { | |||
| 222 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ | 215 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ |
| 223 | FBC_BAD_PLANE, /* fbc not supported on plane */ | 216 | FBC_BAD_PLANE, /* fbc not supported on plane */ |
| 224 | FBC_NOT_TILED, /* buffer not tiled */ | 217 | FBC_NOT_TILED, /* buffer not tiled */ |
| 218 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ | ||
| 225 | }; | 219 | }; |
| 226 | 220 | ||
| 227 | enum intel_pch { | 221 | enum intel_pch { |
| @@ -229,6 +223,8 @@ enum intel_pch { | |||
| 229 | PCH_CPT, /* Cougarpoint PCH */ | 223 | PCH_CPT, /* Cougarpoint PCH */ |
| 230 | }; | 224 | }; |
| 231 | 225 | ||
| 226 | #define QUIRK_PIPEA_FORCE (1<<0) | ||
| 227 | |||
| 232 | struct intel_fbdev; | 228 | struct intel_fbdev; |
| 233 | 229 | ||
| 234 | typedef struct drm_i915_private { | 230 | typedef struct drm_i915_private { |
| @@ -241,17 +237,15 @@ typedef struct drm_i915_private { | |||
| 241 | void __iomem *regs; | 237 | void __iomem *regs; |
| 242 | 238 | ||
| 243 | struct pci_dev *bridge_dev; | 239 | struct pci_dev *bridge_dev; |
| 244 | drm_i915_ring_buffer_t ring; | 240 | struct intel_ring_buffer render_ring; |
| 241 | struct intel_ring_buffer bsd_ring; | ||
| 245 | 242 | ||
| 246 | drm_dma_handle_t *status_page_dmah; | 243 | drm_dma_handle_t *status_page_dmah; |
| 247 | void *hw_status_page; | ||
| 248 | void *seqno_page; | 244 | void *seqno_page; |
| 249 | dma_addr_t dma_status_page; | 245 | dma_addr_t dma_status_page; |
| 250 | uint32_t counter; | 246 | uint32_t counter; |
| 251 | unsigned int status_gfx_addr; | ||
| 252 | unsigned int seqno_gfx_addr; | 247 | unsigned int seqno_gfx_addr; |
| 253 | drm_local_map_t hws_map; | 248 | drm_local_map_t hws_map; |
| 254 | struct drm_gem_object *hws_obj; | ||
| 255 | struct drm_gem_object *seqno_obj; | 249 | struct drm_gem_object *seqno_obj; |
| 256 | struct drm_gem_object *pwrctx; | 250 | struct drm_gem_object *pwrctx; |
| 257 | 251 | ||
| @@ -267,8 +261,6 @@ typedef struct drm_i915_private { | |||
| 267 | atomic_t irq_received; | 261 | atomic_t irq_received; |
| 268 | /** Protects user_irq_refcount and irq_mask_reg */ | 262 | /** Protects user_irq_refcount and irq_mask_reg */ |
| 269 | spinlock_t user_irq_lock; | 263 | spinlock_t user_irq_lock; |
| 270 | /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ | ||
| 271 | int user_irq_refcount; | ||
| 272 | u32 trace_irq_seqno; | 264 | u32 trace_irq_seqno; |
| 273 | /** Cached value of IMR to avoid reads in updating the bitfield */ | 265 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
| 274 | u32 irq_mask_reg; | 266 | u32 irq_mask_reg; |
| @@ -289,6 +281,7 @@ typedef struct drm_i915_private { | |||
| 289 | struct mem_block *agp_heap; | 281 | struct mem_block *agp_heap; |
| 290 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 282 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
| 291 | int vblank_pipe; | 283 | int vblank_pipe; |
| 284 | int num_pipe; | ||
| 292 | 285 | ||
| 293 | /* For hangcheck timer */ | 286 | /* For hangcheck timer */ |
| 294 | #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ | 287 | #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ |
| @@ -334,7 +327,7 @@ typedef struct drm_i915_private { | |||
| 334 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 327 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
| 335 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | 328 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
| 336 | 329 | ||
| 337 | unsigned int fsb_freq, mem_freq; | 330 | unsigned int fsb_freq, mem_freq, is_ddr3; |
| 338 | 331 | ||
| 339 | spinlock_t error_lock; | 332 | spinlock_t error_lock; |
| 340 | struct drm_i915_error_state *first_error; | 333 | struct drm_i915_error_state *first_error; |
| @@ -347,6 +340,8 @@ typedef struct drm_i915_private { | |||
| 347 | /* PCH chipset type */ | 340 | /* PCH chipset type */ |
| 348 | enum intel_pch pch_type; | 341 | enum intel_pch pch_type; |
| 349 | 342 | ||
| 343 | unsigned long quirks; | ||
| 344 | |||
| 350 | /* Register state */ | 345 | /* Register state */ |
| 351 | bool modeset_on_lid; | 346 | bool modeset_on_lid; |
| 352 | u8 saveLBB; | 347 | u8 saveLBB; |
| @@ -514,18 +509,7 @@ typedef struct drm_i915_private { | |||
| 514 | */ | 509 | */ |
| 515 | struct list_head shrink_list; | 510 | struct list_head shrink_list; |
| 516 | 511 | ||
| 517 | /** | ||
| 518 | * List of objects currently involved in rendering from the | ||
| 519 | * ringbuffer. | ||
| 520 | * | ||
| 521 | * Includes buffers having the contents of their GPU caches | ||
| 522 | * flushed, not necessarily primitives. last_rendering_seqno | ||
| 523 | * represents when the rendering involved will be completed. | ||
| 524 | * | ||
| 525 | * A reference is held on the buffer while on this list. | ||
| 526 | */ | ||
| 527 | spinlock_t active_list_lock; | 512 | spinlock_t active_list_lock; |
| 528 | struct list_head active_list; | ||
| 529 | 513 | ||
| 530 | /** | 514 | /** |
| 531 | * List of objects which are not in the ringbuffer but which | 515 | * List of objects which are not in the ringbuffer but which |
| @@ -563,12 +547,6 @@ typedef struct drm_i915_private { | |||
| 563 | struct list_head fence_list; | 547 | struct list_head fence_list; |
| 564 | 548 | ||
| 565 | /** | 549 | /** |
| 566 | * List of breadcrumbs associated with GPU requests currently | ||
| 567 | * outstanding. | ||
| 568 | */ | ||
| 569 | struct list_head request_list; | ||
| 570 | |||
| 571 | /** | ||
| 572 | * We leave the user IRQ off as much as possible, | 550 | * We leave the user IRQ off as much as possible, |
| 573 | * but this means that requests will finish and never | 551 | * but this means that requests will finish and never |
| 574 | * be retired once the system goes idle. Set a timer to | 552 | * be retired once the system goes idle. Set a timer to |
| @@ -623,6 +601,7 @@ typedef struct drm_i915_private { | |||
| 623 | struct drm_crtc *plane_to_crtc_mapping[2]; | 601 | struct drm_crtc *plane_to_crtc_mapping[2]; |
| 624 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 602 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
| 625 | wait_queue_head_t pending_flip_queue; | 603 | wait_queue_head_t pending_flip_queue; |
| 604 | bool flip_pending_is_done; | ||
| 626 | 605 | ||
| 627 | /* Reclocking support */ | 606 | /* Reclocking support */ |
| 628 | bool render_reclock_avail; | 607 | bool render_reclock_avail; |
| @@ -644,6 +623,18 @@ typedef struct drm_i915_private { | |||
| 644 | u8 cur_delay; | 623 | u8 cur_delay; |
| 645 | u8 min_delay; | 624 | u8 min_delay; |
| 646 | u8 max_delay; | 625 | u8 max_delay; |
| 626 | u8 fmax; | ||
| 627 | u8 fstart; | ||
| 628 | |||
| 629 | u64 last_count1; | ||
| 630 | unsigned long last_time1; | ||
| 631 | u64 last_count2; | ||
| 632 | struct timespec last_time2; | ||
| 633 | unsigned long gfx_power; | ||
| 634 | int c_m; | ||
| 635 | int r_t; | ||
| 636 | u8 corr; | ||
| 637 | spinlock_t *mchdev_lock; | ||
| 647 | 638 | ||
| 648 | enum no_fbc_reason no_fbc_reason; | 639 | enum no_fbc_reason no_fbc_reason; |
| 649 | 640 | ||
| @@ -671,19 +662,64 @@ struct drm_i915_gem_object { | |||
| 671 | * (has pending rendering), and is not set if it's on inactive (ready | 662 | * (has pending rendering), and is not set if it's on inactive (ready |
| 672 | * to be unbound). | 663 | * to be unbound). |
| 673 | */ | 664 | */ |
| 674 | int active; | 665 | unsigned int active : 1; |
| 675 | 666 | ||
| 676 | /** | 667 | /** |
| 677 | * This is set if the object has been written to since last bound | 668 | * This is set if the object has been written to since last bound |
| 678 | * to the GTT | 669 | * to the GTT |
| 679 | */ | 670 | */ |
| 680 | int dirty; | 671 | unsigned int dirty : 1; |
| 672 | |||
| 673 | /** | ||
| 674 | * Fence register bits (if any) for this object. Will be set | ||
| 675 | * as needed when mapped into the GTT. | ||
| 676 | * Protected by dev->struct_mutex. | ||
| 677 | * | ||
| 678 | * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) | ||
| 679 | */ | ||
| 680 | int fence_reg : 5; | ||
| 681 | |||
| 682 | /** | ||
| 683 | * Used for checking the object doesn't appear more than once | ||
| 684 | * in an execbuffer object list. | ||
| 685 | */ | ||
| 686 | unsigned int in_execbuffer : 1; | ||
| 687 | |||
| 688 | /** | ||
| 689 | * Advice: are the backing pages purgeable? | ||
| 690 | */ | ||
| 691 | unsigned int madv : 2; | ||
| 692 | |||
| 693 | /** | ||
| 694 | * Refcount for the pages array. With the current locking scheme, there | ||
| 695 | * are at most two concurrent users: Binding a bo to the gtt and | ||
| 696 | * pwrite/pread using physical addresses. So two bits for a maximum | ||
| 697 | * of two users are enough. | ||
| 698 | */ | ||
| 699 | unsigned int pages_refcount : 2; | ||
| 700 | #define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3 | ||
| 701 | |||
| 702 | /** | ||
| 703 | * Current tiling mode for the object. | ||
| 704 | */ | ||
| 705 | unsigned int tiling_mode : 2; | ||
| 706 | |||
| 707 | /** How many users have pinned this object in GTT space. The following | ||
| 708 | * users can each hold at most one reference: pwrite/pread, pin_ioctl | ||
| 709 | * (via user_pin_count), execbuffer (objects are not allowed multiple | ||
| 710 | * times for the same batchbuffer), and the framebuffer code. When | ||
| 711 | * switching/pageflipping, the framebuffer code has at most two buffers | ||
| 712 | * pinned per crtc. | ||
| 713 | * | ||
| 714 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 | ||
| 715 | * bits with absolutely no headroom. So use 4 bits. */ | ||
| 716 | int pin_count : 4; | ||
| 717 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf | ||
| 681 | 718 | ||
| 682 | /** AGP memory structure for our GTT binding. */ | 719 | /** AGP memory structure for our GTT binding. */ |
| 683 | DRM_AGP_MEM *agp_mem; | 720 | DRM_AGP_MEM *agp_mem; |
| 684 | 721 | ||
| 685 | struct page **pages; | 722 | struct page **pages; |
| 686 | int pages_refcount; | ||
| 687 | 723 | ||
| 688 | /** | 724 | /** |
| 689 | * Current offset of the object in GTT space. | 725 | * Current offset of the object in GTT space. |
| @@ -692,26 +728,18 @@ struct drm_i915_gem_object { | |||
| 692 | */ | 728 | */ |
| 693 | uint32_t gtt_offset; | 729 | uint32_t gtt_offset; |
| 694 | 730 | ||
	| 731 | /* Which ring, if any, this object is referring to */	| ||
| 732 | struct intel_ring_buffer *ring; | ||
| 733 | |||
| 695 | /** | 734 | /** |
| 696 | * Fake offset for use by mmap(2) | 735 | * Fake offset for use by mmap(2) |
| 697 | */ | 736 | */ |
| 698 | uint64_t mmap_offset; | 737 | uint64_t mmap_offset; |
| 699 | 738 | ||
| 700 | /** | ||
| 701 | * Fence register bits (if any) for this object. Will be set | ||
| 702 | * as needed when mapped into the GTT. | ||
| 703 | * Protected by dev->struct_mutex. | ||
| 704 | */ | ||
| 705 | int fence_reg; | ||
| 706 | |||
| 707 | /** How many users have pinned this object in GTT space */ | ||
| 708 | int pin_count; | ||
| 709 | |||
| 710 | /** Breadcrumb of last rendering to the buffer. */ | 739 | /** Breadcrumb of last rendering to the buffer. */ |
| 711 | uint32_t last_rendering_seqno; | 740 | uint32_t last_rendering_seqno; |
| 712 | 741 | ||
| 713 | /** Current tiling mode for the object. */ | 742 | /** Current tiling stride for the object, if it's tiled. */ |
| 714 | uint32_t tiling_mode; | ||
| 715 | uint32_t stride; | 743 | uint32_t stride; |
| 716 | 744 | ||
| 717 | /** Record of address bit 17 of each page at last unbind. */ | 745 | /** Record of address bit 17 of each page at last unbind. */ |
| @@ -734,17 +762,6 @@ struct drm_i915_gem_object { | |||
| 734 | struct drm_i915_gem_phys_object *phys_obj; | 762 | struct drm_i915_gem_phys_object *phys_obj; |
| 735 | 763 | ||
| 736 | /** | 764 | /** |
| 737 | * Used for checking the object doesn't appear more than once | ||
| 738 | * in an execbuffer object list. | ||
| 739 | */ | ||
| 740 | int in_execbuffer; | ||
| 741 | |||
| 742 | /** | ||
| 743 | * Advice: are the backing pages purgeable? | ||
| 744 | */ | ||
| 745 | int madv; | ||
| 746 | |||
| 747 | /** | ||
| 748 | * Number of crtcs where this object is currently the fb, but | 765 | * Number of crtcs where this object is currently the fb, but |
| 749 | * will be page flipped away on the next vblank. When it | 766 | * will be page flipped away on the next vblank. When it |
| 750 | * reaches 0, dev_priv->pending_flip_queue will be woken up. | 767 | * reaches 0, dev_priv->pending_flip_queue will be woken up. |
| @@ -765,6 +782,9 @@ struct drm_i915_gem_object { | |||
| 765 | * an emission time with seqnos for tracking how far ahead of the GPU we are. | 782 | * an emission time with seqnos for tracking how far ahead of the GPU we are. |
| 766 | */ | 783 | */ |
| 767 | struct drm_i915_gem_request { | 784 | struct drm_i915_gem_request { |
	| 785 | /** On which ring this request was generated */	| ||
| 786 | struct intel_ring_buffer *ring; | ||
| 787 | |||
| 768 | /** GEM sequence number associated with this request. */ | 788 | /** GEM sequence number associated with this request. */ |
| 769 | uint32_t seqno; | 789 | uint32_t seqno; |
| 770 | 790 | ||
| @@ -821,6 +841,11 @@ extern int i915_emit_box(struct drm_device *dev, | |||
| 821 | struct drm_clip_rect *boxes, | 841 | struct drm_clip_rect *boxes, |
| 822 | int i, int DR1, int DR4); | 842 | int i, int DR1, int DR4); |
| 823 | extern int i965_reset(struct drm_device *dev, u8 flags); | 843 | extern int i965_reset(struct drm_device *dev, u8 flags); |
| 844 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); | ||
| 845 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | ||
| 846 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); | ||
| 847 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); | ||
| 848 | |||
| 824 | 849 | ||
| 825 | /* i915_irq.c */ | 850 | /* i915_irq.c */ |
| 826 | void i915_hangcheck_elapsed(unsigned long data); | 851 | void i915_hangcheck_elapsed(unsigned long data); |
| @@ -829,9 +854,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, | |||
| 829 | struct drm_file *file_priv); | 854 | struct drm_file *file_priv); |
| 830 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 855 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
| 831 | struct drm_file *file_priv); | 856 | struct drm_file *file_priv); |
| 832 | void i915_user_irq_get(struct drm_device *dev); | ||
| 833 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno); | 857 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno); |
| 834 | void i915_user_irq_put(struct drm_device *dev); | ||
| 835 | extern void i915_enable_interrupt (struct drm_device *dev); | 858 | extern void i915_enable_interrupt (struct drm_device *dev); |
| 836 | 859 | ||
| 837 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); | 860 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); |
| @@ -849,6 +872,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); | |||
| 849 | extern int i915_vblank_swap(struct drm_device *dev, void *data, | 872 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
| 850 | struct drm_file *file_priv); | 873 | struct drm_file *file_priv); |
| 851 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); | 874 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); |
| 875 | extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); | ||
| 876 | extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, | ||
| 877 | u32 mask); | ||
| 878 | extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, | ||
| 879 | u32 mask); | ||
| 852 | 880 | ||
| 853 | void | 881 | void |
| 854 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 882 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
| @@ -922,11 +950,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj); | |||
| 922 | int i915_gem_object_unbind(struct drm_gem_object *obj); | 950 | int i915_gem_object_unbind(struct drm_gem_object *obj); |
| 923 | void i915_gem_release_mmap(struct drm_gem_object *obj); | 951 | void i915_gem_release_mmap(struct drm_gem_object *obj); |
| 924 | void i915_gem_lastclose(struct drm_device *dev); | 952 | void i915_gem_lastclose(struct drm_device *dev); |
| 925 | uint32_t i915_get_gem_seqno(struct drm_device *dev); | 953 | uint32_t i915_get_gem_seqno(struct drm_device *dev, |
| 954 | struct intel_ring_buffer *ring); | ||
| 926 | bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); | 955 | bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); |
| 927 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); | 956 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); |
| 928 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); | 957 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); |
| 929 | void i915_gem_retire_requests(struct drm_device *dev); | 958 | void i915_gem_retire_requests(struct drm_device *dev, |
| 959 | struct intel_ring_buffer *ring); | ||
| 930 | void i915_gem_retire_work_handler(struct work_struct *work); | 960 | void i915_gem_retire_work_handler(struct work_struct *work); |
| 931 | void i915_gem_clflush_object(struct drm_gem_object *obj); | 961 | void i915_gem_clflush_object(struct drm_gem_object *obj); |
| 932 | int i915_gem_object_set_domain(struct drm_gem_object *obj, | 962 | int i915_gem_object_set_domain(struct drm_gem_object *obj, |
| @@ -937,9 +967,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | |||
| 937 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 967 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
| 938 | unsigned long end); | 968 | unsigned long end); |
| 939 | int i915_gem_idle(struct drm_device *dev); | 969 | int i915_gem_idle(struct drm_device *dev); |
| 940 | uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | 970 | uint32_t i915_add_request(struct drm_device *dev, |
| 941 | uint32_t flush_domains); | 971 | struct drm_file *file_priv, |
| 942 | int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); | 972 | uint32_t flush_domains, |
| 973 | struct intel_ring_buffer *ring); | ||
| 974 | int i915_do_wait_request(struct drm_device *dev, | ||
| 975 | uint32_t seqno, int interruptible, | ||
| 976 | struct intel_ring_buffer *ring); | ||
| 943 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 977 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 944 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | 978 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, |
| 945 | int write); | 979 | int write); |
| @@ -1015,7 +1049,7 @@ extern void g4x_disable_fbc(struct drm_device *dev); | |||
| 1015 | extern void intel_disable_fbc(struct drm_device *dev); | 1049 | extern void intel_disable_fbc(struct drm_device *dev); |
| 1016 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); | 1050 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); |
| 1017 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1051 | extern bool intel_fbc_enabled(struct drm_device *dev); |
| 1018 | 1052 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | |
| 1019 | extern void intel_detect_pch (struct drm_device *dev); | 1053 | extern void intel_detect_pch (struct drm_device *dev); |
| 1020 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | 1054 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); |
| 1021 | 1055 | ||
| @@ -1026,7 +1060,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | |||
| 1026 | * has access to the ring. | 1060 | * has access to the ring. |
| 1027 | */ | 1061 | */ |
| 1028 | #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ | 1062 | #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ |
| 1029 | if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ | 1063 | if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ |
| 1064 | == NULL) \ | ||
| 1030 | LOCK_TEST_WITH_RETURN(dev, file_priv); \ | 1065 | LOCK_TEST_WITH_RETURN(dev, file_priv); \ |
| 1031 | } while (0) | 1066 | } while (0) |
| 1032 | 1067 | ||
| @@ -1039,35 +1074,31 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | |||
| 1039 | #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) | 1074 | #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) |
| 1040 | #define I915_READ64(reg) readq(dev_priv->regs + (reg)) | 1075 | #define I915_READ64(reg) readq(dev_priv->regs + (reg)) |
| 1041 | #define POSTING_READ(reg) (void)I915_READ(reg) | 1076 | #define POSTING_READ(reg) (void)I915_READ(reg) |
| 1077 | #define POSTING_READ16(reg) (void)I915_READ16(reg) | ||
| 1042 | 1078 | ||
| 1043 | #define I915_VERBOSE 0 | 1079 | #define I915_VERBOSE 0 |
| 1044 | 1080 | ||
| 1045 | #define RING_LOCALS volatile unsigned int *ring_virt__; | 1081 | #define BEGIN_LP_RING(n) do { \ |
| 1046 | 1082 | drm_i915_private_t *dev_priv = dev->dev_private; \ | |
| 1047 | #define BEGIN_LP_RING(n) do { \ | 1083 | if (I915_VERBOSE) \ |
| 1048 | int bytes__ = 4*(n); \ | 1084 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ |
| 1049 | if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ | 1085 | intel_ring_begin(dev, &dev_priv->render_ring, (n)); \ |
| 1050 | /* a wrap must occur between instructions so pad beforehand */ \ | ||
| 1051 | if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \ | ||
| 1052 | i915_wrap_ring(dev); \ | ||
| 1053 | if (unlikely (dev_priv->ring.space < bytes__)) \ | ||
| 1054 | i915_wait_ring(dev, bytes__, __func__); \ | ||
| 1055 | ring_virt__ = (unsigned int *) \ | ||
| 1056 | (dev_priv->ring.virtual_start + dev_priv->ring.tail); \ | ||
| 1057 | dev_priv->ring.tail += bytes__; \ | ||
| 1058 | dev_priv->ring.tail &= dev_priv->ring.Size - 1; \ | ||
| 1059 | dev_priv->ring.space -= bytes__; \ | ||
| 1060 | } while (0) | 1086 | } while (0) |
| 1061 | 1087 | ||
| 1062 | #define OUT_RING(n) do { \ | 1088 | |
| 1063 | if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ | 1089 | #define OUT_RING(x) do { \ |
| 1064 | *ring_virt__++ = (n); \ | 1090 | drm_i915_private_t *dev_priv = dev->dev_private; \ |
| 1091 | if (I915_VERBOSE) \ | ||
| 1092 | DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ | ||
| 1093 | intel_ring_emit(dev, &dev_priv->render_ring, x); \ | ||
| 1065 | } while (0) | 1094 | } while (0) |
| 1066 | 1095 | ||
| 1067 | #define ADVANCE_LP_RING() do { \ | 1096 | #define ADVANCE_LP_RING() do { \ |
| 1097 | drm_i915_private_t *dev_priv = dev->dev_private; \ | ||
| 1068 | if (I915_VERBOSE) \ | 1098 | if (I915_VERBOSE) \ |
| 1069 | DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \ | 1099 | DRM_DEBUG("ADVANCE_LP_RING %x\n", \ |
| 1070 | I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \ | 1100 | dev_priv->render_ring.tail); \ |
| 1101 | intel_ring_advance(dev, &dev_priv->render_ring); \ | ||
| 1071 | } while(0) | 1102 | } while(0) |
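The call pattern for the reworked macros is unchanged even though they now route through the intel_ring_buffer helpers; a minimal emission sketch, using MI_NOOP purely as a placeholder command:

    BEGIN_LP_RING(2);       /* reserve two dwords on the render ring */
    OUT_RING(MI_NOOP);
    OUT_RING(MI_NOOP);
    ADVANCE_LP_RING();      /* push the new tail to the hardware */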
| 1072 | 1103 | ||
| 1073 | /** | 1104 | /** |
| @@ -1085,14 +1116,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | |||
| 1085 | * | 1116 | * |
| 1086 | * The area from dword 0x20 to 0x3ff is available for driver usage. | 1117 | * The area from dword 0x20 to 0x3ff is available for driver usage. |
| 1087 | */ | 1118 | */ |
| 1088 | #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) | 1119 | #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ |
| 1120 | (dev_priv->render_ring.status_page.page_addr))[reg]) | ||
| 1089 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) | 1121 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) |
| 1090 | #define I915_GEM_HWS_INDEX 0x20 | 1122 | #define I915_GEM_HWS_INDEX 0x20 |
| 1091 | #define I915_BREADCRUMB_INDEX 0x21 | 1123 | #define I915_BREADCRUMB_INDEX 0x21 |
| 1092 | 1124 | ||
| 1093 | extern int i915_wrap_ring(struct drm_device * dev); | ||
| 1094 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | ||
| 1095 | |||
| 1096 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) | 1125 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) |
| 1097 | 1126 | ||
| 1098 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | 1127 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
| @@ -1138,6 +1167,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
| 1138 | (dev)->pci_device == 0x2A42 || \ | 1167 | (dev)->pci_device == 0x2A42 || \ |
| 1139 | (dev)->pci_device == 0x2E42) | 1168 | (dev)->pci_device == 0x2E42) |
| 1140 | 1169 | ||
| 1170 | #define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) | ||
| 1141 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 1171 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
| 1142 | 1172 | ||
| 1143 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1173 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 112699f71fa4..5aa747fc25a9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -35,8 +35,6 @@ | |||
| 35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
| 36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
| 37 | 37 | ||
| 38 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) | ||
| 39 | |||
| 40 | static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); | 38 | static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); |
| 41 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); |
| 42 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); |
| @@ -169,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | |||
| 169 | obj_priv->tiling_mode != I915_TILING_NONE; | 167 | obj_priv->tiling_mode != I915_TILING_NONE; |
| 170 | } | 168 | } |
| 171 | 169 | ||
| 172 | static inline int | 170 | static inline void |
| 173 | slow_shmem_copy(struct page *dst_page, | 171 | slow_shmem_copy(struct page *dst_page, |
| 174 | int dst_offset, | 172 | int dst_offset, |
| 175 | struct page *src_page, | 173 | struct page *src_page, |
| @@ -178,25 +176,16 @@ slow_shmem_copy(struct page *dst_page, | |||
| 178 | { | 176 | { |
| 179 | char *dst_vaddr, *src_vaddr; | 177 | char *dst_vaddr, *src_vaddr; |
| 180 | 178 | ||
| 181 | dst_vaddr = kmap_atomic(dst_page, KM_USER0); | 179 | dst_vaddr = kmap(dst_page); |
| 182 | if (dst_vaddr == NULL) | 180 | src_vaddr = kmap(src_page); |
| 183 | return -ENOMEM; | ||
| 184 | |||
| 185 | src_vaddr = kmap_atomic(src_page, KM_USER1); | ||
| 186 | if (src_vaddr == NULL) { | ||
| 187 | kunmap_atomic(dst_vaddr, KM_USER0); | ||
| 188 | return -ENOMEM; | ||
| 189 | } | ||
| 190 | 181 | ||
| 191 | memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); | 182 | memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); |
| 192 | 183 | ||
| 193 | kunmap_atomic(src_vaddr, KM_USER1); | 184 | kunmap(src_page); |
| 194 | kunmap_atomic(dst_vaddr, KM_USER0); | 185 | kunmap(dst_page); |
| 195 | |||
| 196 | return 0; | ||
| 197 | } | 186 | } |
| 198 | 187 | ||
| 199 | static inline int | 188 | static inline void |
| 200 | slow_shmem_bit17_copy(struct page *gpu_page, | 189 | slow_shmem_bit17_copy(struct page *gpu_page, |
| 201 | int gpu_offset, | 190 | int gpu_offset, |
| 202 | struct page *cpu_page, | 191 | struct page *cpu_page, |
| @@ -216,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, | |||
| 216 | cpu_page, cpu_offset, length); | 205 | cpu_page, cpu_offset, length); |
| 217 | } | 206 | } |
| 218 | 207 | ||
| 219 | gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); | 208 | gpu_vaddr = kmap(gpu_page); |
| 220 | if (gpu_vaddr == NULL) | 209 | cpu_vaddr = kmap(cpu_page); |
| 221 | return -ENOMEM; | ||
| 222 | |||
| 223 | cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); | ||
| 224 | if (cpu_vaddr == NULL) { | ||
| 225 | kunmap_atomic(gpu_vaddr, KM_USER0); | ||
| 226 | return -ENOMEM; | ||
| 227 | } | ||
| 228 | 210 | ||
| 229 | /* Copy the data, XORing A6 with A17 (1). The user already knows he's | 211 | /* Copy the data, XORing A6 with A17 (1). The user already knows he's |
| 230 | * XORing with the other bits (A9 for Y, A9 and A10 for X) | 212 | * XORing with the other bits (A9 for Y, A9 and A10 for X) |
| @@ -248,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, | |||
| 248 | length -= this_length; | 230 | length -= this_length; |
| 249 | } | 231 | } |
| 250 | 232 | ||
| 251 | kunmap_atomic(cpu_vaddr, KM_USER1); | 233 | kunmap(cpu_page); |
| 252 | kunmap_atomic(gpu_vaddr, KM_USER0); | 234 | kunmap(gpu_page); |
| 253 | |||
| 254 | return 0; | ||
| 255 | } | 235 | } |
| 256 | 236 | ||
| 257 | /** | 237 | /** |
| @@ -427,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 427 | page_length = PAGE_SIZE - data_page_offset; | 407 | page_length = PAGE_SIZE - data_page_offset; |
| 428 | 408 | ||
| 429 | if (do_bit17_swizzling) { | 409 | if (do_bit17_swizzling) { |
| 430 | ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | 410 | slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], |
| 431 | shmem_page_offset, | ||
| 432 | user_pages[data_page_index], | ||
| 433 | data_page_offset, | ||
| 434 | page_length, | ||
| 435 | 1); | ||
| 436 | } else { | ||
| 437 | ret = slow_shmem_copy(user_pages[data_page_index], | ||
| 438 | data_page_offset, | ||
| 439 | obj_priv->pages[shmem_page_index], | ||
| 440 | shmem_page_offset, | 411 | shmem_page_offset, |
| 441 | page_length); | 412 | user_pages[data_page_index], |
| 413 | data_page_offset, | ||
| 414 | page_length, | ||
| 415 | 1); | ||
| 416 | } else { | ||
| 417 | slow_shmem_copy(user_pages[data_page_index], | ||
| 418 | data_page_offset, | ||
| 419 | obj_priv->pages[shmem_page_index], | ||
| 420 | shmem_page_offset, | ||
| 421 | page_length); | ||
| 442 | } | 422 | } |
| 443 | if (ret) | ||
| 444 | goto fail_put_pages; | ||
| 445 | 423 | ||
| 446 | remain -= page_length; | 424 | remain -= page_length; |
| 447 | data_ptr += page_length; | 425 | data_ptr += page_length; |
| @@ -531,25 +509,24 @@ fast_user_write(struct io_mapping *mapping, | |||
| 531 | * page faults | 509 | * page faults |
| 532 | */ | 510 | */ |
| 533 | 511 | ||
| 534 | static inline int | 512 | static inline void |
| 535 | slow_kernel_write(struct io_mapping *mapping, | 513 | slow_kernel_write(struct io_mapping *mapping, |
| 536 | loff_t gtt_base, int gtt_offset, | 514 | loff_t gtt_base, int gtt_offset, |
| 537 | struct page *user_page, int user_offset, | 515 | struct page *user_page, int user_offset, |
| 538 | int length) | 516 | int length) |
| 539 | { | 517 | { |
| 540 | char *src_vaddr, *dst_vaddr; | 518 | char __iomem *dst_vaddr; |
| 541 | unsigned long unwritten; | 519 | char *src_vaddr; |
| 542 | 520 | ||
| 543 | dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); | 521 | dst_vaddr = io_mapping_map_wc(mapping, gtt_base); |
| 544 | src_vaddr = kmap_atomic(user_page, KM_USER1); | 522 | src_vaddr = kmap(user_page); |
| 545 | unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, | 523 | |
| 546 | src_vaddr + user_offset, | 524 | memcpy_toio(dst_vaddr + gtt_offset, |
| 547 | length); | 525 | src_vaddr + user_offset, |
| 548 | kunmap_atomic(src_vaddr, KM_USER1); | 526 | length); |
| 549 | io_mapping_unmap_atomic(dst_vaddr); | 527 | |
| 550 | if (unwritten) | 528 | kunmap(user_page); |
| 551 | return -EFAULT; | 529 | io_mapping_unmap(dst_vaddr); |
| 552 | return 0; | ||
| 553 | } | 530 | } |
| 554 | 531 | ||
| 555 | static inline int | 532 | static inline int |
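The old slow_kernel_write ran under atomic mappings, so it had to use the _inatomic, nocache copy and turn any nonzero residue into -EFAULT; the new version takes a sleeping WC mapping and memcpy_toio, which cannot report failure, so the return value and every caller check go away. Old and new cores side by side (taken from the hunk above; illustrative):

    /* old: atomic mappings, copy reports a residue on failure */
    dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
    src_vaddr = kmap_atomic(user_page, KM_USER1);
    unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
                                                  src_vaddr + user_offset,
                                                  length);
    kunmap_atomic(src_vaddr, KM_USER1);
    io_mapping_unmap_atomic(dst_vaddr);
    if (unwritten)
            return -EFAULT;

    /* new: sleeping WC mapping, pinned and kmap()ed source page */
    dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
    src_vaddr = kmap(user_page);
    memcpy_toio(dst_vaddr + gtt_offset, src_vaddr + user_offset, length);
    kunmap(user_page);
    io_mapping_unmap(dst_vaddr);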
| @@ -722,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 722 | if ((data_page_offset + page_length) > PAGE_SIZE) | 699 | if ((data_page_offset + page_length) > PAGE_SIZE) |
| 723 | page_length = PAGE_SIZE - data_page_offset; | 700 | page_length = PAGE_SIZE - data_page_offset; |
| 724 | 701 | ||
| 725 | ret = slow_kernel_write(dev_priv->mm.gtt_mapping, | 702 | slow_kernel_write(dev_priv->mm.gtt_mapping, |
| 726 | gtt_page_base, gtt_page_offset, | 703 | gtt_page_base, gtt_page_offset, |
| 727 | user_pages[data_page_index], | 704 | user_pages[data_page_index], |
| 728 | data_page_offset, | 705 | data_page_offset, |
| 729 | page_length); | 706 | page_length); |
| 730 | |||
| 731 | /* If we get a fault while copying data, then (presumably) our | ||
| 732 | * source page isn't available. Return the error and we'll | ||
| 733 | * retry in the slow path. | ||
| 734 | */ | ||
| 735 | if (ret) | ||
| 736 | goto out_unpin_object; | ||
| 737 | 707 | ||
| 738 | remain -= page_length; | 708 | remain -= page_length; |
| 739 | offset += page_length; | 709 | offset += page_length; |
| @@ -902,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 902 | page_length = PAGE_SIZE - data_page_offset; | 872 | page_length = PAGE_SIZE - data_page_offset; |
| 903 | 873 | ||
| 904 | if (do_bit17_swizzling) { | 874 | if (do_bit17_swizzling) { |
| 905 | ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | 875 | slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], |
| 906 | shmem_page_offset, | ||
| 907 | user_pages[data_page_index], | ||
| 908 | data_page_offset, | ||
| 909 | page_length, | ||
| 910 | 0); | ||
| 911 | } else { | ||
| 912 | ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], | ||
| 913 | shmem_page_offset, | 876 | shmem_page_offset, |
| 914 | user_pages[data_page_index], | 877 | user_pages[data_page_index], |
| 915 | data_page_offset, | 878 | data_page_offset, |
| 916 | page_length); | 879 | page_length, |
| 880 | 0); | ||
| 881 | } else { | ||
| 882 | slow_shmem_copy(obj_priv->pages[shmem_page_index], | ||
| 883 | shmem_page_offset, | ||
| 884 | user_pages[data_page_index], | ||
| 885 | data_page_offset, | ||
| 886 | page_length); | ||
| 917 | } | 887 | } |
| 918 | if (ret) | ||
| 919 | goto fail_put_pages; | ||
| 920 | 888 | ||
| 921 | remain -= page_length; | 889 | remain -= page_length; |
| 922 | data_ptr += page_length; | 890 | data_ptr += page_length; |
| @@ -973,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 973 | if (obj_priv->phys_obj) | 941 | if (obj_priv->phys_obj) |
| 974 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); | 942 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); |
| 975 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 943 | else if (obj_priv->tiling_mode == I915_TILING_NONE && |
| 976 | dev->gtt_total != 0) { | 944 | dev->gtt_total != 0 && |
| 945 | obj->write_domain != I915_GEM_DOMAIN_CPU) { | ||
| 977 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); | 946 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); |
| 978 | if (ret == -EFAULT) { | 947 | if (ret == -EFAULT) { |
| 979 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, | 948 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, |
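The extra `obj->write_domain != I915_GEM_DOMAIN_CPU` test keeps objects the CPU currently owns off the GTT write path, presumably so a pwrite does not bypass data still sitting in the CPU caches; such objects fall through to the shmem path instead. The resulting decision ladder in i915_gem_pwrite_ioctl, condensed (sketch; the shmem function names come from hunks elsewhere in this patch):

    if (obj_priv->phys_obj)
            ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
    else if (obj_priv->tiling_mode == I915_TILING_NONE &&
             dev->gtt_total != 0 &&
             obj->write_domain != I915_GEM_DOMAIN_CPU) {
            ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
            if (ret == -EFAULT)     /* fault: retry with pages pinned */
                    ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                   file_priv);
    } else {
            /* CPU-domain and tiled objects: shmem pwrite path */
            ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
    }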
| @@ -1484,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
| 1484 | } | 1453 | } |
| 1485 | 1454 | ||
| 1486 | static void | 1455 | static void |
| 1487 | i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) | 1456 | i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, |
| 1457 | struct intel_ring_buffer *ring) | ||
| 1488 | { | 1458 | { |
| 1489 | struct drm_device *dev = obj->dev; | 1459 | struct drm_device *dev = obj->dev; |
| 1490 | drm_i915_private_t *dev_priv = dev->dev_private; | 1460 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1491 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1461 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1462 | BUG_ON(ring == NULL); | ||
| 1463 | obj_priv->ring = ring; | ||
| 1492 | 1464 | ||
| 1493 | /* Add a reference if we're newly entering the active list. */ | 1465 | /* Add a reference if we're newly entering the active list. */ |
| 1494 | if (!obj_priv->active) { | 1466 | if (!obj_priv->active) { |
| @@ -1497,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) | |||
| 1497 | } | 1469 | } |
| 1498 | /* Move from whatever list we were on to the tail of execution. */ | 1470 | /* Move from whatever list we were on to the tail of execution. */ |
| 1499 | spin_lock(&dev_priv->mm.active_list_lock); | 1471 | spin_lock(&dev_priv->mm.active_list_lock); |
| 1500 | list_move_tail(&obj_priv->list, | 1472 | list_move_tail(&obj_priv->list, &ring->active_list); |
| 1501 | &dev_priv->mm.active_list); | ||
| 1502 | spin_unlock(&dev_priv->mm.active_list_lock); | 1473 | spin_unlock(&dev_priv->mm.active_list_lock); |
| 1503 | obj_priv->last_rendering_seqno = seqno; | 1474 | obj_priv->last_rendering_seqno = seqno; |
| 1504 | } | 1475 | } |
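Call sites now name the ring at submission time, and obj_priv->ring remembers it so later waits and retires consult the right seqno space. The execbuffer tail further down uses it like this (condensed from later hunks):

    seqno = i915_add_request(dev, file_priv, flush_domains, ring);
    for (i = 0; i < args->buffer_count; i++)
            i915_gem_object_move_to_active(object_list[i], seqno, ring);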
| @@ -1551,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
| 1551 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | 1522 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); |
| 1552 | 1523 | ||
| 1553 | obj_priv->last_rendering_seqno = 0; | 1524 | obj_priv->last_rendering_seqno = 0; |
| 1525 | obj_priv->ring = NULL; | ||
| 1554 | if (obj_priv->active) { | 1526 | if (obj_priv->active) { |
| 1555 | obj_priv->active = 0; | 1527 | obj_priv->active = 0; |
| 1556 | drm_gem_object_unreference(obj); | 1528 | drm_gem_object_unreference(obj); |
| @@ -1560,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
| 1560 | 1532 | ||
| 1561 | static void | 1533 | static void |
| 1562 | i915_gem_process_flushing_list(struct drm_device *dev, | 1534 | i915_gem_process_flushing_list(struct drm_device *dev, |
| 1563 | uint32_t flush_domains, uint32_t seqno) | 1535 | uint32_t flush_domains, uint32_t seqno, |
| 1536 | struct intel_ring_buffer *ring) | ||
| 1564 | { | 1537 | { |
| 1565 | drm_i915_private_t *dev_priv = dev->dev_private; | 1538 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1566 | struct drm_i915_gem_object *obj_priv, *next; | 1539 | struct drm_i915_gem_object *obj_priv, *next; |
| @@ -1571,12 +1544,13 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
| 1571 | struct drm_gem_object *obj = &obj_priv->base; | 1544 | struct drm_gem_object *obj = &obj_priv->base; |
| 1572 | 1545 | ||
| 1573 | if ((obj->write_domain & flush_domains) == | 1546 | if ((obj->write_domain & flush_domains) == |
| 1574 | obj->write_domain) { | 1547 | obj->write_domain && |
| 1548 | obj_priv->ring->ring_flag == ring->ring_flag) { | ||
| 1575 | uint32_t old_write_domain = obj->write_domain; | 1549 | uint32_t old_write_domain = obj->write_domain; |
| 1576 | 1550 | ||
| 1577 | obj->write_domain = 0; | 1551 | obj->write_domain = 0; |
| 1578 | list_del_init(&obj_priv->gpu_write_list); | 1552 | list_del_init(&obj_priv->gpu_write_list); |
| 1579 | i915_gem_object_move_to_active(obj, seqno); | 1553 | i915_gem_object_move_to_active(obj, seqno, ring); |
| 1580 | 1554 | ||
| 1581 | /* update the fence lru list */ | 1555 | /* update the fence lru list */ |
| 1582 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 1556 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { |
| @@ -1593,31 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
| 1593 | } | 1567 | } |
| 1594 | } | 1568 | } |
| 1595 | 1569 | ||
| 1596 | #define PIPE_CONTROL_FLUSH(addr) \ | ||
| 1597 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | ||
| 1598 | PIPE_CONTROL_DEPTH_STALL); \ | ||
| 1599 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | ||
| 1600 | OUT_RING(0); \ | ||
| 1601 | OUT_RING(0); \ | ||
| 1602 | |||
| 1603 | /** | ||
| 1604 | * Creates a new sequence number, emitting a write of it to the status page | ||
| 1605 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | ||
| 1606 | * | ||
| 1607 | * Must be called with struct_lock held. | ||
| 1608 | * | ||
| 1609 | * Returned sequence numbers are nonzero on success. | ||
| 1610 | */ | ||
| 1611 | uint32_t | 1570 | uint32_t |
| 1612 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | 1571 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, |
| 1613 | uint32_t flush_domains) | 1572 | uint32_t flush_domains, struct intel_ring_buffer *ring) |
| 1614 | { | 1573 | { |
| 1615 | drm_i915_private_t *dev_priv = dev->dev_private; | 1574 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1616 | struct drm_i915_file_private *i915_file_priv = NULL; | 1575 | struct drm_i915_file_private *i915_file_priv = NULL; |
| 1617 | struct drm_i915_gem_request *request; | 1576 | struct drm_i915_gem_request *request; |
| 1618 | uint32_t seqno; | 1577 | uint32_t seqno; |
| 1619 | int was_empty; | 1578 | int was_empty; |
| 1620 | RING_LOCALS; | ||
| 1621 | 1579 | ||
| 1622 | if (file_priv != NULL) | 1580 | if (file_priv != NULL) |
| 1623 | i915_file_priv = file_priv->driver_priv; | 1581 | i915_file_priv = file_priv->driver_priv; |
| @@ -1626,62 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
| 1626 | if (request == NULL) | 1584 | if (request == NULL) |
| 1627 | return 0; | 1585 | return 0; |
| 1628 | 1586 | ||
| 1629 | /* Grab the seqno we're going to make this request be, and bump the | 1587 | seqno = ring->add_request(dev, ring, file_priv, flush_domains); |
| 1630 | * next (skipping 0 so it can be the reserved no-seqno value). | ||
| 1631 | */ | ||
| 1632 | seqno = dev_priv->mm.next_gem_seqno; | ||
| 1633 | dev_priv->mm.next_gem_seqno++; | ||
| 1634 | if (dev_priv->mm.next_gem_seqno == 0) | ||
| 1635 | dev_priv->mm.next_gem_seqno++; | ||
| 1636 | |||
| 1637 | if (HAS_PIPE_CONTROL(dev)) { | ||
| 1638 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; | ||
| 1639 | |||
| 1640 | /* | ||
| 1641 | * Workaround qword write incoherence by flushing the | ||
| 1642 | * PIPE_NOTIFY buffers out to memory before requesting | ||
| 1643 | * an interrupt. | ||
| 1644 | */ | ||
| 1645 | BEGIN_LP_RING(32); | ||
| 1646 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
| 1647 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
| 1648 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
| 1649 | OUT_RING(seqno); | ||
| 1650 | OUT_RING(0); | ||
| 1651 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1652 | scratch_addr += 128; /* write to separate cachelines */ | ||
| 1653 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1654 | scratch_addr += 128; | ||
| 1655 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1656 | scratch_addr += 128; | ||
| 1657 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1658 | scratch_addr += 128; | ||
| 1659 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1660 | scratch_addr += 128; | ||
| 1661 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1662 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
| 1663 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
| 1664 | PIPE_CONTROL_NOTIFY); | ||
| 1665 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
| 1666 | OUT_RING(seqno); | ||
| 1667 | OUT_RING(0); | ||
| 1668 | ADVANCE_LP_RING(); | ||
| 1669 | } else { | ||
| 1670 | BEGIN_LP_RING(4); | ||
| 1671 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
| 1672 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
| 1673 | OUT_RING(seqno); | ||
| 1674 | |||
| 1675 | OUT_RING(MI_USER_INTERRUPT); | ||
| 1676 | ADVANCE_LP_RING(); | ||
| 1677 | } | ||
| 1678 | |||
| 1679 | DRM_DEBUG_DRIVER("%d\n", seqno); | ||
| 1680 | 1588 | ||
| 1681 | request->seqno = seqno; | 1589 | request->seqno = seqno; |
| 1590 | request->ring = ring; | ||
| 1682 | request->emitted_jiffies = jiffies; | 1591 | request->emitted_jiffies = jiffies; |
| 1683 | was_empty = list_empty(&dev_priv->mm.request_list); | 1592 | was_empty = list_empty(&ring->request_list); |
| 1684 | list_add_tail(&request->list, &dev_priv->mm.request_list); | 1593 | list_add_tail(&request->list, &ring->request_list); |
| 1594 | |||
| 1685 | if (i915_file_priv) { | 1595 | if (i915_file_priv) { |
| 1686 | list_add_tail(&request->client_list, | 1596 | list_add_tail(&request->client_list, |
| 1687 | &i915_file_priv->mm.request_list); | 1597 | &i915_file_priv->mm.request_list); |
| @@ -1693,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
| 1693 | * domain we're flushing with our flush. | 1603 | * domain we're flushing with our flush. |
| 1694 | */ | 1604 | */ |
| 1695 | if (flush_domains != 0) | 1605 | if (flush_domains != 0) |
| 1696 | i915_gem_process_flushing_list(dev, flush_domains, seqno); | 1606 | i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); |
| 1697 | 1607 | ||
| 1698 | if (!dev_priv->mm.suspended) { | 1608 | if (!dev_priv->mm.suspended) { |
| 1699 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 1609 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
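After this hunk i915_add_request is ring-agnostic: seqno allocation, the gen4 PIPE_CONTROL incoherence workaround and the MI_STORE_DWORD_INDEX path all move behind ring->add_request, and the request is queued on the ring's own list. The members this file touches imply roughly this shape for struct intel_ring_buffer (only fields visible in the patch; the real definition lives in the new ringbuffer header and carries more, and the nested status_page shape here is an assumption):

    struct intel_ring_buffer {
            struct list_head        active_list;    /* objects in flight on this ring */
            struct list_head        request_list;   /* outstanding requests, oldest first */
            wait_queue_head_t       irq_queue;      /* seqno waiters */
            u32                     waiting_gem_seqno;
            u32                     ring_flag;      /* I915_EXEC_RENDER or I915_EXEC_BSD */
            struct drm_gem_object   *gem_object;    /* the ring's own backing object */
            struct {
                    void *page_addr;                /* hardware status page, if mapped */
            } status_page;

            u32  (*add_request)(struct drm_device *dev,
                                struct intel_ring_buffer *ring,
                                struct drm_file *file_priv,
                                u32 flush_domains);
            u32  (*get_gem_seqno)(struct drm_device *dev,
                                  struct intel_ring_buffer *ring);
            void (*flush)(struct drm_device *dev,
                          struct intel_ring_buffer *ring,
                          u32 invalidate_domains, u32 flush_domains);
            void (*user_irq_get)(struct drm_device *dev,
                                 struct intel_ring_buffer *ring);
            void (*user_irq_put)(struct drm_device *dev,
                                 struct intel_ring_buffer *ring);
            int  (*dispatch_gem_execbuffer)(struct drm_device *dev,
                                            struct intel_ring_buffer *ring,
                                            struct drm_i915_gem_execbuffer2 *exec,
                                            struct drm_clip_rect *cliprects,
                                            uint64_t exec_offset);
    };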
| @@ -1710,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
| 1710 | * before signalling the CPU | 1620 | * before signalling the CPU |
| 1711 | */ | 1621 | */ |
| 1712 | static uint32_t | 1622 | static uint32_t |
| 1713 | i915_retire_commands(struct drm_device *dev) | 1623 | i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) |
| 1714 | { | 1624 | { |
| 1715 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 1716 | uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
| 1717 | uint32_t flush_domains = 0; | 1625 | uint32_t flush_domains = 0; |
| 1718 | RING_LOCALS; | ||
| 1719 | 1626 | ||
| 1720 | /* The sampler always gets flushed on i965 (sigh) */ | 1627 | /* The sampler always gets flushed on i965 (sigh) */ |
| 1721 | if (IS_I965G(dev)) | 1628 | if (IS_I965G(dev)) |
| 1722 | flush_domains |= I915_GEM_DOMAIN_SAMPLER; | 1629 | flush_domains |= I915_GEM_DOMAIN_SAMPLER; |
| 1723 | BEGIN_LP_RING(2); | 1630 | |
| 1724 | OUT_RING(cmd); | 1631 | ring->flush(dev, ring, |
| 1725 | OUT_RING(0); /* noop */ | 1632 | I915_GEM_DOMAIN_COMMAND, flush_domains); |
| 1726 | ADVANCE_LP_RING(); | ||
| 1727 | return flush_domains; | 1633 | return flush_domains; |
| 1728 | } | 1634 | } |
| 1729 | 1635 | ||
| @@ -1743,11 +1649,11 @@ i915_gem_retire_request(struct drm_device *dev, | |||
| 1743 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 1649 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
| 1744 | */ | 1650 | */ |
| 1745 | spin_lock(&dev_priv->mm.active_list_lock); | 1651 | spin_lock(&dev_priv->mm.active_list_lock); |
| 1746 | while (!list_empty(&dev_priv->mm.active_list)) { | 1652 | while (!list_empty(&request->ring->active_list)) { |
| 1747 | struct drm_gem_object *obj; | 1653 | struct drm_gem_object *obj; |
| 1748 | struct drm_i915_gem_object *obj_priv; | 1654 | struct drm_i915_gem_object *obj_priv; |
| 1749 | 1655 | ||
| 1750 | obj_priv = list_first_entry(&dev_priv->mm.active_list, | 1656 | obj_priv = list_first_entry(&request->ring->active_list, |
| 1751 | struct drm_i915_gem_object, | 1657 | struct drm_i915_gem_object, |
| 1752 | list); | 1658 | list); |
| 1753 | obj = &obj_priv->base; | 1659 | obj = &obj_priv->base; |
| @@ -1794,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
| 1794 | } | 1700 | } |
| 1795 | 1701 | ||
| 1796 | uint32_t | 1702 | uint32_t |
| 1797 | i915_get_gem_seqno(struct drm_device *dev) | 1703 | i915_get_gem_seqno(struct drm_device *dev, |
| 1704 | struct intel_ring_buffer *ring) | ||
| 1798 | { | 1705 | { |
| 1799 | drm_i915_private_t *dev_priv = dev->dev_private; | 1706 | return ring->get_gem_seqno(dev, ring); |
| 1800 | |||
| 1801 | if (HAS_PIPE_CONTROL(dev)) | ||
| 1802 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | ||
| 1803 | else | ||
| 1804 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | ||
| 1805 | } | 1707 | } |
| 1806 | 1708 | ||
| 1807 | /** | 1709 | /** |
| 1808 | * This function clears the request list as sequence numbers are passed. | 1710 | * This function clears the request list as sequence numbers are passed. |
| 1809 | */ | 1711 | */ |
| 1810 | void | 1712 | void |
| 1811 | i915_gem_retire_requests(struct drm_device *dev) | 1713 | i915_gem_retire_requests(struct drm_device *dev, |
| 1714 | struct intel_ring_buffer *ring) | ||
| 1812 | { | 1715 | { |
| 1813 | drm_i915_private_t *dev_priv = dev->dev_private; | 1716 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1814 | uint32_t seqno; | 1717 | uint32_t seqno; |
| 1815 | 1718 | ||
| 1816 | if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) | 1719 | if (!ring->status_page.page_addr |
| 1720 | || list_empty(&ring->request_list)) | ||
| 1817 | return; | 1721 | return; |
| 1818 | 1722 | ||
| 1819 | seqno = i915_get_gem_seqno(dev); | 1723 | seqno = i915_get_gem_seqno(dev, ring); |
| 1820 | 1724 | ||
| 1821 | while (!list_empty(&dev_priv->mm.request_list)) { | 1725 | while (!list_empty(&ring->request_list)) { |
| 1822 | struct drm_i915_gem_request *request; | 1726 | struct drm_i915_gem_request *request; |
| 1823 | uint32_t retiring_seqno; | 1727 | uint32_t retiring_seqno; |
| 1824 | 1728 | ||
| 1825 | request = list_first_entry(&dev_priv->mm.request_list, | 1729 | request = list_first_entry(&ring->request_list, |
| 1826 | struct drm_i915_gem_request, | 1730 | struct drm_i915_gem_request, |
| 1827 | list); | 1731 | list); |
| 1828 | retiring_seqno = request->seqno; | 1732 | retiring_seqno = request->seqno; |
| @@ -1840,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
| 1840 | 1744 | ||
| 1841 | if (unlikely (dev_priv->trace_irq_seqno && | 1745 | if (unlikely (dev_priv->trace_irq_seqno && |
| 1842 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { | 1746 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { |
| 1843 | i915_user_irq_put(dev); | 1747 | |
| 1748 | ring->user_irq_put(dev, ring); | ||
| 1844 | dev_priv->trace_irq_seqno = 0; | 1749 | dev_priv->trace_irq_seqno = 0; |
| 1845 | } | 1750 | } |
| 1846 | } | 1751 | } |
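Both the retire loop and the waiters decide completion through i915_seqno_passed, named in the hunk header above; it stays correct across the 32-bit wrap because the subtraction is evaluated as signed:

    static inline int
    i915_seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            /* true when seq1 is at or past seq2; e.g. seq1 = 0x00000001
             * still "passes" seq2 = 0xfffffff0 after a wrap */
            return (int32_t)(seq1 - seq2) >= 0;
    }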
| @@ -1856,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
| 1856 | dev = dev_priv->dev; | 1761 | dev = dev_priv->dev; |
| 1857 | 1762 | ||
| 1858 | mutex_lock(&dev->struct_mutex); | 1763 | mutex_lock(&dev->struct_mutex); |
| 1859 | i915_gem_retire_requests(dev); | 1764 | i915_gem_retire_requests(dev, &dev_priv->render_ring); |
| 1765 | |||
| 1766 | if (HAS_BSD(dev)) | ||
| 1767 | i915_gem_retire_requests(dev, &dev_priv->bsd_ring); | ||
| 1768 | |||
| 1860 | if (!dev_priv->mm.suspended && | 1769 | if (!dev_priv->mm.suspended && |
| 1861 | !list_empty(&dev_priv->mm.request_list)) | 1770 | (!list_empty(&dev_priv->render_ring.request_list) || |
| 1771 | (HAS_BSD(dev) && | ||
| 1772 | !list_empty(&dev_priv->bsd_ring.request_list)))) | ||
| 1862 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 1773 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
| 1863 | mutex_unlock(&dev->struct_mutex); | 1774 | mutex_unlock(&dev->struct_mutex); |
| 1864 | } | 1775 | } |
| 1865 | 1776 | ||
| 1866 | int | 1777 | int |
| 1867 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | 1778 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, |
| 1779 | int interruptible, struct intel_ring_buffer *ring) | ||
| 1868 | { | 1780 | { |
| 1869 | drm_i915_private_t *dev_priv = dev->dev_private; | 1781 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1870 | u32 ier; | 1782 | u32 ier; |
| @@ -1875,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | |||
| 1875 | if (atomic_read(&dev_priv->mm.wedged)) | 1787 | if (atomic_read(&dev_priv->mm.wedged)) |
| 1876 | return -EIO; | 1788 | return -EIO; |
| 1877 | 1789 | ||
| 1878 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { | 1790 | if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { |
| 1879 | if (HAS_PCH_SPLIT(dev)) | 1791 | if (HAS_PCH_SPLIT(dev)) |
| 1880 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 1792 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
| 1881 | else | 1793 | else |
| @@ -1889,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | |||
| 1889 | 1801 | ||
| 1890 | trace_i915_gem_request_wait_begin(dev, seqno); | 1802 | trace_i915_gem_request_wait_begin(dev, seqno); |
| 1891 | 1803 | ||
| 1892 | dev_priv->mm.waiting_gem_seqno = seqno; | 1804 | ring->waiting_gem_seqno = seqno; |
| 1893 | i915_user_irq_get(dev); | 1805 | ring->user_irq_get(dev, ring); |
| 1894 | if (interruptible) | 1806 | if (interruptible) |
| 1895 | ret = wait_event_interruptible(dev_priv->irq_queue, | 1807 | ret = wait_event_interruptible(ring->irq_queue, |
| 1896 | i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || | 1808 | i915_seqno_passed( |
| 1897 | atomic_read(&dev_priv->mm.wedged)); | 1809 | ring->get_gem_seqno(dev, ring), seqno) |
| 1810 | || atomic_read(&dev_priv->mm.wedged)); | ||
| 1898 | else | 1811 | else |
| 1899 | wait_event(dev_priv->irq_queue, | 1812 | wait_event(ring->irq_queue, |
| 1900 | i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || | 1813 | i915_seqno_passed( |
| 1901 | atomic_read(&dev_priv->mm.wedged)); | 1814 | ring->get_gem_seqno(dev, ring), seqno) |
| 1815 | || atomic_read(&dev_priv->mm.wedged)); | ||
| 1902 | 1816 | ||
| 1903 | i915_user_irq_put(dev); | 1817 | ring->user_irq_put(dev, ring); |
| 1904 | dev_priv->mm.waiting_gem_seqno = 0; | 1818 | ring->waiting_gem_seqno = 0; |
| 1905 | 1819 | ||
| 1906 | trace_i915_gem_request_wait_end(dev, seqno); | 1820 | trace_i915_gem_request_wait_end(dev, seqno); |
| 1907 | } | 1821 | } |
| @@ -1910,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | |||
| 1910 | 1824 | ||
| 1911 | if (ret && ret != -ERESTARTSYS) | 1825 | if (ret && ret != -ERESTARTSYS) |
| 1912 | DRM_ERROR("%s returns %d (awaiting %d at %d)\n", | 1826 | DRM_ERROR("%s returns %d (awaiting %d at %d)\n", |
| 1913 | __func__, ret, seqno, i915_get_gem_seqno(dev)); | 1827 | __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); |
| 1914 | 1828 | ||
| 1915 | /* Directly dispatch request retiring. While we have the work queue | 1829 | /* Directly dispatch request retiring. While we have the work queue |
| 1916 | * to handle this, the waiter on a request often wants an associated | 1830 | * to handle this, the waiter on a request often wants an associated |
| @@ -1918,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | |||
| 1918 | * a separate wait queue to handle that. | 1832 | * a separate wait queue to handle that. |
| 1919 | */ | 1833 | */ |
| 1920 | if (ret == 0) | 1834 | if (ret == 0) |
| 1921 | i915_gem_retire_requests(dev); | 1835 | i915_gem_retire_requests(dev, ring); |
| 1922 | 1836 | ||
| 1923 | return ret; | 1837 | return ret; |
| 1924 | } | 1838 | } |
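The wait protocol is now entirely per-ring: publish the target seqno in ring->waiting_gem_seqno, take a user-interrupt reference so seqno completion raises an IRQ, then sleep on the ring's queue. Stripped of the interruptible and wedged-error variants shown above, the core is:

    ring->waiting_gem_seqno = seqno;
    ring->user_irq_get(dev, ring);
    wait_event(ring->irq_queue,
               i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno) ||
               atomic_read(&dev_priv->mm.wedged));
    ring->user_irq_put(dev, ring);
    ring->waiting_gem_seqno = 0;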
| @@ -1928,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | |||
| 1928 | * request and object lists appropriately for that event. | 1842 | * request and object lists appropriately for that event. |
| 1929 | */ | 1843 | */ |
| 1930 | static int | 1844 | static int |
| 1931 | i915_wait_request(struct drm_device *dev, uint32_t seqno) | 1845 | i915_wait_request(struct drm_device *dev, uint32_t seqno, |
| 1846 | struct intel_ring_buffer *ring) | ||
| 1932 | { | 1847 | { |
| 1933 | return i915_do_wait_request(dev, seqno, 1); | 1848 | return i915_do_wait_request(dev, seqno, 1, ring); |
| 1934 | } | 1849 | } |
| 1935 | 1850 | ||
| 1936 | static void | 1851 | static void |
| @@ -1939,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev, | |||
| 1939 | uint32_t flush_domains) | 1854 | uint32_t flush_domains) |
| 1940 | { | 1855 | { |
| 1941 | drm_i915_private_t *dev_priv = dev->dev_private; | 1856 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1942 | uint32_t cmd; | ||
| 1943 | RING_LOCALS; | ||
| 1944 | |||
| 1945 | #if WATCH_EXEC | ||
| 1946 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | ||
| 1947 | invalidate_domains, flush_domains); | ||
| 1948 | #endif | ||
| 1949 | trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, | ||
| 1950 | invalidate_domains, flush_domains); | ||
| 1951 | |||
| 1952 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 1857 | if (flush_domains & I915_GEM_DOMAIN_CPU) |
| 1953 | drm_agp_chipset_flush(dev); | 1858 | drm_agp_chipset_flush(dev); |
| 1859 | dev_priv->render_ring.flush(dev, &dev_priv->render_ring, | ||
| 1860 | invalidate_domains, | ||
| 1861 | flush_domains); | ||
| 1862 | |||
| 1863 | if (HAS_BSD(dev)) | ||
| 1864 | dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, | ||
| 1865 | invalidate_domains, | ||
| 1866 | flush_domains); | ||
| 1867 | } | ||
| 1954 | 1868 | ||
| 1955 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | 1869 | static void |
| 1956 | /* | 1870 | i915_gem_flush_ring(struct drm_device *dev, |
| 1957 | * read/write caches: | 1871 | uint32_t invalidate_domains, |
| 1958 | * | 1872 | uint32_t flush_domains, |
| 1959 | * I915_GEM_DOMAIN_RENDER is always invalidated, but is | 1873 | struct intel_ring_buffer *ring) |
| 1960 | * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is | 1874 | { |
| 1961 | * also flushed at 2d versus 3d pipeline switches. | 1875 | if (flush_domains & I915_GEM_DOMAIN_CPU) |
| 1962 | * | 1876 | drm_agp_chipset_flush(dev); |
| 1963 | * read-only caches: | 1877 | ring->flush(dev, ring, |
| 1964 | * | 1878 | invalidate_domains, |
| 1965 | * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if | 1879 | flush_domains); |
| 1966 | * MI_READ_FLUSH is set, and is always flushed on 965. | ||
| 1967 | * | ||
| 1968 | * I915_GEM_DOMAIN_COMMAND may not exist? | ||
| 1969 | * | ||
| 1970 | * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is | ||
| 1971 | * invalidated when MI_EXE_FLUSH is set. | ||
| 1972 | * | ||
| 1973 | * I915_GEM_DOMAIN_VERTEX, which exists on 965, is | ||
| 1974 | * invalidated with every MI_FLUSH. | ||
| 1975 | * | ||
| 1976 | * TLBs: | ||
| 1977 | * | ||
| 1978 | * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND | ||
| 1979 | * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and | ||
| 1980 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER | ||
| 1981 | * are flushed at any MI_FLUSH. | ||
| 1982 | */ | ||
| 1983 | |||
| 1984 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
| 1985 | if ((invalidate_domains|flush_domains) & | ||
| 1986 | I915_GEM_DOMAIN_RENDER) | ||
| 1987 | cmd &= ~MI_NO_WRITE_FLUSH; | ||
| 1988 | if (!IS_I965G(dev)) { | ||
| 1989 | /* | ||
| 1990 | * On the 965, the sampler cache always gets flushed | ||
| 1991 | * and this bit is reserved. | ||
| 1992 | */ | ||
| 1993 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | ||
| 1994 | cmd |= MI_READ_FLUSH; | ||
| 1995 | } | ||
| 1996 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) | ||
| 1997 | cmd |= MI_EXE_FLUSH; | ||
| 1998 | |||
| 1999 | #if WATCH_EXEC | ||
| 2000 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | ||
| 2001 | #endif | ||
| 2002 | BEGIN_LP_RING(2); | ||
| 2003 | OUT_RING(cmd); | ||
| 2004 | OUT_RING(MI_NOOP); | ||
| 2005 | ADVANCE_LP_RING(); | ||
| 2006 | } | ||
| 2007 | } | 1880 | } |
| 2008 | 1881 | ||
| 2009 | /** | 1882 | /** |
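The long cache-domain commentary and MI_FLUSH construction deleted above are not lost; they move into the render ring's flush hook. For reference, the domain-to-command mapping they implemented (reconstructed from the removed lines):

    uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;

    /* RENDER is write-flushed only when MI_NO_WRITE_FLUSH is clear */
    if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
            cmd &= ~MI_NO_WRITE_FLUSH;
    /* pre-965 needs an explicit sampler flush; 965 always flushes it
     * and reserves the bit */
    if (!IS_I965G(dev) &&
        (invalidate_domains & I915_GEM_DOMAIN_SAMPLER))
            cmd |= MI_READ_FLUSH;
    /* instruction cache invalidate, 965 only */
    if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
            cmd |= MI_EXE_FLUSH;

    /* emitted to the ring as: cmd, MI_NOOP */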
| @@ -2030,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) | |||
| 2030 | DRM_INFO("%s: object %p wait for seqno %08x\n", | 1903 | DRM_INFO("%s: object %p wait for seqno %08x\n", |
| 2031 | __func__, obj, obj_priv->last_rendering_seqno); | 1904 | __func__, obj, obj_priv->last_rendering_seqno); |
| 2032 | #endif | 1905 | #endif |
| 2033 | ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); | 1906 | ret = i915_wait_request(dev, |
| 1907 | obj_priv->last_rendering_seqno, obj_priv->ring); | ||
| 2034 | if (ret != 0) | 1908 | if (ret != 0) |
| 2035 | return ret; | 1909 | return ret; |
| 2036 | } | 1910 | } |
| @@ -2146,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev) | |||
| 2146 | { | 2020 | { |
| 2147 | drm_i915_private_t *dev_priv = dev->dev_private; | 2021 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2148 | bool lists_empty; | 2022 | bool lists_empty; |
| 2149 | uint32_t seqno; | 2023 | uint32_t seqno1, seqno2; |
| 2024 | int ret; | ||
| 2150 | 2025 | ||
| 2151 | spin_lock(&dev_priv->mm.active_list_lock); | 2026 | spin_lock(&dev_priv->mm.active_list_lock); |
| 2152 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 2027 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
| 2153 | list_empty(&dev_priv->mm.active_list); | 2028 | list_empty(&dev_priv->render_ring.active_list) && |
| 2029 | (!HAS_BSD(dev) || | ||
| 2030 | list_empty(&dev_priv->bsd_ring.active_list))); | ||
| 2154 | spin_unlock(&dev_priv->mm.active_list_lock); | 2031 | spin_unlock(&dev_priv->mm.active_list_lock); |
| 2155 | 2032 | ||
| 2156 | if (lists_empty) | 2033 | if (lists_empty) |
| @@ -2158,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev) | |||
| 2158 | 2035 | ||
| 2159 | /* Flush everything onto the inactive list. */ | 2036 | /* Flush everything onto the inactive list. */ |
| 2160 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2037 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
| 2161 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); | 2038 | seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, |
| 2162 | if (seqno == 0) | 2039 | &dev_priv->render_ring); |
| 2040 | if (seqno1 == 0) | ||
| 2163 | return -ENOMEM; | 2041 | return -ENOMEM; |
| 2042 | ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); | ||
| 2164 | 2043 | ||
| 2165 | return i915_wait_request(dev, seqno); | 2044 | if (HAS_BSD(dev)) { |
| 2045 | seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, | ||
| 2046 | &dev_priv->bsd_ring); | ||
| 2047 | if (seqno2 == 0) | ||
| 2048 | return -ENOMEM; | ||
| 2049 | |||
| 2050 | ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); | ||
| 2051 | if (ret) | ||
| 2052 | return ret; | ||
| 2053 | } | ||
| 2054 | |||
| 2055 | |||
| 2056 | return ret; | ||
| 2166 | } | 2057 | } |
| 2167 | 2058 | ||
| 2168 | static int | 2059 | static int |
| @@ -2175,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
| 2175 | spin_lock(&dev_priv->mm.active_list_lock); | 2066 | spin_lock(&dev_priv->mm.active_list_lock); |
| 2176 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 2067 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
| 2177 | list_empty(&dev_priv->mm.flushing_list) && | 2068 | list_empty(&dev_priv->mm.flushing_list) && |
| 2178 | list_empty(&dev_priv->mm.active_list)); | 2069 | list_empty(&dev_priv->render_ring.active_list) && |
| 2070 | (!HAS_BSD(dev) | ||
| 2071 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
| 2179 | spin_unlock(&dev_priv->mm.active_list_lock); | 2072 | spin_unlock(&dev_priv->mm.active_list_lock); |
| 2180 | 2073 | ||
| 2181 | if (lists_empty) | 2074 | if (lists_empty) |
| @@ -2195,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
| 2195 | spin_lock(&dev_priv->mm.active_list_lock); | 2088 | spin_lock(&dev_priv->mm.active_list_lock); |
| 2196 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 2089 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
| 2197 | list_empty(&dev_priv->mm.flushing_list) && | 2090 | list_empty(&dev_priv->mm.flushing_list) && |
| 2198 | list_empty(&dev_priv->mm.active_list)); | 2091 | list_empty(&dev_priv->render_ring.active_list) && |
| 2092 | (!HAS_BSD(dev) | ||
| 2093 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
| 2199 | spin_unlock(&dev_priv->mm.active_list_lock); | 2094 | spin_unlock(&dev_priv->mm.active_list_lock); |
| 2200 | BUG_ON(!lists_empty); | 2095 | BUG_ON(!lists_empty); |
| 2201 | 2096 | ||
| @@ -2209,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
| 2209 | struct drm_gem_object *obj; | 2104 | struct drm_gem_object *obj; |
| 2210 | int ret; | 2105 | int ret; |
| 2211 | 2106 | ||
| 2107 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
| 2108 | struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; | ||
| 2212 | for (;;) { | 2109 | for (;;) { |
| 2213 | i915_gem_retire_requests(dev); | 2110 | i915_gem_retire_requests(dev, render_ring); |
| 2111 | |||
| 2112 | if (HAS_BSD(dev)) | ||
| 2113 | i915_gem_retire_requests(dev, bsd_ring); | ||
| 2214 | 2114 | ||
| 2215 | /* If there's an inactive buffer available now, grab it | 2115 | /* If there's an inactive buffer available now, grab it |
| 2216 | * and be done. | 2116 | * and be done. |
| @@ -2234,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
| 2234 | * things, wait for the next to finish and hopefully leave us | 2134 | * things, wait for the next to finish and hopefully leave us |
| 2235 | * a buffer to evict. | 2135 | * a buffer to evict. |
| 2236 | */ | 2136 | */ |
| 2237 | if (!list_empty(&dev_priv->mm.request_list)) { | 2137 | if (!list_empty(&render_ring->request_list)) { |
| 2238 | struct drm_i915_gem_request *request; | 2138 | struct drm_i915_gem_request *request; |
| 2239 | 2139 | ||
| 2240 | request = list_first_entry(&dev_priv->mm.request_list, | 2140 | request = list_first_entry(&render_ring->request_list, |
| 2241 | struct drm_i915_gem_request, | 2141 | struct drm_i915_gem_request, |
| 2242 | list); | 2142 | list); |
| 2243 | 2143 | ||
| 2244 | ret = i915_wait_request(dev, request->seqno); | 2144 | ret = i915_wait_request(dev, |
| 2145 | request->seqno, request->ring); | ||
| 2146 | if (ret) | ||
| 2147 | return ret; | ||
| 2148 | |||
| 2149 | continue; | ||
| 2150 | } | ||
| 2151 | |||
| 2152 | if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { | ||
| 2153 | struct drm_i915_gem_request *request; | ||
| 2154 | |||
| 2155 | request = list_first_entry(&bsd_ring->request_list, | ||
| 2156 | struct drm_i915_gem_request, | ||
| 2157 | list); | ||
| 2158 | |||
| 2159 | ret = i915_wait_request(dev, | ||
| 2160 | request->seqno, request->ring); | ||
| 2245 | if (ret) | 2161 | if (ret) |
| 2246 | return ret; | 2162 | return ret; |
| 2247 | 2163 | ||
| @@ -2268,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
| 2268 | if (obj != NULL) { | 2184 | if (obj != NULL) { |
| 2269 | uint32_t seqno; | 2185 | uint32_t seqno; |
| 2270 | 2186 | ||
| 2271 | i915_gem_flush(dev, | 2187 | i915_gem_flush_ring(dev, |
| 2188 | obj->write_domain, | ||
| 2272 | obj->write_domain, | 2189 | obj->write_domain, |
| 2273 | obj->write_domain); | 2190 | obj_priv->ring); |
| 2274 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2191 | seqno = i915_add_request(dev, NULL, |
| 2192 | obj->write_domain, | ||
| 2193 | obj_priv->ring); | ||
| 2275 | if (seqno == 0) | 2194 | if (seqno == 0) |
| 2276 | return -ENOMEM; | 2195 | return -ENOMEM; |
| 2277 | continue; | 2196 | continue; |
| @@ -2299,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | |||
| 2299 | struct inode *inode; | 2218 | struct inode *inode; |
| 2300 | struct page *page; | 2219 | struct page *page; |
| 2301 | 2220 | ||
| 2221 | BUG_ON(obj_priv->pages_refcount | ||
| 2222 | == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); | ||
| 2223 | |||
| 2302 | if (obj_priv->pages_refcount++ != 0) | 2224 | if (obj_priv->pages_refcount++ != 0) |
| 2303 | return 0; | 2225 | return 0; |
| 2304 | 2226 | ||
| @@ -2317,8 +2239,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | |||
| 2317 | mapping = inode->i_mapping; | 2239 | mapping = inode->i_mapping; |
| 2318 | for (i = 0; i < page_count; i++) { | 2240 | for (i = 0; i < page_count; i++) { |
| 2319 | page = read_cache_page_gfp(mapping, i, | 2241 | page = read_cache_page_gfp(mapping, i, |
| 2320 | mapping_gfp_mask (mapping) | | 2242 | GFP_HIGHUSER | |
| 2321 | __GFP_COLD | | 2243 | __GFP_COLD | |
| 2244 | __GFP_RECLAIMABLE | | ||
| 2322 | gfpmask); | 2245 | gfpmask); |
| 2323 | if (IS_ERR(page)) | 2246 | if (IS_ERR(page)) |
| 2324 | goto err_pages; | 2247 | goto err_pages; |
| @@ -2697,6 +2620,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
| 2697 | return -EINVAL; | 2620 | return -EINVAL; |
| 2698 | } | 2621 | } |
| 2699 | 2622 | ||
| 2623 | /* If the object is bigger than the entire aperture, reject it early | ||
| 2624 | * before evicting everything in a vain attempt to find space. | ||
| 2625 | */ | ||
| 2626 | if (obj->size > dev->gtt_total) { | ||
| 2627 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | ||
| 2628 | return -E2BIG; | ||
| 2629 | } | ||
| 2630 | |||
| 2700 | search_free: | 2631 | search_free: |
| 2701 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | 2632 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, |
| 2702 | obj->size, alignment, 0); | 2633 | obj->size, alignment, 0); |
| @@ -2807,6 +2738,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
| 2807 | { | 2738 | { |
| 2808 | struct drm_device *dev = obj->dev; | 2739 | struct drm_device *dev = obj->dev; |
| 2809 | uint32_t old_write_domain; | 2740 | uint32_t old_write_domain; |
| 2741 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
| 2810 | 2742 | ||
| 2811 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2743 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) |
| 2812 | return; | 2744 | return; |
| @@ -2814,7 +2746,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
| 2814 | /* Queue the GPU write cache flushing we need. */ | 2746 | /* Queue the GPU write cache flushing we need. */ |
| 2815 | old_write_domain = obj->write_domain; | 2747 | old_write_domain = obj->write_domain; |
| 2816 | i915_gem_flush(dev, 0, obj->write_domain); | 2748 | i915_gem_flush(dev, 0, obj->write_domain); |
| 2817 | (void) i915_add_request(dev, NULL, obj->write_domain); | 2749 | (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); |
| 2818 | BUG_ON(obj->write_domain); | 2750 | BUG_ON(obj->write_domain); |
| 2819 | 2751 | ||
| 2820 | trace_i915_gem_object_change_domain(obj, | 2752 | trace_i915_gem_object_change_domain(obj, |
| @@ -2954,23 +2886,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | |||
| 2954 | DRM_INFO("%s: object %p wait for seqno %08x\n", | 2886 | DRM_INFO("%s: object %p wait for seqno %08x\n", |
| 2955 | __func__, obj, obj_priv->last_rendering_seqno); | 2887 | __func__, obj, obj_priv->last_rendering_seqno); |
| 2956 | #endif | 2888 | #endif |
| 2957 | ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); | 2889 | ret = i915_do_wait_request(dev, |
| 2890 | obj_priv->last_rendering_seqno, | ||
| 2891 | 0, | ||
| 2892 | obj_priv->ring); | ||
| 2958 | if (ret != 0) | 2893 | if (ret != 0) |
| 2959 | return ret; | 2894 | return ret; |
| 2960 | } | 2895 | } |
| 2961 | 2896 | ||
| 2897 | i915_gem_object_flush_cpu_write_domain(obj); | ||
| 2898 | |||
| 2962 | old_write_domain = obj->write_domain; | 2899 | old_write_domain = obj->write_domain; |
| 2963 | old_read_domains = obj->read_domains; | 2900 | old_read_domains = obj->read_domains; |
| 2964 | 2901 | ||
| 2965 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
| 2966 | |||
| 2967 | i915_gem_object_flush_cpu_write_domain(obj); | ||
| 2968 | |||
| 2969 | /* It should now be out of any other write domains, and we can update | 2902 | /* It should now be out of any other write domains, and we can update |
| 2970 | * the domain values for our changes. | 2903 | * the domain values for our changes. |
| 2971 | */ | 2904 | */ |
| 2972 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 2905 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
| 2973 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2906 | obj->read_domains = I915_GEM_DOMAIN_GTT; |
| 2974 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 2907 | obj->write_domain = I915_GEM_DOMAIN_GTT; |
| 2975 | obj_priv->dirty = 1; | 2908 | obj_priv->dirty = 1; |
| 2976 | 2909 | ||
| @@ -3354,9 +3287,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3354 | obj_priv->tiling_mode != I915_TILING_NONE; | 3287 | obj_priv->tiling_mode != I915_TILING_NONE; |
| 3355 | 3288 | ||
| 3356 | /* Check fence reg constraints and rebind if necessary */ | 3289 | /* Check fence reg constraints and rebind if necessary */ |
| 3357 | if (need_fence && !i915_gem_object_fence_offset_ok(obj, | 3290 | if (need_fence && |
| 3358 | obj_priv->tiling_mode)) | 3291 | !i915_gem_object_fence_offset_ok(obj, |
| 3359 | i915_gem_object_unbind(obj); | 3292 | obj_priv->tiling_mode)) { |
| 3293 | ret = i915_gem_object_unbind(obj); | ||
| 3294 | if (ret) | ||
| 3295 | return ret; | ||
| 3296 | } | ||
| 3360 | 3297 | ||
| 3361 | /* Choose the GTT offset for our buffer and put it there. */ | 3298 | /* Choose the GTT offset for our buffer and put it there. */ |
| 3362 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | 3299 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); |
| @@ -3370,9 +3307,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3370 | if (need_fence) { | 3307 | if (need_fence) { |
| 3371 | ret = i915_gem_object_get_fence_reg(obj); | 3308 | ret = i915_gem_object_get_fence_reg(obj); |
| 3372 | if (ret != 0) { | 3309 | if (ret != 0) { |
| 3373 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
| 3374 | DRM_ERROR("Failure to install fence: %d\n", | ||
| 3375 | ret); | ||
| 3376 | i915_gem_object_unpin(obj); | 3310 | i915_gem_object_unpin(obj); |
| 3377 | return ret; | 3311 | return ret; |
| 3378 | } | 3312 | } |
| @@ -3545,62 +3479,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3545 | return 0; | 3479 | return 0; |
| 3546 | } | 3480 | } |
| 3547 | 3481 | ||
| 3548 | /** Dispatch a batchbuffer to the ring | ||
| 3549 | */ | ||
| 3550 | static int | ||
| 3551 | i915_dispatch_gem_execbuffer(struct drm_device *dev, | ||
| 3552 | struct drm_i915_gem_execbuffer2 *exec, | ||
| 3553 | struct drm_clip_rect *cliprects, | ||
| 3554 | uint64_t exec_offset) | ||
| 3555 | { | ||
| 3556 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 3557 | int nbox = exec->num_cliprects; | ||
| 3558 | int i = 0, count; | ||
| 3559 | uint32_t exec_start, exec_len; | ||
| 3560 | RING_LOCALS; | ||
| 3561 | |||
| 3562 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
| 3563 | exec_len = (uint32_t) exec->batch_len; | ||
| 3564 | |||
| 3565 | trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); | ||
| 3566 | |||
| 3567 | count = nbox ? nbox : 1; | ||
| 3568 | |||
| 3569 | for (i = 0; i < count; i++) { | ||
| 3570 | if (i < nbox) { | ||
| 3571 | int ret = i915_emit_box(dev, cliprects, i, | ||
| 3572 | exec->DR1, exec->DR4); | ||
| 3573 | if (ret) | ||
| 3574 | return ret; | ||
| 3575 | } | ||
| 3576 | |||
| 3577 | if (IS_I830(dev) || IS_845G(dev)) { | ||
| 3578 | BEGIN_LP_RING(4); | ||
| 3579 | OUT_RING(MI_BATCH_BUFFER); | ||
| 3580 | OUT_RING(exec_start | MI_BATCH_NON_SECURE); | ||
| 3581 | OUT_RING(exec_start + exec_len - 4); | ||
| 3582 | OUT_RING(0); | ||
| 3583 | ADVANCE_LP_RING(); | ||
| 3584 | } else { | ||
| 3585 | BEGIN_LP_RING(2); | ||
| 3586 | if (IS_I965G(dev)) { | ||
| 3587 | OUT_RING(MI_BATCH_BUFFER_START | | ||
| 3588 | (2 << 6) | | ||
| 3589 | MI_BATCH_NON_SECURE_I965); | ||
| 3590 | OUT_RING(exec_start); | ||
| 3591 | } else { | ||
| 3592 | OUT_RING(MI_BATCH_BUFFER_START | | ||
| 3593 | (2 << 6)); | ||
| 3594 | OUT_RING(exec_start | MI_BATCH_NON_SECURE); | ||
| 3595 | } | ||
| 3596 | ADVANCE_LP_RING(); | ||
| 3597 | } | ||
| 3598 | } | ||
| 3599 | |||
| 3600 | /* XXX breadcrumb */ | ||
| 3601 | return 0; | ||
| 3602 | } | ||
| 3603 | |||
| 3604 | /* Throttle our rendering by waiting until the ring has completed our requests | 3482 | /* Throttle our rendering by waiting until the ring has completed our requests |
| 3605 | * emitted over 20 msec ago. | 3483 | * emitted over 20 msec ago. |
| 3606 | * | 3484 | * |
| @@ -3629,7 +3507,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | |||
| 3629 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | 3507 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
| 3630 | break; | 3508 | break; |
| 3631 | 3509 | ||
| 3632 | ret = i915_wait_request(dev, request->seqno); | 3510 | ret = i915_wait_request(dev, request->seqno, request->ring); |
| 3633 | if (ret != 0) | 3511 | if (ret != 0) |
| 3634 | break; | 3512 | break; |
| 3635 | } | 3513 | } |
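Throttling walks the caller's own request list oldest-first and waits out every request emitted more than 20 ms ago, stopping at the first one newer than that; since requests now record their ring, each wait lands on whichever ring that request was emitted to. The surrounding loop, condensed (list names as used elsewhere in this patch):

    unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

    list_for_each_entry(request, &i915_file_priv->mm.request_list,
                        client_list) {
            if (time_after_eq(request->emitted_jiffies, recent_enough))
                    break;          /* everything newer may stay queued */

            ret = i915_wait_request(dev, request->seqno, request->ring);
            if (ret != 0)
                    break;
    }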
| @@ -3769,6 +3647,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
| 3769 | return ret; | 3647 | return ret; |
| 3770 | } | 3648 | } |
| 3771 | 3649 | ||
| 3650 | |||
| 3772 | int | 3651 | int |
| 3773 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 3652 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
| 3774 | struct drm_file *file_priv, | 3653 | struct drm_file *file_priv, |
| @@ -3786,10 +3665,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 3786 | uint32_t seqno, flush_domains, reloc_index; | 3665 | uint32_t seqno, flush_domains, reloc_index; |
| 3787 | int pin_tries, flips; | 3666 | int pin_tries, flips; |
| 3788 | 3667 | ||
| 3668 | struct intel_ring_buffer *ring = NULL; | ||
| 3669 | |||
| 3789 | #if WATCH_EXEC | 3670 | #if WATCH_EXEC |
| 3790 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 3671 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", |
| 3791 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | 3672 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); |
| 3792 | #endif | 3673 | #endif |
| 3674 | if (args->flags & I915_EXEC_BSD) { | ||
| 3675 | if (!HAS_BSD(dev)) { | ||
| 3676 | DRM_ERROR("execbuf with wrong flag\n"); | ||
| 3677 | return -EINVAL; | ||
| 3678 | } | ||
| 3679 | ring = &dev_priv->bsd_ring; | ||
| 3680 | } else { | ||
| 3681 | ring = &dev_priv->render_ring; | ||
| 3682 | } | ||
| 3683 | |||
| 3793 | 3684 | ||
| 3794 | if (args->buffer_count < 1) { | 3685 | if (args->buffer_count < 1) { |
| 3795 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 3686 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); |
| @@ -3902,11 +3793,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 3902 | if (ret != -ENOSPC || pin_tries >= 1) { | 3793 | if (ret != -ENOSPC || pin_tries >= 1) { |
| 3903 | if (ret != -ERESTARTSYS) { | 3794 | if (ret != -ERESTARTSYS) { |
| 3904 | unsigned long long total_size = 0; | 3795 | unsigned long long total_size = 0; |
| 3905 | for (i = 0; i < args->buffer_count; i++) | 3796 | int num_fences = 0; |
| 3797 | for (i = 0; i < args->buffer_count; i++) { | ||
| 3798 | obj_priv = to_intel_bo(object_list[i]); | ||
| 3799 | |||
| 3906 | total_size += object_list[i]->size; | 3800 | total_size += object_list[i]->size; |
| 3907 | DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", | 3801 | num_fences += |
| 3802 | exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && | ||
| 3803 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
| 3804 | } | ||
| 3805 | DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", | ||
| 3908 | pinned+1, args->buffer_count, | 3806 | pinned+1, args->buffer_count, |
| 3909 | total_size, ret); | 3807 | total_size, num_fences, |
| 3808 | ret); | ||
| 3910 | DRM_ERROR("%d objects [%d pinned], " | 3809 | DRM_ERROR("%d objects [%d pinned], " |
| 3911 | "%d object bytes [%d pinned], " | 3810 | "%d object bytes [%d pinned], " |
| 3912 | "%d/%d gtt bytes\n", | 3811 | "%d/%d gtt bytes\n", |
| @@ -3976,9 +3875,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 3976 | i915_gem_flush(dev, | 3875 | i915_gem_flush(dev, |
| 3977 | dev->invalidate_domains, | 3876 | dev->invalidate_domains, |
| 3978 | dev->flush_domains); | 3877 | dev->flush_domains); |
| 3979 | if (dev->flush_domains & I915_GEM_GPU_DOMAINS) | 3878 | if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { |
| 3980 | (void)i915_add_request(dev, file_priv, | 3879 | (void)i915_add_request(dev, file_priv, |
| 3981 | dev->flush_domains); | 3880 | dev->flush_domains, |
| 3881 | &dev_priv->render_ring); | ||
| 3882 | |||
| 3883 | if (HAS_BSD(dev)) | ||
| 3884 | (void)i915_add_request(dev, file_priv, | ||
| 3885 | dev->flush_domains, | ||
| 3886 | &dev_priv->bsd_ring); | ||
| 3887 | } | ||
| 3982 | } | 3888 | } |
| 3983 | 3889 | ||
| 3984 | for (i = 0; i < args->buffer_count; i++) { | 3890 | for (i = 0; i < args->buffer_count; i++) { |
| @@ -4015,7 +3921,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 4015 | #endif | 3921 | #endif |
| 4016 | 3922 | ||
| 4017 | /* Exec the batchbuffer */ | 3923 | /* Exec the batchbuffer */ |
| 4018 | ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); | 3924 | ret = ring->dispatch_gem_execbuffer(dev, ring, args, |
| 3925 | cliprects, exec_offset); | ||
| 4019 | if (ret) { | 3926 | if (ret) { |
| 4020 | DRM_ERROR("dispatch failed %d\n", ret); | 3927 | DRM_ERROR("dispatch failed %d\n", ret); |
| 4021 | goto err; | 3928 | goto err; |
| @@ -4025,7 +3932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 4025 | * Ensure that the commands in the batch buffer are | 3932 | * Ensure that the commands in the batch buffer are |
| 4026 | * finished before the interrupt fires | 3933 | * finished before the interrupt fires |
| 4027 | */ | 3934 | */ |
| 4028 | flush_domains = i915_retire_commands(dev); | 3935 | flush_domains = i915_retire_commands(dev, ring); |
| 4029 | 3936 | ||
| 4030 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3937 | i915_verify_inactive(dev, __FILE__, __LINE__); |
| 4031 | 3938 | ||
| @@ -4036,12 +3943,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 4036 | * *some* interrupts representing completion of buffers that we can | 3943 | * *some* interrupts representing completion of buffers that we can |
| 4037 | * wait on when trying to clear up gtt space). | 3944 | * wait on when trying to clear up gtt space). |
| 4038 | */ | 3945 | */ |
| 4039 | seqno = i915_add_request(dev, file_priv, flush_domains); | 3946 | seqno = i915_add_request(dev, file_priv, flush_domains, ring); |
| 4040 | BUG_ON(seqno == 0); | 3947 | BUG_ON(seqno == 0); |
| 4041 | for (i = 0; i < args->buffer_count; i++) { | 3948 | for (i = 0; i < args->buffer_count; i++) { |
| 4042 | struct drm_gem_object *obj = object_list[i]; | 3949 | struct drm_gem_object *obj = object_list[i]; |
| 3950 | obj_priv = to_intel_bo(obj); | ||
| 4043 | 3951 | ||
| 4044 | i915_gem_object_move_to_active(obj, seqno); | 3952 | i915_gem_object_move_to_active(obj, seqno, ring); |
| 4045 | #if WATCH_LRU | 3953 | #if WATCH_LRU |
| 4046 | DRM_INFO("%s: move to exec list %p\n", __func__, obj); | 3954 | DRM_INFO("%s: move to exec list %p\n", __func__, obj); |
| 4047 | #endif | 3955 | #endif |
| @@ -4153,7 +4061,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
| 4153 | exec2.DR4 = args->DR4; | 4061 | exec2.DR4 = args->DR4; |
| 4154 | exec2.num_cliprects = args->num_cliprects; | 4062 | exec2.num_cliprects = args->num_cliprects; |
| 4155 | exec2.cliprects_ptr = args->cliprects_ptr; | 4063 | exec2.cliprects_ptr = args->cliprects_ptr; |
| 4156 | exec2.flags = 0; | 4064 | exec2.flags = I915_EXEC_RENDER; |
| 4157 | 4065 | ||
| 4158 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | 4066 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); |
| 4159 | if (!ret) { | 4067 | if (!ret) { |
| @@ -4239,7 +4147,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
| 4239 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4147 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 4240 | int ret; | 4148 | int ret; |
| 4241 | 4149 | ||
| 4150 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); | ||
| 4151 | |||
| 4242 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4152 | i915_verify_inactive(dev, __FILE__, __LINE__); |
| 4153 | |||
| 4154 | if (obj_priv->gtt_space != NULL) { | ||
| 4155 | if (alignment == 0) | ||
| 4156 | alignment = i915_gem_get_gtt_alignment(obj); | ||
| 4157 | if (obj_priv->gtt_offset & (alignment - 1)) { | ||
| 4158 | ret = i915_gem_object_unbind(obj); | ||
| 4159 | if (ret) | ||
| 4160 | return ret; | ||
| 4161 | } | ||
| 4162 | } | ||
| 4163 | |||
| 4243 | if (obj_priv->gtt_space == NULL) { | 4164 | if (obj_priv->gtt_space == NULL) { |
| 4244 | ret = i915_gem_object_bind_to_gtt(obj, alignment); | 4165 | ret = i915_gem_object_bind_to_gtt(obj, alignment); |
| 4245 | if (ret) | 4166 | if (ret) |
| @@ -4392,6 +4313,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
| 4392 | struct drm_i915_gem_busy *args = data; | 4313 | struct drm_i915_gem_busy *args = data; |
| 4393 | struct drm_gem_object *obj; | 4314 | struct drm_gem_object *obj; |
| 4394 | struct drm_i915_gem_object *obj_priv; | 4315 | struct drm_i915_gem_object *obj_priv; |
| 4316 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4395 | 4317 | ||
| 4396 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4318 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
| 4397 | if (obj == NULL) { | 4319 | if (obj == NULL) { |
| @@ -4406,7 +4328,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
| 4406 | * actually unmasked, and our working set ends up being larger than | 4328 | * actually unmasked, and our working set ends up being larger than |
| 4407 | * required. | 4329 | * required. |
| 4408 | */ | 4330 | */ |
| 4409 | i915_gem_retire_requests(dev); | 4331 | i915_gem_retire_requests(dev, &dev_priv->render_ring); |
| 4332 | |||
| 4333 | if (HAS_BSD(dev)) | ||
| 4334 | i915_gem_retire_requests(dev, &dev_priv->bsd_ring); | ||
| 4410 | 4335 | ||
| 4411 | obj_priv = to_intel_bo(obj); | 4336 | obj_priv = to_intel_bo(obj); |
| 4412 | /* Don't count being on the flushing list against the object being | 4337 | /* Don't count being on the flushing list against the object being |
| @@ -4573,7 +4498,10 @@ i915_gem_idle(struct drm_device *dev) | |||
| 4573 | 4498 | ||
| 4574 | mutex_lock(&dev->struct_mutex); | 4499 | mutex_lock(&dev->struct_mutex); |
| 4575 | 4500 | ||
| 4576 | if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { | 4501 | if (dev_priv->mm.suspended || |
| 4502 | (dev_priv->render_ring.gem_object == NULL) || | ||
| 4503 | (HAS_BSD(dev) && | ||
| 4504 | dev_priv->bsd_ring.gem_object == NULL)) { | ||
| 4577 | mutex_unlock(&dev->struct_mutex); | 4505 | mutex_unlock(&dev->struct_mutex); |
| 4578 | return 0; | 4506 | return 0; |
| 4579 | } | 4507 | } |
| @@ -4654,71 +4582,6 @@ err: | |||
| 4654 | return ret; | 4582 | return ret; |
| 4655 | } | 4583 | } |
| 4656 | 4584 | ||
| 4657 | static int | ||
| 4658 | i915_gem_init_hws(struct drm_device *dev) | ||
| 4659 | { | ||
| 4660 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4661 | struct drm_gem_object *obj; | ||
| 4662 | struct drm_i915_gem_object *obj_priv; | ||
| 4663 | int ret; | ||
| 4664 | |||
| 4665 | /* If we need a physical address for the status page, it's already | ||
| 4666 | * initialized at driver load time. | ||
| 4667 | */ | ||
| 4668 | if (!I915_NEED_GFX_HWS(dev)) | ||
| 4669 | return 0; | ||
| 4670 | |||
| 4671 | obj = i915_gem_alloc_object(dev, 4096); | ||
| 4672 | if (obj == NULL) { | ||
| 4673 | DRM_ERROR("Failed to allocate status page\n"); | ||
| 4674 | ret = -ENOMEM; | ||
| 4675 | goto err; | ||
| 4676 | } | ||
| 4677 | obj_priv = to_intel_bo(obj); | ||
| 4678 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
| 4679 | |||
| 4680 | ret = i915_gem_object_pin(obj, 4096); | ||
| 4681 | if (ret != 0) { | ||
| 4682 | drm_gem_object_unreference(obj); | ||
| 4683 | goto err_unref; | ||
| 4684 | } | ||
| 4685 | |||
| 4686 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; | ||
| 4687 | |||
| 4688 | dev_priv->hw_status_page = kmap(obj_priv->pages[0]); | ||
| 4689 | if (dev_priv->hw_status_page == NULL) { | ||
| 4690 | DRM_ERROR("Failed to map status page.\n"); | ||
| 4691 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
| 4692 | ret = -EINVAL; | ||
| 4693 | goto err_unpin; | ||
| 4694 | } | ||
| 4695 | |||
| 4696 | if (HAS_PIPE_CONTROL(dev)) { | ||
| 4697 | ret = i915_gem_init_pipe_control(dev); | ||
| 4698 | if (ret) | ||
| 4699 | goto err_unpin; | ||
| 4700 | } | ||
| 4701 | |||
| 4702 | dev_priv->hws_obj = obj; | ||
| 4703 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | ||
| 4704 | if (IS_GEN6(dev)) { | ||
| 4705 | I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); | ||
| 4706 | I915_READ(HWS_PGA_GEN6); /* posting read */ | ||
| 4707 | } else { | ||
| 4708 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | ||
| 4709 | I915_READ(HWS_PGA); /* posting read */ | ||
| 4710 | } | ||
| 4711 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | ||
| 4712 | |||
| 4713 | return 0; | ||
| 4714 | |||
| 4715 | err_unpin: | ||
| 4716 | i915_gem_object_unpin(obj); | ||
| 4717 | err_unref: | ||
| 4718 | drm_gem_object_unreference(obj); | ||
| 4719 | err: | ||
| 4720 | return 0; | ||
| 4721 | } | ||
| 4722 | 4585 | ||
| 4723 | static void | 4586 | static void |
| 4724 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | 4587 | i915_gem_cleanup_pipe_control(struct drm_device *dev) |
| @@ -4737,146 +4600,46 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev) | |||
| 4737 | dev_priv->seqno_page = NULL; | 4600 | dev_priv->seqno_page = NULL; |
| 4738 | } | 4601 | } |
| 4739 | 4602 | ||
| 4740 | static void | ||
| 4741 | i915_gem_cleanup_hws(struct drm_device *dev) | ||
| 4742 | { | ||
| 4743 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4744 | struct drm_gem_object *obj; | ||
| 4745 | struct drm_i915_gem_object *obj_priv; | ||
| 4746 | |||
| 4747 | if (dev_priv->hws_obj == NULL) | ||
| 4748 | return; | ||
| 4749 | |||
| 4750 | obj = dev_priv->hws_obj; | ||
| 4751 | obj_priv = to_intel_bo(obj); | ||
| 4752 | |||
| 4753 | kunmap(obj_priv->pages[0]); | ||
| 4754 | i915_gem_object_unpin(obj); | ||
| 4755 | drm_gem_object_unreference(obj); | ||
| 4756 | dev_priv->hws_obj = NULL; | ||
| 4757 | |||
| 4758 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
| 4759 | dev_priv->hw_status_page = NULL; | ||
| 4760 | |||
| 4761 | if (HAS_PIPE_CONTROL(dev)) | ||
| 4762 | i915_gem_cleanup_pipe_control(dev); | ||
| 4763 | |||
| 4764 | /* Write high address into HWS_PGA when disabling. */ | ||
| 4765 | I915_WRITE(HWS_PGA, 0x1ffff000); | ||
| 4766 | } | ||
| 4767 | |||
| 4768 | int | 4603 | int |
| 4769 | i915_gem_init_ringbuffer(struct drm_device *dev) | 4604 | i915_gem_init_ringbuffer(struct drm_device *dev) |
| 4770 | { | 4605 | { |
| 4771 | drm_i915_private_t *dev_priv = dev->dev_private; | 4606 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 4772 | struct drm_gem_object *obj; | ||
| 4773 | struct drm_i915_gem_object *obj_priv; | ||
| 4774 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; | ||
| 4775 | int ret; | 4607 | int ret; |
| 4776 | u32 head; | ||
| 4777 | 4608 | ||
| 4778 | ret = i915_gem_init_hws(dev); | 4609 | dev_priv->render_ring = render_ring; |
| 4779 | if (ret != 0) | ||
| 4780 | return ret; | ||
| 4781 | 4610 | ||
| 4782 | obj = i915_gem_alloc_object(dev, 128 * 1024); | 4611 | if (!I915_NEED_GFX_HWS(dev)) { |
| 4783 | if (obj == NULL) { | 4612 | dev_priv->render_ring.status_page.page_addr |
| 4784 | DRM_ERROR("Failed to allocate ringbuffer\n"); | 4613 | = dev_priv->status_page_dmah->vaddr; |
| 4785 | i915_gem_cleanup_hws(dev); | 4614 | memset(dev_priv->render_ring.status_page.page_addr, |
| 4786 | return -ENOMEM; | 4615 | 0, PAGE_SIZE); |
| 4787 | } | 4616 | } |
| 4788 | obj_priv = to_intel_bo(obj); | ||
| 4789 | 4617 | ||
| 4790 | ret = i915_gem_object_pin(obj, 4096); | 4618 | if (HAS_PIPE_CONTROL(dev)) { |
| 4791 | if (ret != 0) { | 4619 | ret = i915_gem_init_pipe_control(dev); |
| 4792 | drm_gem_object_unreference(obj); | 4620 | if (ret) |
| 4793 | i915_gem_cleanup_hws(dev); | 4621 | return ret; |
| 4794 | return ret; | ||
| 4795 | } | ||
| 4796 | |||
| 4797 | /* Set up the kernel mapping for the ring. */ | ||
| 4798 | ring->Size = obj->size; | ||
| 4799 | |||
| 4800 | ring->map.offset = dev->agp->base + obj_priv->gtt_offset; | ||
| 4801 | ring->map.size = obj->size; | ||
| 4802 | ring->map.type = 0; | ||
| 4803 | ring->map.flags = 0; | ||
| 4804 | ring->map.mtrr = 0; | ||
| 4805 | |||
| 4806 | drm_core_ioremap_wc(&ring->map, dev); | ||
| 4807 | if (ring->map.handle == NULL) { | ||
| 4808 | DRM_ERROR("Failed to map ringbuffer.\n"); | ||
| 4809 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); | ||
| 4810 | i915_gem_object_unpin(obj); | ||
| 4811 | drm_gem_object_unreference(obj); | ||
| 4812 | i915_gem_cleanup_hws(dev); | ||
| 4813 | return -EINVAL; | ||
| 4814 | } | ||
| 4815 | ring->ring_obj = obj; | ||
| 4816 | ring->virtual_start = ring->map.handle; | ||
| 4817 | |||
| 4818 | /* Stop the ring if it's running. */ | ||
| 4819 | I915_WRITE(PRB0_CTL, 0); | ||
| 4820 | I915_WRITE(PRB0_TAIL, 0); | ||
| 4821 | I915_WRITE(PRB0_HEAD, 0); | ||
| 4822 | |||
| 4823 | /* Initialize the ring. */ | ||
| 4824 | I915_WRITE(PRB0_START, obj_priv->gtt_offset); | ||
| 4825 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
| 4826 | |||
| 4827 | /* G45 ring initialization fails to reset head to zero */ | ||
| 4828 | if (head != 0) { | ||
| 4829 | DRM_ERROR("Ring head not reset to zero " | ||
| 4830 | "ctl %08x head %08x tail %08x start %08x\n", | ||
| 4831 | I915_READ(PRB0_CTL), | ||
| 4832 | I915_READ(PRB0_HEAD), | ||
| 4833 | I915_READ(PRB0_TAIL), | ||
| 4834 | I915_READ(PRB0_START)); | ||
| 4835 | I915_WRITE(PRB0_HEAD, 0); | ||
| 4836 | |||
| 4837 | DRM_ERROR("Ring head forced to zero " | ||
| 4838 | "ctl %08x head %08x tail %08x start %08x\n", | ||
| 4839 | I915_READ(PRB0_CTL), | ||
| 4840 | I915_READ(PRB0_HEAD), | ||
| 4841 | I915_READ(PRB0_TAIL), | ||
| 4842 | I915_READ(PRB0_START)); | ||
| 4843 | } | ||
| 4844 | |||
| 4845 | I915_WRITE(PRB0_CTL, | ||
| 4846 | ((obj->size - 4096) & RING_NR_PAGES) | | ||
| 4847 | RING_NO_REPORT | | ||
| 4848 | RING_VALID); | ||
| 4849 | |||
| 4850 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
| 4851 | |||
| 4852 | /* If the head is still not zero, the ring is dead */ | ||
| 4853 | if (head != 0) { | ||
| 4854 | DRM_ERROR("Ring initialization failed " | ||
| 4855 | "ctl %08x head %08x tail %08x start %08x\n", | ||
| 4856 | I915_READ(PRB0_CTL), | ||
| 4857 | I915_READ(PRB0_HEAD), | ||
| 4858 | I915_READ(PRB0_TAIL), | ||
| 4859 | I915_READ(PRB0_START)); | ||
| 4860 | return -EIO; | ||
| 4861 | } | 4622 | } |
| 4862 | 4623 | ||
| 4863 | /* Update our cache of the ring state */ | 4624 | ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); |
| 4864 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4625 | if (ret) |
| 4865 | i915_kernel_lost_context(dev); | 4626 | goto cleanup_pipe_control; |
| 4866 | else { | ||
| 4867 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
| 4868 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | ||
| 4869 | ring->space = ring->head - (ring->tail + 8); | ||
| 4870 | if (ring->space < 0) | ||
| 4871 | ring->space += ring->Size; | ||
| 4872 | } | ||
| 4873 | 4627 | ||
| 4874 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { | 4628 | if (HAS_BSD(dev)) { |
| 4875 | I915_WRITE(MI_MODE, | 4629 | dev_priv->bsd_ring = bsd_ring; |
| 4876 | (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); | 4630 | ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); |
| 4631 | if (ret) | ||
| 4632 | goto cleanup_render_ring; | ||
| 4877 | } | 4633 | } |
| 4878 | 4634 | ||
| 4879 | return 0; | 4635 | return 0; |
| 4636 | |||
| 4637 | cleanup_render_ring: | ||
| 4638 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | ||
| 4639 | cleanup_pipe_control: | ||
| 4640 | if (HAS_PIPE_CONTROL(dev)) | ||
| 4641 | i915_gem_cleanup_pipe_control(dev); | ||
| 4642 | return ret; | ||
| 4880 | } | 4643 | } |
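The rewritten i915_gem_init_ringbuffer() above follows the standard kernel goto-unwind idiom: each failure path jumps to a label that tears down only what has already been set up, in reverse order. A minimal sketch of the shape, with placeholder setup/teardown names (illustrative, not the driver's API):

	static int init_two_resources(struct drm_device *dev)
	{
		int ret;

		ret = setup_a(dev);		/* placeholder step 1 */
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = setup_b(dev);		/* placeholder step 2 */
		if (ret)
			goto cleanup_a;		/* undo step 1 only */

		return 0;

	cleanup_a:
		teardown_a(dev);
		return ret;
	}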
| 4881 | 4644 | ||
| 4882 | void | 4645 | void |
| @@ -4884,17 +4647,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) | |||
| 4884 | { | 4647 | { |
| 4885 | drm_i915_private_t *dev_priv = dev->dev_private; | 4648 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 4886 | 4649 | ||
| 4887 | if (dev_priv->ring.ring_obj == NULL) | 4650 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); |
| 4888 | return; | 4651 | if (HAS_BSD(dev)) |
| 4889 | 4652 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | |
| 4890 | drm_core_ioremapfree(&dev_priv->ring.map, dev); | 4653 | if (HAS_PIPE_CONTROL(dev)) |
| 4891 | 4654 | i915_gem_cleanup_pipe_control(dev); | |
| 4892 | i915_gem_object_unpin(dev_priv->ring.ring_obj); | ||
| 4893 | drm_gem_object_unreference(dev_priv->ring.ring_obj); | ||
| 4894 | dev_priv->ring.ring_obj = NULL; | ||
| 4895 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); | ||
| 4896 | |||
| 4897 | i915_gem_cleanup_hws(dev); | ||
| 4898 | } | 4655 | } |
| 4899 | 4656 | ||
| 4900 | int | 4657 | int |
| @@ -4922,12 +4679,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
| 4922 | } | 4679 | } |
| 4923 | 4680 | ||
| 4924 | spin_lock(&dev_priv->mm.active_list_lock); | 4681 | spin_lock(&dev_priv->mm.active_list_lock); |
| 4925 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 4682 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); |
| 4683 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); | ||
| 4926 | spin_unlock(&dev_priv->mm.active_list_lock); | 4684 | spin_unlock(&dev_priv->mm.active_list_lock); |
| 4927 | 4685 | ||
| 4928 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 4686 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
| 4929 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 4687 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
| 4930 | BUG_ON(!list_empty(&dev_priv->mm.request_list)); | 4688 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); |
| 4689 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); | ||
| 4931 | mutex_unlock(&dev->struct_mutex); | 4690 | mutex_unlock(&dev->struct_mutex); |
| 4932 | 4691 | ||
| 4933 | drm_irq_install(dev); | 4692 | drm_irq_install(dev); |
| @@ -4966,22 +4725,34 @@ i915_gem_load(struct drm_device *dev) | |||
| 4966 | drm_i915_private_t *dev_priv = dev->dev_private; | 4725 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 4967 | 4726 | ||
| 4968 | spin_lock_init(&dev_priv->mm.active_list_lock); | 4727 | spin_lock_init(&dev_priv->mm.active_list_lock); |
| 4969 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | ||
| 4970 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4728 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
| 4971 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | 4729 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); |
| 4972 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4730 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
| 4973 | INIT_LIST_HEAD(&dev_priv->mm.request_list); | ||
| 4974 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4731 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
| 4732 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); | ||
| 4733 | INIT_LIST_HEAD(&dev_priv->render_ring.request_list); | ||
| 4734 | if (HAS_BSD(dev)) { | ||
| 4735 | INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); | ||
| 4736 | INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); | ||
| 4737 | } | ||
| 4975 | for (i = 0; i < 16; i++) | 4738 | for (i = 0; i < 16; i++) |
| 4976 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 4739 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
| 4977 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 4740 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
| 4978 | i915_gem_retire_work_handler); | 4741 | i915_gem_retire_work_handler); |
| 4979 | dev_priv->mm.next_gem_seqno = 1; | ||
| 4980 | |||
| 4981 | spin_lock(&shrink_list_lock); | 4742 | spin_lock(&shrink_list_lock); |
| 4982 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | 4743 | list_add(&dev_priv->mm.shrink_list, &shrink_list); |
| 4983 | spin_unlock(&shrink_list_lock); | 4744 | spin_unlock(&shrink_list_lock); |
| 4984 | 4745 | ||
| 4746 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | ||
| 4747 | if (IS_GEN3(dev)) { | ||
| 4748 | u32 tmp = I915_READ(MI_ARB_STATE); | ||
| 4749 | if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { | ||
| 4750 | /* arb state is a masked write, so set bit + bit in mask */ | ||
| 4751 | tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); | ||
| 4752 | I915_WRITE(MI_ARB_STATE, tmp); | ||
| 4753 | } | ||
| 4754 | } | ||
| 4755 | |||
| 4985 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 4756 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
| 4986 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4757 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 4987 | dev_priv->fence_reg_start = 3; | 4758 | dev_priv->fence_reg_start = 3; |
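The MI_ARB_STATE write added to i915_gem_load() above relies on the masked-write convention documented in the new i915_reg.h comments: the high 16 bits select which of the low 16 bits the hardware latches, so no read-modify-write is needed. A condensed sketch of the idiom, assuming the MI_ARB_MASK_SHIFT of 16 defined later in this patch:

	/* Masked write: only bit 11 (C3 LP write enable) is updated; all
	 * other low bits keep their value because their mask bits are 0.
	 */
	u32 tmp = MI_ARB_C3_LP_WRITE_ENABLE |
		  (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
	I915_WRITE(MI_ARB_STATE, tmp);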
| @@ -5209,14 +4980,16 @@ i915_gpu_is_active(struct drm_device *dev) | |||
| 5209 | 4980 | ||
| 5210 | spin_lock(&dev_priv->mm.active_list_lock); | 4981 | spin_lock(&dev_priv->mm.active_list_lock); |
| 5211 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 4982 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && |
| 5212 | list_empty(&dev_priv->mm.active_list); | 4983 | list_empty(&dev_priv->render_ring.active_list); |
| 4984 | if (HAS_BSD(dev)) | ||
| 4985 | lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); | ||
| 5213 | spin_unlock(&dev_priv->mm.active_list_lock); | 4986 | spin_unlock(&dev_priv->mm.active_list_lock); |
| 5214 | 4987 | ||
| 5215 | return !lists_empty; | 4988 | return !lists_empty; |
| 5216 | } | 4989 | } |
| 5217 | 4990 | ||
| 5218 | static int | 4991 | static int |
| 5219 | i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) | 4992 | i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) |
| 5220 | { | 4993 | { |
| 5221 | drm_i915_private_t *dev_priv, *next_dev; | 4994 | drm_i915_private_t *dev_priv, *next_dev; |
| 5222 | struct drm_i915_gem_object *obj_priv, *next_obj; | 4995 | struct drm_i915_gem_object *obj_priv, *next_obj; |
| @@ -5254,8 +5027,10 @@ rescan: | |||
| 5254 | continue; | 5027 | continue; |
| 5255 | 5028 | ||
| 5256 | spin_unlock(&shrink_list_lock); | 5029 | spin_unlock(&shrink_list_lock); |
| 5030 | i915_gem_retire_requests(dev, &dev_priv->render_ring); | ||
| 5257 | 5031 | ||
| 5258 | i915_gem_retire_requests(dev); | 5032 | if (HAS_BSD(dev)) |
| 5033 | i915_gem_retire_requests(dev, &dev_priv->bsd_ring); | ||
| 5259 | 5034 | ||
| 5260 | list_for_each_entry_safe(obj_priv, next_obj, | 5035 | list_for_each_entry_safe(obj_priv, next_obj, |
| 5261 | &dev_priv->mm.inactive_list, | 5036 | &dev_priv->mm.inactive_list, |
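The shrinker hunk above repeats a pattern that also appears in busy_ioctl: retire the render ring, then the BSD ring when the hardware has one. A hypothetical wrapper the call sites could share (the patch open-codes it; names taken from the diff):

	static void i915_gem_retire_requests_all(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;

		i915_gem_retire_requests(dev, &dev_priv->render_ring);
		if (HAS_BSD(dev))
			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
	}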
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8c3f0802686d..dba53d4b9fb3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -53,7 +53,7 @@ | |||
| 53 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 53 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
| 54 | 54 | ||
| 55 | /** Interrupts that we mask and unmask at runtime. */ | 55 | /** Interrupts that we mask and unmask at runtime. */ |
| 56 | #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) | 56 | #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT) |
| 57 | 57 | ||
| 58 | #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ | 58 | #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ |
| 59 | PIPE_VBLANK_INTERRUPT_STATUS) | 59 | PIPE_VBLANK_INTERRUPT_STATUS) |
| @@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
| 74 | } | 74 | } |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | static inline void | 77 | void |
| 78 | ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | 78 | ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) |
| 79 | { | 79 | { |
| 80 | if ((dev_priv->gt_irq_mask_reg & mask) != mask) { | 80 | if ((dev_priv->gt_irq_mask_reg & mask) != mask) { |
| @@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
| 115 | } | 115 | } |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | static inline void | 118 | void |
| 119 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | 119 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) |
| 120 | { | 120 | { |
| 121 | if ((dev_priv->irq_mask_reg & mask) != mask) { | 121 | if ((dev_priv->irq_mask_reg & mask) != mask) { |
| @@ -278,10 +278,9 @@ static void i915_handle_rps_change(struct drm_device *dev) | |||
| 278 | { | 278 | { |
| 279 | drm_i915_private_t *dev_priv = dev->dev_private; | 279 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 280 | u32 busy_up, busy_down, max_avg, min_avg; | 280 | u32 busy_up, busy_down, max_avg, min_avg; |
| 281 | u16 rgvswctl; | ||
| 282 | u8 new_delay = dev_priv->cur_delay; | 281 | u8 new_delay = dev_priv->cur_delay; |
| 283 | 282 | ||
| 284 | I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); | 283 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); |
| 285 | busy_up = I915_READ(RCPREVBSYTUPAVG); | 284 | busy_up = I915_READ(RCPREVBSYTUPAVG); |
| 286 | busy_down = I915_READ(RCPREVBSYTDNAVG); | 285 | busy_down = I915_READ(RCPREVBSYTDNAVG); |
| 287 | max_avg = I915_READ(RCBMAXAVG); | 286 | max_avg = I915_READ(RCBMAXAVG); |
| @@ -300,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev) | |||
| 300 | new_delay = dev_priv->min_delay; | 299 | new_delay = dev_priv->min_delay; |
| 301 | } | 300 | } |
| 302 | 301 | ||
| 303 | DRM_DEBUG("rps change requested: %d -> %d\n", | 302 | if (ironlake_set_drps(dev, new_delay)) |
| 304 | dev_priv->cur_delay, new_delay); | 303 | dev_priv->cur_delay = new_delay; |
| 305 | |||
| 306 | rgvswctl = I915_READ(MEMSWCTL); | ||
| 307 | if (rgvswctl & MEMCTL_CMD_STS) { | ||
| 308 | DRM_ERROR("gpu busy, RCS change rejected\n"); | ||
| 309 | return; /* still busy with another command */ | ||
| 310 | } | ||
| 311 | |||
| 312 | /* Program the new state */ | ||
| 313 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | ||
| 314 | (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | ||
| 315 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
| 316 | POSTING_READ(MEMSWCTL); | ||
| 317 | |||
| 318 | rgvswctl |= MEMCTL_CMD_STS; | ||
| 319 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
| 320 | |||
| 321 | dev_priv->cur_delay = new_delay; | ||
| 322 | |||
| 323 | DRM_DEBUG("rps changed\n"); | ||
| 324 | 304 | ||
| 325 | return; | 305 | return; |
| 326 | } | 306 | } |
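The RPS handler above now delegates frequency programming to ironlake_set_drps() and caches the new delay only when that call reports success. Reconstructed from the removed inline code, a sketch of what such a helper plausibly does (the real implementation lands elsewhere in this series):

	/* Sketch only; mirrors the removed MEMSWCTL sequence above. */
	static bool set_drps_sketch(struct drm_device *dev, u8 val)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		u16 rgvswctl = I915_READ(MEMSWCTL);

		if (rgvswctl & MEMCTL_CMD_STS)
			return false;	/* busy with a previous command */

		rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
			   (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
		I915_WRITE(MEMSWCTL, rgvswctl);
		POSTING_READ(MEMSWCTL);

		rgvswctl |= MEMCTL_CMD_STS;	/* kick off the change */
		I915_WRITE(MEMSWCTL, rgvswctl);
		return true;
	}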
| @@ -331,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
| 331 | int ret = IRQ_NONE; | 311 | int ret = IRQ_NONE; |
| 332 | u32 de_iir, gt_iir, de_ier, pch_iir; | 312 | u32 de_iir, gt_iir, de_ier, pch_iir; |
| 333 | struct drm_i915_master_private *master_priv; | 313 | struct drm_i915_master_private *master_priv; |
| 314 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
| 334 | 315 | ||
| 335 | /* disable master interrupt before clearing iir */ | 316 | /* disable master interrupt before clearing iir */ |
| 336 | de_ier = I915_READ(DEIER); | 317 | de_ier = I915_READ(DEIER); |
| @@ -354,13 +335,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
| 354 | } | 335 | } |
| 355 | 336 | ||
| 356 | if (gt_iir & GT_PIPE_NOTIFY) { | 337 | if (gt_iir & GT_PIPE_NOTIFY) { |
| 357 | u32 seqno = i915_get_gem_seqno(dev); | 338 | u32 seqno = render_ring->get_gem_seqno(dev, render_ring); |
| 358 | dev_priv->mm.irq_gem_seqno = seqno; | 339 | render_ring->irq_gem_seqno = seqno; |
| 359 | trace_i915_gem_request_complete(dev, seqno); | 340 | trace_i915_gem_request_complete(dev, seqno); |
| 360 | DRM_WAKEUP(&dev_priv->irq_queue); | 341 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); |
| 361 | dev_priv->hangcheck_count = 0; | 342 | dev_priv->hangcheck_count = 0; |
| 362 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 343 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
| 363 | } | 344 | } |
| 345 | if (gt_iir & GT_BSD_USER_INTERRUPT) | ||
| 346 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
| 347 | |||
| 364 | 348 | ||
| 365 | if (de_iir & DE_GSE) | 349 | if (de_iir & DE_GSE) |
| 366 | ironlake_opregion_gse_intr(dev); | 350 | ironlake_opregion_gse_intr(dev); |
| @@ -388,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
| 388 | } | 372 | } |
| 389 | 373 | ||
| 390 | if (de_iir & DE_PCU_EVENT) { | 374 | if (de_iir & DE_PCU_EVENT) { |
| 391 | I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 375 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
| 392 | i915_handle_rps_change(dev); | 376 | i915_handle_rps_change(dev); |
| 393 | } | 377 | } |
| 394 | 378 | ||
| @@ -536,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev) | |||
| 536 | */ | 520 | */ |
| 537 | bbaddr = 0; | 521 | bbaddr = 0; |
| 538 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 522 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
| 539 | ring = (u32 *)(dev_priv->ring.virtual_start + head); | 523 | ring = (u32 *)(dev_priv->render_ring.virtual_start + head); |
| 540 | 524 | ||
| 541 | while (--ring >= (u32 *)dev_priv->ring.virtual_start) { | 525 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { |
| 542 | bbaddr = i915_get_bbaddr(dev, ring); | 526 | bbaddr = i915_get_bbaddr(dev, ring); |
| 543 | if (bbaddr) | 527 | if (bbaddr) |
| 544 | break; | 528 | break; |
| 545 | } | 529 | } |
| 546 | 530 | ||
| 547 | if (bbaddr == 0) { | 531 | if (bbaddr == 0) { |
| 548 | ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); | 532 | ring = (u32 *)(dev_priv->render_ring.virtual_start |
| 549 | while (--ring >= (u32 *)dev_priv->ring.virtual_start) { | 533 | + dev_priv->render_ring.size); |
| 534 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { | ||
| 550 | bbaddr = i915_get_bbaddr(dev, ring); | 535 | bbaddr = i915_get_bbaddr(dev, ring); |
| 551 | if (bbaddr) | 536 | if (bbaddr) |
| 552 | break; | 537 | break; |
| @@ -587,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
| 587 | return; | 572 | return; |
| 588 | } | 573 | } |
| 589 | 574 | ||
| 590 | error->seqno = i915_get_gem_seqno(dev); | 575 | error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); |
| 591 | error->eir = I915_READ(EIR); | 576 | error->eir = I915_READ(EIR); |
| 592 | error->pgtbl_er = I915_READ(PGTBL_ER); | 577 | error->pgtbl_er = I915_READ(PGTBL_ER); |
| 593 | error->pipeastat = I915_READ(PIPEASTAT); | 578 | error->pipeastat = I915_READ(PIPEASTAT); |
| @@ -615,7 +600,9 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
| 615 | batchbuffer[0] = NULL; | 600 | batchbuffer[0] = NULL; |
| 616 | batchbuffer[1] = NULL; | 601 | batchbuffer[1] = NULL; |
| 617 | count = 0; | 602 | count = 0; |
| 618 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | 603 | list_for_each_entry(obj_priv, |
| 604 | &dev_priv->render_ring.active_list, list) { | ||
| 605 | |||
| 619 | struct drm_gem_object *obj = &obj_priv->base; | 606 | struct drm_gem_object *obj = &obj_priv->base; |
| 620 | 607 | ||
| 621 | if (batchbuffer[0] == NULL && | 608 | if (batchbuffer[0] == NULL && |
| @@ -639,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
| 639 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | 626 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); |
| 640 | 627 | ||
| 641 | /* Record the ringbuffer */ | 628 | /* Record the ringbuffer */ |
| 642 | error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); | 629 | error->ringbuffer = i915_error_object_create(dev, |
| 630 | dev_priv->render_ring.gem_object); | ||
| 643 | 631 | ||
| 644 | /* Record buffers on the active list. */ | 632 | /* Record buffers on the active list. */ |
| 645 | error->active_bo = NULL; | 633 | error->active_bo = NULL; |
| @@ -651,7 +639,8 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
| 651 | 639 | ||
| 652 | if (error->active_bo) { | 640 | if (error->active_bo) { |
| 653 | int i = 0; | 641 | int i = 0; |
| 654 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | 642 | list_for_each_entry(obj_priv, |
| 643 | &dev_priv->render_ring.active_list, list) { | ||
| 655 | struct drm_gem_object *obj = &obj_priv->base; | 644 | struct drm_gem_object *obj = &obj_priv->base; |
| 656 | 645 | ||
| 657 | error->active_bo[i].size = obj->size; | 646 | error->active_bo[i].size = obj->size; |
| @@ -703,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev) | |||
| 703 | i915_error_state_free(dev, error); | 692 | i915_error_state_free(dev, error); |
| 704 | } | 693 | } |
| 705 | 694 | ||
| 706 | /** | 695 | static void i915_report_and_clear_eir(struct drm_device *dev) |
| 707 | * i915_handle_error - handle an error interrupt | ||
| 708 | * @dev: drm device | ||
| 709 | * | ||
| 710 | * Do some basic checking of register state at error interrupt time and | ||
| 711 | * dump it to the syslog. Also call i915_capture_error_state() to make | ||
| 712 | * sure we get a record and make it available in debugfs. Fire a uevent | ||
| 713 | * so userspace knows something bad happened (should trigger collection | ||
| 714 | * of a ring dump etc.). | ||
| 715 | */ | ||
| 716 | static void i915_handle_error(struct drm_device *dev, bool wedged) | ||
| 717 | { | 696 | { |
| 718 | struct drm_i915_private *dev_priv = dev->dev_private; | 697 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 719 | u32 eir = I915_READ(EIR); | 698 | u32 eir = I915_READ(EIR); |
| 720 | u32 pipea_stats = I915_READ(PIPEASTAT); | ||
| 721 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | ||
| 722 | 699 | ||
| 723 | i915_capture_error_state(dev); | 700 | if (!eir) |
| 701 | return; | ||
| 724 | 702 | ||
| 725 | printk(KERN_ERR "render error detected, EIR: 0x%08x\n", | 703 | printk(KERN_ERR "render error detected, EIR: 0x%08x\n", |
| 726 | eir); | 704 | eir); |
| @@ -766,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
| 766 | } | 744 | } |
| 767 | 745 | ||
| 768 | if (eir & I915_ERROR_MEMORY_REFRESH) { | 746 | if (eir & I915_ERROR_MEMORY_REFRESH) { |
| 747 | u32 pipea_stats = I915_READ(PIPEASTAT); | ||
| 748 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | ||
| 749 | |||
| 769 | printk(KERN_ERR "memory refresh error\n"); | 750 | printk(KERN_ERR "memory refresh error\n"); |
| 770 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", | 751 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", |
| 771 | pipea_stats); | 752 | pipea_stats); |
| @@ -822,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
| 822 | I915_WRITE(EMR, I915_READ(EMR) | eir); | 803 | I915_WRITE(EMR, I915_READ(EMR) | eir); |
| 823 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 804 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 824 | } | 805 | } |
| 806 | } | ||
| 807 | |||
| 808 | /** | ||
| 809 | * i915_handle_error - handle an error interrupt | ||
| 810 | * @dev: drm device | ||
| 811 | * | ||
| 812 | * Do some basic checking of register state at error interrupt time and | ||
| 813 | * dump it to the syslog. Also call i915_capture_error_state() to make | ||
| 814 | * sure we get a record and make it available in debugfs. Fire a uevent | ||
| 815 | * so userspace knows something bad happened (should trigger collection | ||
| 816 | * of a ring dump etc.). | ||
| 817 | */ | ||
| 818 | static void i915_handle_error(struct drm_device *dev, bool wedged) | ||
| 819 | { | ||
| 820 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 821 | |||
| 822 | i915_capture_error_state(dev); | ||
| 823 | i915_report_and_clear_eir(dev); | ||
| 825 | 824 | ||
| 826 | if (wedged) { | 825 | if (wedged) { |
| 827 | atomic_set(&dev_priv->mm.wedged, 1); | 826 | atomic_set(&dev_priv->mm.wedged, 1); |
| @@ -829,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
| 829 | /* | 828 | /* |
| 830 | * Wakeup waiting processes so they don't hang | 829 | * Wakeup waiting processes so they don't hang |
| 831 | */ | 830 | */ |
| 832 | DRM_WAKEUP(&dev_priv->irq_queue); | 831 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); |
| 833 | } | 832 | } |
| 834 | 833 | ||
| 835 | queue_work(dev_priv->wq, &dev_priv->error_work); | 834 | queue_work(dev_priv->wq, &dev_priv->error_work); |
| @@ -848,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
| 848 | unsigned long irqflags; | 847 | unsigned long irqflags; |
| 849 | int irq_received; | 848 | int irq_received; |
| 850 | int ret = IRQ_NONE; | 849 | int ret = IRQ_NONE; |
| 850 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
| 851 | 851 | ||
| 852 | atomic_inc(&dev_priv->irq_received); | 852 | atomic_inc(&dev_priv->irq_received); |
| 853 | 853 | ||
| @@ -928,30 +928,42 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
| 928 | } | 928 | } |
| 929 | 929 | ||
| 930 | if (iir & I915_USER_INTERRUPT) { | 930 | if (iir & I915_USER_INTERRUPT) { |
| 931 | u32 seqno = i915_get_gem_seqno(dev); | 931 | u32 seqno = |
| 932 | dev_priv->mm.irq_gem_seqno = seqno; | 932 | render_ring->get_gem_seqno(dev, render_ring); |
| 933 | render_ring->irq_gem_seqno = seqno; | ||
| 933 | trace_i915_gem_request_complete(dev, seqno); | 934 | trace_i915_gem_request_complete(dev, seqno); |
| 934 | DRM_WAKEUP(&dev_priv->irq_queue); | 935 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); |
| 935 | dev_priv->hangcheck_count = 0; | 936 | dev_priv->hangcheck_count = 0; |
| 936 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 937 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
| 937 | } | 938 | } |
| 938 | 939 | ||
| 939 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) | 940 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) |
| 941 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
| 942 | |||
| 943 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { | ||
| 940 | intel_prepare_page_flip(dev, 0); | 944 | intel_prepare_page_flip(dev, 0); |
| 945 | if (dev_priv->flip_pending_is_done) | ||
| 946 | intel_finish_page_flip_plane(dev, 0); | ||
| 947 | } | ||
| 941 | 948 | ||
| 942 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) | 949 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { |
| 943 | intel_prepare_page_flip(dev, 1); | 950 | intel_prepare_page_flip(dev, 1); |
| 951 | if (dev_priv->flip_pending_is_done) | ||
| 952 | intel_finish_page_flip_plane(dev, 1); | ||
| 953 | } | ||
| 944 | 954 | ||
| 945 | if (pipea_stats & vblank_status) { | 955 | if (pipea_stats & vblank_status) { |
| 946 | vblank++; | 956 | vblank++; |
| 947 | drm_handle_vblank(dev, 0); | 957 | drm_handle_vblank(dev, 0); |
| 948 | intel_finish_page_flip(dev, 0); | 958 | if (!dev_priv->flip_pending_is_done) |
| 959 | intel_finish_page_flip(dev, 0); | ||
| 949 | } | 960 | } |
| 950 | 961 | ||
| 951 | if (pipeb_stats & vblank_status) { | 962 | if (pipeb_stats & vblank_status) { |
| 952 | vblank++; | 963 | vblank++; |
| 953 | drm_handle_vblank(dev, 1); | 964 | drm_handle_vblank(dev, 1); |
| 954 | intel_finish_page_flip(dev, 1); | 965 | if (!dev_priv->flip_pending_is_done) |
| 966 | intel_finish_page_flip(dev, 1); | ||
| 955 | } | 967 | } |
| 956 | 968 | ||
| 957 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || | 969 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || |
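The page-flip hunks above encode one policy twice, once per plane/pipe: complete the flip at the flip-pending interrupt when the hardware reports completion there (flip_pending_is_done), otherwise fall back to completing it at the next vblank. Condensed, with a hypothetical per-plane predicate:

	/* plane_flip_pending() is hypothetical; the patch tests the two
	 * I915_DISPLAY_PLANE_*_FLIP_PENDING_INTERRUPT bits directly.
	 */
	if (plane_flip_pending(iir, plane)) {
		intel_prepare_page_flip(dev, plane);
		if (dev_priv->flip_pending_is_done)
			intel_finish_page_flip_plane(dev, plane);
	}
	if (pipe_stats & vblank_status) {
		vblank++;
		drm_handle_vblank(dev, pipe);
		if (!dev_priv->flip_pending_is_done)
			intel_finish_page_flip(dev, pipe);
	}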
| @@ -984,7 +996,6 @@ static int i915_emit_irq(struct drm_device * dev) | |||
| 984 | { | 996 | { |
| 985 | drm_i915_private_t *dev_priv = dev->dev_private; | 997 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 986 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 998 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
| 987 | RING_LOCALS; | ||
| 988 | 999 | ||
| 989 | i915_kernel_lost_context(dev); | 1000 | i915_kernel_lost_context(dev); |
| 990 | 1001 | ||
| @@ -1006,43 +1017,13 @@ static int i915_emit_irq(struct drm_device * dev) | |||
| 1006 | return dev_priv->counter; | 1017 | return dev_priv->counter; |
| 1007 | } | 1018 | } |
| 1008 | 1019 | ||
| 1009 | void i915_user_irq_get(struct drm_device *dev) | ||
| 1010 | { | ||
| 1011 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
| 1012 | unsigned long irqflags; | ||
| 1013 | |||
| 1014 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
| 1015 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { | ||
| 1016 | if (HAS_PCH_SPLIT(dev)) | ||
| 1017 | ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | ||
| 1018 | else | ||
| 1019 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | ||
| 1020 | } | ||
| 1021 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | void i915_user_irq_put(struct drm_device *dev) | ||
| 1025 | { | ||
| 1026 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
| 1027 | unsigned long irqflags; | ||
| 1028 | |||
| 1029 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
| 1030 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); | ||
| 1031 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { | ||
| 1032 | if (HAS_PCH_SPLIT(dev)) | ||
| 1033 | ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | ||
| 1034 | else | ||
| 1035 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | ||
| 1036 | } | ||
| 1037 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) | 1020 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) |
| 1041 | { | 1021 | { |
| 1042 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1022 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1023 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
| 1043 | 1024 | ||
| 1044 | if (dev_priv->trace_irq_seqno == 0) | 1025 | if (dev_priv->trace_irq_seqno == 0) |
| 1045 | i915_user_irq_get(dev); | 1026 | render_ring->user_irq_get(dev, render_ring); |
| 1046 | 1027 | ||
| 1047 | dev_priv->trace_irq_seqno = seqno; | 1028 | dev_priv->trace_irq_seqno = seqno; |
| 1048 | } | 1029 | } |
| @@ -1052,6 +1033,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
| 1052 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1033 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1053 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 1034 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
| 1054 | int ret = 0; | 1035 | int ret = 0; |
| 1036 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
| 1055 | 1037 | ||
| 1056 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, | 1038 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, |
| 1057 | READ_BREADCRUMB(dev_priv)); | 1039 | READ_BREADCRUMB(dev_priv)); |
| @@ -1065,10 +1047,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
| 1065 | if (master_priv->sarea_priv) | 1047 | if (master_priv->sarea_priv) |
| 1066 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 1048 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
| 1067 | 1049 | ||
| 1068 | i915_user_irq_get(dev); | 1050 | render_ring->user_irq_get(dev, render_ring); |
| 1069 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, | 1051 | DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, |
| 1070 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 1052 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
| 1071 | i915_user_irq_put(dev); | 1053 | render_ring->user_irq_put(dev, render_ring); |
| 1072 | 1054 | ||
| 1073 | if (ret == -EBUSY) { | 1055 | if (ret == -EBUSY) { |
| 1074 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 1056 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
| @@ -1087,7 +1069,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, | |||
| 1087 | drm_i915_irq_emit_t *emit = data; | 1069 | drm_i915_irq_emit_t *emit = data; |
| 1088 | int result; | 1070 | int result; |
| 1089 | 1071 | ||
| 1090 | if (!dev_priv || !dev_priv->ring.virtual_start) { | 1072 | if (!dev_priv || !dev_priv->render_ring.virtual_start) { |
| 1091 | DRM_ERROR("called with no initialization\n"); | 1073 | DRM_ERROR("called with no initialization\n"); |
| 1092 | return -EINVAL; | 1074 | return -EINVAL; |
| 1093 | } | 1075 | } |
| @@ -1233,9 +1215,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
| 1233 | return -EINVAL; | 1215 | return -EINVAL; |
| 1234 | } | 1216 | } |
| 1235 | 1217 | ||
| 1236 | struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { | 1218 | struct drm_i915_gem_request * |
| 1219 | i915_get_tail_request(struct drm_device *dev) | ||
| 1220 | { | ||
| 1237 | drm_i915_private_t *dev_priv = dev->dev_private; | 1221 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1238 | return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); | 1222 | return list_entry(dev_priv->render_ring.request_list.prev, |
| 1223 | struct drm_i915_gem_request, list); | ||
| 1239 | } | 1224 | } |
| 1240 | 1225 | ||
| 1241 | /** | 1226 | /** |
| @@ -1260,8 +1245,10 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
| 1260 | acthd = I915_READ(ACTHD_I965); | 1245 | acthd = I915_READ(ACTHD_I965); |
| 1261 | 1246 | ||
| 1262 | /* If all work is done then ACTHD clearly hasn't advanced. */ | 1247 | /* If all work is done then ACTHD clearly hasn't advanced. */ |
| 1263 | if (list_empty(&dev_priv->mm.request_list) || | 1248 | if (list_empty(&dev_priv->render_ring.request_list) || |
| 1264 | i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { | 1249 | i915_seqno_passed(i915_get_gem_seqno(dev, |
| 1250 | &dev_priv->render_ring), | ||
| 1251 | i915_get_tail_request(dev)->seqno)) { | ||
| 1265 | dev_priv->hangcheck_count = 0; | 1252 | dev_priv->hangcheck_count = 0; |
| 1266 | return; | 1253 | return; |
| 1267 | } | 1254 | } |
| @@ -1314,7 +1301,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
| 1314 | /* enable kind of interrupts always enabled */ | 1301 | /* enable kind of interrupts always enabled */ |
| 1315 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1302 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
| 1316 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1303 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
| 1317 | u32 render_mask = GT_PIPE_NOTIFY; | 1304 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; |
| 1318 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1305 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
| 1319 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1306 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
| 1320 | 1307 | ||
| @@ -1328,7 +1315,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
| 1328 | (void) I915_READ(DEIER); | 1315 | (void) I915_READ(DEIER); |
| 1329 | 1316 | ||
| 1330 | /* user interrupt should be enabled, but masked initially */ | 1317 | /* user interrupt should be enabled, but masked initially */
| 1331 | dev_priv->gt_irq_mask_reg = 0xffffffff; | 1318 | dev_priv->gt_irq_mask_reg = ~render_mask; |
| 1332 | dev_priv->gt_irq_enable_reg = render_mask; | 1319 | dev_priv->gt_irq_enable_reg = render_mask; |
| 1333 | 1320 | ||
| 1334 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1321 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
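The change from 0xffffffff to ~render_mask above follows the usual IER/IMR split: IER gates which sources may raise interrupts at all, IMR masks their delivery, and a source reaches the CPU only when its bit is set in IER and clear in IMR. A sketch of the unmask step, assuming the GTIMR/GTIER pair defined in i915_reg.h:

	/* 0 in IMR means "deliver"; start with only render_mask unmasked. */
	dev_priv->gt_irq_mask_reg = ~render_mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);	/* posting read */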
| @@ -1391,7 +1378,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
| 1391 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; | 1378 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; |
| 1392 | u32 error_mask; | 1379 | u32 error_mask; |
| 1393 | 1380 | ||
| 1394 | DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); | 1381 | DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); |
| 1382 | |||
| 1383 | if (HAS_BSD(dev)) | ||
| 1384 | DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); | ||
| 1395 | 1385 | ||
| 1396 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 1386 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
| 1397 | 1387 | ||
| @@ -1405,29 +1395,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
| 1405 | dev_priv->pipestat[1] = 0; | 1395 | dev_priv->pipestat[1] = 0; |
| 1406 | 1396 | ||
| 1407 | if (I915_HAS_HOTPLUG(dev)) { | 1397 | if (I915_HAS_HOTPLUG(dev)) { |
| 1408 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||
| 1409 | |||
| 1410 | /* Note HDMI and DP share bits */ | ||
| 1411 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||
| 1412 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
| 1413 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
| 1414 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
| 1415 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
| 1416 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
| 1417 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
| 1418 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
| 1419 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
| 1420 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
| 1421 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
| 1422 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
| 1423 | /* Ignore TV since it's buggy */ | ||
| 1424 | |||
| 1425 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
| 1426 | |||
| 1427 | /* Enable in IER... */ | 1398 | /* Enable in IER... */ |
| 1428 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1399 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
| 1429 | /* and unmask in IMR */ | 1400 | /* and unmask in IMR */ |
| 1430 | i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); | 1401 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; |
| 1431 | } | 1402 | } |
| 1432 | 1403 | ||
| 1433 | /* | 1404 | /* |
| @@ -1445,16 +1416,41 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
| 1445 | } | 1416 | } |
| 1446 | I915_WRITE(EMR, error_mask); | 1417 | I915_WRITE(EMR, error_mask); |
| 1447 | 1418 | ||
| 1448 | /* Disable pipe interrupt enables, clear pending pipe status */ | ||
| 1449 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | ||
| 1450 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | ||
| 1451 | /* Clear pending interrupt status */ | ||
| 1452 | I915_WRITE(IIR, I915_READ(IIR)); | ||
| 1453 | |||
| 1454 | I915_WRITE(IER, enable_mask); | ||
| 1455 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 1419 | I915_WRITE(IMR, dev_priv->irq_mask_reg); |
| 1420 | I915_WRITE(IER, enable_mask); | ||
| 1456 | (void) I915_READ(IER); | 1421 | (void) I915_READ(IER); |
| 1457 | 1422 | ||
| 1423 | if (I915_HAS_HOTPLUG(dev)) { | ||
| 1424 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||
| 1425 | |||
| 1426 | /* Note HDMI and DP share bits */ | ||
| 1427 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||
| 1428 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
| 1429 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
| 1430 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
| 1431 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
| 1432 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
| 1433 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
| 1434 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
| 1435 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
| 1436 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
| 1437 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | ||
| 1438 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
| 1439 | |||
| 1440 | /* Programming the CRT detection parameters tends | ||
| 1441 | * to generate a spurious hotplug event about three | ||
| 1442 | * seconds later. So just do it once. | ||
| 1443 | */ | ||
| 1444 | if (IS_G4X(dev)) | ||
| 1445 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
| 1446 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
| 1447 | } | ||
| 1448 | |||
| 1449 | /* Ignore TV since it's buggy */ | ||
| 1450 | |||
| 1451 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
| 1452 | } | ||
| 1453 | |||
| 1458 | opregion_enable_asle(dev); | 1454 | opregion_enable_asle(dev); |
| 1459 | 1455 | ||
| 1460 | return 0; | 1456 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f3e39cc46f0d..cf41c672defe 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -178,6 +178,7 @@ | |||
| 178 | #define MI_OVERLAY_OFF (0x2<<21) | 178 | #define MI_OVERLAY_OFF (0x2<<21) |
| 179 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) | 179 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) |
| 180 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | 180 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) |
| 181 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) | ||
| 181 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | 182 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) |
| 182 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 183 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
| 183 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 184 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
| @@ -334,6 +335,7 @@ | |||
| 334 | #define I915_DEBUG_INTERRUPT (1<<2) | 335 | #define I915_DEBUG_INTERRUPT (1<<2) |
| 335 | #define I915_USER_INTERRUPT (1<<1) | 336 | #define I915_USER_INTERRUPT (1<<1) |
| 336 | #define I915_ASLE_INTERRUPT (1<<0) | 337 | #define I915_ASLE_INTERRUPT (1<<0) |
| 338 | #define I915_BSD_USER_INTERRUPT (1<<25) | ||
| 337 | #define EIR 0x020b0 | 339 | #define EIR 0x020b0 |
| 338 | #define EMR 0x020b4 | 340 | #define EMR 0x020b4 |
| 339 | #define ESR 0x020b8 | 341 | #define ESR 0x020b8 |
| @@ -357,6 +359,70 @@ | |||
| 357 | #define LM_BURST_LENGTH 0x00000700 | 359 | #define LM_BURST_LENGTH 0x00000700 |
| 358 | #define LM_FIFO_WATERMARK 0x0000001F | 360 | #define LM_FIFO_WATERMARK 0x0000001F |
| 359 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ | 361 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ |
| 362 | #define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */ | ||
| 363 | |||
| 364 | /* Make render/texture TLB fetches lower priority than associated data | ||
| 365 | * fetches. This is not turned on by default. | ||
| 366 | */ | ||
| 367 | #define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) | ||
| 368 | |||
| 369 | /* Isoch request wait on GTT enable (Display A/B/C streams). | ||
| 370 | * Make isoch requests stall on the TLB update. May cause | ||
| 371 | * display underruns (test mode only) | ||
| 372 | */ | ||
| 373 | #define MI_ARB_ISOCH_WAIT_GTT (1 << 14) | ||
| 374 | |||
| 375 | /* Block grant count for isoch requests when block count is | ||
| 376 | * set to a finite value. | ||
| 377 | */ | ||
| 378 | #define MI_ARB_BLOCK_GRANT_MASK (3 << 12) | ||
| 379 | #define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ | ||
| 380 | #define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ | ||
| 381 | #define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ | ||
| 382 | #define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ | ||
| 383 | |||
| 384 | /* Enable render writes to complete in C2/C3/C4 power states. | ||
| 385 | * If this isn't enabled, render writes are prevented in low | ||
| 386 | * power states. That seems bad to me. | ||
| 387 | */ | ||
| 388 | #define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) | ||
| 389 | |||
| 390 | /* This acknowledges an async flip immediately instead | ||
| 391 | * of waiting for 2 TLB fetches. | ||
| 392 | */ | ||
| 393 | #define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) | ||
| 394 | |||
| 395 | /* Enables non-sequential data reads through the arbiter | ||
| 396 | */ | ||
| 397 | #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) | ||
| 398 | |||
| 399 | /* Disable FSB snooping of cacheable write cycles from binner/render | ||
| 400 | * command stream | ||
| 401 | */ | ||
| 402 | #define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) | ||
| 403 | |||
| 404 | /* Arbiter time slice for non-isoch streams */ | ||
| 405 | #define MI_ARB_TIME_SLICE_MASK (7 << 5) | ||
| 406 | #define MI_ARB_TIME_SLICE_1 (0 << 5) | ||
| 407 | #define MI_ARB_TIME_SLICE_2 (1 << 5) | ||
| 408 | #define MI_ARB_TIME_SLICE_4 (2 << 5) | ||
| 409 | #define MI_ARB_TIME_SLICE_6 (3 << 5) | ||
| 410 | #define MI_ARB_TIME_SLICE_8 (4 << 5) | ||
| 411 | #define MI_ARB_TIME_SLICE_10 (5 << 5) | ||
| 412 | #define MI_ARB_TIME_SLICE_14 (6 << 5) | ||
| 413 | #define MI_ARB_TIME_SLICE_16 (7 << 5) | ||
| 414 | |||
| 415 | /* Low priority grace period page size */ | ||
| 416 | #define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ | ||
| 417 | #define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) | ||
| 418 | |||
| 419 | /* Disable display A/B trickle feed */ | ||
| 420 | #define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) | ||
| 421 | |||
| 422 | /* Set display plane priority */ | ||
| 423 | #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ | ||
| 424 | #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ | ||
| 425 | |||
| 360 | #define CACHE_MODE_0 0x02120 /* 915+ only */ | 426 | #define CACHE_MODE_0 0x02120 /* 915+ only */ |
| 361 | #define CM0_MASK_SHIFT 16 | 427 | #define CM0_MASK_SHIFT 16 |
| 362 | #define CM0_IZ_OPT_DISABLE (1<<6) | 428 | #define CM0_IZ_OPT_DISABLE (1<<6) |
| @@ -367,7 +433,40 @@ | |||
| 367 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 433 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
| 368 | #define BB_ADDR 0x02140 /* 8 bytes */ | 434 | #define BB_ADDR 0x02140 /* 8 bytes */ |
| 369 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 435 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
| 436 | #define ECOSKPD 0x021d0 | ||
| 437 | #define ECO_GATING_CX_ONLY (1<<3) | ||
| 438 | #define ECO_FLIP_DONE (1<<0) | ||
| 439 | |||
| 440 | /* GEN6 interrupt control */ | ||
| 441 | #define GEN6_RENDER_HWSTAM 0x2098 | ||
| 442 | #define GEN6_RENDER_IMR 0x20a8 | ||
| 443 | #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) | ||
| 444 | #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) | ||
| 445 | #define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) | ||
| 446 | #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) | ||
| 447 | #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) | ||
| 448 | #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) | ||
| 449 | #define GEN6_RENDER_SYNC_STATUS (1 << 2) | ||
| 450 | #define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) | ||
| 451 | #define GEN6_RENDER_USER_INTERRUPT (1 << 0) | ||
| 452 | |||
| 453 | #define GEN6_BLITTER_HWSTAM 0x22098 | ||
| 454 | #define GEN6_BLITTER_IMR 0x220a8 | ||
| 455 | #define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) | ||
| 456 | #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) | ||
| 457 | #define GEN6_BLITTER_SYNC_STATUS (1 << 24) | ||
| 458 | #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) | ||
| 459 | /* | ||
| 460 | * BSD (bit stream decoder) instruction and interrupt control register defines | ||
| 461 | * (G4X and Ironlake only) | ||
| 462 | */ | ||
| 370 | 463 | ||
| 464 | #define BSD_RING_TAIL 0x04030 | ||
| 465 | #define BSD_RING_HEAD 0x04034 | ||
| 466 | #define BSD_RING_START 0x04038 | ||
| 467 | #define BSD_RING_CTL 0x0403c | ||
| 468 | #define BSD_RING_ACTHD 0x04074 | ||
| 469 | #define BSD_HWS_PGA 0x04080 | ||
| 371 | 470 | ||
| 372 | /* | 471 | /* |
| 373 | * Framebuffer compression (915+ only) | 472 | * Framebuffer compression (915+ only) |
| @@ -805,6 +904,10 @@ | |||
| 805 | #define DCC_CHANNEL_XOR_DISABLE (1 << 10) | 904 | #define DCC_CHANNEL_XOR_DISABLE (1 << 10) |
| 806 | #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) | 905 | #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) |
| 807 | 906 | ||
| 907 | /** Pineview MCH register contains DDR3 setting */ | ||
| 908 | #define CSHRDDR3CTL 0x101a8 | ||
| 909 | #define CSHRDDR3CTL_DDR3 (1 << 2) | ||
| 910 | |||
| 808 | /** 965 MCH register controlling DRAM channel configuration */ | 911 | /** 965 MCH register controlling DRAM channel configuration */ |
| 809 | #define C0DRB3 0x10206 | 912 | #define C0DRB3 0x10206 |
| 810 | #define C1DRB3 0x10606 | 913 | #define C1DRB3 0x10606 |
| @@ -826,6 +929,12 @@ | |||
| 826 | #define CLKCFG_MEM_800 (3 << 4) | 929 | #define CLKCFG_MEM_800 (3 << 4) |
| 827 | #define CLKCFG_MEM_MASK (7 << 4) | 930 | #define CLKCFG_MEM_MASK (7 << 4) |
| 828 | 931 | ||
| 932 | #define TR1 0x11006 | ||
| 933 | #define TSFS 0x11020 | ||
| 934 | #define TSFS_SLOPE_MASK 0x0000ff00 | ||
| 935 | #define TSFS_SLOPE_SHIFT 8 | ||
| 936 | #define TSFS_INTR_MASK 0x000000ff | ||
| 937 | |||
| 829 | #define CRSTANDVID 0x11100 | 938 | #define CRSTANDVID 0x11100 |
| 830 | #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ | 939 | #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ |
| 831 | #define PXVFREQ_PX_MASK 0x7f000000 | 940 | #define PXVFREQ_PX_MASK 0x7f000000 |
| @@ -964,6 +1073,41 @@ | |||
| 964 | #define MEMSTAT_SRC_CTL_STDBY 3 | 1073 | #define MEMSTAT_SRC_CTL_STDBY 3 |
| 965 | #define RCPREVBSYTUPAVG 0x113b8 | 1074 | #define RCPREVBSYTUPAVG 0x113b8 |
| 966 | #define RCPREVBSYTDNAVG 0x113bc | 1075 | #define RCPREVBSYTDNAVG 0x113bc |
| 1076 | #define SDEW 0x1124c | ||
| 1077 | #define CSIEW0 0x11250 | ||
| 1078 | #define CSIEW1 0x11254 | ||
| 1079 | #define CSIEW2 0x11258 | ||
| 1080 | #define PEW 0x1125c | ||
| 1081 | #define DEW 0x11270 | ||
| 1082 | #define MCHAFE 0x112c0 | ||
| 1083 | #define CSIEC 0x112e0 | ||
| 1084 | #define DMIEC 0x112e4 | ||
| 1085 | #define DDREC 0x112e8 | ||
| 1086 | #define PEG0EC 0x112ec | ||
| 1087 | #define PEG1EC 0x112f0 | ||
| 1088 | #define GFXEC 0x112f4 | ||
| 1089 | #define RPPREVBSYTUPAVG 0x113b8 | ||
| 1090 | #define RPPREVBSYTDNAVG 0x113bc | ||
| 1091 | #define ECR 0x11600 | ||
| 1092 | #define ECR_GPFE (1<<31) | ||
| 1093 | #define ECR_IMONE (1<<30) | ||
| 1094 | #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ | ||
| 1095 | #define OGW0 0x11608 | ||
| 1096 | #define OGW1 0x1160c | ||
| 1097 | #define EG0 0x11610 | ||
| 1098 | #define EG1 0x11614 | ||
| 1099 | #define EG2 0x11618 | ||
| 1100 | #define EG3 0x1161c | ||
| 1101 | #define EG4 0x11620 | ||
| 1102 | #define EG5 0x11624 | ||
| 1103 | #define EG6 0x11628 | ||
| 1104 | #define EG7 0x1162c | ||
| 1105 | #define PXW 0x11664 | ||
| 1106 | #define PXWL 0x11680 | ||
| 1107 | #define LCFUSE02 0x116c0 | ||
| 1108 | #define LCFUSE_HIV_MASK 0x000000ff | ||
| 1109 | #define CSIPLL0 0x12c10 | ||
| 1110 | #define DDRMPLL1 0x12c20 | ||
| 967 | #define PEG_BAND_GAP_DATA 0x14d68 | 1111 | #define PEG_BAND_GAP_DATA 0x14d68 |
| 968 | 1112 | ||
| 969 | /* | 1113 | /* |
| @@ -1054,8 +1198,6 @@ | |||
| 1054 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) | 1198 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) |
| 1055 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) | 1199 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) |
| 1056 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 1200 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
| 1057 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | ||
| 1058 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f | ||
| 1059 | 1201 | ||
| 1060 | #define PORT_HOTPLUG_STAT 0x61114 | 1202 | #define PORT_HOTPLUG_STAT 0x61114 |
| 1061 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 1203 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
| @@ -2355,6 +2497,8 @@ | |||
| 2355 | #define GT_PIPE_NOTIFY (1 << 4) | 2497 | #define GT_PIPE_NOTIFY (1 << 4) |
| 2356 | #define GT_SYNC_STATUS (1 << 2) | 2498 | #define GT_SYNC_STATUS (1 << 2) |
| 2357 | #define GT_USER_INTERRUPT (1 << 0) | 2499 | #define GT_USER_INTERRUPT (1 << 0) |
| 2500 | #define GT_BSD_USER_INTERRUPT (1 << 5) | ||
| 2501 | |||
| 2358 | 2502 | ||
| 2359 | #define GTISR 0x44010 | 2503 | #define GTISR 0x44010 |
| 2360 | #define GTIMR 0x44014 | 2504 | #define GTIMR 0x44014 |
| @@ -2690,6 +2834,9 @@ | |||
| 2690 | #define SDVO_ENCODING (0) | 2834 | #define SDVO_ENCODING (0) |
| 2691 | #define TMDS_ENCODING (2 << 10) | 2835 | #define TMDS_ENCODING (2 << 10) |
| 2692 | #define NULL_PACKET_VSYNC_ENABLE (1 << 9) | 2836 | #define NULL_PACKET_VSYNC_ENABLE (1 << 9) |
| 2837 | /* CPT */ | ||
| 2838 | #define HDMI_MODE_SELECT (1 << 9) | ||
| 2839 | #define DVI_MODE_SELECT (0) | ||
| 2693 | #define SDVOB_BORDER_ENABLE (1 << 7) | 2840 | #define SDVOB_BORDER_ENABLE (1 << 7) |
| 2694 | #define AUDIO_ENABLE (1 << 6) | 2841 | #define AUDIO_ENABLE (1 << 6) |
| 2695 | #define VSYNC_ACTIVE_HIGH (1 << 4) | 2842 | #define VSYNC_ACTIVE_HIGH (1 << 4) |
| @@ -2722,6 +2869,7 @@ | |||
| 2722 | 2869 | ||
| 2723 | #define PCH_PP_STATUS 0xc7200 | 2870 | #define PCH_PP_STATUS 0xc7200 |
| 2724 | #define PCH_PP_CONTROL 0xc7204 | 2871 | #define PCH_PP_CONTROL 0xc7204 |
| 2872 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | ||
| 2725 | #define EDP_FORCE_VDD (1 << 3) | 2873 | #define EDP_FORCE_VDD (1 << 3) |
| 2726 | #define EDP_BLC_ENABLE (1 << 2) | 2874 | #define EDP_BLC_ENABLE (1 << 2) |
| 2727 | #define PANEL_POWER_RESET (1 << 1) | 2875 | #define PANEL_POWER_RESET (1 << 1) |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 9e4c45f68d6e..fab21760dd57 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
| @@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind, | |||
| 53 | __entry->obj, __entry->gtt_offset) | 53 | __entry->obj, __entry->gtt_offset) |
| 54 | ); | 54 | ); |
| 55 | 55 | ||
| 56 | TRACE_EVENT(i915_gem_object_clflush, | ||
| 57 | |||
| 58 | TP_PROTO(struct drm_gem_object *obj), | ||
| 59 | |||
| 60 | TP_ARGS(obj), | ||
| 61 | |||
| 62 | TP_STRUCT__entry( | ||
| 63 | __field(struct drm_gem_object *, obj) | ||
| 64 | ), | ||
| 65 | |||
| 66 | TP_fast_assign( | ||
| 67 | __entry->obj = obj; | ||
| 68 | ), | ||
| 69 | |||
| 70 | TP_printk("obj=%p", __entry->obj) | ||
| 71 | ); | ||
| 72 | |||
| 73 | TRACE_EVENT(i915_gem_object_change_domain, | 56 | TRACE_EVENT(i915_gem_object_change_domain, |
| 74 | 57 | ||
| 75 | TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | 58 | TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), |
| @@ -132,6 +115,13 @@ DECLARE_EVENT_CLASS(i915_gem_object, | |||
| 132 | TP_printk("obj=%p", __entry->obj) | 115 | TP_printk("obj=%p", __entry->obj) |
| 133 | ); | 116 | ); |
| 134 | 117 | ||
| 118 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, | ||
| 119 | |||
| 120 | TP_PROTO(struct drm_gem_object *obj), | ||
| 121 | |||
| 122 | TP_ARGS(obj) | ||
| 123 | ); | ||
| 124 | |||
| 135 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, | 125 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, |
| 136 | 126 | ||
| 137 | TP_PROTO(struct drm_gem_object *obj), | 127 | TP_PROTO(struct drm_gem_object *obj), |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 4c748d8f73d6..96f75d7f6633 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | |||
| 95 | panel_fixed_mode->clock = dvo_timing->clock * 10; | 95 | panel_fixed_mode->clock = dvo_timing->clock * 10; |
| 96 | panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; | 96 | panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; |
| 97 | 97 | ||
| 98 | if (dvo_timing->hsync_positive) | ||
| 99 | panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; | ||
| 100 | else | ||
| 101 | panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; | ||
| 102 | |||
| 103 | if (dvo_timing->vsync_positive) | ||
| 104 | panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; | ||
| 105 | else | ||
| 106 | panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; | ||
| 107 | |||
| 98 | /* Some VBTs have bogus h/vtotal values */ | 108 | /* Some VBTs have bogus h/vtotal values */ |
| 99 | if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) | 109 | if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) |
| 100 | panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; | 110 | panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; |
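
The intel_bios.c change above stops leaving panel sync polarity implicit: the VBT's DVO timing bits now pick exactly one of the positive/negative DRM flags per sync. A standalone sketch of that mapping — the DRM flag values match the UAPI header, while the two-bit struct is a trimmed stand-in for the VBT timing block:

    #include <stdio.h>

    #define DRM_MODE_FLAG_PHSYNC (1 << 0)
    #define DRM_MODE_FLAG_NHSYNC (1 << 1)
    #define DRM_MODE_FLAG_PVSYNC (1 << 2)
    #define DRM_MODE_FLAG_NVSYNC (1 << 3)

    struct dvo_timing_bits {
        unsigned hsync_positive:1; /* from the VBT detail timing block */
        unsigned vsync_positive:1;
    };

    /* Mirror of the patch: every panel mode gets an explicit polarity
     * flag for each sync, positive or negative, never neither. */
    static unsigned int vbt_sync_flags(struct dvo_timing_bits t)
    {
        unsigned int flags = 0;

        flags |= t.hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
        flags |= t.vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
        return flags;
    }

    int main(void)
    {
        struct dvo_timing_bits t = { .hsync_positive = 1, .vsync_positive = 0 };
        printf("flags = 0x%x\n", vbt_sync_flags(t)); /* PHSYNC | NVSYNC = 0x9 */
        return 0;
    }
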
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e16ac5a28c3c..ee0732b222a1 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -217,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
| 217 | { | 217 | { |
| 218 | struct drm_device *dev = connector->dev; | 218 | struct drm_device *dev = connector->dev; |
| 219 | struct drm_i915_private *dev_priv = dev->dev_private; | 219 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 220 | u32 hotplug_en; | 220 | u32 hotplug_en, orig, stat; |
| 221 | bool ret = false; | ||
| 221 | int i, tries = 0; | 222 | int i, tries = 0; |
| 222 | 223 | ||
| 223 | if (HAS_PCH_SPLIT(dev)) | 224 | if (HAS_PCH_SPLIT(dev)) |
| @@ -232,15 +233,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
| 232 | tries = 2; | 233 | tries = 2; |
| 233 | else | 234 | else |
| 234 | tries = 1; | 235 | tries = 1; |
| 235 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 236 | hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); |
| 236 | hotplug_en &= CRT_FORCE_HOTPLUG_MASK; | ||
| 237 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; | 237 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; |
| 238 | 238 | ||
| 239 | if (IS_G4X(dev)) | ||
| 240 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
| 241 | |||
| 242 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
| 243 | |||
| 244 | for (i = 0; i < tries ; i++) { | 239 | for (i = 0; i < tries ; i++) { |
| 245 | unsigned long timeout; | 240 | unsigned long timeout; |
| 246 | /* turn on the FORCE_DETECT */ | 241 | /* turn on the FORCE_DETECT */ |
| @@ -255,11 +250,17 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
| 255 | } while (time_after(timeout, jiffies)); | 250 | } while (time_after(timeout, jiffies)); |
| 256 | } | 251 | } |
| 257 | 252 | ||
| 258 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != | 253 | stat = I915_READ(PORT_HOTPLUG_STAT); |
| 259 | CRT_HOTPLUG_MONITOR_NONE) | 254 | if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) |
| 260 | return true; | 255 | ret = true; |
| 256 | |||
| 257 | /* clear the interrupt we just generated, if any */ | ||
| 258 | I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); | ||
| 259 | |||
| 260 | /* and put the bits back */ | ||
| 261 | I915_WRITE(PORT_HOTPLUG_EN, orig); | ||
| 261 | 262 | ||
| 262 | return false; | 263 | return ret; |
| 263 | } | 264 | } |
| 264 | 265 | ||
| 265 | static bool intel_crt_detect_ddc(struct drm_encoder *encoder) | 266 | static bool intel_crt_detect_ddc(struct drm_encoder *encoder) |
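
The reworked probe above is a save/force/sample/restore sequence: keep the original PORT_HOTPLUG_EN value, trigger a forced detect, read the monitor bits from PORT_HOTPLUG_STAT, ack the interrupt the probe raised, and put the enable bits back. A compressed model of that shape — the offsets and bit positions are copied from the register defines, but the MMIO accessors here are fakes over an array:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HOTPLUG_EN    0x61110
    #define HOTPLUG_STAT  0x61114
    #define FORCE_DETECT  (1u << 3)  /* CRT_HOTPLUG_FORCE_DETECT */
    #define MONITOR_MASK  (3u << 8)  /* CRT_HOTPLUG_MONITOR_MASK */
    #define INT_STATUS    (1u << 11) /* CRT_HOTPLUG_INT_STATUS */

    /* Fake MMIO backing store standing in for I915_READ/I915_WRITE. */
    static uint32_t regs[0x62000 / 4];
    static uint32_t reg_read(uint32_t r)          { return regs[r / 4]; }
    static void reg_write(uint32_t r, uint32_t v) { regs[r / 4] = v; }

    static bool crt_force_detect(void)
    {
        uint32_t orig = reg_read(HOTPLUG_EN);
        bool present;

        reg_write(HOTPLUG_EN, orig | FORCE_DETECT);
        /* ...the driver waits here for FORCE_DETECT to self-clear... */

        present = (reg_read(HOTPLUG_STAT) & MONITOR_MASK) != 0;

        reg_write(HOTPLUG_STAT, INT_STATUS); /* ack the interrupt we raised */
        reg_write(HOTPLUG_EN, orig);         /* restore the saved enables */
        return present;
    }

    int main(void)
    {
        regs[HOTPLUG_STAT / 4] = 3u << 8; /* pretend a monitor is attached */
        printf("detected: %d\n", crt_force_detect());
        return 0;
    }
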
| @@ -569,7 +570,7 @@ void intel_crt_init(struct drm_device *dev) | |||
| 569 | (1 << INTEL_ANALOG_CLONE_BIT) | | 570 | (1 << INTEL_ANALOG_CLONE_BIT) | |
| 570 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 571 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
| 571 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 572 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
| 572 | connector->interlace_allowed = 0; | 573 | connector->interlace_allowed = 1; |
| 573 | connector->doublescan_allowed = 0; | 574 | connector->doublescan_allowed = 0; |
| 574 | 575 | ||
| 575 | drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); | 576 | drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f469a84cacfd..5e21b3119824 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -862,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
| 862 | intel_clock_t clock; | 862 | intel_clock_t clock; |
| 863 | int max_n; | 863 | int max_n; |
| 864 | bool found; | 864 | bool found; |
| 865 | /* approximately equals target * 0.00488 */ | 865 | /* approximately equals target * 0.00585 */ |
| 866 | int err_most = (target >> 8) + (target >> 10); | 866 | int err_most = (target >> 8) + (target >> 9); |
| 867 | found = false; | 867 | found = false; |
| 868 | 868 | ||
| 869 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 869 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
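
The shift arithmetic behind the new error bound: (target >> 8) + (target >> 10) is target * (1/256 + 1/1024) = 5/1024 ≈ 0.488% of target, while (target >> 8) + (target >> 9) is target * 3/512 ≈ 0.586%, which the updated comment rounds to 0.00585. A two-line check (the shifts truncate, so the results land a count or two below the exact fractions):

    #include <stdio.h>

    int main(void)
    {
        int target = 100000; /* arbitrary example clock, in kHz */
        int old_err = (target >> 8) + (target >> 10); /* 5/1024 ~ 0.488% */
        int new_err = (target >> 8) + (target >> 9);  /* 3/512  ~ 0.586% */
        printf("%d %d\n", old_err, new_err);          /* prints 487 585 */
        return 0;
    }
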
| @@ -1029,19 +1029,28 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 1029 | void i8xx_disable_fbc(struct drm_device *dev) | 1029 | void i8xx_disable_fbc(struct drm_device *dev) |
| 1030 | { | 1030 | { |
| 1031 | struct drm_i915_private *dev_priv = dev->dev_private; | 1031 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1032 | unsigned long timeout = jiffies + msecs_to_jiffies(1); | ||
| 1032 | u32 fbc_ctl; | 1033 | u32 fbc_ctl; |
| 1033 | 1034 | ||
| 1034 | if (!I915_HAS_FBC(dev)) | 1035 | if (!I915_HAS_FBC(dev)) |
| 1035 | return; | 1036 | return; |
| 1036 | 1037 | ||
| 1038 | if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) | ||
| 1039 | return; /* Already off, just return */ | ||
| 1040 | |||
| 1037 | /* Disable compression */ | 1041 | /* Disable compression */ |
| 1038 | fbc_ctl = I915_READ(FBC_CONTROL); | 1042 | fbc_ctl = I915_READ(FBC_CONTROL); |
| 1039 | fbc_ctl &= ~FBC_CTL_EN; | 1043 | fbc_ctl &= ~FBC_CTL_EN; |
| 1040 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1044 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
| 1041 | 1045 | ||
| 1042 | /* Wait for compressing bit to clear */ | 1046 | /* Wait for compressing bit to clear */ |
| 1043 | while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) | 1047 | while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) { |
| 1044 | ; /* nothing */ | 1048 | if (time_after(jiffies, timeout)) { |
| 1049 | DRM_DEBUG_DRIVER("FBC idle timed out\n"); | ||
| 1050 | break; | ||
| 1051 | } | ||
| 1052 | ; /* do nothing */ | ||
| 1053 | } | ||
| 1045 | 1054 | ||
| 1046 | intel_wait_for_vblank(dev); | 1055 | intel_wait_for_vblank(dev); |
| 1047 | 1056 | ||
| @@ -1171,8 +1180,12 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
| 1171 | struct drm_framebuffer *fb = crtc->fb; | 1180 | struct drm_framebuffer *fb = crtc->fb; |
| 1172 | struct intel_framebuffer *intel_fb; | 1181 | struct intel_framebuffer *intel_fb; |
| 1173 | struct drm_i915_gem_object *obj_priv; | 1182 | struct drm_i915_gem_object *obj_priv; |
| 1183 | struct drm_crtc *tmp_crtc; | ||
| 1174 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1184 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1175 | int plane = intel_crtc->plane; | 1185 | int plane = intel_crtc->plane; |
| 1186 | int crtcs_enabled = 0; | ||
| 1187 | |||
| 1188 | DRM_DEBUG_KMS("\n"); | ||
| 1176 | 1189 | ||
| 1177 | if (!i915_powersave) | 1190 | if (!i915_powersave) |
| 1178 | return; | 1191 | return; |
| @@ -1190,10 +1203,21 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
| 1190 | * If FBC is already on, we just have to verify that we can | 1203 | * If FBC is already on, we just have to verify that we can |
| 1191 | * keep it that way... | 1204 | * keep it that way... |
| 1192 | * Need to disable if: | 1205 | * Need to disable if: |
| 1206 | * - more than one pipe is active | ||
| 1193 | * - changing FBC params (stride, fence, mode) | 1207 | * - changing FBC params (stride, fence, mode) |
| 1194 | * - new fb is too large to fit in compressed buffer | 1208 | * - new fb is too large to fit in compressed buffer |
| 1195 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1209 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
| 1196 | */ | 1210 | */ |
| 1211 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | ||
| 1212 | if (tmp_crtc->enabled) | ||
| 1213 | crtcs_enabled++; | ||
| 1214 | } | ||
| 1215 | DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); | ||
| 1216 | if (crtcs_enabled > 1) { | ||
| 1217 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
| 1218 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | ||
| 1219 | goto out_disable; | ||
| 1220 | } | ||
| 1197 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1221 | if (intel_fb->obj->size > dev_priv->cfb_size) { |
| 1198 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1222 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
| 1199 | "compression\n"); | 1223 | "compression\n"); |
| @@ -1239,13 +1263,14 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
| 1239 | return; | 1263 | return; |
| 1240 | 1264 | ||
| 1241 | out_disable: | 1265 | out_disable: |
| 1242 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); | ||
| 1243 | /* Multiple disables should be harmless */ | 1266 | /* Multiple disables should be harmless */ |
| 1244 | if (intel_fbc_enabled(dev)) | 1267 | if (intel_fbc_enabled(dev)) { |
| 1268 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); | ||
| 1245 | intel_disable_fbc(dev); | 1269 | intel_disable_fbc(dev); |
| 1270 | } | ||
| 1246 | } | 1271 | } |
| 1247 | 1272 | ||
| 1248 | static int | 1273 | int |
| 1249 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1274 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) |
| 1250 | { | 1275 | { |
| 1251 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1276 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| @@ -1386,7 +1411,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 1386 | Start = obj_priv->gtt_offset; | 1411 | Start = obj_priv->gtt_offset; |
| 1387 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | 1412 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); |
| 1388 | 1413 | ||
| 1389 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); | 1414 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
| 1415 | Start, Offset, x, y, crtc->fb->pitch); | ||
| 1390 | I915_WRITE(dspstride, crtc->fb->pitch); | 1416 | I915_WRITE(dspstride, crtc->fb->pitch); |
| 1391 | if (IS_I965G(dev)) { | 1417 | if (IS_I965G(dev)) { |
| 1392 | I915_WRITE(dspbase, Offset); | 1418 | I915_WRITE(dspbase, Offset); |
| @@ -2244,6 +2270,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 2244 | intel_wait_for_vblank(dev); | 2270 | intel_wait_for_vblank(dev); |
| 2245 | } | 2271 | } |
| 2246 | 2272 | ||
| 2273 | /* Don't disable pipe A or pipe A's PLL when the pipe A force quirk is set */ | ||
| 2274 | if (pipeconf_reg == PIPEACONF && | ||
| 2275 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
| 2276 | goto skip_pipe_off; | ||
| 2277 | |||
| 2247 | /* Next, disable display pipes */ | 2278 | /* Next, disable display pipes */ |
| 2248 | temp = I915_READ(pipeconf_reg); | 2279 | temp = I915_READ(pipeconf_reg); |
| 2249 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2280 | if ((temp & PIPEACONF_ENABLE) != 0) { |
| @@ -2259,7 +2290,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 2259 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2290 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); |
| 2260 | I915_READ(dpll_reg); | 2291 | I915_READ(dpll_reg); |
| 2261 | } | 2292 | } |
| 2262 | 2293 | skip_pipe_off: | |
| 2263 | /* Wait for the clocks to turn off. */ | 2294 | /* Wait for the clocks to turn off. */ |
| 2264 | udelay(150); | 2295 | udelay(150); |
| 2265 | break; | 2296 | break; |
| @@ -2629,6 +2660,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
| 2629 | 2660 | ||
| 2630 | struct cxsr_latency { | 2661 | struct cxsr_latency { |
| 2631 | int is_desktop; | 2662 | int is_desktop; |
| 2663 | int is_ddr3; | ||
| 2632 | unsigned long fsb_freq; | 2664 | unsigned long fsb_freq; |
| 2633 | unsigned long mem_freq; | 2665 | unsigned long mem_freq; |
| 2634 | unsigned long display_sr; | 2666 | unsigned long display_sr; |
| @@ -2638,33 +2670,45 @@ struct cxsr_latency { | |||
| 2638 | }; | 2670 | }; |
| 2639 | 2671 | ||
| 2640 | static struct cxsr_latency cxsr_latency_table[] = { | 2672 | static struct cxsr_latency cxsr_latency_table[] = { |
| 2641 | {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ | 2673 | {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ |
| 2642 | {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ | 2674 | {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ |
| 2643 | {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ | 2675 | {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ |
| 2644 | 2676 | {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ | |
| 2645 | {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ | 2677 | {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ |
| 2646 | {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ | 2678 | |
| 2647 | {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ | 2679 | {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ |
| 2648 | 2680 | {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ | |
| 2649 | {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ | 2681 | {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ |
| 2650 | {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ | 2682 | {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ |
| 2651 | {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ | 2683 | {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ |
| 2652 | 2684 | ||
| 2653 | {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ | 2685 | {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ |
| 2654 | {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ | 2686 | {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ |
| 2655 | {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ | 2687 | {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ |
| 2656 | 2688 | {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ | |
| 2657 | {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ | 2689 | {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ |
| 2658 | {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ | 2690 | |
| 2659 | {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ | 2691 | {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ |
| 2660 | 2692 | {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ | |
| 2661 | {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ | 2693 | {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ |
| 2662 | {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ | 2694 | {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ |
| 2663 | {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ | 2695 | {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ |
| 2696 | |||
| 2697 | {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ | ||
| 2698 | {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ | ||
| 2699 | {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ | ||
| 2700 | {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ | ||
| 2701 | {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ | ||
| 2702 | |||
| 2703 | {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ | ||
| 2704 | {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ | ||
| 2705 | {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ | ||
| 2706 | {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ | ||
| 2707 | {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ | ||
| 2664 | }; | 2708 | }; |
| 2665 | 2709 | ||
| 2666 | static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, | 2710 | static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, |
| 2667 | int mem) | 2711 | int fsb, int mem) |
| 2668 | { | 2712 | { |
| 2669 | int i; | 2713 | int i; |
| 2670 | struct cxsr_latency *latency; | 2714 | struct cxsr_latency *latency; |
| @@ -2675,6 +2719,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, | |||
| 2675 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { | 2719 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { |
| 2676 | latency = &cxsr_latency_table[i]; | 2720 | latency = &cxsr_latency_table[i]; |
| 2677 | if (is_desktop == latency->is_desktop && | 2721 | if (is_desktop == latency->is_desktop && |
| 2722 | is_ddr3 == latency->is_ddr3 && | ||
| 2678 | fsb == latency->fsb_freq && mem == latency->mem_freq) | 2723 | fsb == latency->fsb_freq && mem == latency->mem_freq) |
| 2679 | return latency; | 2724 | return latency; |
| 2680 | } | 2725 | } |
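
intel_get_cxsr_latency() stays a linear scan; the patch just widens the key from (desktop, fsb, mem) to include is_ddr3, and the caller treats a NULL result as "unknown memory configuration, disable CxSR". The lookup in miniature, with two rows excerpted from the table above and the remaining latency columns dropped:

    #include <stdio.h>
    #include <stddef.h>

    struct cxsr_latency {
        int is_desktop, is_ddr3;
        unsigned long fsb_freq, mem_freq;
        unsigned long display_sr; /* other latency columns omitted */
    };

    static const struct cxsr_latency table[] = {
        {1, 0, 800, 400, 3382}, /* DDR2-400, desktop */
        {1, 1, 800, 667, 6420}, /* DDR3-667, desktop */
    };

    static const struct cxsr_latency *
    get_cxsr_latency(int is_desktop, int is_ddr3, unsigned long fsb,
                     unsigned long mem)
    {
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].is_desktop == is_desktop &&
                table[i].is_ddr3 == is_ddr3 &&
                table[i].fsb_freq == fsb && table[i].mem_freq == mem)
                return &table[i];
        return NULL; /* unknown combination: caller disables CxSR */
    }

    int main(void)
    {
        const struct cxsr_latency *l = get_cxsr_latency(1, 1, 800, 667);
        printf(l ? "display_sr=%lu\n" : "no match\n", l ? l->display_sr : 0);
        return 0;
    }
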
| @@ -2789,8 +2834,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
| 2789 | struct cxsr_latency *latency; | 2834 | struct cxsr_latency *latency; |
| 2790 | int sr_clock; | 2835 | int sr_clock; |
| 2791 | 2836 | ||
| 2792 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, | 2837 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, |
| 2793 | dev_priv->mem_freq); | 2838 | dev_priv->fsb_freq, dev_priv->mem_freq); |
| 2794 | if (!latency) { | 2839 | if (!latency) { |
| 2795 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | 2840 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); |
| 2796 | pineview_disable_cxsr(dev); | 2841 | pineview_disable_cxsr(dev); |
| @@ -2943,11 +2988,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
| 2943 | if (srwm < 0) | 2988 | if (srwm < 0) |
| 2944 | srwm = 1; | 2989 | srwm = 1; |
| 2945 | srwm &= 0x3f; | 2990 | srwm &= 0x3f; |
| 2946 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2991 | if (IS_I965GM(dev)) |
| 2992 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
| 2947 | } else { | 2993 | } else { |
| 2948 | /* Turn off self refresh if both pipes are enabled */ | 2994 | /* Turn off self refresh if both pipes are enabled */ |
| 2949 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | 2995 | if (IS_I965GM(dev)) |
| 2950 | & ~FW_BLC_SELF_EN); | 2996 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) |
| 2997 | & ~FW_BLC_SELF_EN); | ||
| 2951 | } | 2998 | } |
| 2952 | 2999 | ||
| 2953 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | 3000 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
| @@ -3626,6 +3673,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 3626 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | 3673 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; |
| 3627 | } | 3674 | } |
| 3628 | 3675 | ||
| 3676 | dspcntr |= DISPLAY_PLANE_ENABLE; | ||
| 3677 | pipeconf |= PIPEACONF_ENABLE; | ||
| 3678 | dpll |= DPLL_VCO_ENABLE; | ||
| 3679 | |||
| 3680 | |||
| 3629 | /* Disable the panel fitter if it was on our pipe */ | 3681 | /* Disable the panel fitter if it was on our pipe */ |
| 3630 | if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) | 3682 | if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) |
| 3631 | I915_WRITE(PFIT_CONTROL, 0); | 3683 | I915_WRITE(PFIT_CONTROL, 0); |
| @@ -3702,6 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 3702 | if (dev_priv->lvds_dither) { | 3754 | if (dev_priv->lvds_dither) { |
| 3703 | if (HAS_PCH_SPLIT(dev)) { | 3755 | if (HAS_PCH_SPLIT(dev)) { |
| 3704 | pipeconf |= PIPE_ENABLE_DITHER; | 3756 | pipeconf |= PIPE_ENABLE_DITHER; |
| 3757 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
| 3705 | pipeconf |= PIPE_DITHER_TYPE_ST01; | 3758 | pipeconf |= PIPE_DITHER_TYPE_ST01; |
| 3706 | } else | 3759 | } else |
| 3707 | lvds |= LVDS_ENABLE_DITHER; | 3760 | lvds |= LVDS_ENABLE_DITHER; |
| @@ -3772,6 +3825,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 3772 | } | 3825 | } |
| 3773 | } | 3826 | } |
| 3774 | 3827 | ||
| 3828 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | ||
| 3829 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | ||
| 3830 | /* the chip adds 2 halflines automatically */ | ||
| 3831 | adjusted_mode->crtc_vdisplay -= 1; | ||
| 3832 | adjusted_mode->crtc_vtotal -= 1; | ||
| 3833 | adjusted_mode->crtc_vblank_start -= 1; | ||
| 3834 | adjusted_mode->crtc_vblank_end -= 1; | ||
| 3835 | adjusted_mode->crtc_vsync_end -= 1; | ||
| 3836 | adjusted_mode->crtc_vsync_start -= 1; | ||
| 3837 | } else | ||
| 3838 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ | ||
| 3839 | |||
| 3775 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | 3840 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | |
| 3776 | ((adjusted_mode->crtc_htotal - 1) << 16)); | 3841 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
| 3777 | I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | | 3842 | I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | |
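
For interlaced modes the pipe inserts the two extra halflines itself, so each vertical timing is programmed one line short — e.g. a 1080i frame with vtotal 1125 is written as 1124. A sketch of the adjustment; the field names mirror adjusted_mode, and the sample numbers are approximate CEA 1080i timings:

    #include <stdio.h>

    struct vtimings {
        int vdisplay, vtotal, vblank_start, vblank_end, vsync_start, vsync_end;
    };

    /* Interlaced modes: the chip adds 2 halflines (one full line) on
     * its own, so back each programmed vertical value off by one. */
    static void interlace_adjust(struct vtimings *v)
    {
        v->vdisplay--;     v->vtotal--;
        v->vblank_start--; v->vblank_end--;
        v->vsync_start--;  v->vsync_end--;
    }

    int main(void)
    {
        /* roughly 1920x1080i@60 vertical timings, frame units */
        struct vtimings v = { 1080, 1125, 1080, 1125, 1084, 1094 };
        interlace_adjust(&v);
        printf("program vtotal %d (was 1125)\n", v.vtotal); /* 1124 */
        return 0;
    }
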
| @@ -3934,6 +3999,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 3934 | DRM_ERROR("failed to pin cursor bo\n"); | 3999 | DRM_ERROR("failed to pin cursor bo\n"); |
| 3935 | goto fail_locked; | 4000 | goto fail_locked; |
| 3936 | } | 4001 | } |
| 4002 | |||
| 4003 | ret = i915_gem_object_set_to_gtt_domain(bo, 0); | ||
| 4004 | if (ret) { | ||
| 4005 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | ||
| 4006 | goto fail_unpin; | ||
| 4007 | } | ||
| 4008 | |||
| 3937 | addr = obj_priv->gtt_offset; | 4009 | addr = obj_priv->gtt_offset; |
| 3938 | } else { | 4010 | } else { |
| 3939 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | 4011 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); |
| @@ -3977,6 +4049,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 3977 | intel_crtc->cursor_bo = bo; | 4049 | intel_crtc->cursor_bo = bo; |
| 3978 | 4050 | ||
| 3979 | return 0; | 4051 | return 0; |
| 4052 | fail_unpin: | ||
| 4053 | i915_gem_object_unpin(bo); | ||
| 3980 | fail_locked: | 4054 | fail_locked: |
| 3981 | mutex_unlock(&dev->struct_mutex); | 4055 | mutex_unlock(&dev->struct_mutex); |
| 3982 | fail: | 4056 | fail: |
| @@ -4357,7 +4431,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
| 4357 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 4431 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
| 4358 | 4432 | ||
| 4359 | /* Unlock panel regs */ | 4433 | /* Unlock panel regs */ |
| 4360 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4434 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
| 4435 | PANEL_UNLOCK_REGS); | ||
| 4361 | 4436 | ||
| 4362 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 4437 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
| 4363 | I915_WRITE(dpll_reg, dpll); | 4438 | I915_WRITE(dpll_reg, dpll); |
| @@ -4400,7 +4475,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
| 4400 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); | 4475 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
| 4401 | 4476 | ||
| 4402 | /* Unlock panel regs */ | 4477 | /* Unlock panel regs */ |
| 4403 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4478 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
| 4479 | PANEL_UNLOCK_REGS); | ||
| 4404 | 4480 | ||
| 4405 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 4481 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
| 4406 | I915_WRITE(dpll_reg, dpll); | 4482 | I915_WRITE(dpll_reg, dpll); |
| @@ -4430,27 +4506,31 @@ static void intel_idle_update(struct work_struct *work) | |||
| 4430 | struct drm_device *dev = dev_priv->dev; | 4506 | struct drm_device *dev = dev_priv->dev; |
| 4431 | struct drm_crtc *crtc; | 4507 | struct drm_crtc *crtc; |
| 4432 | struct intel_crtc *intel_crtc; | 4508 | struct intel_crtc *intel_crtc; |
| 4509 | int enabled = 0; | ||
| 4433 | 4510 | ||
| 4434 | if (!i915_powersave) | 4511 | if (!i915_powersave) |
| 4435 | return; | 4512 | return; |
| 4436 | 4513 | ||
| 4437 | mutex_lock(&dev->struct_mutex); | 4514 | mutex_lock(&dev->struct_mutex); |
| 4438 | 4515 | ||
| 4439 | if (IS_I945G(dev) || IS_I945GM(dev)) { | 4516 | i915_update_gfx_val(dev_priv); |
| 4440 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
| 4441 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
| 4442 | } | ||
| 4443 | 4517 | ||
| 4444 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4518 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 4445 | /* Skip inactive CRTCs */ | 4519 | /* Skip inactive CRTCs */ |
| 4446 | if (!crtc->fb) | 4520 | if (!crtc->fb) |
| 4447 | continue; | 4521 | continue; |
| 4448 | 4522 | ||
| 4523 | enabled++; | ||
| 4449 | intel_crtc = to_intel_crtc(crtc); | 4524 | intel_crtc = to_intel_crtc(crtc); |
| 4450 | if (!intel_crtc->busy) | 4525 | if (!intel_crtc->busy) |
| 4451 | intel_decrease_pllclock(crtc); | 4526 | intel_decrease_pllclock(crtc); |
| 4452 | } | 4527 | } |
| 4453 | 4528 | ||
| 4529 | if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { | ||
| 4530 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
| 4531 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
| 4532 | } | ||
| 4533 | |||
| 4454 | mutex_unlock(&dev->struct_mutex); | 4534 | mutex_unlock(&dev->struct_mutex); |
| 4455 | } | 4535 | } |
| 4456 | 4536 | ||
| @@ -4546,10 +4626,10 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
| 4546 | kfree(work); | 4626 | kfree(work); |
| 4547 | } | 4627 | } |
| 4548 | 4628 | ||
| 4549 | void intel_finish_page_flip(struct drm_device *dev, int pipe) | 4629 | static void do_intel_finish_page_flip(struct drm_device *dev, |
| 4630 | struct drm_crtc *crtc) | ||
| 4550 | { | 4631 | { |
| 4551 | drm_i915_private_t *dev_priv = dev->dev_private; | 4632 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 4552 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 4553 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4633 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 4554 | struct intel_unpin_work *work; | 4634 | struct intel_unpin_work *work; |
| 4555 | struct drm_i915_gem_object *obj_priv; | 4635 | struct drm_i915_gem_object *obj_priv; |
| @@ -4564,12 +4644,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
| 4564 | spin_lock_irqsave(&dev->event_lock, flags); | 4644 | spin_lock_irqsave(&dev->event_lock, flags); |
| 4565 | work = intel_crtc->unpin_work; | 4645 | work = intel_crtc->unpin_work; |
| 4566 | if (work == NULL || !work->pending) { | 4646 | if (work == NULL || !work->pending) { |
| 4567 | if (work && !work->pending) { | ||
| 4568 | obj_priv = to_intel_bo(work->pending_flip_obj); | ||
| 4569 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | ||
| 4570 | obj_priv, | ||
| 4571 | atomic_read(&obj_priv->pending_flip)); | ||
| 4572 | } | ||
| 4573 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4647 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 4574 | return; | 4648 | return; |
| 4575 | } | 4649 | } |
| @@ -4599,6 +4673,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
| 4599 | schedule_work(&work->work); | 4673 | schedule_work(&work->work); |
| 4600 | } | 4674 | } |
| 4601 | 4675 | ||
| 4676 | void intel_finish_page_flip(struct drm_device *dev, int pipe) | ||
| 4677 | { | ||
| 4678 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4679 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 4680 | |||
| 4681 | do_intel_finish_page_flip(dev, crtc); | ||
| 4682 | } | ||
| 4683 | |||
| 4684 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane) | ||
| 4685 | { | ||
| 4686 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4687 | struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; | ||
| 4688 | |||
| 4689 | do_intel_finish_page_flip(dev, crtc); | ||
| 4690 | } | ||
| 4691 | |||
| 4602 | void intel_prepare_page_flip(struct drm_device *dev, int plane) | 4692 | void intel_prepare_page_flip(struct drm_device *dev, int plane) |
| 4603 | { | 4693 | { |
| 4604 | drm_i915_private_t *dev_priv = dev->dev_private; | 4694 | drm_i915_private_t *dev_priv = dev->dev_private; |
| @@ -4626,17 +4716,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4626 | struct drm_gem_object *obj; | 4716 | struct drm_gem_object *obj; |
| 4627 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4717 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 4628 | struct intel_unpin_work *work; | 4718 | struct intel_unpin_work *work; |
| 4629 | unsigned long flags; | 4719 | unsigned long flags, offset; |
| 4630 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | 4720 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
| 4631 | int ret, pipesrc; | 4721 | int ret, pipesrc; |
| 4632 | RING_LOCALS; | 4722 | u32 flip_mask; |
| 4633 | 4723 | ||
| 4634 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4724 | work = kzalloc(sizeof *work, GFP_KERNEL); |
| 4635 | if (work == NULL) | 4725 | if (work == NULL) |
| 4636 | return -ENOMEM; | 4726 | return -ENOMEM; |
| 4637 | 4727 | ||
| 4638 | mutex_lock(&dev->struct_mutex); | ||
| 4639 | |||
| 4640 | work->event = event; | 4728 | work->event = event; |
| 4641 | work->dev = crtc->dev; | 4729 | work->dev = crtc->dev; |
| 4642 | intel_fb = to_intel_framebuffer(crtc->fb); | 4730 | intel_fb = to_intel_framebuffer(crtc->fb); |
| @@ -4646,10 +4734,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4646 | /* We borrow the event spin lock for protecting unpin_work */ | 4734 | /* We borrow the event spin lock for protecting unpin_work */ |
| 4647 | spin_lock_irqsave(&dev->event_lock, flags); | 4735 | spin_lock_irqsave(&dev->event_lock, flags); |
| 4648 | if (intel_crtc->unpin_work) { | 4736 | if (intel_crtc->unpin_work) { |
| 4649 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
| 4650 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4737 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 4651 | kfree(work); | 4738 | kfree(work); |
| 4652 | mutex_unlock(&dev->struct_mutex); | 4739 | |
| 4740 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
| 4653 | return -EBUSY; | 4741 | return -EBUSY; |
| 4654 | } | 4742 | } |
| 4655 | intel_crtc->unpin_work = work; | 4743 | intel_crtc->unpin_work = work; |
| @@ -4658,13 +4746,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4658 | intel_fb = to_intel_framebuffer(fb); | 4746 | intel_fb = to_intel_framebuffer(fb); |
| 4659 | obj = intel_fb->obj; | 4747 | obj = intel_fb->obj; |
| 4660 | 4748 | ||
| 4749 | mutex_lock(&dev->struct_mutex); | ||
| 4661 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4750 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
| 4662 | if (ret != 0) { | 4751 | if (ret != 0) { |
| 4663 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
| 4664 | to_intel_bo(obj)); | ||
| 4665 | kfree(work); | ||
| 4666 | intel_crtc->unpin_work = NULL; | ||
| 4667 | mutex_unlock(&dev->struct_mutex); | 4752 | mutex_unlock(&dev->struct_mutex); |
| 4753 | |||
| 4754 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 4755 | intel_crtc->unpin_work = NULL; | ||
| 4756 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 4757 | |||
| 4758 | kfree(work); | ||
| 4759 | |||
| 4760 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
| 4761 | to_intel_bo(obj)); | ||
| 4668 | return ret; | 4762 | return ret; |
| 4669 | } | 4763 | } |
| 4670 | 4764 | ||
| @@ -4679,16 +4773,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4679 | atomic_inc(&obj_priv->pending_flip); | 4773 | atomic_inc(&obj_priv->pending_flip); |
| 4680 | work->pending_flip_obj = obj; | 4774 | work->pending_flip_obj = obj; |
| 4681 | 4775 | ||
| 4776 | if (intel_crtc->plane) | ||
| 4777 | flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | ||
| 4778 | else | ||
| 4779 | flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; | ||
| 4780 | |||
| 4781 | /* Wait for any previous flip to finish */ | ||
| 4782 | if (IS_GEN3(dev)) | ||
| 4783 | while (I915_READ(ISR) & flip_mask) | ||
| 4784 | ; | ||
| 4785 | |||
| 4786 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
| 4787 | offset = obj_priv->gtt_offset; | ||
| 4788 | offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); | ||
| 4789 | |||
| 4682 | BEGIN_LP_RING(4); | 4790 | BEGIN_LP_RING(4); |
| 4683 | OUT_RING(MI_DISPLAY_FLIP | | ||
| 4684 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
| 4685 | OUT_RING(fb->pitch); | ||
| 4686 | if (IS_I965G(dev)) { | 4791 | if (IS_I965G(dev)) { |
| 4687 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 4792 | OUT_RING(MI_DISPLAY_FLIP | |
| 4793 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
| 4794 | OUT_RING(fb->pitch); | ||
| 4795 | OUT_RING(offset | obj_priv->tiling_mode); | ||
| 4688 | pipesrc = I915_READ(pipesrc_reg); | 4796 | pipesrc = I915_READ(pipesrc_reg); |
| 4689 | OUT_RING(pipesrc & 0x0fff0fff); | 4797 | OUT_RING(pipesrc & 0x0fff0fff); |
| 4690 | } else { | 4798 | } else { |
| 4691 | OUT_RING(obj_priv->gtt_offset); | 4799 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
| 4800 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
| 4801 | OUT_RING(fb->pitch); | ||
| 4802 | OUT_RING(offset); | ||
| 4692 | OUT_RING(MI_NOOP); | 4803 | OUT_RING(MI_NOOP); |
| 4693 | } | 4804 | } |
| 4694 | ADVANCE_LP_RING(); | 4805 | ADVANCE_LP_RING(); |
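
The flip commands above now add the CRTC's (x, y) panning offset to the object's GTT offset, so two CRTCs sharing one wide framebuffer each flip to their own region. The offset arithmetic worked through with example numbers (the pitch and depth are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned long gtt_offset = 0x100000; /* example placement */
        int pitch = 7680;                    /* 1920 px * 4 bytes */
        int bpp = 32, x = 1920, y = 0;       /* right-hand CRTC of a span */

        unsigned long offset = gtt_offset + y * pitch + x * (bpp / 8);
        printf("flip base = 0x%lx\n", offset); /* 0x100000 + 0x1e00 = 0x101e00 */
        return 0;
    }
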
| @@ -5023,10 +5134,32 @@ err_unref: | |||
| 5023 | return NULL; | 5134 | return NULL; |
| 5024 | } | 5135 | } |
| 5025 | 5136 | ||
| 5137 | bool ironlake_set_drps(struct drm_device *dev, u8 val) | ||
| 5138 | { | ||
| 5139 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5140 | u16 rgvswctl; | ||
| 5141 | |||
| 5142 | rgvswctl = I915_READ16(MEMSWCTL); | ||
| 5143 | if (rgvswctl & MEMCTL_CMD_STS) { | ||
| 5144 | DRM_DEBUG("gpu busy, RCS change rejected\n"); | ||
| 5145 | return false; /* still busy with another command */ | ||
| 5146 | } | ||
| 5147 | |||
| 5148 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | ||
| 5149 | (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | ||
| 5150 | I915_WRITE16(MEMSWCTL, rgvswctl); | ||
| 5151 | POSTING_READ16(MEMSWCTL); | ||
| 5152 | |||
| 5153 | rgvswctl |= MEMCTL_CMD_STS; | ||
| 5154 | I915_WRITE16(MEMSWCTL, rgvswctl); | ||
| 5155 | |||
| 5156 | return true; | ||
| 5157 | } | ||
| 5158 | |||
| 5026 | void ironlake_enable_drps(struct drm_device *dev) | 5159 | void ironlake_enable_drps(struct drm_device *dev) |
| 5027 | { | 5160 | { |
| 5028 | struct drm_i915_private *dev_priv = dev->dev_private; | 5161 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5029 | u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; | 5162 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
| 5030 | u8 fmax, fmin, fstart, vstart; | 5163 | u8 fmax, fmin, fstart, vstart; |
| 5031 | int i = 0; | 5164 | int i = 0; |
| 5032 | 5165 | ||
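
ironlake_set_drps() isolates the frequency-change handshake: bail if the last command's status bit is still set, write the CHFREQ command with the target frequency field, post the write with a readback, then set the status bit to launch the command. A stubbed sketch — the field layout follows the MEMSWCTL defines, and the register is modeled as a plain variable:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MEMCTL_CMD_CHFREQ  3u
    #define MEMCTL_CMD_SHIFT   13
    #define MEMCTL_CMD_STS     (1u << 12)
    #define MEMCTL_FREQ_SHIFT  8
    #define MEMCTL_SFCAVM      (1u << 7)

    static uint16_t memswctl; /* stand-in for the MEMSWCTL register */

    static bool set_drps(uint8_t val)
    {
        if (memswctl & MEMCTL_CMD_STS)
            return false; /* previous command still in flight */

        memswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
                   ((uint16_t)val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
        /* the driver reads the register back here to post the write */
        memswctl |= MEMCTL_CMD_STS; /* kick off the command */
        return true;
    }

    int main(void)
    {
        printf("accepted: %d\n", set_drps(6));
        printf("accepted: %d\n", set_drps(7)); /* rejected: STS still set */
        return 0;
    }
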
| @@ -5045,13 +5178,21 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
| 5045 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | 5178 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); |
| 5046 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | 5179 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> |
| 5047 | MEMMODE_FSTART_SHIFT; | 5180 | MEMMODE_FSTART_SHIFT; |
| 5181 | fstart = fmax; | ||
| 5182 | |||
| 5048 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> | 5183 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> |
| 5049 | PXVFREQ_PX_SHIFT; | 5184 | PXVFREQ_PX_SHIFT; |
| 5050 | 5185 | ||
| 5051 | dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ | 5186 | dev_priv->fmax = fstart; /* IPS callback will increase this */ |
| 5187 | dev_priv->fstart = fstart; | ||
| 5188 | |||
| 5189 | dev_priv->max_delay = fmax; | ||
| 5052 | dev_priv->min_delay = fmin; | 5190 | dev_priv->min_delay = fmin; |
| 5053 | dev_priv->cur_delay = fstart; | 5191 | dev_priv->cur_delay = fstart; |
| 5054 | 5192 | ||
| 5193 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, | ||
| 5194 | fstart); | ||
| 5195 | |||
| 5055 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); | 5196 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); |
| 5056 | 5197 | ||
| 5057 | /* | 5198 | /* |
| @@ -5073,20 +5214,19 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
| 5073 | } | 5214 | } |
| 5074 | msleep(1); | 5215 | msleep(1); |
| 5075 | 5216 | ||
| 5076 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | 5217 | ironlake_set_drps(dev, fstart); |
| 5077 | (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | ||
| 5078 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
| 5079 | POSTING_READ(MEMSWCTL); | ||
| 5080 | 5218 | ||
| 5081 | rgvswctl |= MEMCTL_CMD_STS; | 5219 | dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + |
| 5082 | I915_WRITE(MEMSWCTL, rgvswctl); | 5220 | I915_READ(0x112e0); |
| 5221 | dev_priv->last_time1 = jiffies_to_msecs(jiffies); | ||
| 5222 | dev_priv->last_count2 = I915_READ(0x112f4); | ||
| 5223 | getrawmonotonic(&dev_priv->last_time2); | ||
| 5083 | } | 5224 | } |
| 5084 | 5225 | ||
| 5085 | void ironlake_disable_drps(struct drm_device *dev) | 5226 | void ironlake_disable_drps(struct drm_device *dev) |
| 5086 | { | 5227 | { |
| 5087 | struct drm_i915_private *dev_priv = dev->dev_private; | 5228 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5088 | u32 rgvswctl; | 5229 | u16 rgvswctl = I915_READ16(MEMSWCTL); |
| 5089 | u8 fstart; | ||
| 5090 | 5230 | ||
| 5091 | /* Ack interrupts, disable EFC interrupt */ | 5231 | /* Ack interrupts, disable EFC interrupt */ |
| 5092 | I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); | 5232 | I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); |
| @@ -5096,11 +5236,7 @@ void ironlake_disable_drps(struct drm_device *dev) | |||
| 5096 | I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); | 5236 | I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); |
| 5097 | 5237 | ||
| 5098 | /* Go back to the starting frequency */ | 5238 | /* Go back to the starting frequency */ |
| 5099 | fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> | 5239 | ironlake_set_drps(dev, dev_priv->fstart); |
| 5100 | MEMMODE_FSTART_SHIFT; | ||
| 5101 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | ||
| 5102 | (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | ||
| 5103 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
| 5104 | msleep(1); | 5240 | msleep(1); |
| 5105 | rgvswctl |= MEMCTL_CMD_STS; | 5241 | rgvswctl |= MEMCTL_CMD_STS; |
| 5106 | I915_WRITE(MEMSWCTL, rgvswctl); | 5242 | I915_WRITE(MEMSWCTL, rgvswctl); |
| @@ -5108,6 +5244,92 @@ void ironlake_disable_drps(struct drm_device *dev) | |||
| 5108 | 5244 | ||
| 5109 | } | 5245 | } |
| 5110 | 5246 | ||
| 5247 | static unsigned long intel_pxfreq(u32 vidfreq) | ||
| 5248 | { | ||
| 5249 | unsigned long freq; | ||
| 5250 | int div = (vidfreq & 0x3f0000) >> 16; | ||
| 5251 | int post = (vidfreq & 0x3000) >> 12; | ||
| 5252 | int pre = (vidfreq & 0x7); | ||
| 5253 | |||
| 5254 | if (!pre) | ||
| 5255 | return 0; | ||
| 5256 | |||
| 5257 | freq = ((div * 133333) / ((1<<post) * pre)); | ||
| 5258 | |||
| 5259 | return freq; | ||
| 5260 | } | ||
| 5261 | |||
| 5262 | void intel_init_emon(struct drm_device *dev) | ||
| 5263 | { | ||
| 5264 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5265 | u32 lcfuse; | ||
| 5266 | u8 pxw[16]; | ||
| 5267 | int i; | ||
| 5268 | |||
| 5269 | /* Disable PMON while we program the event weights */ | ||
| 5270 | I915_WRITE(ECR, 0); | ||
| 5271 | POSTING_READ(ECR); | ||
| 5272 | |||
| 5273 | /* Program energy weights for various events */ | ||
| 5274 | I915_WRITE(SDEW, 0x15040d00); | ||
| 5275 | I915_WRITE(CSIEW0, 0x007f0000); | ||
| 5276 | I915_WRITE(CSIEW1, 0x1e220004); | ||
| 5277 | I915_WRITE(CSIEW2, 0x04000004); | ||
| 5278 | |||
| 5279 | for (i = 0; i < 5; i++) | ||
| 5280 | I915_WRITE(PEW + (i * 4), 0); | ||
| 5281 | for (i = 0; i < 3; i++) | ||
| 5282 | I915_WRITE(DEW + (i * 4), 0); | ||
| 5283 | |||
| 5284 | /* Program P-state weights to account for frequency power adjustment */ | ||
| 5285 | for (i = 0; i < 16; i++) { | ||
| 5286 | u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); | ||
| 5287 | unsigned long freq = intel_pxfreq(pxvidfreq); | ||
| 5288 | unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> | ||
| 5289 | PXVFREQ_PX_SHIFT; | ||
| 5290 | unsigned long val; | ||
| 5291 | |||
| 5292 | val = vid * vid; | ||
| 5293 | val *= (freq / 1000); | ||
| 5294 | val *= 255; | ||
| 5295 | val /= (127*127*900); | ||
| 5296 | if (val > 0xff) | ||
| 5297 | DRM_ERROR("bad pxval: %ld\n", val); | ||
| 5298 | pxw[i] = val; | ||
| 5299 | } | ||
| 5300 | /* Render standby states get 0 weight */ | ||
| 5301 | pxw[14] = 0; | ||
| 5302 | pxw[15] = 0; | ||
| 5303 | |||
| 5304 | for (i = 0; i < 4; i++) { | ||
| 5305 | u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | | ||
| 5306 | (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); | ||
| 5307 | I915_WRITE(PXW + (i * 4), val); | ||
| 5308 | } | ||
| 5309 | |||
| 5310 | /* Adjust magic regs to magic values (more experimental results) */ | ||
| 5311 | I915_WRITE(OGW0, 0); | ||
| 5312 | I915_WRITE(OGW1, 0); | ||
| 5313 | I915_WRITE(EG0, 0x00007f00); | ||
| 5314 | I915_WRITE(EG1, 0x0000000e); | ||
| 5315 | I915_WRITE(EG2, 0x000e0000); | ||
| 5316 | I915_WRITE(EG3, 0x68000300); | ||
| 5317 | I915_WRITE(EG4, 0x42000000); | ||
| 5318 | I915_WRITE(EG5, 0x00140031); | ||
| 5319 | I915_WRITE(EG6, 0); | ||
| 5320 | I915_WRITE(EG7, 0); | ||
| 5321 | |||
| 5322 | for (i = 0; i < 8; i++) | ||
| 5323 | I915_WRITE(PXWL + (i * 4), 0); | ||
| 5324 | |||
| 5325 | /* Enable PMON + select events */ | ||
| 5326 | I915_WRITE(ECR, 0x80000019); | ||
| 5327 | |||
| 5328 | lcfuse = I915_READ(LCFUSE02); | ||
| 5329 | |||
| 5330 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | ||
| 5331 | } | ||
| 5332 | |||
| 5111 | void intel_init_clock_gating(struct drm_device *dev) | 5333 | void intel_init_clock_gating(struct drm_device *dev) |
| 5112 | { | 5334 | { |
| 5113 | struct drm_i915_private *dev_priv = dev->dev_private; | 5335 | struct drm_i915_private *dev_priv = dev->dev_private; |
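
intel_pxfreq() unpacks a divider, a power-of-two post-divider, and a pre-divider from the PXVFREQ word against a 133.333 MHz reference, and intel_init_emon() then turns voltage ID squared times frequency into an 8-bit energy weight. Both computations in isolation; the sample register value is invented, and the voltage ID (really the word's high bits) is supplied separately:

    #include <stdio.h>

    static unsigned long pxfreq(unsigned int vidfreq)
    {
        int div  = (vidfreq & 0x3f0000) >> 16;
        int post = (vidfreq & 0x3000) >> 12;
        int pre  = vidfreq & 0x7;

        if (!pre)
            return 0;
        return (div * 133333UL) / ((1UL << post) * pre); /* kHz */
    }

    int main(void)
    {
        unsigned int vidfreq = 0x0c1041; /* hypothetical PXVFREQ value */
        unsigned long freq = pxfreq(vidfreq); /* ~800 MHz here */
        unsigned long vid = 0x41; /* voltage ID, normally from the high bits */
        unsigned long val;

        /* weight ~ vid^2 * freq, normalised to fit a byte */
        val = vid * vid * (freq / 1000) * 255 / (127 * 127 * 900);
        printf("freq=%lu kHz weight=%lu\n", freq, val);
        return 0;
    }
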
| @@ -5277,11 +5499,13 @@ static void intel_init_display(struct drm_device *dev) | |||
| 5277 | dev_priv->display.update_wm = NULL; | 5499 | dev_priv->display.update_wm = NULL; |
| 5278 | } else if (IS_PINEVIEW(dev)) { | 5500 | } else if (IS_PINEVIEW(dev)) { |
| 5279 | if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), | 5501 | if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), |
| 5502 | dev_priv->is_ddr3, | ||
| 5280 | dev_priv->fsb_freq, | 5503 | dev_priv->fsb_freq, |
| 5281 | dev_priv->mem_freq)) { | 5504 | dev_priv->mem_freq)) { |
| 5282 | DRM_INFO("failed to find known CxSR latency " | 5505 | DRM_INFO("failed to find known CxSR latency " |
| 5283 | "(found fsb freq %d, mem freq %d), " | 5506 | "(found ddr%s fsb freq %d, mem freq %d), " |
| 5284 | "disabling CxSR\n", | 5507 | "disabling CxSR\n", |
| 5508 | (dev_priv->is_ddr3 == 1) ? "3": "2", | ||
| 5285 | dev_priv->fsb_freq, dev_priv->mem_freq); | 5509 | dev_priv->fsb_freq, dev_priv->mem_freq); |
| 5286 | /* Disable CxSR and never update its watermark again */ | 5510 | /* Disable CxSR and never update its watermark again */ |
| 5287 | pineview_disable_cxsr(dev); | 5511 | pineview_disable_cxsr(dev); |
| @@ -5307,10 +5531,69 @@ static void intel_init_display(struct drm_device *dev) | |||
| 5307 | } | 5531 | } |
| 5308 | } | 5532 | } |
| 5309 | 5533 | ||
| 5534 | /* | ||
| 5535 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, | ||
| 5536 | * resume, or other times. This quirk makes sure that's the case for | ||
| 5537 | * affected systems. | ||
| 5538 | */ | ||
| 5539 | static void quirk_pipea_force (struct drm_device *dev) | ||
| 5540 | { | ||
| 5541 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5542 | |||
| 5543 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; | ||
| 5544 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); | ||
| 5545 | } | ||
| 5546 | |||
| 5547 | struct intel_quirk { | ||
| 5548 | int device; | ||
| 5549 | int subsystem_vendor; | ||
| 5550 | int subsystem_device; | ||
| 5551 | void (*hook)(struct drm_device *dev); | ||
| 5552 | }; | ||
| 5553 | |||
| 5554 | struct intel_quirk intel_quirks[] = { | ||
| 5555 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ | ||
| 5556 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, | ||
| 5557 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | ||
| 5558 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, | ||
| 5559 | |||
| 5560 | /* ThinkPad R31 needs pipe A force quirk */ | ||
| 5561 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, | ||
| 5562 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ | ||
| 5563 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, | ||
| 5564 | |||
| 5565 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ | ||
| 5566 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, | ||
| 5567 | /* ThinkPad X40 needs pipe A force quirk */ | ||
| 5568 | |||
| 5569 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ | ||
| 5570 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, | ||
| 5571 | |||
| 5572 | /* 855 & before need to leave pipe A & dpll A up */ | ||
| 5573 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
| 5574 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
| 5575 | }; | ||
| 5576 | |||
| 5577 | static void intel_init_quirks(struct drm_device *dev) | ||
| 5578 | { | ||
| 5579 | struct pci_dev *d = dev->pdev; | ||
| 5580 | int i; | ||
| 5581 | |||
| 5582 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | ||
| 5583 | struct intel_quirk *q = &intel_quirks[i]; | ||
| 5584 | |||
| 5585 | if (d->device == q->device && | ||
| 5586 | (d->subsystem_vendor == q->subsystem_vendor || | ||
| 5587 | q->subsystem_vendor == PCI_ANY_ID) && | ||
| 5588 | (d->subsystem_device == q->subsystem_device || | ||
| 5589 | q->subsystem_device == PCI_ANY_ID)) | ||
| 5590 | q->hook(dev); | ||
| 5591 | } | ||
| 5592 | } | ||
| 5593 | |||
| 5310 | void intel_modeset_init(struct drm_device *dev) | 5594 | void intel_modeset_init(struct drm_device *dev) |
| 5311 | { | 5595 | { |
| 5312 | struct drm_i915_private *dev_priv = dev->dev_private; | 5596 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5313 | int num_pipe; | ||
| 5314 | int i; | 5597 | int i; |
| 5315 | 5598 | ||
| 5316 | drm_mode_config_init(dev); | 5599 | drm_mode_config_init(dev); |
| @@ -5320,6 +5603,8 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 5320 | 5603 | ||
| 5321 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | 5604 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
| 5322 | 5605 | ||
| 5606 | intel_init_quirks(dev); | ||
| 5607 | |||
| 5323 | intel_init_display(dev); | 5608 | intel_init_display(dev); |
| 5324 | 5609 | ||
| 5325 | if (IS_I965G(dev)) { | 5610 | if (IS_I965G(dev)) { |
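
intel_init_quirks() is a linear match of the PCI IDs against the table, with PCI_ANY_ID wildcarding the subsystem fields — that is how the two 855-and-earlier rows catch every board. The match reduced to a standalone check with a two-row table excerpted from the one above:

    #include <stdio.h>

    #define PCI_ANY_ID (~0)

    struct quirk {
        int device, subsystem_vendor, subsystem_device;
        const char *name;
    };

    static const struct quirk quirks[] = {
        { 0x2a42, 0x103c, 0x30eb, "HP Compaq 2730p pipe A force" },
        { 0x3582, PCI_ANY_ID, PCI_ANY_ID, "855 pipe A force" },
    };

    static int quirk_matches(const struct quirk *q, int dev, int sv, int sd)
    {
        return dev == q->device &&
               (sv == q->subsystem_vendor || q->subsystem_vendor == PCI_ANY_ID) &&
               (sd == q->subsystem_device || q->subsystem_device == PCI_ANY_ID);
    }

    int main(void)
    {
        int dev = 0x3582, sv = 0x1028, sd = 0x0123; /* arbitrary 855 board */
        size_t i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
            if (quirk_matches(&quirks[i], dev, sv, sd))
                printf("applying: %s\n", quirks[i].name);
        return 0;
    }
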
| @@ -5340,13 +5625,13 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 5340 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | 5625 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); |
| 5341 | 5626 | ||
| 5342 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | 5627 | if (IS_MOBILE(dev) || IS_I9XX(dev)) |
| 5343 | num_pipe = 2; | 5628 | dev_priv->num_pipe = 2; |
| 5344 | else | 5629 | else |
| 5345 | num_pipe = 1; | 5630 | dev_priv->num_pipe = 1; |
| 5346 | DRM_DEBUG_KMS("%d display pipe%s available.\n", | 5631 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
| 5347 | num_pipe, num_pipe > 1 ? "s" : ""); | 5632 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
| 5348 | 5633 | ||
| 5349 | for (i = 0; i < num_pipe; i++) { | 5634 | for (i = 0; i < dev_priv->num_pipe; i++) { |
| 5350 | intel_crtc_init(dev, i); | 5635 | intel_crtc_init(dev, i); |
| 5351 | } | 5636 | } |
| 5352 | 5637 | ||
| @@ -5354,8 +5639,10 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 5354 | 5639 | ||
| 5355 | intel_init_clock_gating(dev); | 5640 | intel_init_clock_gating(dev); |
| 5356 | 5641 | ||
| 5357 | if (IS_IRONLAKE_M(dev)) | 5642 | if (IS_IRONLAKE_M(dev)) { |
| 5358 | ironlake_enable_drps(dev); | 5643 | ironlake_enable_drps(dev); |
| 5644 | intel_init_emon(dev); | ||
| 5645 | } | ||
| 5359 | 5646 | ||
| 5360 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 5647 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
| 5361 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 5648 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6b1c9a27c27a..5dde80f9e652 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -136,6 +136,12 @@ intel_dp_link_required(struct drm_device *dev, | |||
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | static int | 138 | static int |
| 139 | intel_dp_max_data_rate(int max_link_clock, int max_lanes) | ||
| 140 | { | ||
| 141 | return (max_link_clock * max_lanes * 8) / 10; | ||
| 142 | } | ||
| 143 | |||
| 144 | static int | ||
| 139 | intel_dp_mode_valid(struct drm_connector *connector, | 145 | intel_dp_mode_valid(struct drm_connector *connector, |
| 140 | struct drm_display_mode *mode) | 146 | struct drm_display_mode *mode) |
| 141 | { | 147 | { |
| @@ -144,8 +150,11 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
| 144 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); | 150 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
| 145 | int max_lanes = intel_dp_max_lane_count(intel_encoder); | 151 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
| 146 | 152 | ||
| 147 | if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) | 153 | /* only refuse the mode on non-eDP since we have seen some weird eDP panels |
| 148 | > max_link_clock * max_lanes) | 154 | which are outside spec tolerances but somehow work by magic */ |
| 155 | if (!IS_eDP(intel_encoder) && | ||
| 156 | (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) | ||
| 157 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | ||
| 149 | return MODE_CLOCK_HIGH; | 158 | return MODE_CLOCK_HIGH; |
| 150 | 159 | ||
| 151 | if (mode->clock < 10000) | 160 | if (mode->clock < 10000) |
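
intel_dp_max_data_rate() encodes the 8b/10b overhead: only eight of every ten bits on the wire are payload, so a 2.7 GHz link across four lanes moves 2.7 × 4 × 0.8 = 8.64 Gb/s of pixel data. In the driver's units (link clock in kHz, one byte per lane per clock), checked against a 1920x1200@60 mode at 3 bytes per pixel, mirroring intel_dp_link_required() for 24 bpp:

    #include <stdio.h>

    /* 8b/10b coding: each 10-bit symbol carries one payload byte, so
     * the usable rate is link_clock * lanes * 8 / 10 (same units). */
    static int dp_max_data_rate(int max_link_clock, int max_lanes)
    {
        return (max_link_clock * max_lanes * 8) / 10;
    }

    int main(void)
    {
        /* 2.7 GHz link (270000 in the driver's kHz units), 4 lanes */
        int avail = dp_max_data_rate(270000, 4);

        /* 1920x1200@60: ~154 MHz pixel clock, 3 bytes per pixel */
        int required = 154000 * 3;

        printf("avail %d, required %d -> %s\n", avail, required,
               required <= avail ? "fits" : "too fast");
        return 0;
    }
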
| @@ -506,7 +515,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 506 | 515 | ||
| 507 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 516 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
| 508 | for (clock = 0; clock <= max_clock; clock++) { | 517 | for (clock = 0; clock <= max_clock; clock++) { |
| 509 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 518 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
| 510 | 519 | ||
| 511 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) | 520 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) |
| 512 | <= link_avail) { | 521 | <= link_avail) { |
| @@ -521,6 +530,18 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 521 | } | 530 | } |
| 522 | } | 531 | } |
| 523 | } | 532 | } |
| 533 | |||
| 534 | if (IS_eDP(intel_encoder)) { | ||
| 535 | /* okay, we failed; just pick the highest */ | ||
| 536 | dp_priv->lane_count = max_lane_count; | ||
| 537 | dp_priv->link_bw = bws[max_clock]; | ||
| 538 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | ||
| 539 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
| 540 | "count %d clock %d\n", | ||
| 541 | dp_priv->link_bw, dp_priv->lane_count, | ||
| 542 | adjusted_mode->clock); | ||
| 543 | return true; | ||
| 544 | } | ||
| 524 | return false; | 545 | return false; |
| 525 | } | 546 | } |
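
The fixup loop above searches lane counts 1, 2, 4 and then link clocks from low to high, taking the cheapest configuration whose capacity covers the mode; the new eDP branch forces the maximum configuration when nothing fits on paper. The search as a standalone function, with bandwidth units as in the previous sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* usable payload rate after 8b/10b, driver units */
    static int rate(int link_clock, int lanes)
    {
        return link_clock * lanes * 8 / 10;
    }

    /* Pick the smallest lane-count/clock pair that carries `required`;
     * for eDP fall back to the maximum instead of failing, matching
     * the patch's "some panels work outside the limits" escape. */
    static bool pick_link(int required, bool is_edp, int *clock, int *lanes)
    {
        static const int bws[] = { 162000, 270000 };
        int l, i;

        for (l = 1; l <= 4; l <<= 1)
            for (i = 0; i < 2; i++)
                if (required <= rate(bws[i], l)) {
                    *clock = bws[i];
                    *lanes = l;
                    return true;
                }
        if (is_edp) { /* force the highest configuration anyway */
            *clock = bws[1];
            *lanes = 4;
            return true;
        }
        return false;
    }

    int main(void)
    {
        int clock, lanes;
        if (pick_link(154000 * 3, false, &clock, &lanes))
            printf("clock %d, %d lanes\n", clock, lanes); /* 162000, 4 */
        return 0;
    }
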
| 526 | 547 | ||
| @@ -576,7 +597,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 576 | struct intel_encoder *intel_encoder; | 597 | struct intel_encoder *intel_encoder; |
| 577 | struct intel_dp_priv *dp_priv; | 598 | struct intel_dp_priv *dp_priv; |
| 578 | 599 | ||
| 579 | if (!encoder || encoder->crtc != crtc) | 600 | if (encoder->crtc != crtc) |
| 580 | continue; | 601 | continue; |
| 581 | 602 | ||
| 582 | intel_encoder = enc_to_intel_encoder(encoder); | 603 | intel_encoder = enc_to_intel_encoder(encoder); |
| @@ -675,10 +696,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 675 | dp_priv->link_configuration[1] = dp_priv->lane_count; | 696 | dp_priv->link_configuration[1] = dp_priv->lane_count; |
| 676 | 697 | ||
| 677 | /* | 698 | /* |
| 678 | * Check for DPCD version > 1.1, | 699 | * Check for DPCD version > 1.1 and enhanced framing support |
| 679 | * enable enahanced frame stuff in that case | ||
| 680 | */ | 700 | */ |
| 681 | if (dp_priv->dpcd[0] >= 0x11) { | 701 | if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { |
| 682 | dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 702 | dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
| 683 | dp_priv->DP |= DP_ENHANCED_FRAMING; | 703 | dp_priv->DP |= DP_ENHANCED_FRAMING; |
| 684 | } | 704 | } |
| @@ -697,6 +717,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 697 | } | 717 | } |
| 698 | } | 718 | } |
| 699 | 719 | ||
| 720 | static void ironlake_edp_panel_on(struct drm_device *dev) | ||
| 721 | { | ||
| 722 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 723 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
| 724 | u32 pp, pp_status; | ||
| 725 | |||
| 726 | pp_status = I915_READ(PCH_PP_STATUS); | ||
| 727 | if (pp_status & PP_ON) | ||
| 728 | return; | ||
| 729 | |||
| 730 | pp = I915_READ(PCH_PP_CONTROL); | ||
| 731 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | ||
| 732 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 733 | do { | ||
| 734 | pp_status = I915_READ(PCH_PP_STATUS); | ||
| 735 | } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout)); | ||
| 736 | |||
| 737 | if (time_after(jiffies, timeout)) | ||
| 738 | DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); | ||
| 739 | |||
| 740 | pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); | ||
| 741 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 742 | } | ||
| 743 | |||
| 744 | static void ironlake_edp_panel_off(struct drm_device *dev) | ||
| 745 | { | ||
| 746 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 747 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
| 748 | u32 pp, pp_status; | ||
| 749 | |||
| 750 | pp = I915_READ(PCH_PP_CONTROL); | ||
| 751 | pp &= ~POWER_TARGET_ON; | ||
| 752 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 753 | do { | ||
| 754 | pp_status = I915_READ(PCH_PP_STATUS); | ||
| 755 | } while ((pp_status & PP_ON) && !time_after(jiffies, timeout)); | ||
| 756 | |||
| 757 | if (time_after(jiffies, timeout)) | ||
| 758 | DRM_DEBUG_KMS("panel off wait timed out\n"); | ||
| 759 | |||
| 760 | /* Make sure VDD is enabled so DP AUX will work */ | ||
| 761 | pp |= EDP_FORCE_VDD; | ||
| 762 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
| 763 | } | ||
| 764 | |||
| 700 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 765 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
| 701 | { | 766 | { |
| 702 | struct drm_i915_private *dev_priv = dev->dev_private; | 767 | struct drm_i915_private *dev_priv = dev->dev_private; |
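The two panel helpers above share one idiom: write the control register, then poll the status register until the PP_ON bit reaches the desired state or a 5-second jiffies deadline expires, checking time_after() once more afterwards to report a timeout. A userspace sketch of the same deadline loop, purely illustrative (wait_for_status() and read_status() are made-up names, and a monotonic clock stands in for jiffies):

	#include <stdbool.h>
	#include <time.h>

	static long long now_ms(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
	}

	/* Poll read_status() until it returns true or timeout_ms elapses;
	 * read_status() stands in for the PCH_PP_STATUS & PP_ON test. */
	static bool wait_for_status(bool (*read_status)(void), int timeout_ms)
	{
		long long deadline = now_ms() + timeout_ms;
		bool on;

		do {
			on = read_status();
		} while (!on && now_ms() <= deadline);

		return on; /* false mirrors the "wait timed out" debug path */
	}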
| @@ -731,14 +796,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
| 731 | if (mode != DRM_MODE_DPMS_ON) { | 796 | if (mode != DRM_MODE_DPMS_ON) { |
| 732 | if (dp_reg & DP_PORT_EN) { | 797 | if (dp_reg & DP_PORT_EN) { |
| 733 | intel_dp_link_down(intel_encoder, dp_priv->DP); | 798 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
| 734 | if (IS_eDP(intel_encoder)) | 799 | if (IS_eDP(intel_encoder)) { |
| 735 | ironlake_edp_backlight_off(dev); | 800 | ironlake_edp_backlight_off(dev); |
| 801 | ironlake_edp_panel_off(dev); | ||
| 802 | } | ||
| 736 | } | 803 | } |
| 737 | } else { | 804 | } else { |
| 738 | if (!(dp_reg & DP_PORT_EN)) { | 805 | if (!(dp_reg & DP_PORT_EN)) { |
| 739 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); | 806 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
| 740 | if (IS_eDP(intel_encoder)) | 807 | if (IS_eDP(intel_encoder)) { |
| 808 | ironlake_edp_panel_on(dev); | ||
| 741 | ironlake_edp_backlight_on(dev); | 809 | ironlake_edp_backlight_on(dev); |
| 810 | } | ||
| 742 | } | 811 | } |
| 743 | } | 812 | } |
| 744 | dp_priv->dpms_mode = mode; | 813 | dp_priv->dpms_mode = mode; |
| @@ -1208,6 +1277,8 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
| 1208 | if (dp_priv->dpcd[0] != 0) | 1277 | if (dp_priv->dpcd[0] != 0) |
| 1209 | status = connector_status_connected; | 1278 | status = connector_status_connected; |
| 1210 | } | 1279 | } |
| 1280 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], | ||
| 1281 | dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); | ||
| 1211 | return status; | 1282 | return status; |
| 1212 | } | 1283 | } |
| 1213 | 1284 | ||
| @@ -1352,7 +1423,7 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc) | |||
| 1352 | struct intel_encoder *intel_encoder = NULL; | 1423 | struct intel_encoder *intel_encoder = NULL; |
| 1353 | 1424 | ||
| 1354 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 1425 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
| 1355 | if (!encoder || encoder->crtc != crtc) | 1426 | if (encoder->crtc != crtc) |
| 1356 | continue; | 1427 | continue; |
| 1357 | 1428 | ||
| 1358 | intel_encoder = enc_to_intel_encoder(encoder); | 1429 | intel_encoder = enc_to_intel_encoder(encoder); |
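intel_dp_max_data_rate(), introduced at the top of this file, folds the DisplayPort 8b/10b channel coding into the bandwidth check: each lane carries one 10-bit symbol per link clock tick, only 8 bits of which are payload, so the usable rate is link_clock * lanes * 8/10. A standalone sketch of the arithmetic (values in kHz, matching the 162000/270000 constants intel_dp_link_clock() returns):

	#include <assert.h>

	/* Same arithmetic as the new helper: 8 payload bits per 10-bit symbol. */
	static int max_data_rate(int max_link_clock, int max_lanes)
	{
		return (max_link_clock * max_lanes * 8) / 10;
	}

	int main(void)
	{
		assert(max_data_rate(162000, 1) == 129600); /* 1.62 GHz, 1 lane */
		assert(max_data_rate(270000, 4) == 864000); /* 2.7 GHz, 4 lanes */
		return 0;
	}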
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index df931f787665..2f7970be9051 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -215,6 +215,9 @@ extern void intel_init_clock_gating(struct drm_device *dev); | |||
| 215 | extern void ironlake_enable_drps(struct drm_device *dev); | 215 | extern void ironlake_enable_drps(struct drm_device *dev); |
| 216 | extern void ironlake_disable_drps(struct drm_device *dev); | 216 | extern void ironlake_disable_drps(struct drm_device *dev); |
| 217 | 217 | ||
| 218 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | ||
| 219 | struct drm_gem_object *obj); | ||
| 220 | |||
| 218 | extern int intel_framebuffer_init(struct drm_device *dev, | 221 | extern int intel_framebuffer_init(struct drm_device *dev, |
| 219 | struct intel_framebuffer *ifb, | 222 | struct intel_framebuffer *ifb, |
| 220 | struct drm_mode_fb_cmd *mode_cmd, | 223 | struct drm_mode_fb_cmd *mode_cmd, |
| @@ -224,6 +227,7 @@ extern void intel_fbdev_fini(struct drm_device *dev); | |||
| 224 | 227 | ||
| 225 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); | 228 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
| 226 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); | 229 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); |
| 230 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | ||
| 227 | 231 | ||
| 228 | extern void intel_setup_overlay(struct drm_device *dev); | 232 | extern void intel_setup_overlay(struct drm_device *dev); |
| 229 | extern void intel_cleanup_overlay(struct drm_device *dev); | 233 | extern void intel_cleanup_overlay(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 6f53cf7fbc50..3e18c9e7729b 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -98,14 +98,18 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
| 98 | 98 | ||
| 99 | mutex_lock(&dev->struct_mutex); | 99 | mutex_lock(&dev->struct_mutex); |
| 100 | 100 | ||
| 101 | ret = i915_gem_object_pin(fbo, 64*1024); | 101 | ret = intel_pin_and_fence_fb_obj(dev, fbo); |
| 102 | if (ret) { | 102 | if (ret) { |
| 103 | DRM_ERROR("failed to pin fb: %d\n", ret); | 103 | DRM_ERROR("failed to pin fb: %d\n", ret); |
| 104 | goto out_unref; | 104 | goto out_unref; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | /* Flush everything out, we'll be doing GTT only from now on */ | 107 | /* Flush everything out, we'll be doing GTT only from now on */ |
| 108 | i915_gem_object_set_to_gtt_domain(fbo, 1); | 108 | ret = i915_gem_object_set_to_gtt_domain(fbo, 1); |
| 109 | if (ret) { | ||
| 110 | DRM_ERROR("failed to bind fb: %d.\n", ret); | ||
| 111 | goto out_unpin; | ||
| 112 | } | ||
| 109 | 113 | ||
| 110 | info = framebuffer_alloc(0, device); | 114 | info = framebuffer_alloc(0, device); |
| 111 | if (!info) { | 115 | if (!info) { |
| @@ -232,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
| 232 | 236 | ||
| 233 | drm_framebuffer_cleanup(&ifb->base); | 237 | drm_framebuffer_cleanup(&ifb->base); |
| 234 | if (ifb->obj) | 238 | if (ifb->obj) |
| 235 | drm_gem_object_unreference_unlocked(ifb->obj); | 239 | drm_gem_object_unreference(ifb->obj); |
| 236 | 240 | ||
| 237 | return 0; | 241 | return 0; |
| 238 | } | 242 | } |
| @@ -241,6 +245,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
| 241 | { | 245 | { |
| 242 | struct intel_fbdev *ifbdev; | 246 | struct intel_fbdev *ifbdev; |
| 243 | drm_i915_private_t *dev_priv = dev->dev_private; | 247 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 248 | int ret; | ||
| 244 | 249 | ||
| 245 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); | 250 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); |
| 246 | if (!ifbdev) | 251 | if (!ifbdev) |
| @@ -249,8 +254,13 @@ int intel_fbdev_init(struct drm_device *dev) | |||
| 249 | dev_priv->fbdev = ifbdev; | 254 | dev_priv->fbdev = ifbdev; |
| 250 | ifbdev->helper.funcs = &intel_fb_helper_funcs; | 255 | ifbdev->helper.funcs = &intel_fb_helper_funcs; |
| 251 | 256 | ||
| 252 | drm_fb_helper_init(dev, &ifbdev->helper, 2, | 257 | ret = drm_fb_helper_init(dev, &ifbdev->helper, |
| 253 | INTELFB_CONN_LIMIT); | 258 | dev_priv->num_pipe, |
| 259 | INTELFB_CONN_LIMIT); | ||
| 260 | if (ret) { | ||
| 261 | kfree(ifbdev); | ||
| 262 | return ret; | ||
| 263 | } | ||
| 254 | 264 | ||
| 255 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); | 265 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); |
| 256 | drm_fb_helper_initial_config(&ifbdev->helper, 32); | 266 | drm_fb_helper_initial_config(&ifbdev->helper, 32); |
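Both intel_fb.c fixes above follow the standard kernel unwind pattern: check every return value, and on failure release exactly the resources acquired so far (unpin on a failed GTT bind, kfree(ifbdev) on a failed helper init). A skeletal sketch of that pattern, with placeholder names rather than the real i915 calls:

	#include <stdio.h>

	/* Placeholder acquire/release steps; acquire_b() fails on purpose. */
	static int acquire_a(void) { return 0; }  /* think: intel_pin_and_fence_fb_obj() */
	static int acquire_b(void) { return -1; } /* think: i915_gem_object_set_to_gtt_domain() */
	static void release_a(void) { puts("releasing a"); }

	static int setup(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			goto out;

		ret = acquire_b();
		if (ret)
			goto out_release_a; /* the new "goto out_unpin" path */

		return 0;

	out_release_a:
		release_a();
	out:
		return ret;
	}

	int main(void)
	{
		return setup() ? 1 : 0;
	}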
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 65727f0a79a3..83bd764b000e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -59,8 +59,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
| 59 | SDVO_VSYNC_ACTIVE_HIGH | | 59 | SDVO_VSYNC_ACTIVE_HIGH | |
| 60 | SDVO_HSYNC_ACTIVE_HIGH; | 60 | SDVO_HSYNC_ACTIVE_HIGH; |
| 61 | 61 | ||
| 62 | if (hdmi_priv->has_hdmi_sink) | 62 | if (hdmi_priv->has_hdmi_sink) { |
| 63 | sdvox |= SDVO_AUDIO_ENABLE; | 63 | sdvox |= SDVO_AUDIO_ENABLE; |
| 64 | if (HAS_PCH_CPT(dev)) | ||
| 65 | sdvox |= HDMI_MODE_SELECT; | ||
| 66 | } | ||
| 64 | 67 | ||
| 65 | if (intel_crtc->pipe == 1) { | 68 | if (intel_crtc->pipe == 1) { |
| 66 | if (HAS_PCH_CPT(dev)) | 69 | if (HAS_PCH_CPT(dev)) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6a1accd83aec..0eab8df5bf7e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -599,6 +599,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
| 599 | return 0; | 599 | return 0; |
| 600 | } | 600 | } |
| 601 | 601 | ||
| 602 | static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) | ||
| 603 | { | ||
| 604 | DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); | ||
| 605 | return 1; | ||
| 606 | } | ||
| 607 | |||
| 608 | /* The GPU hangs on these systems if a modeset is performed on lid open */ | ||
| 609 | static const struct dmi_system_id intel_no_modeset_on_lid[] = { | ||
| 610 | { | ||
| 611 | .callback = intel_no_modeset_on_lid_dmi_callback, | ||
| 612 | .ident = "Toshiba Tecra A11", | ||
| 613 | .matches = { | ||
| 614 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
| 615 | DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), | ||
| 616 | }, | ||
| 617 | }, | ||
| 618 | |||
| 619 | { } /* terminating entry */ | ||
| 620 | }; | ||
| 621 | |||
| 602 | /* | 622 | /* |
| 603 | * Lid events. Note the use of 'modeset_on_lid': | 623 | * Lid events. Note the use of 'modeset_on_lid': |
| 604 | * - we set it on lid close, and reset it on open | 624 | * - we set it on lid close, and reset it on open |
| @@ -622,6 +642,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
| 622 | */ | 642 | */ |
| 623 | if (connector) | 643 | if (connector) |
| 624 | connector->status = connector->funcs->detect(connector); | 644 | connector->status = connector->funcs->detect(connector); |
| 645 | /* Don't force modeset on machines where it causes a GPU lockup */ | ||
| 646 | if (dmi_check_system(intel_no_modeset_on_lid)) | ||
| 647 | return NOTIFY_OK; | ||
| 625 | if (!acpi_lid_open()) { | 648 | if (!acpi_lid_open()) { |
| 626 | dev_priv->modeset_on_lid = 1; | 649 | dev_priv->modeset_on_lid = 1; |
| 627 | return NOTIFY_OK; | 650 | return NOTIFY_OK; |
| @@ -983,8 +1006,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 983 | 1006 | ||
| 984 | drm_connector_attach_property(&intel_connector->base, | 1007 | drm_connector_attach_property(&intel_connector->base, |
| 985 | dev->mode_config.scaling_mode_property, | 1008 | dev->mode_config.scaling_mode_property, |
| 986 | DRM_MODE_SCALE_FULLSCREEN); | 1009 | DRM_MODE_SCALE_ASPECT); |
| 987 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | 1010 | lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; |
| 988 | /* | 1011 | /* |
| 989 | * LVDS discovery: | 1012 | * LVDS discovery: |
| 990 | * 1) check for EDID on DDC | 1013 | * 1) check for EDID on DDC |
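The lid quirk uses the stock DMI matching machinery: a NULL-terminated dmi_system_id table where every DMI_MATCH in .matches must hit before .callback fires, and dmi_check_system() returns the number of matching entries. A sketch of what a second, entirely hypothetical entry would look like:

	#include <linux/dmi.h>
	#include <linux/kernel.h>

	static int example_quirk_callback(const struct dmi_system_id *id)
	{
		pr_debug("matched quirk entry %s\n", id->ident);
		return 1;
	}

	/* "EXAMPLE"/"MODEL 1" are invented strings, not a real machine. */
	static const struct dmi_system_id example_quirks[] = {
		{
			.callback = example_quirk_callback,
			.ident = "Example Vendor Model 1",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
				DMI_MATCH(DMI_PRODUCT_NAME, "MODEL 1"),
			},
		},
		{ } /* terminating entry */
	};

	/* Somewhere in a notifier or probe path:
	 *	if (dmi_check_system(example_quirks))
	 *		return;	 -- skip the problematic operation
	 */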
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index b0e17b06eb6e..d7ad5139d17c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
| @@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) | |||
| 211 | static int intel_overlay_on(struct intel_overlay *overlay) | 211 | static int intel_overlay_on(struct intel_overlay *overlay) |
| 212 | { | 212 | { |
| 213 | struct drm_device *dev = overlay->dev; | 213 | struct drm_device *dev = overlay->dev; |
| 214 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 215 | int ret; | 214 | int ret; |
| 216 | RING_LOCALS; | 215 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 217 | 216 | ||
| 218 | BUG_ON(overlay->active); | 217 | BUG_ON(overlay->active); |
| 219 | 218 | ||
| @@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
| 227 | OUT_RING(MI_NOOP); | 226 | OUT_RING(MI_NOOP); |
| 228 | ADVANCE_LP_RING(); | 227 | ADVANCE_LP_RING(); |
| 229 | 228 | ||
| 230 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | 229 | overlay->last_flip_req = |
| 230 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
| 231 | if (overlay->last_flip_req == 0) | 231 | if (overlay->last_flip_req == 0) |
| 232 | return -ENOMEM; | 232 | return -ENOMEM; |
| 233 | 233 | ||
| 234 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | 234 | ret = i915_do_wait_request(dev, |
| 235 | overlay->last_flip_req, 1, &dev_priv->render_ring); | ||
| 235 | if (ret != 0) | 236 | if (ret != 0) |
| 236 | return ret; | 237 | return ret; |
| 237 | 238 | ||
| @@ -248,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay, | |||
| 248 | drm_i915_private_t *dev_priv = dev->dev_private; | 249 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 249 | u32 flip_addr = overlay->flip_addr; | 250 | u32 flip_addr = overlay->flip_addr; |
| 250 | u32 tmp; | 251 | u32 tmp; |
| 251 | RING_LOCALS; | ||
| 252 | 252 | ||
| 253 | BUG_ON(!overlay->active); | 253 | BUG_ON(!overlay->active); |
| 254 | 254 | ||
| @@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay, | |||
| 265 | OUT_RING(flip_addr); | 265 | OUT_RING(flip_addr); |
| 266 | ADVANCE_LP_RING(); | 266 | ADVANCE_LP_RING(); |
| 267 | 267 | ||
| 268 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | 268 | overlay->last_flip_req = |
| 269 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
| 269 | } | 270 | } |
| 270 | 271 | ||
| 271 | static int intel_overlay_wait_flip(struct intel_overlay *overlay) | 272 | static int intel_overlay_wait_flip(struct intel_overlay *overlay) |
| @@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) | |||
| 274 | drm_i915_private_t *dev_priv = dev->dev_private; | 275 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 275 | int ret; | 276 | int ret; |
| 276 | u32 tmp; | 277 | u32 tmp; |
| 277 | RING_LOCALS; | ||
| 278 | 278 | ||
| 279 | if (overlay->last_flip_req != 0) { | 279 | if (overlay->last_flip_req != 0) { |
| 280 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | 280 | ret = i915_do_wait_request(dev, overlay->last_flip_req, |
| 281 | 1, &dev_priv->render_ring); | ||
| 281 | if (ret == 0) { | 282 | if (ret == 0) { |
| 282 | overlay->last_flip_req = 0; | 283 | overlay->last_flip_req = 0; |
| 283 | 284 | ||
| @@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) | |||
| 296 | OUT_RING(MI_NOOP); | 297 | OUT_RING(MI_NOOP); |
| 297 | ADVANCE_LP_RING(); | 298 | ADVANCE_LP_RING(); |
| 298 | 299 | ||
| 299 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | 300 | overlay->last_flip_req = |
| 301 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
| 300 | if (overlay->last_flip_req == 0) | 302 | if (overlay->last_flip_req == 0) |
| 301 | return -ENOMEM; | 303 | return -ENOMEM; |
| 302 | 304 | ||
| 303 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | 305 | ret = i915_do_wait_request(dev, overlay->last_flip_req, |
| 306 | 1, &dev_priv->render_ring); | ||
| 304 | if (ret != 0) | 307 | if (ret != 0) |
| 305 | return ret; | 308 | return ret; |
| 306 | 309 | ||
| @@ -314,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
| 314 | { | 317 | { |
| 315 | u32 flip_addr = overlay->flip_addr; | 318 | u32 flip_addr = overlay->flip_addr; |
| 316 | struct drm_device *dev = overlay->dev; | 319 | struct drm_device *dev = overlay->dev; |
| 317 | drm_i915_private_t *dev_priv = dev->dev_private; | 320 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 318 | int ret; | 321 | int ret; |
| 319 | RING_LOCALS; | ||
| 320 | 322 | ||
| 321 | BUG_ON(!overlay->active); | 323 | BUG_ON(!overlay->active); |
| 322 | 324 | ||
| @@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
| 336 | OUT_RING(MI_NOOP); | 338 | OUT_RING(MI_NOOP); |
| 337 | ADVANCE_LP_RING(); | 339 | ADVANCE_LP_RING(); |
| 338 | 340 | ||
| 339 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | 341 | overlay->last_flip_req = |
| 342 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
| 340 | if (overlay->last_flip_req == 0) | 343 | if (overlay->last_flip_req == 0) |
| 341 | return -ENOMEM; | 344 | return -ENOMEM; |
| 342 | 345 | ||
| 343 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | 346 | ret = i915_do_wait_request(dev, overlay->last_flip_req, |
| 347 | 1, &dev_priv->render_ring); | ||
| 344 | if (ret != 0) | 348 | if (ret != 0) |
| 345 | return ret; | 349 | return ret; |
| 346 | 350 | ||
| @@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
| 354 | OUT_RING(MI_NOOP); | 358 | OUT_RING(MI_NOOP); |
| 355 | ADVANCE_LP_RING(); | 359 | ADVANCE_LP_RING(); |
| 356 | 360 | ||
| 357 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | 361 | overlay->last_flip_req = |
| 362 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
| 358 | if (overlay->last_flip_req == 0) | 363 | if (overlay->last_flip_req == 0) |
| 359 | return -ENOMEM; | 364 | return -ENOMEM; |
| 360 | 365 | ||
| 361 | ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); | 366 | ret = i915_do_wait_request(dev, overlay->last_flip_req, |
| 367 | 1, &dev_priv->render_ring); | ||
| 362 | if (ret != 0) | 368 | if (ret != 0) |
| 363 | return ret; | 369 | return ret; |
| 364 | 370 | ||
| @@ -390,22 +396,23 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | |||
| 390 | int interruptible) | 396 | int interruptible) |
| 391 | { | 397 | { |
| 392 | struct drm_device *dev = overlay->dev; | 398 | struct drm_device *dev = overlay->dev; |
| 393 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 394 | struct drm_gem_object *obj; | 399 | struct drm_gem_object *obj; |
| 400 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 395 | u32 flip_addr; | 401 | u32 flip_addr; |
| 396 | int ret; | 402 | int ret; |
| 397 | RING_LOCALS; | ||
| 398 | 403 | ||
| 399 | if (overlay->hw_wedged == HW_WEDGED) | 404 | if (overlay->hw_wedged == HW_WEDGED) |
| 400 | return -EIO; | 405 | return -EIO; |
| 401 | 406 | ||
| 402 | if (overlay->last_flip_req == 0) { | 407 | if (overlay->last_flip_req == 0) { |
| 403 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | 408 | overlay->last_flip_req = |
| 409 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
| 404 | if (overlay->last_flip_req == 0) | 410 | if (overlay->last_flip_req == 0) |
| 405 | return -ENOMEM; | 411 | return -ENOMEM; |
| 406 | } | 412 | } |
| 407 | 413 | ||
| 408 | ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); | 414 | ret = i915_do_wait_request(dev, overlay->last_flip_req, |
| 415 | interruptible, &dev_priv->render_ring); | ||
| 409 | if (ret != 0) | 416 | if (ret != 0) |
| 410 | return ret; | 417 | return ret; |
| 411 | 418 | ||
| @@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | |||
| 429 | OUT_RING(MI_NOOP); | 436 | OUT_RING(MI_NOOP); |
| 430 | ADVANCE_LP_RING(); | 437 | ADVANCE_LP_RING(); |
| 431 | 438 | ||
| 432 | overlay->last_flip_req = i915_add_request(dev, NULL, 0); | 439 | overlay->last_flip_req = i915_add_request(dev, NULL, |
| 440 | 0, &dev_priv->render_ring); | ||
| 433 | if (overlay->last_flip_req == 0) | 441 | if (overlay->last_flip_req == 0) |
| 434 | return -ENOMEM; | 442 | return -ENOMEM; |
| 435 | 443 | ||
| 436 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | 444 | ret = i915_do_wait_request(dev, overlay->last_flip_req, |
| 437 | interruptible); | 445 | interruptible, &dev_priv->render_ring); |
| 438 | if (ret != 0) | 446 | if (ret != 0) |
| 439 | return ret; | 447 | return ret; |
| 440 | 448 | ||
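Every request/wait pair in this file now names the ring explicitly, because with the BSD ring added a seqno only orders work within a single ring; the caller must hand the same ring to i915_add_request() and i915_do_wait_request(). The recurring idiom, condensed into one hypothetical helper (kernel-context sketch, assuming the same headers this file includes):

	static int overlay_emit_and_wait(struct drm_device *dev,
					 struct intel_overlay *overlay,
					 int interruptible)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;

		overlay->last_flip_req =
			i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
		if (overlay->last_flip_req == 0)
			return -ENOMEM; /* seqno 0 is reserved for "no request" */

		return i915_do_wait_request(dev, overlay->last_flip_req,
					    interruptible, &dev_priv->render_ring);
	}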
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c new file mode 100644 index 000000000000..26362f8495a8 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -0,0 +1,851 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2008-2010 Intel Corporation | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
| 21 | * IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | * Authors: | ||
| 24 | * Eric Anholt <eric@anholt.net> | ||
| 25 | * Zou Nan hai <nanhai.zou@intel.com> | ||
| 26 | * Xiang Hai hao <haihao.xiang@intel.com> | ||
| 27 | * | ||
| 28 | */ | ||
| 29 | |||
| 30 | #include "drmP.h" | ||
| 31 | #include "drm.h" | ||
| 32 | #include "i915_drv.h" | ||
| 33 | #include "i915_drm.h" | ||
| 34 | #include "i915_trace.h" | ||
| 35 | |||
| 36 | static void | ||
| 37 | render_ring_flush(struct drm_device *dev, | ||
| 38 | struct intel_ring_buffer *ring, | ||
| 39 | u32 invalidate_domains, | ||
| 40 | u32 flush_domains) | ||
| 41 | { | ||
| 42 | u32 cmd; | ||
| 43 | #if WATCH_EXEC | ||
| 44 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | ||
| 45 | invalidate_domains, flush_domains); | ||
| 46 | #endif | ||
| 47 | trace_i915_gem_request_flush(dev, ring->next_seqno, | ||
| 48 | invalidate_domains, flush_domains); | ||
| 49 | |||
| 50 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | ||
| 51 | /* | ||
| 52 | * read/write caches: | ||
| 53 | * | ||
| 54 | * I915_GEM_DOMAIN_RENDER is always invalidated, but is | ||
| 55 | * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is | ||
| 56 | * also flushed at 2d versus 3d pipeline switches. | ||
| 57 | * | ||
| 58 | * read-only caches: | ||
| 59 | * | ||
| 60 | * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if | ||
| 61 | * MI_READ_FLUSH is set, and is always flushed on 965. | ||
| 62 | * | ||
| 63 | * I915_GEM_DOMAIN_COMMAND may not exist? | ||
| 64 | * | ||
| 65 | * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is | ||
| 66 | * invalidated when MI_EXE_FLUSH is set. | ||
| 67 | * | ||
| 68 | * I915_GEM_DOMAIN_VERTEX, which exists on 965, is | ||
| 69 | * invalidated with every MI_FLUSH. | ||
| 70 | * | ||
| 71 | * TLBs: | ||
| 72 | * | ||
| 73 | * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND | ||
| 74 | * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and | ||
| 75 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER | ||
| 76 | * are flushed at any MI_FLUSH. | ||
| 77 | */ | ||
| 78 | |||
| 79 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
| 80 | if ((invalidate_domains|flush_domains) & | ||
| 81 | I915_GEM_DOMAIN_RENDER) | ||
| 82 | cmd &= ~MI_NO_WRITE_FLUSH; | ||
| 83 | if (!IS_I965G(dev)) { | ||
| 84 | /* | ||
| 85 | * On the 965, the sampler cache always gets flushed | ||
| 86 | * and this bit is reserved. | ||
| 87 | */ | ||
| 88 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | ||
| 89 | cmd |= MI_READ_FLUSH; | ||
| 90 | } | ||
| 91 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) | ||
| 92 | cmd |= MI_EXE_FLUSH; | ||
| 93 | |||
| 94 | #if WATCH_EXEC | ||
| 95 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | ||
| 96 | #endif | ||
| 97 | intel_ring_begin(dev, ring, 2); | ||
| 98 | intel_ring_emit(dev, ring, cmd); | ||
| 99 | intel_ring_emit(dev, ring, MI_NOOP); | ||
| 100 | intel_ring_advance(dev, ring); | ||
| 101 | } | ||
| 102 | } | ||
| 103 | |||
| 104 | static unsigned int render_ring_get_head(struct drm_device *dev, | ||
| 105 | struct intel_ring_buffer *ring) | ||
| 106 | { | ||
| 107 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 108 | return I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
| 109 | } | ||
| 110 | |||
| 111 | static unsigned int render_ring_get_tail(struct drm_device *dev, | ||
| 112 | struct intel_ring_buffer *ring) | ||
| 113 | { | ||
| 114 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 115 | return I915_READ(PRB0_TAIL) & TAIL_ADDR; | ||
| 116 | } | ||
| 117 | |||
| 118 | static unsigned int render_ring_get_active_head(struct drm_device *dev, | ||
| 119 | struct intel_ring_buffer *ring) | ||
| 120 | { | ||
| 121 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 122 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; | ||
| 123 | |||
| 124 | return I915_READ(acthd_reg); | ||
| 125 | } | ||
| 126 | |||
| 127 | static void render_ring_advance_ring(struct drm_device *dev, | ||
| 128 | struct intel_ring_buffer *ring) | ||
| 129 | { | ||
| 130 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 131 | I915_WRITE(PRB0_TAIL, ring->tail); | ||
| 132 | } | ||
| 133 | |||
| 134 | static int init_ring_common(struct drm_device *dev, | ||
| 135 | struct intel_ring_buffer *ring) | ||
| 136 | { | ||
| 137 | u32 head; | ||
| 138 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 139 | struct drm_i915_gem_object *obj_priv; | ||
| 140 | obj_priv = to_intel_bo(ring->gem_object); | ||
| 141 | |||
| 142 | /* Stop the ring if it's running. */ | ||
| 143 | I915_WRITE(ring->regs.ctl, 0); | ||
| 144 | I915_WRITE(ring->regs.head, 0); | ||
| 145 | I915_WRITE(ring->regs.tail, 0); | ||
| 146 | |||
| 147 | /* Initialize the ring. */ | ||
| 148 | I915_WRITE(ring->regs.start, obj_priv->gtt_offset); | ||
| 149 | head = ring->get_head(dev, ring); | ||
| 150 | |||
| 151 | /* G45 ring initialization fails to reset head to zero */ | ||
| 152 | if (head != 0) { | ||
| 153 | DRM_ERROR("%s head not reset to zero " | ||
| 154 | "ctl %08x head %08x tail %08x start %08x\n", | ||
| 155 | ring->name, | ||
| 156 | I915_READ(ring->regs.ctl), | ||
| 157 | I915_READ(ring->regs.head), | ||
| 158 | I915_READ(ring->regs.tail), | ||
| 159 | I915_READ(ring->regs.start)); | ||
| 160 | |||
| 161 | I915_WRITE(ring->regs.head, 0); | ||
| 162 | |||
| 163 | DRM_ERROR("%s head forced to zero " | ||
| 164 | "ctl %08x head %08x tail %08x start %08x\n", | ||
| 165 | ring->name, | ||
| 166 | I915_READ(ring->regs.ctl), | ||
| 167 | I915_READ(ring->regs.head), | ||
| 168 | I915_READ(ring->regs.tail), | ||
| 169 | I915_READ(ring->regs.start)); | ||
| 170 | } | ||
| 171 | |||
| 172 | I915_WRITE(ring->regs.ctl, | ||
| 173 | ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | ||
| 174 | | RING_NO_REPORT | RING_VALID); | ||
| 175 | |||
| 176 | head = I915_READ(ring->regs.head) & HEAD_ADDR; | ||
| 177 | /* If the head is still not zero, the ring is dead */ | ||
| 178 | if (head != 0) { | ||
| 179 | DRM_ERROR("%s initialization failed " | ||
| 180 | "ctl %08x head %08x tail %08x start %08x\n", | ||
| 181 | ring->name, | ||
| 182 | I915_READ(ring->regs.ctl), | ||
| 183 | I915_READ(ring->regs.head), | ||
| 184 | I915_READ(ring->regs.tail), | ||
| 185 | I915_READ(ring->regs.start)); | ||
| 186 | return -EIO; | ||
| 187 | } | ||
| 188 | |||
| 189 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 190 | i915_kernel_lost_context(dev); | ||
| 191 | else { | ||
| 192 | ring->head = ring->get_head(dev, ring); | ||
| 193 | ring->tail = ring->get_tail(dev, ring); | ||
| 194 | ring->space = ring->head - (ring->tail + 8); | ||
| 195 | if (ring->space < 0) | ||
| 196 | ring->space += ring->size; | ||
| 197 | } | ||
| 198 | return 0; | ||
| 199 | } | ||
| 200 | |||
| 201 | static int init_render_ring(struct drm_device *dev, | ||
| 202 | struct intel_ring_buffer *ring) | ||
| 203 | { | ||
| 204 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 205 | int ret = init_ring_common(dev, ring); | ||
| 206 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { | ||
| 207 | I915_WRITE(MI_MODE, | ||
| 208 | (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); | ||
| 209 | } | ||
| 210 | return ret; | ||
| 211 | } | ||
| 212 | |||
| 213 | #define PIPE_CONTROL_FLUSH(addr) \ | ||
| 214 | do { \ | ||
| 215 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | ||
| 216 | PIPE_CONTROL_DEPTH_STALL | 2); \ | ||
| 217 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | ||
| 218 | OUT_RING(0); \ | ||
| 219 | OUT_RING(0); \ | ||
| 220 | } while (0) | ||
| 221 | |||
| 222 | /** | ||
| 223 | * Creates a new sequence number, emitting a write of it to the status page | ||
| 224 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | ||
| 225 | * | ||
| 226 | * Must be called with struct_lock held. | ||
| 227 | * | ||
| 228 | * Returned sequence numbers are nonzero on success. | ||
| 229 | */ | ||
| 230 | static u32 | ||
| 231 | render_ring_add_request(struct drm_device *dev, | ||
| 232 | struct intel_ring_buffer *ring, | ||
| 233 | struct drm_file *file_priv, | ||
| 234 | u32 flush_domains) | ||
| 235 | { | ||
| 236 | u32 seqno; | ||
| 237 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 238 | seqno = intel_ring_get_seqno(dev, ring); | ||
| 239 | |||
| 240 | if (IS_GEN6(dev)) { | ||
| 241 | BEGIN_LP_RING(6); | ||
| 242 | OUT_RING(GFX_OP_PIPE_CONTROL | 3); | ||
| 243 | OUT_RING(PIPE_CONTROL_QW_WRITE | | ||
| 244 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | | ||
| 245 | PIPE_CONTROL_NOTIFY); | ||
| 246 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
| 247 | OUT_RING(seqno); | ||
| 248 | OUT_RING(0); | ||
| 249 | OUT_RING(0); | ||
| 250 | ADVANCE_LP_RING(); | ||
| 251 | } else if (HAS_PIPE_CONTROL(dev)) { | ||
| 252 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; | ||
| 253 | |||
| 254 | /* | ||
| 255 | * Workaround qword write incoherence by flushing the | ||
| 256 | * PIPE_NOTIFY buffers out to memory before requesting | ||
| 257 | * an interrupt. | ||
| 258 | */ | ||
| 259 | BEGIN_LP_RING(32); | ||
| 260 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
| 261 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
| 262 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
| 263 | OUT_RING(seqno); | ||
| 264 | OUT_RING(0); | ||
| 265 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 266 | scratch_addr += 128; /* write to separate cachelines */ | ||
| 267 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 268 | scratch_addr += 128; | ||
| 269 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 270 | scratch_addr += 128; | ||
| 271 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 272 | scratch_addr += 128; | ||
| 273 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 274 | scratch_addr += 128; | ||
| 275 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 276 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
| 277 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
| 278 | PIPE_CONTROL_NOTIFY); | ||
| 279 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
| 280 | OUT_RING(seqno); | ||
| 281 | OUT_RING(0); | ||
| 282 | ADVANCE_LP_RING(); | ||
| 283 | } else { | ||
| 284 | BEGIN_LP_RING(4); | ||
| 285 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
| 286 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
| 287 | OUT_RING(seqno); | ||
| 288 | |||
| 289 | OUT_RING(MI_USER_INTERRUPT); | ||
| 290 | ADVANCE_LP_RING(); | ||
| 291 | } | ||
| 292 | return seqno; | ||
| 293 | } | ||
| 294 | |||
| 295 | static u32 | ||
| 296 | render_ring_get_gem_seqno(struct drm_device *dev, | ||
| 297 | struct intel_ring_buffer *ring) | ||
| 298 | { | ||
| 299 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
| 300 | if (HAS_PIPE_CONTROL(dev)) | ||
| 301 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | ||
| 302 | else | ||
| 303 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | ||
| 304 | } | ||
| 305 | |||
| 306 | static void | ||
| 307 | render_ring_get_user_irq(struct drm_device *dev, | ||
| 308 | struct intel_ring_buffer *ring) | ||
| 309 | { | ||
| 310 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
| 311 | unsigned long irqflags; | ||
| 312 | |||
| 313 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
| 314 | if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { | ||
| 315 | if (HAS_PCH_SPLIT(dev)) | ||
| 316 | ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | ||
| 317 | else | ||
| 318 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | ||
| 319 | } | ||
| 320 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
| 321 | } | ||
| 322 | |||
| 323 | static void | ||
| 324 | render_ring_put_user_irq(struct drm_device *dev, | ||
| 325 | struct intel_ring_buffer *ring) | ||
| 326 | { | ||
| 327 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
| 328 | unsigned long irqflags; | ||
| 329 | |||
| 330 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
| 331 | BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); | ||
| 332 | if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { | ||
| 333 | if (HAS_PCH_SPLIT(dev)) | ||
| 334 | ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | ||
| 335 | else | ||
| 336 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | ||
| 337 | } | ||
| 338 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
| 339 | } | ||
| 340 | |||
| 341 | static void render_setup_status_page(struct drm_device *dev, | ||
| 342 | struct intel_ring_buffer *ring) | ||
| 343 | { | ||
| 344 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 345 | if (IS_GEN6(dev)) { | ||
| 346 | I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); | ||
| 347 | I915_READ(HWS_PGA_GEN6); /* posting read */ | ||
| 348 | } else { | ||
| 349 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | ||
| 350 | I915_READ(HWS_PGA); /* posting read */ | ||
| 351 | } | ||
| 352 | |||
| 353 | } | ||
| 354 | |||
| 355 | void | ||
| 356 | bsd_ring_flush(struct drm_device *dev, | ||
| 357 | struct intel_ring_buffer *ring, | ||
| 358 | u32 invalidate_domains, | ||
| 359 | u32 flush_domains) | ||
| 360 | { | ||
| 361 | intel_ring_begin(dev, ring, 2); | ||
| 362 | intel_ring_emit(dev, ring, MI_FLUSH); | ||
| 363 | intel_ring_emit(dev, ring, MI_NOOP); | ||
| 364 | intel_ring_advance(dev, ring); | ||
| 365 | } | ||
| 366 | |||
| 367 | static inline unsigned int bsd_ring_get_head(struct drm_device *dev, | ||
| 368 | struct intel_ring_buffer *ring) | ||
| 369 | { | ||
| 370 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 371 | return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; | ||
| 372 | } | ||
| 373 | |||
| 374 | static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, | ||
| 375 | struct intel_ring_buffer *ring) | ||
| 376 | { | ||
| 377 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 378 | return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; | ||
| 379 | } | ||
| 380 | |||
| 381 | static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, | ||
| 382 | struct intel_ring_buffer *ring) | ||
| 383 | { | ||
| 384 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 385 | return I915_READ(BSD_RING_ACTHD); | ||
| 386 | } | ||
| 387 | |||
| 388 | static inline void bsd_ring_advance_ring(struct drm_device *dev, | ||
| 389 | struct intel_ring_buffer *ring) | ||
| 390 | { | ||
| 391 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 392 | I915_WRITE(BSD_RING_TAIL, ring->tail); | ||
| 393 | } | ||
| 394 | |||
| 395 | static int init_bsd_ring(struct drm_device *dev, | ||
| 396 | struct intel_ring_buffer *ring) | ||
| 397 | { | ||
| 398 | return init_ring_common(dev, ring); | ||
| 399 | } | ||
| 400 | |||
| 401 | static u32 | ||
| 402 | bsd_ring_add_request(struct drm_device *dev, | ||
| 403 | struct intel_ring_buffer *ring, | ||
| 404 | struct drm_file *file_priv, | ||
| 405 | u32 flush_domains) | ||
| 406 | { | ||
| 407 | u32 seqno; | ||
| 408 | seqno = intel_ring_get_seqno(dev, ring); | ||
| 409 | intel_ring_begin(dev, ring, 4); | ||
| 410 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); | ||
| 411 | intel_ring_emit(dev, ring, | ||
| 412 | I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
| 413 | intel_ring_emit(dev, ring, seqno); | ||
| 414 | intel_ring_emit(dev, ring, MI_USER_INTERRUPT); | ||
| 415 | intel_ring_advance(dev, ring); | ||
| 416 | |||
| 417 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | ||
| 418 | |||
| 419 | return seqno; | ||
| 420 | } | ||
| 421 | |||
| 422 | static void bsd_setup_status_page(struct drm_device *dev, | ||
| 423 | struct intel_ring_buffer *ring) | ||
| 424 | { | ||
| 425 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 426 | I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); | ||
| 427 | I915_READ(BSD_HWS_PGA); | ||
| 428 | } | ||
| 429 | |||
| 430 | static void | ||
| 431 | bsd_ring_get_user_irq(struct drm_device *dev, | ||
| 432 | struct intel_ring_buffer *ring) | ||
| 433 | { | ||
| 434 | /* do nothing */ | ||
| 435 | } | ||
| 436 | static void | ||
| 437 | bsd_ring_put_user_irq(struct drm_device *dev, | ||
| 438 | struct intel_ring_buffer *ring) | ||
| 439 | { | ||
| 440 | /* do nothing */ | ||
| 441 | } | ||
| 442 | |||
| 443 | static u32 | ||
| 444 | bsd_ring_get_gem_seqno(struct drm_device *dev, | ||
| 445 | struct intel_ring_buffer *ring) | ||
| 446 | { | ||
| 447 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | ||
| 448 | } | ||
| 449 | |||
| 450 | static int | ||
| 451 | bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, | ||
| 452 | struct intel_ring_buffer *ring, | ||
| 453 | struct drm_i915_gem_execbuffer2 *exec, | ||
| 454 | struct drm_clip_rect *cliprects, | ||
| 455 | uint64_t exec_offset) | ||
| 456 | { | ||
| 457 | uint32_t exec_start; | ||
| 458 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
| 459 | intel_ring_begin(dev, ring, 2); | ||
| 460 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | | ||
| 461 | (2 << 6) | MI_BATCH_NON_SECURE_I965); | ||
| 462 | intel_ring_emit(dev, ring, exec_start); | ||
| 463 | intel_ring_advance(dev, ring); | ||
| 464 | return 0; | ||
| 465 | } | ||
| 466 | |||
| 467 | |||
| 468 | static int | ||
| 469 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | ||
| 470 | struct intel_ring_buffer *ring, | ||
| 471 | struct drm_i915_gem_execbuffer2 *exec, | ||
| 472 | struct drm_clip_rect *cliprects, | ||
| 473 | uint64_t exec_offset) | ||
| 474 | { | ||
| 475 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 476 | int nbox = exec->num_cliprects; | ||
| 477 | int i = 0, count; | ||
| 478 | uint32_t exec_start, exec_len; | ||
| 479 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
| 480 | exec_len = (uint32_t) exec->batch_len; | ||
| 481 | |||
| 482 | trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); | ||
| 483 | |||
| 484 | count = nbox ? nbox : 1; | ||
| 485 | |||
| 486 | for (i = 0; i < count; i++) { | ||
| 487 | if (i < nbox) { | ||
| 488 | int ret = i915_emit_box(dev, cliprects, i, | ||
| 489 | exec->DR1, exec->DR4); | ||
| 490 | if (ret) | ||
| 491 | return ret; | ||
| 492 | } | ||
| 493 | |||
| 494 | if (IS_I830(dev) || IS_845G(dev)) { | ||
| 495 | intel_ring_begin(dev, ring, 4); | ||
| 496 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER); | ||
| 497 | intel_ring_emit(dev, ring, | ||
| 498 | exec_start | MI_BATCH_NON_SECURE); | ||
| 499 | intel_ring_emit(dev, ring, exec_start + exec_len - 4); | ||
| 500 | intel_ring_emit(dev, ring, 0); | ||
| 501 | } else { | ||
| 502 | intel_ring_begin(dev, ring, 4); | ||
| 503 | if (IS_I965G(dev)) { | ||
| 504 | intel_ring_emit(dev, ring, | ||
| 505 | MI_BATCH_BUFFER_START | (2 << 6) | ||
| 506 | | MI_BATCH_NON_SECURE_I965); | ||
| 507 | intel_ring_emit(dev, ring, exec_start); | ||
| 508 | } else { | ||
| 509 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | ||
| 510 | | (2 << 6)); | ||
| 511 | intel_ring_emit(dev, ring, exec_start | | ||
| 512 | MI_BATCH_NON_SECURE); | ||
| 513 | } | ||
| 514 | } | ||
| 515 | intel_ring_advance(dev, ring); | ||
| 516 | } | ||
| 517 | |||
| 518 | /* XXX breadcrumb */ | ||
| 519 | return 0; | ||
| 520 | } | ||
| 521 | |||
| 522 | static void cleanup_status_page(struct drm_device *dev, | ||
| 523 | struct intel_ring_buffer *ring) | ||
| 524 | { | ||
| 525 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 526 | struct drm_gem_object *obj; | ||
| 527 | struct drm_i915_gem_object *obj_priv; | ||
| 528 | |||
| 529 | obj = ring->status_page.obj; | ||
| 530 | if (obj == NULL) | ||
| 531 | return; | ||
| 532 | obj_priv = to_intel_bo(obj); | ||
| 533 | |||
| 534 | kunmap(obj_priv->pages[0]); | ||
| 535 | i915_gem_object_unpin(obj); | ||
| 536 | drm_gem_object_unreference(obj); | ||
| 537 | ring->status_page.obj = NULL; | ||
| 538 | |||
| 539 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
| 540 | } | ||
| 541 | |||
| 542 | static int init_status_page(struct drm_device *dev, | ||
| 543 | struct intel_ring_buffer *ring) | ||
| 544 | { | ||
| 545 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 546 | struct drm_gem_object *obj; | ||
| 547 | struct drm_i915_gem_object *obj_priv; | ||
| 548 | int ret; | ||
| 549 | |||
| 550 | obj = i915_gem_alloc_object(dev, 4096); | ||
| 551 | if (obj == NULL) { | ||
| 552 | DRM_ERROR("Failed to allocate status page\n"); | ||
| 553 | ret = -ENOMEM; | ||
| 554 | goto err; | ||
| 555 | } | ||
| 556 | obj_priv = to_intel_bo(obj); | ||
| 557 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
| 558 | |||
| 559 | ret = i915_gem_object_pin(obj, 4096); | ||
| 560 | if (ret != 0) { | ||
| 561 | goto err_unref; | ||
| 562 | } | ||
| 563 | |||
| 564 | ring->status_page.gfx_addr = obj_priv->gtt_offset; | ||
| 565 | ring->status_page.page_addr = kmap(obj_priv->pages[0]); | ||
| 566 | if (ring->status_page.page_addr == NULL) { | ||
| 567 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
| 568 | goto err_unpin; | ||
| 569 | } | ||
| 570 | ring->status_page.obj = obj; | ||
| 571 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
| 572 | |||
| 573 | ring->setup_status_page(dev, ring); | ||
| 574 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", | ||
| 575 | ring->name, ring->status_page.gfx_addr); | ||
| 576 | |||
| 577 | return 0; | ||
| 578 | |||
| 579 | err_unpin: | ||
| 580 | i915_gem_object_unpin(obj); | ||
| 581 | err_unref: | ||
| 582 | drm_gem_object_unreference(obj); | ||
| 583 | err: | ||
| 584 | return ret; | ||
| 585 | } | ||
| 586 | |||
| 587 | |||
| 588 | int intel_init_ring_buffer(struct drm_device *dev, | ||
| 589 | struct intel_ring_buffer *ring) | ||
| 590 | { | ||
| 591 | int ret; | ||
| 592 | struct drm_i915_gem_object *obj_priv; | ||
| 593 | struct drm_gem_object *obj; | ||
| 594 | ring->dev = dev; | ||
| 595 | |||
| 596 | if (I915_NEED_GFX_HWS(dev)) { | ||
| 597 | ret = init_status_page(dev, ring); | ||
| 598 | if (ret) | ||
| 599 | return ret; | ||
| 600 | } | ||
| 601 | |||
| 602 | obj = i915_gem_alloc_object(dev, ring->size); | ||
| 603 | if (obj == NULL) { | ||
| 604 | DRM_ERROR("Failed to allocate ringbuffer\n"); | ||
| 605 | ret = -ENOMEM; | ||
| 606 | goto cleanup; | ||
| 607 | } | ||
| 608 | |||
| 609 | ring->gem_object = obj; | ||
| 610 | |||
| 611 | ret = i915_gem_object_pin(obj, ring->alignment); | ||
| 612 | if (ret != 0) { | ||
| 613 | drm_gem_object_unreference(obj); | ||
| 614 | goto cleanup; | ||
| 615 | } | ||
| 616 | |||
| 617 | obj_priv = to_intel_bo(obj); | ||
| 618 | ring->map.size = ring->size; | ||
| 619 | ring->map.offset = dev->agp->base + obj_priv->gtt_offset; | ||
| 620 | ring->map.type = 0; | ||
| 621 | ring->map.flags = 0; | ||
| 622 | ring->map.mtrr = 0; | ||
| 623 | |||
| 624 | drm_core_ioremap_wc(&ring->map, dev); | ||
| 625 | if (ring->map.handle == NULL) { | ||
| 626 | DRM_ERROR("Failed to map ringbuffer.\n"); | ||
| 627 | i915_gem_object_unpin(obj); | ||
| 628 | drm_gem_object_unreference(obj); | ||
| 629 | ret = -EINVAL; | ||
| 630 | goto cleanup; | ||
| 631 | } | ||
| 632 | |||
| 633 | ring->virtual_start = ring->map.handle; | ||
| 634 | ret = ring->init(dev, ring); | ||
| 635 | if (ret != 0) { | ||
| 636 | intel_cleanup_ring_buffer(dev, ring); | ||
| 637 | return ret; | ||
| 638 | } | ||
| 639 | |||
| 640 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 641 | i915_kernel_lost_context(dev); | ||
| 642 | else { | ||
| 643 | ring->head = ring->get_head(dev, ring); | ||
| 644 | ring->tail = ring->get_tail(dev, ring); | ||
| 645 | ring->space = ring->head - (ring->tail + 8); | ||
| 646 | if (ring->space < 0) | ||
| 647 | ring->space += ring->size; | ||
| 648 | } | ||
| 649 | INIT_LIST_HEAD(&ring->active_list); | ||
| 650 | INIT_LIST_HEAD(&ring->request_list); | ||
| 651 | return ret; | ||
| 652 | cleanup: | ||
| 653 | cleanup_status_page(dev, ring); | ||
| 654 | return ret; | ||
| 655 | } | ||
| 656 | |||
| 657 | void intel_cleanup_ring_buffer(struct drm_device *dev, | ||
| 658 | struct intel_ring_buffer *ring) | ||
| 659 | { | ||
| 660 | if (ring->gem_object == NULL) | ||
| 661 | return; | ||
| 662 | |||
| 663 | drm_core_ioremapfree(&ring->map, dev); | ||
| 664 | |||
| 665 | i915_gem_object_unpin(ring->gem_object); | ||
| 666 | drm_gem_object_unreference(ring->gem_object); | ||
| 667 | ring->gem_object = NULL; | ||
| 668 | cleanup_status_page(dev, ring); | ||
| 669 | } | ||
| 670 | |||
| 671 | int intel_wrap_ring_buffer(struct drm_device *dev, | ||
| 672 | struct intel_ring_buffer *ring) | ||
| 673 | { | ||
| 674 | unsigned int *virt; | ||
| 675 | int rem; | ||
| 676 | rem = ring->size - ring->tail; | ||
| 677 | |||
| 678 | if (ring->space < rem) { | ||
| 679 | int ret = intel_wait_ring_buffer(dev, ring, rem); | ||
| 680 | if (ret) | ||
| 681 | return ret; | ||
| 682 | } | ||
| 683 | |||
| 684 | virt = (unsigned int *)(ring->virtual_start + ring->tail); | ||
| 685 | rem /= 4; | ||
| 686 | while (rem--) | ||
| 687 | *virt++ = MI_NOOP; | ||
| 688 | |||
| 689 | ring->tail = 0; | ||
| 690 | ring->space = ring->head - 8; | ||
| 691 | |||
| 692 | return 0; | ||
| 693 | } | ||
| 694 | |||
| 695 | int intel_wait_ring_buffer(struct drm_device *dev, | ||
| 696 | struct intel_ring_buffer *ring, int n) | ||
| 697 | { | ||
| 698 | unsigned long end; | ||
| 699 | |||
| 700 | trace_i915_ring_wait_begin(dev); | ||
| 701 | end = jiffies + 3 * HZ; | ||
| 702 | do { | ||
| 703 | ring->head = ring->get_head(dev, ring); | ||
| 704 | ring->space = ring->head - (ring->tail + 8); | ||
| 705 | if (ring->space < 0) | ||
| 706 | ring->space += ring->size; | ||
| 707 | if (ring->space >= n) { | ||
| 708 | trace_i915_ring_wait_end(dev); | ||
| 709 | return 0; | ||
| 710 | } | ||
| 711 | |||
| 712 | if (dev->primary->master) { | ||
| 713 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
| 714 | if (master_priv->sarea_priv) | ||
| 715 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | ||
| 716 | } | ||
| 717 | |||
| 718 | yield(); | ||
| 719 | } while (!time_after(jiffies, end)); | ||
| 720 | trace_i915_ring_wait_end(dev); | ||
| 721 | return -EBUSY; | ||
| 722 | } | ||
| 723 | |||
| 724 | void intel_ring_begin(struct drm_device *dev, | ||
| 725 | struct intel_ring_buffer *ring, int num_dwords) | ||
| 726 | { | ||
| 727 | int n = 4*num_dwords; | ||
| 728 | if (unlikely(ring->tail + n > ring->size)) | ||
| 729 | intel_wrap_ring_buffer(dev, ring); | ||
| 730 | if (unlikely(ring->space < n)) | ||
| 731 | intel_wait_ring_buffer(dev, ring, n); | ||
| 732 | } | ||
| 733 | |||
| 734 | void intel_ring_emit(struct drm_device *dev, | ||
| 735 | struct intel_ring_buffer *ring, unsigned int data) | ||
| 736 | { | ||
| 737 | unsigned int *virt = ring->virtual_start + ring->tail; | ||
| 738 | *virt = data; | ||
| 739 | ring->tail += 4; | ||
| 740 | ring->tail &= ring->size - 1; | ||
| 741 | ring->space -= 4; | ||
| 742 | } | ||
| 743 | |||
| 744 | void intel_ring_advance(struct drm_device *dev, | ||
| 745 | struct intel_ring_buffer *ring) | ||
| 746 | { | ||
| 747 | ring->advance_ring(dev, ring); | ||
| 748 | } | ||
| 749 | |||
| 750 | void intel_fill_struct(struct drm_device *dev, | ||
| 751 | struct intel_ring_buffer *ring, | ||
| 752 | void *data, | ||
| 753 | unsigned int len) | ||
| 754 | { | ||
| 755 | unsigned int *virt = ring->virtual_start + ring->tail; | ||
| 756 | BUG_ON((len & ~(4 - 1)) != 0); | ||
| 757 | intel_ring_begin(dev, ring, len/4); | ||
| 758 | memcpy(virt, data, len); | ||
| 759 | ring->tail += len; | ||
| 760 | ring->tail &= ring->size - 1; | ||
| 761 | ring->space -= len; | ||
| 762 | intel_ring_advance(dev, ring); | ||
| 763 | } | ||
| 764 | |||
| 765 | u32 intel_ring_get_seqno(struct drm_device *dev, | ||
| 766 | struct intel_ring_buffer *ring) | ||
| 767 | { | ||
| 768 | u32 seqno; | ||
| 769 | seqno = ring->next_seqno; | ||
| 770 | |||
| 771 | /* reserve 0 for non-seqno */ | ||
| 772 | if (++ring->next_seqno == 0) | ||
| 773 | ring->next_seqno = 1; | ||
| 774 | return seqno; | ||
| 775 | } | ||
| 776 | |||
| 777 | struct intel_ring_buffer render_ring = { | ||
| 778 | .name = "render ring", | ||
| 779 | .regs = { | ||
| 780 | .ctl = PRB0_CTL, | ||
| 781 | .head = PRB0_HEAD, | ||
| 782 | .tail = PRB0_TAIL, | ||
| 783 | .start = PRB0_START | ||
| 784 | }, | ||
| 785 | .ring_flag = I915_EXEC_RENDER, | ||
| 786 | .size = 32 * PAGE_SIZE, | ||
| 787 | .alignment = PAGE_SIZE, | ||
| 788 | .virtual_start = NULL, | ||
| 789 | .dev = NULL, | ||
| 790 | .gem_object = NULL, | ||
| 791 | .head = 0, | ||
| 792 | .tail = 0, | ||
| 793 | .space = 0, | ||
| 794 | .next_seqno = 1, | ||
| 795 | .user_irq_refcount = 0, | ||
| 796 | .irq_gem_seqno = 0, | ||
| 797 | .waiting_gem_seqno = 0, | ||
| 798 | .setup_status_page = render_setup_status_page, | ||
| 799 | .init = init_render_ring, | ||
| 800 | .get_head = render_ring_get_head, | ||
| 801 | .get_tail = render_ring_get_tail, | ||
| 802 | .get_active_head = render_ring_get_active_head, | ||
| 803 | .advance_ring = render_ring_advance_ring, | ||
| 804 | .flush = render_ring_flush, | ||
| 805 | .add_request = render_ring_add_request, | ||
| 806 | .get_gem_seqno = render_ring_get_gem_seqno, | ||
| 807 | .user_irq_get = render_ring_get_user_irq, | ||
| 808 | .user_irq_put = render_ring_put_user_irq, | ||
| 809 | .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, | ||
| 810 | .status_page = {NULL, 0, NULL}, | ||
| 811 | .map = {0,} | ||
| 812 | }; | ||
| 813 | |||
| 814 | /* ring buffer for bit-stream decoder */ | ||
| 815 | |||
| 816 | struct intel_ring_buffer bsd_ring = { | ||
| 817 | .name = "bsd ring", | ||
| 818 | .regs = { | ||
| 819 | .ctl = BSD_RING_CTL, | ||
| 820 | .head = BSD_RING_HEAD, | ||
| 821 | .tail = BSD_RING_TAIL, | ||
| 822 | .start = BSD_RING_START | ||
| 823 | }, | ||
| 824 | .ring_flag = I915_EXEC_BSD, | ||
| 825 | .size = 32 * PAGE_SIZE, | ||
| 826 | .alignment = PAGE_SIZE, | ||
| 827 | .virtual_start = NULL, | ||
| 828 | .dev = NULL, | ||
| 829 | .gem_object = NULL, | ||
| 830 | .head = 0, | ||
| 831 | .tail = 0, | ||
| 832 | .space = 0, | ||
| 833 | .next_seqno = 1, | ||
| 834 | .user_irq_refcount = 0, | ||
| 835 | .irq_gem_seqno = 0, | ||
| 836 | .waiting_gem_seqno = 0, | ||
| 837 | .setup_status_page = bsd_setup_status_page, | ||
| 838 | .init = init_bsd_ring, | ||
| 839 | .get_head = bsd_ring_get_head, | ||
| 840 | .get_tail = bsd_ring_get_tail, | ||
| 841 | .get_active_head = bsd_ring_get_active_head, | ||
| 842 | .advance_ring = bsd_ring_advance_ring, | ||
| 843 | .flush = bsd_ring_flush, | ||
| 844 | .add_request = bsd_ring_add_request, | ||
| 845 | .get_gem_seqno = bsd_ring_get_gem_seqno, | ||
| 846 | .user_irq_get = bsd_ring_get_user_irq, | ||
| 847 | .user_irq_put = bsd_ring_put_user_irq, | ||
| 848 | .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, | ||
| 849 | .status_page = {NULL, 0, NULL}, | ||
| 850 | .map = {0,} | ||
| 851 | }; | ||
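The space computation that recurs in init_ring_common() and intel_wait_ring_buffer() treats the buffer as circular, with the hardware head chasing the software tail; the 8-byte pad keeps tail from ever advancing right up to head, so a full ring stays distinguishable from an empty one. Worked through standalone:

	#include <assert.h>

	/* Mirror of the kernel's ring free-space math: bytes between the
	 * hardware read pointer (head) and software write pointer (tail),
	 * minus an 8-byte pad so full and empty rings look different. */
	static int ring_space(unsigned int head, unsigned int tail, unsigned int size)
	{
		int space = head - (tail + 8);
		if (space < 0)
			space += size;
		return space;
	}

	int main(void)
	{
		/* 32 pages, as both render_ring and bsd_ring declare */
		const unsigned int size = 32 * 4096;

		assert(ring_space(0, 0, size) == size - 8);       /* empty ring */
		assert(ring_space(256, 128, size) == 120);        /* head ahead of tail */
		assert(ring_space(128, 256, size) == size - 136); /* wrapped */
		return 0;
	}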
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h new file mode 100644 index 000000000000..d5568d3766de --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -0,0 +1,124 @@ | |||
| 1 | #ifndef _INTEL_RINGBUFFER_H_ | ||
| 2 | #define _INTEL_RINGBUFFER_H_ | ||
| 3 | |||
| 4 | struct intel_hw_status_page { | ||
| 5 | void *page_addr; | ||
| 6 | unsigned int gfx_addr; | ||
| 7 | struct drm_gem_object *obj; | ||
| 8 | }; | ||
| 9 | |||
| 10 | struct drm_i915_gem_execbuffer2; | ||
| 11 | struct intel_ring_buffer { | ||
| 12 | const char *name; | ||
| 13 | struct ring_regs { | ||
| 14 | u32 ctl; | ||
| 15 | u32 head; | ||
| 16 | u32 tail; | ||
| 17 | u32 start; | ||
| 18 | } regs; | ||
| 19 | unsigned int ring_flag; | ||
| 20 | unsigned long size; | ||
| 21 | unsigned int alignment; | ||
| 22 | void *virtual_start; | ||
| 23 | struct drm_device *dev; | ||
| 24 | struct drm_gem_object *gem_object; | ||
| 25 | |||
| 26 | unsigned int head; | ||
| 27 | unsigned int tail; | ||
| 28 | unsigned int space; | ||
| 29 | u32 next_seqno; | ||
| 30 | struct intel_hw_status_page status_page; | ||
| 31 | |||
| 32 | u32 irq_gem_seqno; /* last seqno seen at irq time */ | ||
| 33 | u32 waiting_gem_seqno; | ||
| 34 | int user_irq_refcount; | ||
| 35 | void (*user_irq_get)(struct drm_device *dev, | ||
| 36 | struct intel_ring_buffer *ring); | ||
| 37 | void (*user_irq_put)(struct drm_device *dev, | ||
| 38 | struct intel_ring_buffer *ring); | ||
| 39 | void (*setup_status_page)(struct drm_device *dev, | ||
| 40 | struct intel_ring_buffer *ring); | ||
| 41 | |||
| 42 | int (*init)(struct drm_device *dev, | ||
| 43 | struct intel_ring_buffer *ring); | ||
| 44 | |||
| 45 | unsigned int (*get_head)(struct drm_device *dev, | ||
| 46 | struct intel_ring_buffer *ring); | ||
| 47 | unsigned int (*get_tail)(struct drm_device *dev, | ||
| 48 | struct intel_ring_buffer *ring); | ||
| 49 | unsigned int (*get_active_head)(struct drm_device *dev, | ||
| 50 | struct intel_ring_buffer *ring); | ||
| 51 | void (*advance_ring)(struct drm_device *dev, | ||
| 52 | struct intel_ring_buffer *ring); | ||
| 53 | void (*flush)(struct drm_device *dev, | ||
| 54 | struct intel_ring_buffer *ring, | ||
| 55 | u32 invalidate_domains, | ||
| 56 | u32 flush_domains); | ||
| 57 | u32 (*add_request)(struct drm_device *dev, | ||
| 58 | struct intel_ring_buffer *ring, | ||
| 59 | struct drm_file *file_priv, | ||
| 60 | u32 flush_domains); | ||
| 61 | u32 (*get_gem_seqno)(struct drm_device *dev, | ||
| 62 | struct intel_ring_buffer *ring); | ||
| 63 | int (*dispatch_gem_execbuffer)(struct drm_device *dev, | ||
| 64 | struct intel_ring_buffer *ring, | ||
| 65 | struct drm_i915_gem_execbuffer2 *exec, | ||
| 66 | struct drm_clip_rect *cliprects, | ||
| 67 | uint64_t exec_offset); | ||
| 68 | |||
| 69 | /** | ||
| 70 | * List of objects currently involved in rendering from the | ||
| 71 | * ringbuffer. | ||
| 72 | * | ||
| 73 | * Includes buffers having the contents of their GPU caches | ||
| 74 | * flushed, not necessarily primitives. last_rendering_seqno | ||
| 75 | * represents when the rendering involved will be completed. | ||
| 76 | * | ||
| 77 | * A reference is held on the buffer while on this list. | ||
| 78 | */ | ||
| 79 | struct list_head active_list; | ||
| 80 | |||
| 81 | /** | ||
| 82 | * List of breadcrumbs associated with GPU requests currently | ||
| 83 | * outstanding. | ||
| 84 | */ | ||
| 85 | struct list_head request_list; | ||
| 86 | |||
| 87 | wait_queue_head_t irq_queue; | ||
| 88 | drm_local_map_t map; | ||
| 89 | }; | ||
| 90 | |||
| 91 | static inline u32 | ||
| 92 | intel_read_status_page(struct intel_ring_buffer *ring, | ||
| 93 | int reg) | ||
| 94 | { | ||
| 95 | u32 *regs = ring->status_page.page_addr; | ||
| 96 | return regs[reg]; | ||
| 97 | } | ||
| 98 | |||
| 99 | int intel_init_ring_buffer(struct drm_device *dev, | ||
| 100 | struct intel_ring_buffer *ring); | ||
| 101 | void intel_cleanup_ring_buffer(struct drm_device *dev, | ||
| 102 | struct intel_ring_buffer *ring); | ||
| 103 | int intel_wait_ring_buffer(struct drm_device *dev, | ||
| 104 | struct intel_ring_buffer *ring, int n); | ||
| 105 | int intel_wrap_ring_buffer(struct drm_device *dev, | ||
| 106 | struct intel_ring_buffer *ring); | ||
| 107 | void intel_ring_begin(struct drm_device *dev, | ||
| 108 | struct intel_ring_buffer *ring, int n); | ||
| 109 | void intel_ring_emit(struct drm_device *dev, | ||
| 110 | struct intel_ring_buffer *ring, u32 data); | ||
| 111 | void intel_fill_struct(struct drm_device *dev, | ||
| 112 | struct intel_ring_buffer *ring, | ||
| 113 | void *data, | ||
| 114 | unsigned int len); | ||
| 115 | void intel_ring_advance(struct drm_device *dev, | ||
| 116 | struct intel_ring_buffer *ring); | ||
| 117 | |||
| 118 | u32 intel_ring_get_seqno(struct drm_device *dev, | ||
| 119 | struct intel_ring_buffer *ring); | ||
| 120 | |||
| 121 | extern struct intel_ring_buffer render_ring; | ||
| 122 | extern struct intel_ring_buffer bsd_ring; | ||
| 123 | |||
| 124 | #endif /* _INTEL_RINGBUFFER_H_ */ | ||
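
A minimal caller sketch of the begin/emit/advance API declared above (illustrative only, not part of the patch). It assumes the i915 convention that intel_ring_begin() takes a byte count, matching the driver's BEGIN_LP_RING(n) wrapper which expands to intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); MI_FLUSH and MI_NOOP come from the driver's existing register headers:

	/* Reserve space, write dwords, then kick the hardware by
	 * publishing the new tail via intel_ring_advance(). */
	static void example_emit_flush(struct drm_device *dev,
				       struct intel_ring_buffer *ring)
	{
		intel_ring_begin(dev, ring, 2 * sizeof(u32));
		intel_ring_emit(dev, ring, MI_FLUSH);	/* flush GPU caches */
		intel_ring_emit(dev, ring, MI_NOOP);	/* pad to a qword */
		intel_ring_advance(dev, ring);		/* write tail register */
	}
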
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index aba72c489a2f..76993ac16cc1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -1479,7 +1479,7 @@ intel_find_analog_connector(struct drm_device *dev) | |||
| 1479 | intel_encoder = enc_to_intel_encoder(encoder); | 1479 | intel_encoder = enc_to_intel_encoder(encoder); |
| 1480 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { | 1480 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { |
| 1481 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1481 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 1482 | if (connector && encoder == intel_attached_encoder(connector)) | 1482 | if (encoder == intel_attached_encoder(connector)) |
| 1483 | return connector; | 1483 | return connector; |
| 1484 | } | 1484 | } |
| 1485 | } | 1485 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index e13f6af0037a..d4bcca8a5133 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | static struct nouveau_dsm_priv { | 34 | static struct nouveau_dsm_priv { |
| 35 | bool dsm_detected; | 35 | bool dsm_detected; |
| 36 | acpi_handle dhandle; | 36 | acpi_handle dhandle; |
| 37 | acpi_handle dsm_handle; | 37 | acpi_handle rom_handle; |
| 38 | } nouveau_dsm_priv; | 38 | } nouveau_dsm_priv; |
| 39 | 39 | ||
| 40 | static const char nouveau_dsm_muid[] = { | 40 | static const char nouveau_dsm_muid[] = { |
| @@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero | |||
| 107 | static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) | 107 | static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) |
| 108 | { | 108 | { |
| 109 | if (id == VGA_SWITCHEROO_IGD) | 109 | if (id == VGA_SWITCHEROO_IGD) |
| 110 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); | 110 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); |
| 111 | else | 111 | else |
| 112 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); | 112 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, | 115 | static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, |
| @@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, | |||
| 118 | if (id == VGA_SWITCHEROO_IGD) | 118 | if (id == VGA_SWITCHEROO_IGD) |
| 119 | return 0; | 119 | return 0; |
| 120 | 120 | ||
| 121 | return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); | 121 | return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | static int nouveau_dsm_init(void) | 124 | static int nouveau_dsm_init(void) |
| @@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) | |||
| 151 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 151 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); |
| 152 | if (!dhandle) | 152 | if (!dhandle) |
| 153 | return false; | 153 | return false; |
| 154 | |||
| 154 | status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); | 155 | status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); |
| 155 | if (ACPI_FAILURE(status)) { | 156 | if (ACPI_FAILURE(status)) { |
| 156 | return false; | 157 | return false; |
| 157 | } | 158 | } |
| 158 | 159 | ||
| 159 | ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, | 160 | ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED, |
| 160 | NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); | 161 | NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); |
| 161 | if (ret < 0) | 162 | if (ret < 0) |
| 162 | return false; | 163 | return false; |
| 163 | 164 | ||
| 164 | nouveau_dsm_priv.dhandle = dhandle; | 165 | nouveau_dsm_priv.dhandle = dhandle; |
| 165 | nouveau_dsm_priv.dsm_handle = nvidia_handle; | ||
| 166 | return true; | 166 | return true; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| @@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void) | |||
| 173 | struct pci_dev *pdev = NULL; | 173 | struct pci_dev *pdev = NULL; |
| 174 | int has_dsm = 0; | 174 | int has_dsm = 0; |
| 175 | int vga_count = 0; | 175 | int vga_count = 0; |
| 176 | |||
| 176 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | 177 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { |
| 177 | vga_count++; | 178 | vga_count++; |
| 178 | 179 | ||
| @@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void) | |||
| 180 | } | 181 | } |
| 181 | 182 | ||
| 182 | if (vga_count == 2 && has_dsm) { | 183 | if (vga_count == 2 && has_dsm) { |
| 183 | acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); | 184 | acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); |
| 184 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", | 185 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", |
| 185 | acpi_method_name); | 186 | acpi_method_name); |
| 186 | nouveau_dsm_priv.dsm_detected = true; | 187 | nouveau_dsm_priv.dsm_detected = true; |
| @@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void) | |||
| 204 | { | 205 | { |
| 205 | vga_switcheroo_unregister_handler(); | 206 | vga_switcheroo_unregister_handler(); |
| 206 | } | 207 | } |
| 208 | |||
| 209 | /* retrieve the ROM in 4k blocks */ | ||
| 210 | static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, | ||
| 211 | int offset, int len) | ||
| 212 | { | ||
| 213 | acpi_status status; | ||
| 214 | union acpi_object rom_arg_elements[2], *obj; | ||
| 215 | struct acpi_object_list rom_arg; | ||
| 216 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; | ||
| 217 | |||
| 218 | rom_arg.count = 2; | ||
| 219 | rom_arg.pointer = &rom_arg_elements[0]; | ||
| 220 | |||
| 221 | rom_arg_elements[0].type = ACPI_TYPE_INTEGER; | ||
| 222 | rom_arg_elements[0].integer.value = offset; | ||
| 223 | |||
| 224 | rom_arg_elements[1].type = ACPI_TYPE_INTEGER; | ||
| 225 | rom_arg_elements[1].integer.value = len; | ||
| 226 | |||
| 227 | status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); | ||
| 228 | if (ACPI_FAILURE(status)) { | ||
| 229 | printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); | ||
| 230 | return -ENODEV; | ||
| 231 | } | ||
| 232 | obj = (union acpi_object *)buffer.pointer; | ||
| 233 | memcpy(bios+offset, obj->buffer.pointer, len); | ||
| 234 | kfree(buffer.pointer); | ||
| 235 | return len; | ||
| 236 | } | ||
| 237 | |||
| 238 | bool nouveau_acpi_rom_supported(struct pci_dev *pdev) | ||
| 239 | { | ||
| 240 | acpi_status status; | ||
| 241 | acpi_handle dhandle, rom_handle; | ||
| 242 | |||
| 243 | if (!nouveau_dsm_priv.dsm_detected) | ||
| 244 | return false; | ||
| 245 | |||
| 246 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | ||
| 247 | if (!dhandle) | ||
| 248 | return false; | ||
| 249 | |||
| 250 | status = acpi_get_handle(dhandle, "_ROM", &rom_handle); | ||
| 251 | if (ACPI_FAILURE(status)) | ||
| 252 | return false; | ||
| 253 | |||
| 254 | nouveau_dsm_priv.rom_handle = rom_handle; | ||
| 255 | return true; | ||
| 256 | } | ||
| 257 | |||
| 258 | int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) | ||
| 259 | { | ||
| 260 | return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); | ||
| 261 | } | ||
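
One hardening note on nouveau_rom_call() above: it copies len bytes out of whatever acpi_evaluate_object() returned without checking the returned object's type or length. A more defensive sketch of the tail of that function (the extra checks are an editorial suggestion, not in the patch):

	obj = (union acpi_object *)buffer.pointer;
	/* _ROM is specified to return a buffer object; verify that, and
	 * the returned length, before copying into the shadow image. */
	if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < len) {
		printk(KERN_INFO "unexpected _ROM reply\n");
		kfree(buffer.pointer);
		return -EINVAL;
	}
	memcpy(bios + offset, obj->buffer.pointer, len);
	kfree(buffer.pointer);
	return len;
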
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index e7e69ccce5c9..e492919faf44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -178,41 +178,51 @@ out: | |||
| 178 | pci_disable_rom(dev->pdev); | 178 | pci_disable_rom(dev->pdev); |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | static void load_vbios_acpi(struct drm_device *dev, uint8_t *data) | ||
| 182 | { | ||
| 183 | int i; | ||
| 184 | int ret; | ||
| 185 | int size = 64 * 1024; | ||
| 186 | |||
| 187 | if (!nouveau_acpi_rom_supported(dev->pdev)) | ||
| 188 | return; | ||
| 189 | |||
| 190 | for (i = 0; i < (size / ROM_BIOS_PAGE); i++) { | ||
| 191 | ret = nouveau_acpi_get_bios_chunk(data, | ||
| 192 | (i * ROM_BIOS_PAGE), | ||
| 193 | ROM_BIOS_PAGE); | ||
| 194 | if (ret <= 0) | ||
| 195 | break; | ||
| 196 | } | ||
| 197 | return; | ||
| 198 | } | ||
| 199 | |||
| 181 | struct methods { | 200 | struct methods { |
| 182 | const char desc[8]; | 201 | const char desc[8]; |
| 183 | void (*loadbios)(struct drm_device *, uint8_t *); | 202 | void (*loadbios)(struct drm_device *, uint8_t *); |
| 184 | const bool rw; | 203 | const bool rw; |
| 185 | }; | 204 | }; |
| 186 | 205 | ||
| 187 | static struct methods nv04_methods[] = { | 206 | static struct methods shadow_methods[] = { |
| 188 | { "PROM", load_vbios_prom, false }, | ||
| 189 | { "PRAMIN", load_vbios_pramin, true }, | ||
| 190 | { "PCIROM", load_vbios_pci, true }, | ||
| 191 | }; | ||
| 192 | |||
| 193 | static struct methods nv50_methods[] = { | ||
| 194 | { "PRAMIN", load_vbios_pramin, true }, | 207 | { "PRAMIN", load_vbios_pramin, true }, |
| 195 | { "PROM", load_vbios_prom, false }, | 208 | { "PROM", load_vbios_prom, false }, |
| 196 | { "PCIROM", load_vbios_pci, true }, | 209 | { "PCIROM", load_vbios_pci, true }, |
| 210 | { "ACPI", load_vbios_acpi, true }, | ||
| 197 | }; | 211 | }; |
| 198 | 212 | ||
| 199 | #define METHODCNT 3 | ||
| 200 | |||
| 201 | static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | 213 | static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) |
| 202 | { | 214 | { |
| 203 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 215 | const int nr_methods = ARRAY_SIZE(shadow_methods); |
| 204 | struct methods *methods; | 216 | struct methods *methods = shadow_methods; |
| 205 | int i; | ||
| 206 | int testscore = 3; | 217 | int testscore = 3; |
| 207 | int scores[METHODCNT]; | 218 | int scores[nr_methods], i; |
| 208 | 219 | ||
| 209 | if (nouveau_vbios) { | 220 | if (nouveau_vbios) { |
| 210 | methods = nv04_methods; | 221 | for (i = 0; i < nr_methods; i++) |
| 211 | for (i = 0; i < METHODCNT; i++) | ||
| 212 | if (!strcasecmp(nouveau_vbios, methods[i].desc)) | 222 | if (!strcasecmp(nouveau_vbios, methods[i].desc)) |
| 213 | break; | 223 | break; |
| 214 | 224 | ||
| 215 | if (i < METHODCNT) { | 225 | if (i < nr_methods) { |
| 216 | NV_INFO(dev, "Attempting to use BIOS image from %s\n", | 226 | NV_INFO(dev, "Attempting to use BIOS image from %s\n", |
| 217 | methods[i].desc); | 227 | methods[i].desc); |
| 218 | 228 | ||
| @@ -224,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | |||
| 224 | NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); | 234 | NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); |
| 225 | } | 235 | } |
| 226 | 236 | ||
| 227 | if (dev_priv->card_type < NV_50) | 237 | for (i = 0; i < nr_methods; i++) { |
| 228 | methods = nv04_methods; | ||
| 229 | else | ||
| 230 | methods = nv50_methods; | ||
| 231 | |||
| 232 | for (i = 0; i < METHODCNT; i++) { | ||
| 233 | NV_TRACE(dev, "Attempting to load BIOS image from %s\n", | 238 | NV_TRACE(dev, "Attempting to load BIOS image from %s\n", |
| 234 | methods[i].desc); | 239 | methods[i].desc); |
| 235 | data[0] = data[1] = 0; /* avoid reuse of previous image */ | 240 | data[0] = data[1] = 0; /* avoid reuse of previous image */ |
| @@ -240,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | |||
| 240 | } | 245 | } |
| 241 | 246 | ||
| 242 | while (--testscore > 0) { | 247 | while (--testscore > 0) { |
| 243 | for (i = 0; i < METHODCNT; i++) { | 248 | for (i = 0; i < nr_methods; i++) { |
| 244 | if (scores[i] == testscore) { | 249 | if (scores[i] == testscore) { |
| 245 | NV_TRACE(dev, "Using BIOS image from %s\n", | 250 | NV_TRACE(dev, "Using BIOS image from %s\n", |
| 246 | methods[i].desc); | 251 | methods[i].desc); |
| @@ -814,7 +819,7 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index) | |||
| 814 | if (i2c_index == 0x81) | 819 | if (i2c_index == 0x81) |
| 815 | i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4; | 820 | i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4; |
| 816 | 821 | ||
| 817 | if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) { | 822 | if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) { |
| 818 | NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index); | 823 | NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index); |
| 819 | return NULL; | 824 | return NULL; |
| 820 | } | 825 | } |
| @@ -2807,7 +2812,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2807 | 2812 | ||
| 2808 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); | 2813 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); |
| 2809 | 2814 | ||
| 2810 | nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); | 2815 | BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", |
| 2816 | offset, gpio->tag, gpio->state_default); | ||
| 2817 | if (bios->execute) | ||
| 2818 | nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); | ||
| 2811 | 2819 | ||
| 2812 | /* The NVIDIA binary driver doesn't appear to actually do | 2820 | /* The NVIDIA binary driver doesn't appear to actually do |
| 2813 | * any of this, my VBIOS does however. | 2821 | * any of this, my VBIOS does however. |
| @@ -3897,7 +3905,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b | |||
| 3897 | 3905 | ||
| 3898 | static uint8_t * | 3906 | static uint8_t * |
| 3899 | bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, | 3907 | bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, |
| 3900 | uint16_t record, int record_len, int record_nr) | 3908 | uint16_t record, int record_len, int record_nr, |
| 3909 | bool match_link) | ||
| 3901 | { | 3910 | { |
| 3902 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 3911 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 3903 | struct nvbios *bios = &dev_priv->vbios; | 3912 | struct nvbios *bios = &dev_priv->vbios; |
| @@ -3905,12 +3914,28 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 3905 | uint16_t table; | 3914 | uint16_t table; |
| 3906 | int i, v; | 3915 | int i, v; |
| 3907 | 3916 | ||
| 3917 | switch (dcbent->type) { | ||
| 3918 | case OUTPUT_TMDS: | ||
| 3919 | case OUTPUT_LVDS: | ||
| 3920 | case OUTPUT_DP: | ||
| 3921 | break; | ||
| 3922 | default: | ||
| 3923 | match_link = false; | ||
| 3924 | break; | ||
| 3925 | } | ||
| 3926 | |||
| 3908 | for (i = 0; i < record_nr; i++, record += record_len) { | 3927 | for (i = 0; i < record_nr; i++, record += record_len) { |
| 3909 | table = ROM16(bios->data[record]); | 3928 | table = ROM16(bios->data[record]); |
| 3910 | if (!table) | 3929 | if (!table) |
| 3911 | continue; | 3930 | continue; |
| 3912 | entry = ROM32(bios->data[table]); | 3931 | entry = ROM32(bios->data[table]); |
| 3913 | 3932 | ||
| 3933 | if (match_link) { | ||
| 3934 | v = (entry & 0x00c00000) >> 22; | ||
| 3935 | if (!(v & dcbent->sorconf.link)) | ||
| 3936 | continue; | ||
| 3937 | } | ||
| 3938 | |||
| 3914 | v = (entry & 0x000f0000) >> 16; | 3939 | v = (entry & 0x000f0000) >> 16; |
| 3915 | if (!(v & dcbent->or)) | 3940 | if (!(v & dcbent->or)) |
| 3916 | continue; | 3941 | continue; |
| @@ -3952,7 +3977,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 3952 | *length = table[4]; | 3977 | *length = table[4]; |
| 3953 | return bios_output_config_match(dev, dcbent, | 3978 | return bios_output_config_match(dev, dcbent, |
| 3954 | bios->display.dp_table_ptr + table[1], | 3979 | bios->display.dp_table_ptr + table[1], |
| 3955 | table[2], table[3]); | 3980 | table[2], table[3], table[0] >= 0x21); |
| 3956 | } | 3981 | } |
| 3957 | 3982 | ||
| 3958 | int | 3983 | int |
| @@ -4041,7 +4066,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 4041 | dcbent->type, dcbent->location, dcbent->or); | 4066 | dcbent->type, dcbent->location, dcbent->or); |
| 4042 | otable = bios_output_config_match(dev, dcbent, table[1] + | 4067 | otable = bios_output_config_match(dev, dcbent, table[1] + |
| 4043 | bios->display.script_table_ptr, | 4068 | bios->display.script_table_ptr, |
| 4044 | table[2], table[3]); | 4069 | table[2], table[3], table[0] >= 0x21); |
| 4045 | if (!otable) { | 4070 | if (!otable) { |
| 4046 | NV_ERROR(dev, "Couldn't find matching output script table\n"); | 4071 | NV_ERROR(dev, "Couldn't find matching output script table\n"); |
| 4047 | return 1; | 4072 | return 1; |
| @@ -5533,12 +5558,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
| 5533 | entry->bus = (conn >> 16) & 0xf; | 5558 | entry->bus = (conn >> 16) & 0xf; |
| 5534 | entry->location = (conn >> 20) & 0x3; | 5559 | entry->location = (conn >> 20) & 0x3; |
| 5535 | entry->or = (conn >> 24) & 0xf; | 5560 | entry->or = (conn >> 24) & 0xf; |
| 5536 | /* | ||
| 5537 | * Normal entries consist of a single bit, but dual link has the | ||
| 5538 | * next most significant bit set too | ||
| 5539 | */ | ||
| 5540 | entry->duallink_possible = | ||
| 5541 | ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); | ||
| 5542 | 5561 | ||
| 5543 | switch (entry->type) { | 5562 | switch (entry->type) { |
| 5544 | case OUTPUT_ANALOG: | 5563 | case OUTPUT_ANALOG: |
| @@ -5622,6 +5641,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
| 5622 | break; | 5641 | break; |
| 5623 | } | 5642 | } |
| 5624 | 5643 | ||
| 5644 | if (dcb->version < 0x40) { | ||
| 5645 | /* Normal entries consist of a single bit, but dual link has | ||
| 5646 | * the next most significant bit set too | ||
| 5647 | */ | ||
| 5648 | entry->duallink_possible = | ||
| 5649 | ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); | ||
| 5650 | } else { | ||
| 5651 | entry->duallink_possible = (entry->sorconf.link == 3); | ||
| 5652 | } | ||
| 5653 | |||
| 5625 | /* unsure what DCB version introduces this, 3.0? */ | 5654 | /* unsure what DCB version introduces this, 3.0? */ |
| 5626 | if (conf & 0x100000) | 5655 | if (conf & 0x100000) |
| 5627 | entry->i2c_upper_default = true; | 5656 | entry->i2c_upper_default = true; |
| @@ -6205,6 +6234,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev) | |||
| 6205 | nouveau_i2c_fini(dev, entry); | 6234 | nouveau_i2c_fini(dev, entry); |
| 6206 | } | 6235 | } |
| 6207 | 6236 | ||
| 6237 | static bool | ||
| 6238 | nouveau_bios_posted(struct drm_device *dev) | ||
| 6239 | { | ||
| 6240 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 6241 | bool was_locked; | ||
| 6242 | unsigned htotal; | ||
| 6243 | |||
| 6244 | if (dev_priv->chipset >= NV_50) { | ||
| 6245 | if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && | ||
| 6246 | NVReadVgaCrtc(dev, 0, 0x1a) == 0) | ||
| 6247 | return false; | ||
| 6248 | return true; | ||
| 6249 | } | ||
| 6250 | |||
| 6251 | was_locked = NVLockVgaCrtcs(dev, false); | ||
| 6252 | htotal = NVReadVgaCrtc(dev, 0, 0x06); | ||
| 6253 | htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; | ||
| 6254 | htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; | ||
| 6255 | htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; | ||
| 6256 | htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; | ||
| 6257 | NVLockVgaCrtcs(dev, was_locked); | ||
| 6258 | return (htotal != 0); | ||
| 6259 | } | ||
| 6260 | |||
| 6208 | int | 6261 | int |
| 6209 | nouveau_bios_init(struct drm_device *dev) | 6262 | nouveau_bios_init(struct drm_device *dev) |
| 6210 | { | 6263 | { |
| @@ -6239,11 +6292,9 @@ nouveau_bios_init(struct drm_device *dev) | |||
| 6239 | bios->execute = false; | 6292 | bios->execute = false; |
| 6240 | 6293 | ||
| 6241 | /* ... unless card isn't POSTed already */ | 6294 | /* ... unless card isn't POSTed already */ |
| 6242 | if (dev_priv->card_type >= NV_10 && | 6295 | if (!nouveau_bios_posted(dev)) { |
| 6243 | NVReadVgaCrtc(dev, 0, 0x00) == 0 && | ||
| 6244 | NVReadVgaCrtc(dev, 0, 0x1a) == 0) { | ||
| 6245 | NV_INFO(dev, "Adaptor not initialised\n"); | 6296 | NV_INFO(dev, "Adaptor not initialised\n"); |
| 6246 | if (dev_priv->card_type < NV_50) { | 6297 | if (dev_priv->card_type < NV_40) { |
| 6247 | NV_ERROR(dev, "Unable to POST this chipset\n"); | 6298 | NV_ERROR(dev, "Unable to POST this chipset\n"); |
| 6248 | return -ENODEV; | 6299 | return -ENODEV; |
| 6249 | } | 6300 | } |
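
The pre-DCB-4.0 duallink_possible test that moves in the hunk above is a small bit trick worth spelling out: a normal entry's output-resource ('or') mask has a single bit set, while a dual-link-capable entry also has the next more significant bit set, so the mask equals three times its lowest set bit. A standalone restatement with worked values (editorial sketch, not in the patch):

	#include <linux/bitops.h>	/* ffs() */

	/*
	 * or = 0x1: lowest bit 0x1, 0x1 * 3 = 0x3 != 0x1 -> single link
	 * or = 0x3: lowest bit 0x1, 0x1 * 3 = 0x3 == 0x3 -> dual link
	 * or = 0x6: lowest bit 0x2, 0x2 * 3 = 0x6 == 0x6 -> dual link
	 * or = 0x4: lowest bit 0x4, 0x4 * 3 = 0xc != 0x4 -> single link
	 */
	static bool duallink_possible(unsigned int or)
	{
		return (1 << (ffs(or) - 1)) * 3 == or;
	}
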
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 266b0ff441af..149ed224c3cb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -432,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector, | |||
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | static struct drm_display_mode * | 434 | static struct drm_display_mode * |
| 435 | nouveau_connector_native_mode(struct nouveau_connector *connector) | 435 | nouveau_connector_native_mode(struct drm_connector *connector) |
| 436 | { | 436 | { |
| 437 | struct drm_device *dev = connector->base.dev; | 437 | struct drm_connector_helper_funcs *helper = connector->helper_private; |
| 438 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | ||
| 439 | struct drm_device *dev = connector->dev; | ||
| 438 | struct drm_display_mode *mode, *largest = NULL; | 440 | struct drm_display_mode *mode, *largest = NULL; |
| 439 | int high_w = 0, high_h = 0, high_v = 0; | 441 | int high_w = 0, high_h = 0, high_v = 0; |
| 440 | 442 | ||
| 441 | /* Use preferred mode if there is one.. */ | 443 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { |
| 442 | list_for_each_entry(mode, &connector->base.probed_modes, head) { | 444 | if (helper->mode_valid(connector, mode) != MODE_OK) |
| 445 | continue; | ||
| 446 | |||
| 447 | /* Use preferred mode if there is one.. */ | ||
| 443 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | 448 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { |
| 444 | NV_DEBUG_KMS(dev, "native mode from preferred\n"); | 449 | NV_DEBUG_KMS(dev, "native mode from preferred\n"); |
| 445 | return drm_mode_duplicate(dev, mode); | 450 | return drm_mode_duplicate(dev, mode); |
| 446 | } | 451 | } |
| 447 | } | ||
| 448 | 452 | ||
| 449 | /* Otherwise, take the resolution with the largest width, then height, | 453 | /* Otherwise, take the resolution with the largest width, then |
| 450 | * then vertical refresh | 454 | * height, then vertical refresh |
| 451 | */ | 455 | */ |
| 452 | list_for_each_entry(mode, &connector->base.probed_modes, head) { | ||
| 453 | if (mode->hdisplay < high_w) | 456 | if (mode->hdisplay < high_w) |
| 454 | continue; | 457 | continue; |
| 455 | 458 | ||
| @@ -553,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) | |||
| 553 | */ | 556 | */ |
| 554 | if (!nv_connector->native_mode) | 557 | if (!nv_connector->native_mode) |
| 555 | nv_connector->native_mode = | 558 | nv_connector->native_mode = |
| 556 | nouveau_connector_native_mode(nv_connector); | 559 | nouveau_connector_native_mode(connector); |
| 557 | if (ret == 0 && nv_connector->native_mode) { | 560 | if (ret == 0 && nv_connector->native_mode) { |
| 558 | struct drm_display_mode *mode; | 561 | struct drm_display_mode *mode; |
| 559 | 562 | ||
| @@ -584,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
| 584 | 587 | ||
| 585 | switch (nv_encoder->dcb->type) { | 588 | switch (nv_encoder->dcb->type) { |
| 586 | case OUTPUT_LVDS: | 589 | case OUTPUT_LVDS: |
| 587 | BUG_ON(!nv_connector->native_mode); | 590 | if (nv_connector->native_mode && |
| 588 | if (mode->hdisplay > nv_connector->native_mode->hdisplay || | 591 | (mode->hdisplay > nv_connector->native_mode->hdisplay || |
| 589 | mode->vdisplay > nv_connector->native_mode->vdisplay) | 592 | mode->vdisplay > nv_connector->native_mode->vdisplay)) |
| 590 | return MODE_PANEL; | 593 | return MODE_PANEL; |
| 591 | 594 | ||
| 592 | min_clock = 0; | 595 | min_clock = 0; |
| @@ -594,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
| 594 | break; | 597 | break; |
| 595 | case OUTPUT_TMDS: | 598 | case OUTPUT_TMDS: |
| 596 | if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || | 599 | if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || |
| 597 | (dev_priv->card_type < NV_50 && | 600 | !nv_encoder->dcb->duallink_possible) |
| 598 | !nv_encoder->dcb->duallink_possible)) | ||
| 599 | max_clock = 165000; | 601 | max_clock = 165000; |
| 600 | else | 602 | else |
| 601 | max_clock = 330000; | 603 | max_clock = 330000; |
| @@ -729,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, | |||
| 729 | if (ret == 0) | 731 | if (ret == 0) |
| 730 | goto out; | 732 | goto out; |
| 731 | nv_connector->detected_encoder = nv_encoder; | 733 | nv_connector->detected_encoder = nv_encoder; |
| 732 | nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); | 734 | nv_connector->native_mode = nouveau_connector_native_mode(connector); |
| 733 | list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) | 735 | list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) |
| 734 | drm_mode_remove(connector, mode); | 736 | drm_mode_remove(connector, mode); |
| 735 | 737 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h index 49fa7b2d257e..cb1ce2a09162 100644 --- a/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h | |||
| @@ -40,6 +40,8 @@ struct nouveau_crtc { | |||
| 40 | int sharpness; | 40 | int sharpness; |
| 41 | int last_dpms; | 41 | int last_dpms; |
| 42 | 42 | ||
| 43 | int cursor_saved_x, cursor_saved_y; | ||
| 44 | |||
| 43 | struct { | 45 | struct { |
| 44 | int cpp; | 46 | int cpp; |
| 45 | bool blanked; | 47 | bool blanked; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index c6079e36669d..273770432298 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
| @@ -175,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
| 175 | nouveau_bo_unpin(nouveau_fb->nvbo); | 175 | nouveau_bo_unpin(nouveau_fb->nvbo); |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 179 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 180 | |||
| 181 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | ||
| 182 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | ||
| 183 | } | ||
| 184 | |||
| 178 | NV_INFO(dev, "Evicting buffers...\n"); | 185 | NV_INFO(dev, "Evicting buffers...\n"); |
| 179 | ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | 186 | ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); |
| 180 | 187 | ||
| @@ -314,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
| 314 | nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); | 321 | nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); |
| 315 | } | 322 | } |
| 316 | 323 | ||
| 324 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 325 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 326 | int ret; | ||
| 327 | |||
| 328 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); | ||
| 329 | if (!ret) | ||
| 330 | ret = nouveau_bo_map(nv_crtc->cursor.nvbo); | ||
| 331 | if (ret) | ||
| 332 | NV_ERROR(dev, "Could not pin/map cursor.\n"); | ||
| 333 | } | ||
| 334 | |||
| 317 | if (dev_priv->card_type < NV_50) { | 335 | if (dev_priv->card_type < NV_50) { |
| 318 | nv04_display_restore(dev); | 336 | nv04_display_restore(dev); |
| 319 | NVLockVgaCrtcs(dev, false); | 337 | NVLockVgaCrtcs(dev, false); |
| 320 | } else | 338 | } else |
| 321 | nv50_display_init(dev); | 339 | nv50_display_init(dev); |
| 322 | 340 | ||
| 341 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 342 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 343 | |||
| 344 | nv_crtc->cursor.set_offset(nv_crtc, | ||
| 345 | nv_crtc->cursor.nvbo->bo.offset - | ||
| 346 | dev_priv->vm_vram_base); | ||
| 347 | |||
| 348 | nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, | ||
| 349 | nv_crtc->cursor_saved_y); | ||
| 350 | } | ||
| 351 | |||
| 323 | /* Force CLUT to get re-loaded during modeset */ | 352 | /* Force CLUT to get re-loaded during modeset */ |
| 324 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 353 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 325 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 354 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 5b134438effe..c69719106489 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -851,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *); | |||
| 851 | extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); | 851 | extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); |
| 852 | 852 | ||
| 853 | /* nouveau_acpi.c */ | 853 | /* nouveau_acpi.c */ |
| 854 | #define ROM_BIOS_PAGE 4096 | ||
| 854 | #if defined(CONFIG_ACPI) | 855 | #if defined(CONFIG_ACPI) |
| 855 | void nouveau_register_dsm_handler(void); | 856 | void nouveau_register_dsm_handler(void); |
| 856 | void nouveau_unregister_dsm_handler(void); | 857 | void nouveau_unregister_dsm_handler(void); |
| 858 | int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); | ||
| 859 | bool nouveau_acpi_rom_supported(struct pci_dev *pdev); | ||
| 857 | #else | 860 | #else |
| 858 | static inline void nouveau_register_dsm_handler(void) {} | 861 | static inline void nouveau_register_dsm_handler(void) {} |
| 859 | static inline void nouveau_unregister_dsm_handler(void) {} | 862 | static inline void nouveau_unregister_dsm_handler(void) {} |
| 863 | static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } | ||
| 864 | static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } | ||
| 860 | #endif | 865 | #endif |
| 861 | 866 | ||
| 862 | /* nouveau_backlight.c */ | 867 | /* nouveau_backlight.c */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index fd4a2df715e9..257ea130ae13 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -377,6 +377,7 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
| 377 | { | 377 | { |
| 378 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 378 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 379 | struct nouveau_fbdev *nfbdev; | 379 | struct nouveau_fbdev *nfbdev; |
| 380 | int ret; | ||
| 380 | 381 | ||
| 381 | nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); | 382 | nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); |
| 382 | if (!nfbdev) | 383 | if (!nfbdev) |
| @@ -386,7 +387,13 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
| 386 | dev_priv->nfbdev = nfbdev; | 387 | dev_priv->nfbdev = nfbdev; |
| 387 | nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; | 388 | nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; |
| 388 | 389 | ||
| 389 | drm_fb_helper_init(dev, &nfbdev->helper, 2, 4); | 390 | ret = drm_fb_helper_init(dev, &nfbdev->helper, |
| 391 | nv_two_heads(dev) ? 2 : 1, 4); | ||
| 392 | if (ret) { | ||
| 393 | kfree(nfbdev); | ||
| 394 | return ret; | ||
| 395 | } | ||
| 396 | |||
| 390 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); | 397 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); |
| 391 | drm_fb_helper_initial_config(&nfbdev->helper, 32); | 398 | drm_fb_helper_initial_config(&nfbdev->helper, 32); |
| 392 | return 0; | 399 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 775a7017af64..c1fd42b0dad1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev) | |||
| 540 | dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); | 540 | dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); |
| 541 | dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; | 541 | dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; |
| 542 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) | 542 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) |
| 543 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; | 543 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); |
| 544 | dev_priv->vram_sys_base <<= 12; | ||
| 544 | } | 545 | } |
| 545 | 546 | ||
| 546 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | 547 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); |
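
The vram_sys_base change above is a width fix, assuming vram_sys_base is a 64-bit field as in nouveau_drv.h: nv_rd32() returns a u32, so shifting its result left by 12 happens at 32-bit width and drops the top 12 bits before the wider assignment. Assigning first and then shifting the field performs the shift at 64 bits. In miniature (editorial sketch):

	/* Demonstration of why the read and the shift were split: the
	 * shift must happen at 64-bit width, not on the u32 register. */
	static void shift_width_demo(void)
	{
		u32 reg  = 0x00fff000;		/* example register value */
		u64 bad  = reg << 12;		/* 32-bit shift: 0x00000000ff000000 */
		u64 good = (u64)reg << 12;	/* widen first:  0x0000000fff000000 */
	}
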
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e632339c323e..b02a231d6937 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -376,12 +376,15 @@ out_err: | |||
| 376 | static void nouveau_switcheroo_set_state(struct pci_dev *pdev, | 376 | static void nouveau_switcheroo_set_state(struct pci_dev *pdev, |
| 377 | enum vga_switcheroo_state state) | 377 | enum vga_switcheroo_state state) |
| 378 | { | 378 | { |
| 379 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 379 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 380 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
| 380 | if (state == VGA_SWITCHEROO_ON) { | 381 | if (state == VGA_SWITCHEROO_ON) { |
| 381 | printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); | 382 | printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); |
| 382 | nouveau_pci_resume(pdev); | 383 | nouveau_pci_resume(pdev); |
| 384 | drm_kms_helper_poll_enable(dev); | ||
| 383 | } else { | 385 | } else { |
| 384 | printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); | 386 | printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); |
| 387 | drm_kms_helper_poll_disable(dev); | ||
| 385 | nouveau_pci_suspend(pdev, pmm); | 388 | nouveau_pci_suspend(pdev, pmm); |
| 386 | } | 389 | } |
| 387 | } | 390 | } |
| @@ -776,29 +779,24 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
| 776 | return ret; | 779 | return ret; |
| 777 | } | 780 | } |
| 778 | 781 | ||
| 779 | /* map larger RAMIN aperture on NV40 cards */ | 782 | /* Map PRAMIN BAR, or, on older cards, the aperture within BAR0 */
| 780 | dev_priv->ramin = NULL; | ||
| 781 | if (dev_priv->card_type >= NV_40) { | 783 | if (dev_priv->card_type >= NV_40) { |
| 782 | int ramin_bar = 2; | 784 | int ramin_bar = 2; |
| 783 | if (pci_resource_len(dev->pdev, ramin_bar) == 0) | 785 | if (pci_resource_len(dev->pdev, ramin_bar) == 0) |
| 784 | ramin_bar = 3; | 786 | ramin_bar = 3; |
| 785 | 787 | ||
| 786 | dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); | 788 | dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); |
| 787 | dev_priv->ramin = ioremap( | 789 | dev_priv->ramin = |
| 788 | pci_resource_start(dev->pdev, ramin_bar), | 790 | ioremap(pci_resource_start(dev->pdev, ramin_bar), |
| 789 | dev_priv->ramin_size); | 791 | dev_priv->ramin_size); |
| 790 | if (!dev_priv->ramin) { | 792 | if (!dev_priv->ramin) { |
| 791 | NV_ERROR(dev, "Failed to init RAMIN mapping, " | 793 | NV_ERROR(dev, "Failed to PRAMIN BAR"); |
| 792 | "limited instance memory available\n"); | 794 | return -ENOMEM; |
| 793 | } | 795 | } |
| 794 | } | 796 | } else { |
| 795 | |||
| 796 | /* On older cards (or if the above failed), create a map covering | ||
| 797 | * the BAR0 PRAMIN aperture */ | ||
| 798 | if (!dev_priv->ramin) { | ||
| 799 | dev_priv->ramin_size = 1 * 1024 * 1024; | 797 | dev_priv->ramin_size = 1 * 1024 * 1024; |
| 800 | dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, | 798 | dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, |
| 801 | dev_priv->ramin_size); | 799 | dev_priv->ramin_size); |
| 802 | if (!dev_priv->ramin) { | 800 | if (!dev_priv->ramin) { |
| 803 | NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); | 801 | NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); |
| 804 | return -ENOMEM; | 802 | return -ENOMEM; |
| @@ -913,6 +911,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
| 913 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: | 911 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: |
| 914 | getparam->value = dev_priv->vm_vram_base; | 912 | getparam->value = dev_priv->vm_vram_base; |
| 915 | break; | 913 | break; |
| 914 | case NOUVEAU_GETPARAM_PTIMER_TIME: | ||
| 915 | getparam->value = dev_priv->engine.timer.read(dev); | ||
| 916 | break; | ||
| 916 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | 917 | case NOUVEAU_GETPARAM_GRAPH_UNITS: |
| 917 | /* NV40 and NV50 versions are quite different, but register | 918 | /* NV40 and NV50 versions are quite different, but register |
| 918 | * address is the same. User is supposed to know the card | 919 | * address is the same. User is supposed to know the card |
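
The new NOUVEAU_GETPARAM_PTIMER_TIME case in the hunk above lets userspace sample the GPU's PTIMER clock through the existing getparam ioctl. A hedged userspace sketch using libdrm's generic command helper; it assumes headers that already carry the new define:

	#include <stdint.h>
	#include <xf86drm.h>
	#include <nouveau_drm.h>

	/* Sample the GPU's PTIMER clock; fd is an open DRM device node. */
	static uint64_t read_ptimer(int fd)
	{
		struct drm_nouveau_getparam gp = {
			.param = NOUVEAU_GETPARAM_PTIMER_TIME,
		};

		if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
					&gp, sizeof(gp)))
			return 0;	/* old kernel or ioctl failure */
		return gp.value;
	}
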
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c index 89a91b9d8b25..aaf3de3bc816 100644 --- a/drivers/gpu/drm/nouveau/nv04_cursor.c +++ b/drivers/gpu/drm/nouveau/nv04_cursor.c | |||
| @@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) | |||
| 20 | static void | 20 | static void |
| 21 | nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) | 21 | nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) |
| 22 | { | 22 | { |
| 23 | nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; | ||
| 23 | NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, | 24 | NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, |
| 24 | NV_PRAMDAC_CU_START_POS, | 25 | NV_PRAMDAC_CU_START_POS, |
| 25 | XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | | 26 | XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | |
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c index 753e723adb3a..03ad7ab14f09 100644 --- a/drivers/gpu/drm/nouveau/nv50_cursor.c +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c | |||
| @@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) | |||
| 107 | { | 107 | { |
| 108 | struct drm_device *dev = nv_crtc->base.dev; | 108 | struct drm_device *dev = nv_crtc->base.dev; |
| 109 | 109 | ||
| 110 | nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; | ||
| 110 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), | 111 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), |
| 111 | ((y & 0xFFFF) << 16) | (x & 0xFFFF)); | 112 | ((y & 0xFFFF) << 16) | (x & 0xFFFF)); |
| 112 | /* Needed to make the cursor move. */ | 113 | /* Needed to make the cursor move. */ |
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index a95e6941ba88..32611bd30e6d 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c | |||
| @@ -6,10 +6,16 @@ | |||
| 6 | int | 6 | int |
| 7 | nv50_fb_init(struct drm_device *dev) | 7 | nv50_fb_init(struct drm_device *dev) |
| 8 | { | 8 | { |
| 9 | /* This is needed to get meaningful information from 100c90 | ||
| 10 | * on traps. No idea what these values mean exactly. */ | ||
| 11 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 9 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 12 | 10 | ||
| 11 | /* Not a clue what this is exactly. Without pointing it at a | ||
| 12 | * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) | ||
| 13 | * cause IOMMU "read from address 0" errors (rh#561267) | ||
| 14 | */ | ||
| 15 | nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); | ||
| 16 | |||
| 17 | /* This is needed to get meaningful information from 100c90 | ||
| 18 | * on traps. No idea what these values mean exactly. */ | ||
| 13 | switch (dev_priv->chipset) { | 19 | switch (dev_priv->chipset) { |
| 14 | case 0x50: | 20 | case 0x50: |
| 15 | nv_wr32(dev, 0x100c90, 0x0707ff); | 21 | nv_wr32(dev, 0x100c90, 0x0707ff); |
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c index c61782b314e7..bb47ad737267 100644 --- a/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c | |||
| @@ -31,7 +31,7 @@ nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) | |||
| 31 | { | 31 | { |
| 32 | const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | 32 | const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; |
| 33 | 33 | ||
| 34 | if (gpio->line > 32) | 34 | if (gpio->line >= 32) |
| 35 | return -EINVAL; | 35 | return -EINVAL; |
| 36 | 36 | ||
| 37 | *reg = nv50_gpio_reg[gpio->line >> 3]; | 37 | *reg = nv50_gpio_reg[gpio->line >> 3]; |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index b11eaf9c5c7c..812778db76ac 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
| @@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { | |||
| 274 | int | 274 | int |
| 275 | nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) | 275 | nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) |
| 276 | { | 276 | { |
| 277 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 278 | struct nouveau_encoder *nv_encoder = NULL; | 277 | struct nouveau_encoder *nv_encoder = NULL; |
| 279 | struct drm_encoder *encoder; | 278 | struct drm_encoder *encoder; |
| 280 | bool dum; | 279 | bool dum; |
| @@ -324,11 +323,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) | |||
| 324 | int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); | 323 | int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); |
| 325 | uint32_t tmp; | 324 | uint32_t tmp; |
| 326 | 325 | ||
| 327 | if (dev_priv->chipset < 0x90 || | 326 | tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); |
| 328 | dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) | ||
| 329 | tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); | ||
| 330 | else | ||
| 331 | tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); | ||
| 332 | 327 | ||
| 333 | switch ((tmp & 0x00000f00) >> 8) { | 328 | switch ((tmp & 0x00000f00) >> 8) { |
| 334 | case 8: | 329 | case 8: |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 3c91312dea9a..84b1f2729d43 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
| @@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable | |||
| 33 | $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable | 33 | $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable |
| 34 | $(call if_changed,mkregtable) | 34 | $(call if_changed,mkregtable) |
| 35 | 35 | ||
| 36 | $(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable | ||
| 37 | $(call if_changed,mkregtable) | ||
| 38 | |||
| 36 | $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h | 39 | $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h |
| 37 | 40 | ||
| 38 | $(obj)/r200.o: $(obj)/r200_reg_safe.h | 41 | $(obj)/r200.o: $(obj)/r200_reg_safe.h |
| @@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h | |||
| 47 | 50 | ||
| 48 | $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h | 51 | $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h |
| 49 | 52 | ||
| 53 | $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h | ||
| 54 | |||
| 50 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ | 55 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ |
| 51 | radeon_irq.o r300_cmdbuf.o r600_cp.o | 56 | radeon_irq.o r300_cmdbuf.o r600_cp.o |
| 52 | # add KMS driver | 57 | # add KMS driver |
| @@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
| 60 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ | 65 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ |
| 61 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
| 62 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
| 63 | evergreen.o | 68 | evergreen.o evergreen_cs.o |
| 64 | 69 | ||
| 65 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 70 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
| 66 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 71 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index f3f2827017ef..8c2d6478a221 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -498,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 498 | if ((rdev->family == CHIP_RS600) || | 498 | if ((rdev->family == CHIP_RS600) || |
| 499 | (rdev->family == CHIP_RS690) || | 499 | (rdev->family == CHIP_RS690) || |
| 500 | (rdev->family == CHIP_RS740)) | 500 | (rdev->family == CHIP_RS740)) |
| 501 | pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | 501 | pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ |
| 502 | RADEON_PLL_PREFER_CLOSEST_LOWER); | 502 | RADEON_PLL_PREFER_CLOSEST_LOWER); |
| 503 | 503 | ||
| 504 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 504 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 8c8e4d3cbaa3..1caf625e472b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -41,7 +41,18 @@ void evergreen_fini(struct radeon_device *rdev); | |||
| 41 | 41 | ||
| 42 | void evergreen_pm_misc(struct radeon_device *rdev) | 42 | void evergreen_pm_misc(struct radeon_device *rdev) |
| 43 | { | 43 | { |
| 44 | 44 | int req_ps_idx = rdev->pm.requested_power_state_index; | |
| 45 | int req_cm_idx = rdev->pm.requested_clock_mode_index; | ||
| 46 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; | ||
| 47 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | ||
| 48 | |||
| 49 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | ||
| 50 | if (voltage->voltage != rdev->pm.current_vddc) { | ||
| 51 | radeon_atom_set_voltage(rdev, voltage->voltage); | ||
| 52 | rdev->pm.current_vddc = voltage->voltage; | ||
| 53 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | ||
| 54 | } | ||
| 55 | } | ||
| 45 | } | 56 | } |
| 46 | 57 | ||
| 47 | void evergreen_pm_prepare(struct radeon_device *rdev) | 58 | void evergreen_pm_prepare(struct radeon_device *rdev) |
| @@ -596,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
| 596 | WREG32(MC_VM_FB_LOCATION, tmp); | 607 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 597 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 608 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 598 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 609 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 599 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 610 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
| 600 | if (rdev->flags & RADEON_IS_AGP) { | 611 | if (rdev->flags & RADEON_IS_AGP) { |
| 601 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 612 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
| 602 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 613 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
| @@ -1211,11 +1222,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1211 | ps_thread_count = 128; | 1222 | ps_thread_count = 128; |
| 1212 | 1223 | ||
| 1213 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); | 1224 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); |
| 1214 | sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1225 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1215 | sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1226 | sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1216 | sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1227 | sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1217 | sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1228 | sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1218 | sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1229 | sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
| 1219 | 1230 | ||
| 1220 | sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | 1231 | sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); |
| 1221 | sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | 1232 | sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); |
| @@ -1249,6 +1260,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1249 | WREG32(VGT_GS_VERTEX_REUSE, 16); | 1260 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
| 1250 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | 1261 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
| 1251 | 1262 | ||
| 1263 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); | ||
| 1264 | WREG32(VGT_OUT_DEALLOC_CNTL, 16); | ||
| 1265 | |||
| 1252 | WREG32(CB_PERF_CTR0_SEL_0, 0); | 1266 | WREG32(CB_PERF_CTR0_SEL_0, 0); |
| 1253 | WREG32(CB_PERF_CTR0_SEL_1, 0); | 1267 | WREG32(CB_PERF_CTR0_SEL_1, 0); |
| 1254 | WREG32(CB_PERF_CTR1_SEL_0, 0); | 1268 | WREG32(CB_PERF_CTR1_SEL_0, 0); |
| @@ -1258,6 +1272,26 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
| 1258 | WREG32(CB_PERF_CTR3_SEL_0, 0); | 1272 | WREG32(CB_PERF_CTR3_SEL_0, 0); |
| 1259 | WREG32(CB_PERF_CTR3_SEL_1, 0); | 1273 | WREG32(CB_PERF_CTR3_SEL_1, 0); |
| 1260 | 1274 | ||
| 1275 | /* clear render buffer base addresses */ | ||
| 1276 | WREG32(CB_COLOR0_BASE, 0); | ||
| 1277 | WREG32(CB_COLOR1_BASE, 0); | ||
| 1278 | WREG32(CB_COLOR2_BASE, 0); | ||
| 1279 | WREG32(CB_COLOR3_BASE, 0); | ||
| 1280 | WREG32(CB_COLOR4_BASE, 0); | ||
| 1281 | WREG32(CB_COLOR5_BASE, 0); | ||
| 1282 | WREG32(CB_COLOR6_BASE, 0); | ||
| 1283 | WREG32(CB_COLOR7_BASE, 0); | ||
| 1284 | WREG32(CB_COLOR8_BASE, 0); | ||
| 1285 | WREG32(CB_COLOR9_BASE, 0); | ||
| 1286 | WREG32(CB_COLOR10_BASE, 0); | ||
| 1287 | WREG32(CB_COLOR11_BASE, 0); | ||
| 1288 | |||
| 1289 | /* set the shader const cache sizes to 0 */ | ||
| 1290 | for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) | ||
| 1291 | WREG32(i, 0); | ||
| 1292 | for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) | ||
| 1293 | WREG32(i, 0); | ||
| 1294 | |||
| 1261 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); | 1295 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); |
| 1262 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); | 1296 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); |
| 1263 | 1297 | ||
| @@ -2148,7 +2182,7 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 2148 | if (r) | 2182 | if (r) |
| 2149 | return r; | 2183 | return r; |
| 2150 | 2184 | ||
| 2151 | rdev->accel_working = false; | 2185 | rdev->accel_working = true; |
| 2152 | r = evergreen_startup(rdev); | 2186 | r = evergreen_startup(rdev); |
| 2153 | if (r) { | 2187 | if (r) { |
| 2154 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 2188 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
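
On the sq_thread_resource_mgmt lines above: the reparenthesisation moves the '* 8' inside the packing macros so the thread count is rounded down to a multiple of 8 before being packed into its register field. Assuming the NUM_*_THREADS() macros are plain shifts, as in the r600 family, the two forms happen to yield the same bits, but the old form multiplied the already-packed register word, which would corrupt the neighbouring field if a macro ever masked its argument. Schematically (the macro here is illustrative, not the real register layout):

	/* Illustrative packing macro in the style of evergreend.h (assumed). */
	#define NUM_VS_THREADS(x)	((x) << 8)

	unsigned int n = 100;				/* example thread budget */
	u32 old_form = NUM_VS_THREADS(n / 8) * 8;	/* (12 << 8) * 8 */
	u32 new_form = NUM_VS_THREADS((n / 8) * 8);	/* 96 << 8 */
	/* Both equal 0x6000 with a bare shift; they diverge as soon as
	 * the macro masks its field, so the new form states the intent. */
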
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c new file mode 100644 index 000000000000..345a75a03c96 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
| @@ -0,0 +1,1354 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include "drmP.h" | ||
| 29 | #include "radeon.h" | ||
| 30 | #include "evergreend.h" | ||
| 31 | #include "evergreen_reg_safe.h" | ||
| 32 | |||
| 33 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, | ||
| 34 | struct radeon_cs_reloc **cs_reloc); | ||
| 35 | |||
| 36 | struct evergreen_cs_track { | ||
| 37 | u32 group_size; | ||
| 38 | u32 nbanks; | ||
| 39 | u32 npipes; | ||
| 40 | /* values we track */ | ||
| 41 | u32 nsamples; | ||
| 42 | u32 cb_color_base_last[12]; | ||
| 43 | struct radeon_bo *cb_color_bo[12]; | ||
| 44 | u32 cb_color_bo_offset[12]; | ||
| 45 | struct radeon_bo *cb_color_fmask_bo[8]; | ||
| 46 | struct radeon_bo *cb_color_cmask_bo[8]; | ||
| 47 | u32 cb_color_info[12]; | ||
| 48 | u32 cb_color_view[12]; | ||
| 49 | u32 cb_color_pitch_idx[12]; | ||
| 50 | u32 cb_color_slice_idx[12]; | ||
| 51 | u32 cb_color_dim_idx[12]; | ||
| 52 | u32 cb_color_dim[12]; | ||
| 53 | u32 cb_color_pitch[12]; | ||
| 54 | u32 cb_color_slice[12]; | ||
| 55 | u32 cb_color_cmask_slice[8]; | ||
| 56 | u32 cb_color_fmask_slice[8]; | ||
| 57 | u32 cb_target_mask; | ||
| 58 | u32 cb_shader_mask; | ||
| 59 | u32 vgt_strmout_config; | ||
| 60 | u32 vgt_strmout_buffer_config; | ||
| 61 | u32 db_depth_control; | ||
| 62 | u32 db_depth_view; | ||
| 63 | u32 db_depth_size; | ||
| 64 | u32 db_depth_size_idx; | ||
| 65 | u32 db_z_info; | ||
| 66 | u32 db_z_idx; | ||
| 67 | u32 db_z_read_offset; | ||
| 68 | u32 db_z_write_offset; | ||
| 69 | struct radeon_bo *db_z_read_bo; | ||
| 70 | struct radeon_bo *db_z_write_bo; | ||
| 71 | u32 db_s_info; | ||
| 72 | u32 db_s_idx; | ||
| 73 | u32 db_s_read_offset; | ||
| 74 | u32 db_s_write_offset; | ||
| 75 | struct radeon_bo *db_s_read_bo; | ||
| 76 | struct radeon_bo *db_s_write_bo; | ||
| 77 | }; | ||
| 78 | |||
| 79 | static void evergreen_cs_track_init(struct evergreen_cs_track *track) | ||
| 80 | { | ||
| 81 | int i; | ||
| 82 | |||
| 83 | for (i = 0; i < 8; i++) { | ||
| 84 | track->cb_color_fmask_bo[i] = NULL; | ||
| 85 | track->cb_color_cmask_bo[i] = NULL; | ||
| 86 | track->cb_color_cmask_slice[i] = 0; | ||
| 87 | track->cb_color_fmask_slice[i] = 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | for (i = 0; i < 12; i++) { | ||
| 91 | track->cb_color_base_last[i] = 0; | ||
| 92 | track->cb_color_bo[i] = NULL; | ||
| 93 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | ||
| 94 | track->cb_color_info[i] = 0; | ||
| 95 | track->cb_color_view[i] = 0; | ||
| 96 | track->cb_color_pitch_idx[i] = 0; | ||
| 97 | track->cb_color_slice_idx[i] = 0; | ||
| 98 | track->cb_color_dim[i] = 0; | ||
| 99 | track->cb_color_pitch[i] = 0; | ||
| 100 | track->cb_color_slice[i] = 0; | ||
| 102 | } | ||
| 103 | track->cb_target_mask = 0xFFFFFFFF; | ||
| 104 | track->cb_shader_mask = 0xFFFFFFFF; | ||
| 105 | |||
| 106 | track->db_depth_view = 0xFFFFC000; | ||
| 107 | track->db_depth_size = 0xFFFFFFFF; | ||
| 108 | track->db_depth_size_idx = 0; | ||
| 109 | track->db_depth_control = 0xFFFFFFFF; | ||
| 110 | track->db_z_info = 0xFFFFFFFF; | ||
| 111 | track->db_z_idx = 0xFFFFFFFF; | ||
| 112 | track->db_z_read_offset = 0xFFFFFFFF; | ||
| 113 | track->db_z_write_offset = 0xFFFFFFFF; | ||
| 114 | track->db_z_read_bo = NULL; | ||
| 115 | track->db_z_write_bo = NULL; | ||
| 116 | track->db_s_info = 0xFFFFFFFF; | ||
| 117 | track->db_s_idx = 0xFFFFFFFF; | ||
| 118 | track->db_s_read_offset = 0xFFFFFFFF; | ||
| 119 | track->db_s_write_offset = 0xFFFFFFFF; | ||
| 120 | track->db_s_read_bo = NULL; | ||
| 121 | track->db_s_write_bo = NULL; | ||
| 122 | } | ||
| 123 | |||
| 124 | static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | ||
| 125 | { | ||
| 126 | /* XXX fill in */ | ||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | static int evergreen_cs_track_check(struct radeon_cs_parser *p) | ||
| 131 | { | ||
| 132 | struct evergreen_cs_track *track = p->track; | ||
| 133 | |||
| 134 | /* we don't support stream out buffer yet */ | ||
| 135 | if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) { | ||
| 136 | dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); | ||
| 137 | return -EINVAL; | ||
| 138 | } | ||
| 139 | |||
| 140 | /* XXX fill in */ | ||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 144 | /** | ||
| 145 | * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet | ||
| 146 | * @p: parser structure holding parsing context. | ||
| 147 | * @pkt: where to store packet information | ||
| 148 | * | ||
| 149 | * Assumes that chunk_ib_index is properly set. Returns -EINVAL | ||
| 150 | * if the packet is bigger than the remaining ib size, or if the packet type is unknown. | ||
| 151 | **/ | ||
| 152 | int evergreen_cs_packet_parse(struct radeon_cs_parser *p, | ||
| 153 | struct radeon_cs_packet *pkt, | ||
| 154 | unsigned idx) | ||
| 155 | { | ||
| 156 | struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
| 157 | uint32_t header; | ||
| 158 | |||
| 159 | if (idx >= ib_chunk->length_dw) { | ||
| 160 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", | ||
| 161 | idx, ib_chunk->length_dw); | ||
| 162 | return -EINVAL; | ||
| 163 | } | ||
| 164 | header = radeon_get_ib_value(p, idx); | ||
| 165 | pkt->idx = idx; | ||
| 166 | pkt->type = CP_PACKET_GET_TYPE(header); | ||
| 167 | pkt->count = CP_PACKET_GET_COUNT(header); | ||
| 168 | pkt->one_reg_wr = 0; | ||
| 169 | switch (pkt->type) { | ||
| 170 | case PACKET_TYPE0: | ||
| 171 | pkt->reg = CP_PACKET0_GET_REG(header); | ||
| 172 | break; | ||
| 173 | case PACKET_TYPE3: | ||
| 174 | pkt->opcode = CP_PACKET3_GET_OPCODE(header); | ||
| 175 | break; | ||
| 176 | case PACKET_TYPE2: | ||
| 177 | pkt->count = -1; | ||
| 178 | break; | ||
| 179 | default: | ||
| 180 | DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); | ||
| 181 | return -EINVAL; | ||
| 182 | } | ||
| 183 | if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { | ||
| 184 | DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", | ||
| 185 | pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); | ||
| 186 | return -EINVAL; | ||
| 187 | } | ||
| 188 | return 0; | ||
| 189 | } | ||
| 190 | |||
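evergreen_cs_packet_parse() above splits a PM4 header into its type, count and (for type-3 packets) opcode fields with the CP_PACKET_GET_* macros added to evergreend.h later in this patch. A self-contained sketch of that decode, using a hand-built type-3 NOP header (opcode 0x10, per the PACKET3_NOP define below); note that a count field of n means n + 1 payload dwords follow the header, which is why the parser advances by pkt.count + 2:

#include <assert.h>
#include <stdint.h>

#define CP_PACKET_GET_TYPE(h)           (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)          (((h) >> 16) & 0x3FFF)
#define CP_PACKET3_GET_OPCODE(h)        (((h) >> 8) & 0xFF)

int main(void)
{
        /* type-3 NOP with count = 1: header + 2 payload dwords = 3 dwords */
        uint32_t header = (3u << 30) | (1u << 16) | (0x10u << 8);

        assert(CP_PACKET_GET_TYPE(header) == 3);        /* PACKET_TYPE3 */
        assert(CP_PACKET_GET_COUNT(header) == 1);
        assert(CP_PACKET3_GET_OPCODE(header) == 0x10);  /* PACKET3_NOP */
        return 0;
}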
| 191 | /** | ||
| 192 | * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3 | ||
| 193 | * @p: parser structure holding parsing context. | ||
| 194 | * @cs_reloc: where to store the reloc information | ||
| 195 | * | ||
| 196 | * Check that the next packet is a relocation packet3 NOP and fetch the | ||
| 197 | * matching reloc entry from the relocation chunk. | ||
| 201 | **/ | ||
| 202 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, | ||
| 203 | struct radeon_cs_reloc **cs_reloc) | ||
| 204 | { | ||
| 205 | struct radeon_cs_chunk *relocs_chunk; | ||
| 206 | struct radeon_cs_packet p3reloc; | ||
| 207 | unsigned idx; | ||
| 208 | int r; | ||
| 209 | |||
| 210 | if (p->chunk_relocs_idx == -1) { | ||
| 211 | DRM_ERROR("No relocation chunk !\n"); | ||
| 212 | return -EINVAL; | ||
| 213 | } | ||
| 214 | *cs_reloc = NULL; | ||
| 215 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | ||
| 216 | r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); | ||
| 217 | if (r) { | ||
| 218 | return r; | ||
| 219 | } | ||
| 220 | p->idx += p3reloc.count + 2; | ||
| 221 | if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { | ||
| 222 | DRM_ERROR("No packet3 for relocation for packet at %d.\n", | ||
| 223 | p3reloc.idx); | ||
| 224 | return -EINVAL; | ||
| 225 | } | ||
| 226 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); | ||
| 227 | if (idx >= relocs_chunk->length_dw) { | ||
| 228 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | ||
| 229 | idx, relocs_chunk->length_dw); | ||
| 230 | return -EINVAL; | ||
| 231 | } | ||
| 232 | /* FIXME: we assume reloc size is 4 dwords */ | ||
| 233 | *cs_reloc = p->relocs_ptr[(idx / 4)]; | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | |||
| 237 | /** | ||
| 238 | * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc | ||
| 239 | * @p: parser structure holding parsing context. | ||
| 240 | * | ||
| 241 | * Check whether the next packet is a relocation packet3 NOP, without | ||
| 242 | * advancing the parser index. | ||
| 243 | **/ | ||
| 244 | static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) | ||
| 245 | { | ||
| 246 | struct radeon_cs_packet p3reloc; | ||
| 247 | int r; | ||
| 248 | |||
| 249 | r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); | ||
| 250 | if (r) { | ||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { | ||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | return 1; | ||
| 257 | } | ||
| 258 | |||
| 259 | /** | ||
| 260 | * evergreen_cs_packet_next_vline() - parse userspace VLINE packet | ||
| 261 | * @p: parser structure holding parsing context. | ||
| 262 | * | ||
| 263 | * Userspace sends a special sequence for VLINE waits. | ||
| 264 | * PACKET0 - VLINE_START_END + value | ||
| 265 | * PACKET3 - WAIT_REG_MEM poll vline status reg | ||
| 266 | * RELOC (P3) - crtc_id in reloc. | ||
| 267 | * | ||
| 268 | * This function parses this and relocates the VLINE START END | ||
| 269 | * and WAIT_REG_MEM packets to the correct crtc. | ||
| 270 | * It also detects a switched off crtc and nulls out the | ||
| 271 | * wait in that case. | ||
| 272 | */ | ||
| 273 | static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) | ||
| 274 | { | ||
| 275 | struct drm_mode_object *obj; | ||
| 276 | struct drm_crtc *crtc; | ||
| 277 | struct radeon_crtc *radeon_crtc; | ||
| 278 | struct radeon_cs_packet p3reloc, wait_reg_mem; | ||
| 279 | int crtc_id; | ||
| 280 | int r; | ||
| 281 | uint32_t header, h_idx, reg, wait_reg_mem_info; | ||
| 282 | volatile uint32_t *ib; | ||
| 283 | |||
| 284 | ib = p->ib->ptr; | ||
| 285 | |||
| 286 | /* parse the WAIT_REG_MEM */ | ||
| 287 | r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx); | ||
| 288 | if (r) | ||
| 289 | return r; | ||
| 290 | |||
| 291 | /* check it's a WAIT_REG_MEM */ | ||
| 292 | if (wait_reg_mem.type != PACKET_TYPE3 || | ||
| 293 | wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { | ||
| 294 | DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); | ||
| 295 | r = -EINVAL; | ||
| 296 | return r; | ||
| 297 | } | ||
| 298 | |||
| 299 | wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); | ||
| 300 | /* bit 4 is reg (0) or mem (1) */ | ||
| 301 | if (wait_reg_mem_info & 0x10) { | ||
| 302 | DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); | ||
| 303 | r = -EINVAL; | ||
| 304 | return r; | ||
| 305 | } | ||
| 306 | /* waiting for value to be equal */ | ||
| 307 | if ((wait_reg_mem_info & 0x7) != 0x3) { | ||
| 308 | DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); | ||
| 309 | r = -EINVAL; | ||
| 310 | return r; | ||
| 311 | } | ||
| 312 | if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { | ||
| 313 | DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); | ||
| 314 | r = -EINVAL; | ||
| 315 | return r; | ||
| 316 | } | ||
| 317 | |||
| 318 | if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { | ||
| 319 | DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); | ||
| 320 | r = -EINVAL; | ||
| 321 | return r; | ||
| 322 | } | ||
| 323 | |||
| 324 | /* jump over the NOP */ | ||
| 325 | r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); | ||
| 326 | if (r) | ||
| 327 | return r; | ||
| 328 | |||
| 329 | h_idx = p->idx - 2; | ||
| 330 | p->idx += wait_reg_mem.count + 2; | ||
| 331 | p->idx += p3reloc.count + 2; | ||
| 332 | |||
| 333 | header = radeon_get_ib_value(p, h_idx); | ||
| 334 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | ||
| 335 | reg = CP_PACKET0_GET_REG(header); | ||
| 336 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | ||
| 337 | if (!obj) { | ||
| 338 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | ||
| 339 | r = -EINVAL; | ||
| 340 | goto out; | ||
| 341 | } | ||
| 342 | crtc = obj_to_crtc(obj); | ||
| 343 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 344 | crtc_id = radeon_crtc->crtc_id; | ||
| 345 | |||
| 346 | if (!crtc->enabled) { | ||
| 347 | /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ | ||
| 348 | ib[h_idx + 2] = PACKET2(0); | ||
| 349 | ib[h_idx + 3] = PACKET2(0); | ||
| 350 | ib[h_idx + 4] = PACKET2(0); | ||
| 351 | ib[h_idx + 5] = PACKET2(0); | ||
| 352 | ib[h_idx + 6] = PACKET2(0); | ||
| 353 | ib[h_idx + 7] = PACKET2(0); | ||
| 354 | ib[h_idx + 8] = PACKET2(0); | ||
| 355 | } else { | ||
| 356 | switch (reg) { | ||
| 357 | case EVERGREEN_VLINE_START_END: | ||
| 358 | header &= ~R600_CP_PACKET0_REG_MASK; | ||
| 359 | header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2; | ||
| 360 | ib[h_idx] = header; | ||
| 361 | ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2; | ||
| 362 | break; | ||
| 363 | default: | ||
| 364 | DRM_ERROR("unknown crtc reloc\n"); | ||
| 365 | r = -EINVAL; | ||
| 366 | goto out; | ||
| 367 | } | ||
| 368 | } | ||
| 369 | out: | ||
| 370 | return r; | ||
| 371 | } | ||
| 372 | |||
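The header fixup at the end of the function relies on the PACKET0 layout: a type-0 header stores the target register's byte offset divided by four in its low 16 bits, so retargeting the write to another CRTC only requires clearing those bits and inserting the offset of that CRTC's register copy. A sketch of the fixup, assuming R600_CP_PACKET0_REG_MASK from r600d.h is the 0xFFFF low-bits mask:

#include <stdint.h>

/* Retarget a type-0 register write to another CRTC's copy of the same
 * register. The 0xFFFF mask mirrors R600_CP_PACKET0_REG_MASK (assumed). */
static uint32_t retarget_packet0(uint32_t header, uint32_t reg,
                                 uint32_t crtc_offset)
{
        header &= ~0xFFFFu;                     /* drop old register field */
        header |= (reg + crtc_offset) >> 2;     /* byte offset -> dwords */
        return header;
}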
| 373 | static int evergreen_packet0_check(struct radeon_cs_parser *p, | ||
| 374 | struct radeon_cs_packet *pkt, | ||
| 375 | unsigned idx, unsigned reg) | ||
| 376 | { | ||
| 377 | int r; | ||
| 378 | |||
| 379 | switch (reg) { | ||
| 380 | case EVERGREEN_VLINE_START_END: | ||
| 381 | r = evergreen_cs_packet_parse_vline(p); | ||
| 382 | if (r) { | ||
| 383 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
| 384 | idx, reg); | ||
| 385 | return r; | ||
| 386 | } | ||
| 387 | break; | ||
| 388 | default: | ||
| 389 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | ||
| 390 | reg, idx); | ||
| 391 | return -EINVAL; | ||
| 392 | } | ||
| 393 | return 0; | ||
| 394 | } | ||
| 395 | |||
| 396 | static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, | ||
| 397 | struct radeon_cs_packet *pkt) | ||
| 398 | { | ||
| 399 | unsigned reg, i; | ||
| 400 | unsigned idx; | ||
| 401 | int r; | ||
| 402 | |||
| 403 | idx = pkt->idx + 1; | ||
| 404 | reg = pkt->reg; | ||
| 405 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { | ||
| 406 | r = evergreen_packet0_check(p, pkt, idx, reg); | ||
| 407 | if (r) { | ||
| 408 | return r; | ||
| 409 | } | ||
| 410 | } | ||
| 411 | return 0; | ||
| 412 | } | ||
| 413 | |||
| 414 | /** | ||
| 415 | * evergreen_cs_check_reg() - check if register is authorized or not | ||
| 416 | * @parser: parser structure holding parsing context | ||
| 417 | * @reg: register we are testing | ||
| 418 | * @idx: index into the cs buffer | ||
| 419 | * | ||
| 420 | * This function will test against evergreen_reg_safe_bm and return 0 | ||
| 421 | * if the register is safe. If the register is not flagged as safe, this function | ||
| 422 | * will test it against a list of registers needing special handling. | ||
| 423 | */ | ||
| 424 | static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | ||
| 425 | { | ||
| 426 | struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; | ||
| 427 | struct radeon_cs_reloc *reloc; | ||
| 428 | u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); | ||
| 429 | u32 m, i, tmp, *ib; | ||
| 430 | int r; | ||
| 431 | |||
| 432 | i = (reg >> 7); | ||
| 433 | if (i >= last_reg) { | ||
| 434 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
| 435 | return -EINVAL; | ||
| 436 | } | ||
| 437 | m = 1 << ((reg >> 2) & 31); | ||
| 438 | if (!(evergreen_reg_safe_bm[i] & m)) | ||
| 439 | return 0; | ||
| 440 | ib = p->ib->ptr; | ||
| 441 | switch (reg) { | ||
| 442 | /* force the following regs to 0 in an attempt to disable the out buffer, | ||
| 443 | * which we will need to understand better in order to perform | ||
| 444 | * security checks on it (Jerome) | ||
| 445 | */ | ||
| 446 | case SQ_ESGS_RING_SIZE: | ||
| 447 | case SQ_GSVS_RING_SIZE: | ||
| 448 | case SQ_ESTMP_RING_SIZE: | ||
| 449 | case SQ_GSTMP_RING_SIZE: | ||
| 450 | case SQ_HSTMP_RING_SIZE: | ||
| 451 | case SQ_LSTMP_RING_SIZE: | ||
| 452 | case SQ_PSTMP_RING_SIZE: | ||
| 453 | case SQ_VSTMP_RING_SIZE: | ||
| 454 | case SQ_ESGS_RING_ITEMSIZE: | ||
| 455 | case SQ_ESTMP_RING_ITEMSIZE: | ||
| 456 | case SQ_GSTMP_RING_ITEMSIZE: | ||
| 457 | case SQ_GSVS_RING_ITEMSIZE: | ||
| 458 | case SQ_GS_VERT_ITEMSIZE: | ||
| 459 | case SQ_GS_VERT_ITEMSIZE_1: | ||
| 460 | case SQ_GS_VERT_ITEMSIZE_2: | ||
| 461 | case SQ_GS_VERT_ITEMSIZE_3: | ||
| 462 | case SQ_GSVS_RING_OFFSET_1: | ||
| 463 | case SQ_GSVS_RING_OFFSET_2: | ||
| 464 | case SQ_GSVS_RING_OFFSET_3: | ||
| 465 | case SQ_HSTMP_RING_ITEMSIZE: | ||
| 466 | case SQ_LSTMP_RING_ITEMSIZE: | ||
| 467 | case SQ_PSTMP_RING_ITEMSIZE: | ||
| 468 | case SQ_VSTMP_RING_ITEMSIZE: | ||
| 469 | case VGT_TF_RING_SIZE: | ||
| 470 | /* get the value to populate the IB; don't remove */ | ||
| 471 | tmp = radeon_get_ib_value(p, idx); | ||
| 472 | ib[idx] = 0; | ||
| 473 | break; | ||
| 474 | case DB_DEPTH_CONTROL: | ||
| 475 | track->db_depth_control = radeon_get_ib_value(p, idx); | ||
| 476 | break; | ||
| 477 | case DB_Z_INFO: | ||
| 478 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 479 | if (r) { | ||
| 480 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 481 | "0x%04X\n", reg); | ||
| 482 | return -EINVAL; | ||
| 483 | } | ||
| 484 | track->db_z_info = radeon_get_ib_value(p, idx); | ||
| 485 | ib[idx] &= ~Z_ARRAY_MODE(0xf); | ||
| 486 | track->db_z_info &= ~Z_ARRAY_MODE(0xf); | ||
| 487 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
| 488 | ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 489 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 490 | } else { | ||
| 491 | ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 492 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 493 | } | ||
| 494 | break; | ||
| 495 | case DB_STENCIL_INFO: | ||
| 496 | track->db_s_info = radeon_get_ib_value(p, idx); | ||
| 497 | break; | ||
| 498 | case DB_DEPTH_VIEW: | ||
| 499 | track->db_depth_view = radeon_get_ib_value(p, idx); | ||
| 500 | break; | ||
| 501 | case DB_DEPTH_SIZE: | ||
| 502 | track->db_depth_size = radeon_get_ib_value(p, idx); | ||
| 503 | track->db_depth_size_idx = idx; | ||
| 504 | break; | ||
| 505 | case DB_Z_READ_BASE: | ||
| 506 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 507 | if (r) { | ||
| 508 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 509 | "0x%04X\n", reg); | ||
| 510 | return -EINVAL; | ||
| 511 | } | ||
| 512 | track->db_z_read_offset = radeon_get_ib_value(p, idx); | ||
| 513 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 514 | track->db_z_read_bo = reloc->robj; | ||
| 515 | break; | ||
| 516 | case DB_Z_WRITE_BASE: | ||
| 517 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 518 | if (r) { | ||
| 519 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 520 | "0x%04X\n", reg); | ||
| 521 | return -EINVAL; | ||
| 522 | } | ||
| 523 | track->db_z_write_offset = radeon_get_ib_value(p, idx); | ||
| 524 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 525 | track->db_z_write_bo = reloc->robj; | ||
| 526 | break; | ||
| 527 | case DB_STENCIL_READ_BASE: | ||
| 528 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 529 | if (r) { | ||
| 530 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 531 | "0x%04X\n", reg); | ||
| 532 | return -EINVAL; | ||
| 533 | } | ||
| 534 | track->db_s_read_offset = radeon_get_ib_value(p, idx); | ||
| 535 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 536 | track->db_s_read_bo = reloc->robj; | ||
| 537 | break; | ||
| 538 | case DB_STENCIL_WRITE_BASE: | ||
| 539 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 540 | if (r) { | ||
| 541 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 542 | "0x%04X\n", reg); | ||
| 543 | return -EINVAL; | ||
| 544 | } | ||
| 545 | track->db_s_write_offset = radeon_get_ib_value(p, idx); | ||
| 546 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 547 | track->db_s_write_bo = reloc->robj; | ||
| 548 | break; | ||
| 549 | case VGT_STRMOUT_CONFIG: | ||
| 550 | track->vgt_strmout_config = radeon_get_ib_value(p, idx); | ||
| 551 | break; | ||
| 552 | case VGT_STRMOUT_BUFFER_CONFIG: | ||
| 553 | track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); | ||
| 554 | break; | ||
| 555 | case CB_TARGET_MASK: | ||
| 556 | track->cb_target_mask = radeon_get_ib_value(p, idx); | ||
| 557 | break; | ||
| 558 | case CB_SHADER_MASK: | ||
| 559 | track->cb_shader_mask = radeon_get_ib_value(p, idx); | ||
| 560 | break; | ||
| 561 | case PA_SC_AA_CONFIG: | ||
| 562 | tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; | ||
| 563 | track->nsamples = 1 << tmp; | ||
| 564 | break; | ||
| 565 | case CB_COLOR0_VIEW: | ||
| 566 | case CB_COLOR1_VIEW: | ||
| 567 | case CB_COLOR2_VIEW: | ||
| 568 | case CB_COLOR3_VIEW: | ||
| 569 | case CB_COLOR4_VIEW: | ||
| 570 | case CB_COLOR5_VIEW: | ||
| 571 | case CB_COLOR6_VIEW: | ||
| 572 | case CB_COLOR7_VIEW: | ||
| 573 | tmp = (reg - CB_COLOR0_VIEW) / 0x3c; | ||
| 574 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); | ||
| 575 | break; | ||
| 576 | case CB_COLOR8_VIEW: | ||
| 577 | case CB_COLOR9_VIEW: | ||
| 578 | case CB_COLOR10_VIEW: | ||
| 579 | case CB_COLOR11_VIEW: | ||
| 580 | tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; | ||
| 581 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); | ||
| 582 | break; | ||
| 583 | case CB_COLOR0_INFO: | ||
| 584 | case CB_COLOR1_INFO: | ||
| 585 | case CB_COLOR2_INFO: | ||
| 586 | case CB_COLOR3_INFO: | ||
| 587 | case CB_COLOR4_INFO: | ||
| 588 | case CB_COLOR5_INFO: | ||
| 589 | case CB_COLOR6_INFO: | ||
| 590 | case CB_COLOR7_INFO: | ||
| 591 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 592 | if (r) { | ||
| 593 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 594 | "0x%04X\n", reg); | ||
| 595 | return -EINVAL; | ||
| 596 | } | ||
| 597 | tmp = (reg - CB_COLOR0_INFO) / 0x3c; | ||
| 598 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | ||
| 599 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
| 600 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 601 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 602 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
| 603 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 604 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 605 | } | ||
| 606 | break; | ||
| 607 | case CB_COLOR8_INFO: | ||
| 608 | case CB_COLOR9_INFO: | ||
| 609 | case CB_COLOR10_INFO: | ||
| 610 | case CB_COLOR11_INFO: | ||
| 611 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 612 | if (r) { | ||
| 613 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 614 | "0x%04X\n", reg); | ||
| 615 | return -EINVAL; | ||
| 616 | } | ||
| 617 | tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; | ||
| 618 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | ||
| 619 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
| 620 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 621 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 622 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
| 623 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 624 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 625 | } | ||
| 626 | break; | ||
| 627 | case CB_COLOR0_PITCH: | ||
| 628 | case CB_COLOR1_PITCH: | ||
| 629 | case CB_COLOR2_PITCH: | ||
| 630 | case CB_COLOR3_PITCH: | ||
| 631 | case CB_COLOR4_PITCH: | ||
| 632 | case CB_COLOR5_PITCH: | ||
| 633 | case CB_COLOR6_PITCH: | ||
| 634 | case CB_COLOR7_PITCH: | ||
| 635 | tmp = (reg - CB_COLOR0_PITCH) / 0x3c; | ||
| 636 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); | ||
| 637 | track->cb_color_pitch_idx[tmp] = idx; | ||
| 638 | break; | ||
| 639 | case CB_COLOR8_PITCH: | ||
| 640 | case CB_COLOR9_PITCH: | ||
| 641 | case CB_COLOR10_PITCH: | ||
| 642 | case CB_COLOR11_PITCH: | ||
| 643 | tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; | ||
| 644 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); | ||
| 645 | track->cb_color_pitch_idx[tmp] = idx; | ||
| 646 | break; | ||
| 647 | case CB_COLOR0_SLICE: | ||
| 648 | case CB_COLOR1_SLICE: | ||
| 649 | case CB_COLOR2_SLICE: | ||
| 650 | case CB_COLOR3_SLICE: | ||
| 651 | case CB_COLOR4_SLICE: | ||
| 652 | case CB_COLOR5_SLICE: | ||
| 653 | case CB_COLOR6_SLICE: | ||
| 654 | case CB_COLOR7_SLICE: | ||
| 655 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; | ||
| 656 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 657 | track->cb_color_slice_idx[tmp] = idx; | ||
| 658 | break; | ||
| 659 | case CB_COLOR8_SLICE: | ||
| 660 | case CB_COLOR9_SLICE: | ||
| 661 | case CB_COLOR10_SLICE: | ||
| 662 | case CB_COLOR11_SLICE: | ||
| 663 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; | ||
| 664 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 665 | track->cb_color_slice_idx[tmp] = idx; | ||
| 666 | break; | ||
| 667 | case CB_COLOR0_ATTRIB: | ||
| 668 | case CB_COLOR1_ATTRIB: | ||
| 669 | case CB_COLOR2_ATTRIB: | ||
| 670 | case CB_COLOR3_ATTRIB: | ||
| 671 | case CB_COLOR4_ATTRIB: | ||
| 672 | case CB_COLOR5_ATTRIB: | ||
| 673 | case CB_COLOR6_ATTRIB: | ||
| 674 | case CB_COLOR7_ATTRIB: | ||
| 675 | case CB_COLOR8_ATTRIB: | ||
| 676 | case CB_COLOR9_ATTRIB: | ||
| 677 | case CB_COLOR10_ATTRIB: | ||
| 678 | case CB_COLOR11_ATTRIB: | ||
| 679 | break; | ||
| 680 | case CB_COLOR0_DIM: | ||
| 681 | case CB_COLOR1_DIM: | ||
| 682 | case CB_COLOR2_DIM: | ||
| 683 | case CB_COLOR3_DIM: | ||
| 684 | case CB_COLOR4_DIM: | ||
| 685 | case CB_COLOR5_DIM: | ||
| 686 | case CB_COLOR6_DIM: | ||
| 687 | case CB_COLOR7_DIM: | ||
| 688 | tmp = (reg - CB_COLOR0_DIM) / 0x3c; | ||
| 689 | track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); | ||
| 690 | track->cb_color_dim_idx[tmp] = idx; | ||
| 691 | break; | ||
| 692 | case CB_COLOR8_DIM: | ||
| 693 | case CB_COLOR9_DIM: | ||
| 694 | case CB_COLOR10_DIM: | ||
| 695 | case CB_COLOR11_DIM: | ||
| 696 | tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8; | ||
| 697 | track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); | ||
| 698 | track->cb_color_dim_idx[tmp] = idx; | ||
| 699 | break; | ||
| 700 | case CB_COLOR0_FMASK: | ||
| 701 | case CB_COLOR1_FMASK: | ||
| 702 | case CB_COLOR2_FMASK: | ||
| 703 | case CB_COLOR3_FMASK: | ||
| 704 | case CB_COLOR4_FMASK: | ||
| 705 | case CB_COLOR5_FMASK: | ||
| 706 | case CB_COLOR6_FMASK: | ||
| 707 | case CB_COLOR7_FMASK: | ||
| 708 | tmp = (reg - CB_COLOR0_FMASK) / 0x3c; | ||
| 709 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 710 | if (r) { | ||
| 711 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | ||
| 712 | return -EINVAL; | ||
| 713 | } | ||
| 714 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 715 | track->cb_color_fmask_bo[tmp] = reloc->robj; | ||
| 716 | break; | ||
| 717 | case CB_COLOR0_CMASK: | ||
| 718 | case CB_COLOR1_CMASK: | ||
| 719 | case CB_COLOR2_CMASK: | ||
| 720 | case CB_COLOR3_CMASK: | ||
| 721 | case CB_COLOR4_CMASK: | ||
| 722 | case CB_COLOR5_CMASK: | ||
| 723 | case CB_COLOR6_CMASK: | ||
| 724 | case CB_COLOR7_CMASK: | ||
| 725 | tmp = (reg - CB_COLOR0_CMASK) / 0x3c; | ||
| 726 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 727 | if (r) { | ||
| 728 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | ||
| 729 | return -EINVAL; | ||
| 730 | } | ||
| 731 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 732 | track->cb_color_cmask_bo[tmp] = reloc->robj; | ||
| 733 | break; | ||
| 734 | case CB_COLOR0_FMASK_SLICE: | ||
| 735 | case CB_COLOR1_FMASK_SLICE: | ||
| 736 | case CB_COLOR2_FMASK_SLICE: | ||
| 737 | case CB_COLOR3_FMASK_SLICE: | ||
| 738 | case CB_COLOR4_FMASK_SLICE: | ||
| 739 | case CB_COLOR5_FMASK_SLICE: | ||
| 740 | case CB_COLOR6_FMASK_SLICE: | ||
| 741 | case CB_COLOR7_FMASK_SLICE: | ||
| 742 | tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c; | ||
| 743 | track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 744 | break; | ||
| 745 | case CB_COLOR0_CMASK_SLICE: | ||
| 746 | case CB_COLOR1_CMASK_SLICE: | ||
| 747 | case CB_COLOR2_CMASK_SLICE: | ||
| 748 | case CB_COLOR3_CMASK_SLICE: | ||
| 749 | case CB_COLOR4_CMASK_SLICE: | ||
| 750 | case CB_COLOR5_CMASK_SLICE: | ||
| 751 | case CB_COLOR6_CMASK_SLICE: | ||
| 752 | case CB_COLOR7_CMASK_SLICE: | ||
| 753 | tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c; | ||
| 754 | track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 755 | break; | ||
| 756 | case CB_COLOR0_BASE: | ||
| 757 | case CB_COLOR1_BASE: | ||
| 758 | case CB_COLOR2_BASE: | ||
| 759 | case CB_COLOR3_BASE: | ||
| 760 | case CB_COLOR4_BASE: | ||
| 761 | case CB_COLOR5_BASE: | ||
| 762 | case CB_COLOR6_BASE: | ||
| 763 | case CB_COLOR7_BASE: | ||
| 764 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 765 | if (r) { | ||
| 766 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 767 | "0x%04X\n", reg); | ||
| 768 | return -EINVAL; | ||
| 769 | } | ||
| 770 | tmp = (reg - CB_COLOR0_BASE) / 0x3c; | ||
| 771 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | ||
| 772 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 773 | track->cb_color_base_last[tmp] = ib[idx]; | ||
| 774 | track->cb_color_bo[tmp] = reloc->robj; | ||
| 775 | break; | ||
| 776 | case CB_COLOR8_BASE: | ||
| 777 | case CB_COLOR9_BASE: | ||
| 778 | case CB_COLOR10_BASE: | ||
| 779 | case CB_COLOR11_BASE: | ||
| 780 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 781 | if (r) { | ||
| 782 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 783 | "0x%04X\n", reg); | ||
| 784 | return -EINVAL; | ||
| 785 | } | ||
| 786 | tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; | ||
| 787 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | ||
| 788 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 789 | track->cb_color_base_last[tmp] = ib[idx]; | ||
| 790 | track->cb_color_bo[tmp] = reloc->robj; | ||
| 791 | break; | ||
| 792 | case CB_IMMED0_BASE: | ||
| 793 | case CB_IMMED1_BASE: | ||
| 794 | case CB_IMMED2_BASE: | ||
| 795 | case CB_IMMED3_BASE: | ||
| 796 | case CB_IMMED4_BASE: | ||
| 797 | case CB_IMMED5_BASE: | ||
| 798 | case CB_IMMED6_BASE: | ||
| 799 | case CB_IMMED7_BASE: | ||
| 800 | case CB_IMMED8_BASE: | ||
| 801 | case CB_IMMED9_BASE: | ||
| 802 | case CB_IMMED10_BASE: | ||
| 803 | case CB_IMMED11_BASE: | ||
| 804 | case DB_HTILE_DATA_BASE: | ||
| 805 | case SQ_PGM_START_FS: | ||
| 806 | case SQ_PGM_START_ES: | ||
| 807 | case SQ_PGM_START_VS: | ||
| 808 | case SQ_PGM_START_GS: | ||
| 809 | case SQ_PGM_START_PS: | ||
| 810 | case SQ_PGM_START_HS: | ||
| 811 | case SQ_PGM_START_LS: | ||
| 812 | case GDS_ADDR_BASE: | ||
| 813 | case SQ_CONST_MEM_BASE: | ||
| 814 | case SQ_ALU_CONST_CACHE_GS_0: | ||
| 815 | case SQ_ALU_CONST_CACHE_GS_1: | ||
| 816 | case SQ_ALU_CONST_CACHE_GS_2: | ||
| 817 | case SQ_ALU_CONST_CACHE_GS_3: | ||
| 818 | case SQ_ALU_CONST_CACHE_GS_4: | ||
| 819 | case SQ_ALU_CONST_CACHE_GS_5: | ||
| 820 | case SQ_ALU_CONST_CACHE_GS_6: | ||
| 821 | case SQ_ALU_CONST_CACHE_GS_7: | ||
| 822 | case SQ_ALU_CONST_CACHE_GS_8: | ||
| 823 | case SQ_ALU_CONST_CACHE_GS_9: | ||
| 824 | case SQ_ALU_CONST_CACHE_GS_10: | ||
| 825 | case SQ_ALU_CONST_CACHE_GS_11: | ||
| 826 | case SQ_ALU_CONST_CACHE_GS_12: | ||
| 827 | case SQ_ALU_CONST_CACHE_GS_13: | ||
| 828 | case SQ_ALU_CONST_CACHE_GS_14: | ||
| 829 | case SQ_ALU_CONST_CACHE_GS_15: | ||
| 830 | case SQ_ALU_CONST_CACHE_PS_0: | ||
| 831 | case SQ_ALU_CONST_CACHE_PS_1: | ||
| 832 | case SQ_ALU_CONST_CACHE_PS_2: | ||
| 833 | case SQ_ALU_CONST_CACHE_PS_3: | ||
| 834 | case SQ_ALU_CONST_CACHE_PS_4: | ||
| 835 | case SQ_ALU_CONST_CACHE_PS_5: | ||
| 836 | case SQ_ALU_CONST_CACHE_PS_6: | ||
| 837 | case SQ_ALU_CONST_CACHE_PS_7: | ||
| 838 | case SQ_ALU_CONST_CACHE_PS_8: | ||
| 839 | case SQ_ALU_CONST_CACHE_PS_9: | ||
| 840 | case SQ_ALU_CONST_CACHE_PS_10: | ||
| 841 | case SQ_ALU_CONST_CACHE_PS_11: | ||
| 842 | case SQ_ALU_CONST_CACHE_PS_12: | ||
| 843 | case SQ_ALU_CONST_CACHE_PS_13: | ||
| 844 | case SQ_ALU_CONST_CACHE_PS_14: | ||
| 845 | case SQ_ALU_CONST_CACHE_PS_15: | ||
| 846 | case SQ_ALU_CONST_CACHE_VS_0: | ||
| 847 | case SQ_ALU_CONST_CACHE_VS_1: | ||
| 848 | case SQ_ALU_CONST_CACHE_VS_2: | ||
| 849 | case SQ_ALU_CONST_CACHE_VS_3: | ||
| 850 | case SQ_ALU_CONST_CACHE_VS_4: | ||
| 851 | case SQ_ALU_CONST_CACHE_VS_5: | ||
| 852 | case SQ_ALU_CONST_CACHE_VS_6: | ||
| 853 | case SQ_ALU_CONST_CACHE_VS_7: | ||
| 854 | case SQ_ALU_CONST_CACHE_VS_8: | ||
| 855 | case SQ_ALU_CONST_CACHE_VS_9: | ||
| 856 | case SQ_ALU_CONST_CACHE_VS_10: | ||
| 857 | case SQ_ALU_CONST_CACHE_VS_11: | ||
| 858 | case SQ_ALU_CONST_CACHE_VS_12: | ||
| 859 | case SQ_ALU_CONST_CACHE_VS_13: | ||
| 860 | case SQ_ALU_CONST_CACHE_VS_14: | ||
| 861 | case SQ_ALU_CONST_CACHE_VS_15: | ||
| 862 | case SQ_ALU_CONST_CACHE_HS_0: | ||
| 863 | case SQ_ALU_CONST_CACHE_HS_1: | ||
| 864 | case SQ_ALU_CONST_CACHE_HS_2: | ||
| 865 | case SQ_ALU_CONST_CACHE_HS_3: | ||
| 866 | case SQ_ALU_CONST_CACHE_HS_4: | ||
| 867 | case SQ_ALU_CONST_CACHE_HS_5: | ||
| 868 | case SQ_ALU_CONST_CACHE_HS_6: | ||
| 869 | case SQ_ALU_CONST_CACHE_HS_7: | ||
| 870 | case SQ_ALU_CONST_CACHE_HS_8: | ||
| 871 | case SQ_ALU_CONST_CACHE_HS_9: | ||
| 872 | case SQ_ALU_CONST_CACHE_HS_10: | ||
| 873 | case SQ_ALU_CONST_CACHE_HS_11: | ||
| 874 | case SQ_ALU_CONST_CACHE_HS_12: | ||
| 875 | case SQ_ALU_CONST_CACHE_HS_13: | ||
| 876 | case SQ_ALU_CONST_CACHE_HS_14: | ||
| 877 | case SQ_ALU_CONST_CACHE_HS_15: | ||
| 878 | case SQ_ALU_CONST_CACHE_LS_0: | ||
| 879 | case SQ_ALU_CONST_CACHE_LS_1: | ||
| 880 | case SQ_ALU_CONST_CACHE_LS_2: | ||
| 881 | case SQ_ALU_CONST_CACHE_LS_3: | ||
| 882 | case SQ_ALU_CONST_CACHE_LS_4: | ||
| 883 | case SQ_ALU_CONST_CACHE_LS_5: | ||
| 884 | case SQ_ALU_CONST_CACHE_LS_6: | ||
| 885 | case SQ_ALU_CONST_CACHE_LS_7: | ||
| 886 | case SQ_ALU_CONST_CACHE_LS_8: | ||
| 887 | case SQ_ALU_CONST_CACHE_LS_9: | ||
| 888 | case SQ_ALU_CONST_CACHE_LS_10: | ||
| 889 | case SQ_ALU_CONST_CACHE_LS_11: | ||
| 890 | case SQ_ALU_CONST_CACHE_LS_12: | ||
| 891 | case SQ_ALU_CONST_CACHE_LS_13: | ||
| 892 | case SQ_ALU_CONST_CACHE_LS_14: | ||
| 893 | case SQ_ALU_CONST_CACHE_LS_15: | ||
| 894 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 895 | if (r) { | ||
| 896 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 897 | "0x%04X\n", reg); | ||
| 898 | return -EINVAL; | ||
| 899 | } | ||
| 900 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 901 | break; | ||
| 902 | default: | ||
| 903 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
| 904 | return -EINVAL; | ||
| 905 | } | ||
| 906 | return 0; | ||
| 907 | } | ||
| 908 | |||
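The bitmap probe at the top of evergreen_cs_check_reg() is worth spelling out: each u32 word of evergreen_reg_safe_bm covers 32 consecutive dword registers, i.e. 128 bytes of register space, which is where the reg >> 7 word index and the (reg >> 2) & 31 bit index come from. A clear bit lets the write through unchecked; a set bit routes the register into the switch for special handling. A standalone sketch of the lookup:

#include <stdint.h>

/* Probe a safe-register bitmap: each word covers 32 dword registers
 * (128 bytes of register space). Returns -1 for out-of-range registers,
 * 0 if the write can pass unchecked, 1 if it needs special handling. */
static int reg_needs_handling(const uint32_t *safe_bm, uint32_t bm_words,
                              uint32_t reg)
{
        uint32_t i = reg >> 7;
        uint32_t m = 1u << ((reg >> 2) & 31);

        if (i >= bm_words)
                return -1;
        return (safe_bm[i] & m) ? 1 : 0;
}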
| 909 | /** | ||
| 910 | * evergreen_check_texture_resource() - check texture resource validity | ||
| 911 | * @p: parser structure holding parsing context | ||
| 912 | * @idx: index into the cs buffer | ||
| 913 | * @texture: texture's bo structure | ||
| 914 | * @mipmap: mipmap's bo structure | ||
| 915 | * | ||
| 916 | * This function will check that the resource has valid fields and that | ||
| 917 | * the texture and mipmap bo objects are big enough to cover this resource. | ||
| 918 | */ | ||
| 919 | static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | ||
| 920 | struct radeon_bo *texture, | ||
| 921 | struct radeon_bo *mipmap) | ||
| 922 | { | ||
| 923 | /* XXX fill in */ | ||
| 924 | return 0; | ||
| 925 | } | ||
| 926 | |||
| 927 | static int evergreen_packet3_check(struct radeon_cs_parser *p, | ||
| 928 | struct radeon_cs_packet *pkt) | ||
| 929 | { | ||
| 930 | struct radeon_cs_reloc *reloc; | ||
| 931 | struct evergreen_cs_track *track; | ||
| 932 | volatile u32 *ib; | ||
| 933 | unsigned idx; | ||
| 934 | unsigned i; | ||
| 935 | unsigned start_reg, end_reg, reg; | ||
| 936 | int r; | ||
| 937 | u32 idx_value; | ||
| 938 | |||
| 939 | track = (struct evergreen_cs_track *)p->track; | ||
| 940 | ib = p->ib->ptr; | ||
| 941 | idx = pkt->idx + 1; | ||
| 942 | idx_value = radeon_get_ib_value(p, idx); | ||
| 943 | |||
| 944 | switch (pkt->opcode) { | ||
| 945 | case PACKET3_CONTEXT_CONTROL: | ||
| 946 | if (pkt->count != 1) { | ||
| 947 | DRM_ERROR("bad CONTEXT_CONTROL\n"); | ||
| 948 | return -EINVAL; | ||
| 949 | } | ||
| 950 | break; | ||
| 951 | case PACKET3_INDEX_TYPE: | ||
| 952 | case PACKET3_NUM_INSTANCES: | ||
| 953 | case PACKET3_CLEAR_STATE: | ||
| 954 | if (pkt->count) { | ||
| 955 | DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); | ||
| 956 | return -EINVAL; | ||
| 957 | } | ||
| 958 | break; | ||
| 959 | case PACKET3_INDEX_BASE: | ||
| 960 | if (pkt->count != 1) { | ||
| 961 | DRM_ERROR("bad INDEX_BASE\n"); | ||
| 962 | return -EINVAL; | ||
| 963 | } | ||
| 964 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 965 | if (r) { | ||
| 966 | DRM_ERROR("bad INDEX_BASE\n"); | ||
| 967 | return -EINVAL; | ||
| 968 | } | ||
| 969 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
| 970 | ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 971 | r = evergreen_cs_track_check(p); | ||
| 972 | if (r) { | ||
| 973 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | ||
| 974 | return r; | ||
| 975 | } | ||
| 976 | break; | ||
| 977 | case PACKET3_DRAW_INDEX: | ||
| 978 | if (pkt->count != 3) { | ||
| 979 | DRM_ERROR("bad DRAW_INDEX\n"); | ||
| 980 | return -EINVAL; | ||
| 981 | } | ||
| 982 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 983 | if (r) { | ||
| 984 | DRM_ERROR("bad DRAW_INDEX\n"); | ||
| 985 | return -EINVAL; | ||
| 986 | } | ||
| 987 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
| 988 | ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 989 | r = evergreen_cs_track_check(p); | ||
| 990 | if (r) { | ||
| 991 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | ||
| 992 | return r; | ||
| 993 | } | ||
| 994 | break; | ||
| 995 | case PACKET3_DRAW_INDEX_2: | ||
| 996 | if (pkt->count != 4) { | ||
| 997 | DRM_ERROR("bad DRAW_INDEX_2\n"); | ||
| 998 | return -EINVAL; | ||
| 999 | } | ||
| 1000 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1001 | if (r) { | ||
| 1002 | DRM_ERROR("bad DRAW_INDEX_2\n"); | ||
| 1003 | return -EINVAL; | ||
| 1004 | } | ||
| 1005 | ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
| 1006 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 1007 | r = evergreen_cs_track_check(p); | ||
| 1008 | if (r) { | ||
| 1009 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | ||
| 1010 | return r; | ||
| 1011 | } | ||
| 1012 | break; | ||
| 1013 | case PACKET3_DRAW_INDEX_AUTO: | ||
| 1014 | if (pkt->count != 1) { | ||
| 1015 | DRM_ERROR("bad DRAW_INDEX_AUTO\n"); | ||
| 1016 | return -EINVAL; | ||
| 1017 | } | ||
| 1018 | r = evergreen_cs_track_check(p); | ||
| 1019 | if (r) { | ||
| 1020 | dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); | ||
| 1021 | return r; | ||
| 1022 | } | ||
| 1023 | break; | ||
| 1024 | case PACKET3_DRAW_INDEX_MULTI_AUTO: | ||
| 1025 | if (pkt->count != 2) { | ||
| 1026 | DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); | ||
| 1027 | return -EINVAL; | ||
| 1028 | } | ||
| 1029 | r = evergreen_cs_track_check(p); | ||
| 1030 | if (r) { | ||
| 1031 | dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); | ||
| 1032 | return r; | ||
| 1033 | } | ||
| 1034 | break; | ||
| 1035 | case PACKET3_DRAW_INDEX_IMMD: | ||
| 1036 | if (pkt->count < 2) { | ||
| 1037 | DRM_ERROR("bad DRAW_INDEX_IMMD\n"); | ||
| 1038 | return -EINVAL; | ||
| 1039 | } | ||
| 1040 | r = evergreen_cs_track_check(p); | ||
| 1041 | if (r) { | ||
| 1042 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | ||
| 1043 | return r; | ||
| 1044 | } | ||
| 1045 | break; | ||
| 1046 | case PACKET3_DRAW_INDEX_OFFSET: | ||
| 1047 | if (pkt->count != 2) { | ||
| 1048 | DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); | ||
| 1049 | return -EINVAL; | ||
| 1050 | } | ||
| 1051 | r = evergreen_cs_track_check(p); | ||
| 1052 | if (r) { | ||
| 1053 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | ||
| 1054 | return r; | ||
| 1055 | } | ||
| 1056 | break; | ||
| 1057 | case PACKET3_DRAW_INDEX_OFFSET_2: | ||
| 1058 | if (pkt->count != 3) { | ||
| 1059 | DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); | ||
| 1060 | return -EINVAL; | ||
| 1061 | } | ||
| 1062 | r = evergreen_cs_track_check(p); | ||
| 1063 | if (r) { | ||
| 1064 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | ||
| 1065 | return r; | ||
| 1066 | } | ||
| 1067 | break; | ||
| 1068 | case PACKET3_WAIT_REG_MEM: | ||
| 1069 | if (pkt->count != 5) { | ||
| 1070 | DRM_ERROR("bad WAIT_REG_MEM\n"); | ||
| 1071 | return -EINVAL; | ||
| 1072 | } | ||
| 1073 | /* bit 4 is reg (0) or mem (1) */ | ||
| 1074 | if (idx_value & 0x10) { | ||
| 1075 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1076 | if (r) { | ||
| 1077 | DRM_ERROR("bad WAIT_REG_MEM\n"); | ||
| 1078 | return -EINVAL; | ||
| 1079 | } | ||
| 1080 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
| 1081 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 1082 | } | ||
| 1083 | break; | ||
| 1084 | case PACKET3_SURFACE_SYNC: | ||
| 1085 | if (pkt->count != 3) { | ||
| 1086 | DRM_ERROR("bad SURFACE_SYNC\n"); | ||
| 1087 | return -EINVAL; | ||
| 1088 | } | ||
| 1089 | /* 0xffffffff/0x0 is flush all cache flag */ | ||
| 1090 | if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || | ||
| 1091 | radeon_get_ib_value(p, idx + 2) != 0) { | ||
| 1092 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1093 | if (r) { | ||
| 1094 | DRM_ERROR("bad SURFACE_SYNC\n"); | ||
| 1095 | return -EINVAL; | ||
| 1096 | } | ||
| 1097 | ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 1098 | } | ||
| 1099 | break; | ||
| 1100 | case PACKET3_EVENT_WRITE: | ||
| 1101 | if (pkt->count != 2 && pkt->count != 0) { | ||
| 1102 | DRM_ERROR("bad EVENT_WRITE\n"); | ||
| 1103 | return -EINVAL; | ||
| 1104 | } | ||
| 1105 | if (pkt->count) { | ||
| 1106 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1107 | if (r) { | ||
| 1108 | DRM_ERROR("bad EVENT_WRITE\n"); | ||
| 1109 | return -EINVAL; | ||
| 1110 | } | ||
| 1111 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
| 1112 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 1113 | } | ||
| 1114 | break; | ||
| 1115 | case PACKET3_EVENT_WRITE_EOP: | ||
| 1116 | if (pkt->count != 4) { | ||
| 1117 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); | ||
| 1118 | return -EINVAL; | ||
| 1119 | } | ||
| 1120 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1121 | if (r) { | ||
| 1122 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); | ||
| 1123 | return -EINVAL; | ||
| 1124 | } | ||
| 1125 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
| 1126 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 1127 | break; | ||
| 1128 | case PACKET3_EVENT_WRITE_EOS: | ||
| 1129 | if (pkt->count != 3) { | ||
| 1130 | DRM_ERROR("bad EVENT_WRITE_EOS\n"); | ||
| 1131 | return -EINVAL; | ||
| 1132 | } | ||
| 1133 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1134 | if (r) { | ||
| 1135 | DRM_ERROR("bad EVENT_WRITE_EOS\n"); | ||
| 1136 | return -EINVAL; | ||
| 1137 | } | ||
| 1138 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
| 1139 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 1140 | break; | ||
| 1141 | case PACKET3_SET_CONFIG_REG: | ||
| 1142 | start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; | ||
| 1143 | end_reg = 4 * pkt->count + start_reg - 4; | ||
| 1144 | if ((start_reg < PACKET3_SET_CONFIG_REG_START) || | ||
| 1145 | (start_reg >= PACKET3_SET_CONFIG_REG_END) || | ||
| 1146 | (end_reg >= PACKET3_SET_CONFIG_REG_END)) { | ||
| 1147 | DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); | ||
| 1148 | return -EINVAL; | ||
| 1149 | } | ||
| 1150 | for (i = 0; i < pkt->count; i++) { | ||
| 1151 | reg = start_reg + (4 * i); | ||
| 1152 | r = evergreen_cs_check_reg(p, reg, idx+1+i); | ||
| 1153 | if (r) | ||
| 1154 | return r; | ||
| 1155 | } | ||
| 1156 | break; | ||
| 1157 | case PACKET3_SET_CONTEXT_REG: | ||
| 1158 | start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START; | ||
| 1159 | end_reg = 4 * pkt->count + start_reg - 4; | ||
| 1160 | if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || | ||
| 1161 | (start_reg >= PACKET3_SET_CONTEXT_REG_END) || | ||
| 1162 | (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { | ||
| 1163 | DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); | ||
| 1164 | return -EINVAL; | ||
| 1165 | } | ||
| 1166 | for (i = 0; i < pkt->count; i++) { | ||
| 1167 | reg = start_reg + (4 * i); | ||
| 1168 | r = evergreen_cs_check_reg(p, reg, idx+1+i); | ||
| 1169 | if (r) | ||
| 1170 | return r; | ||
| 1171 | } | ||
| 1172 | break; | ||
| 1173 | case PACKET3_SET_RESOURCE: | ||
| 1174 | if (pkt->count % 8) { | ||
| 1175 | DRM_ERROR("bad SET_RESOURCE\n"); | ||
| 1176 | return -EINVAL; | ||
| 1177 | } | ||
| 1178 | start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; | ||
| 1179 | end_reg = 4 * pkt->count + start_reg - 4; | ||
| 1180 | if ((start_reg < PACKET3_SET_RESOURCE_START) || | ||
| 1181 | (start_reg >= PACKET3_SET_RESOURCE_END) || | ||
| 1182 | (end_reg >= PACKET3_SET_RESOURCE_END)) { | ||
| 1183 | DRM_ERROR("bad SET_RESOURCE\n"); | ||
| 1184 | return -EINVAL; | ||
| 1185 | } | ||
| 1186 | for (i = 0; i < (pkt->count / 8); i++) { | ||
| 1187 | struct radeon_bo *texture, *mipmap; | ||
| 1188 | u32 size, offset; | ||
| 1189 | |||
| 1190 | switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { | ||
| 1191 | case SQ_TEX_VTX_VALID_TEXTURE: | ||
| 1192 | /* tex base */ | ||
| 1193 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1194 | if (r) { | ||
| 1195 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | ||
| 1196 | return -EINVAL; | ||
| 1197 | } | ||
| 1198 | ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 1199 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
| 1200 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 1201 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
| 1202 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 1203 | texture = reloc->robj; | ||
| 1204 | /* tex mip base */ | ||
| 1205 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1206 | if (r) { | ||
| 1207 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | ||
| 1208 | return -EINVAL; | ||
| 1209 | } | ||
| 1210 | ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 1211 | mipmap = reloc->robj; | ||
| 1212 | r = evergreen_check_texture_resource(p, idx+1+(i*8), | ||
| 1213 | texture, mipmap); | ||
| 1214 | if (r) | ||
| 1215 | return r; | ||
| 1216 | break; | ||
| 1217 | case SQ_TEX_VTX_VALID_BUFFER: | ||
| 1218 | /* vtx base */ | ||
| 1219 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 1220 | if (r) { | ||
| 1221 | DRM_ERROR("bad SET_RESOURCE (vtx)\n"); | ||
| 1222 | return -EINVAL; | ||
| 1223 | } | ||
| 1224 | offset = radeon_get_ib_value(p, idx+1+(i*8)+0); | ||
| 1225 | size = radeon_get_ib_value(p, idx+1+(i*8)+1); | ||
| 1226 | if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { | ||
| 1227 | /* force size to size of the buffer */ | ||
| 1228 | dev_warn(p->dev, "vbo resource seems too big for the bo\n"); | ||
| 1229 | ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj); | ||
| 1230 | } | ||
| 1231 | ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); | ||
| 1232 | ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | ||
| 1233 | break; | ||
| 1234 | case SQ_TEX_VTX_INVALID_TEXTURE: | ||
| 1235 | case SQ_TEX_VTX_INVALID_BUFFER: | ||
| 1236 | default: | ||
| 1237 | DRM_ERROR("bad SET_RESOURCE\n"); | ||
| 1238 | return -EINVAL; | ||
| 1239 | } | ||
| 1240 | } | ||
| 1241 | break; | ||
| 1242 | case PACKET3_SET_ALU_CONST: | ||
| 1243 | /* XXX fix me ALU const buffers only */ | ||
| 1244 | break; | ||
| 1245 | case PACKET3_SET_BOOL_CONST: | ||
| 1246 | start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START; | ||
| 1247 | end_reg = 4 * pkt->count + start_reg - 4; | ||
| 1248 | if ((start_reg < PACKET3_SET_BOOL_CONST_START) || | ||
| 1249 | (start_reg >= PACKET3_SET_BOOL_CONST_END) || | ||
| 1250 | (end_reg >= PACKET3_SET_BOOL_CONST_END)) { | ||
| 1251 | DRM_ERROR("bad SET_BOOL_CONST\n"); | ||
| 1252 | return -EINVAL; | ||
| 1253 | } | ||
| 1254 | break; | ||
| 1255 | case PACKET3_SET_LOOP_CONST: | ||
| 1256 | start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START; | ||
| 1257 | end_reg = 4 * pkt->count + start_reg - 4; | ||
| 1258 | if ((start_reg < PACKET3_SET_LOOP_CONST_START) || | ||
| 1259 | (start_reg >= PACKET3_SET_LOOP_CONST_END) || | ||
| 1260 | (end_reg >= PACKET3_SET_LOOP_CONST_END)) { | ||
| 1261 | DRM_ERROR("bad SET_LOOP_CONST\n"); | ||
| 1262 | return -EINVAL; | ||
| 1263 | } | ||
| 1264 | break; | ||
| 1265 | case PACKET3_SET_CTL_CONST: | ||
| 1266 | start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START; | ||
| 1267 | end_reg = 4 * pkt->count + start_reg - 4; | ||
| 1268 | if ((start_reg < PACKET3_SET_CTL_CONST_START) || | ||
| 1269 | (start_reg >= PACKET3_SET_CTL_CONST_END) || | ||
| 1270 | (end_reg >= PACKET3_SET_CTL_CONST_END)) { | ||
| 1271 | DRM_ERROR("bad SET_CTL_CONST\n"); | ||
| 1272 | return -EINVAL; | ||
| 1273 | } | ||
| 1274 | break; | ||
| 1275 | case PACKET3_SET_SAMPLER: | ||
| 1276 | if (pkt->count % 3) { | ||
| 1277 | DRM_ERROR("bad SET_SAMPLER\n"); | ||
| 1278 | return -EINVAL; | ||
| 1279 | } | ||
| 1280 | start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; | ||
| 1281 | end_reg = 4 * pkt->count + start_reg - 4; | ||
| 1282 | if ((start_reg < PACKET3_SET_SAMPLER_START) || | ||
| 1283 | (start_reg >= PACKET3_SET_SAMPLER_END) || | ||
| 1284 | (end_reg >= PACKET3_SET_SAMPLER_END)) { | ||
| 1285 | DRM_ERROR("bad SET_SAMPLER\n"); | ||
| 1286 | return -EINVAL; | ||
| 1287 | } | ||
| 1288 | break; | ||
| 1289 | case PACKET3_NOP: | ||
| 1290 | break; | ||
| 1291 | default: | ||
| 1292 | DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); | ||
| 1293 | return -EINVAL; | ||
| 1294 | } | ||
| 1295 | return 0; | ||
| 1296 | } | ||
| 1297 | |||
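All of the PACKET3_SET_* cases above share one piece of bounds arithmetic: the first payload dword holds (start_reg - block_start) >> 2, and count register values follow it, so the last register written is start_reg + 4 * (count - 1), which is exactly what end_reg computes. A sketch of the shared check, with the block bounds passed in as parameters since the real PACKET3_SET_*_START/_END constants live in evergreend.h:

#include <stdint.h>

/* Shared bounds check for the SET_CONFIG/CONTEXT/BOOL/LOOP/... cases:
 * idx_value is the first payload dword, count the number of register
 * values following it. Returns 1 when every written register falls
 * inside [block_start, block_end). */
static int set_reg_block_ok(uint32_t idx_value, uint32_t count,
                            uint32_t block_start, uint32_t block_end)
{
        uint32_t start_reg = (idx_value << 2) + block_start;
        uint32_t end_reg = 4 * count + start_reg - 4;

        return start_reg >= block_start &&
               start_reg < block_end &&
               end_reg < block_end;
}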
| 1298 | int evergreen_cs_parse(struct radeon_cs_parser *p) | ||
| 1299 | { | ||
| 1300 | struct radeon_cs_packet pkt; | ||
| 1301 | struct evergreen_cs_track *track; | ||
| 1302 | int r; | ||
| 1303 | |||
| 1304 | if (p->track == NULL) { | ||
| 1305 | /* initialize tracker, we are in kms */ | ||
| 1306 | track = kzalloc(sizeof(*track), GFP_KERNEL); | ||
| 1307 | if (track == NULL) | ||
| 1308 | return -ENOMEM; | ||
| 1309 | evergreen_cs_track_init(track); | ||
| 1310 | track->npipes = p->rdev->config.evergreen.tiling_npipes; | ||
| 1311 | track->nbanks = p->rdev->config.evergreen.tiling_nbanks; | ||
| 1312 | track->group_size = p->rdev->config.evergreen.tiling_group_size; | ||
| 1313 | p->track = track; | ||
| 1314 | } | ||
| 1315 | do { | ||
| 1316 | r = evergreen_cs_packet_parse(p, &pkt, p->idx); | ||
| 1317 | if (r) { | ||
| 1318 | kfree(p->track); | ||
| 1319 | p->track = NULL; | ||
| 1320 | return r; | ||
| 1321 | } | ||
| 1322 | p->idx += pkt.count + 2; | ||
| 1323 | switch (pkt.type) { | ||
| 1324 | case PACKET_TYPE0: | ||
| 1325 | r = evergreen_cs_parse_packet0(p, &pkt); | ||
| 1326 | break; | ||
| 1327 | case PACKET_TYPE2: | ||
| 1328 | break; | ||
| 1329 | case PACKET_TYPE3: | ||
| 1330 | r = evergreen_packet3_check(p, &pkt); | ||
| 1331 | break; | ||
| 1332 | default: | ||
| 1333 | DRM_ERROR("Unknown packet type %d !\n", pkt.type); | ||
| 1334 | kfree(p->track); | ||
| 1335 | p->track = NULL; | ||
| 1336 | return -EINVAL; | ||
| 1337 | } | ||
| 1338 | if (r) { | ||
| 1339 | kfree(p->track); | ||
| 1340 | p->track = NULL; | ||
| 1341 | return r; | ||
| 1342 | } | ||
| 1343 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); | ||
| 1344 | #if 0 | ||
| 1345 | for (r = 0; r < p->ib->length_dw; r++) { | ||
| 1346 | printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); | ||
| 1347 | mdelay(1); | ||
| 1348 | } | ||
| 1349 | #endif | ||
| 1350 | kfree(p->track); | ||
| 1351 | p->track = NULL; | ||
| 1352 | return 0; | ||
| 1353 | } | ||
| 1354 | |||
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index af86af836f13..e028c1cd9d9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
| @@ -151,6 +151,9 @@ | |||
| 151 | #define EVERGREEN_DATA_FORMAT 0x6b00 | 151 | #define EVERGREEN_DATA_FORMAT 0x6b00 |
| 152 | # define EVERGREEN_INTERLEAVE_EN (1 << 0) | 152 | # define EVERGREEN_INTERLEAVE_EN (1 << 0) |
| 153 | #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 | 153 | #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 |
| 154 | #define EVERGREEN_VLINE_START_END 0x6b08 | ||
| 155 | #define EVERGREEN_VLINE_STATUS 0x6bb8 | ||
| 156 | # define EVERGREEN_VLINE_STAT (1 << 12) | ||
| 154 | 157 | ||
| 155 | #define EVERGREEN_VIEWPORT_START 0x6d70 | 158 | #define EVERGREEN_VIEWPORT_START 0x6d70 |
| 156 | #define EVERGREEN_VIEWPORT_SIZE 0x6d74 | 159 | #define EVERGREEN_VIEWPORT_SIZE 0x6d74 |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 93e9e17ad54a..a1cd621780e2 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
| @@ -218,6 +218,8 @@ | |||
| 218 | #define CLIP_VTX_REORDER_ENA (1 << 0) | 218 | #define CLIP_VTX_REORDER_ENA (1 << 0) |
| 219 | #define NUM_CLIP_SEQ(x) ((x) << 1) | 219 | #define NUM_CLIP_SEQ(x) ((x) << 1) |
| 220 | #define PA_SC_AA_CONFIG 0x28C04 | 220 | #define PA_SC_AA_CONFIG 0x28C04 |
| 221 | #define MSAA_NUM_SAMPLES_SHIFT 0 | ||
| 222 | #define MSAA_NUM_SAMPLES_MASK 0x3 | ||
| 221 | #define PA_SC_CLIPRECT_RULE 0x2820C | 223 | #define PA_SC_CLIPRECT_RULE 0x2820C |
| 222 | #define PA_SC_EDGERULE 0x28230 | 224 | #define PA_SC_EDGERULE 0x28230 |
| 223 | #define PA_SC_FIFO_SIZE 0x8BCC | 225 | #define PA_SC_FIFO_SIZE 0x8BCC |
| @@ -553,4 +555,469 @@ | |||
| 553 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) | 555 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) |
| 554 | # define DC_HPDx_EN (1 << 28) | 556 | # define DC_HPDx_EN (1 << 28) |
| 555 | 557 | ||
| 558 | /* | ||
| 559 | * PM4 | ||
| 560 | */ | ||
| 561 | #define PACKET_TYPE0 0 | ||
| 562 | #define PACKET_TYPE1 1 | ||
| 563 | #define PACKET_TYPE2 2 | ||
| 564 | #define PACKET_TYPE3 3 | ||
| 565 | |||
| 566 | #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) | ||
| 567 | #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) | ||
| 568 | #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) | ||
| 569 | #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) | ||
| 570 | #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ | ||
| 571 | (((reg) >> 2) & 0xFFFF) | \ | ||
| 572 | ((n) & 0x3FFF) << 16) | ||
| 573 | #define CP_PACKET2 0x80000000 | ||
| 574 | #define PACKET2_PAD_SHIFT 0 | ||
| 575 | #define PACKET2_PAD_MASK (0x3fffffff << 0) | ||
| 576 | |||
| 577 | #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) | ||
| 578 | |||
| 579 | #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ | ||
| 580 | (((op) & 0xFF) << 8) | \ | ||
| 581 | ((n) & 0x3FFF) << 16) | ||
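These encode macros are the inverses of the CP_PACKET_GET_* decoders above them; in particular PACKET2(0), which the vline fixup uses to overwrite a disabled wait, is just CP_PACKET2 with a zero pad field, i.e. 0x80000000 (REG_SET is assumed from r600d.h and shifts the value into the PACKET2_PAD field). A quick worked check of a PACKET3 round-trip:

#include <assert.h>
#include <stdint.h>

#define PACKET3(op, n)  ((3u << 30) | (((op) & 0xFFu) << 8) | \
                         (((n) & 0x3FFFu) << 16))

int main(void)
{
        uint32_t h = PACKET3(0x10, 2);          /* PACKET3_NOP, count 2 */

        assert(h == 0xC0021000);                /* 3<<30 | 2<<16 | 0x10<<8 */
        assert(((h >> 30) & 3) == 3);           /* CP_PACKET_GET_TYPE */
        assert(((h >> 16) & 0x3FFF) == 2);      /* CP_PACKET_GET_COUNT */
        assert(((h >> 8) & 0xFF) == 0x10);      /* CP_PACKET3_GET_OPCODE */
        return 0;
}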
| 582 | |||
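The header macros above pack the PM4 command layout: packet type in bits 31:30, dword count in bits 29:16, and the type-3 opcode in bits 15:8. A standalone round-trip sketch (macros copied from the hunk, with unsigned suffixes added so the shifts stay well-defined outside the kernel; by radeon convention the count passed to PACKET3 is the payload length minus one, though the macro itself only masks and shifts):

#include <assert.h>
#include <stdint.h>

#define PACKET_TYPE3             3u
#define CP_PACKET_GET_TYPE(h)    (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)   (((h) >> 16) & 0x3FFF)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET3(op, n)           ((PACKET_TYPE3 << 30) |   \
                                  (((op) & 0xFFu) << 8) |  \
                                  ((n) & 0x3FFFu) << 16)
#define PACKET3_NOP              0x10

int main(void)
{
	uint32_t header = PACKET3(PACKET3_NOP, 2);

	assert(CP_PACKET_GET_TYPE(header) == PACKET_TYPE3);
	assert(CP_PACKET3_GET_OPCODE(header) == PACKET3_NOP);
	assert(CP_PACKET_GET_COUNT(header) == 2);
	return 0;
}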
| 583 | /* Packet 3 types */ | ||
| 584 | #define PACKET3_NOP 0x10 | ||
| 585 | #define PACKET3_SET_BASE 0x11 | ||
| 586 | #define PACKET3_CLEAR_STATE 0x12 | ||
| 587 | #define PACKET3_INDIRECT_BUFFER_SIZE 0x13 | ||
| 588 | #define PACKET3_DISPATCH_DIRECT 0x15 | ||
| 589 | #define PACKET3_DISPATCH_INDIRECT 0x16 | ||
| 590 | #define PACKET3_INDIRECT_BUFFER_END 0x17 | ||
| 591 | #define PACKET3_SET_PREDICATION 0x20 | ||
| 592 | #define PACKET3_REG_RMW 0x21 | ||
| 593 | #define PACKET3_COND_EXEC 0x22 | ||
| 594 | #define PACKET3_PRED_EXEC 0x23 | ||
| 595 | #define PACKET3_DRAW_INDIRECT 0x24 | ||
| 596 | #define PACKET3_DRAW_INDEX_INDIRECT 0x25 | ||
| 597 | #define PACKET3_INDEX_BASE 0x26 | ||
| 598 | #define PACKET3_DRAW_INDEX_2 0x27 | ||
| 599 | #define PACKET3_CONTEXT_CONTROL 0x28 | ||
| 600 | #define PACKET3_DRAW_INDEX_OFFSET 0x29 | ||
| 601 | #define PACKET3_INDEX_TYPE 0x2A | ||
| 602 | #define PACKET3_DRAW_INDEX 0x2B | ||
| 603 | #define PACKET3_DRAW_INDEX_AUTO 0x2D | ||
| 604 | #define PACKET3_DRAW_INDEX_IMMD 0x2E | ||
| 605 | #define PACKET3_NUM_INSTANCES 0x2F | ||
| 606 | #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 | ||
| 607 | #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 | ||
| 608 | #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 | ||
| 609 | #define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 | ||
| 610 | #define PACKET3_MEM_SEMAPHORE 0x39 | ||
| 611 | #define PACKET3_MPEG_INDEX 0x3A | ||
| 612 | #define PACKET3_WAIT_REG_MEM 0x3C | ||
| 613 | #define PACKET3_MEM_WRITE 0x3D | ||
| 614 | #define PACKET3_INDIRECT_BUFFER 0x32 | ||
| 615 | #define PACKET3_SURFACE_SYNC 0x43 | ||
| 616 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | ||
| 617 | # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) | ||
| 618 | # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) | ||
| 619 | # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) | ||
| 620 | # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) | ||
| 621 | # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) | ||
| 622 | # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) | ||
| 623 | # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) | ||
| 624 | # define PACKET3_DB_DEST_BASE_ENA (1 << 14) | ||
| 625 | # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) | ||
| 626 | # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) | ||
| 627 | # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) | ||
| 628 | # define PACKET3_CB11_DEST_BASE_ENA (1 << 18) | ||
| 629 | # define PACKET3_FULL_CACHE_ENA (1 << 20) | ||
| 630 | # define PACKET3_TC_ACTION_ENA (1 << 23) | ||
| 631 | # define PACKET3_VC_ACTION_ENA (1 << 24) | ||
| 632 | # define PACKET3_CB_ACTION_ENA (1 << 25) | ||
| 633 | # define PACKET3_DB_ACTION_ENA (1 << 26) | ||
| 634 | # define PACKET3_SH_ACTION_ENA (1 << 27) | ||
| 635 | # define PACKET3_SMX_ACTION_ENA (1 << 28) | ||
| 636 | #define PACKET3_ME_INITIALIZE 0x44 | ||
| 637 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | ||
| 638 | #define PACKET3_COND_WRITE 0x45 | ||
| 639 | #define PACKET3_EVENT_WRITE 0x46 | ||
| 640 | #define PACKET3_EVENT_WRITE_EOP 0x47 | ||
| 641 | #define PACKET3_EVENT_WRITE_EOS 0x48 | ||
| 642 | #define PACKET3_PREAMBLE_CNTL 0x4A | ||
| 643 | #define PACKET3_RB_OFFSET 0x4B | ||
| 644 | #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C | ||
| 645 | #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D | ||
| 646 | #define PACKET3_ALU_PS_CONST_UPDATE 0x4E | ||
| 647 | #define PACKET3_ALU_VS_CONST_UPDATE 0x4F | ||
| 648 | #define PACKET3_ONE_REG_WRITE 0x57 | ||
| 649 | #define PACKET3_SET_CONFIG_REG 0x68 | ||
| 650 | #define PACKET3_SET_CONFIG_REG_START 0x00008000 | ||
| 651 | #define PACKET3_SET_CONFIG_REG_END 0x0000ac00 | ||
| 652 | #define PACKET3_SET_CONTEXT_REG 0x69 | ||
| 653 | #define PACKET3_SET_CONTEXT_REG_START 0x00028000 | ||
| 654 | #define PACKET3_SET_CONTEXT_REG_END 0x00029000 | ||
| 655 | #define PACKET3_SET_ALU_CONST 0x6A | ||
| 656 | /* alu const buffers only; no reg file */ | ||
| 657 | #define PACKET3_SET_BOOL_CONST 0x6B | ||
| 658 | #define PACKET3_SET_BOOL_CONST_START 0x0003a500 | ||
| 659 | #define PACKET3_SET_BOOL_CONST_END 0x0003a518 | ||
| 660 | #define PACKET3_SET_LOOP_CONST 0x6C | ||
| 661 | #define PACKET3_SET_LOOP_CONST_START 0x0003a200 | ||
| 662 | #define PACKET3_SET_LOOP_CONST_END 0x0003a500 | ||
| 663 | #define PACKET3_SET_RESOURCE 0x6D | ||
| 664 | #define PACKET3_SET_RESOURCE_START 0x00030000 | ||
| 665 | #define PACKET3_SET_RESOURCE_END 0x00038000 | ||
| 666 | #define PACKET3_SET_SAMPLER 0x6E | ||
| 667 | #define PACKET3_SET_SAMPLER_START 0x0003c000 | ||
| 668 | #define PACKET3_SET_SAMPLER_END 0x0003c600 | ||
| 669 | #define PACKET3_SET_CTL_CONST 0x6F | ||
| 670 | #define PACKET3_SET_CTL_CONST_START 0x0003cff0 | ||
| 671 | #define PACKET3_SET_CTL_CONST_END 0x0003ff0c | ||
| 672 | #define PACKET3_SET_RESOURCE_OFFSET 0x70 | ||
| 673 | #define PACKET3_SET_ALU_CONST_VS 0x71 | ||
| 674 | #define PACKET3_SET_ALU_CONST_DI 0x72 | ||
| 675 | #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 | ||
| 676 | #define PACKET3_SET_RESOURCE_INDIRECT 0x74 | ||
| 677 | #define PACKET3_SET_APPEND_CNT 0x75 | ||
| 678 | |||
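The SET_*_START/END pairs above bound the register window each of these type-3 packets may touch; a command-stream checker rejects a packet whose start offset plus register count escapes its window. A minimal sketch of that test for SET_CONFIG_REG (the function name is illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define PACKET3_SET_CONFIG_REG_START 0x00008000
#define PACKET3_SET_CONFIG_REG_END   0x0000ac00

static bool set_config_reg_in_range(uint32_t start_reg, uint32_t num_regs)
{
	uint32_t end_reg = start_reg + num_regs * 4; /* one dword per register */

	return start_reg >= PACKET3_SET_CONFIG_REG_START &&
	       end_reg <= PACKET3_SET_CONFIG_REG_END;
}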
| 679 | #define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c | ||
| 680 | #define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30) | ||
| 681 | #define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3) | ||
| 682 | #define SQ_TEX_VTX_INVALID_TEXTURE 0x0 | ||
| 683 | #define SQ_TEX_VTX_INVALID_BUFFER 0x1 | ||
| 684 | #define SQ_TEX_VTX_VALID_TEXTURE 0x2 | ||
| 685 | #define SQ_TEX_VTX_VALID_BUFFER 0x3 | ||
| 686 | |||
| 687 | #define SQ_CONST_MEM_BASE 0x8df8 | ||
| 688 | |||
| 689 | #define SQ_ESGS_RING_SIZE 0x8c44 | ||
| 690 | #define SQ_GSVS_RING_SIZE 0x8c4c | ||
| 691 | #define SQ_ESTMP_RING_SIZE 0x8c54 | ||
| 692 | #define SQ_GSTMP_RING_SIZE 0x8c5c | ||
| 693 | #define SQ_VSTMP_RING_SIZE 0x8c64 | ||
| 694 | #define SQ_PSTMP_RING_SIZE 0x8c6c | ||
| 695 | #define SQ_LSTMP_RING_SIZE 0x8e14 | ||
| 696 | #define SQ_HSTMP_RING_SIZE 0x8e1c | ||
| 697 | #define VGT_TF_RING_SIZE 0x8988 | ||
| 698 | |||
| 699 | #define SQ_ESGS_RING_ITEMSIZE 0x28900 | ||
| 700 | #define SQ_GSVS_RING_ITEMSIZE 0x28904 | ||
| 701 | #define SQ_ESTMP_RING_ITEMSIZE 0x28908 | ||
| 702 | #define SQ_GSTMP_RING_ITEMSIZE 0x2890c | ||
| 703 | #define SQ_VSTMP_RING_ITEMSIZE 0x28910 | ||
| 704 | #define SQ_PSTMP_RING_ITEMSIZE 0x28914 | ||
| 705 | #define SQ_LSTMP_RING_ITEMSIZE 0x28830 | ||
| 706 | #define SQ_HSTMP_RING_ITEMSIZE 0x28834 | ||
| 707 | |||
| 708 | #define SQ_GS_VERT_ITEMSIZE 0x2891c | ||
| 709 | #define SQ_GS_VERT_ITEMSIZE_1 0x28920 | ||
| 710 | #define SQ_GS_VERT_ITEMSIZE_2 0x28924 | ||
| 711 | #define SQ_GS_VERT_ITEMSIZE_3 0x28928 | ||
| 712 | #define SQ_GSVS_RING_OFFSET_1 0x2892c | ||
| 713 | #define SQ_GSVS_RING_OFFSET_2 0x28930 | ||
| 714 | #define SQ_GSVS_RING_OFFSET_3 0x28934 | ||
| 715 | |||
| 716 | #define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140 | ||
| 717 | #define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80 | ||
| 718 | |||
| 719 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 | ||
| 720 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 | ||
| 721 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 | ||
| 722 | #define SQ_ALU_CONST_CACHE_PS_3 0x2894c | ||
| 723 | #define SQ_ALU_CONST_CACHE_PS_4 0x28950 | ||
| 724 | #define SQ_ALU_CONST_CACHE_PS_5 0x28954 | ||
| 725 | #define SQ_ALU_CONST_CACHE_PS_6 0x28958 | ||
| 726 | #define SQ_ALU_CONST_CACHE_PS_7 0x2895c | ||
| 727 | #define SQ_ALU_CONST_CACHE_PS_8 0x28960 | ||
| 728 | #define SQ_ALU_CONST_CACHE_PS_9 0x28964 | ||
| 729 | #define SQ_ALU_CONST_CACHE_PS_10 0x28968 | ||
| 730 | #define SQ_ALU_CONST_CACHE_PS_11 0x2896c | ||
| 731 | #define SQ_ALU_CONST_CACHE_PS_12 0x28970 | ||
| 732 | #define SQ_ALU_CONST_CACHE_PS_13 0x28974 | ||
| 733 | #define SQ_ALU_CONST_CACHE_PS_14 0x28978 | ||
| 734 | #define SQ_ALU_CONST_CACHE_PS_15 0x2897c | ||
| 735 | #define SQ_ALU_CONST_CACHE_VS_0 0x28980 | ||
| 736 | #define SQ_ALU_CONST_CACHE_VS_1 0x28984 | ||
| 737 | #define SQ_ALU_CONST_CACHE_VS_2 0x28988 | ||
| 738 | #define SQ_ALU_CONST_CACHE_VS_3 0x2898c | ||
| 739 | #define SQ_ALU_CONST_CACHE_VS_4 0x28990 | ||
| 740 | #define SQ_ALU_CONST_CACHE_VS_5 0x28994 | ||
| 741 | #define SQ_ALU_CONST_CACHE_VS_6 0x28998 | ||
| 742 | #define SQ_ALU_CONST_CACHE_VS_7 0x2899c | ||
| 743 | #define SQ_ALU_CONST_CACHE_VS_8 0x289a0 | ||
| 744 | #define SQ_ALU_CONST_CACHE_VS_9 0x289a4 | ||
| 745 | #define SQ_ALU_CONST_CACHE_VS_10 0x289a8 | ||
| 746 | #define SQ_ALU_CONST_CACHE_VS_11 0x289ac | ||
| 747 | #define SQ_ALU_CONST_CACHE_VS_12 0x289b0 | ||
| 748 | #define SQ_ALU_CONST_CACHE_VS_13 0x289b4 | ||
| 749 | #define SQ_ALU_CONST_CACHE_VS_14 0x289b8 | ||
| 750 | #define SQ_ALU_CONST_CACHE_VS_15 0x289bc | ||
| 751 | #define SQ_ALU_CONST_CACHE_GS_0 0x289c0 | ||
| 752 | #define SQ_ALU_CONST_CACHE_GS_1 0x289c4 | ||
| 753 | #define SQ_ALU_CONST_CACHE_GS_2 0x289c8 | ||
| 754 | #define SQ_ALU_CONST_CACHE_GS_3 0x289cc | ||
| 755 | #define SQ_ALU_CONST_CACHE_GS_4 0x289d0 | ||
| 756 | #define SQ_ALU_CONST_CACHE_GS_5 0x289d4 | ||
| 757 | #define SQ_ALU_CONST_CACHE_GS_6 0x289d8 | ||
| 758 | #define SQ_ALU_CONST_CACHE_GS_7 0x289dc | ||
| 759 | #define SQ_ALU_CONST_CACHE_GS_8 0x289e0 | ||
| 760 | #define SQ_ALU_CONST_CACHE_GS_9 0x289e4 | ||
| 761 | #define SQ_ALU_CONST_CACHE_GS_10 0x289e8 | ||
| 762 | #define SQ_ALU_CONST_CACHE_GS_11 0x289ec | ||
| 763 | #define SQ_ALU_CONST_CACHE_GS_12 0x289f0 | ||
| 764 | #define SQ_ALU_CONST_CACHE_GS_13 0x289f4 | ||
| 765 | #define SQ_ALU_CONST_CACHE_GS_14 0x289f8 | ||
| 766 | #define SQ_ALU_CONST_CACHE_GS_15 0x289fc | ||
| 767 | #define SQ_ALU_CONST_CACHE_HS_0 0x28f00 | ||
| 768 | #define SQ_ALU_CONST_CACHE_HS_1 0x28f04 | ||
| 769 | #define SQ_ALU_CONST_CACHE_HS_2 0x28f08 | ||
| 770 | #define SQ_ALU_CONST_CACHE_HS_3 0x28f0c | ||
| 771 | #define SQ_ALU_CONST_CACHE_HS_4 0x28f10 | ||
| 772 | #define SQ_ALU_CONST_CACHE_HS_5 0x28f14 | ||
| 773 | #define SQ_ALU_CONST_CACHE_HS_6 0x28f18 | ||
| 774 | #define SQ_ALU_CONST_CACHE_HS_7 0x28f1c | ||
| 775 | #define SQ_ALU_CONST_CACHE_HS_8 0x28f20 | ||
| 776 | #define SQ_ALU_CONST_CACHE_HS_9 0x28f24 | ||
| 777 | #define SQ_ALU_CONST_CACHE_HS_10 0x28f28 | ||
| 778 | #define SQ_ALU_CONST_CACHE_HS_11 0x28f2c | ||
| 779 | #define SQ_ALU_CONST_CACHE_HS_12 0x28f30 | ||
| 780 | #define SQ_ALU_CONST_CACHE_HS_13 0x28f34 | ||
| 781 | #define SQ_ALU_CONST_CACHE_HS_14 0x28f38 | ||
| 782 | #define SQ_ALU_CONST_CACHE_HS_15 0x28f3c | ||
| 783 | #define SQ_ALU_CONST_CACHE_LS_0 0x28f40 | ||
| 784 | #define SQ_ALU_CONST_CACHE_LS_1 0x28f44 | ||
| 785 | #define SQ_ALU_CONST_CACHE_LS_2 0x28f48 | ||
| 786 | #define SQ_ALU_CONST_CACHE_LS_3 0x28f4c | ||
| 787 | #define SQ_ALU_CONST_CACHE_LS_4 0x28f50 | ||
| 788 | #define SQ_ALU_CONST_CACHE_LS_5 0x28f54 | ||
| 789 | #define SQ_ALU_CONST_CACHE_LS_6 0x28f58 | ||
| 790 | #define SQ_ALU_CONST_CACHE_LS_7 0x28f5c | ||
| 791 | #define SQ_ALU_CONST_CACHE_LS_8 0x28f60 | ||
| 792 | #define SQ_ALU_CONST_CACHE_LS_9 0x28f64 | ||
| 793 | #define SQ_ALU_CONST_CACHE_LS_10 0x28f68 | ||
| 794 | #define SQ_ALU_CONST_CACHE_LS_11 0x28f6c | ||
| 795 | #define SQ_ALU_CONST_CACHE_LS_12 0x28f70 | ||
| 796 | #define SQ_ALU_CONST_CACHE_LS_13 0x28f74 | ||
| 797 | #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 | ||
| 798 | #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c | ||
| 799 | |||
| 800 | #define DB_DEPTH_CONTROL 0x28800 | ||
| 801 | #define DB_DEPTH_VIEW 0x28008 | ||
| 802 | #define DB_HTILE_DATA_BASE 0x28014 | ||
| 803 | #define DB_Z_INFO 0x28040 | ||
| 804 | # define Z_ARRAY_MODE(x) ((x) << 4) | ||
| 805 | #define DB_STENCIL_INFO 0x28044 | ||
| 806 | #define DB_Z_READ_BASE 0x28048 | ||
| 807 | #define DB_STENCIL_READ_BASE 0x2804c | ||
| 808 | #define DB_Z_WRITE_BASE 0x28050 | ||
| 809 | #define DB_STENCIL_WRITE_BASE 0x28054 | ||
| 810 | #define DB_DEPTH_SIZE 0x28058 | ||
| 811 | |||
| 812 | #define SQ_PGM_START_PS 0x28840 | ||
| 813 | #define SQ_PGM_START_VS 0x2885c | ||
| 814 | #define SQ_PGM_START_GS 0x28874 | ||
| 815 | #define SQ_PGM_START_ES 0x2888c | ||
| 816 | #define SQ_PGM_START_FS 0x288a4 | ||
| 817 | #define SQ_PGM_START_HS 0x288b8 | ||
| 818 | #define SQ_PGM_START_LS 0x288d0 | ||
| 819 | |||
| 820 | #define VGT_STRMOUT_CONFIG 0x28b94 | ||
| 821 | #define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 | ||
| 822 | |||
| 823 | #define CB_TARGET_MASK 0x28238 | ||
| 824 | #define CB_SHADER_MASK 0x2823c | ||
| 825 | |||
| 826 | #define GDS_ADDR_BASE 0x28720 | ||
| 827 | |||
| 828 | #define CB_IMMED0_BASE 0x28b9c | ||
| 829 | #define CB_IMMED1_BASE 0x28ba0 | ||
| 830 | #define CB_IMMED2_BASE 0x28ba4 | ||
| 831 | #define CB_IMMED3_BASE 0x28ba8 | ||
| 832 | #define CB_IMMED4_BASE 0x28bac | ||
| 833 | #define CB_IMMED5_BASE 0x28bb0 | ||
| 834 | #define CB_IMMED6_BASE 0x28bb4 | ||
| 835 | #define CB_IMMED7_BASE 0x28bb8 | ||
| 836 | #define CB_IMMED8_BASE 0x28bbc | ||
| 837 | #define CB_IMMED9_BASE 0x28bc0 | ||
| 838 | #define CB_IMMED10_BASE 0x28bc4 | ||
| 839 | #define CB_IMMED11_BASE 0x28bc8 | ||
| 840 | |||
| 841 | /* all 12 CB blocks have these regs */ | ||
| 842 | #define CB_COLOR0_BASE 0x28c60 | ||
| 843 | #define CB_COLOR0_PITCH 0x28c64 | ||
| 844 | #define CB_COLOR0_SLICE 0x28c68 | ||
| 845 | #define CB_COLOR0_VIEW 0x28c6c | ||
| 846 | #define CB_COLOR0_INFO 0x28c70 | ||
| 847 | # define CB_ARRAY_MODE(x) ((x) << 8) | ||
| 848 | # define ARRAY_LINEAR_GENERAL 0 | ||
| 849 | # define ARRAY_LINEAR_ALIGNED 1 | ||
| 850 | # define ARRAY_1D_TILED_THIN1 2 | ||
| 851 | # define ARRAY_2D_TILED_THIN1 4 | ||
| 852 | #define CB_COLOR0_ATTRIB 0x28c74 | ||
| 853 | #define CB_COLOR0_DIM 0x28c78 | ||
| 854 | /* only CB0-7 blocks have these regs */ | ||
| 855 | #define CB_COLOR0_CMASK 0x28c7c | ||
| 856 | #define CB_COLOR0_CMASK_SLICE 0x28c80 | ||
| 857 | #define CB_COLOR0_FMASK 0x28c84 | ||
| 858 | #define CB_COLOR0_FMASK_SLICE 0x28c88 | ||
| 859 | #define CB_COLOR0_CLEAR_WORD0 0x28c8c | ||
| 860 | #define CB_COLOR0_CLEAR_WORD1 0x28c90 | ||
| 861 | #define CB_COLOR0_CLEAR_WORD2 0x28c94 | ||
| 862 | #define CB_COLOR0_CLEAR_WORD3 0x28c98 | ||
| 863 | |||
| 864 | #define CB_COLOR1_BASE 0x28c9c | ||
| 865 | #define CB_COLOR2_BASE 0x28cd8 | ||
| 866 | #define CB_COLOR3_BASE 0x28d14 | ||
| 867 | #define CB_COLOR4_BASE 0x28d50 | ||
| 868 | #define CB_COLOR5_BASE 0x28d8c | ||
| 869 | #define CB_COLOR6_BASE 0x28dc8 | ||
| 870 | #define CB_COLOR7_BASE 0x28e04 | ||
| 871 | #define CB_COLOR8_BASE 0x28e40 | ||
| 872 | #define CB_COLOR9_BASE 0x28e5c | ||
| 873 | #define CB_COLOR10_BASE 0x28e78 | ||
| 874 | #define CB_COLOR11_BASE 0x28e94 | ||
| 875 | |||
| 876 | #define CB_COLOR1_PITCH 0x28ca0 | ||
| 877 | #define CB_COLOR2_PITCH 0x28cdc | ||
| 878 | #define CB_COLOR3_PITCH 0x28d18 | ||
| 879 | #define CB_COLOR4_PITCH 0x28d54 | ||
| 880 | #define CB_COLOR5_PITCH 0x28d90 | ||
| 881 | #define CB_COLOR6_PITCH 0x28dcc | ||
| 882 | #define CB_COLOR7_PITCH 0x28e08 | ||
| 883 | #define CB_COLOR8_PITCH 0x28e44 | ||
| 884 | #define CB_COLOR9_PITCH 0x28e60 | ||
| 885 | #define CB_COLOR10_PITCH 0x28e7c | ||
| 886 | #define CB_COLOR11_PITCH 0x28e98 | ||
| 887 | |||
| 888 | #define CB_COLOR1_SLICE 0x28ca4 | ||
| 889 | #define CB_COLOR2_SLICE 0x28ce0 | ||
| 890 | #define CB_COLOR3_SLICE 0x28d1c | ||
| 891 | #define CB_COLOR4_SLICE 0x28d58 | ||
| 892 | #define CB_COLOR5_SLICE 0x28d94 | ||
| 893 | #define CB_COLOR6_SLICE 0x28dd0 | ||
| 894 | #define CB_COLOR7_SLICE 0x28e0c | ||
| 895 | #define CB_COLOR8_SLICE 0x28e48 | ||
| 896 | #define CB_COLOR9_SLICE 0x28e64 | ||
| 897 | #define CB_COLOR10_SLICE 0x28e80 | ||
| 898 | #define CB_COLOR11_SLICE 0x28e9c | ||
| 899 | |||
| 900 | #define CB_COLOR1_VIEW 0x28ca8 | ||
| 901 | #define CB_COLOR2_VIEW 0x28ce4 | ||
| 902 | #define CB_COLOR3_VIEW 0x28d20 | ||
| 903 | #define CB_COLOR4_VIEW 0x28d5c | ||
| 904 | #define CB_COLOR5_VIEW 0x28d98 | ||
| 905 | #define CB_COLOR6_VIEW 0x28dd4 | ||
| 906 | #define CB_COLOR7_VIEW 0x28e10 | ||
| 907 | #define CB_COLOR8_VIEW 0x28e4c | ||
| 908 | #define CB_COLOR9_VIEW 0x28e68 | ||
| 909 | #define CB_COLOR10_VIEW 0x28e84 | ||
| 910 | #define CB_COLOR11_VIEW 0x28ea0 | ||
| 911 | |||
| 912 | #define CB_COLOR1_INFO 0x28cac | ||
| 913 | #define CB_COLOR2_INFO 0x28ce8 | ||
| 914 | #define CB_COLOR3_INFO 0x28d24 | ||
| 915 | #define CB_COLOR4_INFO 0x28d60 | ||
| 916 | #define CB_COLOR5_INFO 0x28d9c | ||
| 917 | #define CB_COLOR6_INFO 0x28dd8 | ||
| 918 | #define CB_COLOR7_INFO 0x28e14 | ||
| 919 | #define CB_COLOR8_INFO 0x28e50 | ||
| 920 | #define CB_COLOR9_INFO 0x28e6c | ||
| 921 | #define CB_COLOR10_INFO 0x28e88 | ||
| 922 | #define CB_COLOR11_INFO 0x28ea4 | ||
| 923 | |||
| 924 | #define CB_COLOR1_ATTRIB 0x28cb0 | ||
| 925 | #define CB_COLOR2_ATTRIB 0x28cec | ||
| 926 | #define CB_COLOR3_ATTRIB 0x28d28 | ||
| 927 | #define CB_COLOR4_ATTRIB 0x28d64 | ||
| 928 | #define CB_COLOR5_ATTRIB 0x28da0 | ||
| 929 | #define CB_COLOR6_ATTRIB 0x28ddc | ||
| 930 | #define CB_COLOR7_ATTRIB 0x28e18 | ||
| 931 | #define CB_COLOR8_ATTRIB 0x28e54 | ||
| 932 | #define CB_COLOR9_ATTRIB 0x28e70 | ||
| 933 | #define CB_COLOR10_ATTRIB 0x28e8c | ||
| 934 | #define CB_COLOR11_ATTRIB 0x28ea8 | ||
| 935 | |||
| 936 | #define CB_COLOR1_DIM 0x28cb4 | ||
| 937 | #define CB_COLOR2_DIM 0x28cf0 | ||
| 938 | #define CB_COLOR3_DIM 0x28d2c | ||
| 939 | #define CB_COLOR4_DIM 0x28d68 | ||
| 940 | #define CB_COLOR5_DIM 0x28da4 | ||
| 941 | #define CB_COLOR6_DIM 0x28de0 | ||
| 942 | #define CB_COLOR7_DIM 0x28e1c | ||
| 943 | #define CB_COLOR8_DIM 0x28e58 | ||
| 944 | #define CB_COLOR9_DIM 0x28e74 | ||
| 945 | #define CB_COLOR10_DIM 0x28e90 | ||
| 946 | #define CB_COLOR11_DIM 0x28eac | ||
| 947 | |||
| 948 | #define CB_COLOR1_CMASK 0x28cb8 | ||
| 949 | #define CB_COLOR2_CMASK 0x28cf4 | ||
| 950 | #define CB_COLOR3_CMASK 0x28d30 | ||
| 951 | #define CB_COLOR4_CMASK 0x28d6c | ||
| 952 | #define CB_COLOR5_CMASK 0x28da8 | ||
| 953 | #define CB_COLOR6_CMASK 0x28de4 | ||
| 954 | #define CB_COLOR7_CMASK 0x28e20 | ||
| 955 | |||
| 956 | #define CB_COLOR1_CMASK_SLICE 0x28cbc | ||
| 957 | #define CB_COLOR2_CMASK_SLICE 0x28cf8 | ||
| 958 | #define CB_COLOR3_CMASK_SLICE 0x28d34 | ||
| 959 | #define CB_COLOR4_CMASK_SLICE 0x28d70 | ||
| 960 | #define CB_COLOR5_CMASK_SLICE 0x28dac | ||
| 961 | #define CB_COLOR6_CMASK_SLICE 0x28de8 | ||
| 962 | #define CB_COLOR7_CMASK_SLICE 0x28e24 | ||
| 963 | |||
| 964 | #define CB_COLOR1_FMASK 0x28cc0 | ||
| 965 | #define CB_COLOR2_FMASK 0x28cfc | ||
| 966 | #define CB_COLOR3_FMASK 0x28d38 | ||
| 967 | #define CB_COLOR4_FMASK 0x28d74 | ||
| 968 | #define CB_COLOR5_FMASK 0x28db0 | ||
| 969 | #define CB_COLOR6_FMASK 0x28dec | ||
| 970 | #define CB_COLOR7_FMASK 0x28e28 | ||
| 971 | |||
| 972 | #define CB_COLOR1_FMASK_SLICE 0x28cc4 | ||
| 973 | #define CB_COLOR2_FMASK_SLICE 0x28d00 | ||
| 974 | #define CB_COLOR3_FMASK_SLICE 0x28d3c | ||
| 975 | #define CB_COLOR4_FMASK_SLICE 0x28d78 | ||
| 976 | #define CB_COLOR5_FMASK_SLICE 0x28db4 | ||
| 977 | #define CB_COLOR6_FMASK_SLICE 0x28df0 | ||
| 978 | #define CB_COLOR7_FMASK_SLICE 0x28e2c | ||
| 979 | |||
| 980 | #define CB_COLOR1_CLEAR_WORD0 0x28cc8 | ||
| 981 | #define CB_COLOR2_CLEAR_WORD0 0x28d04 | ||
| 982 | #define CB_COLOR3_CLEAR_WORD0 0x28d40 | ||
| 983 | #define CB_COLOR4_CLEAR_WORD0 0x28d7c | ||
| 984 | #define CB_COLOR5_CLEAR_WORD0 0x28db8 | ||
| 985 | #define CB_COLOR6_CLEAR_WORD0 0x28df4 | ||
| 986 | #define CB_COLOR7_CLEAR_WORD0 0x28e30 | ||
| 987 | |||
| 988 | #define CB_COLOR1_CLEAR_WORD1 0x28ccc | ||
| 989 | #define CB_COLOR2_CLEAR_WORD1 0x28d08 | ||
| 990 | #define CB_COLOR3_CLEAR_WORD1 0x28d44 | ||
| 991 | #define CB_COLOR4_CLEAR_WORD1 0x28d80 | ||
| 992 | #define CB_COLOR5_CLEAR_WORD1 0x28dbc | ||
| 993 | #define CB_COLOR6_CLEAR_WORD1 0x28df8 | ||
| 994 | #define CB_COLOR7_CLEAR_WORD1 0x28e34 | ||
| 995 | |||
| 996 | #define CB_COLOR1_CLEAR_WORD2 0x28cd0 | ||
| 997 | #define CB_COLOR2_CLEAR_WORD2 0x28d0c | ||
| 998 | #define CB_COLOR3_CLEAR_WORD2 0x28d48 | ||
| 999 | #define CB_COLOR4_CLEAR_WORD2 0x28d84 | ||
| 1000 | #define CB_COLOR5_CLEAR_WORD2 0x28dc0 | ||
| 1001 | #define CB_COLOR6_CLEAR_WORD2 0x28dfc | ||
| 1002 | #define CB_COLOR7_CLEAR_WORD2 0x28e38 | ||
| 1003 | |||
| 1004 | #define CB_COLOR1_CLEAR_WORD3 0x28cd4 | ||
| 1005 | #define CB_COLOR2_CLEAR_WORD3 0x28d10 | ||
| 1006 | #define CB_COLOR3_CLEAR_WORD3 0x28d4c | ||
| 1007 | #define CB_COLOR4_CLEAR_WORD3 0x28d88 | ||
| 1008 | #define CB_COLOR5_CLEAR_WORD3 0x28dc4 | ||
| 1009 | #define CB_COLOR6_CLEAR_WORD3 0x28e00 | ||
| 1010 | #define CB_COLOR7_CLEAR_WORD3 0x28e3c | ||
| 1011 | |||
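The per-block color-buffer registers listed above follow two strides: CB0-7 carry the full register set including the CMASK/FMASK/CLEAR words (0x3c bytes between blocks), while CB8-11 omit them (0x1c bytes between blocks). A sketch of the base-address computation, derived from the offsets above (the helper name is illustrative):

#include <stdint.h>

#define CB_COLOR0_BASE 0x28c60
#define CB_COLOR8_BASE 0x28e40

static uint32_t cb_color_base(unsigned int block)
{
	if (block < 8)
		return CB_COLOR0_BASE + block * 0x3c; /* full register set */
	return CB_COLOR8_BASE + (block - 8) * 0x1c;   /* no CMASK/FMASK/CLEAR */
}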
| 1012 | #define SQ_TEX_RESOURCE_WORD0_0 0x30000 | ||
| 1013 | #define SQ_TEX_RESOURCE_WORD1_0 0x30004 | ||
| 1014 | # define TEX_ARRAY_MODE(x) ((x) << 28) | ||
| 1015 | #define SQ_TEX_RESOURCE_WORD2_0 0x30008 | ||
| 1016 | #define SQ_TEX_RESOURCE_WORD3_0 0x3000C | ||
| 1017 | #define SQ_TEX_RESOURCE_WORD4_0 0x30010 | ||
| 1018 | #define SQ_TEX_RESOURCE_WORD5_0 0x30014 | ||
| 1019 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 | ||
| 1020 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c | ||
| 1021 | |||
| 1022 | |||
| 556 | #endif | 1023 | #endif |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index cc004b05d63e..a89a15ab524d 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -162,6 +162,11 @@ void r100_pm_init_profile(struct radeon_device *rdev) | |||
| 162 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; | 162 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; |
| 163 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 163 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 164 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 164 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 165 | /* mid sh */ | ||
| 166 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; | ||
| 167 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; | ||
| 168 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 169 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
| 165 | /* high sh */ | 170 | /* high sh */ |
| 166 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; | 171 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; |
| 167 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 172 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| @@ -172,6 +177,11 @@ void r100_pm_init_profile(struct radeon_device *rdev) | |||
| 172 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 177 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| 173 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 178 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 174 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 179 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 180 | /* mid mh */ | ||
| 181 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; | ||
| 182 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 183 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 184 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
| 175 | /* high mh */ | 185 | /* high mh */ |
| 176 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; | 186 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; |
| 177 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 187 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
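The mid-profile assignments added here, and in the matching PM hunks further down, all fill the same four per-profile indices. A sketch of the entry they populate, with the field meanings spelled out (the fields mirror the driver's radeon_pm_profile; treat the exact layout as an assumption):

struct radeon_pm_profile {
	int dpms_off_ps_idx; /* power state used while all CRTCs are off */
	int dpms_on_ps_idx;  /* power state used while a CRTC is active */
	int dpms_off_cm_idx; /* clock mode within the dpms-off state */
	int dpms_on_cm_idx;  /* clock mode within the dpms-on state */
};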
| @@ -1220,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 1220 | header = radeon_get_ib_value(p, h_idx); | 1230 | header = radeon_get_ib_value(p, h_idx); |
| 1221 | crtc_id = radeon_get_ib_value(p, h_idx + 5); | 1231 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
| 1222 | reg = CP_PACKET0_GET_REG(header); | 1232 | reg = CP_PACKET0_GET_REG(header); |
| 1223 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
| 1224 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 1233 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 1225 | if (!obj) { | 1234 | if (!obj) { |
| 1226 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 1235 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
| @@ -1254,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 1254 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | 1263 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
| 1255 | } | 1264 | } |
| 1256 | out: | 1265 | out: |
| 1257 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
| 1258 | return r; | 1266 | return r; |
| 1259 | } | 1267 | } |
| 1260 | 1268 | ||
| @@ -1618,6 +1626,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
| 1618 | case RADEON_TXFORMAT_RGB332: | 1626 | case RADEON_TXFORMAT_RGB332: |
| 1619 | case RADEON_TXFORMAT_Y8: | 1627 | case RADEON_TXFORMAT_Y8: |
| 1620 | track->textures[i].cpp = 1; | 1628 | track->textures[i].cpp = 1; |
| 1629 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 1621 | break; | 1630 | break; |
| 1622 | case RADEON_TXFORMAT_AI88: | 1631 | case RADEON_TXFORMAT_AI88: |
| 1623 | case RADEON_TXFORMAT_ARGB1555: | 1632 | case RADEON_TXFORMAT_ARGB1555: |
| @@ -1629,12 +1638,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
| 1629 | case RADEON_TXFORMAT_LDUDV655: | 1638 | case RADEON_TXFORMAT_LDUDV655: |
| 1630 | case RADEON_TXFORMAT_DUDV88: | 1639 | case RADEON_TXFORMAT_DUDV88: |
| 1631 | track->textures[i].cpp = 2; | 1640 | track->textures[i].cpp = 2; |
| 1641 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 1632 | break; | 1642 | break; |
| 1633 | case RADEON_TXFORMAT_ARGB8888: | 1643 | case RADEON_TXFORMAT_ARGB8888: |
| 1634 | case RADEON_TXFORMAT_RGBA8888: | 1644 | case RADEON_TXFORMAT_RGBA8888: |
| 1635 | case RADEON_TXFORMAT_SHADOW32: | 1645 | case RADEON_TXFORMAT_SHADOW32: |
| 1636 | case RADEON_TXFORMAT_LDUDUV8888: | 1646 | case RADEON_TXFORMAT_LDUDUV8888: |
| 1637 | track->textures[i].cpp = 4; | 1647 | track->textures[i].cpp = 4; |
| 1648 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 1638 | break; | 1649 | break; |
| 1639 | case RADEON_TXFORMAT_DXT1: | 1650 | case RADEON_TXFORMAT_DXT1: |
| 1640 | track->textures[i].cpp = 1; | 1651 | track->textures[i].cpp = 1; |
| @@ -2341,6 +2352,7 @@ void r100_mc_init(struct radeon_device *rdev) | |||
| 2341 | if (rdev->flags & RADEON_IS_IGP) | 2352 | if (rdev->flags & RADEON_IS_IGP) |
| 2342 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 2353 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
| 2343 | radeon_vram_location(rdev, &rdev->mc, base); | 2354 | radeon_vram_location(rdev, &rdev->mc, base); |
| 2355 | rdev->mc.gtt_base_align = 0; | ||
| 2344 | if (!(rdev->flags & RADEON_IS_AGP)) | 2356 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 2345 | radeon_gtt_location(rdev, &rdev->mc); | 2357 | radeon_gtt_location(rdev, &rdev->mc); |
| 2346 | radeon_update_bandwidth_info(rdev); | 2358 | radeon_update_bandwidth_info(rdev); |
| @@ -2594,12 +2606,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 2594 | int surf_index = reg * 16; | 2606 | int surf_index = reg * 16; |
| 2595 | int flags = 0; | 2607 | int flags = 0; |
| 2596 | 2608 | ||
| 2597 | /* r100/r200 divide by 16 */ | ||
| 2598 | if (rdev->family < CHIP_R300) | ||
| 2599 | flags = pitch / 16; | ||
| 2600 | else | ||
| 2601 | flags = pitch / 8; | ||
| 2602 | |||
| 2603 | if (rdev->family <= CHIP_RS200) { | 2609 | if (rdev->family <= CHIP_RS200) { |
| 2604 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | 2610 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
| 2605 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | 2611 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
| @@ -2623,6 +2629,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 2623 | if (tiling_flags & RADEON_TILING_SWAP_32BIT) | 2629 | if (tiling_flags & RADEON_TILING_SWAP_32BIT) |
| 2624 | flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; | 2630 | flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; |
| 2625 | 2631 | ||
| 2632 | /* when we aren't tiling, the pitch seems to need to be further divided down. - tested on power5 + rn50 server */ | ||
| 2633 | if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { | ||
| 2634 | if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) | ||
| 2635 | if (ASIC_IS_RN50(rdev)) | ||
| 2636 | pitch /= 16; | ||
| 2637 | } | ||
| 2638 | |||
| 2639 | /* r100/r200 divide by 16 */ | ||
| 2640 | if (rdev->family < CHIP_R300) | ||
| 2641 | flags |= pitch / 16; | ||
| 2642 | else | ||
| 2643 | flags |= pitch / 8; | ||
| 2644 | |||
| 2645 | |||
| 2626 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); | 2646 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); |
| 2627 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); | 2647 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); |
| 2628 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); | 2648 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); |
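The reorder above matters because the RN50 byte-swap quirk has to divide the pitch before the family-based divide folds it into the surface flags. The same computation in isolation (predicates simplified to booleans; the real code tests tiling_flags bits and the chip family):

#include <stdbool.h>

static int surface_pitch_flags(int pitch, bool rn50_untiled_swap, bool pre_r300)
{
	if (rn50_untiled_swap)
		pitch /= 16; /* untiled + byte-swapped scanout on RN50 */

	/* r100/r200 divide by 16, r300+ by 8 */
	return pre_r300 ? pitch / 16 : pitch / 8;
}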
| @@ -3137,33 +3157,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) | |||
| 3137 | DRM_ERROR("compress format %d\n", t->compress_format); | 3157 | DRM_ERROR("compress format %d\n", t->compress_format); |
| 3138 | } | 3158 | } |
| 3139 | 3159 | ||
| 3140 | static int r100_cs_track_cube(struct radeon_device *rdev, | ||
| 3141 | struct r100_cs_track *track, unsigned idx) | ||
| 3142 | { | ||
| 3143 | unsigned face, w, h; | ||
| 3144 | struct radeon_bo *cube_robj; | ||
| 3145 | unsigned long size; | ||
| 3146 | |||
| 3147 | for (face = 0; face < 5; face++) { | ||
| 3148 | cube_robj = track->textures[idx].cube_info[face].robj; | ||
| 3149 | w = track->textures[idx].cube_info[face].width; | ||
| 3150 | h = track->textures[idx].cube_info[face].height; | ||
| 3151 | |||
| 3152 | size = w * h; | ||
| 3153 | size *= track->textures[idx].cpp; | ||
| 3154 | |||
| 3155 | size += track->textures[idx].cube_info[face].offset; | ||
| 3156 | |||
| 3157 | if (size > radeon_bo_size(cube_robj)) { | ||
| 3158 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | ||
| 3159 | size, radeon_bo_size(cube_robj)); | ||
| 3160 | r100_cs_track_texture_print(&track->textures[idx]); | ||
| 3161 | return -1; | ||
| 3162 | } | ||
| 3163 | } | ||
| 3164 | return 0; | ||
| 3165 | } | ||
| 3166 | |||
| 3167 | static int r100_track_compress_size(int compress_format, int w, int h) | 3160 | static int r100_track_compress_size(int compress_format, int w, int h) |
| 3168 | { | 3161 | { |
| 3169 | int block_width, block_height, block_bytes; | 3162 | int block_width, block_height, block_bytes; |
| @@ -3194,6 +3187,37 @@ static int r100_track_compress_size(int compress_format, int w, int h) | |||
| 3194 | return sz; | 3187 | return sz; |
| 3195 | } | 3188 | } |
| 3196 | 3189 | ||
| 3190 | static int r100_cs_track_cube(struct radeon_device *rdev, | ||
| 3191 | struct r100_cs_track *track, unsigned idx) | ||
| 3192 | { | ||
| 3193 | unsigned face, w, h; | ||
| 3194 | struct radeon_bo *cube_robj; | ||
| 3195 | unsigned long size; | ||
| 3196 | unsigned compress_format = track->textures[idx].compress_format; | ||
| 3197 | |||
| 3198 | for (face = 0; face < 5; face++) { | ||
| 3199 | cube_robj = track->textures[idx].cube_info[face].robj; | ||
| 3200 | w = track->textures[idx].cube_info[face].width; | ||
| 3201 | h = track->textures[idx].cube_info[face].height; | ||
| 3202 | |||
| 3203 | if (compress_format) { | ||
| 3204 | size = r100_track_compress_size(compress_format, w, h); | ||
| 3205 | } else | ||
| 3206 | size = w * h; | ||
| 3207 | size *= track->textures[idx].cpp; | ||
| 3208 | |||
| 3209 | size += track->textures[idx].cube_info[face].offset; | ||
| 3210 | |||
| 3211 | if (size > radeon_bo_size(cube_robj)) { | ||
| 3212 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | ||
| 3213 | size, radeon_bo_size(cube_robj)); | ||
| 3214 | r100_cs_track_texture_print(&track->textures[idx]); | ||
| 3215 | return -1; | ||
| 3216 | } | ||
| 3217 | } | ||
| 3218 | return 0; | ||
| 3219 | } | ||
| 3220 | |||
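r100_cs_track_cube() now routes compressed cube faces through r100_track_compress_size() instead of assuming w * h texels, which is why it had to move below that helper. A sketch of the block math such a size check performs for DXT formats, assuming 4x4 texel blocks with 8 bytes per block for DXT1 and 16 for DXT3/5 (values taken from the DXT format definition, not copied from the driver):

static int dxt_compressed_size(int bytes_per_block, int w, int h)
{
	int blocks_x = (w + 3) / 4; /* round partial blocks up */
	int blocks_y = (h + 3) / 4;

	return blocks_x * blocks_y * bytes_per_block;
}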
| 3197 | static int r100_cs_track_texture_check(struct radeon_device *rdev, | 3221 | static int r100_cs_track_texture_check(struct radeon_device *rdev, |
| 3198 | struct r100_cs_track *track) | 3222 | struct r100_cs_track *track) |
| 3199 | { | 3223 | { |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 85617c311212..0266d72e0a4c 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
| @@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 415 | /* 2D, 3D, CUBE */ | 415 | /* 2D, 3D, CUBE */ |
| 416 | switch (tmp) { | 416 | switch (tmp) { |
| 417 | case 0: | 417 | case 0: |
| 418 | case 3: | ||
| 419 | case 4: | ||
| 418 | case 5: | 420 | case 5: |
| 419 | case 6: | 421 | case 6: |
| 420 | case 7: | 422 | case 7: |
| @@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 450 | case R200_TXFORMAT_RGB332: | 452 | case R200_TXFORMAT_RGB332: |
| 451 | case R200_TXFORMAT_Y8: | 453 | case R200_TXFORMAT_Y8: |
| 452 | track->textures[i].cpp = 1; | 454 | track->textures[i].cpp = 1; |
| 455 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 453 | break; | 456 | break; |
| 454 | case R200_TXFORMAT_AI88: | 457 | case R200_TXFORMAT_AI88: |
| 455 | case R200_TXFORMAT_ARGB1555: | 458 | case R200_TXFORMAT_ARGB1555: |
| @@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 461 | case R200_TXFORMAT_DVDU88: | 464 | case R200_TXFORMAT_DVDU88: |
| 462 | case R200_TXFORMAT_AVYU4444: | 465 | case R200_TXFORMAT_AVYU4444: |
| 463 | track->textures[i].cpp = 2; | 466 | track->textures[i].cpp = 2; |
| 467 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 464 | break; | 468 | break; |
| 465 | case R200_TXFORMAT_ARGB8888: | 469 | case R200_TXFORMAT_ARGB8888: |
| 466 | case R200_TXFORMAT_RGBA8888: | 470 | case R200_TXFORMAT_RGBA8888: |
| @@ -468,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 468 | case R200_TXFORMAT_BGR111110: | 472 | case R200_TXFORMAT_BGR111110: |
| 469 | case R200_TXFORMAT_LDVDU8888: | 473 | case R200_TXFORMAT_LDVDU8888: |
| 470 | track->textures[i].cpp = 4; | 474 | track->textures[i].cpp = 4; |
| 475 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 471 | break; | 476 | break; |
| 472 | case R200_TXFORMAT_DXT1: | 477 | case R200_TXFORMAT_DXT1: |
| 473 | track->textures[i].cpp = 1; | 478 | track->textures[i].cpp = 1; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index b2f9efe2897c..19a7ef7ee344 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -481,6 +481,7 @@ void r300_mc_init(struct radeon_device *rdev) | |||
| 481 | if (rdev->flags & RADEON_IS_IGP) | 481 | if (rdev->flags & RADEON_IS_IGP) |
| 482 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 482 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
| 483 | radeon_vram_location(rdev, &rdev->mc, base); | 483 | radeon_vram_location(rdev, &rdev->mc, base); |
| 484 | rdev->mc.gtt_base_align = 0; | ||
| 484 | if (!(rdev->flags & RADEON_IS_AGP)) | 485 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 485 | radeon_gtt_location(rdev, &rdev->mc); | 486 | radeon_gtt_location(rdev, &rdev->mc); |
| 486 | radeon_update_bandwidth_info(rdev); | 487 | radeon_update_bandwidth_info(rdev); |
| @@ -881,6 +882,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 881 | case R300_TX_FORMAT_Y4X4: | 882 | case R300_TX_FORMAT_Y4X4: |
| 882 | case R300_TX_FORMAT_Z3Y3X2: | 883 | case R300_TX_FORMAT_Z3Y3X2: |
| 883 | track->textures[i].cpp = 1; | 884 | track->textures[i].cpp = 1; |
| 885 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 884 | break; | 886 | break; |
| 885 | case R300_TX_FORMAT_X16: | 887 | case R300_TX_FORMAT_X16: |
| 886 | case R300_TX_FORMAT_Y8X8: | 888 | case R300_TX_FORMAT_Y8X8: |
| @@ -892,6 +894,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 892 | case R300_TX_FORMAT_B8G8_B8G8: | 894 | case R300_TX_FORMAT_B8G8_B8G8: |
| 893 | case R300_TX_FORMAT_G8R8_G8B8: | 895 | case R300_TX_FORMAT_G8R8_G8B8: |
| 894 | track->textures[i].cpp = 2; | 896 | track->textures[i].cpp = 2; |
| 897 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 895 | break; | 898 | break; |
| 896 | case R300_TX_FORMAT_Y16X16: | 899 | case R300_TX_FORMAT_Y16X16: |
| 897 | case R300_TX_FORMAT_Z11Y11X10: | 900 | case R300_TX_FORMAT_Z11Y11X10: |
| @@ -902,14 +905,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 902 | case R300_TX_FORMAT_FL_I32: | 905 | case R300_TX_FORMAT_FL_I32: |
| 903 | case 0x1e: | 906 | case 0x1e: |
| 904 | track->textures[i].cpp = 4; | 907 | track->textures[i].cpp = 4; |
| 908 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 905 | break; | 909 | break; |
| 906 | case R300_TX_FORMAT_W16Z16Y16X16: | 910 | case R300_TX_FORMAT_W16Z16Y16X16: |
| 907 | case R300_TX_FORMAT_FL_R16G16B16A16: | 911 | case R300_TX_FORMAT_FL_R16G16B16A16: |
| 908 | case R300_TX_FORMAT_FL_I32A32: | 912 | case R300_TX_FORMAT_FL_I32A32: |
| 909 | track->textures[i].cpp = 8; | 913 | track->textures[i].cpp = 8; |
| 914 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 910 | break; | 915 | break; |
| 911 | case R300_TX_FORMAT_FL_R32G32B32A32: | 916 | case R300_TX_FORMAT_FL_R32G32B32A32: |
| 912 | track->textures[i].cpp = 16; | 917 | track->textures[i].cpp = 16; |
| 918 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
| 913 | break; | 919 | break; |
| 914 | case R300_TX_FORMAT_DXT1: | 920 | case R300_TX_FORMAT_DXT1: |
| 915 | track->textures[i].cpp = 1; | 921 | track->textures[i].cpp = 1; |
| @@ -1171,6 +1177,8 @@ int r300_cs_parse(struct radeon_cs_parser *p) | |||
| 1171 | int r; | 1177 | int r; |
| 1172 | 1178 | ||
| 1173 | track = kzalloc(sizeof(*track), GFP_KERNEL); | 1179 | track = kzalloc(sizeof(*track), GFP_KERNEL); |
| 1180 | if (track == NULL) | ||
| 1181 | return -ENOMEM; | ||
| 1174 | r100_cs_track_clear(p->rdev, track); | 1182 | r100_cs_track_clear(p->rdev, track); |
| 1175 | p->track = track; | 1183 | p->track = track; |
| 1176 | do { | 1184 | do { |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 4415a5ee5871..e6c89142bb4d 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -45,9 +45,14 @@ void r420_pm_init_profile(struct radeon_device *rdev) | |||
| 45 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | 45 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; |
| 46 | /* low sh */ | 46 | /* low sh */ |
| 47 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; | 47 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; |
| 48 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | 48 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; |
| 49 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 49 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 50 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 50 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 51 | /* mid sh */ | ||
| 52 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; | ||
| 53 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; | ||
| 54 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 55 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
| 51 | /* high sh */ | 56 | /* high sh */ |
| 52 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; | 57 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; |
| 53 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 58 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| @@ -58,6 +63,11 @@ void r420_pm_init_profile(struct radeon_device *rdev) | |||
| 58 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 63 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| 59 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 64 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 60 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 65 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 66 | /* mid mh */ | ||
| 67 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; | ||
| 68 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 69 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 70 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
| 61 | /* high mh */ | 71 | /* high mh */ |
| 62 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; | 72 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; |
| 63 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 73 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 34330df28483..694af7cc23ac 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -125,6 +125,7 @@ void r520_mc_init(struct radeon_device *rdev) | |||
| 125 | r520_vram_get_type(rdev); | 125 | r520_vram_get_type(rdev); |
| 126 | r100_vram_init_sizes(rdev); | 126 | r100_vram_init_sizes(rdev); |
| 127 | radeon_vram_location(rdev, &rdev->mc, 0); | 127 | radeon_vram_location(rdev, &rdev->mc, 0); |
| 128 | rdev->mc.gtt_base_align = 0; | ||
| 128 | if (!(rdev->flags & RADEON_IS_AGP)) | 129 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 129 | radeon_gtt_location(rdev, &rdev->mc); | 130 | radeon_gtt_location(rdev, &rdev->mc); |
| 130 | radeon_update_bandwidth_info(rdev); | 131 | radeon_update_bandwidth_info(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 44e96a2ae25a..e100f69faeec 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -130,9 +130,14 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) | |||
| 130 | break; | 130 | break; |
| 131 | } | 131 | } |
| 132 | } | 132 | } |
| 133 | } else | 133 | } else { |
| 134 | rdev->pm.requested_power_state_index = | 134 | if (rdev->pm.current_power_state_index == 0) |
| 135 | rdev->pm.current_power_state_index - 1; | 135 | rdev->pm.requested_power_state_index = |
| 136 | rdev->pm.num_power_states - 1; | ||
| 137 | else | ||
| 138 | rdev->pm.requested_power_state_index = | ||
| 139 | rdev->pm.current_power_state_index - 1; | ||
| 140 | } | ||
| 136 | } | 141 | } |
| 137 | rdev->pm.requested_clock_mode_index = 0; | 142 | rdev->pm.requested_clock_mode_index = 0; |
| 138 | /* don't use the power state if crtcs are active and no display flag is set */ | 143 | /* don't use the power state if crtcs are active and no display flag is set */ |
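The reworked else branch above keeps the requested power-state index in range: decrementing from index 0 now wraps to the last state instead of underflowing to -1. The same logic in isolation:

static int next_lower_power_state(int current_index, int num_power_states)
{
	if (current_index == 0)
		return num_power_states - 1; /* wrap instead of underflowing */
	return current_index - 1;
}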
| @@ -291,6 +296,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
| 291 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; | 296 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; |
| 292 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 297 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 293 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 298 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 299 | /* mid sh */ | ||
| 300 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; | ||
| 301 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; | ||
| 302 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 303 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
| 294 | /* high sh */ | 304 | /* high sh */ |
| 295 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; | 305 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; |
| 296 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; | 306 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; |
| @@ -301,6 +311,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
| 301 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; | 311 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; |
| 302 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 312 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 303 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 313 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 314 | /* mid mh */ | ||
| 315 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; | ||
| 316 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; | ||
| 317 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 318 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
| 304 | /* high mh */ | 319 | /* high mh */ |
| 305 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; | 320 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; |
| 306 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; | 321 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; |
| @@ -317,6 +332,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
| 317 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | 332 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; |
| 318 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 333 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 319 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 334 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 335 | /* mid sh */ | ||
| 336 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; | ||
| 337 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; | ||
| 338 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 339 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
| 320 | /* high sh */ | 340 | /* high sh */ |
| 321 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; | 341 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; |
| 322 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; | 342 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; |
| @@ -327,6 +347,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
| 327 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; | 347 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; |
| 328 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 348 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 329 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 349 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 350 | /* mid mh */ | ||
| 351 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; | ||
| 352 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; | ||
| 353 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 354 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
| 330 | /* high mh */ | 355 | /* high mh */ |
| 331 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; | 356 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; |
| 332 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; | 357 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; |
| @@ -343,6 +368,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
| 343 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; | 368 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; |
| 344 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 369 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 345 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 370 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 371 | /* mid sh */ | ||
| 372 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; | ||
| 373 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; | ||
| 374 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 375 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
| 346 | /* high sh */ | 376 | /* high sh */ |
| 347 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; | 377 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; |
| 348 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; | 378 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; |
| @@ -353,6 +383,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
| 353 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; | 383 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; |
| 354 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 384 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 355 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 385 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 386 | /* mid mh */ | ||
| 387 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; | ||
| 388 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; | ||
| 389 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 390 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
| 356 | /* high mh */ | 391 | /* high mh */ |
| 357 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; | 392 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; |
| 358 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; | 393 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; |
| @@ -375,6 +410,11 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 375 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 410 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| 376 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 411 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 377 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 412 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 413 | /* mid sh */ | ||
| 414 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 415 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 416 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 417 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
| 378 | /* high sh */ | 418 | /* high sh */ |
| 379 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | 419 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
| 380 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 420 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| @@ -385,6 +425,11 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 385 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 425 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| 386 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 426 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 387 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 427 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 428 | /* mid mh */ | ||
| 429 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
| 430 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
| 431 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 432 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
| 388 | /* high mh */ | 433 | /* high mh */ |
| 389 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | 434 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
| 390 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | 435 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
| @@ -401,7 +446,12 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 401 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; | 446 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; |
| 402 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | 447 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; |
| 403 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 448 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 404 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; | 449 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 450 | /* mid sh */ | ||
| 451 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; | ||
| 452 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; | ||
| 453 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 454 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | ||
| 405 | /* high sh */ | 455 | /* high sh */ |
| 406 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; | 456 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; |
| 407 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; | 457 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; |
| @@ -411,7 +461,12 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 411 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; | 461 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; |
| 412 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; | 462 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; |
| 413 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 463 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 414 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1; | 464 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 465 | /* mid mh */ | ||
| 466 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; | ||
| 467 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; | ||
| 468 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 469 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | ||
| 415 | /* high mh */ | 470 | /* high mh */ |
| 416 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; | 471 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; |
| 417 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; | 472 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; |
| @@ -430,14 +485,30 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 430 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | 485 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = |
| 431 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | 486 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); |
| 432 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 487 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 433 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; | 488 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 434 | } else { | 489 | } else { |
| 435 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = | 490 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = |
| 436 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | 491 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
| 437 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | 492 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = |
| 438 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | 493 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
| 439 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 494 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
| 440 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1; | 495 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
| 496 | } | ||
| 497 | /* mid sh */ | ||
| 498 | if (rdev->flags & RADEON_IS_MOBILITY) { | ||
| 499 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = | ||
| 500 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
| 501 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = | ||
| 502 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
| 503 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 504 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | ||
| 505 | } else { | ||
| 506 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = | ||
| 507 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
| 508 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = | ||
| 509 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
| 510 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
| 511 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | ||
| 441 | } | 512 | } |
| 442 | /* high sh */ | 513 | /* high sh */ |
| 443 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = | 514 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = |
| @@ -453,14 +524,30 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 453 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | 524 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = |
| 454 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | 525 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); |
| 455 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 526 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 456 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2; | 527 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 457 | } else { | 528 | } else { |
| 458 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = | 529 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = |
| 459 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | 530 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
| 460 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | 531 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = |
| 461 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | 532 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
| 462 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 533 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
| 463 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1; | 534 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
| 535 | } | ||
| 536 | /* mid mh */ | ||
| 537 | if (rdev->flags & RADEON_IS_MOBILITY) { | ||
| 538 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = | ||
| 539 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | ||
| 540 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = | ||
| 541 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | ||
| 542 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 543 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | ||
| 544 | } else { | ||
| 545 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = | ||
| 546 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
| 547 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = | ||
| 548 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
| 549 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
| 550 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | ||
| 464 | } | 551 | } |
| 465 | /* high mh */ | 552 | /* high mh */ |
| 466 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = | 553 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = |
| @@ -475,7 +562,18 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 475 | 562 | ||
| 476 | void r600_pm_misc(struct radeon_device *rdev) | 563 | void r600_pm_misc(struct radeon_device *rdev) |
| 477 | { | 564 | { |
| 565 | int req_ps_idx = rdev->pm.requested_power_state_index; | ||
| 566 | int req_cm_idx = rdev->pm.requested_clock_mode_index; | ||
| 567 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; | ||
| 568 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | ||
| 478 | 569 | ||
| 570 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | ||
| 571 | if (voltage->voltage != rdev->pm.current_vddc) { | ||
| 572 | radeon_atom_set_voltage(rdev, voltage->voltage); | ||
| 573 | rdev->pm.current_vddc = voltage->voltage; | ||
| 574 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | ||
| 575 | } | ||
| 576 | } | ||
| 479 | } | 577 | } |
| 480 | 578 | ||
| 481 | bool r600_gui_idle(struct radeon_device *rdev) | 579 | bool r600_gui_idle(struct radeon_device *rdev) |
| @@ -1004,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev) | |||
| 1004 | WREG32(MC_VM_FB_LOCATION, tmp); | 1102 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 1005 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 1103 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 1006 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 1104 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 1007 | WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); | 1105 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
| 1008 | if (rdev->flags & RADEON_IS_AGP) { | 1106 | if (rdev->flags & RADEON_IS_AGP) { |
| 1009 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); | 1107 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); |
| 1010 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); | 1108 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); |
| @@ -1081,6 +1179,7 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
| 1081 | if (rdev->flags & RADEON_IS_IGP) | 1179 | if (rdev->flags & RADEON_IS_IGP) |
| 1082 | base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | 1180 | base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; |
| 1083 | radeon_vram_location(rdev, &rdev->mc, base); | 1181 | radeon_vram_location(rdev, &rdev->mc, base); |
| 1182 | rdev->mc.gtt_base_align = 0; | ||
| 1084 | radeon_gtt_location(rdev, mc); | 1183 | radeon_gtt_location(rdev, mc); |
| 1085 | } | 1184 | } |
| 1086 | } | 1185 | } |
| @@ -1126,8 +1225,10 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 1126 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1225 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
| 1127 | r600_vram_gtt_location(rdev, &rdev->mc); | 1226 | r600_vram_gtt_location(rdev, &rdev->mc); |
| 1128 | 1227 | ||
| 1129 | if (rdev->flags & RADEON_IS_IGP) | 1228 | if (rdev->flags & RADEON_IS_IGP) { |
| 1229 | rs690_pm_info(rdev); | ||
| 1130 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 1230 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 1231 | } | ||
| 1131 | radeon_update_bandwidth_info(rdev); | 1232 | radeon_update_bandwidth_info(rdev); |
| 1132 | return 0; | 1233 | return 0; |
| 1133 | } | 1234 | } |
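
The r600 hunks above only fill in the new MID profile slots; the code that consumes them lives in radeon_pm.c. As a rough, self-contained model of how one profile entry turns into a requested power state and clock mode (the index names mirror the new defines, but the types, helper, and values here are illustrative only):

    /* Minimal user-space model of the profile lookup. Only the slot order and
     * the mid-sh values are taken from the hunks above; the rest is a sketch. */
    #include <stdio.h>

    enum { DEF_IDX, LOW_SH, MID_SH, HIGH_SH, LOW_MH, MID_MH, HIGH_MH, PROFILE_MAX };

    struct profile { int dpms_off_ps, dpms_on_ps, dpms_off_cm, dpms_on_cm; };

    /* pick the power-state/clock-mode pair for the current display state */
    static void request_state(const struct profile *p, int displays_on,
                              int *ps_idx, int *cm_idx)
    {
        if (displays_on) { *ps_idx = p->dpms_on_ps;  *cm_idx = p->dpms_on_cm; }
        else             { *ps_idx = p->dpms_off_ps; *cm_idx = p->dpms_off_cm; }
    }

    int main(void)
    {
        struct profile profiles[PROFILE_MAX] = {
            [MID_SH] = { .dpms_off_ps = 1, .dpms_on_ps = 1,
                         .dpms_off_cm = 0, .dpms_on_cm = 1 }, /* values from the r600 hunk */
        };
        int ps, cm;

        request_state(&profiles[MID_SH], 1, &ps, &cm);
        printf("mid-sh, display on -> power state %d, clock mode %d\n", ps, cm);
        return 0;
    }
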
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index f4fb88ece2bb..ca5c29f70779 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
| @@ -538,9 +538,12 @@ int | |||
| 538 | r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv) | 538 | r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv) |
| 539 | { | 539 | { |
| 540 | drm_radeon_private_t *dev_priv = dev->dev_private; | 540 | drm_radeon_private_t *dev_priv = dev->dev_private; |
| 541 | int ret; | ||
| 541 | DRM_DEBUG("\n"); | 542 | DRM_DEBUG("\n"); |
| 542 | 543 | ||
| 543 | r600_nomm_get_vb(dev); | 544 | ret = r600_nomm_get_vb(dev); |
| 545 | if (ret) | ||
| 546 | return ret; | ||
| 544 | 547 | ||
| 545 | dev_priv->blit_vb->file_priv = file_priv; | 548 | dev_priv->blit_vb->file_priv = file_priv; |
| 546 | 549 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index c39c1bc13016..144c32d37136 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -585,7 +585,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 585 | header = radeon_get_ib_value(p, h_idx); | 585 | header = radeon_get_ib_value(p, h_idx); |
| 586 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | 586 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); |
| 587 | reg = CP_PACKET0_GET_REG(header); | 587 | reg = CP_PACKET0_GET_REG(header); |
| 588 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 588 | |
| 589 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 589 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 590 | if (!obj) { | 590 | if (!obj) { |
| 591 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | 591 | DRM_ERROR("cannot find crtc %d\n", crtc_id); |
| @@ -620,7 +620,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 620 | ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; | 620 | ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; |
| 621 | } | 621 | } |
| 622 | out: | 622 | out: |
| 623 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
| 624 | return r; | 623 | return r; |
| 625 | } | 624 | } |
| 626 | 625 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 669feb689bfc..2f94dc66c183 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -176,6 +176,8 @@ void radeon_pm_suspend(struct radeon_device *rdev); | |||
| 176 | void radeon_pm_resume(struct radeon_device *rdev); | 176 | void radeon_pm_resume(struct radeon_device *rdev); |
| 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
| 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
| 179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | ||
| 180 | void rs690_pm_info(struct radeon_device *rdev); | ||
| 179 | 181 | ||
| 180 | /* | 182 | /* |
| 181 | * Fences. | 183 | * Fences. |
| @@ -349,6 +351,7 @@ struct radeon_mc { | |||
| 349 | int vram_mtrr; | 351 | int vram_mtrr; |
| 350 | bool vram_is_ddr; | 352 | bool vram_is_ddr; |
| 351 | bool igp_sideport_enabled; | 353 | bool igp_sideport_enabled; |
| 354 | u64 gtt_base_align; | ||
| 352 | }; | 355 | }; |
| 353 | 356 | ||
| 354 | bool radeon_combios_sideport_present(struct radeon_device *rdev); | 357 | bool radeon_combios_sideport_present(struct radeon_device *rdev); |
| @@ -618,7 +621,8 @@ enum radeon_dynpm_state { | |||
| 618 | DYNPM_STATE_DISABLED, | 621 | DYNPM_STATE_DISABLED, |
| 619 | DYNPM_STATE_MINIMUM, | 622 | DYNPM_STATE_MINIMUM, |
| 620 | DYNPM_STATE_PAUSED, | 623 | DYNPM_STATE_PAUSED, |
| 621 | DYNPM_STATE_ACTIVE | 624 | DYNPM_STATE_ACTIVE, |
| 625 | DYNPM_STATE_SUSPENDED, | ||
| 622 | }; | 626 | }; |
| 623 | enum radeon_dynpm_action { | 627 | enum radeon_dynpm_action { |
| 624 | DYNPM_ACTION_NONE, | 628 | DYNPM_ACTION_NONE, |
| @@ -647,15 +651,18 @@ enum radeon_pm_profile_type { | |||
| 647 | PM_PROFILE_DEFAULT, | 651 | PM_PROFILE_DEFAULT, |
| 648 | PM_PROFILE_AUTO, | 652 | PM_PROFILE_AUTO, |
| 649 | PM_PROFILE_LOW, | 653 | PM_PROFILE_LOW, |
| 654 | PM_PROFILE_MID, | ||
| 650 | PM_PROFILE_HIGH, | 655 | PM_PROFILE_HIGH, |
| 651 | }; | 656 | }; |
| 652 | 657 | ||
| 653 | #define PM_PROFILE_DEFAULT_IDX 0 | 658 | #define PM_PROFILE_DEFAULT_IDX 0 |
| 654 | #define PM_PROFILE_LOW_SH_IDX 1 | 659 | #define PM_PROFILE_LOW_SH_IDX 1 |
| 655 | #define PM_PROFILE_HIGH_SH_IDX 2 | 660 | #define PM_PROFILE_MID_SH_IDX 2 |
| 656 | #define PM_PROFILE_LOW_MH_IDX 3 | 661 | #define PM_PROFILE_HIGH_SH_IDX 3 |
| 657 | #define PM_PROFILE_HIGH_MH_IDX 4 | 662 | #define PM_PROFILE_LOW_MH_IDX 4 |
| 658 | #define PM_PROFILE_MAX 5 | 663 | #define PM_PROFILE_MID_MH_IDX 5 |
| 664 | #define PM_PROFILE_HIGH_MH_IDX 6 | ||
| 665 | #define PM_PROFILE_MAX 7 | ||
| 659 | 666 | ||
| 660 | struct radeon_pm_profile { | 667 | struct radeon_pm_profile { |
| 661 | int dpms_off_ps_idx; | 668 | int dpms_off_ps_idx; |
| @@ -744,6 +751,7 @@ struct radeon_pm { | |||
| 744 | int default_power_state_index; | 751 | int default_power_state_index; |
| 745 | u32 current_sclk; | 752 | u32 current_sclk; |
| 746 | u32 current_mclk; | 753 | u32 current_mclk; |
| 754 | u32 current_vddc; | ||
| 747 | struct radeon_i2c_chan *i2c_bus; | 755 | struct radeon_i2c_chan *i2c_bus; |
| 748 | /* selected pm method */ | 756 | /* selected pm method */ |
| 749 | enum radeon_pm_method pm_method; | 757 | enum radeon_pm_method pm_method; |
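
Because MID is inserted between LOW and HIGH rather than appended, every define after PM_PROFILE_LOW_SH_IDX is renumbered and PM_PROFILE_MAX grows from 5 to 7, so anything that baked in the old numeric values must be audited. A name table like the sketch below (the strings are ours; only the ordering comes from the new defines) makes the seven-slot layout explicit:

    /* Sketch: the profile slots after this change, indexable for debug output. */
    static const char *profile_names[7] = {
        "default",            /* PM_PROFILE_DEFAULT_IDX (0) */
        "low, single-head",   /* PM_PROFILE_LOW_SH_IDX  (1) */
        "mid, single-head",   /* PM_PROFILE_MID_SH_IDX  (2) */
        "high, single-head",  /* PM_PROFILE_HIGH_SH_IDX (3) */
        "low, multi-head",    /* PM_PROFILE_LOW_MH_IDX  (4) */
        "mid, multi-head",    /* PM_PROFILE_MID_MH_IDX  (5) */
        "high, multi-head",   /* PM_PROFILE_HIGH_MH_IDX (6) */
    };
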
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e57df08d4aeb..646f96f97c77 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = { | |||
| 724 | .irq_set = &evergreen_irq_set, | 724 | .irq_set = &evergreen_irq_set, |
| 725 | .irq_process = &evergreen_irq_process, | 725 | .irq_process = &evergreen_irq_process, |
| 726 | .get_vblank_counter = &evergreen_get_vblank_counter, | 726 | .get_vblank_counter = &evergreen_get_vblank_counter, |
| 727 | .fence_ring_emit = NULL, | 727 | .fence_ring_emit = &r600_fence_ring_emit, |
| 728 | .cs_parse = NULL, | 728 | .cs_parse = &evergreen_cs_parse, |
| 729 | .copy_blit = NULL, | 729 | .copy_blit = NULL, |
| 730 | .copy_dma = NULL, | 730 | .copy_dma = NULL, |
| 731 | .copy = NULL, | 731 | .copy = NULL, |
| @@ -780,6 +780,13 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 780 | case CHIP_R423: | 780 | case CHIP_R423: |
| 781 | case CHIP_RV410: | 781 | case CHIP_RV410: |
| 782 | rdev->asic = &r420_asic; | 782 | rdev->asic = &r420_asic; |
| 783 | /* handle macs */ | ||
| 784 | if (rdev->bios == NULL) { | ||
| 785 | rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock; | ||
| 786 | rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock; | ||
| 787 | rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock; | ||
| 788 | rdev->asic->set_memory_clock = NULL; | ||
| 789 | } | ||
| 783 | break; | 790 | break; |
| 784 | case CHIP_RS400: | 791 | case CHIP_RS400: |
| 785 | case CHIP_RS480: | 792 | case CHIP_RS480: |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5c40a3dfaca2..c0bbaa64157a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev, | |||
| 314 | u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); | 314 | u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 315 | int evergreen_irq_set(struct radeon_device *rdev); | 315 | int evergreen_irq_set(struct radeon_device *rdev); |
| 316 | int evergreen_irq_process(struct radeon_device *rdev); | 316 | int evergreen_irq_process(struct radeon_device *rdev); |
| 317 | extern int evergreen_cs_parse(struct radeon_cs_parser *p); | ||
| 317 | extern void evergreen_pm_misc(struct radeon_device *rdev); | 318 | extern void evergreen_pm_misc(struct radeon_device *rdev); |
| 318 | extern void evergreen_pm_prepare(struct radeon_device *rdev); | 319 | extern void evergreen_pm_prepare(struct radeon_device *rdev); |
| 319 | extern void evergreen_pm_finish(struct radeon_device *rdev); | 320 | extern void evergreen_pm_finish(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 24ea683f7cf5..10673ae59cfa 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -280,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
| 280 | } | 280 | } |
| 281 | } | 281 | } |
| 282 | 282 | ||
| 283 | /* ASUS HD 3600 board lists the DVI port as HDMI */ | ||
| 284 | if ((dev->pdev->device == 0x9598) && | ||
| 285 | (dev->pdev->subsystem_vendor == 0x1043) && | ||
| 286 | (dev->pdev->subsystem_device == 0x01e4)) { | ||
| 287 | if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { | ||
| 288 | *connector_type = DRM_MODE_CONNECTOR_DVII; | ||
| 289 | } | ||
| 290 | } | ||
| 291 | |||
| 283 | /* ASUS HD 3450 board lists the DVI port as HDMI */ | 292 | /* ASUS HD 3450 board lists the DVI port as HDMI */ |
| 284 | if ((dev->pdev->device == 0x95C5) && | 293 | if ((dev->pdev->device == 0x95C5) && |
| 285 | (dev->pdev->subsystem_vendor == 0x1043) && | 294 | (dev->pdev->subsystem_vendor == 0x1043) && |
| @@ -1029,8 +1038,15 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
| 1029 | data_offset); | 1038 | data_offset); |
| 1030 | switch (crev) { | 1039 | switch (crev) { |
| 1031 | case 1: | 1040 | case 1: |
| 1032 | if (igp_info->info.ucMemoryType & 0xf0) | 1041 | /* AMD IGPs */ |
| 1033 | return true; | 1042 | if ((rdev->family == CHIP_RS690) || |
| 1043 | (rdev->family == CHIP_RS740)) { | ||
| 1044 | if (igp_info->info.ulBootUpMemoryClock) | ||
| 1045 | return true; | ||
| 1046 | } else { | ||
| 1047 | if (igp_info->info.ucMemoryType & 0xf0) | ||
| 1048 | return true; | ||
| 1049 | } | ||
| 1034 | break; | 1050 | break; |
| 1035 | case 2: | 1051 | case 2: |
| 1036 | if (igp_info->info_2.ucMemoryType & 0x0f) | 1052 | if (igp_info->info_2.ucMemoryType & 0x0f) |
| @@ -1538,7 +1554,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1538 | rdev->pm.power_state[state_index].pcie_lanes = | 1554 | rdev->pm.power_state[state_index].pcie_lanes = |
| 1539 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; | 1555 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; |
| 1540 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); | 1556 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); |
| 1541 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | 1557 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
| 1558 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
| 1542 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1559 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
| 1543 | VOLTAGE_GPIO; | 1560 | VOLTAGE_GPIO; |
| 1544 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 1561 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
| @@ -1605,7 +1622,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1605 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; | 1622 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; |
| 1606 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); | 1623 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); |
| 1607 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); | 1624 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); |
| 1608 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | 1625 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
| 1626 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
| 1609 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1627 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
| 1610 | VOLTAGE_GPIO; | 1628 | VOLTAGE_GPIO; |
| 1611 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 1629 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
| @@ -1679,7 +1697,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1679 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; | 1697 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; |
| 1680 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); | 1698 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); |
| 1681 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); | 1699 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); |
| 1682 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | 1700 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
| 1701 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
| 1683 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1702 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
| 1684 | VOLTAGE_GPIO; | 1703 | VOLTAGE_GPIO; |
| 1685 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 1704 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
| @@ -1755,9 +1774,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1755 | rdev->pm.power_state[state_index].misc2 = 0; | 1774 | rdev->pm.power_state[state_index].misc2 = 0; |
| 1756 | } | 1775 | } |
| 1757 | } else { | 1776 | } else { |
| 1777 | int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | ||
| 1778 | uint8_t fw_frev, fw_crev; | ||
| 1779 | uint16_t fw_data_offset, vddc = 0; | ||
| 1780 | union firmware_info *firmware_info; | ||
| 1781 | ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; | ||
| 1782 | |||
| 1783 | if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, | ||
| 1784 | &fw_frev, &fw_crev, &fw_data_offset)) { | ||
| 1785 | firmware_info = | ||
| 1786 | (union firmware_info *)(mode_info->atom_context->bios + | ||
| 1787 | fw_data_offset); | ||
| 1788 | vddc = firmware_info->info_14.usBootUpVDDCVoltage; | ||
| 1789 | } | ||
| 1790 | |||
| 1758 | /* add the i2c bus for thermal/fan chip */ | 1791 | /* add the i2c bus for thermal/fan chip */ |
| 1759 | /* no support for internal controller yet */ | 1792 | /* no support for internal controller yet */ |
| 1760 | ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; | ||
| 1761 | if (controller->ucType > 0) { | 1793 | if (controller->ucType > 0) { |
| 1762 | if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || | 1794 | if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || |
| 1763 | (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || | 1795 | (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || |
| @@ -1817,10 +1849,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1817 | /* skip invalid modes */ | 1849 | /* skip invalid modes */ |
| 1818 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) | 1850 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) |
| 1819 | continue; | 1851 | continue; |
| 1820 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 1852 | /* voltage works differently on IGPs */ |
| 1821 | VOLTAGE_SW; | ||
| 1822 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | ||
| 1823 | clock_info->usVDDC; | ||
| 1824 | mode_index++; | 1853 | mode_index++; |
| 1825 | } else if (ASIC_IS_DCE4(rdev)) { | 1854 | } else if (ASIC_IS_DCE4(rdev)) { |
| 1826 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = | 1855 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = |
| @@ -1904,6 +1933,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1904 | rdev->pm.default_power_state_index = state_index; | 1933 | rdev->pm.default_power_state_index = state_index; |
| 1905 | rdev->pm.power_state[state_index].default_clock_mode = | 1934 | rdev->pm.power_state[state_index].default_clock_mode = |
| 1906 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | 1935 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
| 1936 | /* patch the table values with the default sclk/mclk from firmware info */ | ||
| 1937 | for (j = 0; j < mode_index; j++) { | ||
| 1938 | rdev->pm.power_state[state_index].clock_info[j].mclk = | ||
| 1939 | rdev->clock.default_mclk; | ||
| 1940 | rdev->pm.power_state[state_index].clock_info[j].sclk = | ||
| 1941 | rdev->clock.default_sclk; | ||
| 1942 | if (vddc) | ||
| 1943 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = | ||
| 1944 | vddc; | ||
| 1945 | } | ||
| 1907 | } | 1946 | } |
| 1908 | state_index++; | 1947 | state_index++; |
| 1909 | } | 1948 | } |
| @@ -1943,6 +1982,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1943 | 1982 | ||
| 1944 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 1983 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
| 1945 | rdev->pm.current_clock_mode_index = 0; | 1984 | rdev->pm.current_clock_mode_index = 0; |
| 1985 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | ||
| 1946 | } | 1986 | } |
| 1947 | 1987 | ||
| 1948 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) | 1988 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) |
| @@ -1998,6 +2038,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, | |||
| 1998 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2038 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| 1999 | } | 2039 | } |
| 2000 | 2040 | ||
| 2041 | union set_voltage { | ||
| 2042 | struct _SET_VOLTAGE_PS_ALLOCATION alloc; | ||
| 2043 | struct _SET_VOLTAGE_PARAMETERS v1; | ||
| 2044 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; | ||
| 2045 | }; | ||
| 2046 | |||
| 2047 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) | ||
| 2048 | { | ||
| 2049 | union set_voltage args; | ||
| 2050 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | ||
| 2051 | u8 frev, crev, volt_index = level; | ||
| 2052 | |||
| 2053 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
| 2054 | return; | ||
| 2055 | |||
| 2056 | switch (crev) { | ||
| 2057 | case 1: | ||
| 2058 | args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | ||
| 2059 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; | ||
| 2060 | args.v1.ucVoltageIndex = volt_index; | ||
| 2061 | break; | ||
| 2062 | case 2: | ||
| 2063 | args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | ||
| 2064 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; | ||
| 2065 | args.v2.usVoltageLevel = cpu_to_le16(level); | ||
| 2066 | break; | ||
| 2067 | default: | ||
| 2068 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 2069 | return; | ||
| 2070 | } | ||
| 2071 | |||
| 2072 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 2073 | } | ||
| 2074 | |||
| 2075 | |||
| 2076 | |||
| 2001 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | 2077 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) |
| 2002 | { | 2078 | { |
| 2003 | struct radeon_device *rdev = dev->dev_private; | 2079 | struct radeon_device *rdev = dev->dev_private; |
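
radeon_atom_set_voltage() above follows the usual ATOM versioned-table pattern: read the command header to learn the table revision, fill the matching member of a parameter union, then execute the table. A toy model of that dispatch, with stand-in structs and a stub executor rather than the real ATOM types; note that v1 addresses the voltage by an 8-bit index while v2 takes the 16-bit level directly:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct set_voltage_v1 { uint8_t type, mode, index; };
    struct set_voltage_v2 { uint8_t type, mode; uint16_t level; };
    union set_voltage_args { struct set_voltage_v1 v1; struct set_voltage_v2 v2; };

    static void execute_table(const union set_voltage_args *a) { (void)a; /* stub */ }

    static void set_voltage(uint8_t crev, uint16_t level)
    {
        union set_voltage_args args;

        memset(&args, 0, sizeof(args));
        switch (crev) {
        case 1: args.v1.index = (uint8_t)level; break; /* v1: index into a table */
        case 2: args.v2.level = level; break;          /* v2: raw level */
        default:
            fprintf(stderr, "unknown table revision %u\n", crev);
            return;
        }
        execute_table(&args);
    }

    int main(void)
    {
        set_voltage(2, 1100); /* e.g. request level 1100 on a v2 table */
        return 0;
    }
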
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index fbba938f8048..2c9213739999 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -48,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) | |||
| 48 | resource_size_t vram_base; | 48 | resource_size_t vram_base; |
| 49 | resource_size_t size = 256 * 1024; /* ??? */ | 49 | resource_size_t size = 256 * 1024; /* ??? */ |
| 50 | 50 | ||
| 51 | if (!(rdev->flags & RADEON_IS_IGP)) | ||
| 52 | if (!radeon_card_posted(rdev)) | ||
| 53 | return false; | ||
| 54 | |||
| 51 | rdev->bios = NULL; | 55 | rdev->bios = NULL; |
| 52 | vram_base = drm_get_resource_start(rdev->ddev, 0); | 56 | vram_base = drm_get_resource_start(rdev->ddev, 0); |
| 53 | bios = ioremap(vram_base, size); | 57 | bios = ioremap(vram_base, size); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 7b5e10d3e9c9..2417d7b06fdb 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -1411,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1411 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; | 1411 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; |
| 1412 | } else | 1412 | } else |
| 1413 | #endif /* CONFIG_PPC_PMAC */ | 1413 | #endif /* CONFIG_PPC_PMAC */ |
| 1414 | #ifdef CONFIG_PPC64 | ||
| 1415 | if (ASIC_IS_RN50(rdev)) | ||
| 1416 | rdev->mode_info.connector_table = CT_RN50_POWER; | ||
| 1417 | else | ||
| 1418 | #endif | ||
| 1414 | rdev->mode_info.connector_table = CT_GENERIC; | 1419 | rdev->mode_info.connector_table = CT_GENERIC; |
| 1415 | } | 1420 | } |
| 1416 | 1421 | ||
| @@ -1853,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1853 | CONNECTOR_OBJECT_ID_SVIDEO, | 1858 | CONNECTOR_OBJECT_ID_SVIDEO, |
| 1854 | &hpd); | 1859 | &hpd); |
| 1855 | break; | 1860 | break; |
| 1861 | case CT_RN50_POWER: | ||
| 1862 | DRM_INFO("Connector Table: %d (rn50-power)\n", | ||
| 1863 | rdev->mode_info.connector_table); | ||
| 1864 | /* VGA - primary dac */ | ||
| 1865 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); | ||
| 1866 | hpd.hpd = RADEON_HPD_NONE; | ||
| 1867 | radeon_add_legacy_encoder(dev, | ||
| 1868 | radeon_get_encoder_id(dev, | ||
| 1869 | ATOM_DEVICE_CRT1_SUPPORT, | ||
| 1870 | 1), | ||
| 1871 | ATOM_DEVICE_CRT1_SUPPORT); | ||
| 1872 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, | ||
| 1873 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | ||
| 1874 | CONNECTOR_OBJECT_ID_VGA, | ||
| 1875 | &hpd); | ||
| 1876 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); | ||
| 1877 | hpd.hpd = RADEON_HPD_NONE; | ||
| 1878 | radeon_add_legacy_encoder(dev, | ||
| 1879 | radeon_get_encoder_id(dev, | ||
| 1880 | ATOM_DEVICE_CRT2_SUPPORT, | ||
| 1881 | 2), | ||
| 1882 | ATOM_DEVICE_CRT2_SUPPORT); | ||
| 1883 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | ||
| 1884 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | ||
| 1885 | CONNECTOR_OBJECT_ID_VGA, | ||
| 1886 | &hpd); | ||
| 1887 | break; | ||
| 1856 | default: | 1888 | default: |
| 1857 | DRM_INFO("Connector table: %d (invalid)\n", | 1889 | DRM_INFO("Connector table: %d (invalid)\n", |
| 1858 | rdev->mode_info.connector_table); | 1890 | rdev->mode_info.connector_table); |
| @@ -1906,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, | |||
| 1906 | return false; | 1938 | return false; |
| 1907 | } | 1939 | } |
| 1908 | 1940 | ||
| 1909 | /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */ | ||
| 1910 | if (dev->pdev->device == 0x5159 && | ||
| 1911 | dev->pdev->subsystem_vendor == 0x1002 && | ||
| 1912 | dev->pdev->subsystem_device == 0x013a) { | ||
| 1913 | if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) | ||
| 1914 | *legacy_connector = CONNECTOR_CRT_LEGACY; | ||
| 1915 | |||
| 1916 | } | ||
| 1917 | |||
| 1918 | /* X300 card with extra non-existent DVI port */ | 1941 | /* X300 card with extra non-existent DVI port */ |
| 1919 | if (dev->pdev->device == 0x5B60 && | 1942 | if (dev->pdev->device == 0x5B60 && |
| 1920 | dev->pdev->subsystem_vendor == 0x17af && | 1943 | dev->pdev->subsystem_vendor == 0x17af && |
| @@ -2026,6 +2049,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2026 | combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); | 2049 | combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); |
| 2027 | break; | 2050 | break; |
| 2028 | default: | 2051 | default: |
| 2052 | ddc_i2c.valid = false; | ||
| 2029 | break; | 2053 | break; |
| 2030 | } | 2054 | } |
| 2031 | 2055 | ||
| @@ -2339,6 +2363,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2339 | if (RBIOS8(tv_info + 6) == 'T') { | 2363 | if (RBIOS8(tv_info + 6) == 'T') { |
| 2340 | if (radeon_apply_legacy_tv_quirks(dev)) { | 2364 | if (radeon_apply_legacy_tv_quirks(dev)) { |
| 2341 | hpd.hpd = RADEON_HPD_NONE; | 2365 | hpd.hpd = RADEON_HPD_NONE; |
| 2366 | ddc_i2c.valid = false; | ||
| 2342 | radeon_add_legacy_encoder(dev, | 2367 | radeon_add_legacy_encoder(dev, |
| 2343 | radeon_get_encoder_id | 2368 | radeon_get_encoder_id |
| 2344 | (dev, | 2369 | (dev, |
| @@ -2454,7 +2479,12 @@ default_mode: | |||
| 2454 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | 2479 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; |
| 2455 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2480 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; |
| 2456 | rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; | 2481 | rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; |
| 2457 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2482 | if ((state_index > 0) && |
| 2483 | (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO)) | ||
| 2484 | rdev->pm.power_state[state_index].clock_info[0].voltage = | ||
| 2485 | rdev->pm.power_state[0].clock_info[0].voltage; | ||
| 2486 | else | ||
| 2487 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | ||
| 2458 | rdev->pm.power_state[state_index].pcie_lanes = 16; | 2488 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
| 2459 | rdev->pm.power_state[state_index].flags = 0; | 2489 | rdev->pm.power_state[state_index].flags = 0; |
| 2460 | rdev->pm.default_power_state_index = state_index; | 2490 | rdev->pm.default_power_state_index = state_index; |
| @@ -3012,6 +3042,22 @@ void radeon_combios_asic_init(struct drm_device *dev) | |||
| 3012 | combios_write_ram_size(dev); | 3042 | combios_write_ram_size(dev); |
| 3013 | } | 3043 | } |
| 3014 | 3044 | ||
| 3045 | /* quirk for rs4xx HP nx6125 laptop to make it resume | ||
| 3046 | * - it hangs on resume inside the dynclk 1 table. | ||
| 3047 | */ | ||
| 3048 | if (rdev->family == CHIP_RS480 && | ||
| 3049 | rdev->pdev->subsystem_vendor == 0x103c && | ||
| 3050 | rdev->pdev->subsystem_device == 0x308b) | ||
| 3051 | return; | ||
| 3052 | |||
| 3053 | /* quirk for rs4xx HP dv5000 laptop to make it resume | ||
| 3054 | * - it hangs on resume inside the dynclk 1 table. | ||
| 3055 | */ | ||
| 3056 | if (rdev->family == CHIP_RS480 && | ||
| 3057 | rdev->pdev->subsystem_vendor == 0x103c && | ||
| 3058 | rdev->pdev->subsystem_device == 0x30a4) | ||
| 3059 | return; | ||
| 3060 | |||
| 3015 | /* DYN CLK 1 */ | 3061 | /* DYN CLK 1 */ |
| 3016 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); | 3062 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
| 3017 | if (table) | 3063 | if (table) |
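
The two RS480 resume quirks above differ only in the subsystem device ID, so the check lends itself to a table; a minimal sketch of that shape (hypothetical helper and table, not what the driver actually does):

    struct resume_quirk { unsigned short subsys_vendor, subsys_device; };

    /* RS480 boards that hang on resume inside the dynclk 1 table */
    static const struct resume_quirk rs480_no_dynclk[] = {
        { 0x103c, 0x308b }, /* HP nx6125 */
        { 0x103c, 0x30a4 }, /* HP dv5000 */
    };

    static int skip_dynclk_table(unsigned short sv, unsigned short sd)
    {
        unsigned int i;

        for (i = 0; i < sizeof(rs480_no_dynclk) / sizeof(rs480_no_dynclk[0]); i++)
            if (rs480_no_dynclk[i].subsys_vendor == sv &&
                rs480_no_dynclk[i].subsys_device == sd)
                return 1;
        return 0;
    }
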
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 0c7ccc6961a3..adccbc2c202c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -771,30 +771,27 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
| 771 | } else | 771 | } else |
| 772 | ret = connector_status_connected; | 772 | ret = connector_status_connected; |
| 773 | 773 | ||
| 774 | /* multiple connectors on the same encoder with the same ddc line | 774 | /* This gets complicated. We have boards with VGA + HDMI with a |
| 775 | * This tends to be HDMI and DVI on the same encoder with the | 775 | * shared DDC line and we have boards with DVI-D + HDMI with a shared |
| 776 | * same ddc line. If the edid says HDMI, consider the HDMI port | 776 | * DDC line. The latter is more complex because with DVI<->HDMI adapters |
| 777 | * connected and the DVI port disconnected. If the edid doesn't | 777 | * you don't really know what's connected to which port as both are digital. |
| 778 | * say HDMI, vice versa. | ||
| 779 | */ | 778 | */ |
| 780 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { | 779 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { |
| 781 | struct drm_device *dev = connector->dev; | 780 | struct drm_device *dev = connector->dev; |
| 781 | struct radeon_device *rdev = dev->dev_private; | ||
| 782 | struct drm_connector *list_connector; | 782 | struct drm_connector *list_connector; |
| 783 | struct radeon_connector *list_radeon_connector; | 783 | struct radeon_connector *list_radeon_connector; |
| 784 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { | 784 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { |
| 785 | if (connector == list_connector) | 785 | if (connector == list_connector) |
| 786 | continue; | 786 | continue; |
| 787 | list_radeon_connector = to_radeon_connector(list_connector); | 787 | list_radeon_connector = to_radeon_connector(list_connector); |
| 788 | if (radeon_connector->devices == list_radeon_connector->devices) { | 788 | if (list_radeon_connector->shared_ddc && |
| 789 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 789 | (list_radeon_connector->ddc_bus->rec.i2c_id == |
| 790 | if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { | 790 | radeon_connector->ddc_bus->rec.i2c_id)) { |
| 791 | kfree(radeon_connector->edid); | 791 | /* cases where both connectors are digital */ |
| 792 | radeon_connector->edid = NULL; | 792 | if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { |
| 793 | ret = connector_status_disconnected; | 793 | /* hpd is our only option in this case */ |
| 794 | } | 794 | if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { |
| 795 | } else { | ||
| 796 | if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) || | ||
| 797 | (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) { | ||
| 798 | kfree(radeon_connector->edid); | 795 | kfree(radeon_connector->edid); |
| 799 | radeon_connector->edid = NULL; | 796 | radeon_connector->edid = NULL; |
| 800 | ret = connector_status_disconnected; | 797 | ret = connector_status_disconnected; |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index b7023fff89eb..4eb67c0e0996 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
| @@ -194,7 +194,7 @@ unpin: | |||
| 194 | fail: | 194 | fail: |
| 195 | drm_gem_object_unreference_unlocked(obj); | 195 | drm_gem_object_unreference_unlocked(obj); |
| 196 | 196 | ||
| 197 | return 0; | 197 | return ret; |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | int radeon_crtc_cursor_move(struct drm_crtc *crtc, | 200 | int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index fdc3fdf78acb..dd279da90546 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -226,20 +226,20 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
| 226 | { | 226 | { |
| 227 | u64 size_af, size_bf; | 227 | u64 size_af, size_bf; |
| 228 | 228 | ||
| 229 | size_af = 0xFFFFFFFF - mc->vram_end; | 229 | size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; |
| 230 | size_bf = mc->vram_start; | 230 | size_bf = mc->vram_start & ~mc->gtt_base_align; |
| 231 | if (size_bf > size_af) { | 231 | if (size_bf > size_af) { |
| 232 | if (mc->gtt_size > size_bf) { | 232 | if (mc->gtt_size > size_bf) { |
| 233 | dev_warn(rdev->dev, "limiting GTT\n"); | 233 | dev_warn(rdev->dev, "limiting GTT\n"); |
| 234 | mc->gtt_size = size_bf; | 234 | mc->gtt_size = size_bf; |
| 235 | } | 235 | } |
| 236 | mc->gtt_start = mc->vram_start - mc->gtt_size; | 236 | mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; |
| 237 | } else { | 237 | } else { |
| 238 | if (mc->gtt_size > size_af) { | 238 | if (mc->gtt_size > size_af) { |
| 239 | dev_warn(rdev->dev, "limiting GTT\n"); | 239 | dev_warn(rdev->dev, "limiting GTT\n"); |
| 240 | mc->gtt_size = size_af; | 240 | mc->gtt_size = size_af; |
| 241 | } | 241 | } |
| 242 | mc->gtt_start = mc->vram_end + 1; | 242 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; |
| 243 | } | 243 | } |
| 244 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; | 244 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; |
| 245 | dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", | 245 | dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", |
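
In the hunk above, gtt_base_align is used as a mask, that is, alignment minus one: adding the mask and then clearing its bits rounds up to the next boundary, clearing alone rounds down, and with gtt_base_align == 0 (as r600_vram_gtt_location() sets it) both reduce to the old unaligned placement. A standalone demonstration of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t align_up(uint64_t v, uint64_t mask)   { return (v + mask) & ~mask; }
    static uint64_t align_down(uint64_t v, uint64_t mask) { return v & ~mask; }

    int main(void)
    {
        uint64_t mask = (1ULL << 20) - 1; /* 1 MiB alignment, expressed as a mask */

        /* example addresses, not real MC values */
        printf("gtt above vram: 0x%llx\n",
               (unsigned long long)align_up(0x0FFFFFFFULL + 1, mask));
        printf("gtt below vram: 0x%llx\n",
               (unsigned long long)align_down(0x08123456ULL, mask));
        return 0;
    }
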
| @@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero | |||
| 546 | /* don't suspend or resume card normally */ | 546 | /* don't suspend or resume card normally */ |
| 547 | rdev->powered_down = false; | 547 | rdev->powered_down = false; |
| 548 | radeon_resume_kms(dev); | 548 | radeon_resume_kms(dev); |
| 549 | drm_kms_helper_poll_enable(dev); | ||
| 549 | } else { | 550 | } else { |
| 550 | printk(KERN_INFO "radeon: switched off\n"); | 551 | printk(KERN_INFO "radeon: switched off\n"); |
| 552 | drm_kms_helper_poll_disable(dev); | ||
| 551 | radeon_suspend_kms(dev, pmm); | 553 | radeon_suspend_kms(dev, pmm); |
| 552 | /* don't suspend or resume card normally */ | 554 | /* don't suspend or resume card normally */ |
| 553 | rdev->powered_down = true; | 555 | rdev->powered_down = true; |
| @@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 711 | { | 713 | { |
| 712 | struct radeon_device *rdev; | 714 | struct radeon_device *rdev; |
| 713 | struct drm_crtc *crtc; | 715 | struct drm_crtc *crtc; |
| 716 | struct drm_connector *connector; | ||
| 714 | int r; | 717 | int r; |
| 715 | 718 | ||
| 716 | if (dev == NULL || dev->dev_private == NULL) { | 719 | if (dev == NULL || dev->dev_private == NULL) { |
| @@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 723 | 726 | ||
| 724 | if (rdev->powered_down) | 727 | if (rdev->powered_down) |
| 725 | return 0; | 728 | return 0; |
| 729 | |||
| 730 | /* turn off display hw */ | ||
| 731 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 732 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
| 733 | } | ||
| 734 | |||
| 726 | /* unpin the front buffers */ | 735 | /* unpin the front buffers */ |
| 727 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 736 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 728 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); | 737 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); |
| @@ -770,6 +779,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 770 | 779 | ||
| 771 | int radeon_resume_kms(struct drm_device *dev) | 780 | int radeon_resume_kms(struct drm_device *dev) |
| 772 | { | 781 | { |
| 782 | struct drm_connector *connector; | ||
| 773 | struct radeon_device *rdev = dev->dev_private; | 783 | struct radeon_device *rdev = dev->dev_private; |
| 774 | 784 | ||
| 775 | if (rdev->powered_down) | 785 | if (rdev->powered_down) |
| @@ -788,6 +798,12 @@ int radeon_resume_kms(struct drm_device *dev) | |||
| 788 | radeon_resume(rdev); | 798 | radeon_resume(rdev); |
| 789 | radeon_pm_resume(rdev); | 799 | radeon_pm_resume(rdev); |
| 790 | radeon_restore_bios_scratch_regs(rdev); | 800 | radeon_restore_bios_scratch_regs(rdev); |
| 801 | |||
| 802 | /* turn on display hw */ | ||
| 803 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 804 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
| 805 | } | ||
| 806 | |||
| 791 | radeon_fbdev_set_suspend(rdev, 0); | 807 | radeon_fbdev_set_suspend(rdev, 0); |
| 792 | release_console_sem(); | 808 | release_console_sem(); |
| 793 | 809 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1006549d1570..8154cdf796e4 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -284,8 +284,7 @@ static const char *connector_names[15] = { | |||
| 284 | "eDP", | 284 | "eDP", |
| 285 | }; | 285 | }; |
| 286 | 286 | ||
| 287 | static const char *hpd_names[7] = { | 287 | static const char *hpd_names[6] = { |
| 288 | "NONE", | ||
| 289 | "HPD1", | 288 | "HPD1", |
| 290 | "HPD2", | 289 | "HPD2", |
| 291 | "HPD3", | 290 | "HPD3", |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 902d1731a652..e166fe4d7c30 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -45,9 +45,10 @@ | |||
| 45 | * - 2.2.0 - add r6xx/r7xx const buffer support | 45 | * - 2.2.0 - add r6xx/r7xx const buffer support |
| 46 | * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs | 46 | * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs |
| 47 | * - 2.4.0 - add crtc id query | 47 | * - 2.4.0 - add crtc id query |
| 48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen | ||
| 48 | */ | 49 | */ |
| 49 | #define KMS_DRIVER_MAJOR 2 | 50 | #define KMS_DRIVER_MAJOR 2 |
| 50 | #define KMS_DRIVER_MINOR 4 | 51 | #define KMS_DRIVER_MINOR 5 |
| 51 | #define KMS_DRIVER_PATCHLEVEL 0 | 52 | #define KMS_DRIVER_PATCHLEVEL 0 |
| 52 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 53 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 53 | int radeon_driver_unload_kms(struct drm_device *dev); | 54 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 1ebb100015b7..e0b30b264c28 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -1072,6 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1072 | if (is_dig) { | 1072 | if (is_dig) { |
| 1073 | switch (mode) { | 1073 | switch (mode) { |
| 1074 | case DRM_MODE_DPMS_ON: | 1074 | case DRM_MODE_DPMS_ON: |
| 1075 | if (!ASIC_IS_DCE4(rdev)) | ||
| 1076 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
| 1075 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1077 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
| 1076 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1078 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 1077 | 1079 | ||
| @@ -1079,8 +1081,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1079 | if (ASIC_IS_DCE4(rdev)) | 1081 | if (ASIC_IS_DCE4(rdev)) |
| 1080 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | 1082 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); |
| 1081 | } | 1083 | } |
| 1082 | if (!ASIC_IS_DCE4(rdev)) | ||
| 1083 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
| 1084 | break; | 1084 | break; |
| 1085 | case DRM_MODE_DPMS_STANDBY: | 1085 | case DRM_MODE_DPMS_STANDBY: |
| 1086 | case DRM_MODE_DPMS_SUSPEND: | 1086 | case DRM_MODE_DPMS_SUSPEND: |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index e192acfbf0cd..dc1634bb0c11 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -363,6 +363,7 @@ int radeon_fbdev_init(struct radeon_device *rdev) | |||
| 363 | { | 363 | { |
| 364 | struct radeon_fbdev *rfbdev; | 364 | struct radeon_fbdev *rfbdev; |
| 365 | int bpp_sel = 32; | 365 | int bpp_sel = 32; |
| 366 | int ret; | ||
| 366 | 367 | ||
| 367 | /* select 8 bpp console on RN50 or 16MB cards */ | 368 | /* select 8 bpp console on RN50 or 16MB cards */ |
| 368 | if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) | 369 | if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) |
| @@ -376,9 +377,14 @@ int radeon_fbdev_init(struct radeon_device *rdev) | |||
| 376 | rdev->mode_info.rfbdev = rfbdev; | 377 | rdev->mode_info.rfbdev = rfbdev; |
| 377 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; | 378 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; |
| 378 | 379 | ||
| 379 | drm_fb_helper_init(rdev->ddev, &rfbdev->helper, | 380 | ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, |
| 380 | rdev->num_crtc, | 381 | rdev->num_crtc, |
| 381 | RADEONFB_CONN_LIMIT); | 382 | RADEONFB_CONN_LIMIT); |
| 383 | if (ret) { | ||
| 384 | kfree(rfbdev); | ||
| 385 | return ret; | ||
| 386 | } | ||
| 387 | |||
| 382 | drm_fb_helper_single_add_all_connectors(&rfbdev->helper); | 388 | drm_fb_helper_single_add_all_connectors(&rfbdev->helper); |
| 383 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); | 389 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); |
| 384 | return 0; | 390 | return 0; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 04068352ccd2..ab389f89fa8d 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -118,13 +118,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 118 | value = rdev->num_z_pipes; | 118 | value = rdev->num_z_pipes; |
| 119 | break; | 119 | break; |
| 120 | case RADEON_INFO_ACCEL_WORKING: | 120 | case RADEON_INFO_ACCEL_WORKING: |
| 121 | value = rdev->accel_working; | 121 | /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ |
| 122 | if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) | ||
| 123 | value = false; | ||
| 124 | else | ||
| 125 | value = rdev->accel_working; | ||
| 122 | break; | 126 | break; |
| 123 | case RADEON_INFO_CRTC_FROM_ID: | 127 | case RADEON_INFO_CRTC_FROM_ID: |
| 124 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { | 128 | for (i = 0, found = 0; i < rdev->num_crtc; i++) { |
| 125 | crtc = (struct drm_crtc *)minfo->crtcs[i]; | 129 | crtc = (struct drm_crtc *)minfo->crtcs[i]; |
| 126 | if (crtc && crtc->base.id == value) { | 130 | if (crtc && crtc->base.id == value) { |
| 127 | value = i; | 131 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 132 | value = radeon_crtc->crtc_id; | ||
| 128 | found = 1; | 133 | found = 1; |
| 129 | break; | 134 | break; |
| 130 | } | 135 | } |
| @@ -134,6 +139,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 134 | return -EINVAL; | 139 | return -EINVAL; |
| 135 | } | 140 | } |
| 136 | break; | 141 | break; |
| 142 | case RADEON_INFO_ACCEL_WORKING2: | ||
| 143 | value = rdev->accel_working; | ||
| 144 | break; | ||
| 137 | default: | 145 | default: |
| 138 | DRM_DEBUG("Invalid request %d\n", info->request); | 146 | DRM_DEBUG("Invalid request %d\n", info->request); |
| 139 | return -EINVAL; | 147 | return -EINVAL; |
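
From userspace the ACCEL_WORKING2 split plays out as: issue the new request first and fall back to the old one if the kernel rejects it. A sketch assuming libdrm's drmCommandWriteRead() and the radeon info ioctl of this period; the constants and the struct are re-declared here only for self-containment and should come from drm/radeon_drm.h in real code:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>

    #define DRM_RADEON_INFO            0x27
    #define RADEON_INFO_ACCEL_WORKING  0x03
    #define RADEON_INFO_ACCEL_WORKING2 0x05

    struct drm_radeon_info { uint32_t request; uint32_t pad; uint64_t value; };

    static int radeon_info(int fd, uint32_t request, uint32_t *out)
    {
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = request;
        info.value = (uintptr_t)out; /* kernel writes the result through this */
        return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    }

    int accel_working(int fd)
    {
        uint32_t v = 0;

        if (radeon_info(fd, RADEON_INFO_ACCEL_WORKING2, &v) == 0)
            return v; /* honest answer, evergreen included */
        radeon_info(fd, RADEON_INFO_ACCEL_WORKING, &v); /* old kernels */
        return v;     /* forced to false on evergreen by the hunk above */
    }
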
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5a13b3eeef19..5688a0cf6bbe 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
| 108 | udelay(panel_pwr_delay * 1000); | 108 | udelay(panel_pwr_delay * 1000); |
| 109 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); | 109 | WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); |
| 110 | WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); | 110 | WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); |
| 111 | udelay(panel_pwr_delay * 1000); | ||
| 111 | break; | 112 | break; |
| 112 | } | 113 | } |
| 113 | 114 | ||
| @@ -928,16 +929,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 928 | if (ASIC_IS_R300(rdev)) { | 929 | if (ASIC_IS_R300(rdev)) { |
| 929 | gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; | 930 | gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; |
| 930 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); | 931 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); |
| 931 | } | 932 | } else if (rdev->family != CHIP_R200) |
| 932 | |||
| 933 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) | ||
| 934 | disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); | ||
| 935 | else | ||
| 936 | disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); | 933 | disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); |
| 937 | 934 | else if (rdev->family == CHIP_R200) | |
| 938 | if (rdev->family == CHIP_R200) | ||
| 939 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 935 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
| 940 | 936 | ||
| 937 | if (rdev->family >= CHIP_R200) | ||
| 938 | disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); | ||
| 939 | |||
| 941 | if (is_tv) { | 940 | if (is_tv) { |
| 942 | uint32_t dac_cntl; | 941 | uint32_t dac_cntl; |
| 943 | 942 | ||
| @@ -1002,15 +1001,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 1002 | if (ASIC_IS_R300(rdev)) { | 1001 | if (ASIC_IS_R300(rdev)) { |
| 1003 | WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); | 1002 | WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); |
| 1004 | WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); | 1003 | WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); |
| 1005 | } | 1004 | } else if (rdev->family != CHIP_R200) |
| 1005 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | ||
| 1006 | else if (rdev->family == CHIP_R200) | ||
| 1007 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | ||
| 1006 | 1008 | ||
| 1007 | if (rdev->family >= CHIP_R200) | 1009 | if (rdev->family >= CHIP_R200) |
| 1008 | WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); | 1010 | WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); |
| 1009 | else | ||
| 1010 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | ||
| 1011 | |||
| 1012 | if (rdev->family == CHIP_R200) | ||
| 1013 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | ||
| 1014 | 1011 | ||
| 1015 | if (is_tv) | 1012 | if (is_tv) |
| 1016 | radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); | 1013 | radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); |
| @@ -1168,6 +1165,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder | |||
| 1168 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1165 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 1169 | struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; | 1166 | struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; |
| 1170 | bool color = true; | 1167 | bool color = true; |
| 1168 | struct drm_crtc *crtc; | ||
| 1169 | |||
| 1170 | /* find out if crtc2 is in use or if this encoder is using it */ | ||
| 1171 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 1172 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
| 1173 | if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { | ||
| 1174 | if (encoder->crtc != crtc) { | ||
| 1175 | return connector_status_disconnected; | ||
| 1176 | } | ||
| 1177 | } | ||
| 1178 | } | ||
| 1171 | 1179 | ||
| 1172 | if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || | 1180 | if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || |
| 1173 | connector->connector_type == DRM_MODE_CONNECTOR_Composite || | 1181 | connector->connector_type == DRM_MODE_CONNECTOR_Composite || |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index f2ed27c8055b..032040397743 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
| @@ -642,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, | |||
| 642 | } | 642 | } |
| 643 | flicker_removal = (tmp + 500) / 1000; | 643 | flicker_removal = (tmp + 500) / 1000; |
| 644 | 644 | ||
| 645 | if (flicker_removal < 2) | 645 | if (flicker_removal < 3) |
| 646 | flicker_removal = 2; | 646 | flicker_removal = 3; |
| 647 | for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { | 647 | for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { |
| 648 | if (flicker_removal == SLOPE_limit[i]) | 648 | if (flicker_removal == SLOPE_limit[i]) |
| 649 | break; | 649 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 67358baf28b2..95696aa57ac8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -206,6 +206,7 @@ enum radeon_connector_table { | |||
| 206 | CT_MINI_INTERNAL, | 206 | CT_MINI_INTERNAL, |
| 207 | CT_IMAC_G5_ISIGHT, | 207 | CT_IMAC_G5_ISIGHT, |
| 208 | CT_EMAC, | 208 | CT_EMAC, |
| 209 | CT_RN50_POWER, | ||
| 209 | }; | 210 | }; |
| 210 | 211 | ||
| 211 | enum radeon_dvo_chip { | 212 | enum radeon_dvo_chip { |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index a8d162c6f829..3fa6984d9896 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -33,6 +33,14 @@ | |||
| 33 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 | 33 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 |
| 34 | #define RADEON_WAIT_IDLE_TIMEOUT 200 | 34 | #define RADEON_WAIT_IDLE_TIMEOUT 200 |
| 35 | 35 | ||
| 36 | static const char *radeon_pm_state_type_name[5] = { | ||
| 37 | "Default", | ||
| 38 | "Powersave", | ||
| 39 | "Battery", | ||
| 40 | "Balanced", | ||
| 41 | "Performance", | ||
| 42 | }; | ||
| 43 | |||
| 36 | static void radeon_dynpm_idle_work_handler(struct work_struct *work); | 44 | static void radeon_dynpm_idle_work_handler(struct work_struct *work); |
| 37 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); | 45 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); |
| 38 | static bool radeon_pm_in_vbl(struct radeon_device *rdev); | 46 | static bool radeon_pm_in_vbl(struct radeon_device *rdev); |
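The new radeon_pm_state_type_name table supplies printable labels, indexed by a state's type, for the radeon_pm_print_states() dump added further down in this file. A defensive lookup over the same labels; the bounds check is an extra safety net in this sketch, not something the patch itself adds:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *state_name[] = {
        "Default", "Powersave", "Battery", "Balanced", "Performance",
    };

    static const char *pm_state_name(unsigned type)
    {
        return type < ARRAY_SIZE(state_name) ? state_name[type] : "Unknown";
    }

    int main(void) { puts(pm_state_name(4)); return 0; }
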
| @@ -84,9 +92,9 @@ static void radeon_pm_update_profile(struct radeon_device *rdev) | |||
| 84 | rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; | 92 | rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; |
| 85 | } else { | 93 | } else { |
| 86 | if (rdev->pm.active_crtc_count > 1) | 94 | if (rdev->pm.active_crtc_count > 1) |
| 87 | rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; | 95 | rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; |
| 88 | else | 96 | else |
| 89 | rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; | 97 | rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; |
| 90 | } | 98 | } |
| 91 | break; | 99 | break; |
| 92 | case PM_PROFILE_LOW: | 100 | case PM_PROFILE_LOW: |
| @@ -95,6 +103,12 @@ static void radeon_pm_update_profile(struct radeon_device *rdev) | |||
| 95 | else | 103 | else |
| 96 | rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; | 104 | rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; |
| 97 | break; | 105 | break; |
| 106 | case PM_PROFILE_MID: | ||
| 107 | if (rdev->pm.active_crtc_count > 1) | ||
| 108 | rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; | ||
| 109 | else | ||
| 110 | rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; | ||
| 111 | break; | ||
| 98 | case PM_PROFILE_HIGH: | 112 | case PM_PROFILE_HIGH: |
| 99 | if (rdev->pm.active_crtc_count > 1) | 113 | if (rdev->pm.active_crtc_count > 1) |
| 100 | rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; | 114 | rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; |
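With the MID profile introduced, AUTO on AC power now resolves to the MID indices rather than LOW, and every named profile picks its single-head (SH) or multi-head (MH) variant from the active CRTC count. That selection collapses to a two-column table; a sketch in which the index names mirror the patch but the table layout is illustrative:

    enum profile { PROF_LOW, PROF_MID, PROF_HIGH };

    enum {
        PM_PROFILE_LOW_SH_IDX, PM_PROFILE_LOW_MH_IDX,
        PM_PROFILE_MID_SH_IDX, PM_PROFILE_MID_MH_IDX,
        PM_PROFILE_HIGH_SH_IDX, PM_PROFILE_HIGH_MH_IDX,
    };

    /* row = profile, column = 0 for one active crtc, 1 for several */
    static const int profile_idx[3][2] = {
        { PM_PROFILE_LOW_SH_IDX,  PM_PROFILE_LOW_MH_IDX  },
        { PM_PROFILE_MID_SH_IDX,  PM_PROFILE_MID_MH_IDX  },
        { PM_PROFILE_HIGH_SH_IDX, PM_PROFILE_HIGH_MH_IDX },
    };

    static int pick_profile_index(enum profile p, int active_crtc_count)
    {
        return profile_idx[p][active_crtc_count > 1];
    }
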
| @@ -127,15 +141,6 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev) | |||
| 127 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) | 141 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) |
| 128 | ttm_bo_unmap_virtual(&bo->tbo); | 142 | ttm_bo_unmap_virtual(&bo->tbo); |
| 129 | } | 143 | } |
| 130 | |||
| 131 | if (rdev->gart.table.vram.robj) | ||
| 132 | ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo); | ||
| 133 | |||
| 134 | if (rdev->stollen_vga_memory) | ||
| 135 | ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo); | ||
| 136 | |||
| 137 | if (rdev->r600_blit.shader_obj) | ||
| 138 | ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo); | ||
| 139 | } | 144 | } |
| 140 | 145 | ||
| 141 | static void radeon_sync_with_vblank(struct radeon_device *rdev) | 146 | static void radeon_sync_with_vblank(struct radeon_device *rdev) |
| @@ -151,6 +156,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev) | |||
| 151 | static void radeon_set_power_state(struct radeon_device *rdev) | 156 | static void radeon_set_power_state(struct radeon_device *rdev) |
| 152 | { | 157 | { |
| 153 | u32 sclk, mclk; | 158 | u32 sclk, mclk; |
| 159 | bool misc_after = false; | ||
| 154 | 160 | ||
| 155 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && | 161 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && |
| 156 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) | 162 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) |
| @@ -167,55 +173,47 @@ static void radeon_set_power_state(struct radeon_device *rdev) | |||
| 167 | if (mclk > rdev->clock.default_mclk) | 173 | if (mclk > rdev->clock.default_mclk) |
| 168 | mclk = rdev->clock.default_mclk; | 174 | mclk = rdev->clock.default_mclk; |
| 169 | 175 | ||
| 170 | /* voltage, pcie lanes, etc.*/ | 176 | /* upvolt before raising clocks, downvolt after lowering clocks */ |
| 171 | radeon_pm_misc(rdev); | 177 | if (sclk < rdev->pm.current_sclk) |
| 178 | misc_after = true; | ||
| 172 | 179 | ||
| 173 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 180 | radeon_sync_with_vblank(rdev); |
| 174 | radeon_sync_with_vblank(rdev); | ||
| 175 | 181 | ||
| 182 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
| 176 | if (!radeon_pm_in_vbl(rdev)) | 183 | if (!radeon_pm_in_vbl(rdev)) |
| 177 | return; | 184 | return; |
| 185 | } | ||
| 178 | 186 | ||
| 179 | radeon_pm_prepare(rdev); | 187 | radeon_pm_prepare(rdev); |
| 180 | /* set engine clock */ | ||
| 181 | if (sclk != rdev->pm.current_sclk) { | ||
| 182 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
| 183 | radeon_set_engine_clock(rdev, sclk); | ||
| 184 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
| 185 | rdev->pm.current_sclk = sclk; | ||
| 186 | DRM_DEBUG("Setting: e: %d\n", sclk); | ||
| 187 | } | ||
| 188 | 188 | ||
| 189 | /* set memory clock */ | 189 | if (!misc_after) |
| 190 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | 190 | /* voltage, pcie lanes, etc.*/ |
| 191 | radeon_pm_debug_check_in_vbl(rdev, false); | 191 | radeon_pm_misc(rdev); |
| 192 | radeon_set_memory_clock(rdev, mclk); | 192 | |
| 193 | radeon_pm_debug_check_in_vbl(rdev, true); | 193 | /* set engine clock */ |
| 194 | rdev->pm.current_mclk = mclk; | 194 | if (sclk != rdev->pm.current_sclk) { |
| 195 | DRM_DEBUG("Setting: m: %d\n", mclk); | 195 | radeon_pm_debug_check_in_vbl(rdev, false); |
| 196 | } | 196 | radeon_set_engine_clock(rdev, sclk); |
| 197 | radeon_pm_finish(rdev); | 197 | radeon_pm_debug_check_in_vbl(rdev, true); |
| 198 | } else { | 198 | rdev->pm.current_sclk = sclk; |
| 199 | /* set engine clock */ | 199 | DRM_DEBUG("Setting: e: %d\n", sclk); |
| 200 | if (sclk != rdev->pm.current_sclk) { | ||
| 201 | radeon_sync_with_vblank(rdev); | ||
| 202 | radeon_pm_prepare(rdev); | ||
| 203 | radeon_set_engine_clock(rdev, sclk); | ||
| 204 | radeon_pm_finish(rdev); | ||
| 205 | rdev->pm.current_sclk = sclk; | ||
| 206 | DRM_DEBUG("Setting: e: %d\n", sclk); | ||
| 207 | } | ||
| 208 | /* set memory clock */ | ||
| 209 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | ||
| 210 | radeon_sync_with_vblank(rdev); | ||
| 211 | radeon_pm_prepare(rdev); | ||
| 212 | radeon_set_memory_clock(rdev, mclk); | ||
| 213 | radeon_pm_finish(rdev); | ||
| 214 | rdev->pm.current_mclk = mclk; | ||
| 215 | DRM_DEBUG("Setting: m: %d\n", mclk); | ||
| 216 | } | ||
| 217 | } | 200 | } |
| 218 | 201 | ||
| 202 | /* set memory clock */ | ||
| 203 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | ||
| 204 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
| 205 | radeon_set_memory_clock(rdev, mclk); | ||
| 206 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
| 207 | rdev->pm.current_mclk = mclk; | ||
| 208 | DRM_DEBUG("Setting: m: %d\n", mclk); | ||
| 209 | } | ||
| 210 | |||
| 211 | if (misc_after) | ||
| 212 | /* voltage, pcie lanes, etc.*/ | ||
| 213 | radeon_pm_misc(rdev); | ||
| 214 | |||
| 215 | radeon_pm_finish(rdev); | ||
| 216 | |||
| 219 | rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; | 217 | rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; |
| 220 | rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; | 218 | rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; |
| 221 | } else | 219 | } else |
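The rewrite above merges the dynpm and non-dynpm paths into one sequence and fixes the ordering of radeon_pm_misc() (voltage, PCIe lanes) around the clock writes: raise voltage before raising clocks, drop it only after lowering them, so the engine never runs faster than its supply allows. The skeleton, with stand-ins for the driver callbacks:

    #include <stdbool.h>

    static void pm_misc(void)                { /* voltage, pcie lanes, ... */ }
    static void set_engine_clock(unsigned s) { (void)s; }
    static void set_memory_clock(unsigned m) { (void)m; }

    static unsigned cur_sclk, cur_mclk;

    static void set_power_state(unsigned sclk, unsigned mclk)
    {
        /* upvolt before raising clocks, downvolt after lowering clocks */
        bool misc_after = sclk < cur_sclk;

        if (!misc_after)
            pm_misc();

        if (sclk != cur_sclk) {
            set_engine_clock(sclk);
            cur_sclk = sclk;
        }
        if (mclk != cur_mclk) {
            set_memory_clock(mclk);
            cur_mclk = mclk;
        }

        if (misc_after)
            pm_misc();
    }
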
| @@ -288,6 +286,42 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
| 288 | mutex_unlock(&rdev->ddev->struct_mutex); | 286 | mutex_unlock(&rdev->ddev->struct_mutex); |
| 289 | } | 287 | } |
| 290 | 288 | ||
| 289 | static void radeon_pm_print_states(struct radeon_device *rdev) | ||
| 290 | { | ||
| 291 | int i, j; | ||
| 292 | struct radeon_power_state *power_state; | ||
| 293 | struct radeon_pm_clock_info *clock_info; | ||
| 294 | |||
| 295 | DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states); | ||
| 296 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
| 297 | power_state = &rdev->pm.power_state[i]; | ||
| 298 | DRM_DEBUG("State %d: %s\n", i, | ||
| 299 | radeon_pm_state_type_name[power_state->type]); | ||
| 300 | if (i == rdev->pm.default_power_state_index) | ||
| 301 | DRM_DEBUG("\tDefault"); | ||
| 302 | if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) | ||
| 303 | DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes); | ||
| 304 | if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | ||
| 305 | DRM_DEBUG("\tSingle display only\n"); | ||
| 306 | DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes); | ||
| 307 | for (j = 0; j < power_state->num_clock_modes; j++) { | ||
| 308 | clock_info = &(power_state->clock_info[j]); | ||
| 309 | if (rdev->flags & RADEON_IS_IGP) | ||
| 310 | DRM_DEBUG("\t\t%d e: %d%s\n", | ||
| 311 | j, | ||
| 312 | clock_info->sclk * 10, | ||
| 313 | clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); | ||
| 314 | else | ||
| 315 | DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n", | ||
| 316 | j, | ||
| 317 | clock_info->sclk * 10, | ||
| 318 | clock_info->mclk * 10, | ||
| 319 | clock_info->voltage.voltage, | ||
| 320 | clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); | ||
| 321 | } | ||
| 322 | } | ||
| 323 | } | ||
| 324 | |||
| 291 | static ssize_t radeon_get_pm_profile(struct device *dev, | 325 | static ssize_t radeon_get_pm_profile(struct device *dev, |
| 292 | struct device_attribute *attr, | 326 | struct device_attribute *attr, |
| 293 | char *buf) | 327 | char *buf) |
| @@ -299,6 +333,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev, | |||
| 299 | return snprintf(buf, PAGE_SIZE, "%s\n", | 333 | return snprintf(buf, PAGE_SIZE, "%s\n", |
| 300 | (cp == PM_PROFILE_AUTO) ? "auto" : | 334 | (cp == PM_PROFILE_AUTO) ? "auto" : |
| 301 | (cp == PM_PROFILE_LOW) ? "low" : | 335 | (cp == PM_PROFILE_LOW) ? "low" : |
| 336 | (cp == PM_PROFILE_MID) ? "mid" : | ||
| 302 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); | 337 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); |
| 303 | } | 338 | } |
| 304 | 339 | ||
| @@ -318,6 +353,8 @@ static ssize_t radeon_set_pm_profile(struct device *dev, | |||
| 318 | rdev->pm.profile = PM_PROFILE_AUTO; | 353 | rdev->pm.profile = PM_PROFILE_AUTO; |
| 319 | else if (strncmp("low", buf, strlen("low")) == 0) | 354 | else if (strncmp("low", buf, strlen("low")) == 0) |
| 320 | rdev->pm.profile = PM_PROFILE_LOW; | 355 | rdev->pm.profile = PM_PROFILE_LOW; |
| 356 | else if (strncmp("mid", buf, strlen("mid")) == 0) | ||
| 357 | rdev->pm.profile = PM_PROFILE_MID; | ||
| 321 | else if (strncmp("high", buf, strlen("high")) == 0) | 358 | else if (strncmp("high", buf, strlen("high")) == 0) |
| 322 | rdev->pm.profile = PM_PROFILE_HIGH; | 359 | rdev->pm.profile = PM_PROFILE_HIGH; |
| 323 | else { | 360 | else { |
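Both sysfs handlers match the written string by prefix with strncmp(), so the trailing newline from `echo mid > power_profile` is harmless. The parser as a stand-alone function (the "default" branch sits outside the hunk shown; -1 here stands for the "invalid power profile!" error path):

    #include <string.h>

    enum profile {
        PM_PROFILE_DEFAULT, PM_PROFILE_AUTO, PM_PROFILE_LOW,
        PM_PROFILE_MID, PM_PROFILE_HIGH,
    };

    static int parse_profile(const char *buf)
    {
        if (strncmp("default", buf, strlen("default")) == 0)
            return PM_PROFILE_DEFAULT;
        if (strncmp("auto", buf, strlen("auto")) == 0)
            return PM_PROFILE_AUTO;
        if (strncmp("low", buf, strlen("low")) == 0)
            return PM_PROFILE_LOW;
        if (strncmp("mid", buf, strlen("mid")) == 0)
            return PM_PROFILE_MID;
        if (strncmp("high", buf, strlen("high")) == 0)
            return PM_PROFILE_HIGH;
        return -1;
    }
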
| @@ -361,13 +398,20 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
| 361 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 398 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
| 362 | mutex_unlock(&rdev->pm.mutex); | 399 | mutex_unlock(&rdev->pm.mutex); |
| 363 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { | 400 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { |
| 401 | bool flush_wq = false; | ||
| 402 | |||
| 364 | mutex_lock(&rdev->pm.mutex); | 403 | mutex_lock(&rdev->pm.mutex); |
| 365 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 404 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
| 405 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
| 406 | flush_wq = true; | ||
| 407 | } | ||
| 366 | /* disable dynpm */ | 408 | /* disable dynpm */ |
| 367 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 409 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
| 368 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 410 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
| 369 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | 411 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| 370 | mutex_unlock(&rdev->pm.mutex); | 412 | mutex_unlock(&rdev->pm.mutex); |
| 413 | if (flush_wq) | ||
| 414 | flush_workqueue(rdev->wq); | ||
| 371 | } else { | 415 | } else { |
| 372 | DRM_ERROR("invalid power method!\n"); | 416 | DRM_ERROR("invalid power method!\n"); |
| 373 | goto fail; | 417 | goto fail; |
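The dynpm idle worker takes pm.mutex itself (see the handler hunk near the end of this file), so the method switch cannot flush the workqueue while holding that lock; the patch cancels the delayed work under the mutex, flips the state, and flushes only after unlocking. The shape of the pattern, with stand-ins for the locking and workqueue calls:

    #include <stdbool.h>

    static void lock(void)             {}
    static void unlock(void)           {}
    static void cancel_idle_work(void) {} /* cancel_delayed_work() stand-in */
    static void flush_wq(void)         {} /* flush_workqueue() stand-in */

    static bool dynpm_enabled = true;

    static void switch_to_profile_method(void)
    {
        bool need_flush = false;

        lock();
        if (dynpm_enabled) {
            cancel_idle_work();     /* it may still be running */
            need_flush = true;
        }
        dynpm_enabled = false;
        unlock();

        /* flushing under the mutex would deadlock against the worker,
         * which acquires the same mutex */
        if (need_flush)
            flush_wq();
    }
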
| @@ -382,17 +426,36 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon | |||
| 382 | 426 | ||
| 383 | void radeon_pm_suspend(struct radeon_device *rdev) | 427 | void radeon_pm_suspend(struct radeon_device *rdev) |
| 384 | { | 428 | { |
| 429 | bool flush_wq = false; | ||
| 430 | |||
| 385 | mutex_lock(&rdev->pm.mutex); | 431 | mutex_lock(&rdev->pm.mutex); |
| 386 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | 432 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
| 387 | rdev->pm.current_power_state_index = -1; | 433 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
| 388 | rdev->pm.current_clock_mode_index = -1; | 434 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) |
| 389 | rdev->pm.current_sclk = 0; | 435 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; |
| 390 | rdev->pm.current_mclk = 0; | 436 | flush_wq = true; |
| 437 | } | ||
| 391 | mutex_unlock(&rdev->pm.mutex); | 438 | mutex_unlock(&rdev->pm.mutex); |
| 439 | if (flush_wq) | ||
| 440 | flush_workqueue(rdev->wq); | ||
| 392 | } | 441 | } |
| 393 | 442 | ||
| 394 | void radeon_pm_resume(struct radeon_device *rdev) | 443 | void radeon_pm_resume(struct radeon_device *rdev) |
| 395 | { | 444 | { |
| 445 | /* asic init will reset the default power state */ | ||
| 446 | mutex_lock(&rdev->pm.mutex); | ||
| 447 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | ||
| 448 | rdev->pm.current_clock_mode_index = 0; | ||
| 449 | rdev->pm.current_sclk = rdev->clock.default_sclk; | ||
| 450 | rdev->pm.current_mclk = rdev->clock.default_mclk; | ||
| 451 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | ||
| 452 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | ||
| 453 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | ||
| 454 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | ||
| 455 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
| 456 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
| 457 | } | ||
| 458 | mutex_unlock(&rdev->pm.mutex); | ||
| 396 | radeon_pm_compute_clocks(rdev); | 459 | radeon_pm_compute_clocks(rdev); |
| 397 | } | 460 | } |
| 398 | 461 | ||
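Suspend now parks an active dynpm state machine in DYNPM_STATE_SUSPENDED instead of zeroing the current-clock bookkeeping, and resume reseeds that bookkeeping from the defaults (asic init has just reset the hardware) before requeueing the idle work only if it had been suspended. The state handling as a small model with illustrative clock values:

    enum dynpm_state { DYNPM_DISABLED, DYNPM_ACTIVE, DYNPM_SUSPENDED };

    static enum dynpm_state dynpm = DYNPM_ACTIVE;
    static unsigned cur_sclk, cur_mclk;
    static const unsigned default_sclk = 30000, default_mclk = 30000;

    static void queue_idle_work(void) { /* queue_delayed_work() stand-in */ }

    static void pm_suspend(void)
    {
        if (dynpm == DYNPM_ACTIVE)
            dynpm = DYNPM_SUSPENDED; /* idle work cancelled and flushed */
    }

    static void pm_resume(void)
    {
        /* asic init reset the chip to its default power state */
        cur_sclk = default_sclk;
        cur_mclk = default_mclk;
        if (dynpm == DYNPM_SUSPENDED) {
            dynpm = DYNPM_ACTIVE;
            queue_idle_work();
        }
    }
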
| @@ -401,32 +464,24 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 401 | int ret; | 464 | int ret; |
| 402 | /* default to profile method */ | 465 | /* default to profile method */ |
| 403 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 466 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| 467 | rdev->pm.profile = PM_PROFILE_DEFAULT; | ||
| 404 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 468 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
| 405 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 469 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
| 406 | rdev->pm.dynpm_can_upclock = true; | 470 | rdev->pm.dynpm_can_upclock = true; |
| 407 | rdev->pm.dynpm_can_downclock = true; | 471 | rdev->pm.dynpm_can_downclock = true; |
| 408 | rdev->pm.current_sclk = 0; | 472 | rdev->pm.current_sclk = rdev->clock.default_sclk; |
| 409 | rdev->pm.current_mclk = 0; | 473 | rdev->pm.current_mclk = rdev->clock.default_mclk; |
| 410 | 474 | ||
| 411 | if (rdev->bios) { | 475 | if (rdev->bios) { |
| 412 | if (rdev->is_atom_bios) | 476 | if (rdev->is_atom_bios) |
| 413 | radeon_atombios_get_power_modes(rdev); | 477 | radeon_atombios_get_power_modes(rdev); |
| 414 | else | 478 | else |
| 415 | radeon_combios_get_power_modes(rdev); | 479 | radeon_combios_get_power_modes(rdev); |
| 480 | radeon_pm_print_states(rdev); | ||
| 416 | radeon_pm_init_profile(rdev); | 481 | radeon_pm_init_profile(rdev); |
| 417 | rdev->pm.current_power_state_index = -1; | ||
| 418 | rdev->pm.current_clock_mode_index = -1; | ||
| 419 | } | 482 | } |
| 420 | 483 | ||
| 421 | if (rdev->pm.num_power_states > 1) { | 484 | if (rdev->pm.num_power_states > 1) { |
| 422 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | ||
| 423 | mutex_lock(&rdev->pm.mutex); | ||
| 424 | rdev->pm.profile = PM_PROFILE_DEFAULT; | ||
| 425 | radeon_pm_update_profile(rdev); | ||
| 426 | radeon_pm_set_clocks(rdev); | ||
| 427 | mutex_unlock(&rdev->pm.mutex); | ||
| 428 | } | ||
| 429 | |||
| 430 | /* where's the best place to put these? */ | 485 | /* where's the best place to put these? */ |
| 431 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | 486 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); |
| 432 | if (ret) | 487 | if (ret) |
| @@ -454,6 +509,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 454 | void radeon_pm_fini(struct radeon_device *rdev) | 509 | void radeon_pm_fini(struct radeon_device *rdev) |
| 455 | { | 510 | { |
| 456 | if (rdev->pm.num_power_states > 1) { | 511 | if (rdev->pm.num_power_states > 1) { |
| 512 | bool flush_wq = false; | ||
| 513 | |||
| 457 | mutex_lock(&rdev->pm.mutex); | 514 | mutex_lock(&rdev->pm.mutex); |
| 458 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 515 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
| 459 | rdev->pm.profile = PM_PROFILE_DEFAULT; | 516 | rdev->pm.profile = PM_PROFILE_DEFAULT; |
| @@ -461,13 +518,16 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
| 461 | radeon_pm_set_clocks(rdev); | 518 | radeon_pm_set_clocks(rdev); |
| 462 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 519 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
| 463 | /* cancel work */ | 520 | /* cancel work */ |
| 464 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); | 521 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
| 522 | flush_wq = true; | ||
| 465 | /* reset default clocks */ | 523 | /* reset default clocks */ |
| 466 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 524 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
| 467 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 525 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
| 468 | radeon_pm_set_clocks(rdev); | 526 | radeon_pm_set_clocks(rdev); |
| 469 | } | 527 | } |
| 470 | mutex_unlock(&rdev->pm.mutex); | 528 | mutex_unlock(&rdev->pm.mutex); |
| 529 | if (flush_wq) | ||
| 530 | flush_workqueue(rdev->wq); | ||
| 471 | 531 | ||
| 472 | device_remove_file(rdev->dev, &dev_attr_power_profile); | 532 | device_remove_file(rdev->dev, &dev_attr_power_profile); |
| 473 | device_remove_file(rdev->dev, &dev_attr_power_method); | 533 | device_remove_file(rdev->dev, &dev_attr_power_method); |
| @@ -688,12 +748,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) | |||
| 688 | radeon_pm_get_dynpm_state(rdev); | 748 | radeon_pm_get_dynpm_state(rdev); |
| 689 | radeon_pm_set_clocks(rdev); | 749 | radeon_pm_set_clocks(rdev); |
| 690 | } | 750 | } |
| 751 | |||
| 752 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
| 753 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
| 691 | } | 754 | } |
| 692 | mutex_unlock(&rdev->pm.mutex); | 755 | mutex_unlock(&rdev->pm.mutex); |
| 693 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 756 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
| 694 | |||
| 695 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
| 696 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
| 697 | } | 757 | } |
| 698 | 758 | ||
| 699 | /* | 759 | /* |
| @@ -712,6 +772,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
| 712 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); | 772 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); |
| 713 | if (rdev->asic->get_memory_clock) | 773 | if (rdev->asic->get_memory_clock) |
| 714 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); | 774 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); |
| 775 | if (rdev->pm.current_vddc) | ||
| 776 | seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); | ||
| 715 | if (rdev->asic->get_pcie_lanes) | 777 | if (rdev->asic->get_pcie_lanes) |
| 716 | seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); | 778 | seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); |
| 717 | 779 | ||
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen new file mode 100644 index 000000000000..f78fd592544d --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
| @@ -0,0 +1,611 @@ | |||
| 1 | evergreen 0x9400 | ||
| 2 | 0x00008040 WAIT_UNTIL | ||
| 3 | 0x00008044 WAIT_UNTIL_POLL_CNTL | ||
| 4 | 0x00008048 WAIT_UNTIL_POLL_MASK | ||
| 5 | 0x0000804c WAIT_UNTIL_POLL_REFDATA | ||
| 6 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | ||
| 7 | 0x000088C4 VGT_CACHE_INVALIDATION | ||
| 8 | 0x000088D4 VGT_GS_VERTEX_REUSE | ||
| 9 | 0x00008958 VGT_PRIMITIVE_TYPE | ||
| 10 | 0x0000895C VGT_INDEX_TYPE | ||
| 11 | 0x00008970 VGT_NUM_INDICES | ||
| 12 | 0x00008974 VGT_NUM_INSTANCES | ||
| 13 | 0x00008990 VGT_COMPUTE_DIM_X | ||
| 14 | 0x00008994 VGT_COMPUTE_DIM_Y | ||
| 15 | 0x00008998 VGT_COMPUTE_DIM_Z | ||
| 16 | 0x0000899C VGT_COMPUTE_START_X | ||
| 17 | 0x000089A0 VGT_COMPUTE_START_Y | ||
| 18 | 0x000089A4 VGT_COMPUTE_START_Z | ||
| 19 | 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE | ||
| 20 | 0x00008A14 PA_CL_ENHANCE | ||
| 21 | 0x00008A60 PA_SC_LINE_STIPPLE_VALUE | ||
| 22 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE | ||
| 23 | 0x00008BF0 PA_SC_ENHANCE | ||
| 24 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ | ||
| 25 | 0x00008C00 SQ_CONFIG | ||
| 26 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 | ||
| 27 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 | ||
| 28 | 0x00008C0C SQ_GPR_RESOURCE_MGMT_3 | ||
| 29 | 0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1 | ||
| 30 | 0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2 | ||
| 31 | 0x00008C18 SQ_THREAD_RESOURCE_MGMT | ||
| 32 | 0x00008C1C SQ_THREAD_RESOURCE_MGMT_2 | ||
| 33 | 0x00008C20 SQ_STACK_RESOURCE_MGMT_1 | ||
| 34 | 0x00008C24 SQ_STACK_RESOURCE_MGMT_2 | ||
| 35 | 0x00008C28 SQ_STACK_RESOURCE_MGMT_3 | ||
| 36 | 0x00008DF8 SQ_CONST_MEM_BASE | ||
| 37 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | ||
| 38 | 0x00009100 SPI_CONFIG_CNTL | ||
| 39 | 0x0000913C SPI_CONFIG_CNTL_1 | ||
| 40 | 0x00009700 VC_CNTL | ||
| 41 | 0x00009714 VC_ENHANCE | ||
| 42 | 0x00009830 DB_DEBUG | ||
| 43 | 0x00009834 DB_DEBUG2 | ||
| 44 | 0x00009838 DB_DEBUG3 | ||
| 45 | 0x0000983C DB_DEBUG4 | ||
| 46 | 0x00009854 DB_WATERMARKS | ||
| 47 | 0x0000A400 TD_PS_BORDER_COLOR_INDEX | ||
| 48 | 0x0000A404 TD_PS_BORDER_COLOR_RED | ||
| 49 | 0x0000A408 TD_PS_BORDER_COLOR_GREEN | ||
| 50 | 0x0000A40C TD_PS_BORDER_COLOR_BLUE | ||
| 51 | 0x0000A410 TD_PS_BORDER_COLOR_ALPHA | ||
| 52 | 0x0000A414 TD_VS_BORDER_COLOR_INDEX | ||
| 53 | 0x0000A418 TD_VS_BORDER_COLOR_RED | ||
| 54 | 0x0000A41C TD_VS_BORDER_COLOR_GREEN | ||
| 55 | 0x0000A420 TD_VS_BORDER_COLOR_BLUE | ||
| 56 | 0x0000A424 TD_VS_BORDER_COLOR_ALPHA | ||
| 57 | 0x0000A428 TD_GS_BORDER_COLOR_INDEX | ||
| 58 | 0x0000A42C TD_GS_BORDER_COLOR_RED | ||
| 59 | 0x0000A430 TD_GS_BORDER_COLOR_GREEN | ||
| 60 | 0x0000A434 TD_GS_BORDER_COLOR_BLUE | ||
| 61 | 0x0000A438 TD_GS_BORDER_COLOR_ALPHA | ||
| 62 | 0x0000A43C TD_HS_BORDER_COLOR_INDEX | ||
| 63 | 0x0000A440 TD_HS_BORDER_COLOR_RED | ||
| 64 | 0x0000A444 TD_HS_BORDER_COLOR_GREEN | ||
| 65 | 0x0000A448 TD_HS_BORDER_COLOR_BLUE | ||
| 66 | 0x0000A44C TD_HS_BORDER_COLOR_ALPHA | ||
| 67 | 0x0000A450 TD_LS_BORDER_COLOR_INDEX | ||
| 68 | 0x0000A454 TD_LS_BORDER_COLOR_RED | ||
| 69 | 0x0000A458 TD_LS_BORDER_COLOR_GREEN | ||
| 70 | 0x0000A45C TD_LS_BORDER_COLOR_BLUE | ||
| 71 | 0x0000A460 TD_LS_BORDER_COLOR_ALPHA | ||
| 72 | 0x0000A464 TD_CS_BORDER_COLOR_INDEX | ||
| 73 | 0x0000A468 TD_CS_BORDER_COLOR_RED | ||
| 74 | 0x0000A46C TD_CS_BORDER_COLOR_GREEN | ||
| 75 | 0x0000A470 TD_CS_BORDER_COLOR_BLUE | ||
| 76 | 0x0000A474 TD_CS_BORDER_COLOR_ALPHA | ||
| 77 | 0x00028000 DB_RENDER_CONTROL | ||
| 78 | 0x00028004 DB_COUNT_CONTROL | ||
| 79 | 0x0002800C DB_RENDER_OVERRIDE | ||
| 80 | 0x00028010 DB_RENDER_OVERRIDE2 | ||
| 81 | 0x00028028 DB_STENCIL_CLEAR | ||
| 82 | 0x0002802C DB_DEPTH_CLEAR | ||
| 83 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL | ||
| 84 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | ||
| 85 | 0x0002805C DB_DEPTH_SLICE | ||
| 86 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 | ||
| 87 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 | ||
| 88 | 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 | ||
| 89 | 0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 | ||
| 90 | 0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 | ||
| 91 | 0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 | ||
| 92 | 0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 | ||
| 93 | 0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 | ||
| 94 | 0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 | ||
| 95 | 0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 | ||
| 96 | 0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 | ||
| 97 | 0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 | ||
| 98 | 0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 | ||
| 99 | 0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 | ||
| 100 | 0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14 | ||
| 101 | 0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 | ||
| 102 | 0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 | ||
| 103 | 0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 | ||
| 104 | 0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 | ||
| 105 | 0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 | ||
| 106 | 0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 | ||
| 107 | 0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 | ||
| 108 | 0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 | ||
| 109 | 0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 | ||
| 110 | 0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 | ||
| 111 | 0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 | ||
| 112 | 0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10 | ||
| 113 | 0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 | ||
| 114 | 0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 | ||
| 115 | 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 | ||
| 116 | 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 | ||
| 117 | 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 | ||
| 118 | 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 | ||
| 119 | 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 | ||
| 120 | 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 | ||
| 121 | 0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 | ||
| 122 | 0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 | ||
| 123 | 0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 | ||
| 124 | 0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 | ||
| 125 | 0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 | ||
| 126 | 0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 | ||
| 127 | 0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 | ||
| 128 | 0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 | ||
| 129 | 0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 | ||
| 130 | 0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 | ||
| 131 | 0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 | ||
| 132 | 0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 | ||
| 133 | 0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 | ||
| 134 | 0x00028200 PA_SC_WINDOW_OFFSET | ||
| 135 | 0x00028204 PA_SC_WINDOW_SCISSOR_TL | ||
| 136 | 0x00028208 PA_SC_WINDOW_SCISSOR_BR | ||
| 137 | 0x0002820C PA_SC_CLIPRECT_RULE | ||
| 138 | 0x00028210 PA_SC_CLIPRECT_0_TL | ||
| 139 | 0x00028214 PA_SC_CLIPRECT_0_BR | ||
| 140 | 0x00028218 PA_SC_CLIPRECT_1_TL | ||
| 141 | 0x0002821C PA_SC_CLIPRECT_1_BR | ||
| 142 | 0x00028220 PA_SC_CLIPRECT_2_TL | ||
| 143 | 0x00028224 PA_SC_CLIPRECT_2_BR | ||
| 144 | 0x00028228 PA_SC_CLIPRECT_3_TL | ||
| 145 | 0x0002822C PA_SC_CLIPRECT_3_BR | ||
| 146 | 0x00028230 PA_SC_EDGERULE | ||
| 147 | 0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET | ||
| 148 | 0x00028240 PA_SC_GENERIC_SCISSOR_TL | ||
| 149 | 0x00028244 PA_SC_GENERIC_SCISSOR_BR | ||
| 150 | 0x00028250 PA_SC_VPORT_SCISSOR_0_TL | ||
| 151 | 0x00028254 PA_SC_VPORT_SCISSOR_0_BR | ||
| 152 | 0x00028258 PA_SC_VPORT_SCISSOR_1_TL | ||
| 153 | 0x0002825C PA_SC_VPORT_SCISSOR_1_BR | ||
| 154 | 0x00028260 PA_SC_VPORT_SCISSOR_2_TL | ||
| 155 | 0x00028264 PA_SC_VPORT_SCISSOR_2_BR | ||
| 156 | 0x00028268 PA_SC_VPORT_SCISSOR_3_TL | ||
| 157 | 0x0002826C PA_SC_VPORT_SCISSOR_3_BR | ||
| 158 | 0x00028270 PA_SC_VPORT_SCISSOR_4_TL | ||
| 159 | 0x00028274 PA_SC_VPORT_SCISSOR_4_BR | ||
| 160 | 0x00028278 PA_SC_VPORT_SCISSOR_5_TL | ||
| 161 | 0x0002827C PA_SC_VPORT_SCISSOR_5_BR | ||
| 162 | 0x00028280 PA_SC_VPORT_SCISSOR_6_TL | ||
| 163 | 0x00028284 PA_SC_VPORT_SCISSOR_6_BR | ||
| 164 | 0x00028288 PA_SC_VPORT_SCISSOR_7_TL | ||
| 165 | 0x0002828C PA_SC_VPORT_SCISSOR_7_BR | ||
| 166 | 0x00028290 PA_SC_VPORT_SCISSOR_8_TL | ||
| 167 | 0x00028294 PA_SC_VPORT_SCISSOR_8_BR | ||
| 168 | 0x00028298 PA_SC_VPORT_SCISSOR_9_TL | ||
| 169 | 0x0002829C PA_SC_VPORT_SCISSOR_9_BR | ||
| 170 | 0x000282A0 PA_SC_VPORT_SCISSOR_10_TL | ||
| 171 | 0x000282A4 PA_SC_VPORT_SCISSOR_10_BR | ||
| 172 | 0x000282A8 PA_SC_VPORT_SCISSOR_11_TL | ||
| 173 | 0x000282AC PA_SC_VPORT_SCISSOR_11_BR | ||
| 174 | 0x000282B0 PA_SC_VPORT_SCISSOR_12_TL | ||
| 175 | 0x000282B4 PA_SC_VPORT_SCISSOR_12_BR | ||
| 176 | 0x000282B8 PA_SC_VPORT_SCISSOR_13_TL | ||
| 177 | 0x000282BC PA_SC_VPORT_SCISSOR_13_BR | ||
| 178 | 0x000282C0 PA_SC_VPORT_SCISSOR_14_TL | ||
| 179 | 0x000282C4 PA_SC_VPORT_SCISSOR_14_BR | ||
| 180 | 0x000282C8 PA_SC_VPORT_SCISSOR_15_TL | ||
| 181 | 0x000282CC PA_SC_VPORT_SCISSOR_15_BR | ||
| 182 | 0x000282D0 PA_SC_VPORT_ZMIN_0 | ||
| 183 | 0x000282D4 PA_SC_VPORT_ZMAX_0 | ||
| 184 | 0x000282D8 PA_SC_VPORT_ZMIN_1 | ||
| 185 | 0x000282DC PA_SC_VPORT_ZMAX_1 | ||
| 186 | 0x000282E0 PA_SC_VPORT_ZMIN_2 | ||
| 187 | 0x000282E4 PA_SC_VPORT_ZMAX_2 | ||
| 188 | 0x000282E8 PA_SC_VPORT_ZMIN_3 | ||
| 189 | 0x000282EC PA_SC_VPORT_ZMAX_3 | ||
| 190 | 0x000282F0 PA_SC_VPORT_ZMIN_4 | ||
| 191 | 0x000282F4 PA_SC_VPORT_ZMAX_4 | ||
| 192 | 0x000282F8 PA_SC_VPORT_ZMIN_5 | ||
| 193 | 0x000282FC PA_SC_VPORT_ZMAX_5 | ||
| 194 | 0x00028300 PA_SC_VPORT_ZMIN_6 | ||
| 195 | 0x00028304 PA_SC_VPORT_ZMAX_6 | ||
| 196 | 0x00028308 PA_SC_VPORT_ZMIN_7 | ||
| 197 | 0x0002830C PA_SC_VPORT_ZMAX_7 | ||
| 198 | 0x00028310 PA_SC_VPORT_ZMIN_8 | ||
| 199 | 0x00028314 PA_SC_VPORT_ZMAX_8 | ||
| 200 | 0x00028318 PA_SC_VPORT_ZMIN_9 | ||
| 201 | 0x0002831C PA_SC_VPORT_ZMAX_9 | ||
| 202 | 0x00028320 PA_SC_VPORT_ZMIN_10 | ||
| 203 | 0x00028324 PA_SC_VPORT_ZMAX_10 | ||
| 204 | 0x00028328 PA_SC_VPORT_ZMIN_11 | ||
| 205 | 0x0002832C PA_SC_VPORT_ZMAX_11 | ||
| 206 | 0x00028330 PA_SC_VPORT_ZMIN_12 | ||
| 207 | 0x00028334 PA_SC_VPORT_ZMAX_12 | ||
| 208 | 0x00028338 PA_SC_VPORT_ZMIN_13 | ||
| 209 | 0x0002833C PA_SC_VPORT_ZMAX_13 | ||
| 210 | 0x00028340 PA_SC_VPORT_ZMIN_14 | ||
| 211 | 0x00028344 PA_SC_VPORT_ZMAX_14 | ||
| 212 | 0x00028348 PA_SC_VPORT_ZMIN_15 | ||
| 213 | 0x0002834C PA_SC_VPORT_ZMAX_15 | ||
| 214 | 0x00028350 SX_MISC | ||
| 215 | 0x00028380 SQ_VTX_SEMANTIC_0 | ||
| 216 | 0x00028384 SQ_VTX_SEMANTIC_1 | ||
| 217 | 0x00028388 SQ_VTX_SEMANTIC_2 | ||
| 218 | 0x0002838C SQ_VTX_SEMANTIC_3 | ||
| 219 | 0x00028390 SQ_VTX_SEMANTIC_4 | ||
| 220 | 0x00028394 SQ_VTX_SEMANTIC_5 | ||
| 221 | 0x00028398 SQ_VTX_SEMANTIC_6 | ||
| 222 | 0x0002839C SQ_VTX_SEMANTIC_7 | ||
| 223 | 0x000283A0 SQ_VTX_SEMANTIC_8 | ||
| 224 | 0x000283A4 SQ_VTX_SEMANTIC_9 | ||
| 225 | 0x000283A8 SQ_VTX_SEMANTIC_10 | ||
| 226 | 0x000283AC SQ_VTX_SEMANTIC_11 | ||
| 227 | 0x000283B0 SQ_VTX_SEMANTIC_12 | ||
| 228 | 0x000283B4 SQ_VTX_SEMANTIC_13 | ||
| 229 | 0x000283B8 SQ_VTX_SEMANTIC_14 | ||
| 230 | 0x000283BC SQ_VTX_SEMANTIC_15 | ||
| 231 | 0x000283C0 SQ_VTX_SEMANTIC_16 | ||
| 232 | 0x000283C4 SQ_VTX_SEMANTIC_17 | ||
| 233 | 0x000283C8 SQ_VTX_SEMANTIC_18 | ||
| 234 | 0x000283CC SQ_VTX_SEMANTIC_19 | ||
| 235 | 0x000283D0 SQ_VTX_SEMANTIC_20 | ||
| 236 | 0x000283D4 SQ_VTX_SEMANTIC_21 | ||
| 237 | 0x000283D8 SQ_VTX_SEMANTIC_22 | ||
| 238 | 0x000283DC SQ_VTX_SEMANTIC_23 | ||
| 239 | 0x000283E0 SQ_VTX_SEMANTIC_24 | ||
| 240 | 0x000283E4 SQ_VTX_SEMANTIC_25 | ||
| 241 | 0x000283E8 SQ_VTX_SEMANTIC_26 | ||
| 242 | 0x000283EC SQ_VTX_SEMANTIC_27 | ||
| 243 | 0x000283F0 SQ_VTX_SEMANTIC_28 | ||
| 244 | 0x000283F4 SQ_VTX_SEMANTIC_29 | ||
| 245 | 0x000283F8 SQ_VTX_SEMANTIC_30 | ||
| 246 | 0x000283FC SQ_VTX_SEMANTIC_31 | ||
| 247 | 0x00028400 VGT_MAX_VTX_INDX | ||
| 248 | 0x00028404 VGT_MIN_VTX_INDX | ||
| 249 | 0x00028408 VGT_INDX_OFFSET | ||
| 250 | 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX | ||
| 251 | 0x00028410 SX_ALPHA_TEST_CONTROL | ||
| 252 | 0x00028414 CB_BLEND_RED | ||
| 253 | 0x00028418 CB_BLEND_GREEN | ||
| 254 | 0x0002841C CB_BLEND_BLUE | ||
| 255 | 0x00028420 CB_BLEND_ALPHA | ||
| 256 | 0x00028430 DB_STENCILREFMASK | ||
| 257 | 0x00028434 DB_STENCILREFMASK_BF | ||
| 258 | 0x00028438 SX_ALPHA_REF | ||
| 259 | 0x0002843C PA_CL_VPORT_XSCALE_0 | ||
| 260 | 0x00028440 PA_CL_VPORT_XOFFSET_0 | ||
| 261 | 0x00028444 PA_CL_VPORT_YSCALE_0 | ||
| 262 | 0x00028448 PA_CL_VPORT_YOFFSET_0 | ||
| 263 | 0x0002844C PA_CL_VPORT_ZSCALE_0 | ||
| 264 | 0x00028450 PA_CL_VPORT_ZOFFSET_0 | ||
| 265 | 0x00028454 PA_CL_VPORT_XSCALE_1 | ||
| 266 | 0x00028458 PA_CL_VPORT_XOFFSET_1 | ||
| 267 | 0x0002845C PA_CL_VPORT_YSCALE_1 | ||
| 268 | 0x00028460 PA_CL_VPORT_YOFFSET_1 | ||
| 269 | 0x00028464 PA_CL_VPORT_ZSCALE_1 | ||
| 270 | 0x00028468 PA_CL_VPORT_ZOFFSET_1 | ||
| 271 | 0x0002846C PA_CL_VPORT_XSCALE_2 | ||
| 272 | 0x00028470 PA_CL_VPORT_XOFFSET_2 | ||
| 273 | 0x00028474 PA_CL_VPORT_YSCALE_2 | ||
| 274 | 0x00028478 PA_CL_VPORT_YOFFSET_2 | ||
| 275 | 0x0002847C PA_CL_VPORT_ZSCALE_2 | ||
| 276 | 0x00028480 PA_CL_VPORT_ZOFFSET_2 | ||
| 277 | 0x00028484 PA_CL_VPORT_XSCALE_3 | ||
| 278 | 0x00028488 PA_CL_VPORT_XOFFSET_3 | ||
| 279 | 0x0002848C PA_CL_VPORT_YSCALE_3 | ||
| 280 | 0x00028490 PA_CL_VPORT_YOFFSET_3 | ||
| 281 | 0x00028494 PA_CL_VPORT_ZSCALE_3 | ||
| 282 | 0x00028498 PA_CL_VPORT_ZOFFSET_3 | ||
| 283 | 0x0002849C PA_CL_VPORT_XSCALE_4 | ||
| 284 | 0x000284A0 PA_CL_VPORT_XOFFSET_4 | ||
| 285 | 0x000284A4 PA_CL_VPORT_YSCALE_4 | ||
| 286 | 0x000284A8 PA_CL_VPORT_YOFFSET_4 | ||
| 287 | 0x000284AC PA_CL_VPORT_ZSCALE_4 | ||
| 288 | 0x000284B0 PA_CL_VPORT_ZOFFSET_4 | ||
| 289 | 0x000284B4 PA_CL_VPORT_XSCALE_5 | ||
| 290 | 0x000284B8 PA_CL_VPORT_XOFFSET_5 | ||
| 291 | 0x000284BC PA_CL_VPORT_YSCALE_5 | ||
| 292 | 0x000284C0 PA_CL_VPORT_YOFFSET_5 | ||
| 293 | 0x000284C4 PA_CL_VPORT_ZSCALE_5 | ||
| 294 | 0x000284C8 PA_CL_VPORT_ZOFFSET_5 | ||
| 295 | 0x000284CC PA_CL_VPORT_XSCALE_6 | ||
| 296 | 0x000284D0 PA_CL_VPORT_XOFFSET_6 | ||
| 297 | 0x000284D4 PA_CL_VPORT_YSCALE_6 | ||
| 298 | 0x000284D8 PA_CL_VPORT_YOFFSET_6 | ||
| 299 | 0x000284DC PA_CL_VPORT_ZSCALE_6 | ||
| 300 | 0x000284E0 PA_CL_VPORT_ZOFFSET_6 | ||
| 301 | 0x000284E4 PA_CL_VPORT_XSCALE_7 | ||
| 302 | 0x000284E8 PA_CL_VPORT_XOFFSET_7 | ||
| 303 | 0x000284EC PA_CL_VPORT_YSCALE_7 | ||
| 304 | 0x000284F0 PA_CL_VPORT_YOFFSET_7 | ||
| 305 | 0x000284F4 PA_CL_VPORT_ZSCALE_7 | ||
| 306 | 0x000284F8 PA_CL_VPORT_ZOFFSET_7 | ||
| 307 | 0x000284FC PA_CL_VPORT_XSCALE_8 | ||
| 308 | 0x00028500 PA_CL_VPORT_XOFFSET_8 | ||
| 309 | 0x00028504 PA_CL_VPORT_YSCALE_8 | ||
| 310 | 0x00028508 PA_CL_VPORT_YOFFSET_8 | ||
| 311 | 0x0002850C PA_CL_VPORT_ZSCALE_8 | ||
| 312 | 0x00028510 PA_CL_VPORT_ZOFFSET_8 | ||
| 313 | 0x00028514 PA_CL_VPORT_XSCALE_9 | ||
| 314 | 0x00028518 PA_CL_VPORT_XOFFSET_9 | ||
| 315 | 0x0002851C PA_CL_VPORT_YSCALE_9 | ||
| 316 | 0x00028520 PA_CL_VPORT_YOFFSET_9 | ||
| 317 | 0x00028524 PA_CL_VPORT_ZSCALE_9 | ||
| 318 | 0x00028528 PA_CL_VPORT_ZOFFSET_9 | ||
| 319 | 0x0002852C PA_CL_VPORT_XSCALE_10 | ||
| 320 | 0x00028530 PA_CL_VPORT_XOFFSET_10 | ||
| 321 | 0x00028534 PA_CL_VPORT_YSCALE_10 | ||
| 322 | 0x00028538 PA_CL_VPORT_YOFFSET_10 | ||
| 323 | 0x0002853C PA_CL_VPORT_ZSCALE_10 | ||
| 324 | 0x00028540 PA_CL_VPORT_ZOFFSET_10 | ||
| 325 | 0x00028544 PA_CL_VPORT_XSCALE_11 | ||
| 326 | 0x00028548 PA_CL_VPORT_XOFFSET_11 | ||
| 327 | 0x0002854C PA_CL_VPORT_YSCALE_11 | ||
| 328 | 0x00028550 PA_CL_VPORT_YOFFSET_11 | ||
| 329 | 0x00028554 PA_CL_VPORT_ZSCALE_11 | ||
| 330 | 0x00028558 PA_CL_VPORT_ZOFFSET_11 | ||
| 331 | 0x0002855C PA_CL_VPORT_XSCALE_12 | ||
| 332 | 0x00028560 PA_CL_VPORT_XOFFSET_12 | ||
| 333 | 0x00028564 PA_CL_VPORT_YSCALE_12 | ||
| 334 | 0x00028568 PA_CL_VPORT_YOFFSET_12 | ||
| 335 | 0x0002856C PA_CL_VPORT_ZSCALE_12 | ||
| 336 | 0x00028570 PA_CL_VPORT_ZOFFSET_12 | ||
| 337 | 0x00028574 PA_CL_VPORT_XSCALE_13 | ||
| 338 | 0x00028578 PA_CL_VPORT_XOFFSET_13 | ||
| 339 | 0x0002857C PA_CL_VPORT_YSCALE_13 | ||
| 340 | 0x00028580 PA_CL_VPORT_YOFFSET_13 | ||
| 341 | 0x00028584 PA_CL_VPORT_ZSCALE_13 | ||
| 342 | 0x00028588 PA_CL_VPORT_ZOFFSET_13 | ||
| 343 | 0x0002858C PA_CL_VPORT_XSCALE_14 | ||
| 344 | 0x00028590 PA_CL_VPORT_XOFFSET_14 | ||
| 345 | 0x00028594 PA_CL_VPORT_YSCALE_14 | ||
| 346 | 0x00028598 PA_CL_VPORT_YOFFSET_14 | ||
| 347 | 0x0002859C PA_CL_VPORT_ZSCALE_14 | ||
| 348 | 0x000285A0 PA_CL_VPORT_ZOFFSET_14 | ||
| 349 | 0x000285A4 PA_CL_VPORT_XSCALE_15 | ||
| 350 | 0x000285A8 PA_CL_VPORT_XOFFSET_15 | ||
| 351 | 0x000285AC PA_CL_VPORT_YSCALE_15 | ||
| 352 | 0x000285B0 PA_CL_VPORT_YOFFSET_15 | ||
| 353 | 0x000285B4 PA_CL_VPORT_ZSCALE_15 | ||
| 354 | 0x000285B8 PA_CL_VPORT_ZOFFSET_15 | ||
| 355 | 0x000285BC PA_CL_UCP_0_X | ||
| 356 | 0x000285C0 PA_CL_UCP_0_Y | ||
| 357 | 0x000285C4 PA_CL_UCP_0_Z | ||
| 358 | 0x000285C8 PA_CL_UCP_0_W | ||
| 359 | 0x000285CC PA_CL_UCP_1_X | ||
| 360 | 0x000285D0 PA_CL_UCP_1_Y | ||
| 361 | 0x000285D4 PA_CL_UCP_1_Z | ||
| 362 | 0x000285D8 PA_CL_UCP_1_W | ||
| 363 | 0x000285DC PA_CL_UCP_2_X | ||
| 364 | 0x000285E0 PA_CL_UCP_2_Y | ||
| 365 | 0x000285E4 PA_CL_UCP_2_Z | ||
| 366 | 0x000285E8 PA_CL_UCP_2_W | ||
| 367 | 0x000285EC PA_CL_UCP_3_X | ||
| 368 | 0x000285F0 PA_CL_UCP_3_Y | ||
| 369 | 0x000285F4 PA_CL_UCP_3_Z | ||
| 370 | 0x000285F8 PA_CL_UCP_3_W | ||
| 371 | 0x000285FC PA_CL_UCP_4_X | ||
| 372 | 0x00028600 PA_CL_UCP_4_Y | ||
| 373 | 0x00028604 PA_CL_UCP_4_Z | ||
| 374 | 0x00028608 PA_CL_UCP_4_W | ||
| 375 | 0x0002860C PA_CL_UCP_5_X | ||
| 376 | 0x00028610 PA_CL_UCP_5_Y | ||
| 377 | 0x00028614 PA_CL_UCP_5_Z | ||
| 378 | 0x00028618 PA_CL_UCP_5_W | ||
| 379 | 0x0002861C SPI_VS_OUT_ID_0 | ||
| 380 | 0x00028620 SPI_VS_OUT_ID_1 | ||
| 381 | 0x00028624 SPI_VS_OUT_ID_2 | ||
| 382 | 0x00028628 SPI_VS_OUT_ID_3 | ||
| 383 | 0x0002862C SPI_VS_OUT_ID_4 | ||
| 384 | 0x00028630 SPI_VS_OUT_ID_5 | ||
| 385 | 0x00028634 SPI_VS_OUT_ID_6 | ||
| 386 | 0x00028638 SPI_VS_OUT_ID_7 | ||
| 387 | 0x0002863C SPI_VS_OUT_ID_8 | ||
| 388 | 0x00028640 SPI_VS_OUT_ID_9 | ||
| 389 | 0x00028644 SPI_PS_INPUT_CNTL_0 | ||
| 390 | 0x00028648 SPI_PS_INPUT_CNTL_1 | ||
| 391 | 0x0002864C SPI_PS_INPUT_CNTL_2 | ||
| 392 | 0x00028650 SPI_PS_INPUT_CNTL_3 | ||
| 393 | 0x00028654 SPI_PS_INPUT_CNTL_4 | ||
| 394 | 0x00028658 SPI_PS_INPUT_CNTL_5 | ||
| 395 | 0x0002865C SPI_PS_INPUT_CNTL_6 | ||
| 396 | 0x00028660 SPI_PS_INPUT_CNTL_7 | ||
| 397 | 0x00028664 SPI_PS_INPUT_CNTL_8 | ||
| 398 | 0x00028668 SPI_PS_INPUT_CNTL_9 | ||
| 399 | 0x0002866C SPI_PS_INPUT_CNTL_10 | ||
| 400 | 0x00028670 SPI_PS_INPUT_CNTL_11 | ||
| 401 | 0x00028674 SPI_PS_INPUT_CNTL_12 | ||
| 402 | 0x00028678 SPI_PS_INPUT_CNTL_13 | ||
| 403 | 0x0002867C SPI_PS_INPUT_CNTL_14 | ||
| 404 | 0x00028680 SPI_PS_INPUT_CNTL_15 | ||
| 405 | 0x00028684 SPI_PS_INPUT_CNTL_16 | ||
| 406 | 0x00028688 SPI_PS_INPUT_CNTL_17 | ||
| 407 | 0x0002868C SPI_PS_INPUT_CNTL_18 | ||
| 408 | 0x00028690 SPI_PS_INPUT_CNTL_19 | ||
| 409 | 0x00028694 SPI_PS_INPUT_CNTL_20 | ||
| 410 | 0x00028698 SPI_PS_INPUT_CNTL_21 | ||
| 411 | 0x0002869C SPI_PS_INPUT_CNTL_22 | ||
| 412 | 0x000286A0 SPI_PS_INPUT_CNTL_23 | ||
| 413 | 0x000286A4 SPI_PS_INPUT_CNTL_24 | ||
| 414 | 0x000286A8 SPI_PS_INPUT_CNTL_25 | ||
| 415 | 0x000286AC SPI_PS_INPUT_CNTL_26 | ||
| 416 | 0x000286B0 SPI_PS_INPUT_CNTL_27 | ||
| 417 | 0x000286B4 SPI_PS_INPUT_CNTL_28 | ||
| 418 | 0x000286B8 SPI_PS_INPUT_CNTL_29 | ||
| 419 | 0x000286BC SPI_PS_INPUT_CNTL_30 | ||
| 420 | 0x000286C0 SPI_PS_INPUT_CNTL_31 | ||
| 421 | 0x000286C4 SPI_VS_OUT_CONFIG | ||
| 422 | 0x000286C8 SPI_THREAD_GROUPING | ||
| 423 | 0x000286CC SPI_PS_IN_CONTROL_0 | ||
| 424 | 0x000286D0 SPI_PS_IN_CONTROL_1 | ||
| 425 | 0x000286D4 SPI_INTERP_CONTROL_0 | ||
| 426 | 0x000286D8 SPI_INPUT_Z | ||
| 427 | 0x000286DC SPI_FOG_CNTL | ||
| 428 | 0x000286E0 SPI_BARYC_CNTL | ||
| 429 | 0x000286E4 SPI_PS_IN_CONTROL_2 | ||
| 430 | 0x000286E8 SPI_COMPUTE_INPUT_CNTL | ||
| 431 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X | ||
| 432 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y | ||
| 433 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z | ||
| 434 | 0x000286F8 GDS_ADDR_SIZE | ||
| 435 | 0x00028780 CB_BLEND0_CONTROL | ||
| 436 | 0x00028784 CB_BLEND1_CONTROL | ||
| 437 | 0x00028788 CB_BLEND2_CONTROL | ||
| 438 | 0x0002878C CB_BLEND3_CONTROL | ||
| 439 | 0x00028790 CB_BLEND4_CONTROL | ||
| 440 | 0x00028794 CB_BLEND5_CONTROL | ||
| 441 | 0x00028798 CB_BLEND6_CONTROL | ||
| 442 | 0x0002879C CB_BLEND7_CONTROL | ||
| 443 | 0x000287CC CS_COPY_STATE | ||
| 444 | 0x000287D0 GFX_COPY_STATE | ||
| 445 | 0x000287D4 PA_CL_POINT_X_RAD | ||
| 446 | 0x000287D8 PA_CL_POINT_Y_RAD | ||
| 447 | 0x000287DC PA_CL_POINT_SIZE | ||
| 448 | 0x000287E0 PA_CL_POINT_CULL_RAD | ||
| 449 | 0x00028808 CB_COLOR_CONTROL | ||
| 450 | 0x0002880C DB_SHADER_CONTROL | ||
| 451 | 0x00028810 PA_CL_CLIP_CNTL | ||
| 452 | 0x00028814 PA_SU_SC_MODE_CNTL | ||
| 453 | 0x00028818 PA_CL_VTE_CNTL | ||
| 454 | 0x0002881C PA_CL_VS_OUT_CNTL | ||
| 455 | 0x00028820 PA_CL_NANINF_CNTL | ||
| 456 | 0x00028824 PA_SU_LINE_STIPPLE_CNTL | ||
| 457 | 0x00028828 PA_SU_LINE_STIPPLE_SCALE | ||
| 458 | 0x0002882C PA_SU_PRIM_FILTER_CNTL | ||
| 459 | 0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1 | ||
| 460 | 0x00028844 SQ_PGM_RESOURCES_PS | ||
| 461 | 0x00028848 SQ_PGM_RESOURCES_2_PS | ||
| 462 | 0x0002884C SQ_PGM_EXPORTS_PS | ||
| 463 | 0x00028860 SQ_PGM_RESOURCES_VS | ||
| 464 | 0x00028864 SQ_PGM_RESOURCES_2_VS | ||
| 465 | 0x00028878 SQ_PGM_RESOURCES_GS | ||
| 466 | 0x0002887C SQ_PGM_RESOURCES_2_GS | ||
| 467 | 0x00028890 SQ_PGM_RESOURCES_ES | ||
| 468 | 0x00028894 SQ_PGM_RESOURCES_2_ES | ||
| 469 | 0x000288A8 SQ_PGM_RESOURCES_FS | ||
| 470 | 0x000288BC SQ_PGM_RESOURCES_HS | ||
| 471 | 0x000288C0 SQ_PGM_RESOURCES_2_HS | ||
| 472 | 0x000288D4 SQ_PGM_RESOURCES_LS | ||
| 473 | 0x000288D8 SQ_PGM_RESOURCES_2_LS | ||
| 474 | 0x000288E8 SQ_LDS_ALLOC | ||
| 475 | 0x000288EC SQ_LDS_ALLOC_PS | ||
| 476 | 0x000288F0 SQ_VTX_SEMANTIC_CLEAR | ||
| 477 | 0x00028A00 PA_SU_POINT_SIZE | ||
| 478 | 0x00028A04 PA_SU_POINT_MINMAX | ||
| 479 | 0x00028A08 PA_SU_LINE_CNTL | ||
| 480 | 0x00028A0C PA_SC_LINE_STIPPLE | ||
| 481 | 0x00028A10 VGT_OUTPUT_PATH_CNTL | ||
| 482 | 0x00028A14 VGT_HOS_CNTL | ||
| 483 | 0x00028A18 VGT_HOS_MAX_TESS_LEVEL | ||
| 484 | 0x00028A1C VGT_HOS_MIN_TESS_LEVEL | ||
| 485 | 0x00028A20 VGT_HOS_REUSE_DEPTH | ||
| 486 | 0x00028A24 VGT_GROUP_PRIM_TYPE | ||
| 487 | 0x00028A28 VGT_GROUP_FIRST_DECR | ||
| 488 | 0x00028A2C VGT_GROUP_DECR | ||
| 489 | 0x00028A30 VGT_GROUP_VECT_0_CNTL | ||
| 490 | 0x00028A34 VGT_GROUP_VECT_1_CNTL | ||
| 491 | 0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL | ||
| 492 | 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL | ||
| 493 | 0x00028A40 VGT_GS_MODE | ||
| 494 | 0x00028A48 PA_SC_MODE_CNTL_0 | ||
| 495 | 0x00028A4C PA_SC_MODE_CNTL_1 | ||
| 496 | 0x00028A50 VGT_ENHANCE | ||
| 497 | 0x00028A54 VGT_GS_PER_ES | ||
| 498 | 0x00028A58 VGT_ES_PER_GS | ||
| 499 | 0x00028A5C VGT_GS_PER_VS | ||
| 500 | 0x00028A6C VGT_GS_OUT_PRIM_TYPE | ||
| 501 | 0x00028A84 VGT_PRIMITIVEID_EN | ||
| 502 | 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN | ||
| 503 | 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 | ||
| 504 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 | ||
| 505 | 0x00028AB4 VGT_REUSE_OFF | ||
| 506 | 0x00028AB8 VGT_VTX_CNT_EN | ||
| 507 | 0x00028ABC DB_HTILE_SURFACE | ||
| 508 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 | ||
| 509 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 | ||
| 510 | 0x00028AC8 DB_PRELOAD_CONTROL | ||
| 511 | 0x00028B38 VGT_GS_MAX_VERT_OUT | ||
| 512 | 0x00028B54 VGT_SHADER_STAGES_EN | ||
| 513 | 0x00028B58 VGT_LS_HS_CONFIG | ||
| 514 | 0x00028B5C VGT_LS_SIZE | ||
| 515 | 0x00028B60 VGT_HS_SIZE | ||
| 516 | 0x00028B64 VGT_LS_HS_ALLOC | ||
| 517 | 0x00028B68 VGT_HS_PATCH_CONST | ||
| 518 | 0x00028B6C VGT_TF_PARAM | ||
| 519 | 0x00028B70 DB_ALPHA_TO_MASK | ||
| 520 | 0x00028B74 VGT_DISPATCH_INITIATOR | ||
| 521 | 0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL | ||
| 522 | 0x00028B7C PA_SU_POLY_OFFSET_CLAMP | ||
| 523 | 0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE | ||
| 524 | 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET | ||
| 525 | 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE | ||
| 526 | 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET | ||
| 527 | 0x00028B74 VGT_GS_INSTANCE_CNT | ||
| 528 | 0x00028C00 PA_SC_LINE_CNTL | ||
| 529 | 0x00028C08 PA_SU_VTX_CNTL | ||
| 530 | 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ | ||
| 531 | 0x00028C10 PA_CL_GB_VERT_DISC_ADJ | ||
| 532 | 0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ | ||
| 533 | 0x00028C18 PA_CL_GB_HORZ_DISC_ADJ | ||
| 534 | 0x00028C1C PA_SC_AA_SAMPLE_LOCS_0 | ||
| 535 | 0x00028C20 PA_SC_AA_SAMPLE_LOCS_1 | ||
| 536 | 0x00028C24 PA_SC_AA_SAMPLE_LOCS_2 | ||
| 537 | 0x00028C28 PA_SC_AA_SAMPLE_LOCS_3 | ||
| 538 | 0x00028C2C PA_SC_AA_SAMPLE_LOCS_4 | ||
| 539 | 0x00028C30 PA_SC_AA_SAMPLE_LOCS_5 | ||
| 540 | 0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 | ||
| 541 | 0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 | ||
| 542 | 0x00028C3C PA_SC_AA_MASK | ||
| 543 | 0x00028C8C CB_COLOR0_CLEAR_WORD0 | ||
| 544 | 0x00028C90 CB_COLOR0_CLEAR_WORD1 | ||
| 545 | 0x00028C94 CB_COLOR0_CLEAR_WORD2 | ||
| 546 | 0x00028C98 CB_COLOR0_CLEAR_WORD3 | ||
| 547 | 0x00028CC8 CB_COLOR1_CLEAR_WORD0 | ||
| 548 | 0x00028CCC CB_COLOR1_CLEAR_WORD1 | ||
| 549 | 0x00028CD0 CB_COLOR1_CLEAR_WORD2 | ||
| 550 | 0x00028CD4 CB_COLOR1_CLEAR_WORD3 | ||
| 551 | 0x00028D04 CB_COLOR2_CLEAR_WORD0 | ||
| 552 | 0x00028D08 CB_COLOR2_CLEAR_WORD1 | ||
| 553 | 0x00028D0C CB_COLOR2_CLEAR_WORD2 | ||
| 554 | 0x00028D10 CB_COLOR2_CLEAR_WORD3 | ||
| 555 | 0x00028D40 CB_COLOR3_CLEAR_WORD0 | ||
| 556 | 0x00028D44 CB_COLOR3_CLEAR_WORD1 | ||
| 557 | 0x00028D48 CB_COLOR3_CLEAR_WORD2 | ||
| 558 | 0x00028D4C CB_COLOR3_CLEAR_WORD3 | ||
| 559 | 0x00028D7C CB_COLOR4_CLEAR_WORD0 | ||
| 560 | 0x00028D80 CB_COLOR4_CLEAR_WORD1 | ||
| 561 | 0x00028D84 CB_COLOR4_CLEAR_WORD2 | ||
| 562 | 0x00028D88 CB_COLOR4_CLEAR_WORD3 | ||
| 563 | 0x00028DB8 CB_COLOR5_CLEAR_WORD0 | ||
| 564 | 0x00028DBC CB_COLOR5_CLEAR_WORD1 | ||
| 565 | 0x00028DC0 CB_COLOR5_CLEAR_WORD2 | ||
| 566 | 0x00028DC4 CB_COLOR5_CLEAR_WORD3 | ||
| 567 | 0x00028DF4 CB_COLOR6_CLEAR_WORD0 | ||
| 568 | 0x00028DF8 CB_COLOR6_CLEAR_WORD1 | ||
| 569 | 0x00028DFC CB_COLOR6_CLEAR_WORD2 | ||
| 570 | 0x00028E00 CB_COLOR6_CLEAR_WORD3 | ||
| 571 | 0x00028E30 CB_COLOR7_CLEAR_WORD0 | ||
| 572 | 0x00028E34 CB_COLOR7_CLEAR_WORD1 | ||
| 573 | 0x00028E38 CB_COLOR7_CLEAR_WORD2 | ||
| 574 | 0x00028E3C CB_COLOR7_CLEAR_WORD3 | ||
| 575 | 0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0 | ||
| 576 | 0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1 | ||
| 577 | 0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2 | ||
| 578 | 0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3 | ||
| 579 | 0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4 | ||
| 580 | 0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5 | ||
| 581 | 0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6 | ||
| 582 | 0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7 | ||
| 583 | 0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8 | ||
| 584 | 0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9 | ||
| 585 | 0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10 | ||
| 586 | 0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11 | ||
| 587 | 0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12 | ||
| 588 | 0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13 | ||
| 589 | 0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14 | ||
| 590 | 0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15 | ||
| 591 | 0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0 | ||
| 592 | 0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1 | ||
| 593 | 0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2 | ||
| 594 | 0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3 | ||
| 595 | 0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4 | ||
| 596 | 0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5 | ||
| 597 | 0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6 | ||
| 598 | 0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7 | ||
| 599 | 0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8 | ||
| 600 | 0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9 | ||
| 601 | 0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10 | ||
| 602 | 0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11 | ||
| 603 | 0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12 | ||
| 604 | 0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13 | ||
| 605 | 0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14 | ||
| 606 | 0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15 | ||
| 607 | 0x0003CFF0 SQ_VTX_BASE_VTX_LOC | ||
| 608 | 0x0003CFF4 SQ_VTX_START_INST_LOC | ||
| 609 | 0x0003FF00 SQ_TEX_SAMPLER_CLEAR | ||
| 610 | 0x0003FF04 SQ_TEX_RESOURCE_CLEAR | ||
| 611 | 0x0003FF08 SQ_LOOP_BOOL_CLEAR | ||
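The new reg_srcs/evergreen file is input for the driver's build-time mkregtable helper, which turns lists like this into a generated header the command-stream checker consults: the first line carries the table name and a size parameter, and each remaining line whitelists one register as an offset/name pair; registers not listed must be validated specially or rejected. A minimal stand-alone reader for the format (the in-tree generator emits a C bitmap header rather than printing):

    #include <stdio.h>

    int main(void)
    {
        char table[64], name[128];
        unsigned param, offset;
        int n = 0;

        /* header: "<table> <hex parameter>", e.g. "evergreen 0x9400" */
        if (scanf("%63s %x", table, &param) != 2)
            return 1;
        /* body: "<hex offset> <REGISTER_NAME>" per line */
        while (scanf("%x %127s", &offset, name) == 2)
            n++;
        printf("table %s (0x%X): %d whitelisted registers\n",
               table, param, n);
        return 0;
    }
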
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9e4240b3bf0b..f454c9a5e7f2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -57,7 +57,9 @@ void rs400_gart_adjust_size(struct radeon_device *rdev) | |||
| 57 | } | 57 | } |
| 58 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { | 58 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { |
| 59 | /* FIXME: RS400 & RS480 seem to have an issue with GART size | 59 | /* FIXME: RS400 & RS480 seem to have an issue with GART size |
| 60 | * if 4G of system memory (needs more testing) */ | 60 | * if 4G of system memory (needs more testing) |
| 61 | */ | ||
| 62 | /* XXX is this still an issue with proper alignment? */ | ||
| 61 | rdev->mc.gtt_size = 32 * 1024 * 1024; | 63 | rdev->mc.gtt_size = 32 * 1024 * 1024; |
| 62 | DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); | 64 | DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); |
| 63 | } | 65 | } |
| @@ -263,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev) | |||
| 263 | r100_vram_init_sizes(rdev); | 265 | r100_vram_init_sizes(rdev); |
| 264 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; | 266 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
| 265 | radeon_vram_location(rdev, &rdev->mc, base); | 267 | radeon_vram_location(rdev, &rdev->mc, base); |
| 268 | rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; | ||
| 266 | radeon_gtt_location(rdev, &rdev->mc); | 269 | radeon_gtt_location(rdev, &rdev->mc); |
| 267 | radeon_update_bandwidth_info(rdev); | 270 | radeon_update_bandwidth_info(rdev); |
| 268 | } | 271 | } |
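rs400 (like rs690 below) now records gtt_base_align as gtt_size - 1, i.e. an alignment mask: the GART aperture on these IGPs apparently must start on a multiple of its own size, which may be the real issue behind the 32M workaround the new XXX comment questions. Mask-based placement in isolation, assuming a power-of-two GART size:

    #include <stdint.h>
    #include <stdio.h>

    /* align base up to a multiple of size; size must be a power of two */
    static uint64_t align_gtt_base(uint64_t base, uint64_t size)
    {
        uint64_t mask = size - 1; /* what gets stored in gtt_base_align */
        return (base + mask) & ~mask;
    }

    int main(void)
    {
        uint64_t gtt_size = 512ULL << 20; /* hypothetical 512M GART */
        printf("0x%llx\n", (unsigned long long)
               align_gtt_base(0x10001000ULL, gtt_size));
        return 0;
    }
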
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 79887cac5b54..6dc15ea8ba33 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev) | |||
| 74 | if (voltage->delay) | 74 | if (voltage->delay) |
| 75 | udelay(voltage->delay); | 75 | udelay(voltage->delay); |
| 76 | } | 76 | } |
| 77 | } | 77 | } else if (voltage->type == VOLTAGE_VDDC) |
| 78 | radeon_atom_set_voltage(rdev, voltage->vddc_id); | ||
| 78 | 79 | ||
| 79 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); | 80 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); |
| 80 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); | 81 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); |
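rs600_pm_misc previously programmed only GPIO-switched voltage; the new branch lets a power state that instead names a VDDC regulator id be set through the ATOM tables via radeon_atom_set_voltage(). The dispatch reduced to its shape, with stand-in setters:

    enum voltage_type { VOLTAGE_NONE, VOLTAGE_GPIO, VOLTAGE_VDDC };

    struct voltage {
        enum voltage_type type;
        unsigned gpio_value; /* VOLTAGE_GPIO */
        unsigned vddc_id;    /* VOLTAGE_VDDC */
    };

    static void set_gpio_voltage(unsigned v)  { (void)v; }
    static void atom_set_voltage(unsigned id) { (void)id; }

    static void apply_voltage(const struct voltage *v)
    {
        if (v->type == VOLTAGE_GPIO)
            set_gpio_voltage(v->gpio_value); /* plus delay handling */
        else if (v->type == VOLTAGE_VDDC)
            atom_set_voltage(v->vddc_id);
    }
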
| @@ -697,6 +698,7 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
| 697 | base = G_000004_MC_FB_START(base) << 16; | 698 | base = G_000004_MC_FB_START(base) << 16; |
| 698 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 699 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 699 | radeon_vram_location(rdev, &rdev->mc, base); | 700 | radeon_vram_location(rdev, &rdev->mc, base); |
| 701 | rdev->mc.gtt_base_align = 0; | ||
| 700 | radeon_gtt_location(rdev, &rdev->mc); | 702 | radeon_gtt_location(rdev, &rdev->mc); |
| 701 | radeon_update_bandwidth_info(rdev); | 703 | radeon_update_bandwidth_info(rdev); |
| 702 | } | 704 | } |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index bcc33195ebc2..ce4ecbe10816 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -79,7 +79,13 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
| 79 | tmp.full = dfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
| 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); |
| 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
| 82 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 82 | if (info->info.usK8MemoryClock) |
| 83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | ||
| 84 | else if (rdev->clock.default_mclk) { | ||
| 85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | ||
| 86 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
| 87 | } else | ||
| 88 | rdev->pm.igp_system_mclk.full = dfixed_const(400); | ||
| 83 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); | 89 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); |
| 84 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); | 90 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); |
| 85 | break; | 91 | break; |
| @@ -87,34 +93,31 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
| 87 | tmp.full = dfixed_const(100); | 93 | tmp.full = dfixed_const(100); |
| 88 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); | 94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); |
| 89 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
| 90 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | 96 | if (info->info_v2.ulBootUpUMAClock) |
| 97 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | ||
| 98 | else if (rdev->clock.default_mclk) | ||
| 99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | ||
| 100 | else | ||
| 101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); | ||
| 91 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | 102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
| 92 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); | 103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); |
| 93 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
| 94 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
| 95 | break; | 106 | break; |
| 96 | default: | 107 | default: |
| 97 | tmp.full = dfixed_const(100); | ||
| 98 | /* We assume the slowest possible clock, i.e. worst case */ | 108 | /* We assume the slowest possible clock, i.e. worst case */ |
| 99 | /* DDR 333Mhz */ | 109 | rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
| 100 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); | 110 | rdev->pm.igp_system_mclk.full = dfixed_const(200); |
| 101 | /* FIXME: system clock ? */ | 111 | rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
| 102 | rdev->pm.igp_system_mclk.full = dfixed_const(100); | ||
| 103 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
| 104 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); | ||
| 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); | 112 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
| 106 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 113 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
| 107 | break; | 114 | break; |
| 108 | } | 115 | } |
| 109 | } else { | 116 | } else { |
| 110 | tmp.full = dfixed_const(100); | ||
| 111 | /* We assume the slowest possible clock, i.e. worst case */ | 117 | /* We assume the slowest possible clock, i.e. worst case */ |
| 112 | /* DDR 333Mhz */ | 118 | rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
| 113 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); | 119 | rdev->pm.igp_system_mclk.full = dfixed_const(200); |
| 114 | /* FIXME: system clock ? */ | 120 | rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
| 115 | rdev->pm.igp_system_mclk.full = dfixed_const(100); | ||
| 116 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
| 117 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); | ||
| 118 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); | 121 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
| 119 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 122 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
| 120 | } | 123 | } |
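
The clock math above runs in 20.12 fixed point via the dfixed_* helpers from drm_fixed.h; the clocks appear to be in 10 kHz units, so dividing by 100 yields MHz (the 66700 fallback becomes 667 MHz). A minimal stand-alone sketch of that representation — an illustrative re-implementation, not the kernel macros:

	#include <stdint.h>
	#include <stdio.h>

	typedef struct { uint32_t full; } fixed20_12; /* 20 integer, 12 fraction bits */

	static fixed20_12 fx_const(uint32_t v)
	{
		fixed20_12 f = { .full = v << 12 };
		return f;
	}

	static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
	{
		fixed20_12 r;
		/* widen before the pre-shift so the numerator cannot overflow */
		r.full = (uint32_t)(((uint64_t)a.full << 12) / b.full);
		return r;
	}

	int main(void)
	{
		/* e.g. the 66700 fallback above divided by 100 -> 667.00 */
		fixed20_12 mclk = fx_div(fx_const(66700), fx_const(100));

		printf("%u.%02u\n", mclk.full >> 12,
		       (unsigned)(((uint64_t)(mclk.full & 0xfff) * 100) >> 12));
		return 0;
	}
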
| @@ -159,6 +162,7 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
| 159 | rs690_pm_info(rdev); | 162 | rs690_pm_info(rdev); |
| 160 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
| 161 | radeon_vram_location(rdev, &rdev->mc, base); | 164 | radeon_vram_location(rdev, &rdev->mc, base); |
| 165 | rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; | ||
| 162 | radeon_gtt_location(rdev, &rdev->mc); | 166 | radeon_gtt_location(rdev, &rdev->mc); |
| 163 | radeon_update_bandwidth_info(rdev); | 167 | radeon_update_bandwidth_info(rdev); |
| 164 | } | 168 | } |
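
The new gtt_base_align field is stored as alignment-minus-one (the full GTT size here, zero on rv515 below), which suggests it is consumed as a mask when the GTT base is placed. A sketch of that align-up idiom, under the assumption that the GTT size is a power of two:

	#include <stdint.h>

	/* Align addr up to a power-of-two boundary given "alignment - 1",
	 * i.e. a mask such as rdev->mc.gtt_base_align above. */
	static inline uint64_t align_up(uint64_t addr, uint64_t align_mask)
	{
		return (addr + align_mask) & ~align_mask;
	}

	/* e.g. align_up(0x1234000, 0xFFFFFF) == 0x2000000 for a 16 MiB-aligned GTT */
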
| @@ -228,10 +232,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 228 | fixed20_12 a, b, c; | 232 | fixed20_12 a, b, c; |
| 229 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | 233 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; |
| 230 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | 234 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; |
| 231 | /* FIXME: detect IGP with sideport memory, i don't think there is any | ||
| 232 | * such product available | ||
| 233 | */ | ||
| 234 | bool sideport = false; | ||
| 235 | 235 | ||
| 236 | if (!crtc->base.enabled) { | 236 | if (!crtc->base.enabled) { |
| 237 | /* FIXME: wouldn't it be better to set priority mark to maximum */ | 237 | /* FIXME: wouldn't it be better to set priority mark to maximum */ |
| @@ -300,7 +300,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
| 300 | 300 | ||
| 301 | /* Maximum bandwidth is the minimum bandwidth of all components */ | 301 | /* Maximum bandwidth is the minimum bandwidth of all components */ |
| 302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | 302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; |
| 303 | if (sideport) { | 303 | if (rdev->mc.igp_sideport_enabled) { |
| 304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | 304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && |
| 305 | rdev->pm.sideport_bandwidth.full) | 305 | rdev->pm.sideport_bandwidth.full) |
| 306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | 306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 7d9a7b0a180a..0c9c169a6852 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -195,6 +195,7 @@ void rv515_mc_init(struct radeon_device *rdev) | |||
| 195 | rv515_vram_get_type(rdev); | 195 | rv515_vram_get_type(rdev); |
| 196 | r100_vram_init_sizes(rdev); | 196 | r100_vram_init_sizes(rdev); |
| 197 | radeon_vram_location(rdev, &rdev->mc, 0); | 197 | radeon_vram_location(rdev, &rdev->mc, 0); |
| 198 | rdev->mc.gtt_base_align = 0; | ||
| 198 | if (!(rdev->flags & RADEON_IS_AGP)) | 199 | if (!(rdev->flags & RADEON_IS_AGP)) |
| 199 | radeon_gtt_location(rdev, &rdev->mc); | 200 | radeon_gtt_location(rdev, &rdev->mc); |
| 200 | radeon_update_bandwidth_info(rdev); | 201 | radeon_update_bandwidth_info(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 253f24aec031..b7fd82064922 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -44,7 +44,18 @@ void rv770_fini(struct radeon_device *rdev); | |||
| 44 | 44 | ||
| 45 | void rv770_pm_misc(struct radeon_device *rdev) | 45 | void rv770_pm_misc(struct radeon_device *rdev) |
| 46 | { | 46 | { |
| 47 | 47 | int req_ps_idx = rdev->pm.requested_power_state_index; | |
| 48 | int req_cm_idx = rdev->pm.requested_clock_mode_index; | ||
| 49 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; | ||
| 50 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | ||
| 51 | |||
| 52 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | ||
| 53 | if (voltage->voltage != rdev->pm.current_vddc) { | ||
| 54 | radeon_atom_set_voltage(rdev, voltage->voltage); | ||
| 55 | rdev->pm.current_vddc = voltage->voltage; | ||
| 56 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | ||
| 57 | } | ||
| 58 | } | ||
| 48 | } | 59 | } |
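
rv770_pm_misc() now programs VDDC only when the requested power state carries a non-zero software-controlled voltage that differs from the cached value, avoiding redundant ATOM table calls. The write-if-changed pattern in isolation (hypothetical names, not the driver API):

	struct pm_state {
		unsigned int current_vddc;	/* last value actually programmed */
	};

	static void set_vddc_cached(struct pm_state *pm, unsigned int vddc,
				    void (*hw_set_voltage)(unsigned int))
	{
		if (!vddc)			/* 0 == no voltage specified */
			return;
		if (vddc == pm->current_vddc)	/* hardware is already there */
			return;
		hw_set_voltage(vddc);
		pm->current_vddc = vddc;
	}
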
| 49 | 60 | ||
| 50 | /* | 61 | /* |
| @@ -213,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
| 213 | WREG32(MC_VM_FB_LOCATION, tmp); | 224 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 214 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 225 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 215 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 226 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 216 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 227 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
| 217 | if (rdev->flags & RADEON_IS_AGP) { | 228 | if (rdev->flags & RADEON_IS_AGP) { |
| 218 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 229 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
| 219 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 230 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 0d9a42c2394f..ca904799f018 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
| @@ -40,11 +40,13 @@ | |||
| 40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
| 41 | 41 | ||
| 42 | #include <asm/atomic.h> | 42 | #include <asm/atomic.h> |
| 43 | #include <asm/agp.h> | ||
| 44 | 43 | ||
| 45 | #include "ttm/ttm_bo_driver.h" | 44 | #include "ttm/ttm_bo_driver.h" |
| 46 | #include "ttm/ttm_page_alloc.h" | 45 | #include "ttm/ttm_page_alloc.h" |
| 47 | 46 | ||
| 47 | #ifdef TTM_HAS_AGP | ||
| 48 | #include <asm/agp.h> | ||
| 49 | #endif | ||
| 48 | 50 | ||
| 49 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) | 51 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) |
| 50 | #define SMALL_ALLOCATION 16 | 52 | #define SMALL_ALLOCATION 16 |
| @@ -77,7 +79,7 @@ struct ttm_page_pool { | |||
| 77 | /** | 79 | /** |
| 78 | * Limits for the pool. They are handled without locks because only place where | 80 | * Limits for the pool. They are handled without locks because only place where |
| 79 | * they may change is in sysfs store. They won't have immediate effect anyway | 81 | * they may change is in sysfs store. They won't have immediate effect anyway |
| 80 | * so forcing serialiazation to access them is pointless. | 82 | * so forcing serialization to access them is pointless. |
| 81 | */ | 83 | */ |
| 82 | 84 | ||
| 83 | struct ttm_pool_opts { | 85 | struct ttm_pool_opts { |
| @@ -104,7 +106,6 @@ struct ttm_pool_opts { | |||
| 104 | struct ttm_pool_manager { | 106 | struct ttm_pool_manager { |
| 105 | struct kobject kobj; | 107 | struct kobject kobj; |
| 106 | struct shrinker mm_shrink; | 108 | struct shrinker mm_shrink; |
| 107 | atomic_t page_alloc_inited; | ||
| 108 | struct ttm_pool_opts options; | 109 | struct ttm_pool_opts options; |
| 109 | 110 | ||
| 110 | union { | 111 | union { |
| @@ -142,7 +143,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj) | |||
| 142 | { | 143 | { |
| 143 | struct ttm_pool_manager *m = | 144 | struct ttm_pool_manager *m = |
| 144 | container_of(kobj, struct ttm_pool_manager, kobj); | 145 | container_of(kobj, struct ttm_pool_manager, kobj); |
| 145 | (void)m; | 146 | kfree(m); |
| 146 | } | 147 | } |
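
Replacing `(void)m;` with `kfree(m)` matters because the manager becomes heap-allocated (see the init changes below): an object embedding a kobject must be freed from its ktype release callback, once the last sysfs reference drops, never directly. A minimal sketch of that rule with a hypothetical type, not the TTM struct itself:

	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct example_manager {
		struct kobject kobj;	/* must be embedded, not pointed to */
		/* ... pool state ... */
	};

	static void example_manager_release(struct kobject *kobj)
	{
		struct example_manager *m =
			container_of(kobj, struct example_manager, kobj);
		kfree(m);	/* safe only here: refcount has hit zero */
	}

	static struct kobj_type example_manager_ktype = {
		.release = example_manager_release,
	};
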
| 147 | 148 | ||
| 148 | static ssize_t ttm_pool_store(struct kobject *kobj, | 149 | static ssize_t ttm_pool_store(struct kobject *kobj, |
| @@ -165,16 +166,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj, | |||
| 165 | m->options.small = val; | 166 | m->options.small = val; |
| 166 | else if (attr == &ttm_page_pool_alloc_size) { | 167 | else if (attr == &ttm_page_pool_alloc_size) { |
| 167 | if (val > NUM_PAGES_TO_ALLOC*8) { | 168 | if (val > NUM_PAGES_TO_ALLOC*8) { |
| 168 | printk(KERN_ERR "[ttm] Setting allocation size to %lu " | 169 | printk(KERN_ERR TTM_PFX |
| 169 | "is not allowed. Recomended size is " | 170 | "Setting allocation size to %lu " |
| 170 | "%lu\n", | 171 | "is not allowed. Recommended size is " |
| 171 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), | 172 | "%lu\n", |
| 172 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | 173 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), |
| 174 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
| 173 | return size; | 175 | return size; |
| 174 | } else if (val > NUM_PAGES_TO_ALLOC) { | 176 | } else if (val > NUM_PAGES_TO_ALLOC) { |
| 175 | printk(KERN_WARNING "[ttm] Setting allocation size to " | 177 | printk(KERN_WARNING TTM_PFX |
| 176 | "larger than %lu is not recomended.\n", | 178 | "Setting allocation size to " |
| 177 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | 179 | "larger than %lu is not recommended.\n", |
| 180 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
| 178 | } | 181 | } |
| 179 | m->options.alloc_size = val; | 182 | m->options.alloc_size = val; |
| 180 | } | 183 | } |
| @@ -212,9 +215,7 @@ static struct kobj_type ttm_pool_kobj_type = { | |||
| 212 | .default_attrs = ttm_pool_attrs, | 215 | .default_attrs = ttm_pool_attrs, |
| 213 | }; | 216 | }; |
| 214 | 217 | ||
| 215 | static struct ttm_pool_manager _manager = { | 218 | static struct ttm_pool_manager *_manager; |
| 216 | .page_alloc_inited = ATOMIC_INIT(0) | ||
| 217 | }; | ||
| 218 | 219 | ||
| 219 | #ifndef CONFIG_X86 | 220 | #ifndef CONFIG_X86 |
| 220 | static int set_pages_array_wb(struct page **pages, int addrinarray) | 221 | static int set_pages_array_wb(struct page **pages, int addrinarray) |
| @@ -269,7 +270,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags, | |||
| 269 | if (flags & TTM_PAGE_FLAG_DMA32) | 270 | if (flags & TTM_PAGE_FLAG_DMA32) |
| 270 | pool_index |= 0x2; | 271 | pool_index |= 0x2; |
| 271 | 272 | ||
| 272 | return &_manager.pools[pool_index]; | 273 | return &_manager->pools[pool_index]; |
| 273 | } | 274 | } |
| 274 | 275 | ||
| 275 | /* set memory back to wb and free the pages. */ | 276 | /* set memory back to wb and free the pages. */ |
| @@ -277,7 +278,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages) | |||
| 277 | { | 278 | { |
| 278 | unsigned i; | 279 | unsigned i; |
| 279 | if (set_pages_array_wb(pages, npages)) | 280 | if (set_pages_array_wb(pages, npages)) |
| 280 | printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n", | 281 | printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", |
| 281 | npages); | 282 | npages); |
| 282 | for (i = 0; i < npages; ++i) | 283 | for (i = 0; i < npages; ++i) |
| 283 | __free_page(pages[i]); | 284 | __free_page(pages[i]); |
| @@ -313,7 +314,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) | |||
| 313 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), | 314 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), |
| 314 | GFP_KERNEL); | 315 | GFP_KERNEL); |
| 315 | if (!pages_to_free) { | 316 | if (!pages_to_free) { |
| 316 | printk(KERN_ERR "Failed to allocate memory for pool free operation.\n"); | 317 | printk(KERN_ERR TTM_PFX |
| 318 | "Failed to allocate memory for pool free operation.\n"); | ||
| 317 | return 0; | 319 | return 0; |
| 318 | } | 320 | } |
| 319 | 321 | ||
| @@ -384,15 +386,15 @@ static int ttm_pool_get_num_unused_pages(void) | |||
| 384 | unsigned i; | 386 | unsigned i; |
| 385 | int total = 0; | 387 | int total = 0; |
| 386 | for (i = 0; i < NUM_POOLS; ++i) | 388 | for (i = 0; i < NUM_POOLS; ++i) |
| 387 | total += _manager.pools[i].npages; | 389 | total += _manager->pools[i].npages; |
| 388 | 390 | ||
| 389 | return total; | 391 | return total; |
| 390 | } | 392 | } |
| 391 | 393 | ||
| 392 | /** | 394 | /** |
| 393 | * Calback for mm to request pool to reduce number of page held. | 395 | * Callback for mm to request pool to reduce number of page held. |
| 394 | */ | 396 | */ |
| 395 | static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) | 397 | static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) |
| 396 | { | 398 | { |
| 397 | static atomic_t start_pool = ATOMIC_INIT(0); | 399 | static atomic_t start_pool = ATOMIC_INIT(0); |
| 398 | unsigned i; | 400 | unsigned i; |
| @@ -405,7 +407,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) | |||
| 405 | unsigned nr_free = shrink_pages; | 407 | unsigned nr_free = shrink_pages; |
| 406 | if (shrink_pages == 0) | 408 | if (shrink_pages == 0) |
| 407 | break; | 409 | break; |
| 408 | pool = &_manager.pools[(i + pool_offset)%NUM_POOLS]; | 410 | pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; |
| 409 | shrink_pages = ttm_page_pool_free(pool, nr_free); | 411 | shrink_pages = ttm_page_pool_free(pool, nr_free); |
| 410 | } | 412 | } |
| 411 | /* return estimated number of unused pages in pool */ | 413 | /* return estimated number of unused pages in pool */ |
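
Besides taking the new `struct shrinker *` argument, the callback keeps its round-robin fairness: an atomic counter rotates which pool gets shrunk first on each invocation, so no single pool is always drained before the others. The scheduling idea as plain C11, outside the kernel:

	#include <stdatomic.h>

	#define NUM_POOLS 4

	static _Atomic unsigned int start_pool;

	/* Ask each pool, in rotated order, to free pages until the request is
	 * met; returns how many requested pages could not be freed. */
	static unsigned int shrink_round_robin(
		unsigned int (*pool_free)(unsigned int idx, unsigned int nr),
		unsigned int nr)
	{
		unsigned int offset = atomic_fetch_add(&start_pool, 1) % NUM_POOLS;

		for (unsigned int i = 0; i < NUM_POOLS && nr; ++i)
			nr = pool_free((i + offset) % NUM_POOLS, nr);
		return nr;
	}
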
| @@ -433,14 +435,16 @@ static int ttm_set_pages_caching(struct page **pages, | |||
| 433 | case tt_uncached: | 435 | case tt_uncached: |
| 434 | r = set_pages_array_uc(pages, cpages); | 436 | r = set_pages_array_uc(pages, cpages); |
| 435 | if (r) | 437 | if (r) |
| 436 | printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n", | 438 | printk(KERN_ERR TTM_PFX |
| 437 | cpages); | 439 | "Failed to set %d pages to uc!\n", |
| 440 | cpages); | ||
| 438 | break; | 441 | break; |
| 439 | case tt_wc: | 442 | case tt_wc: |
| 440 | r = set_pages_array_wc(pages, cpages); | 443 | r = set_pages_array_wc(pages, cpages); |
| 441 | if (r) | 444 | if (r) |
| 442 | printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n", | 445 | printk(KERN_ERR TTM_PFX |
| 443 | cpages); | 446 | "Failed to set %d pages to wc!\n", |
| 447 | cpages); | ||
| 444 | break; | 448 | break; |
| 445 | default: | 449 | default: |
| 446 | break; | 450 | break; |
| @@ -458,7 +462,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, | |||
| 458 | struct page **failed_pages, unsigned cpages) | 462 | struct page **failed_pages, unsigned cpages) |
| 459 | { | 463 | { |
| 460 | unsigned i; | 464 | unsigned i; |
| 461 | /* Failed pages has to be reed */ | 465 | /* Failed pages have to be freed */ |
| 462 | for (i = 0; i < cpages; ++i) { | 466 | for (i = 0; i < cpages; ++i) { |
| 463 | list_del(&failed_pages[i]->lru); | 467 | list_del(&failed_pages[i]->lru); |
| 464 | __free_page(failed_pages[i]); | 468 | __free_page(failed_pages[i]); |
| @@ -485,7 +489,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | |||
| 485 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); | 489 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); |
| 486 | 490 | ||
| 487 | if (!caching_array) { | 491 | if (!caching_array) { |
| 488 | printk(KERN_ERR "[ttm] unable to allocate table for new pages."); | 492 | printk(KERN_ERR TTM_PFX |
| 493 | "Unable to allocate table for new pages."); | ||
| 489 | return -ENOMEM; | 494 | return -ENOMEM; |
| 490 | } | 495 | } |
| 491 | 496 | ||
| @@ -493,12 +498,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | |||
| 493 | p = alloc_page(gfp_flags); | 498 | p = alloc_page(gfp_flags); |
| 494 | 499 | ||
| 495 | if (!p) { | 500 | if (!p) { |
| 496 | printk(KERN_ERR "[ttm] unable to get page %u\n", i); | 501 | printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); |
| 497 | 502 | ||
| 498 | /* store already allocated pages in the pool after | 503 | /* store already allocated pages in the pool after |
| 499 | * setting the caching state */ | 504 | * setting the caching state */ |
| 500 | if (cpages) { | 505 | if (cpages) { |
| 501 | r = ttm_set_pages_caching(caching_array, cstate, cpages); | 506 | r = ttm_set_pages_caching(caching_array, |
| 507 | cstate, cpages); | ||
| 502 | if (r) | 508 | if (r) |
| 503 | ttm_handle_caching_state_failure(pages, | 509 | ttm_handle_caching_state_failure(pages, |
| 504 | ttm_flags, cstate, | 510 | ttm_flags, cstate, |
| @@ -569,10 +575,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
| 569 | 575 | ||
| 570 | /* If the allocation request is small and there are not enough | 576 | /* If the allocation request is small and there are not enough |
| 571 | * pages in the pool, we fill the pool first */ | 577 | * pages in the pool, we fill the pool first */ |
| 572 | if (count < _manager.options.small | 578 | if (count < _manager->options.small |
| 573 | && count > pool->npages) { | 579 | && count > pool->npages) { |
| 574 | struct list_head new_pages; | 580 | struct list_head new_pages; |
| 575 | unsigned alloc_size = _manager.options.alloc_size; | 581 | unsigned alloc_size = _manager->options.alloc_size; |
| 576 | 582 | ||
| 577 | /** | 583 | /** |
| 578 | * Can't change page caching if in irqsave context. We have to | 584 | * Can't change page caching if in irqsave context. We have to |
| @@ -590,7 +596,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
| 590 | ++pool->nrefills; | 596 | ++pool->nrefills; |
| 591 | pool->npages += alloc_size; | 597 | pool->npages += alloc_size; |
| 592 | } else { | 598 | } else { |
| 593 | printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool); | 599 | printk(KERN_ERR TTM_PFX |
| 600 | "Failed to fill pool (%p).", pool); | ||
| 594 | /* If we have any pages left put them to the pool. */ | 601 | /* If we have any pages left put them to the pool. */ |
| 595 | list_for_each_entry(p, &pool->list, lru) { | 602 | list_for_each_entry(p, &pool->list, lru) { |
| 596 | ++cpages; | 603 | ++cpages; |
| @@ -659,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 659 | { | 666 | { |
| 660 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
| 661 | struct page *p = NULL; | 668 | struct page *p = NULL; |
| 662 | int gfp_flags = 0; | 669 | int gfp_flags = GFP_USER; |
| 663 | int r; | 670 | int r; |
| 664 | 671 | ||
| 665 | /* set zero flag for page allocation if required */ | 672 | /* set zero flag for page allocation if required */ |
| @@ -671,13 +678,14 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 671 | if (flags & TTM_PAGE_FLAG_DMA32) | 678 | if (flags & TTM_PAGE_FLAG_DMA32) |
| 672 | gfp_flags |= GFP_DMA32; | 679 | gfp_flags |= GFP_DMA32; |
| 673 | else | 680 | else |
| 674 | gfp_flags |= __GFP_HIGHMEM; | 681 | gfp_flags |= GFP_HIGHUSER; |
| 675 | 682 | ||
| 676 | for (r = 0; r < count; ++r) { | 683 | for (r = 0; r < count; ++r) { |
| 677 | p = alloc_page(gfp_flags); | 684 | p = alloc_page(gfp_flags); |
| 678 | if (!p) { | 685 | if (!p) { |
| 679 | 686 | ||
| 680 | printk(KERN_ERR "[ttm] unable to allocate page."); | 687 | printk(KERN_ERR TTM_PFX |
| 688 | "Unable to allocate page."); | ||
| 681 | return -ENOMEM; | 689 | return -ENOMEM; |
| 682 | } | 690 | } |
| 683 | 691 | ||
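
Starting from GFP_USER instead of 0 gives the allocations proper user-page semantics (may sleep, honors cpuset placement via __GFP_HARDWALL), and GFP_HIGHUSER rather than a bare __GFP_HIGHMEM keeps those semantics when highmem pages are acceptable. The selection logic in isolation — a sketch, not the exact TTM function:

	#include <linux/gfp.h>
	#include <linux/types.h>

	static gfp_t example_ttm_gfp(bool zero, bool dma32)
	{
		gfp_t flags = GFP_USER;		/* sleepable user allocation */

		if (zero)
			flags |= __GFP_ZERO;
		if (dma32)
			flags |= GFP_DMA32;	/* device limited to 32-bit DMA */
		else
			flags |= GFP_HIGHUSER;	/* highmem pages are fine */
		return flags;
	}
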
| @@ -709,8 +717,9 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 709 | if (r) { | 717 | if (r) { |
| 710 | /* If there are any pages in the list, put them back to | 718 | /* If there are any pages in the list, put them back to |
| 711 | * the pool. */ | 719 | * the pool. */ |
| 712 | printk(KERN_ERR "[ttm] Failed to allocate extra pages " | 720 | printk(KERN_ERR TTM_PFX |
| 713 | "for large request."); | 721 | "Failed to allocate extra pages " |
| 722 | "for large request."); | ||
| 714 | ttm_put_pages(pages, 0, flags, cstate); | 723 | ttm_put_pages(pages, 0, flags, cstate); |
| 715 | return r; | 724 | return r; |
| 716 | } | 725 | } |
| @@ -749,8 +758,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, | |||
| 749 | pool->npages += page_count; | 758 | pool->npages += page_count; |
| 750 | /* Check that we don't go over the pool limit */ | 759 | /* Check that we don't go over the pool limit */ |
| 751 | page_count = 0; | 760 | page_count = 0; |
| 752 | if (pool->npages > _manager.options.max_size) { | 761 | if (pool->npages > _manager->options.max_size) { |
| 753 | page_count = pool->npages - _manager.options.max_size; | 762 | page_count = pool->npages - _manager->options.max_size; |
| 754 | /* free at least NUM_PAGES_TO_ALLOC number of pages | 763 | /* free at least NUM_PAGES_TO_ALLOC number of pages |
| 755 | * to reduce calls to set_memory_wb */ | 764 | * to reduce calls to set_memory_wb */ |
| 756 | if (page_count < NUM_PAGES_TO_ALLOC) | 765 | if (page_count < NUM_PAGES_TO_ALLOC) |
| @@ -775,33 +784,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, | |||
| 775 | int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | 784 | int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) |
| 776 | { | 785 | { |
| 777 | int ret; | 786 | int ret; |
| 778 | if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) | ||
| 779 | return 0; | ||
| 780 | 787 | ||
| 781 | printk(KERN_INFO "[ttm] Initializing pool allocator.\n"); | 788 | WARN_ON(_manager); |
| 789 | |||
| 790 | printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); | ||
| 791 | |||
| 792 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); | ||
| 782 | 793 | ||
| 783 | ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); | 794 | ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); |
| 784 | 795 | ||
| 785 | ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc"); | 796 | ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc"); |
| 786 | 797 | ||
| 787 | ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32, | 798 | ttm_page_pool_init_locked(&_manager->wc_pool_dma32, |
| 788 | "wc dma"); | 799 | GFP_USER | GFP_DMA32, "wc dma"); |
| 789 | 800 | ||
| 790 | ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32, | 801 | ttm_page_pool_init_locked(&_manager->uc_pool_dma32, |
| 791 | "uc dma"); | 802 | GFP_USER | GFP_DMA32, "uc dma"); |
| 792 | 803 | ||
| 793 | _manager.options.max_size = max_pages; | 804 | _manager->options.max_size = max_pages; |
| 794 | _manager.options.small = SMALL_ALLOCATION; | 805 | _manager->options.small = SMALL_ALLOCATION; |
| 795 | _manager.options.alloc_size = NUM_PAGES_TO_ALLOC; | 806 | _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; |
| 796 | 807 | ||
| 797 | kobject_init(&_manager.kobj, &ttm_pool_kobj_type); | 808 | ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, |
| 798 | ret = kobject_add(&_manager.kobj, &glob->kobj, "pool"); | 809 | &glob->kobj, "pool"); |
| 799 | if (unlikely(ret != 0)) { | 810 | if (unlikely(ret != 0)) { |
| 800 | kobject_put(&_manager.kobj); | 811 | kobject_put(&_manager->kobj); |
| 812 | _manager = NULL; | ||
| 801 | return ret; | 813 | return ret; |
| 802 | } | 814 | } |
| 803 | 815 | ||
| 804 | ttm_pool_mm_shrink_init(&_manager); | 816 | ttm_pool_mm_shrink_init(_manager); |
| 805 | 817 | ||
| 806 | return 0; | 818 | return 0; |
| 807 | } | 819 | } |
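
Two details of the new init path stand out: on kobject_init_and_add() failure the object must be dropped with kobject_put() so the release callback frees it (a plain kfree() would race with sysfs), and the kzalloc() result is used here without a NULL check, a later cleanup candidate. A defensive sketch reusing the hypothetical example_manager type from above:

	static struct example_manager *_example;

	static int example_manager_init(struct kobject *parent)
	{
		int ret;

		_example = kzalloc(sizeof(*_example), GFP_KERNEL);
		if (!_example)			/* worth checking; see above */
			return -ENOMEM;

		ret = kobject_init_and_add(&_example->kobj, &example_manager_ktype,
					   parent, "pool");
		if (ret) {
			kobject_put(&_example->kobj);	/* frees via release() */
			_example = NULL;
			return ret;
		}
		return 0;
	}
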
| @@ -810,16 +822,14 @@ void ttm_page_alloc_fini() | |||
| 810 | { | 822 | { |
| 811 | int i; | 823 | int i; |
| 812 | 824 | ||
| 813 | if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) | 825 | printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); |
| 814 | return; | 826 | ttm_pool_mm_shrink_fini(_manager); |
| 815 | |||
| 816 | printk(KERN_INFO "[ttm] Finilizing pool allocator.\n"); | ||
| 817 | ttm_pool_mm_shrink_fini(&_manager); | ||
| 818 | 827 | ||
| 819 | for (i = 0; i < NUM_POOLS; ++i) | 828 | for (i = 0; i < NUM_POOLS; ++i) |
| 820 | ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES); | 829 | ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); |
| 821 | 830 | ||
| 822 | kobject_put(&_manager.kobj); | 831 | kobject_put(&_manager->kobj); |
| 832 | _manager = NULL; | ||
| 823 | } | 833 | } |
| 824 | 834 | ||
| 825 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) | 835 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) |
| @@ -827,14 +837,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data) | |||
| 827 | struct ttm_page_pool *p; | 837 | struct ttm_page_pool *p; |
| 828 | unsigned i; | 838 | unsigned i; |
| 829 | char *h[] = {"pool", "refills", "pages freed", "size"}; | 839 | char *h[] = {"pool", "refills", "pages freed", "size"}; |
| 830 | if (atomic_read(&_manager.page_alloc_inited) == 0) { | 840 | if (!_manager) { |
| 831 | seq_printf(m, "No pool allocator running.\n"); | 841 | seq_printf(m, "No pool allocator running.\n"); |
| 832 | return 0; | 842 | return 0; |
| 833 | } | 843 | } |
| 834 | seq_printf(m, "%6s %12s %13s %8s\n", | 844 | seq_printf(m, "%6s %12s %13s %8s\n", |
| 835 | h[0], h[1], h[2], h[3]); | 845 | h[0], h[1], h[2], h[3]); |
| 836 | for (i = 0; i < NUM_POOLS; ++i) { | 846 | for (i = 0; i < NUM_POOLS; ++i) { |
| 837 | p = &_manager.pools[i]; | 847 | p = &_manager->pools[i]; |
| 838 | 848 | ||
| 839 | seq_printf(m, "%6s %12ld %13ld %8d\n", | 849 | seq_printf(m, "%6s %12ld %13ld %8d\n", |
| 840 | p->name, p->nrefills, | 850 | p->name, p->nrefills, |
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 1a3cb6816d1c..4505e17df3f5 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile | |||
| @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm | |||
| 4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ | 4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ |
| 5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ | 5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ |
| 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ | 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ |
| 7 | vmwgfx_overlay.o | 7 | vmwgfx_overlay.o vmwgfx_fence.o |
| 8 | 8 | ||
| 9 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o | 9 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0c9c0811f42d..b793c8c9acb3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -88,6 +88,9 @@ | |||
| 88 | #define DRM_IOCTL_VMW_FENCE_WAIT \ | 88 | #define DRM_IOCTL_VMW_FENCE_WAIT \ |
| 89 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ | 89 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ |
| 90 | struct drm_vmw_fence_wait_arg) | 90 | struct drm_vmw_fence_wait_arg) |
| 91 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | ||
| 92 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | ||
| 93 | struct drm_vmw_update_layout_arg) | ||
| 91 | 94 | ||
| 92 | 95 | ||
| 93 | /** | 96 | /** |
| @@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { | |||
| 135 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, | 138 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, |
| 136 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), | 139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), |
| 137 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, | 140 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, |
| 138 | DRM_AUTH | DRM_UNLOCKED) | 141 | DRM_AUTH | DRM_UNLOCKED), |
| 142 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, | ||
| 143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) | ||
| 139 | }; | 144 | }; |
| 140 | 145 | ||
| 141 | static struct pci_device_id vmw_pci_id_list[] = { | 146 | static struct pci_device_id vmw_pci_id_list[] = { |
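
The new ioctl follows the standard driver-private DRM pattern: a command offset above DRM_COMMAND_BASE, an _IOWR request number encoding the argument struct, and per-ioctl permission flags (DRM_MASTER | DRM_CONTROL_ALLOW here, so only a master or a control node may change the layout). The shape of such a definition, with a hypothetical offset and argument layout rather than the real vmwgfx uapi values:

	#include <stdint.h>
	#include <drm/drm.h>

	struct drm_example_update_layout_arg {
		uint32_t num_outputs;	/* entries in the rects array */
		uint32_t pad64;		/* keep the u64 below aligned */
		uint64_t rects;		/* user pointer to output rectangles */
	};

	#define DRM_EXAMPLE_UPDATE_LAYOUT 20	/* hypothetical command offset */
	#define DRM_IOCTL_EXAMPLE_UPDATE_LAYOUT				\
		DRM_IOWR(DRM_COMMAND_BASE + DRM_EXAMPLE_UPDATE_LAYOUT,	\
			 struct drm_example_update_layout_arg)
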
| @@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 318 | goto out_err3; | 323 | goto out_err3; |
| 319 | } | 324 | } |
| 320 | 325 | ||
| 326 | /* Need mmio memory to check for fifo pitchlock cap. */ | ||
| 327 | if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && | ||
| 328 | !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && | ||
| 329 | !vmw_fifo_have_pitchlock(dev_priv)) { | ||
| 330 | ret = -ENOSYS; | ||
| 331 | DRM_ERROR("Hardware has no pitchlock\n"); | ||
| 332 | goto out_err4; | ||
| 333 | } | ||
| 334 | |||
| 321 | dev_priv->tdev = ttm_object_device_init | 335 | dev_priv->tdev = ttm_object_device_init |
| 322 | (dev_priv->mem_global_ref.object, 12); | 336 | (dev_priv->mem_global_ref.object, 12); |
| 323 | 337 | ||
| @@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
| 399 | { | 413 | { |
| 400 | struct vmw_private *dev_priv = vmw_priv(dev); | 414 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 401 | 415 | ||
| 402 | DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); | ||
| 403 | |||
| 404 | unregister_pm_notifier(&dev_priv->pm_nb); | 416 | unregister_pm_notifier(&dev_priv->pm_nb); |
| 405 | 417 | ||
| 406 | vmw_fb_close(dev_priv); | 418 | vmw_fb_close(dev_priv); |
| @@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev, | |||
| 546 | { | 558 | { |
| 547 | struct vmw_master *vmaster; | 559 | struct vmw_master *vmaster; |
| 548 | 560 | ||
| 549 | DRM_INFO("Master create.\n"); | ||
| 550 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); | 561 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); |
| 551 | if (unlikely(vmaster == NULL)) | 562 | if (unlikely(vmaster == NULL)) |
| 552 | return -ENOMEM; | 563 | return -ENOMEM; |
| @@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev, | |||
| 563 | { | 574 | { |
| 564 | struct vmw_master *vmaster = vmw_master(master); | 575 | struct vmw_master *vmaster = vmw_master(master); |
| 565 | 576 | ||
| 566 | DRM_INFO("Master destroy.\n"); | ||
| 567 | master->driver_priv = NULL; | 577 | master->driver_priv = NULL; |
| 568 | kfree(vmaster); | 578 | kfree(vmaster); |
| 569 | } | 579 | } |
| @@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev, | |||
| 579 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 589 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
| 580 | int ret = 0; | 590 | int ret = 0; |
| 581 | 591 | ||
| 582 | DRM_INFO("Master set.\n"); | ||
| 583 | |||
| 584 | if (active) { | 592 | if (active) { |
| 585 | BUG_ON(active != &dev_priv->fbdev_master); | 593 | BUG_ON(active != &dev_priv->fbdev_master); |
| 586 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | 594 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
| @@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev, | |||
| 622 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 630 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
| 623 | int ret; | 631 | int ret; |
| 624 | 632 | ||
| 625 | DRM_INFO("Master drop.\n"); | ||
| 626 | |||
| 627 | /** | 633 | /** |
| 628 | * Make sure the master doesn't disappear while we have | 634 | * Make sure the master doesn't disappear while we have |
| 629 | * it locked. | 635 | * it locked. |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 356dc935ec13..eaad52095339 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -41,12 +41,13 @@ | |||
| 41 | 41 | ||
| 42 | #define VMWGFX_DRIVER_DATE "20100209" | 42 | #define VMWGFX_DRIVER_DATE "20100209" |
| 43 | #define VMWGFX_DRIVER_MAJOR 1 | 43 | #define VMWGFX_DRIVER_MAJOR 1 |
| 44 | #define VMWGFX_DRIVER_MINOR 0 | 44 | #define VMWGFX_DRIVER_MINOR 2 |
| 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
| 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
| 48 | #define VMWGFX_MAX_RELOCATIONS 2048 | 48 | #define VMWGFX_MAX_RELOCATIONS 2048 |
| 49 | #define VMWGFX_MAX_GMRS 2048 | 49 | #define VMWGFX_MAX_GMRS 2048 |
| 50 | #define VMWGFX_MAX_DISPLAYS 16 | ||
| 50 | 51 | ||
| 51 | struct vmw_fpriv { | 52 | struct vmw_fpriv { |
| 52 | struct drm_master *locked_master; | 53 | struct drm_master *locked_master; |
| @@ -102,6 +103,13 @@ struct vmw_surface { | |||
| 102 | struct vmw_cursor_snooper snooper; | 103 | struct vmw_cursor_snooper snooper; |
| 103 | }; | 104 | }; |
| 104 | 105 | ||
| 106 | struct vmw_fence_queue { | ||
| 107 | struct list_head head; | ||
| 108 | struct timespec lag; | ||
| 109 | struct timespec lag_time; | ||
| 110 | spinlock_t lock; | ||
| 111 | }; | ||
| 112 | |||
| 105 | struct vmw_fifo_state { | 113 | struct vmw_fifo_state { |
| 106 | unsigned long reserved_size; | 114 | unsigned long reserved_size; |
| 107 | __le32 *dynamic_buffer; | 115 | __le32 *dynamic_buffer; |
| @@ -115,6 +123,7 @@ struct vmw_fifo_state { | |||
| 115 | uint32_t capabilities; | 123 | uint32_t capabilities; |
| 116 | struct mutex fifo_mutex; | 124 | struct mutex fifo_mutex; |
| 117 | struct rw_semaphore rwsem; | 125 | struct rw_semaphore rwsem; |
| 126 | struct vmw_fence_queue fence_queue; | ||
| 118 | }; | 127 | }; |
| 119 | 128 | ||
| 120 | struct vmw_relocation { | 129 | struct vmw_relocation { |
| @@ -144,6 +153,14 @@ struct vmw_master { | |||
| 144 | struct ttm_lock lock; | 153 | struct ttm_lock lock; |
| 145 | }; | 154 | }; |
| 146 | 155 | ||
| 156 | struct vmw_vga_topology_state { | ||
| 157 | uint32_t width; | ||
| 158 | uint32_t height; | ||
| 159 | uint32_t primary; | ||
| 160 | uint32_t pos_x; | ||
| 161 | uint32_t pos_y; | ||
| 162 | }; | ||
| 163 | |||
| 147 | struct vmw_private { | 164 | struct vmw_private { |
| 148 | struct ttm_bo_device bdev; | 165 | struct ttm_bo_device bdev; |
| 149 | struct ttm_bo_global_ref bo_global_ref; | 166 | struct ttm_bo_global_ref bo_global_ref; |
| @@ -171,14 +188,19 @@ struct vmw_private { | |||
| 171 | * VGA registers. | 188 | * VGA registers. |
| 172 | */ | 189 | */ |
| 173 | 190 | ||
| 191 | struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; | ||
| 174 | uint32_t vga_width; | 192 | uint32_t vga_width; |
| 175 | uint32_t vga_height; | 193 | uint32_t vga_height; |
| 176 | uint32_t vga_depth; | 194 | uint32_t vga_depth; |
| 177 | uint32_t vga_bpp; | 195 | uint32_t vga_bpp; |
| 178 | uint32_t vga_pseudo; | 196 | uint32_t vga_pseudo; |
| 179 | uint32_t vga_red_mask; | 197 | uint32_t vga_red_mask; |
| 180 | uint32_t vga_blue_mask; | ||
| 181 | uint32_t vga_green_mask; | 198 | uint32_t vga_green_mask; |
| 199 | uint32_t vga_blue_mask; | ||
| 200 | uint32_t vga_bpl; | ||
| 201 | uint32_t vga_pitchlock; | ||
| 202 | |||
| 203 | uint32_t num_displays; | ||
| 182 | 204 | ||
| 183 | /* | 205 | /* |
| 184 | * Framebuffer info. | 206 | * Framebuffer info. |
| @@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | |||
| 393 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); | 415 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); |
| 394 | extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); | 416 | extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); |
| 395 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); | 417 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); |
| 418 | extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); | ||
| 396 | 419 | ||
| 397 | /** | 420 | /** |
| 398 | * TTM glue - vmwgfx_ttm_glue.c | 421 | * TTM glue - vmwgfx_ttm_glue.c |
| @@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
| 441 | uint32_t sequence, | 464 | uint32_t sequence, |
| 442 | bool interruptible, | 465 | bool interruptible, |
| 443 | unsigned long timeout); | 466 | unsigned long timeout); |
| 467 | extern void vmw_update_sequence(struct vmw_private *dev_priv, | ||
| 468 | struct vmw_fifo_state *fifo_state); | ||
| 469 | |||
| 470 | |||
| 471 | /** | ||
| 472 | * Rudimentary fence objects currently used only for throttling - | ||
| 473 | * vmwgfx_fence.c | ||
| 474 | */ | ||
| 475 | |||
| 476 | extern void vmw_fence_queue_init(struct vmw_fence_queue *queue); | ||
| 477 | extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue); | ||
| 478 | extern int vmw_fence_push(struct vmw_fence_queue *queue, | ||
| 479 | uint32_t sequence); | ||
| 480 | extern int vmw_fence_pull(struct vmw_fence_queue *queue, | ||
| 481 | uint32_t signaled_sequence); | ||
| 482 | extern int vmw_wait_lag(struct vmw_private *dev_priv, | ||
| 483 | struct vmw_fence_queue *queue, uint32_t us); | ||
| 444 | 484 | ||
| 445 | /** | 485 | /** |
| 446 | * Kernel framebuffer - vmwgfx_fb.c | 486 | * Kernel framebuffer - vmwgfx_fb.c |
| @@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
| 466 | struct ttm_object_file *tfile, | 506 | struct ttm_object_file *tfile, |
| 467 | struct ttm_buffer_object *bo, | 507 | struct ttm_buffer_object *bo, |
| 468 | SVGA3dCmdHeader *header); | 508 | SVGA3dCmdHeader *header); |
| 509 | void vmw_kms_write_svga(struct vmw_private *vmw_priv, | ||
| 510 | unsigned width, unsigned height, unsigned pitch, | ||
| 511 | unsigned bbp, unsigned depth); | ||
| 512 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | ||
| 513 | struct drm_file *file_priv); | ||
| 469 | 514 | ||
| 470 | /** | 515 | /** |
| 471 | * Overlay control - vmwgfx_overlay.c | 516 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index dbd36b8910cf..8e396850513c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
| 644 | ret = copy_from_user(cmd, user_cmd, arg->command_size); | 644 | ret = copy_from_user(cmd, user_cmd, arg->command_size); |
| 645 | 645 | ||
| 646 | if (unlikely(ret != 0)) { | 646 | if (unlikely(ret != 0)) { |
| 647 | ret = -EFAULT; | ||
| 647 | DRM_ERROR("Failed copying commands.\n"); | 648 | DRM_ERROR("Failed copying commands.\n"); |
| 648 | goto out_commit; | 649 | goto out_commit; |
| 649 | } | 650 | } |
| @@ -669,6 +670,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
| 669 | goto out_err; | 670 | goto out_err; |
| 670 | 671 | ||
| 671 | vmw_apply_relocations(sw_context); | 672 | vmw_apply_relocations(sw_context); |
| 673 | |||
| 674 | if (arg->throttle_us) { | ||
| 675 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue, | ||
| 676 | arg->throttle_us); | ||
| 677 | |||
| 678 | if (unlikely(ret != 0)) | ||
| 679 | goto out_err; | ||
| 680 | } | ||
| 681 | |||
| 672 | vmw_fifo_commit(dev_priv, arg->command_size); | 682 | vmw_fifo_commit(dev_priv, arg->command_size); |
| 673 | 683 | ||
| 674 | ret = vmw_fifo_send_fence(dev_priv, &sequence); | 684 | ret = vmw_fifo_send_fence(dev_priv, &sequence); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 7421aaad8d09..b0866f04ec76 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
| @@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, | |||
| 132 | return -EINVAL; | 132 | return -EINVAL; |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /* without multimon its hard to resize */ | 135 | if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && |
| 136 | if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && | 136 | (var->xoffset != 0 || var->yoffset != 0)) { |
| 137 | (var->xres != par->max_width || | 137 | DRM_ERROR("Can not handle panning without display topology\n"); |
| 138 | var->yres != par->max_height)) { | ||
| 139 | DRM_ERROR("Tried to resize, but we don't have multimon\n"); | ||
| 140 | return -EINVAL; | 138 | return -EINVAL; |
| 141 | } | 139 | } |
| 142 | 140 | ||
| 143 | if (var->xres > par->max_width || | 141 | if ((var->xoffset + var->xres) > par->max_width || |
| 144 | var->yres > par->max_height) { | 142 | (var->yoffset + var->yres) > par->max_height) { |
| 145 | DRM_ERROR("Requested geom can not fit in framebuffer\n"); | 143 | DRM_ERROR("Requested geom can not fit in framebuffer\n"); |
| 146 | return -EINVAL; | 144 | return -EINVAL; |
| 147 | } | 145 | } |
| @@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info) | |||
| 154 | struct vmw_fb_par *par = info->par; | 152 | struct vmw_fb_par *par = info->par; |
| 155 | struct vmw_private *vmw_priv = par->vmw_priv; | 153 | struct vmw_private *vmw_priv = par->vmw_priv; |
| 156 | 154 | ||
| 157 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | 155 | vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, |
| 158 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | 156 | info->fix.line_length, |
| 159 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | 157 | par->bpp, par->depth); |
| 160 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | 158 | if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { |
| 161 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 162 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 163 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 164 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 165 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 166 | |||
| 167 | vmw_write(vmw_priv, SVGA_REG_ENABLE, 1); | ||
| 168 | vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width); | ||
| 169 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height); | ||
| 170 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp); | ||
| 171 | vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth); | ||
| 172 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 173 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 174 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 175 | |||
| 176 | /* TODO check if pitch and offset changes */ | 159 | /* TODO check if pitch and offset changes */ |
| 177 | |||
| 178 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | 160 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); |
| 179 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | 161 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); |
| 180 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | 162 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); |
| @@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info) | |||
| 183 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); | 165 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); |
| 184 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); | 166 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); |
| 185 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 167 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
| 186 | } else { | ||
| 187 | vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres); | ||
| 188 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres); | ||
| 189 | |||
| 190 | /* TODO check if pitch and offset changes */ | ||
| 191 | } | 168 | } |
| 192 | 169 | ||
| 170 | /* This check is really helpful, since if it fails the user | ||
| 171 | * probably cannot see anything on the screen. | ||
| 172 | */ | ||
| 173 | WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0); | ||
| 174 | |||
| 193 | return 0; | 175 | return 0; |
| 194 | } | 176 | } |
| 195 | 177 | ||
| @@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
| 416 | unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; | 398 | unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; |
| 417 | int ret; | 399 | int ret; |
| 418 | 400 | ||
| 401 | /* XXX These shouldn't be hardcoded. */ | ||
| 419 | initial_width = 800; | 402 | initial_width = 800; |
| 420 | initial_height = 600; | 403 | initial_height = 600; |
| 421 | 404 | ||
| 422 | fb_bbp = 32; | 405 | fb_bbp = 32; |
| 423 | fb_depth = 24; | 406 | fb_depth = 24; |
| 424 | 407 | ||
| 425 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | 408 | /* XXX These shouldn't be hardcoded either. */ |
| 426 | fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); | 409 | fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); |
| 427 | fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); | 410 | fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); |
| 428 | } else { | ||
| 429 | fb_width = min(vmw_priv->fb_max_width, initial_width); | ||
| 430 | fb_height = min(vmw_priv->fb_max_height, initial_height); | ||
| 431 | } | ||
| 432 | 411 | ||
| 433 | initial_width = min(fb_width, initial_width); | 412 | initial_width = min(fb_width, initial_width); |
| 434 | initial_height = min(fb_height, initial_height); | 413 | initial_height = min(fb_height, initial_height); |
| 435 | 414 | ||
| 436 | vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); | 415 | fb_pitch = fb_width * fb_bbp / 8; |
| 437 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); | 416 | fb_size = fb_pitch * fb_height; |
| 438 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp); | ||
| 439 | vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth); | ||
| 440 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 441 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 442 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 443 | |||
| 444 | fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE); | ||
| 445 | fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); | 417 | fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); |
| 446 | fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE); | ||
| 447 | |||
| 448 | DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH)); | ||
| 449 | DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT)); | ||
| 450 | DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH)); | ||
| 451 | DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT)); | ||
| 452 | DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL)); | ||
| 453 | DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH)); | ||
| 454 | DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE)); | ||
| 455 | DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK)); | ||
| 456 | DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK)); | ||
| 457 | DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK)); | ||
| 458 | DRM_DEBUG("fb_offset 0x%08x\n", fb_offset); | ||
| 459 | DRM_DEBUG("fb_pitch %u\n", fb_pitch); | ||
| 460 | DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024); | ||
| 461 | 418 | ||
| 462 | info = framebuffer_alloc(sizeof(*par), device); | 419 | info = framebuffer_alloc(sizeof(*par), device); |
| 463 | if (!info) | 420 | if (!info) |
| @@ -659,6 +616,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
| 659 | goto err_unlock; | 616 | goto err_unlock; |
| 660 | 617 | ||
| 661 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | 618 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
| 619 | |||
| 620 | /* Could probably BUG_ON() here */ | ||
| 621 | WARN_ON(bo->offset != 0); | ||
| 622 | |||
| 662 | ttm_bo_unreserve(bo); | 623 | ttm_bo_unreserve(bo); |
| 663 | err_unlock: | 624 | err_unlock: |
| 664 | ttm_write_unlock(&vmw_priv->active_master->lock); | 625 | ttm_write_unlock(&vmw_priv->active_master->lock); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c new file mode 100644 index 000000000000..61eacc1b5ca3 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
| @@ -0,0 +1,173 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | |||
| 29 | #include "vmwgfx_drv.h" | ||
| 30 | |||
| 31 | struct vmw_fence { | ||
| 32 | struct list_head head; | ||
| 33 | uint32_t sequence; | ||
| 34 | struct timespec submitted; | ||
| 35 | }; | ||
| 36 | |||
| 37 | void vmw_fence_queue_init(struct vmw_fence_queue *queue) | ||
| 38 | { | ||
| 39 | INIT_LIST_HEAD(&queue->head); | ||
| 40 | queue->lag = ns_to_timespec(0); | ||
| 41 | getrawmonotonic(&queue->lag_time); | ||
| 42 | spin_lock_init(&queue->lock); | ||
| 43 | } | ||
| 44 | |||
| 45 | void vmw_fence_queue_takedown(struct vmw_fence_queue *queue) | ||
| 46 | { | ||
| 47 | struct vmw_fence *fence, *next; | ||
| 48 | |||
| 49 | spin_lock(&queue->lock); | ||
| 50 | list_for_each_entry_safe(fence, next, &queue->head, head) { | ||
| 51 | kfree(fence); | ||
| 52 | } | ||
| 53 | spin_unlock(&queue->lock); | ||
| 54 | } | ||
| 55 | |||
| 56 | int vmw_fence_push(struct vmw_fence_queue *queue, | ||
| 57 | uint32_t sequence) | ||
| 58 | { | ||
| 59 | struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); | ||
| 60 | |||
| 61 | if (unlikely(!fence)) | ||
| 62 | return -ENOMEM; | ||
| 63 | |||
| 64 | fence->sequence = sequence; | ||
| 65 | getrawmonotonic(&fence->submitted); | ||
| 66 | spin_lock(&queue->lock); | ||
| 67 | list_add_tail(&fence->head, &queue->head); | ||
| 68 | spin_unlock(&queue->lock); | ||
| 69 | |||
| 70 | return 0; | ||
| 71 | } | ||
| 72 | |||
| 73 | int vmw_fence_pull(struct vmw_fence_queue *queue, | ||
| 74 | uint32_t signaled_sequence) | ||
| 75 | { | ||
| 76 | struct vmw_fence *fence, *next; | ||
| 77 | struct timespec now; | ||
| 78 | bool updated = false; | ||
| 79 | |||
| 80 | spin_lock(&queue->lock); | ||
| 81 | getrawmonotonic(&now); | ||
| 82 | |||
| 83 | if (list_empty(&queue->head)) { | ||
| 84 | queue->lag = ns_to_timespec(0); | ||
| 85 | queue->lag_time = now; | ||
| 86 | updated = true; | ||
| 87 | goto out_unlock; | ||
| 88 | } | ||
| 89 | |||
| 90 | list_for_each_entry_safe(fence, next, &queue->head, head) { | ||
| 91 | if (signaled_sequence - fence->sequence > (1 << 30)) | ||
| 92 | continue; | ||
| 93 | |||
| 94 | queue->lag = timespec_sub(now, fence->submitted); | ||
| 95 | queue->lag_time = now; | ||
| 96 | updated = true; | ||
| 97 | list_del(&fence->head); | ||
| 98 | kfree(fence); | ||
| 99 | } | ||
| 100 | |||
| 101 | out_unlock: | ||
| 102 | spin_unlock(&queue->lock); | ||
| 103 | |||
| 104 | return (updated) ? 0 : -EBUSY; | ||
| 105 | } | ||
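
The `(1 << 30)` test in vmw_fence_pull() is the usual wrap-safe serial-number comparison: 32-bit sequences eventually wrap, so "already signaled" is decided by the unsigned distance between the two values rather than by a plain `<=`. The test in stand-alone form:

	#include <stdbool.h>
	#include <stdint.h>

	/* True when fence_seq is at or behind signaled, even across a wrap;
	 * a distance above 2^30 means fence_seq is actually still ahead. */
	static bool seq_passed(uint32_t signaled, uint32_t fence_seq)
	{
		return (uint32_t)(signaled - fence_seq) <= (1u << 30);
	}

	/* e.g. seq_passed(0x00000002, 0xfffffffe) is true: 2 - 0xfffffffe == 4 */
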
| 106 | |||
| 107 | static struct timespec vmw_timespec_add(struct timespec t1, | ||
| 108 | struct timespec t2) | ||
| 109 | { | ||
| 110 | t1.tv_sec += t2.tv_sec; | ||
| 111 | t1.tv_nsec += t2.tv_nsec; | ||
| 112 | if (t1.tv_nsec >= 1000000000L) { | ||
| 113 | t1.tv_sec += 1; | ||
| 114 | t1.tv_nsec -= 1000000000L; | ||
| 115 | } | ||
| 116 | |||
| 117 | return t1; | ||
| 118 | } | ||
| 119 | |||
| 120 | static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue) | ||
| 121 | { | ||
| 122 | struct timespec now; | ||
| 123 | |||
| 124 | spin_lock(&queue->lock); | ||
| 125 | getrawmonotonic(&now); | ||
| 126 | queue->lag = vmw_timespec_add(queue->lag, | ||
| 127 | timespec_sub(now, queue->lag_time)); | ||
| 128 | queue->lag_time = now; | ||
| 129 | spin_unlock(&queue->lock); | ||
| 130 | return queue->lag; | ||
| 131 | } | ||
| 132 | |||
| 133 | |||
| 134 | static bool vmw_lag_lt(struct vmw_fence_queue *queue, | ||
| 135 | uint32_t us) | ||
| 136 | { | ||
| 137 | struct timespec lag, cond; | ||
| 138 | |||
| 139 | cond = ns_to_timespec((s64) us * 1000); | ||
| 140 | lag = vmw_fifo_lag(queue); | ||
| 141 | return (timespec_compare(&lag, &cond) < 1); | ||
| 142 | } | ||
| 143 | |||
| 144 | int vmw_wait_lag(struct vmw_private *dev_priv, | ||
| 145 | struct vmw_fence_queue *queue, uint32_t us) | ||
| 146 | { | ||
| 147 | struct vmw_fence *fence; | ||
| 148 | uint32_t sequence; | ||
| 149 | int ret; | ||
| 150 | |||
| 151 | while (!vmw_lag_lt(queue, us)) { | ||
| 152 | spin_lock(&queue->lock); | ||
| 153 | if (list_empty(&queue->head)) | ||
| 154 | sequence = atomic_read(&dev_priv->fence_seq); | ||
| 155 | else { | ||
| 156 | fence = list_first_entry(&queue->head, | ||
| 157 | struct vmw_fence, head); | ||
| 158 | sequence = fence->sequence; | ||
| 159 | } | ||
| 160 | spin_unlock(&queue->lock); | ||
| 161 | |||
| 162 | ret = vmw_wait_fence(dev_priv, false, sequence, true, | ||
| 163 | 3*HZ); | ||
| 164 | |||
| 165 | if (unlikely(ret != 0)) | ||
| 166 | return ret; | ||
| 167 | |||
| 168 | (void) vmw_fence_pull(queue, sequence); | ||
| 169 | } | ||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | |||
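
The throttle bookkeeping above rests on two timespec helpers: addition with nanosecond carry (vmw_timespec_add) and comparison of the accumulated lag against a microsecond budget (vmw_lag_lt). A user-space check of that arithmetic, assuming at most one carry per addition just as the kernel code does:

	#include <stdio.h>
	#include <time.h>

	static struct timespec ts_add(struct timespec a, struct timespec b)
	{
		a.tv_sec += b.tv_sec;
		a.tv_nsec += b.tv_nsec;
		if (a.tv_nsec >= 1000000000L) {	/* at most one carry needed */
			a.tv_sec += 1;
			a.tv_nsec -= 1000000000L;
		}
		return a;
	}

	static int ts_before(struct timespec a, struct timespec b)
	{
		return a.tv_sec != b.tv_sec ? a.tv_sec < b.tv_sec
					    : a.tv_nsec < b.tv_nsec;
	}

	int main(void)
	{
		struct timespec lag = { 0, 900000000L };	/* 0.9 s of lag */
		struct timespec budget = { 0, 950000000L };	/* 950000 us */

		lag = ts_add(lag, (struct timespec){ 0, 200000000L });
		/* 1.1 s now exceeds the budget: the caller would block and
		 * retire fences until the lag drops again */
		printf("throttle=%d\n", !ts_before(lag, budget));
		return 0;
	}
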
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 39d43a01d846..e6a1eb7ea954 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
| 34 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 34 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| 35 | uint32_t fifo_min, hwversion; | 35 | uint32_t fifo_min, hwversion; |
| 36 | 36 | ||
| 37 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | ||
| 38 | return false; | ||
| 39 | |||
| 37 | fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 40 | fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
| 38 | if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) | 41 | if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) |
| 39 | return false; | 42 | return false; |
| @@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
| 48 | return true; | 51 | return true; |
| 49 | } | 52 | } |
| 50 | 53 | ||
| 54 | bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) | ||
| 55 | { | ||
| 56 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 57 | uint32_t caps; | ||
| 58 | |||
| 59 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | ||
| 60 | return false; | ||
| 61 | |||
| 62 | caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); | ||
| 63 | if (caps & SVGA_FIFO_CAP_PITCHLOCK) | ||
| 64 | return true; | ||
| 65 | |||
| 66 | return false; | ||
| 67 | } | ||
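
vmw_fifo_have_pitchlock() layers two checks: a device capability bit must confirm the extended FIFO exists before the FIFO's own capability word can be trusted. The same two-level probe in isolation, with made-up bit values rather than the real SVGA constants:

	#include <stdbool.h>
	#include <stdint.h>

	#define CAP_EXTENDED_FIFO	(1u << 5)	/* illustrative values, */
	#define FIFO_CAP_PITCHLOCK	(1u << 7)	/* not the SVGA ones */

	static bool have_pitchlock(uint32_t dev_caps, uint32_t fifo_caps)
	{
		if (!(dev_caps & CAP_EXTENDED_FIFO))
			return false;	/* FIFO capability word is not valid */
		return (fifo_caps & FIFO_CAP_PITCHLOCK) != 0;
	}
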
| 68 | |||
| 51 | int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | 69 | int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) |
| 52 | { | 70 | { |
| 53 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 71 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| @@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 120 | 138 | ||
| 121 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); | 139 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); |
| 122 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | 140 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); |
| 123 | 141 | vmw_fence_queue_init(&fifo->fence_queue); | |
| 124 | return vmw_fifo_send_fence(dev_priv, &dummy); | 142 | return vmw_fifo_send_fence(dev_priv, &dummy); |
| 125 | out_err: | 143 | out_err: |
| 126 | vfree(fifo->static_buffer); | 144 | vfree(fifo->static_buffer); |
| @@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 159 | dev_priv->enable_state); | 177 | dev_priv->enable_state); |
| 160 | 178 | ||
| 161 | mutex_unlock(&dev_priv->hw_mutex); | 179 | mutex_unlock(&dev_priv->hw_mutex); |
| 180 | vmw_fence_queue_takedown(&fifo->fence_queue); | ||
| 162 | 181 | ||
| 163 | if (likely(fifo->last_buffer != NULL)) { | 182 | if (likely(fifo->last_buffer != NULL)) { |
| 164 | vfree(fifo->last_buffer); | 183 | vfree(fifo->last_buffer); |
| @@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
| 484 | fifo_state->last_buffer_add = true; | 503 | fifo_state->last_buffer_add = true; |
| 485 | vmw_fifo_commit(dev_priv, bytes); | 504 | vmw_fifo_commit(dev_priv, bytes); |
| 486 | fifo_state->last_buffer_add = false; | 505 | fifo_state->last_buffer_add = false; |
| 506 | (void) vmw_fence_push(&fifo_state->fence_queue, *sequence); | ||
| 507 | vmw_update_sequence(dev_priv, fifo_state); | ||
| 487 | 508 | ||
| 488 | out_err: | 509 | out_err: |
| 489 | return ret; | 510 | return ret; |
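The new vmw_fifo_have_pitchlock() (and the hardened vmw_fifo_have_3d() above) use a two-level capability probe: a device-register capability bit first establishes that the extended FIFO, and therefore its capabilities word, exists at all; only then is the FIFO bit read. A hedged sketch of that shape, with illustrative bit values standing in for the real SVGA_* constants:

#include <stdbool.h>
#include <stdint.h>

#define CAP_EXTENDED_FIFO  (1u << 15)	/* illustrative value only */
#define FIFO_CAP_PITCHLOCK (1u << 7)	/* illustrative value only */

/* read_fifo_caps is a hypothetical stand-in for
 * ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES). */
static bool have_pitchlock(uint32_t device_caps,
			   uint32_t (*read_fifo_caps)(void))
{
	if (!(device_caps & CAP_EXTENDED_FIFO))
		return false;	/* no extended FIFO, no caps word */

	return (read_fifo_caps() & FIFO_CAP_PITCHLOCK) != 0;
}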
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 4d7cb5393860..e92298a6a383 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) | |||
| 64 | return (busy == 0); | 64 | return (busy == 0); |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | void vmw_update_sequence(struct vmw_private *dev_priv, | ||
| 68 | struct vmw_fifo_state *fifo_state) | ||
| 69 | { | ||
| 70 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 71 | |||
| 72 | uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | ||
| 73 | |||
| 74 | if (dev_priv->last_read_sequence != sequence) { | ||
| 75 | dev_priv->last_read_sequence = sequence; | ||
| 76 | vmw_fence_pull(&fifo_state->fence_queue, sequence); | ||
| 77 | } | ||
| 78 | } | ||
| 67 | 79 | ||
| 68 | bool vmw_fence_signaled(struct vmw_private *dev_priv, | 80 | bool vmw_fence_signaled(struct vmw_private *dev_priv, |
| 69 | uint32_t sequence) | 81 | uint32_t sequence) |
| 70 | { | 82 | { |
| 71 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 72 | struct vmw_fifo_state *fifo_state; | 83 | struct vmw_fifo_state *fifo_state; |
| 73 | bool ret; | 84 | bool ret; |
| 74 | 85 | ||
| 75 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | 86 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) |
| 76 | return true; | 87 | return true; |
| 77 | 88 | ||
| 78 | dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | 89 | fifo_state = &dev_priv->fifo; |
| 90 | vmw_update_sequence(dev_priv, fifo_state); | ||
| 79 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | 91 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) |
| 80 | return true; | 92 | return true; |
| 81 | 93 | ||
| 82 | fifo_state = &dev_priv->fifo; | ||
| 83 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && | 94 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && |
| 84 | vmw_fifo_idle(dev_priv, sequence)) | 95 | vmw_fifo_idle(dev_priv, sequence)) |
| 85 | return true; | 96 | return true; |
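vmw_fence_signaled() keeps its fast path wrap-safe: since unsigned 32-bit subtraction is modular, last_read_sequence - sequence is small exactly when sequence is at most VMW_FENCE_WRAP behind the last read value, even across a wrap of the counter. A self-contained demonstration (the window value here is illustrative, not the driver's):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FENCE_WRAP (1u << 24)	/* illustrative window */

static bool seq_passed(uint32_t last_read, uint32_t seq)
{
	return last_read - seq < FENCE_WRAP;	/* modular subtraction */
}

int main(void)
{
	assert(seq_passed(100, 90));		/* plainly older */
	assert(!seq_passed(100, 200));		/* not reached yet */
	assert(seq_passed(5, 0xfffffffbu));	/* older, across the wrap */
	return 0;
}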
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index bbc7c4c30bc7..437ac786277a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -30,6 +30,8 @@ | |||
| 30 | /* Might need a hrtimer here? */ | 30 | /* Might need a hrtimer here? */ |
| 31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) | 31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) |
| 32 | 32 | ||
| 33 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb); | ||
| 34 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb); | ||
| 33 | 35 | ||
| 34 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) | 36 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) |
| 35 | { | 37 | { |
| @@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, | |||
| 326 | struct vmw_framebuffer_surface { | 328 | struct vmw_framebuffer_surface { |
| 327 | struct vmw_framebuffer base; | 329 | struct vmw_framebuffer base; |
| 328 | struct vmw_surface *surface; | 330 | struct vmw_surface *surface; |
| 331 | struct vmw_dma_buffer *buffer; | ||
| 329 | struct delayed_work d_work; | 332 | struct delayed_work d_work; |
| 330 | struct mutex work_lock; | 333 | struct mutex work_lock; |
| 331 | bool present_fs; | 334 | bool present_fs; |
| @@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
| 500 | vfbs->base.base.depth = 24; | 503 | vfbs->base.base.depth = 24; |
| 501 | vfbs->base.base.width = width; | 504 | vfbs->base.base.width = width; |
| 502 | vfbs->base.base.height = height; | 505 | vfbs->base.base.height = height; |
| 503 | vfbs->base.pin = NULL; | 506 | vfbs->base.pin = &vmw_surface_dmabuf_pin; |
| 504 | vfbs->base.unpin = NULL; | 507 | vfbs->base.unpin = &vmw_surface_dmabuf_unpin; |
| 505 | vfbs->surface = surface; | 508 | vfbs->surface = surface; |
| 506 | mutex_init(&vfbs->work_lock); | 509 | mutex_init(&vfbs->work_lock); |
| 507 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); | 510 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); |
| @@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | |||
| 589 | .create_handle = vmw_framebuffer_create_handle, | 592 | .create_handle = vmw_framebuffer_create_handle, |
| 590 | }; | 593 | }; |
| 591 | 594 | ||
| 595 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | ||
| 596 | { | ||
| 597 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | ||
| 598 | struct vmw_framebuffer_surface *vfbs = | ||
| 599 | vmw_framebuffer_to_vfbs(&vfb->base); | ||
| 600 | unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height; | ||
| 601 | int ret; | ||
| 602 | |||
| 603 | vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL); | ||
| 604 | if (unlikely(vfbs->buffer == NULL)) | ||
| 605 | return -ENOMEM; | ||
| 606 | |||
| 607 | vmw_overlay_pause_all(dev_priv); | ||
| 608 | ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size, | ||
| 609 | &vmw_vram_ne_placement, | ||
| 610 | false, &vmw_dmabuf_bo_free); | ||
| 611 | vmw_overlay_resume_all(dev_priv); | ||
| 612 | |||
| 613 | return ret; | ||
| 614 | } | ||
| 615 | |||
| 616 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | ||
| 617 | { | ||
| 618 | struct ttm_buffer_object *bo; | ||
| 619 | struct vmw_framebuffer_surface *vfbs = | ||
| 620 | vmw_framebuffer_to_vfbs(&vfb->base); | ||
| 621 | |||
| 622 | bo = &vfbs->buffer->base; | ||
| 623 | ttm_bo_unref(&bo); | ||
| 624 | vfbs->buffer = NULL; | ||
| 625 | |||
| 626 | return 0; | ||
| 627 | } | ||
| 628 | |||
| 592 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | 629 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) |
| 593 | { | 630 | { |
| 594 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | 631 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); |
| @@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
| 596 | vmw_framebuffer_to_vfbd(&vfb->base); | 633 | vmw_framebuffer_to_vfbd(&vfb->base); |
| 597 | int ret; | 634 | int ret; |
| 598 | 635 | ||
| 636 | |||
| 599 | vmw_overlay_pause_all(dev_priv); | 637 | vmw_overlay_pause_all(dev_priv); |
| 600 | 638 | ||
| 601 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); | 639 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); |
| 602 | 640 | ||
| 603 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 604 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 605 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 606 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 607 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 608 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 609 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 610 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 611 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 612 | |||
| 613 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | ||
| 614 | vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); | ||
| 615 | vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); | ||
| 616 | vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); | ||
| 617 | vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); | ||
| 618 | vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 619 | vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 620 | vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 621 | } else | ||
| 622 | WARN_ON(true); | ||
| 623 | |||
| 624 | vmw_overlay_resume_all(dev_priv); | 641 | vmw_overlay_resume_all(dev_priv); |
| 625 | 642 | ||
| 643 | WARN_ON(ret != 0); | ||
| 644 | |||
| 626 | return 0; | 645 | return 0; |
| 627 | } | 646 | } |
| 628 | 647 | ||
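vmw_surface_dmabuf_unpin() releases the scratch VRAM buffer through TTM reference counting rather than kfree(): ttm_bo_unref() drops the reference and NULLs the local pointer, and on the last put the destroy callback handed to vmw_dmabuf_init() (vmw_dmabuf_bo_free) frees the containing structure. A hedged kernel-style sketch of the release idiom:

/* Assumes a structure embedding a ttm_buffer_object as its `base`,
 * as vmw_dma_buffer does. */
static void release_pinned_buffer(struct vmw_dma_buffer **buf)
{
	struct ttm_buffer_object *bo = &(*buf)->base;

	ttm_bo_unref(&bo);	/* may free *buf via the destroy callback */
	*buf = NULL;		/* never touch the stale pointer again */
}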
| @@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | |||
| 668 | 687 | ||
| 669 | /* XXX get the first 3 from the surface info */ | 688 | /* XXX get the first 3 from the surface info */ |
| 670 | vfbd->base.base.bits_per_pixel = 32; | 689 | vfbd->base.base.bits_per_pixel = 32; |
| 671 | vfbd->base.base.pitch = width * 32 / 4; | 690 | vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; |
| 672 | vfbd->base.base.depth = 24; | 691 | vfbd->base.base.depth = 24; |
| 673 | vfbd->base.base.width = width; | 692 | vfbd->base.base.width = width; |
| 674 | vfbd->base.base.height = height; | 693 | vfbd->base.base.height = height; |
| @@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) | |||
| 765 | dev->mode_config.funcs = &vmw_kms_funcs; | 784 | dev->mode_config.funcs = &vmw_kms_funcs; |
| 766 | dev->mode_config.min_width = 1; | 785 | dev->mode_config.min_width = 1; |
| 767 | dev->mode_config.min_height = 1; | 786 | dev->mode_config.min_height = 1; |
| 768 | dev->mode_config.max_width = dev_priv->fb_max_width; | 787 | /* assumed largest fb size */ |
| 769 | dev->mode_config.max_height = dev_priv->fb_max_height; | 788 | dev->mode_config.max_width = 8192; |
| 789 | dev->mode_config.max_height = 8192; | ||
| 770 | 790 | ||
| 771 | ret = vmw_kms_init_legacy_display_system(dev_priv); | 791 | ret = vmw_kms_init_legacy_display_system(dev_priv); |
| 772 | 792 | ||
| @@ -826,49 +846,141 @@ out: | |||
| 826 | return ret; | 846 | return ret; |
| 827 | } | 847 | } |
| 828 | 848 | ||
| 849 | void vmw_kms_write_svga(struct vmw_private *vmw_priv, | ||
| 850 | unsigned width, unsigned height, unsigned pitch, | ||
| 851 | unsigned bpp, unsigned depth) | ||
| 852 | { | ||
| 853 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | ||
| 854 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); | ||
| 855 | else if (vmw_fifo_have_pitchlock(vmw_priv)) | ||
| 856 | iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); | ||
| 857 | vmw_write(vmw_priv, SVGA_REG_WIDTH, width); | ||
| 858 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); | ||
| 859 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); | ||
| 860 | vmw_write(vmw_priv, SVGA_REG_DEPTH, depth); | ||
| 861 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 862 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 863 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 864 | } | ||
| 865 | |||
| 829 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) | 866 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) |
| 830 | { | 867 | { |
| 831 | /* | 868 | struct vmw_vga_topology_state *save; |
| 832 | * setup a single multimon monitor with the size | 869 | uint32_t i; |
| 833 | * of 0x0, this stops the UI from resizing when we | ||
| 834 | * change the framebuffer size | ||
| 835 | */ | ||
| 836 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 837 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 838 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 839 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 840 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 841 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 842 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 843 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 844 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 845 | } | ||
| 846 | 870 | ||
| 847 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); | 871 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); |
| 848 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); | 872 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); |
| 849 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); | ||
| 850 | vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); | 873 | vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); |
| 874 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); | ||
| 851 | vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); | 875 | vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); |
| 852 | vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); | 876 | vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); |
| 853 | vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); | ||
| 854 | vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); | 877 | vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); |
| 878 | vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); | ||
| 879 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | ||
| 880 | vmw_priv->vga_pitchlock = | ||
| 881 | vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); | ||
| 882 | else if (vmw_fifo_have_pitchlock(vmw_priv)) | ||
| 883 | vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + | ||
| 884 | SVGA_FIFO_PITCHLOCK); | ||
| 885 | |||
| 886 | if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) | ||
| 887 | return 0; | ||
| 855 | 888 | ||
| 889 | vmw_priv->num_displays = vmw_read(vmw_priv, | ||
| 890 | SVGA_REG_NUM_GUEST_DISPLAYS); | ||
| 891 | |||
| 892 | for (i = 0; i < vmw_priv->num_displays; ++i) { | ||
| 893 | save = &vmw_priv->vga_save[i]; | ||
| 894 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); | ||
| 895 | save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY); | ||
| 896 | save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X); | ||
| 897 | save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y); | ||
| 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); | ||
| 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); | ||
| 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 901 | } | ||
| 856 | return 0; | 902 | return 0; |
| 857 | } | 903 | } |
| 858 | 904 | ||
| 859 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv) | 905 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv) |
| 860 | { | 906 | { |
| 907 | struct vmw_vga_topology_state *save; | ||
| 908 | uint32_t i; | ||
| 909 | |||
| 861 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); | 910 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); |
| 862 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); | 911 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); |
| 863 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); | ||
| 864 | vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); | 912 | vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); |
| 913 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); | ||
| 865 | vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); | 914 | vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); |
| 866 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); | 915 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); |
| 867 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); | 916 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); |
| 868 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); | 917 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); |
| 918 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | ||
| 919 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, | ||
| 920 | vmw_priv->vga_pitchlock); | ||
| 921 | else if (vmw_fifo_have_pitchlock(vmw_priv)) | ||
| 922 | iowrite32(vmw_priv->vga_pitchlock, | ||
| 923 | vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); | ||
| 924 | |||
| 925 | if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) | ||
| 926 | return 0; | ||
| 869 | 927 | ||
| 870 | /* TODO check for multimon */ | 928 | for (i = 0; i < vmw_priv->num_displays; ++i) { |
| 871 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); | 929 | save = &vmw_priv->vga_save[i]; |
| 930 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); | ||
| 931 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary); | ||
| 932 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x); | ||
| 933 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y); | ||
| 934 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width); | ||
| 935 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height); | ||
| 936 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 937 | } | ||
| 872 | 938 | ||
| 873 | return 0; | 939 | return 0; |
| 874 | } | 940 | } |
| 941 | |||
| 942 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | ||
| 943 | struct drm_file *file_priv) | ||
| 944 | { | ||
| 945 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 946 | struct drm_vmw_update_layout_arg *arg = | ||
| 947 | (struct drm_vmw_update_layout_arg *)data; | ||
| 948 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 949 | void __user *user_rects; | ||
| 950 | struct drm_vmw_rect *rects; | ||
| 951 | unsigned rects_size; | ||
| 952 | int ret; | ||
| 953 | |||
| 954 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 955 | if (unlikely(ret != 0)) | ||
| 956 | return ret; | ||
| 957 | |||
| 958 | if (!arg->num_outputs) { | ||
| 959 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | ||
| 960 | vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect); | ||
| 961 | goto out_unlock; | ||
| 962 | } | ||
| 963 | |||
| 964 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | ||
| 965 | rects = kzalloc(rects_size, GFP_KERNEL); | ||
| 966 | if (unlikely(!rects)) { | ||
| 967 | ret = -ENOMEM; | ||
| 968 | goto out_unlock; | ||
| 969 | } | ||
| 970 | |||
| 971 | user_rects = (void __user *)(unsigned long)arg->rects; | ||
| 972 | ret = copy_from_user(rects, user_rects, rects_size); | ||
| 973 | if (unlikely(ret != 0)) { | ||
| 974 | DRM_ERROR("Failed to get rects.\n"); | ||
| 975 | ret = -EFAULT; | ||
| 976 | goto out_free; | ||
| 977 | } | ||
| 978 | |||
| 979 | vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects); | ||
| 980 | |||
| 981 | out_free: | ||
| 982 | kfree(rects); | ||
| 983 | out_unlock: | ||
| 984 | ttm_read_unlock(&vmaster->lock); | ||
| 985 | return ret; | ||
| 986 | } | ||
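vmw_kms_update_layout_ioctl() follows the usual kernel unwind ladder: take the lock, allocate, copy from userspace, and on failure jump to labels that release only what was already acquired, in reverse order. It also shows the copy_from_user() convention fixed throughout this series: the return value is the number of bytes left uncopied, never an errno, so a nonzero result must be translated to -EFAULT. A condensed sketch of the shape (the locking and consume helpers are hypothetical):

static int example_ioctl(void __user *user_ptr, size_t size)
{
	void *buf;
	int ret;

	ret = take_lock_interruptible();	/* hypothetical helper */
	if (ret)
		return ret;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	if (copy_from_user(buf, user_ptr, size)) {
		ret = -EFAULT;		/* never return the raw byte count */
		goto out_free;
	}

	ret = consume(buf);			/* hypothetical helper */
out_free:
	kfree(buf);
out_unlock:
	drop_lock();				/* hypothetical helper */
	return ret;
}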
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 8b95249f0531..8a398a0339b6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
| @@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
| 94 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | 94 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); |
| 95 | 95 | ||
| 96 | /* | 96 | /* |
| 97 | * Legacy display unit functions - vmwgfx_ldu.h | 97 | * Legacy display unit functions - vmwgfx_ldu.c |
| 98 | */ | 98 | */ |
| 99 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); | 99 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); |
| 100 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); | 100 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); |
| 101 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
| 102 | struct drm_vmw_rect *rects); | ||
| 101 | 103 | ||
| 102 | #endif | 104 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 90891593bf6c..cfaf690a5b2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
| @@ -38,6 +38,7 @@ struct vmw_legacy_display { | |||
| 38 | struct list_head active; | 38 | struct list_head active; |
| 39 | 39 | ||
| 40 | unsigned num_active; | 40 | unsigned num_active; |
| 41 | unsigned last_num_active; | ||
| 41 | 42 | ||
| 42 | struct vmw_framebuffer *fb; | 43 | struct vmw_framebuffer *fb; |
| 43 | }; | 44 | }; |
| @@ -48,9 +49,12 @@ struct vmw_legacy_display { | |||
| 48 | struct vmw_legacy_display_unit { | 49 | struct vmw_legacy_display_unit { |
| 49 | struct vmw_display_unit base; | 50 | struct vmw_display_unit base; |
| 50 | 51 | ||
| 51 | struct list_head active; | 52 | unsigned pref_width; |
| 53 | unsigned pref_height; | ||
| 54 | bool pref_active; | ||
| 55 | struct drm_display_mode *pref_mode; | ||
| 52 | 56 | ||
| 53 | unsigned unit; | 57 | struct list_head active; |
| 54 | }; | 58 | }; |
| 55 | 59 | ||
| 56 | static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) | 60 | static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) |
| @@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) | |||
| 88 | { | 92 | { |
| 89 | struct vmw_legacy_display *lds = dev_priv->ldu_priv; | 93 | struct vmw_legacy_display *lds = dev_priv->ldu_priv; |
| 90 | struct vmw_legacy_display_unit *entry; | 94 | struct vmw_legacy_display_unit *entry; |
| 91 | struct drm_crtc *crtc; | 95 | struct drm_framebuffer *fb = NULL; |
| 96 | struct drm_crtc *crtc = NULL; | ||
| 92 | int i = 0; | 97 | int i = 0; |
| 93 | 98 | ||
| 94 | /* to stop the screen from changing size on resize */ | 99 | /* If there is no display topology the host just assumes |
| 95 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); | 100 | * that the guest will set the same layout as the host. |
| 96 | for (i = 0; i < lds->num_active; i++) { | 101 | */ |
| 97 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); | 102 | if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) { |
| 98 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); | 103 | int w = 0, h = 0; |
| 99 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | 104 | list_for_each_entry(entry, &lds->active, active) { |
| 100 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | 105 | crtc = &entry->base.crtc; |
| 101 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); | 106 | w = max(w, crtc->x + crtc->mode.hdisplay); |
| 102 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | 107 | h = max(h, crtc->y + crtc->mode.vdisplay); |
| 103 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 108 | i++; |
| 109 | } | ||
| 110 | |||
| 111 | if (crtc == NULL) | ||
| 112 | return 0; | ||
| 113 | fb = entry->base.crtc.fb; | ||
| 114 | |||
| 115 | vmw_kms_write_svga(dev_priv, w, h, fb->pitch, | ||
| 116 | fb->bits_per_pixel, fb->depth); | ||
| 117 | |||
| 118 | return 0; | ||
| 104 | } | 119 | } |
| 105 | 120 | ||
| 106 | /* Now set the mode */ | 121 | if (!list_empty(&lds->active)) { |
| 107 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); | 122 | entry = list_entry(lds->active.next, typeof(*entry), active); |
| 123 | fb = entry->base.crtc.fb; | ||
| 124 | |||
| 125 | vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch, | ||
| 126 | fb->bits_per_pixel, fb->depth); | ||
| 127 | } | ||
| 128 | |||
| 129 | /* Make sure we always show something. */ | ||
| 130 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, | ||
| 131 | lds->num_active ? lds->num_active : 1); | ||
| 132 | |||
| 108 | i = 0; | 133 | i = 0; |
| 109 | list_for_each_entry(entry, &lds->active, active) { | 134 | list_for_each_entry(entry, &lds->active, active) { |
| 110 | crtc = &entry->base.crtc; | 135 | crtc = &entry->base.crtc; |
| @@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) | |||
| 120 | i++; | 145 | i++; |
| 121 | } | 146 | } |
| 122 | 147 | ||
| 148 | BUG_ON(i != lds->num_active); | ||
| 149 | |||
| 150 | lds->last_num_active = lds->num_active; | ||
| 151 | |||
| 123 | return 0; | 152 | return 0; |
| 124 | } | 153 | } |
| 125 | 154 | ||
| @@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv, | |||
| 130 | if (list_empty(&ldu->active)) | 159 | if (list_empty(&ldu->active)) |
| 131 | return 0; | 160 | return 0; |
| 132 | 161 | ||
| 162 | /* Must use list_del_init() so a later list_empty(&ldu->active) check works. */ | ||
| 133 | list_del_init(&ldu->active); | 163 | list_del_init(&ldu->active); |
| 134 | if (--(ld->num_active) == 0) { | 164 | if (--(ld->num_active) == 0) { |
| 135 | BUG_ON(!ld->fb); | 165 | BUG_ON(!ld->fb); |
| @@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv, | |||
| 149 | struct vmw_legacy_display_unit *entry; | 179 | struct vmw_legacy_display_unit *entry; |
| 150 | struct list_head *at; | 180 | struct list_head *at; |
| 151 | 181 | ||
| 182 | BUG_ON(!ld->num_active && ld->fb); | ||
| 183 | if (vfb != ld->fb) { | ||
| 184 | if (ld->fb && ld->fb->unpin) | ||
| 185 | ld->fb->unpin(ld->fb); | ||
| 186 | if (vfb->pin) | ||
| 187 | vfb->pin(vfb); | ||
| 188 | ld->fb = vfb; | ||
| 189 | } | ||
| 190 | |||
| 152 | if (!list_empty(&ldu->active)) | 191 | if (!list_empty(&ldu->active)) |
| 153 | return 0; | 192 | return 0; |
| 154 | 193 | ||
| 155 | at = &ld->active; | 194 | at = &ld->active; |
| 156 | list_for_each_entry(entry, &ld->active, active) { | 195 | list_for_each_entry(entry, &ld->active, active) { |
| 157 | if (entry->unit > ldu->unit) | 196 | if (entry->base.unit > ldu->base.unit) |
| 158 | break; | 197 | break; |
| 159 | 198 | ||
| 160 | at = &entry->active; | 199 | at = &entry->active; |
| 161 | } | 200 | } |
| 162 | 201 | ||
| 163 | list_add(&ldu->active, at); | 202 | list_add(&ldu->active, at); |
| 164 | if (ld->num_active++ == 0) { | 203 | |
| 165 | BUG_ON(ld->fb); | 204 | ld->num_active++; |
| 166 | if (vfb->pin) | ||
| 167 | vfb->pin(vfb); | ||
| 168 | ld->fb = vfb; | ||
| 169 | } | ||
| 170 | 205 | ||
| 171 | return 0; | 206 | return 0; |
| 172 | } | 207 | } |
| @@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
| 208 | 243 | ||
| 209 | /* ldu only supports one fb active at a time */ | 244 |
| 210 | if (dev_priv->ldu_priv->fb && vfb && | 245 | if (dev_priv->ldu_priv->fb && vfb && |
| 246 | !(dev_priv->ldu_priv->num_active == 1 && | ||
| 247 | !list_empty(&ldu->active)) && | ||
| 211 | dev_priv->ldu_priv->fb != vfb) { | 248 | dev_priv->ldu_priv->fb != vfb) { |
| 212 | DRM_ERROR("Multiple framebuffers not supported\n"); | 249 | DRM_ERROR("Multiple framebuffers not supported\n"); |
| 213 | return -EINVAL; | 250 | return -EINVAL; |
| @@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) | |||
| 300 | static enum drm_connector_status | 337 | static enum drm_connector_status |
| 301 | vmw_ldu_connector_detect(struct drm_connector *connector) | 338 | vmw_ldu_connector_detect(struct drm_connector *connector) |
| 302 | { | 339 | { |
| 303 | /* XXX vmwctrl should control connection status */ | 340 | if (vmw_connector_to_ldu(connector)->pref_active) |
| 304 | if (vmw_connector_to_ldu(connector)->base.unit == 0) | ||
| 305 | return connector_status_connected; | 341 | return connector_status_connected; |
| 306 | return connector_status_disconnected; | 342 | return connector_status_disconnected; |
| 307 | } | 343 | } |
| @@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { | |||
| 312 | 752, 800, 0, 480, 489, 492, 525, 0, | 348 | 752, 800, 0, 480, 489, 492, 525, 0, |
| 313 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | 349 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
| 314 | /* 800x600@60Hz */ | 350 | /* 800x600@60Hz */ |
| 315 | { DRM_MODE("800x600", | 351 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, |
| 316 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | 352 | 968, 1056, 0, 600, 601, 605, 628, 0, |
| 317 | 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, | 353 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
| 318 | 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 319 | /* 1024x768@60Hz */ | 354 | /* 1024x768@60Hz */ |
| 320 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | 355 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, |
| 321 | 1184, 1344, 0, 768, 771, 777, 806, 0, | 356 | 1184, 1344, 0, 768, 771, 777, 806, 0, |
| @@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { | |||
| 387 | static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | 422 | static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, |
| 388 | uint32_t max_width, uint32_t max_height) | 423 | uint32_t max_width, uint32_t max_height) |
| 389 | { | 424 | { |
| 425 | struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); | ||
| 390 | struct drm_device *dev = connector->dev; | 426 | struct drm_device *dev = connector->dev; |
| 391 | struct drm_display_mode *mode = NULL; | 427 | struct drm_display_mode *mode = NULL; |
| 428 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | ||
| 429 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
| 430 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 431 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) | ||
| 432 | }; | ||
| 392 | int i; | 433 | int i; |
| 393 | 434 | ||
| 435 | /* Add preferred mode */ | ||
| 436 | { | ||
| 437 | mode = drm_mode_duplicate(dev, &prefmode); | ||
| 438 | if (!mode) | ||
| 439 | return 0; | ||
| 440 | mode->hdisplay = ldu->pref_width; | ||
| 441 | mode->vdisplay = ldu->pref_height; | ||
| 442 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
| 443 | drm_mode_probed_add(connector, mode); | ||
| 444 | |||
| 445 | if (ldu->pref_mode) { | ||
| 446 | list_del_init(&ldu->pref_mode->head); | ||
| 447 | drm_mode_destroy(dev, ldu->pref_mode); | ||
| 448 | } | ||
| 449 | |||
| 450 | ldu->pref_mode = mode; | ||
| 451 | } | ||
| 452 | |||
| 394 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { | 453 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { |
| 395 | if (vmw_ldu_connector_builtin[i].hdisplay > max_width || | 454 | if (vmw_ldu_connector_builtin[i].hdisplay > max_width || |
| 396 | vmw_ldu_connector_builtin[i].vdisplay > max_height) | 455 | vmw_ldu_connector_builtin[i].vdisplay > max_height) |
| @@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
| 443 | if (!ldu) | 502 | if (!ldu) |
| 444 | return -ENOMEM; | 503 | return -ENOMEM; |
| 445 | 504 | ||
| 446 | ldu->unit = unit; | 505 | ldu->base.unit = unit; |
| 447 | crtc = &ldu->base.crtc; | 506 | crtc = &ldu->base.crtc; |
| 448 | encoder = &ldu->base.encoder; | 507 | encoder = &ldu->base.encoder; |
| 449 | connector = &ldu->base.connector; | 508 | connector = &ldu->base.connector; |
| 450 | 509 | ||
| 510 | INIT_LIST_HEAD(&ldu->active); | ||
| 511 | |||
| 512 | ldu->pref_active = (unit == 0); | ||
| 513 | ldu->pref_width = 800; | ||
| 514 | ldu->pref_height = 600; | ||
| 515 | ldu->pref_mode = NULL; | ||
| 516 | |||
| 451 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 517 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
| 452 | DRM_MODE_CONNECTOR_LVDS); | 518 | DRM_MODE_CONNECTOR_LVDS); |
| 453 | /* Initial status */ | 519 | connector->status = vmw_ldu_connector_detect(connector); |
| 454 | if (unit == 0) | ||
| 455 | connector->status = connector_status_connected; | ||
| 456 | else | ||
| 457 | connector->status = connector_status_disconnected; | ||
| 458 | 520 | ||
| 459 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | 521 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, |
| 460 | DRM_MODE_ENCODER_LVDS); | 522 | DRM_MODE_ENCODER_LVDS); |
| @@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
| 462 | encoder->possible_crtcs = (1 << unit); | 524 | encoder->possible_crtcs = (1 << unit); |
| 463 | encoder->possible_clones = 0; | 525 | encoder->possible_clones = 0; |
| 464 | 526 | ||
| 465 | INIT_LIST_HEAD(&ldu->active); | ||
| 466 | |||
| 467 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); | 527 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); |
| 468 | 528 | ||
| 469 | drm_connector_attach_property(connector, | 529 | drm_connector_attach_property(connector, |
| @@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
| 487 | 547 | ||
| 488 | INIT_LIST_HEAD(&dev_priv->ldu_priv->active); | 548 | INIT_LIST_HEAD(&dev_priv->ldu_priv->active); |
| 489 | dev_priv->ldu_priv->num_active = 0; | 549 | dev_priv->ldu_priv->num_active = 0; |
| 550 | dev_priv->ldu_priv->last_num_active = 0; | ||
| 490 | dev_priv->ldu_priv->fb = NULL; | 551 | dev_priv->ldu_priv->fb = NULL; |
| 491 | 552 | ||
| 492 | drm_mode_create_dirty_info_property(dev_priv->dev); | 553 | drm_mode_create_dirty_info_property(dev_priv->dev); |
| 493 | 554 | ||
| 494 | vmw_ldu_init(dev_priv, 0); | 555 | vmw_ldu_init(dev_priv, 0); |
| 495 | vmw_ldu_init(dev_priv, 1); | 556 | /* For old hardware without multimon, only enable one display. */
| 496 | vmw_ldu_init(dev_priv, 2); | 557 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
| 497 | vmw_ldu_init(dev_priv, 3); | 558 | vmw_ldu_init(dev_priv, 1); |
| 498 | vmw_ldu_init(dev_priv, 4); | 559 | vmw_ldu_init(dev_priv, 2); |
| 499 | vmw_ldu_init(dev_priv, 5); | 560 | vmw_ldu_init(dev_priv, 3); |
| 500 | vmw_ldu_init(dev_priv, 6); | 561 | vmw_ldu_init(dev_priv, 4); |
| 501 | vmw_ldu_init(dev_priv, 7); | 562 | vmw_ldu_init(dev_priv, 5); |
| 563 | vmw_ldu_init(dev_priv, 6); | ||
| 564 | vmw_ldu_init(dev_priv, 7); | ||
| 565 | } | ||
| 502 | 566 | ||
| 503 | return 0; | 567 | return 0; |
| 504 | } | 568 | } |
| @@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | |||
| 514 | 578 | ||
| 515 | return 0; | 579 | return 0; |
| 516 | } | 580 | } |
| 581 | |||
| 582 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
| 583 | struct drm_vmw_rect *rects) | ||
| 584 | { | ||
| 585 | struct drm_device *dev = dev_priv->dev; | ||
| 586 | struct vmw_legacy_display_unit *ldu; | ||
| 587 | struct drm_connector *con; | ||
| 588 | int i; | ||
| 589 | |||
| 590 | mutex_lock(&dev->mode_config.mutex); | ||
| 591 | |||
| 592 | #if 0 | ||
| 593 | DRM_INFO("%s: new layout ", __func__); | ||
| 594 | for (i = 0; i < (int)num; i++) | ||
| 595 | DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, | ||
| 596 | rects[i].w, rects[i].h); | ||
| 597 | DRM_INFO("\n"); | ||
| 598 | #else | ||
| 599 | (void)i; | ||
| 600 | #endif | ||
| 601 | |||
| 602 | list_for_each_entry(con, &dev->mode_config.connector_list, head) { | ||
| 603 | ldu = vmw_connector_to_ldu(con); | ||
| 604 | if (num > ldu->base.unit) { | ||
| 605 | ldu->pref_width = rects[ldu->base.unit].w; | ||
| 606 | ldu->pref_height = rects[ldu->base.unit].h; | ||
| 607 | ldu->pref_active = true; | ||
| 608 | } else { | ||
| 609 | ldu->pref_width = 800; | ||
| 610 | ldu->pref_height = 600; | ||
| 611 | ldu->pref_active = false; | ||
| 612 | } | ||
| 613 | con->status = vmw_ldu_connector_detect(con); | ||
| 614 | } | ||
| 615 | |||
| 616 | mutex_unlock(&dev->mode_config.mutex); | ||
| 617 | |||
| 618 | return 0; | ||
| 619 | } | ||
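vmw_ldu_add_active() keeps the active list ordered by unit number using the classic list_head insertion step: walk the list remembering the last node that sorts before the new one, then list_add() behind it. A self-contained sketch of the same idiom (structure name hypothetical):

#include <linux/list.h>

struct unit_entry {
	struct list_head active;
	unsigned int unit;
};

/* Insert `nu` so that the list at `head` stays sorted by ascending
 * unit number. */
static void insert_by_unit(struct list_head *head, struct unit_entry *nu)
{
	struct unit_entry *entry;
	struct list_head *at = head;

	list_for_each_entry(entry, head, active) {
		if (entry->unit > nu->unit)
			break;
		at = &entry->active;	/* last node sorting before nu */
	}

	list_add(&nu->active, at);	/* links nu immediately after at */
}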
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index ad566c85b075..df2036ed18d5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
| @@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, | |||
| 358 | if (stream->buf != buf) | 358 | if (stream->buf != buf) |
| 359 | stream->buf = vmw_dmabuf_reference(buf); | 359 | stream->buf = vmw_dmabuf_reference(buf); |
| 360 | stream->saved = *arg; | 360 | stream->saved = *arg; |
| 361 | /* stream is no longer stopped/paused */ | ||
| 362 | stream->paused = false; | ||
| 361 | 363 | ||
| 362 | return 0; | 364 | return 0; |
| 363 | } | 365 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index f8fbbc67a406..8612378b131e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 597 | 597 | ||
| 598 | ret = copy_from_user(srf->sizes, user_sizes, | 598 | ret = copy_from_user(srf->sizes, user_sizes, |
| 599 | srf->num_sizes * sizeof(*srf->sizes)); | 599 | srf->num_sizes * sizeof(*srf->sizes)); |
| 600 | if (unlikely(ret != 0)) | 600 | if (unlikely(ret != 0)) { |
| 601 | ret = -EFAULT; | ||
| 601 | goto out_err1; | 602 | goto out_err1; |
| 603 | } | ||
| 602 | 604 | ||
| 603 | if (srf->scanout && | 605 | if (srf->scanout && |
| 604 | srf->num_sizes == 1 && | 606 | srf->num_sizes == 1 && |
| @@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 697 | if (user_sizes) | 699 | if (user_sizes) |
| 698 | ret = copy_to_user(user_sizes, srf->sizes, | 700 | ret = copy_to_user(user_sizes, srf->sizes, |
| 699 | srf->num_sizes * sizeof(*srf->sizes)); | 701 | srf->num_sizes * sizeof(*srf->sizes)); |
| 700 | if (unlikely(ret != 0)) | 702 | if (unlikely(ret != 0)) { |
| 701 | DRM_ERROR("copy_to_user failed %p %u\n", | 703 | DRM_ERROR("copy_to_user failed %p %u\n", |
| 702 | user_sizes, srf->num_sizes); | 704 | user_sizes, srf->num_sizes); |
| 705 | ret = -EFAULT; | ||
| 706 | } | ||
| 703 | out_bad_resource: | 707 | out_bad_resource: |
| 704 | out_no_reference: | 708 | out_no_reference: |
| 705 | ttm_base_object_unref(&base); | 709 | ttm_base_object_unref(&base); |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 441e38c95a85..b87569e96b16 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
| @@ -1,12 +1,32 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * vgaarb.c | 2 | * vgaarb.c: Implements the VGA arbitration. For details refer to |
| 3 | * Documentation/vgaarbiter.txt | ||
| 4 | * | ||
| 3 | * | 5 | * |
| 4 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> | 6 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> |
| 5 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> | 7 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> |
| 6 | * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> | 8 | * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> |
| 7 | * | 9 | * |
| 8 | * Implements the VGA arbitration. For details refer to | 10 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 9 | * Documentation/vgaarbiter.txt | 11 | * copy of this software and associated documentation files (the "Software"), |
| 12 | * to deal in the Software without restriction, including without limitation | ||
| 13 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 14 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 15 | * Software is furnished to do so, subject to the following conditions: | ||
| 16 | * | ||
| 17 | * The above copyright notice and this permission notice (including the next | ||
| 18 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 19 | * Software. | ||
| 20 | * | ||
| 21 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 22 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 23 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 24 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 25 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 26 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 27 | * DEALINGS | ||
| 28 | * IN THE SOFTWARE. | ||
| 29 | * | ||
| 10 | */ | 30 | */ |
| 11 | 31 | ||
| 12 | #include <linux/module.h> | 32 | #include <linux/module.h> |
| @@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, | |||
| 155 | (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) | 175 | (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) |
| 156 | rsrc |= VGA_RSRC_LEGACY_MEM; | 176 | rsrc |= VGA_RSRC_LEGACY_MEM; |
| 157 | 177 | ||
| 158 | pr_devel("%s: %d\n", __func__, rsrc); | 178 | pr_debug("%s: %d\n", __func__, rsrc); |
| 159 | pr_devel("%s: owns: %d\n", __func__, vgadev->owns); | 179 | pr_debug("%s: owns: %d\n", __func__, vgadev->owns); |
| 160 | 180 | ||
| 161 | /* Check what resources we need to acquire */ | 181 | /* Check what resources we need to acquire */ |
| 162 | wants = rsrc & ~vgadev->owns; | 182 | wants = rsrc & ~vgadev->owns; |
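The context line above is the arbiter's core accounting step: the set of resources still to acquire is the bitmask difference of what was requested and what is already owned. A worked example:

unsigned int rsrc  = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
unsigned int owns  = VGA_RSRC_LEGACY_IO;
unsigned int wants = rsrc & ~owns;	/* == VGA_RSRC_LEGACY_MEM */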
| @@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) | |||
| 268 | { | 288 | { |
| 269 | unsigned int old_locks = vgadev->locks; | 289 | unsigned int old_locks = vgadev->locks; |
| 270 | 290 | ||
| 271 | pr_devel("%s\n", __func__); | 291 | pr_debug("%s\n", __func__); |
| 272 | 292 | ||
| 273 | /* Update our counters, and account for equivalent legacy resources | 293 | /* Update our counters, and account for equivalent legacy resources |
| 274 | * if we decode them | 294 | * if we decode them |
| @@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, | |||
| 575 | else | 595 | else |
| 576 | vga_decode_count--; | 596 | vga_decode_count--; |
| 577 | } | 597 | } |
| 598 | pr_debug("vgaarb: decoding count is now: %d\n", vga_decode_count); | ||
| 578 | } | 599 | } |
| 579 | 600 | ||
| 580 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | 601 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) |
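The pr_devel() to pr_debug() conversion in this file is not cosmetic: pr_devel() compiles to a no-op unless DEBUG is defined at build time, while pr_debug() additionally hooks into CONFIG_DYNAMIC_DEBUG, so these messages become switchable at runtime (for example via "echo 'file vgaarb.c +p' > <debugfs>/dynamic_debug/control" on dynamic-debug kernels). Usage is unchanged; an illustrative call:

pr_debug("vgaarb: wants %#x, owns %#x\n", wants, vgadev->owns);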
| @@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 831 | curr_pos += 5; | 852 | curr_pos += 5; |
| 832 | remaining -= 5; | 853 | remaining -= 5; |
| 833 | 854 | ||
| 834 | pr_devel("client 0x%p called 'lock'\n", priv); | 855 | pr_debug("client 0x%p called 'lock'\n", priv); |
| 835 | 856 | ||
| 836 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | 857 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { |
| 837 | ret_val = -EPROTO; | 858 | ret_val = -EPROTO; |
| @@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 867 | curr_pos += 7; | 888 | curr_pos += 7; |
| 868 | remaining -= 7; | 889 | remaining -= 7; |
| 869 | 890 | ||
| 870 | pr_devel("client 0x%p called 'unlock'\n", priv); | 891 | pr_debug("client 0x%p called 'unlock'\n", priv); |
| 871 | 892 | ||
| 872 | if (strncmp(curr_pos, "all", 3) == 0) | 893 | if (strncmp(curr_pos, "all", 3) == 0) |
| 873 | io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; | 894 | io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; |
| @@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 917 | curr_pos += 8; | 938 | curr_pos += 8; |
| 918 | remaining -= 8; | 939 | remaining -= 8; |
| 919 | 940 | ||
| 920 | pr_devel("client 0x%p called 'trylock'\n", priv); | 941 | pr_debug("client 0x%p called 'trylock'\n", priv); |
| 921 | 942 | ||
| 922 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | 943 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { |
| 923 | ret_val = -EPROTO; | 944 | ret_val = -EPROTO; |
| @@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 961 | 982 | ||
| 962 | curr_pos += 7; | 983 | curr_pos += 7; |
| 963 | remaining -= 7; | 984 | remaining -= 7; |
| 964 | pr_devel("client 0x%p called 'target'\n", priv); | 985 | pr_debug("client 0x%p called 'target'\n", priv); |
| 965 | /* if target is default */ | 986 | /* if target is default */ |
| 966 | if (!strncmp(curr_pos, "default", 7)) | 987 | if (!strncmp(curr_pos, "default", 7)) |
| 967 | pdev = pci_dev_get(vga_default_device()); | 988 | pdev = pci_dev_get(vga_default_device()); |
| @@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 971 | ret_val = -EPROTO; | 992 | ret_val = -EPROTO; |
| 972 | goto done; | 993 | goto done; |
| 973 | } | 994 | } |
| 974 | pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, | 995 | pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, |
| 975 | domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); | 996 | domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); |
| 976 | 997 | ||
| 977 | pbus = pci_find_bus(domain, bus); | 998 | pbus = pci_find_bus(domain, bus); |
| 978 | pr_devel("vgaarb: pbus %p\n", pbus); | 999 | pr_debug("vgaarb: pbus %p\n", pbus); |
| 979 | if (pbus == NULL) { | 1000 | if (pbus == NULL) { |
| 980 | pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", | 1001 | pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", |
| 981 | domain, bus); | 1002 | domain, bus); |
| @@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 983 | goto done; | 1004 | goto done; |
| 984 | } | 1005 | } |
| 985 | pdev = pci_get_slot(pbus, devfn); | 1006 | pdev = pci_get_slot(pbus, devfn); |
| 986 | pr_devel("vgaarb: pdev %p\n", pdev); | 1007 | pr_debug("vgaarb: pdev %p\n", pdev); |
| 987 | if (!pdev) { | 1008 | if (!pdev) { |
| 988 | pr_err("vgaarb: invalid PCI address %x:%x\n", | 1009 | pr_err("vgaarb: invalid PCI address %x:%x\n", |
| 989 | bus, devfn); | 1010 | bus, devfn); |
| @@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 993 | } | 1014 | } |
| 994 | 1015 | ||
| 995 | vgadev = vgadev_find(pdev); | 1016 | vgadev = vgadev_find(pdev); |
| 996 | pr_devel("vgaarb: vgadev %p\n", vgadev); | 1017 | pr_debug("vgaarb: vgadev %p\n", vgadev); |
| 997 | if (vgadev == NULL) { | 1018 | if (vgadev == NULL) { |
| 998 | pr_err("vgaarb: this pci device is not a vga device\n"); | 1019 | pr_err("vgaarb: this pci device is not a vga device\n"); |
| 999 | pci_dev_put(pdev); | 1020 | pci_dev_put(pdev); |
| @@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 1029 | } else if (strncmp(curr_pos, "decodes ", 8) == 0) { | 1050 | } else if (strncmp(curr_pos, "decodes ", 8) == 0) { |
| 1030 | curr_pos += 8; | 1051 | curr_pos += 8; |
| 1031 | remaining -= 8; | 1052 | remaining -= 8; |
| 1032 | pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); | 1053 | pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv); |
| 1033 | 1054 | ||
| 1034 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | 1055 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { |
| 1035 | ret_val = -EPROTO; | 1056 | ret_val = -EPROTO; |
| @@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) | |||
| 1058 | { | 1079 | { |
| 1059 | struct vga_arb_private *priv = file->private_data; | 1080 | struct vga_arb_private *priv = file->private_data; |
| 1060 | 1081 | ||
| 1061 | pr_devel("%s\n", __func__); | 1082 | pr_debug("%s\n", __func__); |
| 1062 | 1083 | ||
| 1063 | if (priv == NULL) | 1084 | if (priv == NULL) |
| 1064 | return -ENODEV; | 1085 | return -ENODEV; |
| @@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file) | |||
| 1071 | struct vga_arb_private *priv; | 1092 | struct vga_arb_private *priv; |
| 1072 | unsigned long flags; | 1093 | unsigned long flags; |
| 1073 | 1094 | ||
| 1074 | pr_devel("%s\n", __func__); | 1095 | pr_debug("%s\n", __func__); |
| 1075 | 1096 | ||
| 1076 | priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); | 1097 | priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); |
| 1077 | if (priv == NULL) | 1098 | if (priv == NULL) |
| @@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) | |||
| 1101 | unsigned long flags; | 1122 | unsigned long flags; |
| 1102 | int i; | 1123 | int i; |
| 1103 | 1124 | ||
| 1104 | pr_devel("%s\n", __func__); | 1125 | pr_debug("%s\n", __func__); |
| 1105 | 1126 | ||
| 1106 | if (priv == NULL) | 1127 | if (priv == NULL) |
| 1107 | return -ENODEV; | 1128 | return -ENODEV; |
| @@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) | |||
| 1112 | uc = &priv->cards[i]; | 1133 | uc = &priv->cards[i]; |
| 1113 | if (uc->pdev == NULL) | 1134 | if (uc->pdev == NULL) |
| 1114 | continue; | 1135 | continue; |
| 1115 | pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", | 1136 | pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n", |
| 1116 | uc->io_cnt, uc->mem_cnt); | 1137 | uc->io_cnt, uc->mem_cnt); |
| 1117 | while (uc->io_cnt--) | 1138 | while (uc->io_cnt--) |
| 1118 | vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); | 1139 | vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); |
| @@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action, | |||
| 1165 | struct pci_dev *pdev = to_pci_dev(dev); | 1186 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1166 | bool notify = false; | 1187 | bool notify = false; |
| 1167 | 1188 | ||
| 1168 | pr_devel("%s\n", __func__); | 1189 | pr_debug("%s\n", __func__); |
| 1169 | 1190 | ||
| 1170 | /* For now we're only interested in devices added and removed. I didn't | 1191 |
| 1171 | * test this thing here, so someone needs to double check for the | 1192 | * test this thing here, so someone needs to double check for the |
