Diffstat (limited to 'drivers/gpu/drm')
96 files changed, 1634 insertions, 694 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bea966f8ac84..0902d4460039 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -100,7 +100,10 @@ config DRM_I830 | |||
100 | config DRM_I915 | 100 | config DRM_I915 |
101 | tristate "i915 driver" | 101 | tristate "i915 driver" |
102 | depends on AGP_INTEL | 102 | depends on AGP_INTEL |
103 | # we need shmfs for the swappable backing store, and in particular | ||
104 | # the shmem_readpage() which depends upon tmpfs | ||
103 | select SHMEM | 105 | select SHMEM |
106 | select TMPFS | ||
104 | select DRM_KMS_HELPER | 107 | select DRM_KMS_HELPER |
105 | select FB_CFB_FILLRECT | 108 | select FB_CFB_FILLRECT |
106 | select FB_CFB_COPYAREA | 109 | select FB_CFB_COPYAREA |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2baa6708e44c..654faa803dcb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2674,3 +2674,23 @@ out: | |||
2674 | mutex_unlock(&dev->mode_config.mutex); | 2674 | mutex_unlock(&dev->mode_config.mutex); |
2675 | return ret; | 2675 | return ret; |
2676 | } | 2676 | } |
2677 | |||
2678 | void drm_mode_config_reset(struct drm_device *dev) | ||
2679 | { | ||
2680 | struct drm_crtc *crtc; | ||
2681 | struct drm_encoder *encoder; | ||
2682 | struct drm_connector *connector; | ||
2683 | |||
2684 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | ||
2685 | if (crtc->funcs->reset) | ||
2686 | crtc->funcs->reset(crtc); | ||
2687 | |||
2688 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) | ||
2689 | if (encoder->funcs->reset) | ||
2690 | encoder->funcs->reset(encoder); | ||
2691 | |||
2692 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
2693 | if (connector->funcs->reset) | ||
2694 | connector->funcs->reset(connector); | ||
2695 | } | ||
2696 | EXPORT_SYMBOL(drm_mode_config_reset); | ||
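The new drm_mode_config_reset() helper simply walks the CRTC, encoder and connector lists and calls each object's optional reset() hook, so a driver can drop stale hardware state in one place before re-probing after a suspend/resume or GPU reset. A minimal sketch of how a driver opts in (hypothetical mydrv_* names; the intel_crt.c hunk further down is the real in-tree example):

/* Hypothetical connector reset hook: forget cached hotplug state so the
 * next detect cycle starts from scratch. */
static void mydrv_connector_reset(struct drm_connector *connector)
{
	/* e.g. clear cached EDID / force-hotplug flags here */
}

static const struct drm_connector_funcs mydrv_connector_funcs = {
	.reset	= mydrv_connector_reset,
	.dpms	= drm_helper_connector_dpms,
	/* .detect, .fill_modes, ... as before */
};

/* ...and the driver's resume path calls the helper before re-enabling
 * interrupts and forcing the modeset back on, as i915_drm_thaw() does below. */
static void mydrv_resume_display(struct drm_device *dev)
{
	drm_mode_config_reset(dev);
	drm_irq_install(dev);
	drm_helper_resume_force_mode(dev);
}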
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 952b3d4fb2a6..92369655dca3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -343,13 +343,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
343 | struct drm_encoder *encoder; | 343 | struct drm_encoder *encoder; |
344 | bool ret = true; | 344 | bool ret = true; |
345 | 345 | ||
346 | adjusted_mode = drm_mode_duplicate(dev, mode); | ||
347 | |||
348 | crtc->enabled = drm_helper_crtc_in_use(crtc); | 346 | crtc->enabled = drm_helper_crtc_in_use(crtc); |
349 | |||
350 | if (!crtc->enabled) | 347 | if (!crtc->enabled) |
351 | return true; | 348 | return true; |
352 | 349 | ||
350 | adjusted_mode = drm_mode_duplicate(dev, mode); | ||
351 | |||
353 | saved_hwmode = crtc->hwmode; | 352 | saved_hwmode = crtc->hwmode; |
354 | saved_mode = crtc->mode; | 353 | saved_mode = crtc->mode; |
355 | saved_x = crtc->x; | 354 | saved_x = crtc->x; |
@@ -437,10 +436,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
437 | */ | 436 | */ |
438 | drm_calc_timestamping_constants(crtc); | 437 | drm_calc_timestamping_constants(crtc); |
439 | 438 | ||
440 | /* XXX free adjustedmode */ | ||
441 | drm_mode_destroy(dev, adjusted_mode); | ||
442 | /* FIXME: add subpixel order */ | 439 | /* FIXME: add subpixel order */ |
443 | done: | 440 | done: |
441 | drm_mode_destroy(dev, adjusted_mode); | ||
444 | if (!ret) { | 442 | if (!ret) { |
445 | crtc->hwmode = saved_hwmode; | 443 | crtc->hwmode = saved_hwmode; |
446 | crtc->mode = saved_mode; | 444 | crtc->mode = saved_mode; |
@@ -497,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
497 | 495 | ||
498 | crtc_funcs = set->crtc->helper_private; | 496 | crtc_funcs = set->crtc->helper_private; |
499 | 497 | ||
498 | if (!set->mode) | ||
499 | set->fb = NULL; | ||
500 | |||
500 | if (set->fb) { | 501 | if (set->fb) { |
501 | DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", | 502 | DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", |
502 | set->crtc->base.id, set->fb->base.id, | 503 | set->crtc->base.id, set->fb->base.id, |
503 | (int)set->num_connectors, set->x, set->y); | 504 | (int)set->num_connectors, set->x, set->y); |
504 | } else { | 505 | } else { |
505 | DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n", | 506 | DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); |
506 | set->crtc->base.id, (int)set->num_connectors, | 507 | set->mode = NULL; |
507 | set->x, set->y); | 508 | set->num_connectors = 0; |
508 | } | 509 | } |
509 | 510 | ||
510 | dev = set->crtc->dev; | 511 | dev = set->crtc->dev; |
@@ -649,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
649 | mode_changed = true; | 650 | mode_changed = true; |
650 | 651 | ||
651 | if (mode_changed) { | 652 | if (mode_changed) { |
652 | set->crtc->enabled = (set->mode != NULL); | 653 | set->crtc->enabled = drm_helper_crtc_in_use(set->crtc); |
653 | if (set->mode != NULL) { | 654 | if (set->crtc->enabled) { |
654 | DRM_DEBUG_KMS("attempting to set mode from" | 655 | DRM_DEBUG_KMS("attempting to set mode from" |
655 | " userspace\n"); | 656 | " userspace\n"); |
656 | drm_mode_debug_printmodeline(set->mode); | 657 | drm_mode_debug_printmodeline(set->mode); |
@@ -665,6 +666,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
665 | ret = -EINVAL; | 666 | ret = -EINVAL; |
666 | goto fail; | 667 | goto fail; |
667 | } | 668 | } |
669 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
670 | for (i = 0; i < set->num_connectors; i++) { | ||
671 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
672 | drm_get_connector_name(set->connectors[i])); | ||
673 | set->connectors[i]->dpms = DRM_MODE_DPMS_ON; | ||
674 | } | ||
668 | } | 675 | } |
669 | drm_helper_disable_unused_functions(dev); | 676 | drm_helper_disable_unused_functions(dev); |
670 | } else if (fb_changed) { | 677 | } else if (fb_changed) { |
@@ -681,12 +688,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
681 | goto fail; | 688 | goto fail; |
682 | } | 689 | } |
683 | } | 690 | } |
684 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
685 | for (i = 0; i < set->num_connectors; i++) { | ||
686 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
687 | drm_get_connector_name(set->connectors[i])); | ||
688 | set->connectors[i]->dpms = DRM_MODE_DPMS_ON; | ||
689 | } | ||
690 | 691 | ||
691 | kfree(save_connectors); | 692 | kfree(save_connectors); |
692 | kfree(save_encoders); | 693 | kfree(save_encoders); |
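With the NULL-mode handling added above, user space no longer needs to pass a framebuffer or connector list when it only wants a CRTC off; the kernel now clears set->fb, set->mode and set->num_connectors itself. A hypothetical libdrm caller (assuming the standard drmModeSetCrtc() entry point):

#include <stdint.h>
#include <xf86drmMode.h>

/* Turn a CRTC off: NULL mode, no fb, no connectors. */
int disable_crtc(int fd, uint32_t crtc_id)
{
	return drmModeSetCrtc(fd, crtc_id, 0 /* fb_id */, 0, 0,
			      NULL /* connectors */, 0, NULL /* mode */);
}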
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1ce9d98..f73ef4390db6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
672 | struct drm_crtc_helper_funcs *crtc_funcs; | 672 | struct drm_crtc_helper_funcs *crtc_funcs; |
673 | u16 *red, *green, *blue, *transp; | 673 | u16 *red, *green, *blue, *transp; |
674 | struct drm_crtc *crtc; | 674 | struct drm_crtc *crtc; |
675 | int i, rc = 0; | 675 | int i, j, rc = 0; |
676 | int start; | 676 | int start; |
677 | 677 | ||
678 | for (i = 0; i < fb_helper->crtc_count; i++) { | 678 | for (i = 0; i < fb_helper->crtc_count; i++) { |
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
685 | transp = cmap->transp; | 685 | transp = cmap->transp; |
686 | start = cmap->start; | 686 | start = cmap->start; |
687 | 687 | ||
688 | for (i = 0; i < cmap->len; i++) { | 688 | for (j = 0; j < cmap->len; j++) { |
689 | u16 hred, hgreen, hblue, htransp = 0xffff; | 689 | u16 hred, hgreen, hblue, htransp = 0xffff; |
690 | 690 | ||
691 | hred = *red++; | 691 | hred = *red++; |
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf379bb5..be9a9c07d152 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -283,17 +283,18 @@ int drm_vma_info(struct seq_file *m, void *data) | |||
283 | #endif | 283 | #endif |
284 | 284 | ||
285 | mutex_lock(&dev->struct_mutex); | 285 | mutex_lock(&dev->struct_mutex); |
286 | seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n", | 286 | seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n", |
287 | atomic_read(&dev->vma_count), | 287 | atomic_read(&dev->vma_count), |
288 | high_memory, (u64)virt_to_phys(high_memory)); | 288 | high_memory, (void *)virt_to_phys(high_memory)); |
289 | 289 | ||
290 | list_for_each_entry(pt, &dev->vmalist, head) { | 290 | list_for_each_entry(pt, &dev->vmalist, head) { |
291 | vma = pt->vma; | 291 | vma = pt->vma; |
292 | if (!vma) | 292 | if (!vma) |
293 | continue; | 293 | continue; |
294 | seq_printf(m, | 294 | seq_printf(m, |
295 | "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", | 295 | "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000", |
296 | pt->pid, vma->vm_start, vma->vm_end, | 296 | pt->pid, |
297 | (void *)vma->vm_start, (void *)vma->vm_end, | ||
297 | vma->vm_flags & VM_READ ? 'r' : '-', | 298 | vma->vm_flags & VM_READ ? 'r' : '-', |
298 | vma->vm_flags & VM_WRITE ? 'w' : '-', | 299 | vma->vm_flags & VM_WRITE ? 'w' : '-', |
299 | vma->vm_flags & VM_EXEC ? 'x' : '-', | 300 | vma->vm_flags & VM_EXEC ? 'x' : '-', |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0054e957203f..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
164 | * available. In that case we can't account for this and just | 164 | * available. In that case we can't account for this and just |
165 | * hope for the best. | 165 | * hope for the best. |
166 | */ | 166 | */ |
167 | if ((vblrc > 0) && (abs(diff_ns) > 1000000)) | 167 | if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { |
168 | atomic_inc(&dev->_vblank_count[crtc]); | 168 | atomic_inc(&dev->_vblank_count[crtc]); |
169 | smp_mb__after_atomic_inc(); | ||
170 | } | ||
169 | 171 | ||
170 | /* Invalidate all timestamps while vblank irq's are off. */ | 172 | /* Invalidate all timestamps while vblank irq's are off. */ |
171 | clear_vblank_timestamps(dev, crtc); | 173 | clear_vblank_timestamps(dev, crtc); |
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) | |||
491 | /* Dot clock in Hz: */ | 493 | /* Dot clock in Hz: */ |
492 | dotclock = (u64) crtc->hwmode.clock * 1000; | 494 | dotclock = (u64) crtc->hwmode.clock * 1000; |
493 | 495 | ||
496 | /* Fields of interlaced scanout modes are only half a frame duration. | ||
497 | * Double the dotclock to get half the frame-/line-/pixel duration. | ||
498 | */ | ||
499 | if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) | ||
500 | dotclock *= 2; | ||
501 | |||
494 | /* Valid dotclock? */ | 502 | /* Valid dotclock? */ |
495 | if (dotclock > 0) { | 503 | if (dotclock > 0) { |
496 | /* Convert scanline length in pixels and video dot clock to | 504 | /* Convert scanline length in pixels and video dot clock to |
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
603 | return -EAGAIN; | 611 | return -EAGAIN; |
604 | } | 612 | } |
605 | 613 | ||
606 | /* Don't know yet how to handle interlaced or | ||
607 | * double scan modes. Just no-op for now. | ||
608 | */ | ||
609 | if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { | ||
610 | DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); | ||
611 | return -ENOTSUPP; | ||
612 | } | ||
613 | |||
614 | /* Get current scanout position with system timestamp. | 614 | /* Get current scanout position with system timestamp. |
615 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times | 615 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times |
616 | * if single query takes longer than max_error nanoseconds. | 616 | * if single query takes longer than max_error nanoseconds. |
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) | |||
858 | if (rc) { | 858 | if (rc) { |
859 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; | 859 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; |
860 | vblanktimestamp(dev, crtc, tslot) = t_vblank; | 860 | vblanktimestamp(dev, crtc, tslot) = t_vblank; |
861 | smp_wmb(); | ||
862 | } | 861 | } |
863 | 862 | ||
863 | smp_mb__before_atomic_inc(); | ||
864 | atomic_add(diff, &dev->_vblank_count[crtc]); | 864 | atomic_add(diff, &dev->_vblank_count[crtc]); |
865 | smp_mb__after_atomic_inc(); | ||
865 | } | 866 | } |
866 | 867 | ||
867 | /** | 868 | /** |
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, | |||
1011 | struct drm_file *file_priv) | 1012 | struct drm_file *file_priv) |
1012 | { | 1013 | { |
1013 | struct drm_modeset_ctl *modeset = data; | 1014 | struct drm_modeset_ctl *modeset = data; |
1014 | int crtc, ret = 0; | 1015 | int ret = 0; |
1016 | unsigned int crtc; | ||
1015 | 1017 | ||
1016 | /* If drm_vblank_init() hasn't been called yet, just no-op */ | 1018 | /* If drm_vblank_init() hasn't been called yet, just no-op */ |
1017 | if (!dev->num_crtcs) | 1019 | if (!dev->num_crtcs) |
@@ -1250,7 +1252,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) | |||
1250 | * Drivers should call this routine in their vblank interrupt handlers to | 1252 | * Drivers should call this routine in their vblank interrupt handlers to |
1251 | * update the vblank counter and send any signals that may be pending. | 1253 | * update the vblank counter and send any signals that may be pending. |
1252 | */ | 1254 | */ |
1253 | void drm_handle_vblank(struct drm_device *dev, int crtc) | 1255 | bool drm_handle_vblank(struct drm_device *dev, int crtc) |
1254 | { | 1256 | { |
1255 | u32 vblcount; | 1257 | u32 vblcount; |
1256 | s64 diff_ns; | 1258 | s64 diff_ns; |
@@ -1258,7 +1260,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1258 | unsigned long irqflags; | 1260 | unsigned long irqflags; |
1259 | 1261 | ||
1260 | if (!dev->num_crtcs) | 1262 | if (!dev->num_crtcs) |
1261 | return; | 1263 | return false; |
1262 | 1264 | ||
1263 | /* Need timestamp lock to prevent concurrent execution with | 1265 | /* Need timestamp lock to prevent concurrent execution with |
1264 | * vblank enable/disable, as this would cause inconsistent | 1266 | * vblank enable/disable, as this would cause inconsistent |
@@ -1269,7 +1271,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1269 | /* Vblank irq handling disabled. Nothing to do. */ | 1271 | /* Vblank irq handling disabled. Nothing to do. */ |
1270 | if (!dev->vblank_enabled[crtc]) { | 1272 | if (!dev->vblank_enabled[crtc]) { |
1271 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); | 1273 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); |
1272 | return; | 1274 | return false; |
1273 | } | 1275 | } |
1274 | 1276 | ||
1275 | /* Fetch corresponding timestamp for this vblank interval from | 1277 | /* Fetch corresponding timestamp for this vblank interval from |
@@ -1293,15 +1295,16 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1293 | * e.g., due to spurious vblank interrupts. We need to | 1295 | * e.g., due to spurious vblank interrupts. We need to |
1294 | * ignore those for accounting. | 1296 | * ignore those for accounting. |
1295 | */ | 1297 | */ |
1296 | if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { | 1298 | if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { |
1297 | /* Store new timestamp in ringbuffer. */ | 1299 | /* Store new timestamp in ringbuffer. */ |
1298 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; | 1300 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; |
1299 | smp_wmb(); | ||
1300 | 1301 | ||
1301 | /* Increment cooked vblank count. This also atomically commits | 1302 | /* Increment cooked vblank count. This also atomically commits |
1302 | * the timestamp computed above. | 1303 | * the timestamp computed above. |
1303 | */ | 1304 | */ |
1305 | smp_mb__before_atomic_inc(); | ||
1304 | atomic_inc(&dev->_vblank_count[crtc]); | 1306 | atomic_inc(&dev->_vblank_count[crtc]); |
1307 | smp_mb__after_atomic_inc(); | ||
1305 | } else { | 1308 | } else { |
1306 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", | 1309 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", |
1307 | crtc, (int) diff_ns); | 1310 | crtc, (int) diff_ns); |
@@ -1311,5 +1314,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1311 | drm_handle_vblank_events(dev, crtc); | 1314 | drm_handle_vblank_events(dev, crtc); |
1312 | 1315 | ||
1313 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); | 1316 | spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); |
1317 | return true; | ||
1314 | } | 1318 | } |
1315 | EXPORT_SYMBOL(drm_handle_vblank); | 1319 | EXPORT_SYMBOL(drm_handle_vblank); |
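The barrier changes in this file pair a fixed write-side order (publish the timestamp for the upcoming count, then bump the counter between full barriers) with read-side code that re-checks the counter after loading the timestamp. A minimal sketch of such a consumer (hypothetical helper; the in-tree count/timestamp readers follow the same retry pattern):

/* Hypothetical consumer: fetch the cooked vblank count together with the
 * timestamp published for it.  The smp_rmb() plus re-read of the counter
 * pairs with the smp_mb__before/after_atomic_inc() added on the writer side. */
static u32 sample_vblank(struct drm_device *dev, int crtc, struct timeval *t)
{
	u32 cur;

	do {
		cur = atomic_read(&dev->_vblank_count[crtc]);
		*t = vblanktimestamp(dev, crtc, cur);
		smp_rmb();
	} while (cur != atomic_read(&dev->_vblank_count[crtc]));

	return cur;
}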
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..4ff9b6cc973f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
865 | int max_freq; | 865 | int max_freq; |
866 | 866 | ||
867 | /* RPSTAT1 is in the GT power well */ | 867 | /* RPSTAT1 is in the GT power well */ |
868 | __gen6_force_wake_get(dev_priv); | 868 | __gen6_gt_force_wake_get(dev_priv); |
869 | 869 | ||
870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | 870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); |
871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); | 871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); |
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
889 | max_freq * 100); | 889 | max_freq * 100); |
890 | 890 | ||
891 | __gen6_force_wake_put(dev_priv); | 891 | __gen6_gt_force_wake_put(dev_priv); |
892 | } else { | 892 | } else { |
893 | seq_printf(m, "no P-state info available\n"); | 893 | seq_printf(m, "no P-state info available\n"); |
894 | } | 894 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c972b04..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
152 | { | 152 | { |
153 | drm_i915_private_t *dev_priv = dev->dev_private; | 153 | drm_i915_private_t *dev_priv = dev->dev_private; |
154 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 154 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
155 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | 155 | int ret; |
156 | 156 | ||
157 | master_priv->sarea = drm_getsarea(dev); | 157 | master_priv->sarea = drm_getsarea(dev); |
158 | if (master_priv->sarea) { | 158 | if (master_priv->sarea) { |
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
163 | } | 163 | } |
164 | 164 | ||
165 | if (init->ring_size != 0) { | 165 | if (init->ring_size != 0) { |
166 | if (ring->obj != NULL) { | 166 | if (LP_RING(dev_priv)->obj != NULL) { |
167 | i915_dma_cleanup(dev); | 167 | i915_dma_cleanup(dev); |
168 | DRM_ERROR("Client tried to initialize ringbuffer in " | 168 | DRM_ERROR("Client tried to initialize ringbuffer in " |
169 | "GEM mode\n"); | 169 | "GEM mode\n"); |
170 | return -EINVAL; | 170 | return -EINVAL; |
171 | } | 171 | } |
172 | 172 | ||
173 | ring->size = init->ring_size; | 173 | ret = intel_render_ring_init_dri(dev, |
174 | 174 | init->ring_start, | |
175 | ring->map.offset = init->ring_start; | 175 | init->ring_size); |
176 | ring->map.size = init->ring_size; | 176 | if (ret) { |
177 | ring->map.type = 0; | ||
178 | ring->map.flags = 0; | ||
179 | ring->map.mtrr = 0; | ||
180 | |||
181 | drm_core_ioremap_wc(&ring->map, dev); | ||
182 | |||
183 | if (ring->map.handle == NULL) { | ||
184 | i915_dma_cleanup(dev); | 177 | i915_dma_cleanup(dev); |
185 | DRM_ERROR("can not ioremap virtual address for" | 178 | return ret; |
186 | " ring buffer\n"); | ||
187 | return -ENOMEM; | ||
188 | } | 179 | } |
189 | } | 180 | } |
190 | 181 | ||
191 | ring->virtual_start = ring->map.handle; | ||
192 | |||
193 | dev_priv->cpp = init->cpp; | 182 | dev_priv->cpp = init->cpp; |
194 | dev_priv->back_offset = init->back_offset; | 183 | dev_priv->back_offset = init->back_offset; |
195 | dev_priv->front_offset = init->front_offset; | 184 | dev_priv->front_offset = init->front_offset; |
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
1226 | if (ret) | 1215 | if (ret) |
1227 | DRM_INFO("failed to find VBIOS tables\n"); | 1216 | DRM_INFO("failed to find VBIOS tables\n"); |
1228 | 1217 | ||
1229 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 1218 | /* If we have > 1 VGA cards, then we need to arbitrate access |
1219 | * to the common VGA resources. | ||
1220 | * | ||
1221 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | ||
1222 | * then we do not take part in VGA arbitration and the | ||
1223 | * vga_client_register() fails with -ENODEV. | ||
1224 | */ | ||
1230 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | 1225 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); |
1231 | if (ret) | 1226 | if (ret && ret != -ENODEV) |
1232 | goto cleanup_ringbuffer; | 1227 | goto cleanup_ringbuffer; |
1233 | 1228 | ||
1234 | intel_register_dsm_handler(); | 1229 | intel_register_dsm_handler(); |
@@ -1900,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1900 | if (IS_GEN2(dev)) | 1895 | if (IS_GEN2(dev)) |
1901 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
1902 | 1897 | ||
1898 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | ||
1899 | * using 32bit addressing, overwriting memory if HWS is located | ||
1900 | * above 4GB. | ||
1901 | * | ||
1902 | * The documentation also mentions an issue with undefined | ||
1903 | * behaviour if any general state is accessed within a page above 4GB, | ||
1904 | * which also needs to be handled carefully. | ||
1905 | */ | ||
1906 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
1907 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | ||
1908 | |||
1903 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | 1909 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1904 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | 1910 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); |
1905 | if (!dev_priv->regs) { | 1911 | if (!dev_priv->regs) { |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 72fea2bcfc4f..22ec066adae6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,12 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | |||
46 | unsigned int i915_powersave = 1; | 46 | unsigned int i915_powersave = 1; |
47 | module_param_named(powersave, i915_powersave, int, 0600); | 47 | module_param_named(powersave, i915_powersave, int, 0600); |
48 | 48 | ||
49 | unsigned int i915_semaphores = 0; | ||
50 | module_param_named(semaphores, i915_semaphores, int, 0600); | ||
51 | |||
52 | unsigned int i915_enable_rc6 = 0; | ||
53 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | ||
54 | |||
49 | unsigned int i915_lvds_downclock = 0; | 55 | unsigned int i915_lvds_downclock = 0; |
50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 56 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
51 | 57 | ||
@@ -60,7 +66,7 @@ extern int intel_agp_enabled; | |||
60 | 66 | ||
61 | #define INTEL_VGA_DEVICE(id, info) { \ | 67 | #define INTEL_VGA_DEVICE(id, info) { \ |
62 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ | 68 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ |
63 | .class_mask = 0xffff00, \ | 69 | .class_mask = 0xff0000, \ |
64 | .vendor = 0x8086, \ | 70 | .vendor = 0x8086, \ |
65 | .device = id, \ | 71 | .device = id, \ |
66 | .subvendor = PCI_ANY_ID, \ | 72 | .subvendor = PCI_ANY_ID, \ |
@@ -251,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev) | |||
251 | } | 257 | } |
252 | } | 258 | } |
253 | 259 | ||
254 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | 260 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
255 | { | 261 | { |
256 | int count; | 262 | int count; |
257 | 263 | ||
@@ -267,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | |||
267 | udelay(10); | 273 | udelay(10); |
268 | } | 274 | } |
269 | 275 | ||
270 | void __gen6_force_wake_put(struct drm_i915_private *dev_priv) | 276 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
271 | { | 277 | { |
272 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 278 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
273 | POSTING_READ(FORCEWAKE); | 279 | POSTING_READ(FORCEWAKE); |
274 | } | 280 | } |
275 | 281 | ||
282 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
283 | { | ||
284 | int loop = 500; | ||
285 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
286 | while (fifo < 20 && loop--) { | ||
287 | udelay(10); | ||
288 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
289 | } | ||
290 | } | ||
291 | |||
276 | static int i915_drm_freeze(struct drm_device *dev) | 292 | static int i915_drm_freeze(struct drm_device *dev) |
277 | { | 293 | { |
278 | struct drm_i915_private *dev_priv = dev->dev_private; | 294 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -354,12 +370,13 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
354 | error = i915_gem_init_ringbuffer(dev); | 370 | error = i915_gem_init_ringbuffer(dev); |
355 | mutex_unlock(&dev->struct_mutex); | 371 | mutex_unlock(&dev->struct_mutex); |
356 | 372 | ||
373 | drm_mode_config_reset(dev); | ||
357 | drm_irq_install(dev); | 374 | drm_irq_install(dev); |
358 | 375 | ||
359 | /* Resume the modeset for every activated CRTC */ | 376 | /* Resume the modeset for every activated CRTC */ |
360 | drm_helper_resume_force_mode(dev); | 377 | drm_helper_resume_force_mode(dev); |
361 | 378 | ||
362 | if (dev_priv->renderctx && dev_priv->pwrctx) | 379 | if (IS_IRONLAKE_M(dev)) |
363 | ironlake_enable_rc6(dev); | 380 | ironlake_enable_rc6(dev); |
364 | } | 381 | } |
365 | 382 | ||
@@ -542,6 +559,7 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
542 | 559 | ||
543 | mutex_unlock(&dev->struct_mutex); | 560 | mutex_unlock(&dev->struct_mutex); |
544 | drm_irq_uninstall(dev); | 561 | drm_irq_uninstall(dev); |
562 | drm_mode_config_reset(dev); | ||
545 | drm_irq_install(dev); | 563 | drm_irq_install(dev); |
546 | mutex_lock(&dev->struct_mutex); | 564 | mutex_lock(&dev->struct_mutex); |
547 | } | 565 | } |
@@ -566,6 +584,14 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
566 | static int __devinit | 584 | static int __devinit |
567 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 585 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
568 | { | 586 | { |
587 | /* Only bind to function 0 of the device. Early generations | ||
588 | * used function 1 as a placeholder for multi-head. Today it only | ||
589 | * causes confusion, especially on systems where both | ||
590 | * functions have the same PCI-ID! | ||
591 | */ | ||
592 | if (PCI_FUNC(pdev->devfn)) | ||
593 | return -ENODEV; | ||
594 | |||
569 | return drm_get_pci_dev(pdev, ent, &driver); | 595 | return drm_get_pci_dev(pdev, ent, &driver); |
570 | } | 596 | } |
571 | 597 | ||
@@ -752,6 +778,9 @@ static int __init i915_init(void) | |||
752 | driver.driver_features &= ~DRIVER_MODESET; | 778 | driver.driver_features &= ~DRIVER_MODESET; |
753 | #endif | 779 | #endif |
754 | 780 | ||
781 | if (!(driver.driver_features & DRIVER_MODESET)) | ||
782 | driver.get_vblank_timestamp = NULL; | ||
783 | |||
755 | return drm_init(&driver); | 784 | return drm_init(&driver); |
756 | } | 785 | } |
757 | 786 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5969f46ac2d6..456f40484838 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@ typedef struct drm_i915_private { | |||
543 | /** List of all objects in gtt_space. Used to restore gtt | 543 | /** List of all objects in gtt_space. Used to restore gtt |
544 | * mappings on resume */ | 544 | * mappings on resume */ |
545 | struct list_head gtt_list; | 545 | struct list_head gtt_list; |
546 | /** End of mappable part of GTT */ | 546 | |
547 | /** Usable portion of the GTT for GEM */ | ||
548 | unsigned long gtt_start; | ||
547 | unsigned long gtt_mappable_end; | 549 | unsigned long gtt_mappable_end; |
550 | unsigned long gtt_end; | ||
548 | 551 | ||
549 | struct io_mapping *gtt_mapping; | 552 | struct io_mapping *gtt_mapping; |
550 | int gtt_mtrr; | 553 | int gtt_mtrr; |
@@ -953,8 +956,10 @@ extern struct drm_ioctl_desc i915_ioctls[]; | |||
953 | extern int i915_max_ioctl; | 956 | extern int i915_max_ioctl; |
954 | extern unsigned int i915_fbpercrtc; | 957 | extern unsigned int i915_fbpercrtc; |
955 | extern unsigned int i915_powersave; | 958 | extern unsigned int i915_powersave; |
959 | extern unsigned int i915_semaphores; | ||
956 | extern unsigned int i915_lvds_downclock; | 960 | extern unsigned int i915_lvds_downclock; |
957 | extern unsigned int i915_panel_use_ssc; | 961 | extern unsigned int i915_panel_use_ssc; |
962 | extern unsigned int i915_enable_rc6; | ||
958 | 963 | ||
959 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 964 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
960 | extern int i915_resume(struct drm_device *dev); | 965 | extern int i915_resume(struct drm_device *dev); |
@@ -1173,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
1173 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1178 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1174 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); | 1179 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1175 | 1180 | ||
1181 | uint32_t | ||
1182 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); | ||
1183 | |||
1176 | /* i915_gem_gtt.c */ | 1184 | /* i915_gem_gtt.c */ |
1177 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1185 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1178 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | 1186 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
@@ -1349,22 +1357,32 @@ __i915_write(64, q) | |||
1349 | * must be set to prevent GT core from power down and stale values being | 1357 | * must be set to prevent GT core from power down and stale values being |
1350 | * returned. | 1358 | * returned. |
1351 | */ | 1359 | */ |
1352 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv); | 1360 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1353 | void __gen6_force_wake_put (struct drm_i915_private *dev_priv); | 1361 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1354 | static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) | 1362 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1363 | |||
1364 | static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) | ||
1355 | { | 1365 | { |
1356 | u32 val; | 1366 | u32 val; |
1357 | 1367 | ||
1358 | if (dev_priv->info->gen >= 6) { | 1368 | if (dev_priv->info->gen >= 6) { |
1359 | __gen6_force_wake_get(dev_priv); | 1369 | __gen6_gt_force_wake_get(dev_priv); |
1360 | val = I915_READ(reg); | 1370 | val = I915_READ(reg); |
1361 | __gen6_force_wake_put(dev_priv); | 1371 | __gen6_gt_force_wake_put(dev_priv); |
1362 | } else | 1372 | } else |
1363 | val = I915_READ(reg); | 1373 | val = I915_READ(reg); |
1364 | 1374 | ||
1365 | return val; | 1375 | return val; |
1366 | } | 1376 | } |
1367 | 1377 | ||
1378 | static inline void i915_gt_write(struct drm_i915_private *dev_priv, | ||
1379 | u32 reg, u32 val) | ||
1380 | { | ||
1381 | if (dev_priv->info->gen >= 6) | ||
1382 | __gen6_gt_wait_for_fifo(dev_priv); | ||
1383 | I915_WRITE(reg, val); | ||
1384 | } | ||
1385 | |||
1368 | static inline void | 1386 | static inline void |
1369 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) | 1387 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) |
1370 | { | 1388 | { |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848ff755..36e66cc5225e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev, | |||
140 | { | 140 | { |
141 | drm_i915_private_t *dev_priv = dev->dev_private; | 141 | drm_i915_private_t *dev_priv = dev->dev_private; |
142 | 142 | ||
143 | drm_mm_init(&dev_priv->mm.gtt_space, start, | 143 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); |
144 | end - start); | ||
145 | 144 | ||
145 | dev_priv->mm.gtt_start = start; | ||
146 | dev_priv->mm.gtt_mappable_end = mappable_end; | ||
147 | dev_priv->mm.gtt_end = end; | ||
146 | dev_priv->mm.gtt_total = end - start; | 148 | dev_priv->mm.gtt_total = end - start; |
147 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; | 149 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; |
148 | dev_priv->mm.gtt_mappable_end = mappable_end; | 150 | |
151 | /* Take over this portion of the GTT */ | ||
152 | intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); | ||
149 | } | 153 | } |
150 | 154 | ||
151 | int | 155 | int |
@@ -1394,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) | |||
1394 | * Return the required GTT alignment for an object, only taking into account | 1398 | * Return the required GTT alignment for an object, only taking into account |
1395 | * unfenced tiled surface requirements. | 1399 | * unfenced tiled surface requirements. |
1396 | */ | 1400 | */ |
1397 | static uint32_t | 1401 | uint32_t |
1398 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) | 1402 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) |
1399 | { | 1403 | { |
1400 | struct drm_device *dev = obj->base.dev; | 1404 | struct drm_device *dev = obj->base.dev; |
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1857 | 1861 | ||
1858 | seqno = ring->get_seqno(ring); | 1862 | seqno = ring->get_seqno(ring); |
1859 | 1863 | ||
1860 | for (i = 0; i < I915_NUM_RINGS; i++) | 1864 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) |
1861 | if (seqno >= ring->sync_seqno[i]) | 1865 | if (seqno >= ring->sync_seqno[i]) |
1862 | ring->sync_seqno[i] = 0; | 1866 | ring->sync_seqno[i] = 0; |
1863 | 1867 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dcfdf4151b6d..50ab1614571c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
772 | if (from == NULL || to == from) | 772 | if (from == NULL || to == from) |
773 | return 0; | 773 | return 0; |
774 | 774 | ||
775 | /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ | 775 | /* XXX gpu semaphores are implicated in various hard hangs on SNB */ |
776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) | 776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) |
777 | return i915_gem_object_wait_rendering(obj, true); | 777 | return i915_gem_object_wait_rendering(obj, true); |
778 | 778 | ||
779 | idx = intel_ring_sync_index(from, to); | 779 | idx = intel_ring_sync_index(from, to); |
@@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1175 | goto err; | 1175 | goto err; |
1176 | 1176 | ||
1177 | seqno = i915_gem_next_request_seqno(dev, ring); | 1177 | seqno = i915_gem_next_request_seqno(dev, ring); |
1178 | for (i = 0; i < I915_NUM_RINGS-1; i++) { | 1178 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { |
1179 | if (seqno < ring->sync_seqno[i]) { | 1179 | if (seqno < ring->sync_seqno[i]) { |
1180 | /* The GPU can not handle its semaphore value wrapping, | 1180 | /* The GPU can not handle its semaphore value wrapping, |
1181 | * so every billion or so execbuffers, we need to stall | 1181 | * so every billion or so execbuffers, we need to stall |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae50ac8..b0abdc64aa9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
34 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
35 | struct drm_i915_gem_object *obj; | 35 | struct drm_i915_gem_object *obj; |
36 | 36 | ||
37 | /* First fill our portion of the GTT with scratch pages */ | ||
38 | intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, | ||
39 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); | ||
40 | |||
37 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | 41 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { |
38 | i915_gem_clflush_object(obj); | 42 | i915_gem_clflush_object(obj); |
39 | 43 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..d64843e18df2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && | 349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); | 350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
351 | 351 | ||
352 | obj->tiling_changed = true; | 352 | /* Rebind if we need a change of alignment */ |
353 | obj->tiling_mode = args->tiling_mode; | 353 | if (!obj->map_and_fenceable) { |
354 | obj->stride = args->stride; | 354 | u32 unfenced_alignment = |
355 | i915_gem_get_unfenced_gtt_alignment(obj); | ||
356 | if (obj->gtt_offset & (unfenced_alignment - 1)) | ||
357 | ret = i915_gem_object_unbind(obj); | ||
358 | } | ||
359 | |||
360 | if (ret == 0) { | ||
361 | obj->tiling_changed = true; | ||
362 | obj->tiling_mode = args->tiling_mode; | ||
363 | obj->stride = args->stride; | ||
364 | } | ||
355 | } | 365 | } |
366 | /* we have to maintain this existing ABI... */ | ||
367 | args->stride = obj->stride; | ||
368 | args->tiling_mode = obj->tiling_mode; | ||
356 | drm_gem_object_unreference(&obj->base); | 369 | drm_gem_object_unreference(&obj->base); |
357 | mutex_unlock(&dev->struct_mutex); | 370 | mutex_unlock(&dev->struct_mutex); |
358 | 371 | ||
359 | return 0; | 372 | return ret; |
360 | } | 373 | } |
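The write-back of args->stride and args->tiling_mode above means the ioctl now reports what the kernel actually applied, even when the request was adjusted or the rebind failed. A hypothetical user-space caller (assuming the standard libdrm ioctl wrapper and i915 uAPI structs):

#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Request X tiling and report back what the kernel really set. */
int set_tiling_x(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.tiling_mode = I915_TILING_X;
	arg.stride = stride;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
		return -1;

	/* arg.tiling_mode / arg.stride now hold the values in effect. */
	return arg.tiling_mode == I915_TILING_X ? 0 : 1;
}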
361 | 374 | ||
362 | /** | 375 | /** |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b8e509ae065e..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
274 | return ret; | 274 | return ret; |
275 | } | 275 | } |
276 | 276 | ||
277 | int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, | 277 | int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, |
278 | int *max_error, | 278 | int *max_error, |
279 | struct timeval *vblank_time, | 279 | struct timeval *vblank_time, |
280 | unsigned flags) | 280 | unsigned flags) |
281 | { | 281 | { |
282 | struct drm_crtc *drmcrtc; | 282 | struct drm_i915_private *dev_priv = dev->dev_private; |
283 | struct drm_crtc *crtc; | ||
283 | 284 | ||
284 | if (crtc < 0 || crtc >= dev->num_crtcs) { | 285 | if (pipe < 0 || pipe >= dev_priv->num_pipe) { |
285 | DRM_ERROR("Invalid crtc %d\n", crtc); | 286 | DRM_ERROR("Invalid crtc %d\n", pipe); |
286 | return -EINVAL; | 287 | return -EINVAL; |
287 | } | 288 | } |
288 | 289 | ||
289 | /* Get drm_crtc to timestamp: */ | 290 | /* Get drm_crtc to timestamp: */ |
290 | drmcrtc = intel_get_crtc_for_pipe(dev, crtc); | 291 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
292 | if (crtc == NULL) { | ||
293 | DRM_ERROR("Invalid crtc %d\n", pipe); | ||
294 | return -EINVAL; | ||
295 | } | ||
296 | |||
297 | if (!crtc->enabled) { | ||
298 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | ||
299 | return -EBUSY; | ||
300 | } | ||
291 | 301 | ||
292 | /* Helper routine in DRM core does all the work: */ | 302 | /* Helper routine in DRM core does all the work: */ |
293 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, | 303 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, |
294 | vblank_time, flags, drmcrtc); | 304 | vblank_time, flags, |
305 | crtc); | ||
295 | } | 306 | } |
296 | 307 | ||
297 | /* | 308 | /* |
@@ -305,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
305 | struct drm_mode_config *mode_config = &dev->mode_config; | 316 | struct drm_mode_config *mode_config = &dev->mode_config; |
306 | struct intel_encoder *encoder; | 317 | struct intel_encoder *encoder; |
307 | 318 | ||
319 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | ||
320 | |||
308 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 321 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
309 | if (encoder->hot_plug) | 322 | if (encoder->hot_plug) |
310 | encoder->hot_plug(encoder); | 323 | encoder->hot_plug(encoder); |
@@ -348,8 +361,12 @@ static void notify_ring(struct drm_device *dev, | |||
348 | struct intel_ring_buffer *ring) | 361 | struct intel_ring_buffer *ring) |
349 | { | 362 | { |
350 | struct drm_i915_private *dev_priv = dev->dev_private; | 363 | struct drm_i915_private *dev_priv = dev->dev_private; |
351 | u32 seqno = ring->get_seqno(ring); | 364 | u32 seqno; |
352 | 365 | ||
366 | if (ring->obj == NULL) | ||
367 | return; | ||
368 | |||
369 | seqno = ring->get_seqno(ring); | ||
353 | trace_i915_gem_request_complete(dev, seqno); | 370 | trace_i915_gem_request_complete(dev, seqno); |
354 | 371 | ||
355 | ring->irq_seqno = seqno; | 372 | ring->irq_seqno = seqno; |
@@ -831,6 +848,8 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
831 | i++; | 848 | i++; |
832 | error->pinned_bo_count = i - error->active_bo_count; | 849 | error->pinned_bo_count = i - error->active_bo_count; |
833 | 850 | ||
851 | error->active_bo = NULL; | ||
852 | error->pinned_bo = NULL; | ||
834 | if (i) { | 853 | if (i) { |
835 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, | 854 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
836 | GFP_ATOMIC); | 855 | GFP_ATOMIC); |
@@ -1179,18 +1198,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1179 | intel_finish_page_flip_plane(dev, 1); | 1198 | intel_finish_page_flip_plane(dev, 1); |
1180 | } | 1199 | } |
1181 | 1200 | ||
1182 | if (pipea_stats & vblank_status) { | 1201 | if (pipea_stats & vblank_status && |
1202 | drm_handle_vblank(dev, 0)) { | ||
1183 | vblank++; | 1203 | vblank++; |
1184 | drm_handle_vblank(dev, 0); | ||
1185 | if (!dev_priv->flip_pending_is_done) { | 1204 | if (!dev_priv->flip_pending_is_done) { |
1186 | i915_pageflip_stall_check(dev, 0); | 1205 | i915_pageflip_stall_check(dev, 0); |
1187 | intel_finish_page_flip(dev, 0); | 1206 | intel_finish_page_flip(dev, 0); |
1188 | } | 1207 | } |
1189 | } | 1208 | } |
1190 | 1209 | ||
1191 | if (pipeb_stats & vblank_status) { | 1210 | if (pipeb_stats & vblank_status && |
1211 | drm_handle_vblank(dev, 1)) { | ||
1192 | vblank++; | 1212 | vblank++; |
1193 | drm_handle_vblank(dev, 1); | ||
1194 | if (!dev_priv->flip_pending_is_done) { | 1213 | if (!dev_priv->flip_pending_is_done) { |
1195 | i915_pageflip_stall_check(dev, 1); | 1214 | i915_pageflip_stall_check(dev, 1); |
1196 | intel_finish_page_flip(dev, 1); | 1215 | intel_finish_page_flip(dev, 1); |
@@ -1278,12 +1297,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
1278 | if (master_priv->sarea_priv) | 1297 | if (master_priv->sarea_priv) |
1279 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 1298 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
1280 | 1299 | ||
1281 | ret = -ENODEV; | ||
1282 | if (ring->irq_get(ring)) { | 1300 | if (ring->irq_get(ring)) { |
1283 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, | 1301 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, |
1284 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 1302 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
1285 | ring->irq_put(ring); | 1303 | ring->irq_put(ring); |
1286 | } | 1304 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) |
1305 | ret = -EBUSY; | ||
1287 | 1306 | ||
1288 | if (ret == -EBUSY) { | 1307 | if (ret == -EBUSY) { |
1289 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 1308 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
@@ -1632,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1632 | } else { | 1651 | } else { |
1633 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1652 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1634 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1653 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1635 | hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; | 1654 | hotplug_mask |= SDE_AUX_MASK; |
1636 | I915_WRITE(FDI_RXA_IMR, 0); | ||
1637 | I915_WRITE(FDI_RXB_IMR, 0); | ||
1638 | } | 1655 | } |
1639 | 1656 | ||
1640 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1657 | dev_priv->pch_irq_mask = ~hotplug_mask; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f41f61..3e6f486f4605 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@ | |||
174 | * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! | 174 | * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! |
175 | */ | 175 | */ |
176 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) | 176 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) |
177 | #define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ | 177 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ |
178 | #define MI_INVALIDATE_TLB (1<<18) | ||
179 | #define MI_INVALIDATE_BSD (1<<7) | ||
178 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 180 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
179 | #define MI_BATCH_NON_SECURE (1) | 181 | #define MI_BATCH_NON_SECURE (1) |
180 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | 182 | #define MI_BATCH_NON_SECURE_I965 (1<<8) |
@@ -513,6 +515,10 @@ | |||
513 | #define GEN6_BLITTER_SYNC_STATUS (1 << 24) | 515 | #define GEN6_BLITTER_SYNC_STATUS (1 << 24) |
514 | #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) | 516 | #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) |
515 | 517 | ||
518 | #define GEN6_BLITTER_ECOSKPD 0x221d0 | ||
519 | #define GEN6_BLITTER_LOCK_SHIFT 16 | ||
520 | #define GEN6_BLITTER_FBC_NOTIFY (1<<3) | ||
521 | |||
516 | #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 | 522 | #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 |
517 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) | 523 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) |
518 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) | 524 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) |
@@ -1547,17 +1553,7 @@ | |||
1547 | 1553 | ||
1548 | /* Backlight control */ | 1554 | /* Backlight control */ |
1549 | #define BLC_PWM_CTL 0x61254 | 1555 | #define BLC_PWM_CTL 0x61254 |
1550 | #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) | ||
1551 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ | 1556 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ |
1552 | #define BLM_COMBINATION_MODE (1 << 30) | ||
1553 | /* | ||
1554 | * This is the most significant 15 bits of the number of backlight cycles in a | ||
1555 | * complete cycle of the modulated backlight control. | ||
1556 | * | ||
1557 | * The actual value is this field multiplied by two. | ||
1558 | */ | ||
1559 | #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) | ||
1560 | #define BLM_LEGACY_MODE (1 << 16) | ||
1561 | /* | 1557 | /* |
1562 | * This is the number of cycles out of the backlight modulation cycle for which | 1558 | * This is the number of cycles out of the backlight modulation cycle for which |
1563 | * the backlight is on. | 1559 | * the backlight is on. |
@@ -2626,6 +2622,8 @@ | |||
2626 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 | 2622 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 |
2627 | 2623 | ||
2628 | #define PCH_DSPCLK_GATE_D 0x42020 | 2624 | #define PCH_DSPCLK_GATE_D 0x42020 |
2625 | # define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) | ||
2626 | # define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) | ||
2629 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) | 2627 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) |
2630 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) | 2628 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) |
2631 | 2629 | ||
@@ -3263,6 +3261,8 @@ | |||
3263 | #define FORCEWAKE 0xA18C | 3261 | #define FORCEWAKE 0xA18C |
3264 | #define FORCEWAKE_ACK 0x130090 | 3262 | #define FORCEWAKE_ACK 0x130090 |
3265 | 3263 | ||
3264 | #define GT_FIFO_FREE_ENTRIES 0x120008 | ||
3265 | |||
3266 | #define GEN6_RPNSWREQ 0xA008 | 3266 | #define GEN6_RPNSWREQ 0xA008 |
3267 | #define GEN6_TURBO_DISABLE (1<<31) | 3267 | #define GEN6_TURBO_DISABLE (1<<31) |
3268 | #define GEN6_FREQUENCY(x) ((x)<<25) | 3268 | #define GEN6_FREQUENCY(x) ((x)<<25) |
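MI_FLUSH_DW is corrected to its 1-dword length field here and gains explicit TLB/BSD invalidation bits. Roughly how a GEN6 ring-flush path can emit it with the new bits (a sketch only, using the ring helper names from intel_ringbuffer.c; the exact in-tree flush functions may differ in detail):

/* Sketch: flush the BSD ring and, when GPU domains are involved, ask the
 * command streamer to invalidate its TLB as well. */
static int gen6_bsd_flush_sketch(struct intel_ring_buffer *ring,
				 u32 invalidate_domains)
{
	u32 cmd = MI_FLUSH_DW;
	int ret;

	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);	/* MI_FLUSH_DW, possibly with invalidate bits */
	intel_ring_emit(ring, 0);	/* address / post-sync value: unused here */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);	/* pad the emit to an even dword count */
	intel_ring_advance(ring);
	return 0;
}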
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b87ee46..8a77ff4a7237 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector, | |||
535 | return 0; | 535 | return 0; |
536 | } | 536 | } |
537 | 537 | ||
538 | static void intel_crt_reset(struct drm_connector *connector) | ||
539 | { | ||
540 | struct drm_device *dev = connector->dev; | ||
541 | struct intel_crt *crt = intel_attached_crt(connector); | ||
542 | |||
543 | if (HAS_PCH_SPLIT(dev)) | ||
544 | crt->force_hotplug_required = 1; | ||
545 | } | ||
546 | |||
538 | /* | 547 | /* |
539 | * Routines for controlling stuff on the analog port | 548 | * Routines for controlling stuff on the analog port |
540 | */ | 549 | */ |
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { | |||
548 | }; | 557 | }; |
549 | 558 | ||
550 | static const struct drm_connector_funcs intel_crt_connector_funcs = { | 559 | static const struct drm_connector_funcs intel_crt_connector_funcs = { |
560 | .reset = intel_crt_reset, | ||
551 | .dpms = drm_helper_connector_dpms, | 561 | .dpms = drm_helper_connector_dpms, |
552 | .detect = intel_crt_detect, | 562 | .detect = intel_crt_detect, |
553 | .fill_modes = drm_helper_probe_single_connector_modes, | 563 | .fill_modes = drm_helper_probe_single_connector_modes, |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 98967f3b7724..49fb54fd9a18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev) | |||
1213 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 1213 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | static void sandybridge_blit_fbc_update(struct drm_device *dev) | ||
1217 | { | ||
1218 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1219 | u32 blt_ecoskpd; | ||
1220 | |||
1221 | /* Make sure blitter notifies FBC of writes */ | ||
1222 | __gen6_gt_force_wake_get(dev_priv); | ||
1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | ||
1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | ||
1225 | GEN6_BLITTER_LOCK_SHIFT; | ||
1226 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1227 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | ||
1228 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1229 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | ||
1230 | GEN6_BLITTER_LOCK_SHIFT); | ||
1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | ||
1233 | __gen6_gt_force_wake_put(dev_priv); | ||
1234 | } | ||
1235 | |||
1216 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1217 | { | 1237 | { |
1218 | struct drm_device *dev = crtc->dev; | 1238 | struct drm_device *dev = crtc->dev; |
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1266 | I915_WRITE(SNB_DPFC_CTL_SA, | 1286 | I915_WRITE(SNB_DPFC_CTL_SA, |
1267 | SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); | 1287 | SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); |
1268 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | 1288 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); |
1289 | sandybridge_blit_fbc_update(dev); | ||
1269 | } | 1290 | } |
1270 | 1291 | ||
1271 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 1292 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
@@ -1609,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1609 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 1630 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1610 | 1631 | ||
1611 | wait_event(dev_priv->pending_flip_queue, | 1632 | wait_event(dev_priv->pending_flip_queue, |
1633 | atomic_read(&dev_priv->mm.wedged) || | ||
1612 | atomic_read(&obj->pending_flip) == 0); | 1634 | atomic_read(&obj->pending_flip) == 0); |
1613 | 1635 | ||
1614 | /* Big Hammer, we also need to ensure that any pending | 1636 | /* Big Hammer, we also need to ensure that any pending |
1615 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 1637 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
1616 | * current scanout is retired before unpinning the old | 1638 | * current scanout is retired before unpinning the old |
1617 | * framebuffer. | 1639 | * framebuffer. |
1640 | * | ||
1641 | * This should only fail upon a hung GPU, in which case we | ||
1642 | * can safely continue. | ||
1618 | */ | 1643 | */ |
1619 | ret = i915_gem_object_flush_gpu(obj, false); | 1644 | ret = i915_gem_object_flush_gpu(obj, false); |
1620 | if (ret) { | 1645 | (void) ret; |
1621 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
1622 | mutex_unlock(&dev->struct_mutex); | ||
1623 | return ret; | ||
1624 | } | ||
1625 | } | 1646 | } |
1626 | 1647 | ||
1627 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 1648 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
@@ -2024,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
2024 | atomic_read(&obj->pending_flip) == 0); | 2045 | atomic_read(&obj->pending_flip) == 0); |
2025 | } | 2046 | } |
2026 | 2047 | ||
2048 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | ||
2049 | { | ||
2050 | struct drm_device *dev = crtc->dev; | ||
2051 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
2052 | struct intel_encoder *encoder; | ||
2053 | |||
2054 | /* | ||
2055 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that | ||
2056 | * must be driven by its own crtc; no sharing is possible. | ||
2057 | */ | ||
2058 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
2059 | if (encoder->base.crtc != crtc) | ||
2060 | continue; | ||
2061 | |||
2062 | switch (encoder->type) { | ||
2063 | case INTEL_OUTPUT_EDP: | ||
2064 | if (!intel_encoder_is_pch_edp(&encoder->base)) | ||
2065 | return false; | ||
2066 | continue; | ||
2067 | } | ||
2068 | } | ||
2069 | |||
2070 | return true; | ||
2071 | } | ||
2072 | |||
2027 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2073 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2028 | { | 2074 | { |
2029 | struct drm_device *dev = crtc->dev; | 2075 | struct drm_device *dev = crtc->dev; |
@@ -2032,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2032 | int pipe = intel_crtc->pipe; | 2078 | int pipe = intel_crtc->pipe; |
2033 | int plane = intel_crtc->plane; | 2079 | int plane = intel_crtc->plane; |
2034 | u32 reg, temp; | 2080 | u32 reg, temp; |
2081 | bool is_pch_port = false; | ||
2035 | 2082 | ||
2036 | if (intel_crtc->active) | 2083 | if (intel_crtc->active) |
2037 | return; | 2084 | return; |
@@ -2045,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2045 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | 2092 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
2046 | } | 2093 | } |
2047 | 2094 | ||
2048 | ironlake_fdi_enable(crtc); | 2095 | is_pch_port = intel_crtc_driving_pch(crtc); |
2096 | |||
2097 | if (is_pch_port) | ||
2098 | ironlake_fdi_enable(crtc); | ||
2099 | else { | ||
2100 | /* disable CPU FDI tx and PCH FDI rx */ | ||
2101 | reg = FDI_TX_CTL(pipe); | ||
2102 | temp = I915_READ(reg); | ||
2103 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | ||
2104 | POSTING_READ(reg); | ||
2105 | |||
2106 | reg = FDI_RX_CTL(pipe); | ||
2107 | temp = I915_READ(reg); | ||
2108 | temp &= ~(0x7 << 16); | ||
2109 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2110 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | ||
2111 | |||
2112 | POSTING_READ(reg); | ||
2113 | udelay(100); | ||
2114 | |||
2115 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2116 | if (HAS_PCH_IBX(dev)) | ||
2117 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2118 | I915_READ(FDI_RX_CHICKEN(pipe) & | ||
2119 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | ||
2120 | |||
2121 | /* still set train pattern 1 */ | ||
2122 | reg = FDI_TX_CTL(pipe); | ||
2123 | temp = I915_READ(reg); | ||
2124 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2125 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2126 | I915_WRITE(reg, temp); | ||
2127 | |||
2128 | reg = FDI_RX_CTL(pipe); | ||
2129 | temp = I915_READ(reg); | ||
2130 | if (HAS_PCH_CPT(dev)) { | ||
2131 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2132 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2133 | } else { | ||
2134 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2135 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2136 | } | ||
2137 | /* BPC in FDI rx is consistent with that in PIPECONF */ | ||
2138 | temp &= ~(0x07 << 16); | ||
2139 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2140 | I915_WRITE(reg, temp); | ||
2141 | |||
2142 | POSTING_READ(reg); | ||
2143 | udelay(100); | ||
2144 | } | ||
2049 | 2145 | ||
2050 | /* Enable panel fitting for LVDS */ | 2146 | /* Enable panel fitting for LVDS */ |
2051 | if (dev_priv->pch_pf_size && | 2147 | if (dev_priv->pch_pf_size && |
@@ -2079,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2079 | intel_flush_display_plane(dev, plane); | 2175 | intel_flush_display_plane(dev, plane); |
2080 | } | 2176 | } |
2081 | 2177 | ||
2178 | /* Skip the PCH stuff if possible */ | ||
2179 | if (!is_pch_port) | ||
2180 | goto done; | ||
2181 | |||
2082 | /* For PCH output, training FDI link */ | 2182 | /* For PCH output, training FDI link */ |
2083 | if (IS_GEN6(dev)) | 2183 | if (IS_GEN6(dev)) |
2084 | gen6_fdi_link_train(crtc); | 2184 | gen6_fdi_link_train(crtc); |
@@ -2163,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2163 | I915_WRITE(reg, temp | TRANS_ENABLE); | 2263 | I915_WRITE(reg, temp | TRANS_ENABLE); |
2164 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 2264 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
2165 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | 2265 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
2166 | 2266 | done: | |
2167 | intel_crtc_load_lut(crtc); | 2267 | intel_crtc_load_lut(crtc); |
2168 | intel_update_fbc(dev); | 2268 | intel_update_fbc(dev); |
2169 | intel_crtc_update_cursor(crtc, true); | 2269 | intel_crtc_update_cursor(crtc, true); |
@@ -5530,6 +5630,16 @@ cleanup_work: | |||
5530 | return ret; | 5630 | return ret; |
5531 | } | 5631 | } |
5532 | 5632 | ||
5633 | static void intel_crtc_reset(struct drm_crtc *crtc) | ||
5634 | { | ||
5635 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5636 | |||
5637 | /* Reset flags back to the 'unknown' status so that they | ||
5638 | * will be correctly set on the initial modeset. | ||
5639 | */ | ||
5640 | intel_crtc->dpms_mode = -1; | ||
5641 | } | ||
5642 | |||
5533 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | 5643 | static struct drm_crtc_helper_funcs intel_helper_funcs = { |
5534 | .dpms = intel_crtc_dpms, | 5644 | .dpms = intel_crtc_dpms, |
5535 | .mode_fixup = intel_crtc_mode_fixup, | 5645 | .mode_fixup = intel_crtc_mode_fixup, |
@@ -5541,6 +5651,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = { | |||
5541 | }; | 5651 | }; |
5542 | 5652 | ||
5543 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 5653 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
5654 | .reset = intel_crtc_reset, | ||
5544 | .cursor_set = intel_crtc_cursor_set, | 5655 | .cursor_set = intel_crtc_cursor_set, |
5545 | .cursor_move = intel_crtc_cursor_move, | 5656 | .cursor_move = intel_crtc_cursor_move, |
5546 | .gamma_set = intel_crtc_gamma_set, | 5657 | .gamma_set = intel_crtc_gamma_set, |
@@ -5631,8 +5742,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5631 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | 5742 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; |
5632 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | 5743 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
5633 | 5744 | ||
5634 | intel_crtc->cursor_addr = 0; | 5745 | intel_crtc_reset(&intel_crtc->base); |
5635 | intel_crtc->dpms_mode = -1; | ||
5636 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ | 5746 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ |
5637 | 5747 | ||
5638 | if (HAS_PCH_SPLIT(dev)) { | 5748 | if (HAS_PCH_SPLIT(dev)) { |
@@ -6172,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6172 | * userspace... | 6282 | * userspace... |
6173 | */ | 6283 | */ |
6174 | I915_WRITE(GEN6_RC_STATE, 0); | 6284 | I915_WRITE(GEN6_RC_STATE, 0); |
6175 | __gen6_force_wake_get(dev_priv); | 6285 | __gen6_gt_force_wake_get(dev_priv); |
6176 | 6286 | ||
6177 | /* disable the counters and set deterministic thresholds */ | 6287 | /* disable the counters and set deterministic thresholds */ |
6178 | I915_WRITE(GEN6_RC_CONTROL, 0); | 6288 | I915_WRITE(GEN6_RC_CONTROL, 0); |
@@ -6270,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6270 | /* enable all PM interrupts */ | 6380 | /* enable all PM interrupts */ |
6271 | I915_WRITE(GEN6_PMINTRMSK, 0); | 6381 | I915_WRITE(GEN6_PMINTRMSK, 0); |
6272 | 6382 | ||
6273 | __gen6_force_wake_put(dev_priv); | 6383 | __gen6_gt_force_wake_put(dev_priv); |
6274 | } | 6384 | } |
6275 | 6385 | ||
6276 | void intel_enable_clock_gating(struct drm_device *dev) | 6386 | void intel_enable_clock_gating(struct drm_device *dev) |
@@ -6286,7 +6396,9 @@ void intel_enable_clock_gating(struct drm_device *dev) | |||
6286 | 6396 | ||
6287 | if (IS_GEN5(dev)) { | 6397 | if (IS_GEN5(dev)) { |
6288 | /* Required for FBC */ | 6398 | /* Required for FBC */ |
6289 | dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; | 6399 | dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | |
6400 | DPFCRUNIT_CLOCK_GATE_DISABLE | | ||
6401 | DPFDUNIT_CLOCK_GATE_DISABLE; | ||
6290 | /* Required for CxSR */ | 6402 | /* Required for CxSR */ |
6291 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | 6403 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; |
6292 | 6404 | ||
@@ -6429,52 +6541,60 @@ void intel_enable_clock_gating(struct drm_device *dev) | |||
6429 | } | 6541 | } |
6430 | } | 6542 | } |
6431 | 6543 | ||
6432 | void intel_disable_clock_gating(struct drm_device *dev) | 6544 | static void ironlake_teardown_rc6(struct drm_device *dev) |
6433 | { | 6545 | { |
6434 | struct drm_i915_private *dev_priv = dev->dev_private; | 6546 | struct drm_i915_private *dev_priv = dev->dev_private; |
6435 | 6547 | ||
6436 | if (dev_priv->renderctx) { | 6548 | if (dev_priv->renderctx) { |
6437 | struct drm_i915_gem_object *obj = dev_priv->renderctx; | 6549 | i915_gem_object_unpin(dev_priv->renderctx); |
6438 | 6550 | drm_gem_object_unreference(&dev_priv->renderctx->base); | |
6439 | I915_WRITE(CCID, 0); | ||
6440 | POSTING_READ(CCID); | ||
6441 | |||
6442 | i915_gem_object_unpin(obj); | ||
6443 | drm_gem_object_unreference(&obj->base); | ||
6444 | dev_priv->renderctx = NULL; | 6551 | dev_priv->renderctx = NULL; |
6445 | } | 6552 | } |
6446 | 6553 | ||
6447 | if (dev_priv->pwrctx) { | 6554 | if (dev_priv->pwrctx) { |
6448 | struct drm_i915_gem_object *obj = dev_priv->pwrctx; | 6555 | i915_gem_object_unpin(dev_priv->pwrctx); |
6556 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
6557 | dev_priv->pwrctx = NULL; | ||
6558 | } | ||
6559 | } | ||
6560 | |||
6561 | static void ironlake_disable_rc6(struct drm_device *dev) | ||
6562 | { | ||
6563 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6564 | |||
6565 | if (I915_READ(PWRCTXA)) { | ||
6566 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | ||
6567 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
6568 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
6569 | 50); | ||
6449 | 6570 | ||
6450 | I915_WRITE(PWRCTXA, 0); | 6571 | I915_WRITE(PWRCTXA, 0); |
6451 | POSTING_READ(PWRCTXA); | 6572 | POSTING_READ(PWRCTXA); |
6452 | 6573 | ||
6453 | i915_gem_object_unpin(obj); | 6574 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
6454 | drm_gem_object_unreference(&obj->base); | 6575 | POSTING_READ(RSTDBYCTL); |
6455 | dev_priv->pwrctx = NULL; | ||
6456 | } | 6576 | } |
6577 | |||
6578 | ironlake_teardown_rc6(dev); | ||
6457 | } | 6579 | } |
6458 | 6580 | ||
6459 | static void ironlake_disable_rc6(struct drm_device *dev) | 6581 | static int ironlake_setup_rc6(struct drm_device *dev) |
6460 | { | 6582 | { |
6461 | struct drm_i915_private *dev_priv = dev->dev_private; | 6583 | struct drm_i915_private *dev_priv = dev->dev_private; |
6462 | 6584 | ||
6463 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | 6585 | if (dev_priv->renderctx == NULL) |
6464 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | 6586 | dev_priv->renderctx = intel_alloc_context_page(dev); |
6465 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | 6587 | if (!dev_priv->renderctx) |
6466 | 10); | 6588 | return -ENOMEM; |
6467 | POSTING_READ(CCID); | 6589 | |
6468 | I915_WRITE(PWRCTXA, 0); | 6590 | if (dev_priv->pwrctx == NULL) |
6469 | POSTING_READ(PWRCTXA); | 6591 | dev_priv->pwrctx = intel_alloc_context_page(dev); |
6470 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | 6592 | if (!dev_priv->pwrctx) { |
6471 | POSTING_READ(RSTDBYCTL); | 6593 | ironlake_teardown_rc6(dev); |
6472 | i915_gem_object_unpin(dev_priv->renderctx); | 6594 | return -ENOMEM; |
6473 | drm_gem_object_unreference(&dev_priv->renderctx->base); | 6595 | } |
6474 | dev_priv->renderctx = NULL; | 6596 | |
6475 | i915_gem_object_unpin(dev_priv->pwrctx); | 6597 | return 0; |
6476 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
6477 | dev_priv->pwrctx = NULL; | ||
6478 | } | 6598 | } |
6479 | 6599 | ||
6480 | void ironlake_enable_rc6(struct drm_device *dev) | 6600 | void ironlake_enable_rc6(struct drm_device *dev) |
@@ -6482,15 +6602,26 @@ void ironlake_enable_rc6(struct drm_device *dev) | |||
6482 | struct drm_i915_private *dev_priv = dev->dev_private; | 6602 | struct drm_i915_private *dev_priv = dev->dev_private; |
6483 | int ret; | 6603 | int ret; |
6484 | 6604 | ||
6605 | /* rc6 disabled by default due to repeated reports of hanging during | ||
6606 | * boot and resume. | ||
6607 | */ | ||
6608 | if (!i915_enable_rc6) | ||
6609 | return; | ||
6610 | |||
6611 | ret = ironlake_setup_rc6(dev); | ||
6612 | if (ret) | ||
6613 | return; | ||
6614 | |||
6485 | /* | 6615 | /* |
6486 | * GPU can automatically power down the render unit if given a page | 6616 | * GPU can automatically power down the render unit if given a page |
6487 | * to save state. | 6617 | * to save state. |
6488 | */ | 6618 | */ |
6489 | ret = BEGIN_LP_RING(6); | 6619 | ret = BEGIN_LP_RING(6); |
6490 | if (ret) { | 6620 | if (ret) { |
6491 | ironlake_disable_rc6(dev); | 6621 | ironlake_teardown_rc6(dev); |
6492 | return; | 6622 | return; |
6493 | } | 6623 | } |
6624 | |||
6494 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | 6625 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
6495 | OUT_RING(MI_SET_CONTEXT); | 6626 | OUT_RING(MI_SET_CONTEXT); |
6496 | OUT_RING(dev_priv->renderctx->gtt_offset | | 6627 | OUT_RING(dev_priv->renderctx->gtt_offset | |
@@ -6507,6 +6638,7 @@ void ironlake_enable_rc6(struct drm_device *dev) | |||
6507 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | 6638 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
6508 | } | 6639 | } |
6509 | 6640 | ||
6641 | |||
6510 | /* Set up chip specific display functions */ | 6642 | /* Set up chip specific display functions */ |
6511 | static void intel_init_display(struct drm_device *dev) | 6643 | static void intel_init_display(struct drm_device *dev) |
6512 | { | 6644 | { |
@@ -6749,21 +6881,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
6749 | if (IS_GEN6(dev)) | 6881 | if (IS_GEN6(dev)) |
6750 | gen6_enable_rps(dev_priv); | 6882 | gen6_enable_rps(dev_priv); |
6751 | 6883 | ||
6752 | if (IS_IRONLAKE_M(dev)) { | 6884 | if (IS_IRONLAKE_M(dev)) |
6753 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
6754 | if (!dev_priv->renderctx) | ||
6755 | goto skip_rc6; | ||
6756 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
6757 | if (!dev_priv->pwrctx) { | ||
6758 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6759 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6760 | dev_priv->renderctx = NULL; | ||
6761 | goto skip_rc6; | ||
6762 | } | ||
6763 | ironlake_enable_rc6(dev); | 6885 | ironlake_enable_rc6(dev); |
6764 | } | ||
6765 | 6886 | ||
6766 | skip_rc6: | ||
6767 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 6887 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
6768 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 6888 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
6769 | (unsigned long)dev); | 6889 | (unsigned long)dev); |
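The intel_display.c hunks above replace the open-coded RC6 context allocation in intel_modeset_init() with ironlake_setup_rc6()/ironlake_teardown_rc6(), so a failed second allocation rolls back the first instead of leaking it. A minimal standalone sketch of that allocate-with-rollback shape, with purely illustrative names (this is not the kernel code):

/* If the second allocation fails, the first is released so the caller
 * never sees a half-built state. malloc()/free() stand in for the
 * driver's context-page allocator.
 */
#include <stdlib.h>

struct rc6_ctx { void *renderctx; void *pwrctx; };

static void teardown(struct rc6_ctx *c)
{
	free(c->renderctx);  c->renderctx = NULL;
	free(c->pwrctx);     c->pwrctx = NULL;
}

static int setup(struct rc6_ctx *c, size_t page)
{
	if (!c->renderctx)
		c->renderctx = malloc(page);
	if (!c->renderctx)
		return -1;

	if (!c->pwrctx)
		c->pwrctx = malloc(page);
	if (!c->pwrctx) {
		teardown(c);            /* roll back the first allocation */
		return -1;
	}
	return 0;
}
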
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1f4242b682c8..51cb4e36997f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1639,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1639 | return 0; | 1639 | return 0; |
1640 | } | 1640 | } |
1641 | 1641 | ||
1642 | static bool | ||
1643 | intel_dp_detect_audio(struct drm_connector *connector) | ||
1644 | { | ||
1645 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1646 | struct edid *edid; | ||
1647 | bool has_audio = false; | ||
1648 | |||
1649 | edid = drm_get_edid(connector, &intel_dp->adapter); | ||
1650 | if (edid) { | ||
1651 | has_audio = drm_detect_monitor_audio(edid); | ||
1652 | |||
1653 | connector->display_info.raw_edid = NULL; | ||
1654 | kfree(edid); | ||
1655 | } | ||
1656 | |||
1657 | return has_audio; | ||
1658 | } | ||
1659 | |||
1642 | static int | 1660 | static int |
1643 | intel_dp_set_property(struct drm_connector *connector, | 1661 | intel_dp_set_property(struct drm_connector *connector, |
1644 | struct drm_property *property, | 1662 | struct drm_property *property, |
@@ -1652,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector, | |||
1652 | return ret; | 1670 | return ret; |
1653 | 1671 | ||
1654 | if (property == intel_dp->force_audio_property) { | 1672 | if (property == intel_dp->force_audio_property) { |
1655 | if (val == intel_dp->force_audio) | 1673 | int i = val; |
1674 | bool has_audio; | ||
1675 | |||
1676 | if (i == intel_dp->force_audio) | ||
1656 | return 0; | 1677 | return 0; |
1657 | 1678 | ||
1658 | intel_dp->force_audio = val; | 1679 | intel_dp->force_audio = i; |
1659 | 1680 | ||
1660 | if (val > 0 && intel_dp->has_audio) | 1681 | if (i == 0) |
1661 | return 0; | 1682 | has_audio = intel_dp_detect_audio(connector); |
1662 | if (val < 0 && !intel_dp->has_audio) | 1683 | else |
1684 | has_audio = i > 0; | ||
1685 | |||
1686 | if (has_audio == intel_dp->has_audio) | ||
1663 | return 0; | 1687 | return 0; |
1664 | 1688 | ||
1665 | intel_dp->has_audio = val > 0; | 1689 | intel_dp->has_audio = has_audio; |
1666 | goto done; | 1690 | goto done; |
1667 | } | 1691 | } |
1668 | 1692 | ||
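The intel_dp.c change above (and the matching intel_hdmi.c and intel_sdvo.c hunks below) turn the force_audio property into a true tri-state: a negative value forces audio off, a positive value forces it on, and zero re-probes the sink's EDID instead of keeping a stale flag. A small sketch of that decision, where probe_edid_audio() is a stand-in for the driver's detect helper rather than a real API:

#include <stdbool.h>

static bool resolve_audio(int force_audio,
			  bool (*probe_edid_audio)(void *ctx), void *ctx)
{
	if (force_audio == 0)
		return probe_edid_audio(ctx);   /* automatic: trust the EDID */
	return force_audio > 0;                 /* explicit user override on/off */
}
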
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 74db2557d644..2c431049963c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -298,7 +298,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
298 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | 298 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
299 | u16 *blue, int regno); | 299 | u16 *blue, int regno); |
300 | extern void intel_enable_clock_gating(struct drm_device *dev); | 300 | extern void intel_enable_clock_gating(struct drm_device *dev); |
301 | extern void intel_disable_clock_gating(struct drm_device *dev); | ||
302 | extern void ironlake_enable_drps(struct drm_device *dev); | 301 | extern void ironlake_enable_drps(struct drm_device *dev); |
303 | extern void ironlake_disable_drps(struct drm_device *dev); | 302 | extern void ironlake_disable_drps(struct drm_device *dev); |
304 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); | 303 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 0d0273e7b029..c635c9e357b9 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) | |||
251 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | 251 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); |
252 | } | 252 | } |
253 | 253 | ||
254 | static bool | ||
255 | intel_hdmi_detect_audio(struct drm_connector *connector) | ||
256 | { | ||
257 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | ||
258 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
259 | struct edid *edid; | ||
260 | bool has_audio = false; | ||
261 | |||
262 | edid = drm_get_edid(connector, | ||
263 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | ||
264 | if (edid) { | ||
265 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
266 | has_audio = drm_detect_monitor_audio(edid); | ||
267 | |||
268 | connector->display_info.raw_edid = NULL; | ||
269 | kfree(edid); | ||
270 | } | ||
271 | |||
272 | return has_audio; | ||
273 | } | ||
274 | |||
254 | static int | 275 | static int |
255 | intel_hdmi_set_property(struct drm_connector *connector, | 276 | intel_hdmi_set_property(struct drm_connector *connector, |
256 | struct drm_property *property, | 277 | struct drm_property *property, |
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
264 | return ret; | 285 | return ret; |
265 | 286 | ||
266 | if (property == intel_hdmi->force_audio_property) { | 287 | if (property == intel_hdmi->force_audio_property) { |
267 | if (val == intel_hdmi->force_audio) | 288 | int i = val; |
289 | bool has_audio; | ||
290 | |||
291 | if (i == intel_hdmi->force_audio) | ||
268 | return 0; | 292 | return 0; |
269 | 293 | ||
270 | intel_hdmi->force_audio = val; | 294 | intel_hdmi->force_audio = i; |
271 | 295 | ||
272 | if (val > 0 && intel_hdmi->has_audio) | 296 | if (i == 0) |
273 | return 0; | 297 | has_audio = intel_hdmi_detect_audio(connector); |
274 | if (val < 0 && !intel_hdmi->has_audio) | 298 | else |
299 | has_audio = i > 0; | ||
300 | |||
301 | if (has_audio == intel_hdmi->has_audio) | ||
275 | return 0; | 302 | return 0; |
276 | 303 | ||
277 | intel_hdmi->has_audio = val > 0; | 304 | intel_hdmi->has_audio = has_audio; |
278 | goto done; | 305 | goto done; |
279 | } | 306 | } |
280 | 307 | ||
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ace8d5d30dd2..bcdba7bd5cfa 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -261,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
261 | return true; | 261 | return true; |
262 | } | 262 | } |
263 | 263 | ||
264 | /* Make sure pre-965s set dither correctly */ | ||
265 | if (INTEL_INFO(dev)->gen < 4) { | ||
266 | if (dev_priv->lvds_dither) | ||
267 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
268 | } | ||
269 | |||
270 | /* Native modes don't need fitting */ | 264 | /* Native modes don't need fitting */ |
271 | if (adjusted_mode->hdisplay == mode->hdisplay && | 265 | if (adjusted_mode->hdisplay == mode->hdisplay && |
272 | adjusted_mode->vdisplay == mode->vdisplay) | 266 | adjusted_mode->vdisplay == mode->vdisplay) |
@@ -374,10 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
374 | } | 368 | } |
375 | 369 | ||
376 | out: | 370 | out: |
371 | /* If not enabling scaling, be consistent and always use 0. */ | ||
377 | if ((pfit_control & PFIT_ENABLE) == 0) { | 372 | if ((pfit_control & PFIT_ENABLE) == 0) { |
378 | pfit_control = 0; | 373 | pfit_control = 0; |
379 | pfit_pgm_ratios = 0; | 374 | pfit_pgm_ratios = 0; |
380 | } | 375 | } |
376 | |||
377 | /* Make sure pre-965 set dither correctly */ | ||
378 | if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) | ||
379 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
380 | |||
381 | if (pfit_control != intel_lvds->pfit_control || | 381 | if (pfit_control != intel_lvds->pfit_control || |
382 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { | 382 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { |
383 | intel_lvds->pfit_control = pfit_control; | 383 | intel_lvds->pfit_control = pfit_control; |
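The intel_lvds.c hunk above moves the pre-965 dither setup after the block that zeroes pfit_control when scaling is disabled, so the dither bit is no longer clobbered by that reset. A small sketch of the ordering issue, with placeholder bit definitions (not the real register layout):

#include <assert.h>
#include <stdint.h>

#define PFIT_ENABLE_BIT   (1u << 31)
#define DITHER_ENABLE_BIT (1u << 3)

static uint32_t build_pfit(int scaling_needed, int want_dither)
{
	uint32_t pfit_control = scaling_needed ? PFIT_ENABLE_BIT : 0;

	if ((pfit_control & PFIT_ENABLE_BIT) == 0)
		pfit_control = 0;               /* reset everything when not scaling */

	if (want_dither)
		pfit_control |= DITHER_ENABLE_BIT;  /* applied after the reset, so it survives */

	return pfit_control;
}

int main(void)
{
	/* dither requested, no scaling: the bit must still be present */
	assert(build_pfit(0, 1) == DITHER_ENABLE_BIT);
	return 0;
}
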
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index f295a7aaadf9..64fd64443ca6 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -26,6 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
29 | #include <linux/acpi_io.h> | ||
29 | #include <acpi/video.h> | 30 | #include <acpi/video.h> |
30 | 31 | ||
31 | #include "drmP.h" | 32 | #include "drmP.h" |
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev) | |||
476 | return -ENOTSUPP; | 477 | return -ENOTSUPP; |
477 | } | 478 | } |
478 | 479 | ||
479 | base = ioremap(asls, OPREGION_SIZE); | 480 | base = acpi_os_ioremap(asls, OPREGION_SIZE); |
480 | if (!base) | 481 | if (!base) |
481 | return -ENOMEM; | 482 | return -ENOMEM; |
482 | 483 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c65992df458d..d860abeda70f 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -30,8 +30,6 @@ | |||
30 | 30 | ||
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ | ||
34 | |||
35 | void | 33 | void |
36 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 34 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
37 | struct drm_display_mode *adjusted_mode) | 35 | struct drm_display_mode *adjusted_mode) |
@@ -112,19 +110,6 @@ done: | |||
112 | dev_priv->pch_pf_size = (width << 16) | height; | 110 | dev_priv->pch_pf_size = (width << 16) | height; |
113 | } | 111 | } |
114 | 112 | ||
115 | static int is_backlight_combination_mode(struct drm_device *dev) | ||
116 | { | ||
117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
118 | |||
119 | if (INTEL_INFO(dev)->gen >= 4) | ||
120 | return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; | ||
121 | |||
122 | if (IS_GEN2(dev)) | ||
123 | return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | 113 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) |
129 | { | 114 | { |
130 | u32 val; | 115 | u32 val; |
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) | |||
181 | if (INTEL_INFO(dev)->gen < 4) | 166 | if (INTEL_INFO(dev)->gen < 4) |
182 | max &= ~1; | 167 | max &= ~1; |
183 | } | 168 | } |
184 | |||
185 | if (is_backlight_combination_mode(dev)) | ||
186 | max *= 0xff; | ||
187 | } | 169 | } |
188 | 170 | ||
189 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | 171 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); |
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev) | |||
201 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 183 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
202 | if (IS_PINEVIEW(dev)) | 184 | if (IS_PINEVIEW(dev)) |
203 | val >>= 1; | 185 | val >>= 1; |
204 | |||
205 | if (is_backlight_combination_mode(dev)){ | ||
206 | u8 lbpc; | ||
207 | |||
208 | val &= ~1; | ||
209 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | ||
210 | val *= lbpc; | ||
211 | val >>= 1; | ||
212 | } | ||
213 | } | 186 | } |
214 | 187 | ||
215 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); | 188 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); |
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
232 | 205 | ||
233 | if (HAS_PCH_SPLIT(dev)) | 206 | if (HAS_PCH_SPLIT(dev)) |
234 | return intel_pch_panel_set_backlight(dev, level); | 207 | return intel_pch_panel_set_backlight(dev, level); |
235 | |||
236 | if (is_backlight_combination_mode(dev)){ | ||
237 | u32 max = intel_panel_get_max_backlight(dev); | ||
238 | u8 lpbc; | ||
239 | |||
240 | lpbc = level * 0xfe / max + 1; | ||
241 | level /= lpbc; | ||
242 | pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); | ||
243 | } | ||
244 | |||
245 | tmp = I915_READ(BLC_PWM_CTL); | 208 | tmp = I915_READ(BLC_PWM_CTL); |
246 | if (IS_PINEVIEW(dev)) { | 209 | if (IS_PINEVIEW(dev)) { |
247 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | 210 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f6b9baa6a63d..445f27efe677 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -34,6 +34,14 @@ | |||
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
36 | 36 | ||
37 | static inline int ring_space(struct intel_ring_buffer *ring) | ||
38 | { | ||
39 | int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); | ||
40 | if (space < 0) | ||
41 | space += ring->size; | ||
42 | return space; | ||
43 | } | ||
44 | |||
37 | static u32 i915_gem_get_seqno(struct drm_device *dev) | 45 | static u32 i915_gem_get_seqno(struct drm_device *dev) |
38 | { | 46 | { |
39 | drm_i915_private_t *dev_priv = dev->dev_private; | 47 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
204 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) | 212 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) |
205 | i915_kernel_lost_context(ring->dev); | 213 | i915_kernel_lost_context(ring->dev); |
206 | else { | 214 | else { |
207 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; | 215 | ring->head = I915_READ_HEAD(ring); |
208 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 216 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
209 | ring->space = ring->head - (ring->tail + 8); | 217 | ring->space = ring_space(ring); |
210 | if (ring->space < 0) | ||
211 | ring->space += ring->size; | ||
212 | } | 218 | } |
213 | 219 | ||
214 | return 0; | 220 | return 0; |
@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) | |||
921 | } | 927 | } |
922 | 928 | ||
923 | ring->tail = 0; | 929 | ring->tail = 0; |
924 | ring->space = ring->head - 8; | 930 | ring->space = ring_space(ring); |
925 | 931 | ||
926 | return 0; | 932 | return 0; |
927 | } | 933 | } |
928 | 934 | ||
929 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | 935 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
930 | { | 936 | { |
931 | int reread = 0; | ||
932 | struct drm_device *dev = ring->dev; | 937 | struct drm_device *dev = ring->dev; |
933 | struct drm_i915_private *dev_priv = dev->dev_private; | 938 | struct drm_i915_private *dev_priv = dev->dev_private; |
934 | unsigned long end; | 939 | unsigned long end; |
935 | u32 head; | 940 | u32 head; |
936 | 941 | ||
942 | /* If the reported head position has wrapped or hasn't advanced, | ||
943 | * fallback to the slow and accurate path. | ||
944 | */ | ||
945 | head = intel_read_status_page(ring, 4); | ||
946 | if (head > ring->head) { | ||
947 | ring->head = head; | ||
948 | ring->space = ring_space(ring); | ||
949 | if (ring->space >= n) | ||
950 | return 0; | ||
951 | } | ||
952 | |||
937 | trace_i915_ring_wait_begin (dev); | 953 | trace_i915_ring_wait_begin (dev); |
938 | end = jiffies + 3 * HZ; | 954 | end = jiffies + 3 * HZ; |
939 | do { | 955 | do { |
940 | /* If the reported head position has wrapped or hasn't advanced, | 956 | ring->head = I915_READ_HEAD(ring); |
941 | * fallback to the slow and accurate path. | 957 | ring->space = ring_space(ring); |
942 | */ | ||
943 | head = intel_read_status_page(ring, 4); | ||
944 | if (reread) | ||
945 | head = I915_READ_HEAD(ring); | ||
946 | ring->head = head & HEAD_ADDR; | ||
947 | ring->space = ring->head - (ring->tail + 8); | ||
948 | if (ring->space < 0) | ||
949 | ring->space += ring->size; | ||
950 | if (ring->space >= n) { | 958 | if (ring->space >= n) { |
951 | trace_i915_ring_wait_end(dev); | 959 | trace_i915_ring_wait_end(dev); |
952 | return 0; | 960 | return 0; |
@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | |||
961 | msleep(1); | 969 | msleep(1); |
962 | if (atomic_read(&dev_priv->mm.wedged)) | 970 | if (atomic_read(&dev_priv->mm.wedged)) |
963 | return -EAGAIN; | 971 | return -EAGAIN; |
964 | reread = 1; | ||
965 | } while (!time_after(jiffies, end)); | 972 | } while (!time_after(jiffies, end)); |
966 | trace_i915_ring_wait_end (dev); | 973 | trace_i915_ring_wait_end (dev); |
967 | return -EBUSY; | 974 | return -EBUSY; |
@@ -1052,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | |||
1052 | } | 1059 | } |
1053 | 1060 | ||
1054 | static int gen6_ring_flush(struct intel_ring_buffer *ring, | 1061 | static int gen6_ring_flush(struct intel_ring_buffer *ring, |
1055 | u32 invalidate_domains, | 1062 | u32 invalidate, u32 flush) |
1056 | u32 flush_domains) | ||
1057 | { | 1063 | { |
1064 | uint32_t cmd; | ||
1058 | int ret; | 1065 | int ret; |
1059 | 1066 | ||
1060 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | 1067 | if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0) |
1061 | return 0; | 1068 | return 0; |
1062 | 1069 | ||
1063 | ret = intel_ring_begin(ring, 4); | 1070 | ret = intel_ring_begin(ring, 4); |
1064 | if (ret) | 1071 | if (ret) |
1065 | return ret; | 1072 | return ret; |
1066 | 1073 | ||
1067 | intel_ring_emit(ring, MI_FLUSH_DW); | 1074 | cmd = MI_FLUSH_DW; |
1068 | intel_ring_emit(ring, 0); | 1075 | if (invalidate & I915_GEM_GPU_DOMAINS) |
1076 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; | ||
1077 | intel_ring_emit(ring, cmd); | ||
1069 | intel_ring_emit(ring, 0); | 1078 | intel_ring_emit(ring, 0); |
1070 | intel_ring_emit(ring, 0); | 1079 | intel_ring_emit(ring, 0); |
1080 | intel_ring_emit(ring, MI_NOOP); | ||
1071 | intel_ring_advance(ring); | 1081 | intel_ring_advance(ring); |
1072 | return 0; | 1082 | return 0; |
1073 | } | 1083 | } |
@@ -1223,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring, | |||
1223 | } | 1233 | } |
1224 | 1234 | ||
1225 | static int blt_ring_flush(struct intel_ring_buffer *ring, | 1235 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
1226 | u32 invalidate_domains, | 1236 | u32 invalidate, u32 flush) |
1227 | u32 flush_domains) | ||
1228 | { | 1237 | { |
1238 | uint32_t cmd; | ||
1229 | int ret; | 1239 | int ret; |
1230 | 1240 | ||
1231 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | 1241 | if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0) |
1232 | return 0; | 1242 | return 0; |
1233 | 1243 | ||
1234 | ret = blt_ring_begin(ring, 4); | 1244 | ret = blt_ring_begin(ring, 4); |
1235 | if (ret) | 1245 | if (ret) |
1236 | return ret; | 1246 | return ret; |
1237 | 1247 | ||
1238 | intel_ring_emit(ring, MI_FLUSH_DW); | 1248 | cmd = MI_FLUSH_DW; |
1239 | intel_ring_emit(ring, 0); | 1249 | if (invalidate & I915_GEM_DOMAIN_RENDER) |
1250 | cmd |= MI_INVALIDATE_TLB; | ||
1251 | intel_ring_emit(ring, cmd); | ||
1240 | intel_ring_emit(ring, 0); | 1252 | intel_ring_emit(ring, 0); |
1241 | intel_ring_emit(ring, 0); | 1253 | intel_ring_emit(ring, 0); |
1254 | intel_ring_emit(ring, MI_NOOP); | ||
1242 | intel_ring_advance(ring); | 1255 | intel_ring_advance(ring); |
1243 | return 0; | 1256 | return 0; |
1244 | } | 1257 | } |
@@ -1292,6 +1305,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
1292 | return intel_init_ring_buffer(dev, ring); | 1305 | return intel_init_ring_buffer(dev, ring); |
1293 | } | 1306 | } |
1294 | 1307 | ||
1308 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | ||
1309 | { | ||
1310 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1311 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | ||
1312 | |||
1313 | *ring = render_ring; | ||
1314 | if (INTEL_INFO(dev)->gen >= 6) { | ||
1315 | ring->add_request = gen6_add_request; | ||
1316 | ring->irq_get = gen6_render_ring_get_irq; | ||
1317 | ring->irq_put = gen6_render_ring_put_irq; | ||
1318 | } else if (IS_GEN5(dev)) { | ||
1319 | ring->add_request = pc_render_add_request; | ||
1320 | ring->get_seqno = pc_render_get_seqno; | ||
1321 | } | ||
1322 | |||
1323 | ring->dev = dev; | ||
1324 | INIT_LIST_HEAD(&ring->active_list); | ||
1325 | INIT_LIST_HEAD(&ring->request_list); | ||
1326 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
1327 | |||
1328 | ring->size = size; | ||
1329 | ring->effective_size = ring->size; | ||
1330 | if (IS_I830(ring->dev)) | ||
1331 | ring->effective_size -= 128; | ||
1332 | |||
1333 | ring->map.offset = start; | ||
1334 | ring->map.size = size; | ||
1335 | ring->map.type = 0; | ||
1336 | ring->map.flags = 0; | ||
1337 | ring->map.mtrr = 0; | ||
1338 | |||
1339 | drm_core_ioremap_wc(&ring->map, dev); | ||
1340 | if (ring->map.handle == NULL) { | ||
1341 | DRM_ERROR("can not ioremap virtual address for" | ||
1342 | " ring buffer\n"); | ||
1343 | return -ENOMEM; | ||
1344 | } | ||
1345 | |||
1346 | ring->virtual_start = (void __force __iomem *)ring->map.handle; | ||
1347 | return 0; | ||
1348 | } | ||
1349 | |||
1295 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | 1350 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
1296 | { | 1351 | { |
1297 | drm_i915_private_t *dev_priv = dev->dev_private; | 1352 | drm_i915_private_t *dev_priv = dev->dev_private; |
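The intel_ringbuffer.c hunks above factor the free-space computation into a single ring_space() helper used from the init, wrap and wait paths. The arithmetic is plain circular-buffer accounting; a standalone sketch with illustrative field names:

/* Free bytes in a circular ring are head - (tail + 8), wrapped by the
 * ring size when negative. The 8-byte slack keeps a completely full
 * ring distinguishable from an empty one (head == tail would otherwise
 * be ambiguous).
 */
struct ring { unsigned int head, tail, size; };

static int ring_space(const struct ring *r)
{
	int space = (int)r->head - (int)(r->tail + 8);
	if (space < 0)
		space += (int)r->size;
	return space;
}
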
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5b0abfa881fc..34306865a5df 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -14,22 +14,23 @@ struct intel_hw_status_page { | |||
14 | struct drm_i915_gem_object *obj; | 14 | struct drm_i915_gem_object *obj; |
15 | }; | 15 | }; |
16 | 16 | ||
17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) | 17 | #define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) |
18 | #define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) | ||
18 | 19 | ||
19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) | 20 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) |
20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | 21 | #define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) |
21 | 22 | ||
22 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) | 23 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) |
23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | 24 | #define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) |
24 | 25 | ||
25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) | 26 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) |
26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | 27 | #define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) |
27 | 28 | ||
28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) | 29 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) |
29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | 30 | #define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) |
30 | 31 | ||
31 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | ||
32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) | 32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) |
33 | #define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) | ||
33 | 34 | ||
34 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) | 35 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) |
35 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) | 36 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) |
@@ -166,4 +167,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); | |||
166 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); | 167 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
167 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | 168 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
168 | 169 | ||
170 | /* DRI warts */ | ||
171 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); | ||
172 | |||
169 | #endif /* _INTEL_RINGBUFFER_H_ */ | 173 | #endif /* _INTEL_RINGBUFFER_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 45cd37652a37..7c50cdce84f0 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -46,6 +46,7 @@ | |||
46 | SDVO_TV_MASK) | 46 | SDVO_TV_MASK) |
47 | 47 | ||
48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) | 48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) |
49 | #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) | ||
49 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) | 50 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) |
50 | #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) | 51 | #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) |
51 | 52 | ||
@@ -473,20 +474,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, | |||
473 | return false; | 474 | return false; |
474 | } | 475 | } |
475 | 476 | ||
476 | i = 3; | ||
477 | while (status == SDVO_CMD_STATUS_PENDING && i--) { | ||
478 | if (!intel_sdvo_read_byte(intel_sdvo, | ||
479 | SDVO_I2C_CMD_STATUS, | ||
480 | &status)) | ||
481 | return false; | ||
482 | } | ||
483 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
484 | DRM_DEBUG_KMS("command returns response %s [%d]\n", | ||
485 | status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???", | ||
486 | status); | ||
487 | return false; | ||
488 | } | ||
489 | |||
490 | return true; | 477 | return true; |
491 | } | 478 | } |
492 | 479 | ||
@@ -497,6 +484,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
497 | u8 status; | 484 | u8 status; |
498 | int i; | 485 | int i; |
499 | 486 | ||
487 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); | ||
488 | |||
500 | /* | 489 | /* |
501 | * The documentation states that all commands will be | 490 | * The documentation states that all commands will be |
502 | * processed within 15µs, and that we need only poll | 491 | * processed within 15µs, and that we need only poll |
@@ -505,14 +494,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
505 | * | 494 | * |
506 | * Check 5 times in case the hardware failed to read the docs. | 495 | * Check 5 times in case the hardware failed to read the docs. |
507 | */ | 496 | */ |
508 | do { | 497 | if (!intel_sdvo_read_byte(intel_sdvo, |
498 | SDVO_I2C_CMD_STATUS, | ||
499 | &status)) | ||
500 | goto log_fail; | ||
501 | |||
502 | while (status == SDVO_CMD_STATUS_PENDING && retry--) { | ||
503 | udelay(15); | ||
509 | if (!intel_sdvo_read_byte(intel_sdvo, | 504 | if (!intel_sdvo_read_byte(intel_sdvo, |
510 | SDVO_I2C_CMD_STATUS, | 505 | SDVO_I2C_CMD_STATUS, |
511 | &status)) | 506 | &status)) |
512 | return false; | 507 | goto log_fail; |
513 | } while (status == SDVO_CMD_STATUS_PENDING && --retry); | 508 | } |
514 | 509 | ||
515 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); | ||
516 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) | 510 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) |
517 | DRM_LOG_KMS("(%s)", cmd_status_names[status]); | 511 | DRM_LOG_KMS("(%s)", cmd_status_names[status]); |
518 | else | 512 | else |
@@ -533,7 +527,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
533 | return true; | 527 | return true; |
534 | 528 | ||
535 | log_fail: | 529 | log_fail: |
536 | DRM_LOG_KMS("\n"); | 530 | DRM_LOG_KMS("... failed\n"); |
537 | return false; | 531 | return false; |
538 | } | 532 | } |
539 | 533 | ||
@@ -550,6 +544,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
550 | static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, | 544 | static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, |
551 | u8 ddc_bus) | 545 | u8 ddc_bus) |
552 | { | 546 | { |
547 | /* This must be the immediately preceding write before the i2c xfer */ | ||
553 | return intel_sdvo_write_cmd(intel_sdvo, | 548 | return intel_sdvo_write_cmd(intel_sdvo, |
554 | SDVO_CMD_SET_CONTROL_BUS_SWITCH, | 549 | SDVO_CMD_SET_CONTROL_BUS_SWITCH, |
555 | &ddc_bus, 1); | 550 | &ddc_bus, 1); |
@@ -557,7 +552,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, | |||
557 | 552 | ||
558 | static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) | 553 | static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) |
559 | { | 554 | { |
560 | return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); | 555 | if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) |
556 | return false; | ||
557 | |||
558 | return intel_sdvo_read_response(intel_sdvo, NULL, 0); | ||
561 | } | 559 | } |
562 | 560 | ||
563 | static bool | 561 | static bool |
@@ -859,18 +857,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) | |||
859 | 857 | ||
860 | intel_dip_infoframe_csum(&avi_if); | 858 | intel_dip_infoframe_csum(&avi_if); |
861 | 859 | ||
862 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, | 860 | if (!intel_sdvo_set_value(intel_sdvo, |
861 | SDVO_CMD_SET_HBUF_INDEX, | ||
863 | set_buf_index, 2)) | 862 | set_buf_index, 2)) |
864 | return false; | 863 | return false; |
865 | 864 | ||
866 | for (i = 0; i < sizeof(avi_if); i += 8) { | 865 | for (i = 0; i < sizeof(avi_if); i += 8) { |
867 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, | 866 | if (!intel_sdvo_set_value(intel_sdvo, |
867 | SDVO_CMD_SET_HBUF_DATA, | ||
868 | data, 8)) | 868 | data, 8)) |
869 | return false; | 869 | return false; |
870 | data++; | 870 | data++; |
871 | } | 871 | } |
872 | 872 | ||
873 | return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, | 873 | return intel_sdvo_set_value(intel_sdvo, |
874 | SDVO_CMD_SET_HBUF_TXRATE, | ||
874 | &tx_rate, 1); | 875 | &tx_rate, 1); |
875 | } | 876 | } |
876 | 877 | ||
@@ -1359,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1359 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); | 1360 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); |
1360 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); | 1361 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); |
1361 | } | 1362 | } |
1362 | } | 1363 | } else |
1364 | status = connector_status_disconnected; | ||
1363 | connector->display_info.raw_edid = NULL; | 1365 | connector->display_info.raw_edid = NULL; |
1364 | kfree(edid); | 1366 | kfree(edid); |
1365 | } | 1367 | } |
@@ -1407,10 +1409,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1407 | 1409 | ||
1408 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1410 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1409 | ret = connector_status_disconnected; | 1411 | ret = connector_status_disconnected; |
1410 | else if (response & SDVO_TMDS_MASK) | 1412 | else if (IS_TMDS(intel_sdvo_connector)) |
1411 | ret = intel_sdvo_hdmi_sink_detect(connector); | 1413 | ret = intel_sdvo_hdmi_sink_detect(connector); |
1412 | else | 1414 | else { |
1413 | ret = connector_status_connected; | 1415 | struct edid *edid; |
1416 | |||
1417 | /* if we have an edid check it matches the connection */ | ||
1418 | edid = intel_sdvo_get_edid(connector); | ||
1419 | if (edid == NULL) | ||
1420 | edid = intel_sdvo_get_analog_edid(connector); | ||
1421 | if (edid != NULL) { | ||
1422 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1423 | ret = connector_status_disconnected; | ||
1424 | else | ||
1425 | ret = connector_status_connected; | ||
1426 | connector->display_info.raw_edid = NULL; | ||
1427 | kfree(edid); | ||
1428 | } else | ||
1429 | ret = connector_status_connected; | ||
1430 | } | ||
1414 | 1431 | ||
1415 | /* May update encoder flag for like clock for SDVO TV, etc.*/ | 1432 | /* May update encoder flag for like clock for SDVO TV, etc.*/ |
1416 | if (ret == connector_status_connected) { | 1433 | if (ret == connector_status_connected) { |
@@ -1446,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1446 | edid = intel_sdvo_get_analog_edid(connector); | 1463 | edid = intel_sdvo_get_analog_edid(connector); |
1447 | 1464 | ||
1448 | if (edid != NULL) { | 1465 | if (edid != NULL) { |
1449 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 1466 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1467 | bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); | ||
1468 | bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector); | ||
1469 | |||
1470 | if (connector_is_digital == monitor_is_digital) { | ||
1450 | drm_mode_connector_update_edid_property(connector, edid); | 1471 | drm_mode_connector_update_edid_property(connector, edid); |
1451 | drm_add_edid_modes(connector, edid); | 1472 | drm_add_edid_modes(connector, edid); |
1452 | } | 1473 | } |
1474 | |||
1453 | connector->display_info.raw_edid = NULL; | 1475 | connector->display_info.raw_edid = NULL; |
1454 | kfree(edid); | 1476 | kfree(edid); |
1455 | } | 1477 | } |
@@ -1668,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
1668 | kfree(connector); | 1690 | kfree(connector); |
1669 | } | 1691 | } |
1670 | 1692 | ||
1693 | static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) | ||
1694 | { | ||
1695 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | ||
1696 | struct edid *edid; | ||
1697 | bool has_audio = false; | ||
1698 | |||
1699 | if (!intel_sdvo->is_hdmi) | ||
1700 | return false; | ||
1701 | |||
1702 | edid = intel_sdvo_get_edid(connector); | ||
1703 | if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1704 | has_audio = drm_detect_monitor_audio(edid); | ||
1705 | |||
1706 | return has_audio; | ||
1707 | } | ||
1708 | |||
1671 | static int | 1709 | static int |
1672 | intel_sdvo_set_property(struct drm_connector *connector, | 1710 | intel_sdvo_set_property(struct drm_connector *connector, |
1673 | struct drm_property *property, | 1711 | struct drm_property *property, |
@@ -1684,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1684 | return ret; | 1722 | return ret; |
1685 | 1723 | ||
1686 | if (property == intel_sdvo_connector->force_audio_property) { | 1724 | if (property == intel_sdvo_connector->force_audio_property) { |
1687 | if (val == intel_sdvo_connector->force_audio) | 1725 | int i = val; |
1726 | bool has_audio; | ||
1727 | |||
1728 | if (i == intel_sdvo_connector->force_audio) | ||
1688 | return 0; | 1729 | return 0; |
1689 | 1730 | ||
1690 | intel_sdvo_connector->force_audio = val; | 1731 | intel_sdvo_connector->force_audio = i; |
1691 | 1732 | ||
1692 | if (val > 0 && intel_sdvo->has_hdmi_audio) | 1733 | if (i == 0) |
1693 | return 0; | 1734 | has_audio = intel_sdvo_detect_hdmi_audio(connector); |
1694 | if (val < 0 && !intel_sdvo->has_hdmi_audio) | 1735 | else |
1736 | has_audio = i > 0; | ||
1737 | |||
1738 | if (has_audio == intel_sdvo->has_hdmi_audio) | ||
1695 | return 0; | 1739 | return 0; |
1696 | 1740 | ||
1697 | intel_sdvo->has_hdmi_audio = val > 0; | 1741 | intel_sdvo->has_hdmi_audio = has_audio; |
1698 | goto done; | 1742 | goto done; |
1699 | } | 1743 | } |
1700 | 1744 | ||
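The intel_sdvo.c hunks above move all status polling out of intel_sdvo_write_cmd() and into intel_sdvo_read_response(), which reads the status once and then re-polls at 15µs intervals while the device still reports pending. A generic sketch of that bounded poll, with stand-in names and status codes:

#include <stdbool.h>

enum { STATUS_SUCCESS = 0, STATUS_PENDING = 1 };

static bool wait_for_response(int (*read_status)(void *ctx),
			      void (*delay_us)(unsigned int us),
			      void *ctx, int retries)
{
	int status = read_status(ctx);

	while (status == STATUS_PENDING && retries--) {
		delay_us(15);                   /* spec: commands complete within 15us */
		status = read_status(ctx);
	}
	return status == STATUS_SUCCESS;
}
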
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 93206e4eaa6f..fe4a53a50b83 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = { | |||
1234 | * \return false if TV is disconnected. | 1234 | * \return false if TV is disconnected. |
1235 | */ | 1235 | */ |
1236 | static int | 1236 | static int |
1237 | intel_tv_detect_type (struct intel_tv *intel_tv) | 1237 | intel_tv_detect_type (struct intel_tv *intel_tv, |
1238 | struct drm_connector *connector) | ||
1238 | { | 1239 | { |
1239 | struct drm_encoder *encoder = &intel_tv->base.base; | 1240 | struct drm_encoder *encoder = &intel_tv->base.base; |
1240 | struct drm_device *dev = encoder->dev; | 1241 | struct drm_device *dev = encoder->dev; |
@@ -1245,11 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1245 | int type; | 1246 | int type; |
1246 | 1247 | ||
1247 | /* Disable TV interrupts around load detect or we'll recurse */ | 1248 | /* Disable TV interrupts around load detect or we'll recurse */ |
1248 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1249 | if (connector->polled & DRM_CONNECTOR_POLL_HPD) { |
1249 | i915_disable_pipestat(dev_priv, 0, | 1250 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1250 | PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1251 | i915_disable_pipestat(dev_priv, 0, |
1251 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1252 | PIPE_HOTPLUG_INTERRUPT_ENABLE | |
1252 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1253 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); |
1254 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1255 | } | ||
1253 | 1256 | ||
1254 | save_tv_dac = tv_dac = I915_READ(TV_DAC); | 1257 | save_tv_dac = tv_dac = I915_READ(TV_DAC); |
1255 | save_tv_ctl = tv_ctl = I915_READ(TV_CTL); | 1258 | save_tv_ctl = tv_ctl = I915_READ(TV_CTL); |
@@ -1302,11 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1302 | I915_WRITE(TV_CTL, save_tv_ctl); | 1305 | I915_WRITE(TV_CTL, save_tv_ctl); |
1303 | 1306 | ||
1304 | /* Restore interrupt config */ | 1307 | /* Restore interrupt config */ |
1305 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1308 | if (connector->polled & DRM_CONNECTOR_POLL_HPD) { |
1306 | i915_enable_pipestat(dev_priv, 0, | 1309 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1307 | PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1310 | i915_enable_pipestat(dev_priv, 0, |
1308 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1311 | PIPE_HOTPLUG_INTERRUPT_ENABLE | |
1309 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1312 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); |
1313 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1314 | } | ||
1310 | 1315 | ||
1311 | return type; | 1316 | return type; |
1312 | } | 1317 | } |
@@ -1356,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1356 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1361 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1357 | 1362 | ||
1358 | if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { | 1363 | if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { |
1359 | type = intel_tv_detect_type(intel_tv); | 1364 | type = intel_tv_detect_type(intel_tv, connector); |
1360 | } else if (force) { | 1365 | } else if (force) { |
1361 | struct drm_crtc *crtc; | 1366 | struct drm_crtc *crtc; |
1362 | int dpms_mode; | 1367 | int dpms_mode; |
@@ -1364,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1364 | crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, | 1369 | crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, |
1365 | &mode, &dpms_mode); | 1370 | &mode, &dpms_mode); |
1366 | if (crtc) { | 1371 | if (crtc) { |
1367 | type = intel_tv_detect_type(intel_tv); | 1372 | type = intel_tv_detect_type(intel_tv, connector); |
1368 | intel_release_load_detect_pipe(&intel_tv->base, connector, | 1373 | intel_release_load_detect_pipe(&intel_tv->base, connector, |
1369 | dpms_mode); | 1374 | dpms_mode); |
1370 | } else | 1375 | } else |
@@ -1658,6 +1663,18 @@ intel_tv_init(struct drm_device *dev) | |||
1658 | intel_encoder = &intel_tv->base; | 1663 | intel_encoder = &intel_tv->base; |
1659 | connector = &intel_connector->base; | 1664 | connector = &intel_connector->base; |
1660 | 1665 | ||
1666 | /* The documentation, for the older chipsets at least, recommend | ||
1667 | * using a polling method rather than hotplug detection for TVs. | ||
1668 | * This is because in order to perform the hotplug detection, the PLLs | ||
1669 | * for the TV must be kept alive increasing power drain and starving | ||
1670 | * bandwidth from other encoders. Notably for instance, it causes | ||
1671 | * pipe underruns on Crestline when this encoder is supposedly idle. | ||
1672 | * | ||
1673 | * More recent chipsets favour HDMI rather than integrated S-Video. | ||
1674 | */ | ||
1675 | connector->polled = | ||
1676 | DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
1677 | |||
1661 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1678 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
1662 | DRM_MODE_CONNECTOR_SVIDEO); | 1679 | DRM_MODE_CONNECTOR_SVIDEO); |
1663 | 1680 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 2aef5cd3acf5..6bdab891c64e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
6228 | entry->tvconf.has_component_output = false; | 6228 | entry->tvconf.has_component_output = false; |
6229 | break; | 6229 | break; |
6230 | case OUTPUT_LVDS: | 6230 | case OUTPUT_LVDS: |
6231 | if ((conn & 0x00003f00) != 0x10) | 6231 | if ((conn & 0x00003f00) >> 8 != 0x10) |
6232 | entry->lvdsconf.use_straps_for_mode = true; | 6232 | entry->lvdsconf.use_straps_for_mode = true; |
6233 | entry->lvdsconf.use_power_scripts = true; | 6233 | entry->lvdsconf.use_power_scripts = true; |
6234 | break; | 6234 | break; |
@@ -6310,6 +6310,9 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb) | |||
6310 | static bool | 6310 | static bool |
6311 | apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) | 6311 | apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) |
6312 | { | 6312 | { |
6313 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
6314 | struct dcb_table *dcb = &dev_priv->vbios.dcb; | ||
6315 | |||
6313 | /* Dell Precision M6300 | 6316 | /* Dell Precision M6300 |
6314 | * DCB entry 2: 02025312 00000010 | 6317 | * DCB entry 2: 02025312 00000010 |
6315 | * DCB entry 3: 02026312 00000020 | 6318 | * DCB entry 3: 02026312 00000020 |
@@ -6327,6 +6330,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) | |||
6327 | return false; | 6330 | return false; |
6328 | } | 6331 | } |
6329 | 6332 | ||
6333 | /* GeForce3 Ti 200 | ||
6334 | * | ||
6335 | * DCB reports an LVDS output that should be TMDS: | ||
6336 | * DCB entry 1: f2005014 ffffffff | ||
6337 | */ | ||
6338 | if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) { | ||
6339 | if (*conn == 0xf2005014 && *conf == 0xffffffff) { | ||
6340 | fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1); | ||
6341 | return false; | ||
6342 | } | ||
6343 | } | ||
6344 | |||
6330 | return true; | 6345 | return true; |
6331 | } | 6346 | } |
6332 | 6347 | ||
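The DCB 1.5 LVDS fix above addresses a mask-without-shift bug: the masked field (bits 8..13 of conn) is a multiple of 0x100, so comparing it directly against 0x10 could never match and use_straps_for_mode was effectively always set. A small standalone demonstration of the before/after comparison, using a made-up conn value:

    /* Illustrative sketch: why the DCB 1.5 LVDS check needed the ">> 8". */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t conn = 0x00021010;   /* hypothetical entry, field 0x10 in bits 8..13 */

        /* Old test: mask only -- compares 0x1000 against 0x10, never equal. */
        int old_use_straps = ((conn & 0x00003f00) != 0x10);

        /* Fixed test: mask and shift the field down before comparing. */
        int new_use_straps = (((conn & 0x00003f00) >> 8) != 0x10);

        printf("old: use_straps_for_mode=%d (always true)\n", old_use_straps);
        printf("new: use_straps_for_mode=%d\n", new_use_straps);
        return 0;
    }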
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a7fae26f4654..a52184007f5f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
50 | 50 | ||
51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); | 51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); |
52 | nouveau_vm_put(&nvbo->vma); | 52 | if (nvbo->vma.node) { |
53 | nouveau_vm_unmap(&nvbo->vma); | ||
54 | nouveau_vm_put(&nvbo->vma); | ||
55 | } | ||
53 | kfree(nvbo); | 56 | kfree(nvbo); |
54 | } | 57 | } |
55 | 58 | ||
@@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
128 | } | 131 | } |
129 | } | 132 | } |
130 | 133 | ||
134 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; | ||
131 | nouveau_bo_placement_set(nvbo, flags, 0); | 135 | nouveau_bo_placement_set(nvbo, flags, 0); |
132 | 136 | ||
133 | nvbo->channel = chan; | 137 | nvbo->channel = chan; |
@@ -166,17 +170,17 @@ static void | |||
166 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | 170 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
167 | { | 171 | { |
168 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | 172 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); |
173 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
169 | 174 | ||
170 | if (dev_priv->card_type == NV_10 && | 175 | if (dev_priv->card_type == NV_10 && |
171 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { | 176 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
177 | nvbo->bo.mem.num_pages < vram_pages / 2) { | ||
172 | /* | 178 | /* |
173 | * Make sure that the color and depth buffers are handled | 179 | * Make sure that the color and depth buffers are handled |
174 | * by independent memory controller units. Up to a 9x | 180 | * by independent memory controller units. Up to a 9x |
175 | * speed up when alpha-blending and depth-test are enabled | 181 | * speed up when alpha-blending and depth-test are enabled |
176 | * at the same time. | 182 | * at the same time. |
177 | */ | 183 | */ |
178 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
179 | |||
180 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { | 184 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { |
181 | nvbo->placement.fpfn = vram_pages / 2; | 185 | nvbo->placement.fpfn = vram_pages / 2; |
182 | nvbo->placement.lpfn = ~0; | 186 | nvbo->placement.lpfn = ~0; |
@@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
785 | if (ret) | 789 | if (ret) |
786 | goto out; | 790 | goto out; |
787 | 791 | ||
788 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); | 792 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); |
789 | out: | 793 | out: |
790 | ttm_bo_mem_put(bo, &tmp_mem); | 794 | ttm_bo_mem_put(bo, &tmp_mem); |
791 | return ret; | 795 | return ret; |
@@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
811 | if (ret) | 815 | if (ret) |
812 | return ret; | 816 | return ret; |
813 | 817 | ||
814 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); | 818 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); |
815 | if (ret) | 819 | if (ret) |
816 | goto out; | 820 | goto out; |
817 | 821 | ||
818 | ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); | 822 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); |
819 | if (ret) | 823 | if (ret) |
820 | goto out; | 824 | goto out; |
821 | 825 | ||
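Two related changes appear above: nvbo->bo.mem.num_pages is now initialised before the placement is chosen, and the NV1x colour/depth split is only applied when the buffer fits in half of VRAM. A minimal standalone sketch of that placement decision, assuming example sizes and a toy ZETA flag (not the driver's data structures):

    /* Illustrative sketch of the placement split: depth (ZETA) buffers go to
     * the upper half of VRAM, colour buffers to the lower half, but only when
     * the object is smaller than half of VRAM. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    static void pick_range(uint64_t vram_size, uint64_t bo_size, int is_zeta,
                           unsigned long *fpfn, unsigned long *lpfn)
    {
        unsigned long vram_pages = vram_size >> PAGE_SHIFT;
        unsigned long bo_pages   = bo_size   >> PAGE_SHIFT;

        if (bo_pages < vram_pages / 2) {
            if (is_zeta) {          /* depth buffer -> upper half */
                *fpfn = vram_pages / 2;
                *lpfn = ~0ul;
            } else {                /* colour buffer -> lower half */
                *fpfn = 0;
                *lpfn = vram_pages / 2;
            }
        } else {                    /* too big: leave it unrestricted */
            *fpfn = 0;
            *lpfn = ~0ul;
        }
    }

    int main(void)
    {
        unsigned long fpfn, lpfn;
        pick_range(64ull << 20, 4ull << 20, 1, &fpfn, &lpfn);  /* 64 MiB VRAM, 4 MiB zeta bo */
        printf("fpfn=%lu lpfn=%lu\n", fpfn, lpfn);
        return 0;
    }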
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a21e00076839..390d82c3c4b0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector) | |||
507 | int high_w = 0, high_h = 0, high_v = 0; | 507 | int high_w = 0, high_h = 0, high_v = 0; |
508 | 508 | ||
509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { | 509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { |
510 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
510 | if (helper->mode_valid(connector, mode) != MODE_OK || | 511 | if (helper->mode_valid(connector, mode) != MODE_OK || |
511 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 512 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) |
512 | continue; | 513 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 65699bfaaaea..b368ed74aad7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
83 | return ret; | 83 | return ret; |
84 | 84 | ||
85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ | 85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ |
86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); | 86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, |
87 | &chan->m2mf_ntfy); | ||
87 | if (ret) | 88 | if (ret) |
88 | return ret; | 89 | return ret; |
89 | 90 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 13bb672a16f4..f658a04eecf9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -234,9 +234,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
234 | pci_set_power_state(pdev, PCI_D3hot); | 234 | pci_set_power_state(pdev, PCI_D3hot); |
235 | } | 235 | } |
236 | 236 | ||
237 | acquire_console_sem(); | 237 | console_lock(); |
238 | nouveau_fbcon_set_suspend(dev, 1); | 238 | nouveau_fbcon_set_suspend(dev, 1); |
239 | release_console_sem(); | 239 | console_unlock(); |
240 | nouveau_fbcon_restore_accel(dev); | 240 | nouveau_fbcon_restore_accel(dev); |
241 | return 0; | 241 | return 0; |
242 | 242 | ||
@@ -359,9 +359,9 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
359 | nv_crtc->lut.depth = 0; | 359 | nv_crtc->lut.depth = 0; |
360 | } | 360 | } |
361 | 361 | ||
362 | acquire_console_sem(); | 362 | console_lock(); |
363 | nouveau_fbcon_set_suspend(dev, 0); | 363 | nouveau_fbcon_set_suspend(dev, 0); |
364 | release_console_sem(); | 364 | console_unlock(); |
365 | 365 | ||
366 | nouveau_fbcon_zfill_all(dev); | 366 | nouveau_fbcon_zfill_all(dev); |
367 | 367 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 01bffc4412d2..982d70b12722 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -848,14 +848,12 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev, | |||
848 | struct nouveau_fence *fence); | 848 | struct nouveau_fence *fence); |
849 | extern const struct ttm_mem_type_manager_func nouveau_vram_manager; | 849 | extern const struct ttm_mem_type_manager_func nouveau_vram_manager; |
850 | 850 | ||
851 | /* nvc0_vram.c */ | ||
852 | extern const struct ttm_mem_type_manager_func nvc0_vram_manager; | ||
853 | |||
854 | /* nouveau_notifier.c */ | 851 | /* nouveau_notifier.c */ |
855 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); | 852 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); |
856 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); | 853 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); |
857 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, | 854 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, |
858 | int cout, uint32_t *offset); | 855 | int cout, uint32_t start, uint32_t end, |
856 | uint32_t *offset); | ||
859 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); | 857 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); |
860 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, | 858 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, |
861 | struct drm_file *); | 859 | struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 26347b7cd872..b0fb9bdcddb7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, | 725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, |
726 | mem->page_alignment << PAGE_SHIFT, size_nc, | 726 | mem->page_alignment << PAGE_SHIFT, size_nc, |
727 | (nvbo->tile_flags >> 8) & 0xff, &node); | 727 | (nvbo->tile_flags >> 8) & 0xff, &node); |
728 | if (ret) | 728 | if (ret) { |
729 | return ret; | 729 | mem->mm_node = NULL; |
730 | return (ret == -ENOSPC) ? 0 : ret; | ||
731 | } | ||
730 | 732 | ||
731 | node->page_shift = 12; | 733 | node->page_shift = 12; |
732 | if (nvbo->vma.node) | 734 | if (nvbo->vma.node) |
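The hunk above changes the error contract of the VRAM manager: when the backend runs out of space, mem->mm_node is cleared and 0 is returned instead of an error, so the caller can treat "no placement" as a soft failure, evict something and retry. A toy standalone sketch of that contract (the allocator and types below are made up, not TTM):

    #include <stdio.h>
    #include <errno.h>
    #include <stddef.h>

    struct mem_reg { void *mm_node; };

    /* Toy backend: pretend VRAM is full. */
    static int backend_get(void **node) { *node = NULL; return -ENOSPC; }

    static int manager_new(struct mem_reg *mem)
    {
        void *node;
        int ret = backend_get(&node);
        if (ret) {
            mem->mm_node = NULL;               /* tell the caller: nothing placed */
            return (ret == -ENOSPC) ? 0 : ret; /* out-of-space is not fatal here  */
        }
        mem->mm_node = node;
        return 0;
    }

    int main(void)
    {
        struct mem_reg mem;
        int ret = manager_new(&mem);
        printf("ret=%d node=%p (caller may now evict and retry)\n", ret, mem.mm_node);
        return 0;
    }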
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c index 8844b50c3e54..7609756b6faf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c | |||
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | |||
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
125 | 125 | ||
126 | return -ENOMEM; | 126 | return -ENOSPC; |
127 | } | 127 | } |
128 | 128 | ||
129 | int | 129 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index fe29d604b820..5ea167623a82 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, | |||
96 | 96 | ||
97 | int | 97 | int |
98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | 98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, |
99 | int size, uint32_t *b_offset) | 99 | int size, uint32_t start, uint32_t end, |
100 | uint32_t *b_offset) | ||
100 | { | 101 | { |
101 | struct drm_device *dev = chan->dev; | 102 | struct drm_device *dev = chan->dev; |
102 | struct nouveau_gpuobj *nobj = NULL; | 103 | struct nouveau_gpuobj *nobj = NULL; |
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
104 | uint32_t offset; | 105 | uint32_t offset; |
105 | int target, ret; | 106 | int target, ret; |
106 | 107 | ||
107 | mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); | 108 | mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, |
109 | start, end, 0); | ||
108 | if (mem) | 110 | if (mem) |
109 | mem = drm_mm_get_block(mem, size, 0); | 111 | mem = drm_mm_get_block_range(mem, size, 0, start, end); |
110 | if (!mem) { | 112 | if (!mem) { |
111 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); | 113 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); |
112 | return -ENOMEM; | 114 | return -ENOMEM; |
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | |||
177 | if (IS_ERR(chan)) | 179 | if (IS_ERR(chan)) |
178 | return PTR_ERR(chan); | 180 | return PTR_ERR(chan); |
179 | 181 | ||
180 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); | 182 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, |
183 | &na->offset); | ||
181 | nouveau_channel_put(&chan); | 184 | nouveau_channel_put(&chan); |
182 | return ret; | 185 | return ret; |
183 | } | 186 | } |
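With the changes above, nouveau_notifier_alloc() takes a start/end window and uses drm_mm_search_free_in_range()/drm_mm_get_block_range(), so the internal m2mf notifier is pinned to the tail of the notifier block ([0xfd0, 0x1000)) while ioctl-allocated notifiers keep the full [0, 0x1000) range. A toy standalone sketch of a range-restricted first-fit search in that spirit (the free list below is not the drm_mm allocator):

    #include <stdio.h>
    #include <stdint.h>

    struct hole { uint32_t start, end; };   /* half-open free extent */

    /* Return the offset of a free block of 'size' bytes inside
     * [range_lo, range_hi), or UINT32_MAX when nothing fits. */
    static uint32_t find_in_range(const struct hole *holes, int n, uint32_t size,
                                  uint32_t range_lo, uint32_t range_hi)
    {
        for (int i = 0; i < n; i++) {
            uint32_t lo = holes[i].start > range_lo ? holes[i].start : range_lo;
            uint32_t hi = holes[i].end   < range_hi ? holes[i].end   : range_hi;
            if (hi > lo && hi - lo >= size)
                return lo;
        }
        return UINT32_MAX;
    }

    int main(void)
    {
        struct hole heap[] = { { 0x000, 0x800 }, { 0xf00, 0x1000 } };
        /* 32-byte notifier restricted to [0xfd0, 0x1000), as in the hunk above */
        printf("offset = 0x%x\n", find_in_range(heap, 2, 32, 0xfd0, 0x1000));
        return 0;
    }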
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index fb846a3fef15..4399e2f34db4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -443,7 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev) | |||
443 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 443 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
444 | 444 | ||
445 | if (pm->hwmon) { | 445 | if (pm->hwmon) { |
446 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); | 446 | sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); |
447 | hwmon_device_unregister(pm->hwmon); | 447 | hwmon_device_unregister(pm->hwmon); |
448 | } | 448 | } |
449 | #endif | 449 | #endif |
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev) | |||
543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
544 | struct nouveau_pm_level *perflvl; | 544 | struct nouveau_pm_level *perflvl; |
545 | 545 | ||
546 | if (pm->cur == &pm->boot) | 546 | if (!pm->cur || pm->cur == &pm->boot) |
547 | return; | 547 | return; |
548 | 548 | ||
549 | perflvl = pm->cur; | 549 | perflvl = pm->cur; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 7ecc4adc1e45..8d9968e1cba8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c | |||
@@ -265,8 +265,8 @@ nouveau_temp_probe_i2c(struct drm_device *dev) | |||
265 | struct i2c_board_info info[] = { | 265 | struct i2c_board_info info[] = { |
266 | { I2C_BOARD_INFO("w83l785ts", 0x2d) }, | 266 | { I2C_BOARD_INFO("w83l785ts", 0x2d) }, |
267 | { I2C_BOARD_INFO("w83781d", 0x2d) }, | 267 | { I2C_BOARD_INFO("w83781d", 0x2d) }, |
268 | { I2C_BOARD_INFO("f75375", 0x2e) }, | ||
269 | { I2C_BOARD_INFO("adt7473", 0x2e) }, | 268 | { I2C_BOARD_INFO("adt7473", 0x2e) }, |
269 | { I2C_BOARD_INFO("f75375", 0x2e) }, | ||
270 | { I2C_BOARD_INFO("lm99", 0x4c) }, | 270 | { I2C_BOARD_INFO("lm99", 0x4c) }, |
271 | { } | 271 | { } |
272 | }; | 272 | }; |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index ef23550407b5..c82db37d9f41 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { | 342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { |
343 | bool duallink, dummy; | 343 | bool duallink, dummy; |
344 | 344 | ||
345 | nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> | 345 | nouveau_bios_parse_lvds_table(dev, output_mode->clock, |
346 | clock, &duallink, &dummy); | 346 | &duallink, &dummy); |
347 | if (duallink) | 347 | if (duallink) |
348 | regp->fp_control |= (8 << 28); | 348 | regp->fp_control |= (8 << 28); |
349 | } else | 349 | } else |
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
518 | return; | 518 | return; |
519 | 519 | ||
520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { | 520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { |
521 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
522 | |||
523 | /* when removing an output, crtc may not be set, but PANEL_OFF | 521 | /* when removing an output, crtc may not be set, but PANEL_OFF |
524 | * must still be run | 522 | * must still be run |
525 | */ | 523 | */ |
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
527 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); | 525 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); |
528 | 526 | ||
529 | if (mode == DRM_MODE_DPMS_ON) { | 527 | if (mode == DRM_MODE_DPMS_ON) { |
530 | if (!nv_connector->native_mode) { | ||
531 | NV_ERROR(dev, "Not turning on LVDS without native mode\n"); | ||
532 | return; | ||
533 | } | ||
534 | call_lvds_script(dev, nv_encoder->dcb, head, | 528 | call_lvds_script(dev, nv_encoder->dcb, head, |
535 | LVDS_PANEL_ON, nv_connector->native_mode->clock); | 529 | LVDS_PANEL_ON, nv_encoder->mode.clock); |
536 | } else | 530 | } else |
537 | /* pxclk of 0 is fine for PANEL_OFF, and for a | 531 | /* pxclk of 0 is fine for PANEL_OFF, and for a |
538 | * disconnected LVDS encoder there is no native_mode | 532 | * disconnected LVDS encoder there is no native_mode |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 8870d72388c8..18d30c2c1aa6 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | 211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
212 | 212 | ||
213 | switch (dev_priv->chipset) { | 213 | switch (dev_priv->chipset) { |
214 | case 0x40: | ||
215 | case 0x41: /* guess */ | ||
216 | case 0x42: | ||
217 | case 0x43: | ||
218 | case 0x45: /* guess */ | ||
219 | case 0x4e: | ||
220 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
221 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
222 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
223 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
224 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
225 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
226 | break; | ||
214 | case 0x44: | 227 | case 0x44: |
215 | case 0x4a: | 228 | case 0x4a: |
216 | case 0x4e: | ||
217 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | 229 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); |
218 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | 230 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); |
219 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | 231 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); |
220 | break; | 232 | break; |
221 | |||
222 | case 0x46: | 233 | case 0x46: |
223 | case 0x47: | 234 | case 0x47: |
224 | case 0x49: | 235 | case 0x49: |
225 | case 0x4b: | 236 | case 0x4b: |
237 | case 0x4c: | ||
238 | case 0x67: | ||
239 | default: | ||
226 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); | 240 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); |
227 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); | 241 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); |
228 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); | 242 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); |
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
230 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | 244 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); |
231 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | 245 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); |
232 | break; | 246 | break; |
233 | |||
234 | default: | ||
235 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
236 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
237 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
238 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
239 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
240 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
241 | break; | ||
242 | } | 247 | } |
243 | } | 248 | } |
244 | 249 | ||
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev) | |||
396 | break; | 401 | break; |
397 | default: | 402 | default: |
398 | switch (dev_priv->chipset) { | 403 | switch (dev_priv->chipset) { |
399 | case 0x46: | 404 | case 0x41: |
400 | case 0x47: | 405 | case 0x42: |
401 | case 0x49: | 406 | case 0x43: |
402 | case 0x4b: | 407 | case 0x45: |
403 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | 408 | case 0x4e: |
404 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | 409 | case 0x44: |
405 | break; | 410 | case 0x4a: |
406 | default: | ||
407 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); | 411 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); |
408 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); | 412 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); |
409 | break; | 413 | break; |
414 | default: | ||
415 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | ||
416 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | ||
417 | break; | ||
410 | } | 418 | } |
411 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); | 419 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); |
412 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); | 420 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); |
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index 14e24e906ee8..0ea090f4244a 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c | |||
@@ -283,8 +283,7 @@ nv50_evo_create(struct drm_device *dev) | |||
283 | nv50_evo_channel_del(&dev_priv->evo); | 283 | nv50_evo_channel_del(&dev_priv->evo); |
284 | return ret; | 284 | return ret; |
285 | } | 285 | } |
286 | } else | 286 | } else { |
287 | if (dev_priv->chipset != 0x50) { | ||
288 | ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, | 287 | ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, |
289 | 0, 0xffffffff, 0x00010000); | 288 | 0, 0xffffffff, 0x00010000); |
290 | if (ret) { | 289 | if (ret) { |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 2d7ea75a09d4..37e21d2be95b 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -256,6 +256,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) | |||
256 | struct drm_device *dev = chan->dev; | 256 | struct drm_device *dev = chan->dev; |
257 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 257 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
258 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 258 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
259 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
259 | int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; | 260 | int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; |
260 | unsigned long flags; | 261 | unsigned long flags; |
261 | 262 | ||
@@ -265,6 +266,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) | |||
265 | return; | 266 | return; |
266 | 267 | ||
267 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 268 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
269 | pfifo->reassign(dev, false); | ||
268 | pgraph->fifo_access(dev, false); | 270 | pgraph->fifo_access(dev, false); |
269 | 271 | ||
270 | if (pgraph->channel(dev) == chan) | 272 | if (pgraph->channel(dev) == chan) |
@@ -275,6 +277,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) | |||
275 | dev_priv->engine.instmem.flush(dev); | 277 | dev_priv->engine.instmem.flush(dev); |
276 | 278 | ||
277 | pgraph->fifo_access(dev, true); | 279 | pgraph->fifo_access(dev, true); |
280 | pfifo->reassign(dev, true); | ||
278 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 281 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
279 | 282 | ||
280 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); | 283 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ea0041810ae3..e57caa2a00e3 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) | |||
403 | void | 403 | void |
404 | nv50_instmem_flush(struct drm_device *dev) | 404 | nv50_instmem_flush(struct drm_device *dev) |
405 | { | 405 | { |
406 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
407 | |||
408 | spin_lock(&dev_priv->ramin_lock); | ||
406 | nv_wr32(dev, 0x00330c, 0x00000001); | 409 | nv_wr32(dev, 0x00330c, 0x00000001); |
407 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) | 410 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) |
408 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 411 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
412 | spin_unlock(&dev_priv->ramin_lock); | ||
409 | } | 413 | } |
410 | 414 | ||
411 | void | 415 | void |
412 | nv84_instmem_flush(struct drm_device *dev) | 416 | nv84_instmem_flush(struct drm_device *dev) |
413 | { | 417 | { |
418 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
419 | |||
420 | spin_lock(&dev_priv->ramin_lock); | ||
414 | nv_wr32(dev, 0x070000, 0x00000001); | 421 | nv_wr32(dev, 0x070000, 0x00000001); |
415 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) | 422 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) |
416 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 423 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
424 | spin_unlock(&dev_priv->ramin_lock); | ||
417 | } | 425 | } |
418 | 426 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 38e523e10995..6144156f255a 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -45,11 +45,6 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, | |||
45 | } | 45 | } |
46 | 46 | ||
47 | if (phys & 1) { | 47 | if (phys & 1) { |
48 | if (dev_priv->vram_sys_base) { | ||
49 | phys += dev_priv->vram_sys_base; | ||
50 | phys |= 0x30; | ||
51 | } | ||
52 | |||
53 | if (coverage <= 32 * 1024 * 1024) | 48 | if (coverage <= 32 * 1024 * 1024) |
54 | phys |= 0x60; | 49 | phys |= 0x60; |
55 | else if (coverage <= 64 * 1024 * 1024) | 50 | else if (coverage <= 64 * 1024 * 1024) |
@@ -174,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm) | |||
174 | void | 169 | void |
175 | nv50_vm_flush_engine(struct drm_device *dev, int engine) | 170 | nv50_vm_flush_engine(struct drm_device *dev, int engine) |
176 | { | 171 | { |
172 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
173 | |||
174 | spin_lock(&dev_priv->ramin_lock); | ||
177 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); | 175 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); |
178 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) | 176 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) |
179 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); | 177 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); |
178 | spin_unlock(&dev_priv->ramin_lock); | ||
180 | } | 179 | } |
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index e6ea7d83187f..eb18a7e89f5b 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "nvc0_graph.h" | 31 | #include "nvc0_graph.h" |
32 | 32 | ||
33 | static void nvc0_graph_isr(struct drm_device *); | 33 | static void nvc0_graph_isr(struct drm_device *); |
34 | static void nvc0_runk140_isr(struct drm_device *); | ||
34 | static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan); | 35 | static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan); |
35 | 36 | ||
36 | void | 37 | void |
@@ -281,6 +282,7 @@ nvc0_graph_destroy(struct drm_device *dev) | |||
281 | return; | 282 | return; |
282 | 283 | ||
283 | nouveau_irq_unregister(dev, 12); | 284 | nouveau_irq_unregister(dev, 12); |
285 | nouveau_irq_unregister(dev, 25); | ||
284 | 286 | ||
285 | nouveau_gpuobj_ref(NULL, &priv->unk4188b8); | 287 | nouveau_gpuobj_ref(NULL, &priv->unk4188b8); |
286 | nouveau_gpuobj_ref(NULL, &priv->unk4188b4); | 288 | nouveau_gpuobj_ref(NULL, &priv->unk4188b4); |
@@ -390,6 +392,7 @@ nvc0_graph_create(struct drm_device *dev) | |||
390 | } | 392 | } |
391 | 393 | ||
392 | nouveau_irq_register(dev, 12, nvc0_graph_isr); | 394 | nouveau_irq_register(dev, 12, nvc0_graph_isr); |
395 | nouveau_irq_register(dev, 25, nvc0_runk140_isr); | ||
393 | NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ | 396 | NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ |
394 | NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ | 397 | NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ |
395 | NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ | 398 | NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ |
@@ -512,8 +515,8 @@ nvc0_graph_init_gpc_1(struct drm_device *dev) | |||
512 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000); | 515 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000); |
513 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000); | 516 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000); |
514 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000); | 517 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000); |
515 | nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe); | 518 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe); |
516 | nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f); | 519 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f); |
517 | } | 520 | } |
518 | nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff); | 521 | nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff); |
519 | nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff); | 522 | nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff); |
@@ -777,3 +780,19 @@ nvc0_graph_isr(struct drm_device *dev) | |||
777 | 780 | ||
778 | nv_wr32(dev, 0x400500, 0x00010001); | 781 | nv_wr32(dev, 0x400500, 0x00010001); |
779 | } | 782 | } |
783 | |||
784 | static void | ||
785 | nvc0_runk140_isr(struct drm_device *dev) | ||
786 | { | ||
787 | u32 units = nv_rd32(dev, 0x00017c) & 0x1f; | ||
788 | |||
789 | while (units) { | ||
790 | u32 unit = ffs(units) - 1; | ||
791 | u32 reg = 0x140000 + unit * 0x2000; | ||
792 | u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); | ||
793 | u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); | ||
794 | |||
795 | NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); | ||
796 | units &= ~(1 << unit); | ||
797 | } | ||
798 | } | ||
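The new nvc0_runk140_isr() above walks a status bitmask one unit at a time: ffs() finds the lowest pending bit, the per-unit registers are read, and the bit is cleared until the mask is empty. A small standalone sketch of that iteration pattern, using a made-up status word rather than a register read:

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    int main(void)
    {
        unsigned int units = 0x15;            /* example: units 0, 2 and 4 pending */

        while (units) {
            unsigned int unit = ffs(units) - 1;           /* lowest pending unit */
            unsigned int reg  = 0x140000 + unit * 0x2000; /* per-unit register block */

            printf("unit %u -> status regs at 0x%06x/0x%06x\n",
                   unit, reg + 0x1020, reg + 0x1420);

            units &= ~(1u << unit);                       /* mark it handled */
        }
        return 0;
    }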
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index b9e68b2d30aa..f880ff776db8 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c | |||
@@ -1830,7 +1830,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1830 | 1830 | ||
1831 | for (tp = 0, id = 0; tp < 4; tp++) { | 1831 | for (tp = 0, id = 0; tp < 4; tp++) { |
1832 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | 1832 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
1833 | if (tp <= priv->tp_nr[gpc]) { | 1833 | if (tp < priv->tp_nr[gpc]) { |
1834 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id); | 1834 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id); |
1835 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id); | 1835 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id); |
1836 | nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id); | 1836 | nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b0ab185b86f6..a4e5e53e0a62 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, | |||
48 | 48 | ||
49 | switch (radeon_crtc->rmx_type) { | 49 | switch (radeon_crtc->rmx_type) { |
50 | case RMX_CENTER: | 50 | case RMX_CENTER: |
51 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | 51 | args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); |
52 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | 52 | args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); |
53 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | 53 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); |
54 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | 54 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); |
55 | break; | 55 | break; |
56 | case RMX_ASPECT: | 56 | case RMX_ASPECT: |
57 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | 57 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; |
58 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | 58 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; |
59 | 59 | ||
60 | if (a1 > a2) { | 60 | if (a1 > a2) { |
61 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | 61 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); |
62 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | 62 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); |
63 | } else if (a2 > a1) { | 63 | } else if (a2 > a1) { |
64 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | 64 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); |
65 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | 65 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); |
66 | } | 66 | } |
67 | break; | 67 | break; |
68 | case RMX_FULL: | 68 | case RMX_FULL: |
69 | default: | 69 | default: |
70 | args.usOverscanRight = radeon_crtc->h_border; | 70 | args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border); |
71 | args.usOverscanLeft = radeon_crtc->h_border; | 71 | args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border); |
72 | args.usOverscanBottom = radeon_crtc->v_border; | 72 | args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border); |
73 | args.usOverscanTop = radeon_crtc->v_border; | 73 | args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border); |
74 | break; | 74 | break; |
75 | } | 75 | } |
76 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 76 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
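The overscan hunk above wraps every 16-bit field in cpu_to_le16() because the AtomBIOS parameter block is defined little-endian; on a big-endian host the values must be byte-swapped before the table is executed. A portable stand-in for that conversion, as an illustrative sketch only (not the kernel's cpu_to_le16() implementation):

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t to_le16(uint16_t v)
    {
        const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

        if (probe.b[0] == 1)                      /* little-endian host: already in order */
            return v;
        return (uint16_t)((v >> 8) | (v << 8));   /* big-endian host: swap bytes */
    }

    int main(void)
    {
        uint16_t overscan_top = 38;               /* example border in lines */
        printf("stored value: 0x%04x\n", to_le16(overscan_top));
        return 0;
    }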
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, | |||
419 | memset(&args, 0, sizeof(args)); | 419 | memset(&args, 0, sizeof(args)); |
420 | 420 | ||
421 | if (ASIC_IS_DCE5(rdev)) { | 421 | if (ASIC_IS_DCE5(rdev)) { |
422 | args.v3.usSpreadSpectrumAmountFrac = 0; | 422 | args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); |
423 | args.v3.ucSpreadSpectrumType = ss->type; | 423 | args.v3.ucSpreadSpectrumType = ss->type; |
424 | switch (pll_id) { | 424 | switch (pll_id) { |
425 | case ATOM_PPLL1: | 425 | case ATOM_PPLL1: |
426 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; | 426 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; |
427 | args.v3.usSpreadSpectrumAmount = ss->amount; | 427 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
428 | args.v3.usSpreadSpectrumStep = ss->step; | 428 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
429 | break; | 429 | break; |
430 | case ATOM_PPLL2: | 430 | case ATOM_PPLL2: |
431 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; | 431 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; |
432 | args.v3.usSpreadSpectrumAmount = ss->amount; | 432 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
433 | args.v3.usSpreadSpectrumStep = ss->step; | 433 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
434 | break; | 434 | break; |
435 | case ATOM_DCPLL: | 435 | case ATOM_DCPLL: |
436 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; | 436 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; |
437 | args.v3.usSpreadSpectrumAmount = 0; | 437 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(0); |
438 | args.v3.usSpreadSpectrumStep = 0; | 438 | args.v3.usSpreadSpectrumStep = cpu_to_le16(0); |
439 | break; | 439 | break; |
440 | case ATOM_PPLL_INVALID: | 440 | case ATOM_PPLL_INVALID: |
441 | return; | 441 | return; |
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, | |||
447 | switch (pll_id) { | 447 | switch (pll_id) { |
448 | case ATOM_PPLL1: | 448 | case ATOM_PPLL1: |
449 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; | 449 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; |
450 | args.v2.usSpreadSpectrumAmount = ss->amount; | 450 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
451 | args.v2.usSpreadSpectrumStep = ss->step; | 451 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
452 | break; | 452 | break; |
453 | case ATOM_PPLL2: | 453 | case ATOM_PPLL2: |
454 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; | 454 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; |
455 | args.v2.usSpreadSpectrumAmount = ss->amount; | 455 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
456 | args.v2.usSpreadSpectrumStep = ss->step; | 456 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
457 | break; | 457 | break; |
458 | case ATOM_DCPLL: | 458 | case ATOM_DCPLL: |
459 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; | 459 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; |
460 | args.v2.usSpreadSpectrumAmount = 0; | 460 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(0); |
461 | args.v2.usSpreadSpectrumStep = 0; | 461 | args.v2.usSpreadSpectrumStep = cpu_to_le16(0); |
462 | break; | 462 | break; |
463 | case ATOM_PPLL_INVALID: | 463 | case ATOM_PPLL_INVALID: |
464 | return; | 464 | return; |
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
538 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 538 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
539 | else | 539 | else |
540 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 540 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
541 | |||
542 | } | 541 | } |
543 | 542 | ||
544 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 543 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
@@ -555,23 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
555 | dp_clock = dig_connector->dp_clock; | 554 | dp_clock = dig_connector->dp_clock; |
556 | } | 555 | } |
557 | } | 556 | } |
558 | #if 0 /* doesn't work properly on some laptops */ | 557 | |
559 | /* use recommended ref_div for ss */ | 558 | /* use recommended ref_div for ss */ |
560 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
561 | if (ss_enabled) { | 560 | if (ss_enabled) { |
562 | if (ss->refdiv) { | 561 | if (ss->refdiv) { |
562 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
563 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 563 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
564 | pll->reference_div = ss->refdiv; | 564 | pll->reference_div = ss->refdiv; |
565 | if (ASIC_IS_AVIVO(rdev)) | ||
566 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
565 | } | 567 | } |
566 | } | 568 | } |
567 | } | 569 | } |
568 | #endif | 570 | |
569 | if (ASIC_IS_AVIVO(rdev)) { | 571 | if (ASIC_IS_AVIVO(rdev)) { |
570 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 572 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
571 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 573 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
572 | adjusted_clock = mode->clock * 2; | 574 | adjusted_clock = mode->clock * 2; |
573 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | 575 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
574 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 576 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
577 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
578 | pll->flags |= RADEON_PLL_IS_LCD; | ||
575 | } else { | 579 | } else { |
576 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 580 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
577 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 581 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -606,14 +610,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
606 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); | 610 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); |
607 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | 611 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; |
608 | args.v1.ucEncodeMode = encoder_mode; | 612 | args.v1.ucEncodeMode = encoder_mode; |
609 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 613 | if (ss_enabled) |
610 | if (ss_enabled) | ||
611 | args.v1.ucConfig |= | ||
612 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | ||
613 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | ||
614 | args.v1.ucConfig |= | 614 | args.v1.ucConfig |= |
615 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | 615 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; |
616 | } | ||
617 | 616 | ||
618 | atom_execute_table(rdev->mode_info.atom_context, | 617 | atom_execute_table(rdev->mode_info.atom_context, |
619 | index, (uint32_t *)&args); | 618 | index, (uint32_t *)&args); |
@@ -624,12 +623,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
624 | args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; | 623 | args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; |
625 | args.v3.sInput.ucEncodeMode = encoder_mode; | 624 | args.v3.sInput.ucEncodeMode = encoder_mode; |
626 | args.v3.sInput.ucDispPllConfig = 0; | 625 | args.v3.sInput.ucDispPllConfig = 0; |
626 | if (ss_enabled) | ||
627 | args.v3.sInput.ucDispPllConfig |= | ||
628 | DISPPLL_CONFIG_SS_ENABLE; | ||
627 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 629 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
628 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 630 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
629 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 631 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
630 | if (ss_enabled) | ||
631 | args.v3.sInput.ucDispPllConfig |= | ||
632 | DISPPLL_CONFIG_SS_ENABLE; | ||
633 | args.v3.sInput.ucDispPllConfig |= | 632 | args.v3.sInput.ucDispPllConfig |= |
634 | DISPPLL_CONFIG_COHERENT_MODE; | 633 | DISPPLL_CONFIG_COHERENT_MODE; |
635 | /* 16200 or 27000 */ | 634 | /* 16200 or 27000 */ |
@@ -649,18 +648,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
649 | } | 648 | } |
650 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 649 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
651 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 650 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
652 | if (ss_enabled) | ||
653 | args.v3.sInput.ucDispPllConfig |= | ||
654 | DISPPLL_CONFIG_SS_ENABLE; | ||
655 | args.v3.sInput.ucDispPllConfig |= | 651 | args.v3.sInput.ucDispPllConfig |= |
656 | DISPPLL_CONFIG_COHERENT_MODE; | 652 | DISPPLL_CONFIG_COHERENT_MODE; |
657 | /* 16200 or 27000 */ | 653 | /* 16200 or 27000 */ |
658 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | 654 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); |
659 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | 655 | } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) { |
660 | if (ss_enabled) | ||
661 | args.v3.sInput.ucDispPllConfig |= | ||
662 | DISPPLL_CONFIG_SS_ENABLE; | ||
663 | } else { | ||
664 | if (mode->clock > 165000) | 656 | if (mode->clock > 165000) |
665 | args.v3.sInput.ucDispPllConfig |= | 657 | args.v3.sInput.ucDispPllConfig |= |
666 | DISPPLL_CONFIG_DUAL_LINK; | 658 | DISPPLL_CONFIG_DUAL_LINK; |
@@ -670,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
670 | index, (uint32_t *)&args); | 662 | index, (uint32_t *)&args); |
671 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; | 663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; |
672 | if (args.v3.sOutput.ucRefDiv) { | 664 | if (args.v3.sOutput.ucRefDiv) { |
665 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
673 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 666 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
674 | pll->reference_div = args.v3.sOutput.ucRefDiv; | 667 | pll->reference_div = args.v3.sOutput.ucRefDiv; |
675 | } | 668 | } |
676 | if (args.v3.sOutput.ucPostDiv) { | 669 | if (args.v3.sOutput.ucPostDiv) { |
670 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
677 | pll->flags |= RADEON_PLL_USE_POST_DIV; | 671 | pll->flags |= RADEON_PLL_USE_POST_DIV; |
678 | pll->post_div = args.v3.sOutput.ucPostDiv; | 672 | pll->post_div = args.v3.sOutput.ucPostDiv; |
679 | } | 673 | } |
@@ -727,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, | |||
727 | * SetPixelClock provides the dividers | 721 | * SetPixelClock provides the dividers |
728 | */ | 722 | */ |
729 | args.v5.ucCRTC = ATOM_CRTC_INVALID; | 723 | args.v5.ucCRTC = ATOM_CRTC_INVALID; |
730 | args.v5.usPixelClock = dispclk; | 724 | args.v5.usPixelClock = cpu_to_le16(dispclk); |
731 | args.v5.ucPpll = ATOM_DCPLL; | 725 | args.v5.ucPpll = ATOM_DCPLL; |
732 | break; | 726 | break; |
733 | case 6: | 727 | case 6: |
734 | /* if the default dcpll clock is specified, | 728 | /* if the default dcpll clock is specified, |
735 | * SetPixelClock provides the dividers | 729 | * SetPixelClock provides the dividers |
736 | */ | 730 | */ |
737 | args.v6.ulDispEngClkFreq = dispclk; | 731 | args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); |
738 | args.v6.ucPpll = ATOM_DCPLL; | 732 | args.v6.ucPpll = ATOM_DCPLL; |
739 | break; | 733 | break; |
740 | default: | 734 | default: |
@@ -963,8 +957,12 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
963 | /* adjust pixel clock as needed */ | 957 | /* adjust pixel clock as needed */ |
964 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); | 958 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); |
965 | 959 | ||
966 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 960 | if (ASIC_IS_AVIVO(rdev)) |
967 | &ref_div, &post_div); | 961 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, |
962 | &ref_div, &post_div); | ||
963 | else | ||
964 | radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
965 | &ref_div, &post_div); | ||
968 | 966 | ||
969 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); | 967 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); |
970 | 968 | ||
@@ -993,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
993 | } | 991 | } |
994 | } | 992 | } |
995 | 993 | ||
996 | static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | 994 | static int dce4_crtc_do_set_base(struct drm_crtc *crtc, |
997 | struct drm_framebuffer *fb, | 995 | struct drm_framebuffer *fb, |
998 | int x, int y, int atomic) | 996 | int x, int y, int atomic) |
999 | { | 997 | { |
1000 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 998 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1001 | struct drm_device *dev = crtc->dev; | 999 | struct drm_device *dev = crtc->dev; |
@@ -1006,6 +1004,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1006 | struct radeon_bo *rbo; | 1004 | struct radeon_bo *rbo; |
1007 | uint64_t fb_location; | 1005 | uint64_t fb_location; |
1008 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 1006 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
1007 | u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); | ||
1009 | int r; | 1008 | int r; |
1010 | 1009 | ||
1011 | /* no fb bound */ | 1010 | /* no fb bound */ |
@@ -1057,11 +1056,17 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1057 | case 16: | 1056 | case 16: |
1058 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | | 1057 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | |
1059 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); | 1058 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); |
1059 | #ifdef __BIG_ENDIAN | ||
1060 | fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); | ||
1061 | #endif | ||
1060 | break; | 1062 | break; |
1061 | case 24: | 1063 | case 24: |
1062 | case 32: | 1064 | case 32: |
1063 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | | 1065 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | |
1064 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); | 1066 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); |
1067 | #ifdef __BIG_ENDIAN | ||
1068 | fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); | ||
1069 | #endif | ||
1065 | break; | 1070 | break; |
1066 | default: | 1071 | default: |
1067 | DRM_ERROR("Unsupported screen depth %d\n", | 1072 | DRM_ERROR("Unsupported screen depth %d\n", |
@@ -1106,6 +1111,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1106 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 1111 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
1107 | (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); | 1112 | (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); |
1108 | WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); | 1113 | WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); |
1114 | WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); | ||
1109 | 1115 | ||
1110 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); | 1116 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); |
1111 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); | 1117 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); |
@@ -1127,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1127 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1133 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1128 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); | 1134 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); |
1129 | 1135 | ||
1130 | if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) | ||
1131 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1132 | EVERGREEN_INTERLEAVE_EN); | ||
1133 | else | ||
1134 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1135 | |||
1136 | if (!atomic && fb && fb != crtc->fb) { | 1136 | if (!atomic && fb && fb != crtc->fb) { |
1137 | radeon_fb = to_radeon_framebuffer(fb); | 1137 | radeon_fb = to_radeon_framebuffer(fb); |
1138 | rbo = radeon_fb->obj->driver_private; | 1138 | rbo = radeon_fb->obj->driver_private; |
@@ -1162,6 +1162,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1162 | struct drm_framebuffer *target_fb; | 1162 | struct drm_framebuffer *target_fb; |
1163 | uint64_t fb_location; | 1163 | uint64_t fb_location; |
1164 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 1164 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
1165 | u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; | ||
1165 | int r; | 1166 | int r; |
1166 | 1167 | ||
1167 | /* no fb bound */ | 1168 | /* no fb bound */ |
@@ -1215,12 +1216,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1215 | fb_format = | 1216 | fb_format = |
1216 | AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | | 1217 | AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | |
1217 | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; | 1218 | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; |
1219 | #ifdef __BIG_ENDIAN | ||
1220 | fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; | ||
1221 | #endif | ||
1218 | break; | 1222 | break; |
1219 | case 24: | 1223 | case 24: |
1220 | case 32: | 1224 | case 32: |
1221 | fb_format = | 1225 | fb_format = |
1222 | AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | | 1226 | AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | |
1223 | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; | 1227 | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; |
1228 | #ifdef __BIG_ENDIAN | ||
1229 | fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; | ||
1230 | #endif | ||
1224 | break; | 1231 | break; |
1225 | default: | 1232 | default: |
1226 | DRM_ERROR("Unsupported screen depth %d\n", | 1233 | DRM_ERROR("Unsupported screen depth %d\n", |
@@ -1260,6 +1267,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1260 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + | 1267 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + |
1261 | radeon_crtc->crtc_offset, (u32) fb_location); | 1268 | radeon_crtc->crtc_offset, (u32) fb_location); |
1262 | WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); | 1269 | WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); |
1270 | if (rdev->family >= CHIP_R600) | ||
1271 | WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); | ||
1263 | 1272 | ||
1264 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); | 1273 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); |
1265 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); | 1274 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); |
@@ -1281,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1281 | WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1290 | WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1282 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); | 1291 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); |
1283 | 1292 | ||
1284 | if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) | ||
1285 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1286 | AVIVO_D1MODE_INTERLEAVE_EN); | ||
1287 | else | ||
1288 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1289 | |||
1290 | if (!atomic && fb && fb != crtc->fb) { | 1293 | if (!atomic && fb && fb != crtc->fb) { |
1291 | radeon_fb = to_radeon_framebuffer(fb); | 1294 | radeon_fb = to_radeon_framebuffer(fb); |
1292 | rbo = radeon_fb->obj->driver_private; | 1295 | rbo = radeon_fb->obj->driver_private; |
@@ -1310,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1310 | struct radeon_device *rdev = dev->dev_private; | 1313 | struct radeon_device *rdev = dev->dev_private; |
1311 | 1314 | ||
1312 | if (ASIC_IS_DCE4(rdev)) | 1315 | if (ASIC_IS_DCE4(rdev)) |
1313 | return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0); | 1316 | return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0); |
1314 | else if (ASIC_IS_AVIVO(rdev)) | 1317 | else if (ASIC_IS_AVIVO(rdev)) |
1315 | return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); | 1318 | return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); |
1316 | else | 1319 | else |
@@ -1325,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, | |||
1325 | struct radeon_device *rdev = dev->dev_private; | 1328 | struct radeon_device *rdev = dev->dev_private; |
1326 | 1329 | ||
1327 | if (ASIC_IS_DCE4(rdev)) | 1330 | if (ASIC_IS_DCE4(rdev)) |
1328 | return evergreen_crtc_do_set_base(crtc, fb, x, y, 1); | 1331 | return dce4_crtc_do_set_base(crtc, fb, x, y, 1); |
1329 | else if (ASIC_IS_AVIVO(rdev)) | 1332 | else if (ASIC_IS_AVIVO(rdev)) |
1330 | return avivo_crtc_do_set_base(crtc, fb, x, y, 1); | 1333 | return avivo_crtc_do_set_base(crtc, fb, x, y, 1); |
1331 | else | 1334 | else |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 4e7778d44b8d..695de9a38506 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | |||
187 | int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | 187 | int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) |
188 | { | 188 | { |
189 | int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); | 189 | int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); |
190 | int bw = dp_lanes_for_mode_clock(dpcd, mode_clock); | 190 | int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock); |
191 | 191 | ||
192 | if ((lanes == 0) || (bw == 0)) | 192 | if ((lanes == 0) || (dp_clock == 0)) |
193 | return MODE_CLOCK_HIGH; | 193 | return MODE_CLOCK_HIGH; |
194 | 194 | ||
195 | return MODE_OK; | 195 | return MODE_OK; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a8973acb3987..d270b3ff896b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -97,26 +97,29 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | /* get temperature in millidegrees */ | 99 | /* get temperature in millidegrees */ |
100 | u32 evergreen_get_temp(struct radeon_device *rdev) | 100 | int evergreen_get_temp(struct radeon_device *rdev) |
101 | { | 101 | { |
102 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> | 102 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> |
103 | ASIC_T_SHIFT; | 103 | ASIC_T_SHIFT; |
104 | u32 actual_temp = 0; | 104 | u32 actual_temp = 0; |
105 | 105 | ||
106 | if ((temp >> 10) & 1) | 106 | if (temp & 0x400) |
107 | actual_temp = 0; | 107 | actual_temp = -256; |
108 | else if ((temp >> 9) & 1) | 108 | else if (temp & 0x200) |
109 | actual_temp = 255; | 109 | actual_temp = 255; |
110 | else | 110 | else if (temp & 0x100) { |
111 | actual_temp = (temp >> 1) & 0xff; | 111 | actual_temp = temp & 0x1ff; |
112 | actual_temp |= ~0x1ff; | ||
113 | } else | ||
114 | actual_temp = temp & 0xff; | ||
112 | 115 | ||
113 | return actual_temp * 1000; | 116 | return (actual_temp * 1000) / 2; |
114 | } | 117 | } |
115 | 118 | ||
116 | u32 sumo_get_temp(struct radeon_device *rdev) | 119 | int sumo_get_temp(struct radeon_device *rdev) |
117 | { | 120 | { |
118 | u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; | 121 | u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; |
119 | u32 actual_temp = (temp >> 1) & 0xff; | 122 | int actual_temp = temp - 49; |
120 | 123 | ||
121 | return actual_temp * 1000; | 124 | return actual_temp * 1000; |
122 | } | 125 | } |
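The reworked readback treats ASIC_T as a signed quantity in half-degree steps rather than an unsigned byte, so sub-zero readings no longer wrap. A standalone sketch of the same decode, assuming the raw field has already been shifted out of CG_MULT_THERMAL_STATUS:

#include <stdio.h>
#include <stdint.h>

/* Convert the raw ASIC_T field to millidegrees Celsius, mirroring the logic
 * this hunk introduces: a 9-bit signed payload in 0.5 degree units, with
 * clamp bits above it. */
static int decode_asic_t(uint32_t temp)
{
	int actual_temp;

	if (temp & 0x400)		/* out of range low: clamp */
		actual_temp = -256;
	else if (temp & 0x200)		/* out of range high: clamp */
		actual_temp = 255;
	else if (temp & 0x100) {	/* sign bit of the 9-bit reading */
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;	/* sign-extend into the int */
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;	/* half-degrees -> millidegrees */
}

int main(void)
{
	printf("%d\n", decode_asic_t(0x06e));	/*  110 * 500 =  55000 */
	printf("%d\n", decode_asic_t(0x1fc));	/*   -4 * 500 =  -2000 */
	return 0;
}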
@@ -1182,6 +1185,22 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
1182 | /* | 1185 | /* |
1183 | * CP. | 1186 | * CP. |
1184 | */ | 1187 | */ |
1188 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | ||
1189 | { | ||
1190 | /* set to DX10/11 mode */ | ||
1191 | radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); | ||
1192 | radeon_ring_write(rdev, 1); | ||
1193 | /* FIXME: implement */ | ||
1194 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||
1195 | radeon_ring_write(rdev, | ||
1196 | #ifdef __BIG_ENDIAN | ||
1197 | (2 << 0) | | ||
1198 | #endif | ||
1199 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
1200 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); | ||
1201 | radeon_ring_write(rdev, ib->length_dw); | ||
1202 | } | ||
1203 | |||
1185 | 1204 | ||
1186 | static int evergreen_cp_load_microcode(struct radeon_device *rdev) | 1205 | static int evergreen_cp_load_microcode(struct radeon_device *rdev) |
1187 | { | 1206 | { |
@@ -1192,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) | |||
1192 | return -EINVAL; | 1211 | return -EINVAL; |
1193 | 1212 | ||
1194 | r700_cp_stop(rdev); | 1213 | r700_cp_stop(rdev); |
1195 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); | 1214 | WREG32(CP_RB_CNTL, |
1215 | #ifdef __BIG_ENDIAN | ||
1216 | BUF_SWAP_32BIT | | ||
1217 | #endif | ||
1218 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
1196 | 1219 | ||
1197 | fw_data = (const __be32 *)rdev->pfp_fw->data; | 1220 | fw_data = (const __be32 *)rdev->pfp_fw->data; |
1198 | WREG32(CP_PFP_UCODE_ADDR, 0); | 1221 | WREG32(CP_PFP_UCODE_ADDR, 0); |
@@ -1233,7 +1256,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) | |||
1233 | cp_me = 0xff; | 1256 | cp_me = 0xff; |
1234 | WREG32(CP_ME_CNTL, cp_me); | 1257 | WREG32(CP_ME_CNTL, cp_me); |
1235 | 1258 | ||
1236 | r = radeon_ring_lock(rdev, evergreen_default_size + 15); | 1259 | r = radeon_ring_lock(rdev, evergreen_default_size + 19); |
1237 | if (r) { | 1260 | if (r) { |
1238 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | 1261 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); |
1239 | return r; | 1262 | return r; |
@@ -1266,6 +1289,11 @@ static int evergreen_cp_start(struct radeon_device *rdev) | |||
1266 | radeon_ring_write(rdev, 0xffffffff); | 1289 | radeon_ring_write(rdev, 0xffffffff); |
1267 | radeon_ring_write(rdev, 0xffffffff); | 1290 | radeon_ring_write(rdev, 0xffffffff); |
1268 | 1291 | ||
1292 | radeon_ring_write(rdev, 0xc0026900); | ||
1293 | radeon_ring_write(rdev, 0x00000316); | ||
1294 | radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | ||
1295 | radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ | ||
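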
1296 | |||
1269 | radeon_ring_unlock_commit(rdev); | 1297 | radeon_ring_unlock_commit(rdev); |
1270 | 1298 | ||
1271 | return 0; | 1299 | return 0; |
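The four raw dwords added to evergreen_cp_start() form one Type-3 SET_CONTEXT_REG packet, which is also why the ring lock above grows from evergreen_default_size + 15 to + 19. A small decode of the header word, assuming the usual radeon PACKET3 layout (type in bits 31:30, count in 29:16, opcode in 15:8):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hdr = 0xc0026900;
	unsigned type   = (hdr >> 30) & 0x3;	/* 3    -> Type-3 packet */
	unsigned count  = (hdr >> 16) & 0x3fff;	/* 2    -> offset dword + 2 values follow */
	unsigned opcode = (hdr >>  8) & 0xff;	/* 0x69 -> SET_CONTEXT_REG */

	/* Body dword 0 (0x316) is an offset from the context-register base. */
	unsigned reg = 0x28000 + 0x316 * 4;	/* 0x28c58, VGT_VERTEX_REUSE_BLOCK_CNTL */

	printf("type %u opcode 0x%02x count %u -> reg 0x%05x\n",
	       type, opcode, count, reg);
	return 0;
}

The first value (0x0e) therefore lands in VGT_VERTEX_REUSE_BLOCK_CNTL and the second (0x10) in the following context register, 0x28c5c (VGT_OUT_DEALLOC_CNTL on these parts).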
@@ -1306,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1306 | WREG32(CP_RB_WPTR, 0); | 1334 | WREG32(CP_RB_WPTR, 0); |
1307 | 1335 | ||
1308 | /* set the wb address whether it's enabled or not */ | 1336 | /* set the wb address whether it's enabled or not */
1309 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | 1337 | WREG32(CP_RB_RPTR_ADDR, |
1338 | #ifdef __BIG_ENDIAN | ||
1339 | RB_RPTR_SWAP(2) | | ||
1340 | #endif | ||
1341 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | ||
1310 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | 1342 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); |
1311 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | 1343 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); |
1312 | 1344 | ||
@@ -2072,6 +2104,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
2072 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); | 2104 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); |
2073 | 2105 | ||
2074 | WREG32(VGT_GS_VERTEX_REUSE, 16); | 2106 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
2107 | WREG32(PA_SU_LINE_STIPPLE_VALUE, 0); | ||
2075 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | 2108 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
2076 | 2109 | ||
2077 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); | 2110 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); |
@@ -2201,6 +2234,9 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) | |||
2201 | struct evergreen_mc_save save; | 2234 | struct evergreen_mc_save save; |
2202 | u32 grbm_reset = 0; | 2235 | u32 grbm_reset = 0; |
2203 | 2236 | ||
2237 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
2238 | return 0; | ||
2239 | |||
2204 | dev_info(rdev->dev, "GPU softreset \n"); | 2240 | dev_info(rdev->dev, "GPU softreset \n"); |
2205 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", | 2241 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
2206 | RREG32(GRBM_STATUS)); | 2242 | RREG32(GRBM_STATUS)); |
@@ -2603,8 +2639,8 @@ restart_ih: | |||
2603 | while (rptr != wptr) { | 2639 | while (rptr != wptr) { |
2604 | /* wptr/rptr are in bytes! */ | 2640 | /* wptr/rptr are in bytes! */ |
2605 | ring_index = rptr / 4; | 2641 | ring_index = rptr / 4; |
2606 | src_id = rdev->ih.ring[ring_index] & 0xff; | 2642 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; |
2607 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | 2643 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; |
2608 | 2644 | ||
2609 | switch (src_id) { | 2645 | switch (src_id) { |
2610 | case 1: /* D1 vblank/vline */ | 2646 | case 1: /* D1 vblank/vline */ |
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index b758dc7f2f2c..2adfb03f479b 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format, | |||
55 | if (h < 8) | 55 | if (h < 8) |
56 | h = 8; | 56 | h = 8; |
57 | 57 | ||
58 | cb_color_info = ((format << 2) | (1 << 24)); | 58 | cb_color_info = ((format << 2) | (1 << 24) | (1 << 8)); |
59 | pitch = (w / 8) - 1; | 59 | pitch = (w / 8) - 1; |
60 | slice = ((w * h) / 64) - 1; | 60 | slice = ((w * h) / 64) - 1; |
61 | 61 | ||
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
133 | 133 | ||
134 | /* high addr, stride */ | 134 | /* high addr, stride */ |
135 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | 135 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); |
136 | #ifdef __BIG_ENDIAN | ||
137 | sq_vtx_constant_word2 |= (2 << 30); | ||
138 | #endif | ||
136 | /* xyzw swizzles */ | 139 | /* xyzw swizzles */ |
137 | sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); | 140 | sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); |
138 | 141 | ||
@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev, | |||
173 | sq_tex_resource_word0 = (1 << 0); /* 2D */ | 176 | sq_tex_resource_word0 = (1 << 0); /* 2D */ |
174 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | | 177 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | |
175 | ((w - 1) << 18)); | 178 | ((w - 1) << 18)); |
176 | sq_tex_resource_word1 = ((h - 1) << 0); | 179 | sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28); |
177 | /* xyzw swizzles */ | 180 | /* xyzw swizzles */ |
178 | sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); | 181 | sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); |
179 | 182 | ||
@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev) | |||
221 | radeon_ring_write(rdev, DI_PT_RECTLIST); | 224 | radeon_ring_write(rdev, DI_PT_RECTLIST); |
222 | 225 | ||
223 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); | 226 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); |
224 | radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); | 227 | radeon_ring_write(rdev, |
228 | #ifdef __BIG_ENDIAN | ||
229 | (2 << 2) | | ||
230 | #endif | ||
231 | DI_INDEX_SIZE_16_BIT); | ||
225 | 232 | ||
226 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); | 233 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); |
227 | radeon_ring_write(rdev, 1); | 234 | radeon_ring_write(rdev, 1); |
@@ -232,7 +239,7 @@ draw_auto(struct radeon_device *rdev) | |||
232 | 239 | ||
233 | } | 240 | } |
234 | 241 | ||
235 | /* emits 30 */ | 242 | /* emits 36 */ |
236 | static void | 243 | static void |
237 | set_default_state(struct radeon_device *rdev) | 244 | set_default_state(struct radeon_device *rdev) |
238 | { | 245 | { |
@@ -245,6 +252,8 @@ set_default_state(struct radeon_device *rdev) | |||
245 | int num_hs_threads, num_ls_threads; | 252 | int num_hs_threads, num_ls_threads; |
246 | int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; | 253 | int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; |
247 | int num_hs_stack_entries, num_ls_stack_entries; | 254 | int num_hs_stack_entries, num_ls_stack_entries; |
255 | u64 gpu_addr; | ||
256 | int dwords; | ||
248 | 257 | ||
249 | switch (rdev->family) { | 258 | switch (rdev->family) { |
250 | case CHIP_CEDAR: | 259 | case CHIP_CEDAR: |
@@ -497,6 +506,18 @@ set_default_state(struct radeon_device *rdev) | |||
497 | radeon_ring_write(rdev, 0x00000000); | 506 | radeon_ring_write(rdev, 0x00000000); |
498 | radeon_ring_write(rdev, 0x00000000); | 507 | radeon_ring_write(rdev, 0x00000000); |
499 | 508 | ||
509 | /* set to DX10/11 mode */ | ||
510 | radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); | ||
511 | radeon_ring_write(rdev, 1); | ||
512 | |||
513 | /* emit an IB pointing at default state */ | ||
514 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | ||
515 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | ||
516 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||
517 | radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); | ||
518 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); | ||
519 | radeon_ring_write(rdev, dwords); | ||
520 | |||
500 | } | 521 | } |
501 | 522 | ||
502 | static inline uint32_t i2f(uint32_t input) | 523 | static inline uint32_t i2f(uint32_t input) |
@@ -527,8 +548,10 @@ static inline uint32_t i2f(uint32_t input) | |||
527 | int evergreen_blit_init(struct radeon_device *rdev) | 548 | int evergreen_blit_init(struct radeon_device *rdev) |
528 | { | 549 | { |
529 | u32 obj_size; | 550 | u32 obj_size; |
530 | int r; | 551 | int i, r, dwords; |
531 | void *ptr; | 552 | void *ptr; |
553 | u32 packet2s[16]; | ||
554 | int num_packet2s = 0; | ||
532 | 555 | ||
533 | /* pin copy shader into vram if already initialized */ | 556 | /* pin copy shader into vram if already initialized */ |
534 | if (rdev->r600_blit.shader_obj) | 557 | if (rdev->r600_blit.shader_obj) |
@@ -536,8 +559,17 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
536 | 559 | ||
537 | mutex_init(&rdev->r600_blit.mutex); | 560 | mutex_init(&rdev->r600_blit.mutex); |
538 | rdev->r600_blit.state_offset = 0; | 561 | rdev->r600_blit.state_offset = 0; |
539 | rdev->r600_blit.state_len = 0; | 562 | |
540 | obj_size = 0; | 563 | rdev->r600_blit.state_len = evergreen_default_size; |
564 | |||
565 | dwords = rdev->r600_blit.state_len; | ||
566 | while (dwords & 0xf) { | ||
567 | packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); | ||
568 | dwords++; | ||
569 | } | ||
570 | |||
571 | obj_size = dwords * 4; | ||
572 | obj_size = ALIGN(obj_size, 256); | ||
541 | 573 | ||
542 | rdev->r600_blit.vs_offset = obj_size; | 574 | rdev->r600_blit.vs_offset = obj_size; |
543 | obj_size += evergreen_vs_size * 4; | 575 | obj_size += evergreen_vs_size * 4; |
@@ -567,8 +599,16 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
567 | return r; | 599 | return r; |
568 | } | 600 | } |
569 | 601 | ||
570 | memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); | 602 | memcpy_toio(ptr + rdev->r600_blit.state_offset, |
571 | memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); | 603 | evergreen_default_state, rdev->r600_blit.state_len * 4); |
604 | |||
605 | if (num_packet2s) | ||
606 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | ||
607 | packet2s, num_packet2s * 4); | ||
608 | for (i = 0; i < evergreen_vs_size; i++) | ||
609 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); | ||
610 | for (i = 0; i < evergreen_ps_size; i++) | ||
611 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); | ||
572 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 612 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
573 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 613 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
574 | 614 | ||
@@ -652,7 +692,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
652 | /* calculate number of loops correctly */ | 692 | /* calculate number of loops correctly */ |
653 | ring_size = num_loops * dwords_per_loop; | 693 | ring_size = num_loops * dwords_per_loop; |
654 | /* set default + shaders */ | 694 | /* set default + shaders */ |
655 | ring_size += 46; /* shaders + def state */ | 695 | ring_size += 52; /* shaders + def state */ |
656 | ring_size += 10; /* fence emit for VB IB */ | 696 | ring_size += 10; /* fence emit for VB IB */ |
657 | ring_size += 5; /* done copy */ | 697 | ring_size += 5; /* done copy */ |
658 | ring_size += 10; /* fence emit for done copy */ | 698 | ring_size += 10; /* fence emit for done copy */ |
@@ -660,7 +700,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
660 | if (r) | 700 | if (r) |
661 | return r; | 701 | return r; |
662 | 702 | ||
663 | set_default_state(rdev); /* 30 */ | 703 | set_default_state(rdev); /* 36 */ |
664 | set_shaders(rdev); /* 16 */ | 704 | set_shaders(rdev); /* 16 */ |
665 | return 0; | 705 | return 0; |
666 | } | 706 | } |
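The updated budget numbers in evergreen_blit_prepare_copy() follow directly from the extra packets set_default_state() now emits; tallying the dwords:

	set_default_state: 30 (previous) + 2 (MODE_CONTROL) + 4 (INDIRECT_BUFFER) = 36 dwords
	prepare_copy:      36 (default state) + 16 (set_shaders)                  = 52 dwords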
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c index ef1d28c07fbf..3a10399e0066 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c | |||
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] = | |||
311 | 0x00000000, | 311 | 0x00000000, |
312 | 0x3c000000, | 312 | 0x3c000000, |
313 | 0x67961001, | 313 | 0x67961001, |
314 | #ifdef __BIG_ENDIAN | ||
315 | 0x000a0000, | ||
316 | #else | ||
314 | 0x00080000, | 317 | 0x00080000, |
318 | #endif | ||
315 | 0x00000000, | 319 | 0x00000000, |
316 | 0x1c000000, | 320 | 0x1c000000, |
317 | 0x67961000, | 321 | 0x67961000, |
322 | #ifdef __BIG_ENDIAN | ||
323 | 0x00020008, | ||
324 | #else | ||
318 | 0x00000008, | 325 | 0x00000008, |
326 | #endif | ||
319 | 0x00000000, | 327 | 0x00000000, |
320 | }; | 328 | }; |
321 | 329 | ||
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 36d32d83d866..eb4acf4528ff 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -98,6 +98,7 @@ | |||
98 | #define BUF_SWAP_32BIT (2 << 16) | 98 | #define BUF_SWAP_32BIT (2 << 16) |
99 | #define CP_RB_RPTR 0x8700 | 99 | #define CP_RB_RPTR 0x8700 |
100 | #define CP_RB_RPTR_ADDR 0xC10C | 100 | #define CP_RB_RPTR_ADDR 0xC10C |
101 | #define RB_RPTR_SWAP(x) ((x) << 0) | ||
101 | #define CP_RB_RPTR_ADDR_HI 0xC110 | 102 | #define CP_RB_RPTR_ADDR_HI 0xC110 |
102 | #define CP_RB_RPTR_WR 0xC108 | 103 | #define CP_RB_RPTR_WR 0xC108 |
103 | #define CP_RB_WPTR 0xC114 | 104 | #define CP_RB_WPTR 0xC114 |
@@ -240,6 +241,7 @@ | |||
240 | #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) | 241 | #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) |
241 | #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) | 242 | #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) |
242 | #define PA_SC_LINE_STIPPLE 0x28A0C | 243 | #define PA_SC_LINE_STIPPLE 0x28A0C |
244 | #define PA_SU_LINE_STIPPLE_VALUE 0x8A60 | ||
243 | #define PA_SC_LINE_STIPPLE_STATE 0x8B10 | 245 | #define PA_SC_LINE_STIPPLE_STATE 0x8B10 |
244 | 246 | ||
245 | #define SCRATCH_REG0 0x8500 | 247 | #define SCRATCH_REG0 0x8500 |
@@ -652,6 +654,7 @@ | |||
652 | #define PACKET3_DISPATCH_DIRECT 0x15 | 654 | #define PACKET3_DISPATCH_DIRECT 0x15 |
653 | #define PACKET3_DISPATCH_INDIRECT 0x16 | 655 | #define PACKET3_DISPATCH_INDIRECT 0x16 |
654 | #define PACKET3_INDIRECT_BUFFER_END 0x17 | 656 | #define PACKET3_INDIRECT_BUFFER_END 0x17 |
657 | #define PACKET3_MODE_CONTROL 0x18 | ||
655 | #define PACKET3_SET_PREDICATION 0x20 | 658 | #define PACKET3_SET_PREDICATION 0x20 |
656 | #define PACKET3_REG_RMW 0x21 | 659 | #define PACKET3_REG_RMW 0x21 |
657 | #define PACKET3_COND_EXEC 0x22 | 660 | #define PACKET3_COND_EXEC 0x22 |
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 607241c6a8a9..5a82b6b75849 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c | |||
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename) | |||
673 | last_reg = strtol(last_reg_s, NULL, 16); | 673 | last_reg = strtol(last_reg_s, NULL, 16); |
674 | 674 | ||
675 | do { | 675 | do { |
676 | if (fgets(buf, 1024, file) == NULL) | 676 | if (fgets(buf, 1024, file) == NULL) { |
677 | fclose(file); | ||
677 | return -1; | 678 | return -1; |
679 | } | ||
678 | len = strlen(buf); | 680 | len = strlen(buf); |
679 | if (ftell(file) == end) | 681 | if (ftell(file) == end) |
680 | done = 1; | 682 | done = 1; |
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename) | |||
685 | fprintf(stderr, | 687 | fprintf(stderr, |
686 | "Error matching regular expression %d in %s\n", | 688 | "Error matching regular expression %d in %s\n", |
687 | r, filename); | 689 | r, filename); |
690 | fclose(file); | ||
688 | return -1; | 691 | return -1; |
689 | } else { | 692 | } else { |
690 | buf[match[0].rm_eo] = 0; | 693 | buf[match[0].rm_eo] = 0; |
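The two mkregtable.c hunks close FILE handle leaks on parser_auth()'s early-return paths. An equally common shape for a host tool like this is a single cleanup label, so every failure path funnels through one fclose(); a self-contained sketch of that alternative (not what the patch does):

#include <stdio.h>

static int count_lines(const char *filename, long *out)
{
	char buf[1024];
	long n = 0;
	int ret = -1;
	FILE *file = fopen(filename, "r");

	if (file == NULL)
		return -1;

	while (fgets(buf, sizeof(buf), file) != NULL)
		n++;
	if (ferror(file))
		goto out;		/* read error: still reaches the fclose below */

	*out = n;
	ret = 0;
out:
	fclose(file);			/* one close point covers every exit */
	return ret;
}

int main(void)
{
	long n;

	if (count_lines("/etc/hostname", &n) == 0)
		printf("%ld lines\n", n);
	return 0;
}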
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 46da5142b131..93fa735c8c1a 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -1031,8 +1031,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1031 | WREG32(RADEON_CP_CSQ_MODE, | 1031 | WREG32(RADEON_CP_CSQ_MODE, |
1032 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 1032 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
1033 | REG_SET(RADEON_INDIRECT1_START, indirect1_start)); | 1033 | REG_SET(RADEON_INDIRECT1_START, indirect1_start)); |
1034 | WREG32(0x718, 0); | 1034 | WREG32(RADEON_CP_RB_WPTR_DELAY, 0); |
1035 | WREG32(0x744, 0x00004D4D); | 1035 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); |
1036 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); | 1036 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); |
1037 | radeon_ring_start(rdev); | 1037 | radeon_ring_start(rdev); |
1038 | r = radeon_ring_test(rdev); | 1038 | r = radeon_ring_test(rdev); |
@@ -1427,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1427 | } | 1427 | } |
1428 | track->zb.robj = reloc->robj; | 1428 | track->zb.robj = reloc->robj; |
1429 | track->zb.offset = idx_value; | 1429 | track->zb.offset = idx_value; |
1430 | track->zb_dirty = true; | ||
1430 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1431 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1431 | break; | 1432 | break; |
1432 | case RADEON_RB3D_COLOROFFSET: | 1433 | case RADEON_RB3D_COLOROFFSET: |
@@ -1439,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1439 | } | 1440 | } |
1440 | track->cb[0].robj = reloc->robj; | 1441 | track->cb[0].robj = reloc->robj; |
1441 | track->cb[0].offset = idx_value; | 1442 | track->cb[0].offset = idx_value; |
1443 | track->cb_dirty = true; | ||
1442 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1444 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1443 | break; | 1445 | break; |
1444 | case RADEON_PP_TXOFFSET_0: | 1446 | case RADEON_PP_TXOFFSET_0: |
@@ -1454,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1454 | } | 1456 | } |
1455 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1457 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1456 | track->textures[i].robj = reloc->robj; | 1458 | track->textures[i].robj = reloc->robj; |
1459 | track->tex_dirty = true; | ||
1457 | break; | 1460 | break; |
1458 | case RADEON_PP_CUBIC_OFFSET_T0_0: | 1461 | case RADEON_PP_CUBIC_OFFSET_T0_0: |
1459 | case RADEON_PP_CUBIC_OFFSET_T0_1: | 1462 | case RADEON_PP_CUBIC_OFFSET_T0_1: |
@@ -1471,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1471 | track->textures[0].cube_info[i].offset = idx_value; | 1474 | track->textures[0].cube_info[i].offset = idx_value; |
1472 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1475 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1473 | track->textures[0].cube_info[i].robj = reloc->robj; | 1476 | track->textures[0].cube_info[i].robj = reloc->robj; |
1477 | track->tex_dirty = true; | ||
1474 | break; | 1478 | break; |
1475 | case RADEON_PP_CUBIC_OFFSET_T1_0: | 1479 | case RADEON_PP_CUBIC_OFFSET_T1_0: |
1476 | case RADEON_PP_CUBIC_OFFSET_T1_1: | 1480 | case RADEON_PP_CUBIC_OFFSET_T1_1: |
@@ -1488,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1488 | track->textures[1].cube_info[i].offset = idx_value; | 1492 | track->textures[1].cube_info[i].offset = idx_value; |
1489 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1493 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1490 | track->textures[1].cube_info[i].robj = reloc->robj; | 1494 | track->textures[1].cube_info[i].robj = reloc->robj; |
1495 | track->tex_dirty = true; | ||
1491 | break; | 1496 | break; |
1492 | case RADEON_PP_CUBIC_OFFSET_T2_0: | 1497 | case RADEON_PP_CUBIC_OFFSET_T2_0: |
1493 | case RADEON_PP_CUBIC_OFFSET_T2_1: | 1498 | case RADEON_PP_CUBIC_OFFSET_T2_1: |
@@ -1505,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1505 | track->textures[2].cube_info[i].offset = idx_value; | 1510 | track->textures[2].cube_info[i].offset = idx_value; |
1506 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1511 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1507 | track->textures[2].cube_info[i].robj = reloc->robj; | 1512 | track->textures[2].cube_info[i].robj = reloc->robj; |
1513 | track->tex_dirty = true; | ||
1508 | break; | 1514 | break; |
1509 | case RADEON_RE_WIDTH_HEIGHT: | 1515 | case RADEON_RE_WIDTH_HEIGHT: |
1510 | track->maxy = ((idx_value >> 16) & 0x7FF); | 1516 | track->maxy = ((idx_value >> 16) & 0x7FF); |
1517 | track->cb_dirty = true; | ||
1518 | track->zb_dirty = true; | ||
1511 | break; | 1519 | break; |
1512 | case RADEON_RB3D_COLORPITCH: | 1520 | case RADEON_RB3D_COLORPITCH: |
1513 | r = r100_cs_packet_next_reloc(p, &reloc); | 1521 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1528,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1528 | ib[idx] = tmp; | 1536 | ib[idx] = tmp; |
1529 | 1537 | ||
1530 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 1538 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
1539 | track->cb_dirty = true; | ||
1531 | break; | 1540 | break; |
1532 | case RADEON_RB3D_DEPTHPITCH: | 1541 | case RADEON_RB3D_DEPTHPITCH: |
1533 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; | 1542 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
1543 | track->zb_dirty = true; | ||
1534 | break; | 1544 | break; |
1535 | case RADEON_RB3D_CNTL: | 1545 | case RADEON_RB3D_CNTL: |
1536 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 1546 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
@@ -1555,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1555 | return -EINVAL; | 1565 | return -EINVAL; |
1556 | } | 1566 | } |
1557 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); | 1567 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
1568 | track->cb_dirty = true; | ||
1569 | track->zb_dirty = true; | ||
1558 | break; | 1570 | break; |
1559 | case RADEON_RB3D_ZSTENCILCNTL: | 1571 | case RADEON_RB3D_ZSTENCILCNTL: |
1560 | switch (idx_value & 0xf) { | 1572 | switch (idx_value & 0xf) { |
@@ -1572,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1572 | default: | 1584 | default: |
1573 | break; | 1585 | break; |
1574 | } | 1586 | } |
1587 | track->zb_dirty = true; | ||
1575 | break; | 1588 | break; |
1576 | case RADEON_RB3D_ZPASS_ADDR: | 1589 | case RADEON_RB3D_ZPASS_ADDR: |
1577 | r = r100_cs_packet_next_reloc(p, &reloc); | 1590 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1588,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1588 | uint32_t temp = idx_value >> 4; | 1601 | uint32_t temp = idx_value >> 4; |
1589 | for (i = 0; i < track->num_texture; i++) | 1602 | for (i = 0; i < track->num_texture; i++) |
1590 | track->textures[i].enabled = !!(temp & (1 << i)); | 1603 | track->textures[i].enabled = !!(temp & (1 << i)); |
1604 | track->tex_dirty = true; | ||
1591 | } | 1605 | } |
1592 | break; | 1606 | break; |
1593 | case RADEON_SE_VF_CNTL: | 1607 | case RADEON_SE_VF_CNTL: |
@@ -1602,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1602 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; | 1616 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
1603 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; | 1617 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
1604 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 1618 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
1619 | track->tex_dirty = true; | ||
1605 | break; | 1620 | break; |
1606 | case RADEON_PP_TEX_PITCH_0: | 1621 | case RADEON_PP_TEX_PITCH_0: |
1607 | case RADEON_PP_TEX_PITCH_1: | 1622 | case RADEON_PP_TEX_PITCH_1: |
1608 | case RADEON_PP_TEX_PITCH_2: | 1623 | case RADEON_PP_TEX_PITCH_2: |
1609 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; | 1624 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
1610 | track->textures[i].pitch = idx_value + 32; | 1625 | track->textures[i].pitch = idx_value + 32; |
1626 | track->tex_dirty = true; | ||
1611 | break; | 1627 | break; |
1612 | case RADEON_PP_TXFILTER_0: | 1628 | case RADEON_PP_TXFILTER_0: |
1613 | case RADEON_PP_TXFILTER_1: | 1629 | case RADEON_PP_TXFILTER_1: |
@@ -1621,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1621 | tmp = (idx_value >> 27) & 0x7; | 1637 | tmp = (idx_value >> 27) & 0x7; |
1622 | if (tmp == 2 || tmp == 6) | 1638 | if (tmp == 2 || tmp == 6) |
1623 | track->textures[i].roundup_h = false; | 1639 | track->textures[i].roundup_h = false; |
1640 | track->tex_dirty = true; | ||
1624 | break; | 1641 | break; |
1625 | case RADEON_PP_TXFORMAT_0: | 1642 | case RADEON_PP_TXFORMAT_0: |
1626 | case RADEON_PP_TXFORMAT_1: | 1643 | case RADEON_PP_TXFORMAT_1: |
@@ -1673,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1673 | } | 1690 | } |
1674 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); | 1691 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
1675 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); | 1692 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
1693 | track->tex_dirty = true; | ||
1676 | break; | 1694 | break; |
1677 | case RADEON_PP_CUBIC_FACES_0: | 1695 | case RADEON_PP_CUBIC_FACES_0: |
1678 | case RADEON_PP_CUBIC_FACES_1: | 1696 | case RADEON_PP_CUBIC_FACES_1: |
@@ -1683,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1683 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 1701 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
1684 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); | 1702 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
1685 | } | 1703 | } |
1704 | track->tex_dirty = true; | ||
1686 | break; | 1705 | break; |
1687 | default: | 1706 | default: |
1688 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1707 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
@@ -2347,10 +2366,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state) | |||
2347 | 2366 | ||
2348 | temp = RREG32(RADEON_CONFIG_CNTL); | 2367 | temp = RREG32(RADEON_CONFIG_CNTL); |
2349 | if (state == false) { | 2368 | if (state == false) { |
2350 | temp &= ~(1<<8); | 2369 | temp &= ~RADEON_CFG_VGA_RAM_EN; |
2351 | temp |= (1<<9); | 2370 | temp |= RADEON_CFG_VGA_IO_DIS; |
2352 | } else { | 2371 | } else { |
2353 | temp &= ~(1<<9); | 2372 | temp &= ~RADEON_CFG_VGA_IO_DIS; |
2354 | } | 2373 | } |
2355 | WREG32(RADEON_CONFIG_CNTL, temp); | 2374 | WREG32(RADEON_CONFIG_CNTL, temp); |
2356 | } | 2375 | } |
@@ -3318,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3318 | unsigned long size; | 3337 | unsigned long size; |
3319 | unsigned prim_walk; | 3338 | unsigned prim_walk; |
3320 | unsigned nverts; | 3339 | unsigned nverts; |
3321 | unsigned num_cb = track->num_cb; | 3340 | unsigned num_cb = track->cb_dirty ? track->num_cb : 0; |
3322 | 3341 | ||
3323 | if (!track->zb_cb_clear && !track->color_channel_mask && | 3342 | if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && |
3324 | !track->blend_read_enable) | 3343 | !track->blend_read_enable) |
3325 | num_cb = 0; | 3344 | num_cb = 0; |
3326 | 3345 | ||
@@ -3341,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3341 | return -EINVAL; | 3360 | return -EINVAL; |
3342 | } | 3361 | } |
3343 | } | 3362 | } |
3344 | if (track->z_enabled) { | 3363 | track->cb_dirty = false; |
3364 | |||
3365 | if (track->zb_dirty && track->z_enabled) { | ||
3345 | if (track->zb.robj == NULL) { | 3366 | if (track->zb.robj == NULL) { |
3346 | DRM_ERROR("[drm] No buffer for z buffer !\n"); | 3367 | DRM_ERROR("[drm] No buffer for z buffer !\n"); |
3347 | return -EINVAL; | 3368 | return -EINVAL; |
@@ -3358,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3358 | return -EINVAL; | 3379 | return -EINVAL; |
3359 | } | 3380 | } |
3360 | } | 3381 | } |
3382 | track->zb_dirty = false; | ||
3383 | |||
3384 | if (track->aa_dirty && track->aaresolve) { | ||
3385 | if (track->aa.robj == NULL) { | ||
3386 | DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); | ||
3387 | return -EINVAL; | ||
3388 | } | ||
3389 | /* I believe the format comes from colorbuffer0. */ | ||
3390 | size = track->aa.pitch * track->cb[0].cpp * track->maxy; | ||
3391 | size += track->aa.offset; | ||
3392 | if (size > radeon_bo_size(track->aa.robj)) { | ||
3393 | DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " | ||
3394 | "(need %lu have %lu) !\n", i, size, | ||
3395 | radeon_bo_size(track->aa.robj)); | ||
3396 | DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", | ||
3397 | i, track->aa.pitch, track->cb[0].cpp, | ||
3398 | track->aa.offset, track->maxy); | ||
3399 | return -EINVAL; | ||
3400 | } | ||
3401 | } | ||
3402 | track->aa_dirty = false; | ||
3403 | |||
3361 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; | 3404 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
3362 | if (track->vap_vf_cntl & (1 << 14)) { | 3405 | if (track->vap_vf_cntl & (1 << 14)) { |
3363 | nverts = track->vap_alt_nverts; | 3406 | nverts = track->vap_alt_nverts; |
@@ -3417,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3417 | prim_walk); | 3460 | prim_walk); |
3418 | return -EINVAL; | 3461 | return -EINVAL; |
3419 | } | 3462 | } |
3420 | return r100_cs_track_texture_check(rdev, track); | 3463 | |
3464 | if (track->tex_dirty) { | ||
3465 | track->tex_dirty = false; | ||
3466 | return r100_cs_track_texture_check(rdev, track); | ||
3467 | } | ||
3468 | return 0; | ||
3421 | } | 3469 | } |
3422 | 3470 | ||
3423 | void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) | 3471 | void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) |
3424 | { | 3472 | { |
3425 | unsigned i, face; | 3473 | unsigned i, face; |
3426 | 3474 | ||
3475 | track->cb_dirty = true; | ||
3476 | track->zb_dirty = true; | ||
3477 | track->tex_dirty = true; | ||
3478 | track->aa_dirty = true; | ||
3479 | |||
3427 | if (rdev->family < CHIP_R300) { | 3480 | if (rdev->family < CHIP_R300) { |
3428 | track->num_cb = 1; | 3481 | track->num_cb = 1; |
3429 | if (rdev->family <= CHIP_RS200) | 3482 | if (rdev->family <= CHIP_RS200) |
@@ -3437,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track | |||
3437 | track->num_texture = 16; | 3490 | track->num_texture = 16; |
3438 | track->maxy = 4096; | 3491 | track->maxy = 4096; |
3439 | track->separate_cube = 0; | 3492 | track->separate_cube = 0; |
3493 | track->aaresolve = false; | ||
3494 | track->aa.robj = NULL; | ||
3440 | } | 3495 | } |
3441 | 3496 | ||
3442 | for (i = 0; i < track->num_cb; i++) { | 3497 | for (i = 0; i < track->num_cb; i++) { |
@@ -3522,7 +3577,7 @@ int r100_ring_test(struct radeon_device *rdev) | |||
3522 | if (i < rdev->usec_timeout) { | 3577 | if (i < rdev->usec_timeout) { |
3523 | DRM_INFO("ring test succeeded in %d usecs\n", i); | 3578 | DRM_INFO("ring test succeeded in %d usecs\n", i); |
3524 | } else { | 3579 | } else { |
3525 | DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", | 3580 | DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", |
3526 | scratch, tmp); | 3581 | scratch, tmp); |
3527 | r = -EINVAL; | 3582 | r = -EINVAL; |
3528 | } | 3583 | } |
@@ -3746,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev) | |||
3746 | r100_mc_program(rdev); | 3801 | r100_mc_program(rdev); |
3747 | /* Resume clock */ | 3802 | /* Resume clock */ |
3748 | r100_clock_startup(rdev); | 3803 | r100_clock_startup(rdev); |
3749 | /* Initialize GPU configuration (# pipes, ...) */ | ||
3750 | // r100_gpu_init(rdev); | ||
3751 | /* Initialize GART (initialize after TTM so we can allocate | 3804 | /* Initialize GART (initialize after TTM so we can allocate |
3752 | * memory through TTM but finalize after TTM) */ | 3805 | * memory through TTM but finalize after TTM) */ |
3753 | r100_enable_bm(rdev); | 3806 | r100_enable_bm(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index af65600e6564..2fef9de7f363 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -52,14 +52,7 @@ struct r100_cs_track_texture { | |||
52 | unsigned compress_format; | 52 | unsigned compress_format; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct r100_cs_track_limits { | ||
56 | unsigned num_cb; | ||
57 | unsigned num_texture; | ||
58 | unsigned max_levels; | ||
59 | }; | ||
60 | |||
61 | struct r100_cs_track { | 55 | struct r100_cs_track { |
62 | struct radeon_device *rdev; | ||
63 | unsigned num_cb; | 56 | unsigned num_cb; |
64 | unsigned num_texture; | 57 | unsigned num_texture; |
65 | unsigned maxy; | 58 | unsigned maxy; |
@@ -73,11 +66,17 @@ struct r100_cs_track { | |||
73 | struct r100_cs_track_array arrays[11]; | 66 | struct r100_cs_track_array arrays[11]; |
74 | struct r100_cs_track_cb cb[R300_MAX_CB]; | 67 | struct r100_cs_track_cb cb[R300_MAX_CB]; |
75 | struct r100_cs_track_cb zb; | 68 | struct r100_cs_track_cb zb; |
69 | struct r100_cs_track_cb aa; | ||
76 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; | 70 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; |
77 | bool z_enabled; | 71 | bool z_enabled; |
78 | bool separate_cube; | 72 | bool separate_cube; |
79 | bool zb_cb_clear; | 73 | bool zb_cb_clear; |
80 | bool blend_read_enable; | 74 | bool blend_read_enable; |
75 | bool cb_dirty; | ||
76 | bool zb_dirty; | ||
77 | bool tex_dirty; | ||
78 | bool aa_dirty; | ||
79 | bool aaresolve; | ||
81 | }; | 80 | }; |
82 | 81 | ||
83 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); | 82 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); |
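The new *_dirty flags in r100_cs_track, set throughout the r100/r200/r300 packet checkers, let r100_cs_track_check() re-validate only the state groups whose registers actually changed since the previous check. Stripped to its core, the pattern is roughly the following; the names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct state_group {
	bool dirty;
	int value;
};

static void write_reg(struct state_group *g, int value)
{
	g->value = value;
	g->dirty = true;		/* any touch forces revalidation */
}

static void validate(struct state_group *g, const char *name)
{
	if (!g->dirty)
		return;			/* unchanged since last check: skip */
	printf("validating %s (value %d)\n", name, g->value);
	g->dirty = false;		/* a real checker would range-check here */
}

int main(void)
{
	struct state_group cb = { .dirty = true }, zb = { .dirty = true };

	validate(&cb, "color buffer");
	validate(&zb, "depth buffer");
	write_reg(&cb, 42);		/* only the color buffer is touched... */
	validate(&cb, "color buffer");	/* ...so only it is re-checked */
	validate(&zb, "depth buffer");	/* skipped */
	return 0;
}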
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index d2408c395619..f24058300413 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
184 | } | 184 | } |
185 | track->zb.robj = reloc->robj; | 185 | track->zb.robj = reloc->robj; |
186 | track->zb.offset = idx_value; | 186 | track->zb.offset = idx_value; |
187 | track->zb_dirty = true; | ||
187 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 188 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
188 | break; | 189 | break; |
189 | case RADEON_RB3D_COLOROFFSET: | 190 | case RADEON_RB3D_COLOROFFSET: |
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
196 | } | 197 | } |
197 | track->cb[0].robj = reloc->robj; | 198 | track->cb[0].robj = reloc->robj; |
198 | track->cb[0].offset = idx_value; | 199 | track->cb[0].offset = idx_value; |
200 | track->cb_dirty = true; | ||
199 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 201 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
200 | break; | 202 | break; |
201 | case R200_PP_TXOFFSET_0: | 203 | case R200_PP_TXOFFSET_0: |
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
214 | } | 216 | } |
215 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 217 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
216 | track->textures[i].robj = reloc->robj; | 218 | track->textures[i].robj = reloc->robj; |
219 | track->tex_dirty = true; | ||
217 | break; | 220 | break; |
218 | case R200_PP_CUBIC_OFFSET_F1_0: | 221 | case R200_PP_CUBIC_OFFSET_F1_0: |
219 | case R200_PP_CUBIC_OFFSET_F2_0: | 222 | case R200_PP_CUBIC_OFFSET_F2_0: |
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
257 | track->textures[i].cube_info[face - 1].offset = idx_value; | 260 | track->textures[i].cube_info[face - 1].offset = idx_value; |
258 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 261 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
259 | track->textures[i].cube_info[face - 1].robj = reloc->robj; | 262 | track->textures[i].cube_info[face - 1].robj = reloc->robj; |
263 | track->tex_dirty = true; | ||
260 | break; | 264 | break; |
261 | case RADEON_RE_WIDTH_HEIGHT: | 265 | case RADEON_RE_WIDTH_HEIGHT: |
262 | track->maxy = ((idx_value >> 16) & 0x7FF); | 266 | track->maxy = ((idx_value >> 16) & 0x7FF); |
267 | track->cb_dirty = true; | ||
268 | track->zb_dirty = true; | ||
263 | break; | 269 | break; |
264 | case RADEON_RB3D_COLORPITCH: | 270 | case RADEON_RB3D_COLORPITCH: |
265 | r = r100_cs_packet_next_reloc(p, &reloc); | 271 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
280 | ib[idx] = tmp; | 286 | ib[idx] = tmp; |
281 | 287 | ||
282 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 288 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
289 | track->cb_dirty = true; | ||
283 | break; | 290 | break; |
284 | case RADEON_RB3D_DEPTHPITCH: | 291 | case RADEON_RB3D_DEPTHPITCH: |
285 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; | 292 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
293 | track->zb_dirty = true; | ||
286 | break; | 294 | break; |
287 | case RADEON_RB3D_CNTL: | 295 | case RADEON_RB3D_CNTL: |
288 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 296 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
312 | } | 320 | } |
313 | 321 | ||
314 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); | 322 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
323 | track->cb_dirty = true; | ||
324 | track->zb_dirty = true; | ||
315 | break; | 325 | break; |
316 | case RADEON_RB3D_ZSTENCILCNTL: | 326 | case RADEON_RB3D_ZSTENCILCNTL: |
317 | switch (idx_value & 0xf) { | 327 | switch (idx_value & 0xf) { |
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
329 | default: | 339 | default: |
330 | break; | 340 | break; |
331 | } | 341 | } |
342 | track->zb_dirty = true; | ||
332 | break; | 343 | break; |
333 | case RADEON_RB3D_ZPASS_ADDR: | 344 | case RADEON_RB3D_ZPASS_ADDR: |
334 | r = r100_cs_packet_next_reloc(p, &reloc); | 345 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
345 | uint32_t temp = idx_value >> 4; | 356 | uint32_t temp = idx_value >> 4; |
346 | for (i = 0; i < track->num_texture; i++) | 357 | for (i = 0; i < track->num_texture; i++) |
347 | track->textures[i].enabled = !!(temp & (1 << i)); | 358 | track->textures[i].enabled = !!(temp & (1 << i)); |
359 | track->tex_dirty = true; | ||
348 | } | 360 | } |
349 | break; | 361 | break; |
350 | case RADEON_SE_VF_CNTL: | 362 | case RADEON_SE_VF_CNTL: |
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
369 | i = (reg - R200_PP_TXSIZE_0) / 32; | 381 | i = (reg - R200_PP_TXSIZE_0) / 32; |
370 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; | 382 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
371 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 383 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
384 | track->tex_dirty = true; | ||
372 | break; | 385 | break; |
373 | case R200_PP_TXPITCH_0: | 386 | case R200_PP_TXPITCH_0: |
374 | case R200_PP_TXPITCH_1: | 387 | case R200_PP_TXPITCH_1: |
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
378 | case R200_PP_TXPITCH_5: | 391 | case R200_PP_TXPITCH_5: |
379 | i = (reg - R200_PP_TXPITCH_0) / 32; | 392 | i = (reg - R200_PP_TXPITCH_0) / 32; |
380 | track->textures[i].pitch = idx_value + 32; | 393 | track->textures[i].pitch = idx_value + 32; |
394 | track->tex_dirty = true; | ||
381 | break; | 395 | break; |
382 | case R200_PP_TXFILTER_0: | 396 | case R200_PP_TXFILTER_0: |
383 | case R200_PP_TXFILTER_1: | 397 | case R200_PP_TXFILTER_1: |
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
394 | tmp = (idx_value >> 27) & 0x7; | 408 | tmp = (idx_value >> 27) & 0x7; |
395 | if (tmp == 2 || tmp == 6) | 409 | if (tmp == 2 || tmp == 6) |
396 | track->textures[i].roundup_h = false; | 410 | track->textures[i].roundup_h = false; |
411 | track->tex_dirty = true; | ||
397 | break; | 412 | break; |
398 | case R200_PP_TXMULTI_CTL_0: | 413 | case R200_PP_TXMULTI_CTL_0: |
399 | case R200_PP_TXMULTI_CTL_1: | 414 | case R200_PP_TXMULTI_CTL_1: |
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
432 | track->textures[i].tex_coord_type = 1; | 447 | track->textures[i].tex_coord_type = 1; |
433 | break; | 448 | break; |
434 | } | 449 | } |
450 | track->tex_dirty = true; | ||
435 | break; | 451 | break; |
436 | case R200_PP_TXFORMAT_0: | 452 | case R200_PP_TXFORMAT_0: |
437 | case R200_PP_TXFORMAT_1: | 453 | case R200_PP_TXFORMAT_1: |
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
488 | } | 504 | } |
489 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); | 505 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
490 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); | 506 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
507 | track->tex_dirty = true; | ||
491 | break; | 508 | break; |
492 | case R200_PP_CUBIC_FACES_0: | 509 | case R200_PP_CUBIC_FACES_0: |
493 | case R200_PP_CUBIC_FACES_1: | 510 | case R200_PP_CUBIC_FACES_1: |
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
501 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 518 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
502 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); | 519 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
503 | } | 520 | } |
521 | track->tex_dirty = true; | ||
504 | break; | 522 | break; |
505 | default: | 523 | default: |
506 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 524 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index cf862ca580bf..069efa8c8ecf 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -69,6 +69,9 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
69 | mb(); | 69 | mb(); |
70 | } | 70 | } |
71 | 71 | ||
72 | #define R300_PTE_WRITEABLE (1 << 2) | ||
73 | #define R300_PTE_READABLE (1 << 3) | ||
74 | |||
72 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 75 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
73 | { | 76 | { |
74 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 77 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
@@ -78,7 +81,7 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
78 | } | 81 | } |
79 | addr = (lower_32_bits(addr) >> 8) | | 82 | addr = (lower_32_bits(addr) >> 8) | |
80 | ((upper_32_bits(addr) & 0xff) << 24) | | 83 | ((upper_32_bits(addr) & 0xff) << 24) | |
81 | 0xc; | 84 | R300_PTE_WRITEABLE | R300_PTE_READABLE; |
82 | /* on x86 we want this to be CPU endian, on powerpc | 85 | /* on x86 we want this to be CPU endian, on powerpc |
83 | * on powerpc without HW swappers, it'll get swapped on way | 86 | * on powerpc without HW swappers, it'll get swapped on way |
84 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | 87 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ |
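With the bare 0xc replaced by named bits, the PTE packing reads more clearly: a 40-bit DMA address is shifted right by 8 and, because GART pages are 4 KiB aligned, the low four bits stay free for the permission flags. A hedged sketch of the same packing:

#include <stdio.h>
#include <stdint.h>

#define R300_PTE_WRITEABLE	(1 << 2)
#define R300_PTE_READABLE	(1 << 3)

/* Pack a 4 KiB-aligned 40-bit DMA address into an rv370 GART PTE the way
 * rv370_pcie_gart_set_page() does: addr >> 8, flags in the freed low bits. */
static uint32_t rv370_pte(uint64_t addr)
{
	return ((uint32_t)addr >> 8) |				/* lower_32_bits(addr) >> 8 */
	       (((uint32_t)(addr >> 32) & 0xff) << 24) |	/* upper 8 bits on top */
	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
}

int main(void)
{
	printf("0x%08x\n", rv370_pte(0x000000012345f000ULL));	/* -> 0x012345fc */
	return 0;
}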
@@ -135,7 +138,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
135 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); | 138 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); |
136 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); | 139 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); |
137 | /* Clear error */ | 140 | /* Clear error */ |
138 | WREG32_PCIE(0x18, 0); | 141 | WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0); |
139 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); | 142 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
140 | tmp |= RADEON_PCIE_TX_GART_EN; | 143 | tmp |= RADEON_PCIE_TX_GART_EN; |
141 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 144 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
@@ -664,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
664 | } | 667 | } |
665 | track->cb[i].robj = reloc->robj; | 668 | track->cb[i].robj = reloc->robj; |
666 | track->cb[i].offset = idx_value; | 669 | track->cb[i].offset = idx_value; |
670 | track->cb_dirty = true; | ||
667 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 671 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
668 | break; | 672 | break; |
669 | case R300_ZB_DEPTHOFFSET: | 673 | case R300_ZB_DEPTHOFFSET: |
@@ -676,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
676 | } | 680 | } |
677 | track->zb.robj = reloc->robj; | 681 | track->zb.robj = reloc->robj; |
678 | track->zb.offset = idx_value; | 682 | track->zb.offset = idx_value; |
683 | track->zb_dirty = true; | ||
679 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 684 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
680 | break; | 685 | break; |
681 | case R300_TX_OFFSET_0: | 686 | case R300_TX_OFFSET_0: |
@@ -714,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
714 | tmp |= tile_flags; | 719 | tmp |= tile_flags; |
715 | ib[idx] = tmp; | 720 | ib[idx] = tmp; |
716 | track->textures[i].robj = reloc->robj; | 721 | track->textures[i].robj = reloc->robj; |
722 | track->tex_dirty = true; | ||
717 | break; | 723 | break; |
718 | /* Tracked registers */ | 724 | /* Tracked registers */ |
719 | case 0x2084: | 725 | case 0x2084: |
@@ -740,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
740 | if (p->rdev->family < CHIP_RV515) { | 746 | if (p->rdev->family < CHIP_RV515) { |
741 | track->maxy -= 1440; | 747 | track->maxy -= 1440; |
742 | } | 748 | } |
749 | track->cb_dirty = true; | ||
750 | track->zb_dirty = true; | ||
743 | break; | 751 | break; |
744 | case 0x4E00: | 752 | case 0x4E00: |
745 | /* RB3D_CCTL */ | 753 | /* RB3D_CCTL */ |
@@ -749,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
749 | return -EINVAL; | 757 | return -EINVAL; |
750 | } | 758 | } |
751 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; | 759 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
760 | track->cb_dirty = true; | ||
752 | break; | 761 | break; |
753 | case 0x4E38: | 762 | case 0x4E38: |
754 | case 0x4E3C: | 763 | case 0x4E3C: |
@@ -811,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
811 | ((idx_value >> 21) & 0xF)); | 820 | ((idx_value >> 21) & 0xF)); |
812 | return -EINVAL; | 821 | return -EINVAL; |
813 | } | 822 | } |
823 | track->cb_dirty = true; | ||
814 | break; | 824 | break; |
815 | case 0x4F00: | 825 | case 0x4F00: |
816 | /* ZB_CNTL */ | 826 | /* ZB_CNTL */ |
@@ -819,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
819 | } else { | 829 | } else { |
820 | track->z_enabled = false; | 830 | track->z_enabled = false; |
821 | } | 831 | } |
832 | track->zb_dirty = true; | ||
822 | break; | 833 | break; |
823 | case 0x4F10: | 834 | case 0x4F10: |
824 | /* ZB_FORMAT */ | 835 | /* ZB_FORMAT */ |
@@ -835,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
835 | (idx_value & 0xF)); | 846 | (idx_value & 0xF)); |
836 | return -EINVAL; | 847 | return -EINVAL; |
837 | } | 848 | } |
849 | track->zb_dirty = true; | ||
838 | break; | 850 | break; |
839 | case 0x4F24: | 851 | case 0x4F24: |
840 | /* ZB_DEPTHPITCH */ | 852 | /* ZB_DEPTHPITCH */ |
@@ -858,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
858 | ib[idx] = tmp; | 870 | ib[idx] = tmp; |
859 | 871 | ||
860 | track->zb.pitch = idx_value & 0x3FFC; | 872 | track->zb.pitch = idx_value & 0x3FFC; |
873 | track->zb_dirty = true; | ||
861 | break; | 874 | break; |
862 | case 0x4104: | 875 | case 0x4104: |
876 | /* TX_ENABLE */ | ||
863 | for (i = 0; i < 16; i++) { | 877 | for (i = 0; i < 16; i++) { |
864 | bool enabled; | 878 | bool enabled; |
865 | 879 | ||
866 | enabled = !!(idx_value & (1 << i)); | 880 | enabled = !!(idx_value & (1 << i)); |
867 | track->textures[i].enabled = enabled; | 881 | track->textures[i].enabled = enabled; |
868 | } | 882 | } |
883 | track->tex_dirty = true; | ||
869 | break; | 884 | break; |
870 | case 0x44C0: | 885 | case 0x44C0: |
871 | case 0x44C4: | 886 | case 0x44C4: |
@@ -895,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
895 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
896 | break; | 911 | break; |
897 | case R300_TX_FORMAT_X16: | 912 | case R300_TX_FORMAT_X16: |
913 | case R300_TX_FORMAT_FL_I16: | ||
898 | case R300_TX_FORMAT_Y8X8: | 914 | case R300_TX_FORMAT_Y8X8: |
899 | case R300_TX_FORMAT_Z5Y6X5: | 915 | case R300_TX_FORMAT_Z5Y6X5: |
900 | case R300_TX_FORMAT_Z6Y5X5: | 916 | case R300_TX_FORMAT_Z6Y5X5: |
@@ -907,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
907 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 923 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
908 | break; | 924 | break; |
909 | case R300_TX_FORMAT_Y16X16: | 925 | case R300_TX_FORMAT_Y16X16: |
926 | case R300_TX_FORMAT_FL_I16A16: | ||
910 | case R300_TX_FORMAT_Z11Y11X10: | 927 | case R300_TX_FORMAT_Z11Y11X10: |
911 | case R300_TX_FORMAT_Z10Y11X11: | 928 | case R300_TX_FORMAT_Z10Y11X11: |
912 | case R300_TX_FORMAT_W8Z8Y8X8: | 929 | case R300_TX_FORMAT_W8Z8Y8X8: |
@@ -948,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
948 | DRM_ERROR("Invalid texture format %u\n", | 965 | DRM_ERROR("Invalid texture format %u\n", |
949 | (idx_value & 0x1F)); | 966 | (idx_value & 0x1F)); |
950 | return -EINVAL; | 967 | return -EINVAL; |
951 | break; | ||
952 | } | 968 | } |
969 | track->tex_dirty = true; | ||
953 | break; | 970 | break; |
954 | case 0x4400: | 971 | case 0x4400: |
955 | case 0x4404: | 972 | case 0x4404: |
@@ -977,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
977 | if (tmp == 2 || tmp == 4 || tmp == 6) { | 994 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
978 | track->textures[i].roundup_h = false; | 995 | track->textures[i].roundup_h = false; |
979 | } | 996 | } |
997 | track->tex_dirty = true; | ||
980 | break; | 998 | break; |
981 | case 0x4500: | 999 | case 0x4500: |
982 | case 0x4504: | 1000 | case 0x4504: |
@@ -1014,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1014 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); | 1032 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); |
1015 | return -EINVAL; | 1033 | return -EINVAL; |
1016 | } | 1034 | } |
1035 | track->tex_dirty = true; | ||
1017 | break; | 1036 | break; |
1018 | case 0x4480: | 1037 | case 0x4480: |
1019 | case 0x4484: | 1038 | case 0x4484: |
@@ -1043,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1043 | track->textures[i].use_pitch = !!tmp; | 1062 | track->textures[i].use_pitch = !!tmp; |
1044 | tmp = (idx_value >> 22) & 0xF; | 1063 | tmp = (idx_value >> 22) & 0xF; |
1045 | track->textures[i].txdepth = tmp; | 1064 | track->textures[i].txdepth = tmp; |
1065 | track->tex_dirty = true; | ||
1046 | break; | 1066 | break; |
1047 | case R300_ZB_ZPASS_ADDR: | 1067 | case R300_ZB_ZPASS_ADDR: |
1048 | r = r100_cs_packet_next_reloc(p, &reloc); | 1068 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1057,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1057 | case 0x4e0c: | 1077 | case 0x4e0c: |
1058 | /* RB3D_COLOR_CHANNEL_MASK */ | 1078 | /* RB3D_COLOR_CHANNEL_MASK */ |
1059 | track->color_channel_mask = idx_value; | 1079 | track->color_channel_mask = idx_value; |
1080 | track->cb_dirty = true; | ||
1060 | break; | 1081 | break; |
1061 | case 0x43a4: | 1082 | case 0x43a4: |
1062 | /* SC_HYPERZ_EN */ | 1083 | /* SC_HYPERZ_EN */ |
@@ -1070,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1070 | case 0x4f1c: | 1091 | case 0x4f1c: |
1071 | /* ZB_BW_CNTL */ | 1092 | /* ZB_BW_CNTL */ |
1072 | track->zb_cb_clear = !!(idx_value & (1 << 5)); | 1093 | track->zb_cb_clear = !!(idx_value & (1 << 5)); |
1094 | track->cb_dirty = true; | ||
1095 | track->zb_dirty = true; | ||
1073 | if (p->rdev->hyperz_filp != p->filp) { | 1096 | if (p->rdev->hyperz_filp != p->filp) { |
1074 | if (idx_value & (R300_HIZ_ENABLE | | 1097 | if (idx_value & (R300_HIZ_ENABLE | |
1075 | R300_RD_COMP_ENABLE | | 1098 | R300_RD_COMP_ENABLE | |
@@ -1081,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1081 | case 0x4e04: | 1104 | case 0x4e04: |
1082 | /* RB3D_BLENDCNTL */ | 1105 | /* RB3D_BLENDCNTL */ |
1083 | track->blend_read_enable = !!(idx_value & (1 << 2)); | 1106 | track->blend_read_enable = !!(idx_value & (1 << 2)); |
1107 | track->cb_dirty = true; | ||
1108 | break; | ||
1109 | case R300_RB3D_AARESOLVE_OFFSET: | ||
1110 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1111 | if (r) { | ||
1112 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1113 | idx, reg); | ||
1114 | r100_cs_dump_packet(p, pkt); | ||
1115 | return r; | ||
1116 | } | ||
1117 | track->aa.robj = reloc->robj; | ||
1118 | track->aa.offset = idx_value; | ||
1119 | track->aa_dirty = true; | ||
1120 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | ||
1121 | break; | ||
1122 | case R300_RB3D_AARESOLVE_PITCH: | ||
1123 | track->aa.pitch = idx_value & 0x3FFE; | ||
1124 | track->aa_dirty = true; | ||
1084 | break; | 1125 | break; |
1085 | case 0x4f28: /* ZB_DEPTHCLEARVALUE */ | 1126 | case R300_RB3D_AARESOLVE_CTL: |
1127 | track->aaresolve = idx_value & 0x1; | ||
1128 | track->aa_dirty = true; | ||
1086 | break; | 1129 | break; |
1087 | case 0x4f30: /* ZB_MASK_OFFSET */ | 1130 | case 0x4f30: /* ZB_MASK_OFFSET */ |
1088 | case 0x4f34: /* ZB_ZMASK_PITCH */ | 1131 | case 0x4f34: /* ZB_ZMASK_PITCH */ |
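The r300_packet0_check() hunks above introduce per-block dirty flags (cb_dirty, zb_dirty, tex_dirty, aa_dirty): each register write only marks the tracker block it belongs to, so the subsequent state check can limit revalidation to blocks that actually changed instead of rechecking everything per packet. Below is a minimal stand-alone sketch of that bookkeeping idea; the structure, function name and register ranges are illustrative assumptions, not the driver's real layout.

	#include <stdbool.h>

	struct cs_track {
		bool cb_dirty;   /* color buffer registers were written */
		bool zb_dirty;   /* depth buffer registers were written */
		bool tex_dirty;  /* texture registers were written */
	};

	/* Mark only the block the register belongs to; a later pass can
	 * then revalidate just the blocks whose flag is set. */
	void cs_track_reg_write(struct cs_track *t, unsigned int reg)
	{
		if (reg >= 0x4e00 && reg <= 0x4efc)		/* illustrative CB range */
			t->cb_dirty = true;
		else if (reg >= 0x4f00 && reg <= 0x4ffc)	/* illustrative ZB range */
			t->zb_dirty = true;
		else if (reg >= 0x4400 && reg <= 0x45fc)	/* illustrative TX range */
			t->tex_dirty = true;
	}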
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 1a0d5362cd79..f0bce399c9f3 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
@@ -1371,6 +1371,8 @@ | |||
1371 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ | 1371 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ |
1372 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ | 1372 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ |
1373 | 1373 | ||
1374 | #define R300_RB3D_AARESOLVE_OFFSET 0x4E80 | ||
1375 | #define R300_RB3D_AARESOLVE_PITCH 0x4E84 | ||
1374 | #define R300_RB3D_AARESOLVE_CTL 0x4E88 | 1376 | #define R300_RB3D_AARESOLVE_CTL 0x4E88 |
1375 | /* gap */ | 1377 | /* gap */ |
1376 | 1378 | ||
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index c387346f93a9..0b59ed7c7d2c 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -96,7 +96,7 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
96 | "programming pipes. Bad things might happen.\n"); | 96 | "programming pipes. Bad things might happen.\n"); |
97 | } | 97 | } |
98 | /* get max number of pipes */ | 98 | /* get max number of pipes */ |
99 | gb_pipe_select = RREG32(0x402C); | 99 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
100 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; | 100 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
101 | 101 | ||
102 | /* SE chips have 1 pipe */ | 102 | /* SE chips have 1 pipe */ |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 3c8677f9e385..2ce80d976568 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -79,8 +79,8 @@ static void r520_gpu_init(struct radeon_device *rdev) | |||
79 | WREG32(0x4128, 0xFF); | 79 | WREG32(0x4128, 0xFF); |
80 | } | 80 | } |
81 | r420_pipes_init(rdev); | 81 | r420_pipes_init(rdev); |
82 | gb_pipe_select = RREG32(0x402C); | 82 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
83 | tmp = RREG32(0x170C); | 83 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
84 | pipe_select_current = (tmp >> 2) & 3; | 84 | pipe_select_current = (tmp >> 2) & 3; |
85 | tmp = (1 << pipe_select_current) | | 85 | tmp = (1 << pipe_select_current) | |
86 | (((gb_pipe_select >> 8) & 0xF) << 4); | 86 | (((gb_pipe_select >> 8) & 0xF) << 4); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aca2236268fa..de88624d5f87 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -97,12 +97,16 @@ void r600_irq_disable(struct radeon_device *rdev); | |||
97 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); | 97 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); |
98 | 98 | ||
99 | /* get temperature in millidegrees */ | 99 | /* get temperature in millidegrees */ |
100 | u32 rv6xx_get_temp(struct radeon_device *rdev) | 100 | int rv6xx_get_temp(struct radeon_device *rdev) |
101 | { | 101 | { |
102 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> | 102 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> |
103 | ASIC_T_SHIFT; | 103 | ASIC_T_SHIFT; |
104 | int actual_temp = temp & 0xff; | ||
104 | 105 | ||
105 | return temp * 1000; | 106 | if (temp & 0x100) |
107 | actual_temp -= 256; | ||
108 | |||
109 | return actual_temp * 1000; | ||
106 | } | 110 | } |
107 | 111 | ||
108 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) | 112 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) |
@@ -1287,6 +1291,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
1287 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); | 1291 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); |
1288 | u32 tmp; | 1292 | u32 tmp; |
1289 | 1293 | ||
1294 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
1295 | return 0; | ||
1296 | |||
1290 | dev_info(rdev->dev, "GPU softreset \n"); | 1297 | dev_info(rdev->dev, "GPU softreset \n"); |
1291 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | 1298 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
1292 | RREG32(R_008010_GRBM_STATUS)); | 1299 | RREG32(R_008010_GRBM_STATUS)); |
@@ -2098,7 +2105,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev) | |||
2098 | 2105 | ||
2099 | r600_cp_stop(rdev); | 2106 | r600_cp_stop(rdev); |
2100 | 2107 | ||
2101 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | 2108 | WREG32(CP_RB_CNTL, |
2109 | #ifdef __BIG_ENDIAN | ||
2110 | BUF_SWAP_32BIT | | ||
2111 | #endif | ||
2112 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
2102 | 2113 | ||
2103 | /* Reset cp */ | 2114 | /* Reset cp */ |
2104 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | 2115 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); |
@@ -2185,7 +2196,11 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2185 | WREG32(CP_RB_WPTR, 0); | 2196 | WREG32(CP_RB_WPTR, 0); |
2186 | 2197 | ||
2187 | /* set the wb address whether it's enabled or not */ | 2198 | /* set the wb address whether it's enabled or not */ |
2188 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | 2199 | WREG32(CP_RB_RPTR_ADDR, |
2200 | #ifdef __BIG_ENDIAN | ||
2201 | RB_RPTR_SWAP(2) | | ||
2202 | #endif | ||
2203 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | ||
2189 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | 2204 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); |
2190 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | 2205 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); |
2191 | 2206 | ||
@@ -2621,7 +2636,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
2621 | { | 2636 | { |
2622 | /* FIXME: implement */ | 2637 | /* FIXME: implement */ |
2623 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 2638 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
2624 | radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); | 2639 | radeon_ring_write(rdev, |
2640 | #ifdef __BIG_ENDIAN | ||
2641 | (2 << 0) | | ||
2642 | #endif | ||
2643 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
2625 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); | 2644 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); |
2626 | radeon_ring_write(rdev, ib->length_dw); | 2645 | radeon_ring_write(rdev, ib->length_dw); |
2627 | } | 2646 | } |
@@ -3290,8 +3309,8 @@ restart_ih: | |||
3290 | while (rptr != wptr) { | 3309 | while (rptr != wptr) { |
3291 | /* wptr/rptr are in bytes! */ | 3310 | /* wptr/rptr are in bytes! */ |
3292 | ring_index = rptr / 4; | 3311 | ring_index = rptr / 4; |
3293 | src_id = rdev->ih.ring[ring_index] & 0xff; | 3312 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; |
3294 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | 3313 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; |
3295 | 3314 | ||
3296 | switch (src_id) { | 3315 | switch (src_id) { |
3297 | case 1: /* D1 vblank/vline */ | 3316 | case 1: /* D1 vblank/vline */ |
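The rv6xx_get_temp() change near the top of the r600.c hunks above makes the thermal readout signed: ASIC_T is a 9-bit two's-complement field, so bit 8 is the sign bit and readings below 0 C used to come back as large positive values. A small self-contained sketch of the same sign extension, with a made-up helper name and a worked example:

	#include <stdio.h>

	/* Sign-extend a 9-bit two's-complement temperature field and
	 * scale it to millidegrees Celsius. */
	int asic_t_to_millicelsius(unsigned int asic_t)
	{
		int t = asic_t & 0xff;

		if (asic_t & 0x100)	/* bit 8 set -> negative reading */
			t -= 256;

		return t * 1000;
	}

	int main(void)
	{
		/* 0x02a -> 42000 (42.0 C); 0x1f6 -> -10000 (-10.0 C), not 246000 */
		printf("%d %d\n", asic_t_to_millicelsius(0x02a),
		       asic_t_to_millicelsius(0x1f6));
		return 0;
	}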
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index ca5c29f70779..7f1043448d25 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev) | |||
137 | ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); | 137 | ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); |
138 | 138 | ||
139 | for (i = 0; i < r6xx_vs_size; i++) | 139 | for (i = 0; i < r6xx_vs_size; i++) |
140 | vs[i] = r6xx_vs[i]; | 140 | vs[i] = cpu_to_le32(r6xx_vs[i]); |
141 | for (i = 0; i < r6xx_ps_size; i++) | 141 | for (i = 0; i < r6xx_ps_size; i++) |
142 | ps[i] = r6xx_ps[i]; | 142 | ps[i] = cpu_to_le32(r6xx_ps[i]); |
143 | 143 | ||
144 | dev_priv->blit_vb->used = 512; | 144 | dev_priv->blit_vb->used = 512; |
145 | 145 | ||
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) | |||
192 | DRM_DEBUG("\n"); | 192 | DRM_DEBUG("\n"); |
193 | 193 | ||
194 | sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); | 194 | sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); |
195 | #ifdef __BIG_ENDIAN | ||
196 | sq_vtx_constant_word2 |= (2 << 30); | ||
197 | #endif | ||
195 | 198 | ||
196 | BEGIN_RING(9); | 199 | BEGIN_RING(9); |
197 | OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); | 200 | OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); |
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv) | |||
291 | OUT_RING(DI_PT_RECTLIST); | 294 | OUT_RING(DI_PT_RECTLIST); |
292 | 295 | ||
293 | OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); | 296 | OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); |
297 | #ifdef __BIG_ENDIAN | ||
298 | OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT); | ||
299 | #else | ||
294 | OUT_RING(DI_INDEX_SIZE_16_BIT); | 300 | OUT_RING(DI_INDEX_SIZE_16_BIT); |
301 | #endif | ||
295 | 302 | ||
296 | OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); | 303 | OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); |
297 | OUT_RING(1); | 304 | OUT_RING(1); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 86e5aa07f0db..41f7aafc97c4 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format, | |||
54 | if (h < 8) | 54 | if (h < 8) |
55 | h = 8; | 55 | h = 8; |
56 | 56 | ||
57 | cb_color_info = ((format << 2) | (1 << 27)); | 57 | cb_color_info = ((format << 2) | (1 << 27) | (1 << 8)); |
58 | pitch = (w / 8) - 1; | 58 | pitch = (w / 8) - 1; |
59 | slice = ((w * h) / 64) - 1; | 59 | slice = ((w * h) / 64) - 1; |
60 | 60 | ||
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
165 | u32 sq_vtx_constant_word2; | 165 | u32 sq_vtx_constant_word2; |
166 | 166 | ||
167 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | 167 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); |
168 | #ifdef __BIG_ENDIAN | ||
169 | sq_vtx_constant_word2 |= (2 << 30); | ||
170 | #endif | ||
168 | 171 | ||
169 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); | 172 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); |
170 | radeon_ring_write(rdev, 0x460); | 173 | radeon_ring_write(rdev, 0x460); |
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev, | |||
199 | if (h < 1) | 202 | if (h < 1) |
200 | h = 1; | 203 | h = 1; |
201 | 204 | ||
202 | sq_tex_resource_word0 = (1 << 0); | 205 | sq_tex_resource_word0 = (1 << 0) | (1 << 3); |
203 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | | 206 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | |
204 | ((w - 1) << 19)); | 207 | ((w - 1) << 19)); |
205 | 208 | ||
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev) | |||
253 | radeon_ring_write(rdev, DI_PT_RECTLIST); | 256 | radeon_ring_write(rdev, DI_PT_RECTLIST); |
254 | 257 | ||
255 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); | 258 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); |
256 | radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); | 259 | radeon_ring_write(rdev, |
260 | #ifdef __BIG_ENDIAN | ||
261 | (2 << 2) | | ||
262 | #endif | ||
263 | DI_INDEX_SIZE_16_BIT); | ||
257 | 264 | ||
258 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); | 265 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); |
259 | radeon_ring_write(rdev, 1); | 266 | radeon_ring_write(rdev, 1); |
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev) | |||
424 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | 431 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); |
425 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | 432 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; |
426 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 433 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
427 | radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); | 434 | radeon_ring_write(rdev, |
435 | #ifdef __BIG_ENDIAN | ||
436 | (2 << 0) | | ||
437 | #endif | ||
438 | (gpu_addr & 0xFFFFFFFC)); | ||
428 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); | 439 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); |
429 | radeon_ring_write(rdev, dwords); | 440 | radeon_ring_write(rdev, dwords); |
430 | 441 | ||
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input) | |||
467 | int r600_blit_init(struct radeon_device *rdev) | 478 | int r600_blit_init(struct radeon_device *rdev) |
468 | { | 479 | { |
469 | u32 obj_size; | 480 | u32 obj_size; |
470 | int r, dwords; | 481 | int i, r, dwords; |
471 | void *ptr; | 482 | void *ptr; |
472 | u32 packet2s[16]; | 483 | u32 packet2s[16]; |
473 | int num_packet2s = 0; | 484 | int num_packet2s = 0; |
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
486 | 497 | ||
487 | dwords = rdev->r600_blit.state_len; | 498 | dwords = rdev->r600_blit.state_len; |
488 | while (dwords & 0xf) { | 499 | while (dwords & 0xf) { |
489 | packet2s[num_packet2s++] = PACKET2(0); | 500 | packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); |
490 | dwords++; | 501 | dwords++; |
491 | } | 502 | } |
492 | 503 | ||
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev) | |||
529 | if (num_packet2s) | 540 | if (num_packet2s) |
530 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | 541 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), |
531 | packet2s, num_packet2s * 4); | 542 | packet2s, num_packet2s * 4); |
532 | memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); | 543 | for (i = 0; i < r6xx_vs_size; i++) |
533 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); | 544 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]); |
545 | for (i = 0; i < r6xx_ps_size; i++) | ||
546 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); | ||
534 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 547 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
535 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 548 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
536 | 549 | ||
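The r600_blit.c and r600_blit_kms.c hunks above replace plain word copies of the blit shader microcode with per-word cpu_to_le32() stores: the GPU fetches those words as little-endian regardless of host byte order, so a straight memcpy() of host-order u32s only happens to work on little-endian machines. A hedged user-space sketch of the same idea, using a hand-rolled byte store rather than the kernel helpers:

	#include <stddef.h>
	#include <stdint.h>

	/* Store a host-order 32-bit word as little-endian bytes, so the
	 * result is identical on little- and big-endian hosts. */
	void put_le32(uint8_t *dst, uint32_t v)
	{
		dst[0] = (uint8_t)(v & 0xff);
		dst[1] = (uint8_t)((v >> 8) & 0xff);
		dst[2] = (uint8_t)((v >> 16) & 0xff);
		dst[3] = (uint8_t)((v >> 24) & 0xff);
	}

	void copy_shader_le(uint8_t *dst, const uint32_t *words, size_t count)
	{
		size_t i;

		for (i = 0; i < count; i++)
			put_le32(dst + i * 4, words[i]);
	}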
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index e8151c1d55b2..2d1f6c5ee2a7 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c | |||
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] = | |||
684 | 0x00000000, | 684 | 0x00000000, |
685 | 0x3c000000, | 685 | 0x3c000000, |
686 | 0x68cd1000, | 686 | 0x68cd1000, |
687 | #ifdef __BIG_ENDIAN | ||
688 | 0x000a0000, | ||
689 | #else | ||
687 | 0x00080000, | 690 | 0x00080000, |
691 | #endif | ||
688 | 0x00000000, | 692 | 0x00000000, |
689 | }; | 693 | }; |
690 | 694 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 4f4cd8b286d5..c3ab959bdc7c 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
396 | r600_do_cp_stop(dev_priv); | 396 | r600_do_cp_stop(dev_priv); |
397 | 397 | ||
398 | RADEON_WRITE(R600_CP_RB_CNTL, | 398 | RADEON_WRITE(R600_CP_RB_CNTL, |
399 | #ifdef __BIG_ENDIAN | ||
400 | R600_BUF_SWAP_32BIT | | ||
401 | #endif | ||
399 | R600_RB_NO_UPDATE | | 402 | R600_RB_NO_UPDATE | |
400 | R600_RB_BLKSZ(15) | | 403 | R600_RB_BLKSZ(15) | |
401 | R600_RB_BUFSZ(3)); | 404 | R600_RB_BUFSZ(3)); |
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
486 | r600_do_cp_stop(dev_priv); | 489 | r600_do_cp_stop(dev_priv); |
487 | 490 | ||
488 | RADEON_WRITE(R600_CP_RB_CNTL, | 491 | RADEON_WRITE(R600_CP_RB_CNTL, |
492 | #ifdef __BIG_ENDIAN | ||
493 | R600_BUF_SWAP_32BIT | | ||
494 | #endif | ||
489 | R600_RB_NO_UPDATE | | 495 | R600_RB_NO_UPDATE | |
490 | (15 << 8) | | 496 | R600_RB_BLKSZ(15) | |
491 | (3 << 0)); | 497 | R600_RB_BUFSZ(3)); |
492 | 498 | ||
493 | RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); | 499 | RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); |
494 | RADEON_READ(R600_GRBM_SOFT_RESET); | 500 | RADEON_READ(R600_GRBM_SOFT_RESET); |
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv) | |||
550 | 556 | ||
551 | if (!dev_priv->writeback_works) { | 557 | if (!dev_priv->writeback_works) { |
552 | /* Disable writeback to avoid unnecessary bus master transfer */ | 558 | /* Disable writeback to avoid unnecessary bus master transfer */ |
553 | RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) | | 559 | RADEON_WRITE(R600_CP_RB_CNTL, |
554 | RADEON_RB_NO_UPDATE); | 560 | #ifdef __BIG_ENDIAN |
561 | R600_BUF_SWAP_32BIT | | ||
562 | #endif | ||
563 | RADEON_READ(R600_CP_RB_CNTL) | | ||
564 | R600_RB_NO_UPDATE); | ||
555 | RADEON_WRITE(R600_SCRATCH_UMSK, 0); | 565 | RADEON_WRITE(R600_SCRATCH_UMSK, 0); |
556 | } | 566 | } |
557 | } | 567 | } |
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev) | |||
575 | 585 | ||
576 | RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); | 586 | RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); |
577 | cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); | 587 | cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); |
578 | RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA); | 588 | RADEON_WRITE(R600_CP_RB_CNTL, |
589 | #ifdef __BIG_ENDIAN | ||
590 | R600_BUF_SWAP_32BIT | | ||
591 | #endif | ||
592 | R600_RB_RPTR_WR_ENA); | ||
579 | 593 | ||
580 | RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); | 594 | RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); |
581 | RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); | 595 | RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); |
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1838 | + dev_priv->gart_vm_start; | 1852 | + dev_priv->gart_vm_start; |
1839 | } | 1853 | } |
1840 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, | 1854 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, |
1841 | rptr_addr & 0xffffffff); | 1855 | #ifdef __BIG_ENDIAN |
1856 | (2 << 0) | | ||
1857 | #endif | ||
1858 | (rptr_addr & 0xfffffffc)); | ||
1842 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, | 1859 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, |
1843 | upper_32_bits(rptr_addr)); | 1860 | upper_32_bits(rptr_addr)); |
1844 | 1861 | ||
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1889 | { | 1906 | { |
1890 | u64 scratch_addr; | 1907 | u64 scratch_addr; |
1891 | 1908 | ||
1892 | scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR); | 1909 | scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC; |
1893 | scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; | 1910 | scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; |
1894 | scratch_addr += R600_SCRATCH_REG_OFFSET; | 1911 | scratch_addr += R600_SCRATCH_REG_OFFSET; |
1895 | scratch_addr >>= 8; | 1912 | scratch_addr >>= 8; |
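A related detail in the r600_cp.c hunks above: once the big-endian build writes a swap mode into the low bits of R600_CP_RB_RPTR_ADDR, the later readback masks those bits off (& 0xFFFFFFFC) before reusing the value as an address. Packing control bits into the unused low bits of an aligned address is a common register layout; a small illustrative sketch (macro and function names are mine, not the hardware's):

	#include <stdint.h>

	#define RPTR_SWAP_MASK	0x3u	/* bits 1:0 carry the endian-swap mode */

	/* The address is 4-byte aligned, so its two low bits are free to
	 * hold the swap field; strip them again before using the address. */
	uint32_t pack_rptr_addr(uint32_t addr, uint32_t swap)
	{
		return (addr & ~RPTR_SWAP_MASK) | (swap & RPTR_SWAP_MASK);
	}

	uint32_t unpack_rptr_addr(uint32_t reg)
	{
		return reg & ~RPTR_SWAP_MASK;
	}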
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 7831e0890210..153095fba62f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -295,17 +295,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
295 | } | 295 | } |
296 | 296 | ||
297 | if (!IS_ALIGNED(pitch, pitch_align)) { | 297 | if (!IS_ALIGNED(pitch, pitch_align)) { |
298 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | 298 | dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", |
299 | __func__, __LINE__, pitch); | 299 | __func__, __LINE__, pitch, pitch_align, array_mode); |
300 | return -EINVAL; | 300 | return -EINVAL; |
301 | } | 301 | } |
302 | if (!IS_ALIGNED(height, height_align)) { | 302 | if (!IS_ALIGNED(height, height_align)) { |
303 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | 303 | dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", |
304 | __func__, __LINE__, height); | 304 | __func__, __LINE__, height, height_align, array_mode); |
305 | return -EINVAL; | 305 | return -EINVAL; |
306 | } | 306 | } |
307 | if (!IS_ALIGNED(base_offset, base_align)) { | 307 | if (!IS_ALIGNED(base_offset, base_align)) { |
308 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | 308 | dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, |
309 | base_offset, base_align, array_mode); | ||
309 | return -EINVAL; | 310 | return -EINVAL; |
310 | } | 311 | } |
311 | 312 | ||
@@ -320,7 +321,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
320 | * broken userspace. | 321 | * broken userspace. |
321 | */ | 322 | */ |
322 | } else { | 323 | } else { |
323 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); | 324 | dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i, |
325 | array_mode, | ||
326 | track->cb_color_bo_offset[i], tmp, | ||
327 | radeon_bo_size(track->cb_color_bo[i])); | ||
324 | return -EINVAL; | 328 | return -EINVAL; |
325 | } | 329 | } |
326 | } | 330 | } |
@@ -455,17 +459,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
455 | } | 459 | } |
456 | 460 | ||
457 | if (!IS_ALIGNED(pitch, pitch_align)) { | 461 | if (!IS_ALIGNED(pitch, pitch_align)) { |
458 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | 462 | dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", |
459 | __func__, __LINE__, pitch); | 463 | __func__, __LINE__, pitch, pitch_align, array_mode); |
460 | return -EINVAL; | 464 | return -EINVAL; |
461 | } | 465 | } |
462 | if (!IS_ALIGNED(height, height_align)) { | 466 | if (!IS_ALIGNED(height, height_align)) { |
463 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | 467 | dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", |
464 | __func__, __LINE__, height); | 468 | __func__, __LINE__, height, height_align, array_mode); |
465 | return -EINVAL; | 469 | return -EINVAL; |
466 | } | 470 | } |
467 | if (!IS_ALIGNED(base_offset, base_align)) { | 471 | if (!IS_ALIGNED(base_offset, base_align)) { |
468 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | 472 | dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, |
473 | base_offset, base_align, array_mode); | ||
469 | return -EINVAL; | 474 | return -EINVAL; |
470 | } | 475 | } |
471 | 476 | ||
@@ -473,9 +478,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
473 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | 478 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; |
474 | tmp = ntiles * bpe * 64 * nviews; | 479 | tmp = ntiles * bpe * 64 * nviews; |
475 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | 480 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { |
476 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", | 481 | dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", |
477 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | 482 | array_mode, |
478 | radeon_bo_size(track->db_bo)); | 483 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, |
484 | radeon_bo_size(track->db_bo)); | ||
479 | return -EINVAL; | 485 | return -EINVAL; |
480 | } | 486 | } |
481 | } | 487 | } |
@@ -1227,18 +1233,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1227 | /* XXX check height as well... */ | 1233 | /* XXX check height as well... */ |
1228 | 1234 | ||
1229 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1235 | if (!IS_ALIGNED(pitch, pitch_align)) { |
1230 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1236 | dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", |
1231 | __func__, __LINE__, pitch); | 1237 | __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); |
1232 | return -EINVAL; | 1238 | return -EINVAL; |
1233 | } | 1239 | } |
1234 | if (!IS_ALIGNED(base_offset, base_align)) { | 1240 | if (!IS_ALIGNED(base_offset, base_align)) { |
1235 | dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", | 1241 | dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", |
1236 | __func__, __LINE__, base_offset); | 1242 | __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); |
1237 | return -EINVAL; | 1243 | return -EINVAL; |
1238 | } | 1244 | } |
1239 | if (!IS_ALIGNED(mip_offset, base_align)) { | 1245 | if (!IS_ALIGNED(mip_offset, base_align)) { |
1240 | dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", | 1246 | dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", |
1241 | __func__, __LINE__, mip_offset); | 1247 | __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); |
1242 | return -EINVAL; | 1248 | return -EINVAL; |
1243 | } | 1249 | } |
1244 | 1250 | ||
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index 33cda016b083..f869897c7456 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h | |||
@@ -81,7 +81,11 @@ | |||
81 | #define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 | 81 | #define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 |
82 | #define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 | 82 | #define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 |
83 | 83 | ||
84 | 84 | #define R600_D1GRPH_SWAP_CONTROL 0x610C | |
85 | # define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0) | ||
86 | # define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0) | ||
87 | # define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0) | ||
88 | # define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0) | ||
85 | 89 | ||
86 | #define R600_HDP_NONSURFACE_BASE 0x2c04 | 90 | #define R600_HDP_NONSURFACE_BASE 0x2c04 |
87 | 91 | ||
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index a5d898b4bad2..04bac0bbd3ec 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -154,13 +154,14 @@ | |||
154 | #define ROQ_IB2_START(x) ((x) << 8) | 154 | #define ROQ_IB2_START(x) ((x) << 8) |
155 | #define CP_RB_BASE 0xC100 | 155 | #define CP_RB_BASE 0xC100 |
156 | #define CP_RB_CNTL 0xC104 | 156 | #define CP_RB_CNTL 0xC104 |
157 | #define RB_BUFSZ(x) ((x)<<0) | 157 | #define RB_BUFSZ(x) ((x) << 0) |
158 | #define RB_BLKSZ(x) ((x)<<8) | 158 | #define RB_BLKSZ(x) ((x) << 8) |
159 | #define RB_NO_UPDATE (1<<27) | 159 | #define RB_NO_UPDATE (1 << 27) |
160 | #define RB_RPTR_WR_ENA (1<<31) | 160 | #define RB_RPTR_WR_ENA (1 << 31) |
161 | #define BUF_SWAP_32BIT (2 << 16) | 161 | #define BUF_SWAP_32BIT (2 << 16) |
162 | #define CP_RB_RPTR 0x8700 | 162 | #define CP_RB_RPTR 0x8700 |
163 | #define CP_RB_RPTR_ADDR 0xC10C | 163 | #define CP_RB_RPTR_ADDR 0xC10C |
164 | #define RB_RPTR_SWAP(x) ((x) << 0) | ||
164 | #define CP_RB_RPTR_ADDR_HI 0xC110 | 165 | #define CP_RB_RPTR_ADDR_HI 0xC110 |
165 | #define CP_RB_RPTR_WR 0xC108 | 166 | #define CP_RB_RPTR_WR 0xC108 |
166 | #define CP_RB_WPTR 0xC114 | 167 | #define CP_RB_WPTR 0xC114 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 71d2a554bbe6..56c48b67ef3d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -179,10 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev); | |||
179 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 179 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
180 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | 180 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); |
181 | void rs690_pm_info(struct radeon_device *rdev); | 181 | void rs690_pm_info(struct radeon_device *rdev); |
182 | extern u32 rv6xx_get_temp(struct radeon_device *rdev); | 182 | extern int rv6xx_get_temp(struct radeon_device *rdev); |
183 | extern u32 rv770_get_temp(struct radeon_device *rdev); | 183 | extern int rv770_get_temp(struct radeon_device *rdev); |
184 | extern u32 evergreen_get_temp(struct radeon_device *rdev); | 184 | extern int evergreen_get_temp(struct radeon_device *rdev); |
185 | extern u32 sumo_get_temp(struct radeon_device *rdev); | 185 | extern int sumo_get_temp(struct radeon_device *rdev); |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * Fences. | 188 | * Fences. |
@@ -812,8 +812,7 @@ struct radeon_pm { | |||
812 | fixed20_12 sclk; | 812 | fixed20_12 sclk; |
813 | fixed20_12 mclk; | 813 | fixed20_12 mclk; |
814 | fixed20_12 needed_bandwidth; | 814 | fixed20_12 needed_bandwidth; |
815 | /* XXX: use a define for num power modes */ | 815 | struct radeon_power_state *power_state; |
816 | struct radeon_power_state power_state[8]; | ||
817 | /* number of valid power states */ | 816 | /* number of valid power states */ |
818 | int num_power_states; | 817 | int num_power_states; |
819 | int current_power_state_index; | 818 | int current_power_state_index; |
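With power_state turned into a plain pointer here, the power-table parsers (see the radeon_atombios.c hunks further below) allocate exactly as many entries as the BIOS reports and simply return early if the allocation fails, instead of relying on a fixed array of eight states. A hedged user-space sketch of that allocation pattern; the struct and function are stand-ins, not the driver's types:

	#include <stdlib.h>

	struct power_state {
		unsigned int sclk;	/* engine clock */
		unsigned int mclk;	/* memory clock */
	};

	/* Allocate one zeroed entry per BIOS-reported mode; return how many
	 * entries are usable (0 if the count is bogus or allocation fails). */
	int alloc_power_states(struct power_state **states, int num_modes)
	{
		if (num_modes <= 0)
			return 0;

		*states = calloc((size_t)num_modes, sizeof(**states));
		if (!*states)
			return 0;

		return num_modes;
	}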
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 3a1b16186224..e75d63b8e21d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -759,7 +759,7 @@ static struct radeon_asic evergreen_asic = { | |||
759 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 759 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, |
760 | .gart_set_page = &rs600_gart_set_page, | 760 | .gart_set_page = &rs600_gart_set_page, |
761 | .ring_test = &r600_ring_test, | 761 | .ring_test = &r600_ring_test, |
762 | .ring_ib_execute = &r600_ring_ib_execute, | 762 | .ring_ib_execute = &evergreen_ring_ib_execute, |
763 | .irq_set = &evergreen_irq_set, | 763 | .irq_set = &evergreen_irq_set, |
764 | .irq_process = &evergreen_irq_process, | 764 | .irq_process = &evergreen_irq_process, |
765 | .get_vblank_counter = &evergreen_get_vblank_counter, | 765 | .get_vblank_counter = &evergreen_get_vblank_counter, |
@@ -805,7 +805,7 @@ static struct radeon_asic sumo_asic = { | |||
805 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 805 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, |
806 | .gart_set_page = &rs600_gart_set_page, | 806 | .gart_set_page = &rs600_gart_set_page, |
807 | .ring_test = &r600_ring_test, | 807 | .ring_test = &r600_ring_test, |
808 | .ring_ib_execute = &r600_ring_ib_execute, | 808 | .ring_ib_execute = &evergreen_ring_ib_execute, |
809 | .irq_set = &evergreen_irq_set, | 809 | .irq_set = &evergreen_irq_set, |
810 | .irq_process = &evergreen_irq_process, | 810 | .irq_process = &evergreen_irq_process, |
811 | .get_vblank_counter = &evergreen_get_vblank_counter, | 811 | .get_vblank_counter = &evergreen_get_vblank_counter, |
@@ -848,7 +848,7 @@ static struct radeon_asic btc_asic = { | |||
848 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 848 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, |
849 | .gart_set_page = &rs600_gart_set_page, | 849 | .gart_set_page = &rs600_gart_set_page, |
850 | .ring_test = &r600_ring_test, | 850 | .ring_test = &r600_ring_test, |
851 | .ring_ib_execute = &r600_ring_ib_execute, | 851 | .ring_ib_execute = &evergreen_ring_ib_execute, |
852 | .irq_set = &evergreen_irq_set, | 852 | .irq_set = &evergreen_irq_set, |
853 | .irq_process = &evergreen_irq_process, | 853 | .irq_process = &evergreen_irq_process, |
854 | .get_vblank_counter = &evergreen_get_vblank_counter, | 854 | .get_vblank_counter = &evergreen_get_vblank_counter, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e01f07718539..c59bd98a2029 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -355,6 +355,7 @@ int evergreen_resume(struct radeon_device *rdev); | |||
355 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); | 355 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); |
356 | int evergreen_asic_reset(struct radeon_device *rdev); | 356 | int evergreen_asic_reset(struct radeon_device *rdev); |
357 | void evergreen_bandwidth_update(struct radeon_device *rdev); | 357 | void evergreen_bandwidth_update(struct radeon_device *rdev); |
358 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | ||
358 | int evergreen_copy_blit(struct radeon_device *rdev, | 359 | int evergreen_copy_blit(struct radeon_device *rdev, |
359 | uint64_t src_offset, uint64_t dst_offset, | 360 | uint64_t src_offset, uint64_t dst_offset, |
360 | unsigned num_pages, struct radeon_fence *fence); | 361 | unsigned num_pages, struct radeon_fence *fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1573202a6418..02d5c415f499 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
88 | /* some evergreen boards have bad data for this entry */ | 88 | /* some evergreen boards have bad data for this entry */ |
89 | if (ASIC_IS_DCE4(rdev)) { | 89 | if (ASIC_IS_DCE4(rdev)) { |
90 | if ((i == 7) && | 90 | if ((i == 7) && |
91 | (gpio->usClkMaskRegisterIndex == 0x1936) && | 91 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && |
92 | (gpio->sucI2cId.ucAccess == 0)) { | 92 | (gpio->sucI2cId.ucAccess == 0)) { |
93 | gpio->sucI2cId.ucAccess = 0x97; | 93 | gpio->sucI2cId.ucAccess = 0x97; |
94 | gpio->ucDataMaskShift = 8; | 94 | gpio->ucDataMaskShift = 8; |
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
101 | /* some DCE3 boards have bad data for this entry */ | 101 | /* some DCE3 boards have bad data for this entry */ |
102 | if (ASIC_IS_DCE3(rdev)) { | 102 | if (ASIC_IS_DCE3(rdev)) { |
103 | if ((i == 4) && | 103 | if ((i == 4) && |
104 | (gpio->usClkMaskRegisterIndex == 0x1fda) && | 104 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && |
105 | (gpio->sucI2cId.ucAccess == 0x94)) | 105 | (gpio->sucI2cId.ucAccess == 0x94)) |
106 | gpio->sucI2cId.ucAccess = 0x14; | 106 | gpio->sucI2cId.ucAccess = 0x14; |
107 | } | 107 | } |
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
172 | /* some evergreen boards have bad data for this entry */ | 172 | /* some evergreen boards have bad data for this entry */ |
173 | if (ASIC_IS_DCE4(rdev)) { | 173 | if (ASIC_IS_DCE4(rdev)) { |
174 | if ((i == 7) && | 174 | if ((i == 7) && |
175 | (gpio->usClkMaskRegisterIndex == 0x1936) && | 175 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && |
176 | (gpio->sucI2cId.ucAccess == 0)) { | 176 | (gpio->sucI2cId.ucAccess == 0)) { |
177 | gpio->sucI2cId.ucAccess = 0x97; | 177 | gpio->sucI2cId.ucAccess = 0x97; |
178 | gpio->ucDataMaskShift = 8; | 178 | gpio->ucDataMaskShift = 8; |
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
185 | /* some DCE3 boards have bad data for this entry */ | 185 | /* some DCE3 boards have bad data for this entry */ |
186 | if (ASIC_IS_DCE3(rdev)) { | 186 | if (ASIC_IS_DCE3(rdev)) { |
187 | if ((i == 4) && | 187 | if ((i == 4) && |
188 | (gpio->usClkMaskRegisterIndex == 0x1fda) && | 188 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && |
189 | (gpio->sucI2cId.ucAccess == 0x94)) | 189 | (gpio->sucI2cId.ucAccess == 0x94)) |
190 | gpio->sucI2cId.ucAccess = 0x14; | 190 | gpio->sucI2cId.ucAccess = 0x14; |
191 | } | 191 | } |
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd | |||
252 | pin = &gpio_info->asGPIO_Pin[i]; | 252 | pin = &gpio_info->asGPIO_Pin[i]; |
253 | if (id == pin->ucGPIO_ID) { | 253 | if (id == pin->ucGPIO_ID) { |
254 | gpio.id = pin->ucGPIO_ID; | 254 | gpio.id = pin->ucGPIO_ID; |
255 | gpio.reg = pin->usGpioPin_AIndex * 4; | 255 | gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; |
256 | gpio.mask = (1 << pin->ucGpioPinBitShift); | 256 | gpio.mask = (1 << pin->ucGpioPinBitShift); |
257 | gpio.valid = true; | 257 | gpio.valid = true; |
258 | break; | 258 | break; |
@@ -387,15 +387,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
387 | *line_mux = 0x90; | 387 | *line_mux = 0x90; |
388 | } | 388 | } |
389 | 389 | ||
390 | /* mac rv630 */ | 390 | /* mac rv630, rv730, others */ |
391 | if ((dev->pdev->device == 0x9588) && | 391 | if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && |
392 | (dev->pdev->subsystem_vendor == 0x106b) && | 392 | (*connector_type == DRM_MODE_CONNECTOR_DVII)) { |
393 | (dev->pdev->subsystem_device == 0x00a6)) { | 393 | *connector_type = DRM_MODE_CONNECTOR_9PinDIN; |
394 | if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && | 394 | *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; |
395 | (*connector_type == DRM_MODE_CONNECTOR_DVII)) { | ||
396 | *connector_type = DRM_MODE_CONNECTOR_9PinDIN; | ||
397 | *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; | ||
398 | } | ||
399 | } | 395 | } |
400 | 396 | ||
401 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ | 397 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ |
@@ -1167,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1167 | p1pll->pll_out_min = 64800; | 1163 | p1pll->pll_out_min = 64800; |
1168 | else | 1164 | else |
1169 | p1pll->pll_out_min = 20000; | 1165 | p1pll->pll_out_min = 20000; |
1170 | } else if (p1pll->pll_out_min > 64800) { | ||
1171 | /* Limiting the pll output range is a good thing generally as | ||
1172 | * it limits the number of possible pll combinations for a given | ||
1173 | * frequency presumably to the ones that work best on each card. | ||
1174 | * However, certain duallink DVI monitors seem to like | ||
1175 | * pll combinations that would be limited by this at least on | ||
1176 | * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per | ||
1177 | * family. | ||
1178 | */ | ||
1179 | p1pll->pll_out_min = 64800; | ||
1180 | } | 1166 | } |
1181 | 1167 | ||
1182 | p1pll->pll_in_min = | 1168 | p1pll->pll_in_min = |
@@ -1288,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
1288 | data_offset); | 1274 | data_offset); |
1289 | switch (crev) { | 1275 | switch (crev) { |
1290 | case 1: | 1276 | case 1: |
1291 | if (igp_info->info.ulBootUpMemoryClock) | 1277 | if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock)) |
1292 | return true; | 1278 | return true; |
1293 | break; | 1279 | break; |
1294 | case 2: | 1280 | case 2: |
1295 | if (igp_info->info_2.ulBootUpSidePortClock) | 1281 | if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock)) |
1296 | return true; | 1282 | return true; |
1297 | break; | 1283 | break; |
1298 | default: | 1284 | default: |
@@ -1456,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1456 | 1442 | ||
1457 | for (i = 0; i < num_indices; i++) { | 1443 | for (i = 0; i < num_indices; i++) { |
1458 | if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && | 1444 | if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && |
1459 | (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { | 1445 | (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { |
1460 | ss->percentage = | 1446 | ss->percentage = |
1461 | le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1447 | le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); |
1462 | ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1448 | ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; |
@@ -1470,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1470 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); | 1456 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); |
1471 | for (i = 0; i < num_indices; i++) { | 1457 | for (i = 0; i < num_indices; i++) { |
1472 | if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && | 1458 | if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && |
1473 | (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { | 1459 | (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { |
1474 | ss->percentage = | 1460 | ss->percentage = |
1475 | le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1461 | le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); |
1476 | ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1462 | ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; |
@@ -1484,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1484 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); | 1470 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); |
1485 | for (i = 0; i < num_indices; i++) { | 1471 | for (i = 0; i < num_indices; i++) { |
1486 | if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && | 1472 | if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && |
1487 | (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { | 1473 | (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { |
1488 | ss->percentage = | 1474 | ss->percentage = |
1489 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1475 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); |
1490 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1476 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; |
@@ -1567,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1567 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | 1553 | if (misc & ATOM_DOUBLE_CLOCK_MODE) |
1568 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; | 1554 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; |
1569 | 1555 | ||
1570 | lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; | 1556 | lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); |
1571 | lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; | 1557 | lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); |
1572 | 1558 | ||
1573 | /* set crtc values */ | 1559 | /* set crtc values */ |
1574 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | 1560 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); |
@@ -1583,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1583 | lvds->linkb = false; | 1569 | lvds->linkb = false; |
1584 | 1570 | ||
1585 | /* parse the lcd record table */ | 1571 | /* parse the lcd record table */ |
1586 | if (lvds_info->info.usModePatchTableOffset) { | 1572 | if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { |
1587 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; | 1573 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; |
1588 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; | 1574 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; |
1589 | bool bad_record = false; | 1575 | bool bad_record = false; |
1590 | u8 *record = (u8 *)(mode_info->atom_context->bios + | 1576 | u8 *record = (u8 *)(mode_info->atom_context->bios + |
1591 | data_offset + | 1577 | data_offset + |
1592 | lvds_info->info.usModePatchTableOffset); | 1578 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); |
1593 | while (*record != ATOM_RECORD_END_TYPE) { | 1579 | while (*record != ATOM_RECORD_END_TYPE) { |
1594 | switch (*record) { | 1580 | switch (*record) { |
1595 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: | 1581 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: |
@@ -1991,6 +1977,9 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
1991 | num_modes = power_info->info.ucNumOfPowerModeEntries; | 1977 | num_modes = power_info->info.ucNumOfPowerModeEntries; |
1992 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | 1978 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) |
1993 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | 1979 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; |
1980 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); | ||
1981 | if (!rdev->pm.power_state) | ||
1982 | return state_index; | ||
1994 | /* last mode is usually default, array is low to high */ | 1983 | /* last mode is usually default, array is low to high */ |
1995 | for (i = 0; i < num_modes; i++) { | 1984 | for (i = 0; i < num_modes; i++) { |
1996 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 1985 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
@@ -2200,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) | |||
2200 | firmware_info = | 2189 | firmware_info = |
2201 | (union firmware_info *)(mode_info->atom_context->bios + | 2190 | (union firmware_info *)(mode_info->atom_context->bios + |
2202 | data_offset); | 2191 | data_offset); |
2203 | vddc = firmware_info->info_14.usBootUpVDDCVoltage; | 2192 | vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); |
2204 | } | 2193 | } |
2205 | 2194 | ||
2206 | return vddc; | 2195 | return vddc; |
@@ -2295,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2295 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 2284 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
2296 | VOLTAGE_SW; | 2285 | VOLTAGE_SW; |
2297 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2286 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
2298 | clock_info->evergreen.usVDDC; | 2287 | le16_to_cpu(clock_info->evergreen.usVDDC); |
2299 | } else { | 2288 | } else { |
2300 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); | 2289 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); |
2301 | sclk |= clock_info->r600.ucEngineClockHigh << 16; | 2290 | sclk |= clock_info->r600.ucEngineClockHigh << 16; |
@@ -2306,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2306 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 2295 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
2307 | VOLTAGE_SW; | 2296 | VOLTAGE_SW; |
2308 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2297 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
2309 | clock_info->r600.usVDDC; | 2298 | le16_to_cpu(clock_info->r600.usVDDC); |
2310 | } | 2299 | } |
2311 | 2300 | ||
2312 | if (rdev->flags & RADEON_IS_IGP) { | 2301 | if (rdev->flags & RADEON_IS_IGP) { |
@@ -2342,6 +2331,10 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) | |||
2342 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | 2331 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
2343 | 2332 | ||
2344 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); | 2333 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); |
2334 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * | ||
2335 | power_info->pplib.ucNumStates, GFP_KERNEL); | ||
2336 | if (!rdev->pm.power_state) | ||
2337 | return state_index; | ||
2345 | /* first mode is usually default, followed by low to high */ | 2338 | /* first mode is usually default, followed by low to high */ |
2346 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { | 2339 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
2347 | mode_index = 0; | 2340 | mode_index = 0; |
@@ -2415,13 +2408,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) | |||
2415 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); | 2408 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); |
2416 | state_array = (struct StateArray *) | 2409 | state_array = (struct StateArray *) |
2417 | (mode_info->atom_context->bios + data_offset + | 2410 | (mode_info->atom_context->bios + data_offset + |
2418 | power_info->pplib.usStateArrayOffset); | 2411 | le16_to_cpu(power_info->pplib.usStateArrayOffset)); |
2419 | clock_info_array = (struct ClockInfoArray *) | 2412 | clock_info_array = (struct ClockInfoArray *) |
2420 | (mode_info->atom_context->bios + data_offset + | 2413 | (mode_info->atom_context->bios + data_offset + |
2421 | power_info->pplib.usClockInfoArrayOffset); | 2414 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); |
2422 | non_clock_info_array = (struct NonClockInfoArray *) | 2415 | non_clock_info_array = (struct NonClockInfoArray *) |
2423 | (mode_info->atom_context->bios + data_offset + | 2416 | (mode_info->atom_context->bios + data_offset + |
2424 | power_info->pplib.usNonClockInfoArrayOffset); | 2417 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); |
2418 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * | ||
2419 | state_array->ucNumEntries, GFP_KERNEL); | ||
2420 | if (!rdev->pm.power_state) | ||
2421 | return state_index; | ||
2425 | for (i = 0; i < state_array->ucNumEntries; i++) { | 2422 | for (i = 0; i < state_array->ucNumEntries; i++) { |
2426 | mode_index = 0; | 2423 | mode_index = 0; |
2427 | power_state = (union pplib_power_state *)&state_array->states[i]; | 2424 | power_state = (union pplib_power_state *)&state_array->states[i]; |
@@ -2495,19 +2492,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
2495 | break; | 2492 | break; |
2496 | } | 2493 | } |
2497 | } else { | 2494 | } else { |
2498 | /* add the default mode */ | 2495 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); |
2499 | rdev->pm.power_state[state_index].type = | 2496 | if (rdev->pm.power_state) { |
2500 | POWER_STATE_TYPE_DEFAULT; | 2497 | /* add the default mode */ |
2501 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2498 | rdev->pm.power_state[state_index].type = |
2502 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | 2499 | POWER_STATE_TYPE_DEFAULT; |
2503 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2500 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2504 | rdev->pm.power_state[state_index].default_clock_mode = | 2501 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; |
2505 | &rdev->pm.power_state[state_index].clock_info[0]; | 2502 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; |
2506 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2503 | rdev->pm.power_state[state_index].default_clock_mode = |
2507 | rdev->pm.power_state[state_index].pcie_lanes = 16; | 2504 | &rdev->pm.power_state[state_index].clock_info[0]; |
2508 | rdev->pm.default_power_state_index = state_index; | 2505 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
2509 | rdev->pm.power_state[state_index].flags = 0; | 2506 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
2510 | state_index++; | 2507 | rdev->pm.default_power_state_index = state_index; |
2508 | rdev->pm.power_state[state_index].flags = 0; | ||
2509 | state_index++; | ||
2510 | } | ||
2511 | } | 2511 | } |
2512 | 2512 | ||
2513 | rdev->pm.num_power_states = state_index; | 2513 | rdev->pm.num_power_states = state_index; |
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) | |||
2533 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); | 2533 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); |
2534 | 2534 | ||
2535 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2535 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2536 | return args.ulReturnEngineClock; | 2536 | return le32_to_cpu(args.ulReturnEngineClock); |
2537 | } | 2537 | } |
2538 | 2538 | ||
2539 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) | 2539 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) |
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) | |||
2542 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); | 2542 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); |
2543 | 2543 | ||
2544 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2544 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2545 | return args.ulReturnMemoryClock; | 2545 | return le32_to_cpu(args.ulReturnMemoryClock); |
2546 | } | 2546 | } |
2547 | 2547 | ||
2548 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, | 2548 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, |
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev, | |||
2551 | SET_ENGINE_CLOCK_PS_ALLOCATION args; | 2551 | SET_ENGINE_CLOCK_PS_ALLOCATION args; |
2552 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); | 2552 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); |
2553 | 2553 | ||
2554 | args.ulTargetEngineClock = eng_clock; /* 10 khz */ | 2554 | args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ |
2555 | 2555 | ||
2556 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2556 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2557 | } | 2557 | } |
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, | |||
2565 | if (rdev->flags & RADEON_IS_IGP) | 2565 | if (rdev->flags & RADEON_IS_IGP) |
2566 | return; | 2566 | return; |
2567 | 2567 | ||
2568 | args.ulTargetMemoryClock = mem_clock; /* 10 khz */ | 2568 | args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ |
2569 | 2569 | ||
2570 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2570 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2571 | } | 2571 | } |
@@ -2623,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | |||
2623 | bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; | 2623 | bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; |
2624 | 2624 | ||
2625 | /* tell the bios not to handle mode switching */ | 2625 | /* tell the bios not to handle mode switching */ |
2626 | bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); | 2626 | bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; |
2627 | 2627 | ||
2628 | if (rdev->family >= CHIP_R600) { | 2628 | if (rdev->family >= CHIP_R600) { |
2629 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); | 2629 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); |
@@ -2674,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) | |||
2674 | else | 2674 | else |
2675 | bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); | 2675 | bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); |
2676 | 2676 | ||
2677 | if (lock) | 2677 | if (lock) { |
2678 | bios_6_scratch |= ATOM_S6_CRITICAL_STATE; | 2678 | bios_6_scratch |= ATOM_S6_CRITICAL_STATE; |
2679 | else | 2679 | bios_6_scratch &= ~ATOM_S6_ACC_MODE; |
2680 | } else { | ||
2680 | bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; | 2681 | bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; |
2682 | bios_6_scratch |= ATOM_S6_ACC_MODE; | ||
2683 | } | ||
2681 | 2684 | ||
2682 | if (rdev->family >= CHIP_R600) | 2685 | if (rdev->family >= CHIP_R600) |
2683 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); | 2686 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 591fcae8f224..cf7c8d5b4ec2 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1504 | (rdev->pdev->subsystem_device == 0x4a48)) { | 1504 | (rdev->pdev->subsystem_device == 0x4a48)) { |
1505 | /* Mac X800 */ | 1505 | /* Mac X800 */ |
1506 | rdev->mode_info.connector_table = CT_MAC_X800; | 1506 | rdev->mode_info.connector_table = CT_MAC_X800; |
1507 | } else if ((rdev->pdev->device == 0x4150) && | ||
1508 | (rdev->pdev->subsystem_vendor == 0x1002) && | ||
1509 | (rdev->pdev->subsystem_device == 0x4150)) { | ||
1510 | /* Mac G5 9600 */ | ||
1511 | rdev->mode_info.connector_table = CT_MAC_G5_9600; | ||
1507 | } else | 1512 | } else |
1508 | #endif /* CONFIG_PPC_PMAC */ | 1513 | #endif /* CONFIG_PPC_PMAC */ |
1509 | #ifdef CONFIG_PPC64 | 1514 | #ifdef CONFIG_PPC64 |
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
2022 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, | 2027 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, |
2023 | &hpd); | 2028 | &hpd); |
2024 | break; | 2029 | break; |
2030 | case CT_MAC_G5_9600: | ||
2031 | DRM_INFO("Connector Table: %d (mac g5 9600)\n", | ||
2032 | rdev->mode_info.connector_table); | ||
2033 | /* DVI - tv dac, dvo */ | ||
2034 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | ||
2035 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
2036 | radeon_add_legacy_encoder(dev, | ||
2037 | radeon_get_encoder_enum(dev, | ||
2038 | ATOM_DEVICE_DFP2_SUPPORT, | ||
2039 | 0), | ||
2040 | ATOM_DEVICE_DFP2_SUPPORT); | ||
2041 | radeon_add_legacy_encoder(dev, | ||
2042 | radeon_get_encoder_enum(dev, | ||
2043 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2044 | 2), | ||
2045 | ATOM_DEVICE_CRT2_SUPPORT); | ||
2046 | radeon_add_legacy_connector(dev, 0, | ||
2047 | ATOM_DEVICE_DFP2_SUPPORT | | ||
2048 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2049 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2050 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
2051 | &hpd); | ||
2052 | /* ADC - primary dac, internal tmds */ | ||
2053 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | ||
2054 | hpd.hpd = RADEON_HPD_2; /* ??? */ | ||
2055 | radeon_add_legacy_encoder(dev, | ||
2056 | radeon_get_encoder_enum(dev, | ||
2057 | ATOM_DEVICE_DFP1_SUPPORT, | ||
2058 | 0), | ||
2059 | ATOM_DEVICE_DFP1_SUPPORT); | ||
2060 | radeon_add_legacy_encoder(dev, | ||
2061 | radeon_get_encoder_enum(dev, | ||
2062 | ATOM_DEVICE_CRT1_SUPPORT, | ||
2063 | 1), | ||
2064 | ATOM_DEVICE_CRT1_SUPPORT); | ||
2065 | radeon_add_legacy_connector(dev, 1, | ||
2066 | ATOM_DEVICE_DFP1_SUPPORT | | ||
2067 | ATOM_DEVICE_CRT1_SUPPORT, | ||
2068 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2069 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
2070 | &hpd); | ||
2071 | break; | ||
2025 | default: | 2072 | default: |
2026 | DRM_INFO("Connector table: %d (invalid)\n", | 2073 | DRM_INFO("Connector table: %d (invalid)\n", |
2027 | rdev->mode_info.connector_table); | 2074 | rdev->mode_info.connector_table); |
@@ -2442,6 +2489,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2442 | 2489 | ||
2443 | rdev->pm.default_power_state_index = -1; | 2490 | rdev->pm.default_power_state_index = -1; |
2444 | 2491 | ||
2492 | /* allocate 2 power states */ | ||
2493 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); | ||
2494 | if (!rdev->pm.power_state) { | ||
2495 | rdev->pm.default_power_state_index = state_index; | ||
2496 | rdev->pm.num_power_states = 0; | ||
2497 | |||
2498 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | ||
2499 | rdev->pm.current_clock_mode_index = 0; | ||
2500 | return; | ||
2501 | } | ||
2502 | |||
2445 | if (rdev->flags & RADEON_IS_MOBILITY) { | 2503 | if (rdev->flags & RADEON_IS_MOBILITY) { |
2446 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); | 2504 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); |
2447 | if (offset) { | 2505 | if (offset) { |
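The combios path now allocates its two power-state slots up front and, if that fails, reports zero states with a safe default index instead of later writing through a NULL table. A minimal userspace analogue of that defensive shape, with calloc() standing in for kzalloc() and made-up struct names:

/* Sketch only: the defensive-allocation pattern, with illustrative types.
 * The real code uses kzalloc(GFP_KERNEL) and struct radeon_power_state. */
#include <stdio.h>
#include <stdlib.h>

struct power_state { unsigned int sclk, mclk; };

struct pm_info {
        struct power_state *power_state;
        int num_power_states;
        int default_power_state_index;
};

static void get_power_modes(struct pm_info *pm)
{
        pm->power_state = calloc(2, sizeof(*pm->power_state));
        if (!pm->power_state) {
                /* no table: zero states, safe default index, bail out */
                pm->num_power_states = 0;
                pm->default_power_state_index = 0;
                return;
        }
        /* ... parse the BIOS tables and fill the two entries ... */
        pm->num_power_states = 2;
        pm->default_power_state_index = 1;
}

int main(void)
{
        struct pm_info pm = { 0 };

        get_power_modes(&pm);
        printf("states=%d default=%d\n",
               pm.num_power_states, pm.default_power_state_index);
        free(pm.power_state);
        return 0;
}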
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 26091d602b84..4954e2d6ffa2 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -891,9 +891,9 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
891 | pci_disable_device(dev->pdev); | 891 | pci_disable_device(dev->pdev); |
892 | pci_set_power_state(dev->pdev, PCI_D3hot); | 892 | pci_set_power_state(dev->pdev, PCI_D3hot); |
893 | } | 893 | } |
894 | acquire_console_sem(); | 894 | console_lock(); |
895 | radeon_fbdev_set_suspend(rdev, 1); | 895 | radeon_fbdev_set_suspend(rdev, 1); |
896 | release_console_sem(); | 896 | console_unlock(); |
897 | return 0; | 897 | return 0; |
898 | } | 898 | } |
899 | 899 | ||
@@ -905,11 +905,11 @@ int radeon_resume_kms(struct drm_device *dev) | |||
905 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 905 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
906 | return 0; | 906 | return 0; |
907 | 907 | ||
908 | acquire_console_sem(); | 908 | console_lock(); |
909 | pci_set_power_state(dev->pdev, PCI_D0); | 909 | pci_set_power_state(dev->pdev, PCI_D0); |
910 | pci_restore_state(dev->pdev); | 910 | pci_restore_state(dev->pdev); |
911 | if (pci_enable_device(dev->pdev)) { | 911 | if (pci_enable_device(dev->pdev)) { |
912 | release_console_sem(); | 912 | console_unlock(); |
913 | return -1; | 913 | return -1; |
914 | } | 914 | } |
915 | pci_set_master(dev->pdev); | 915 | pci_set_master(dev->pdev); |
@@ -920,7 +920,7 @@ int radeon_resume_kms(struct drm_device *dev) | |||
920 | radeon_restore_bios_scratch_regs(rdev); | 920 | radeon_restore_bios_scratch_regs(rdev); |
921 | 921 | ||
922 | radeon_fbdev_set_suspend(rdev, 0); | 922 | radeon_fbdev_set_suspend(rdev, 0); |
923 | release_console_sem(); | 923 | console_unlock(); |
924 | 924 | ||
925 | /* reset hpd state */ | 925 | /* reset hpd state */ |
926 | radeon_hpd_init(rdev); | 926 | radeon_hpd_init(rdev); |
@@ -936,8 +936,11 @@ int radeon_resume_kms(struct drm_device *dev) | |||
936 | int radeon_gpu_reset(struct radeon_device *rdev) | 936 | int radeon_gpu_reset(struct radeon_device *rdev) |
937 | { | 937 | { |
938 | int r; | 938 | int r; |
939 | int resched; | ||
939 | 940 | ||
940 | radeon_save_bios_scratch_regs(rdev); | 941 | radeon_save_bios_scratch_regs(rdev); |
942 | /* block TTM */ | ||
943 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | ||
941 | radeon_suspend(rdev); | 944 | radeon_suspend(rdev); |
942 | 945 | ||
943 | r = radeon_asic_reset(rdev); | 946 | r = radeon_asic_reset(rdev); |
@@ -946,6 +949,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
946 | radeon_resume(rdev); | 949 | radeon_resume(rdev); |
947 | radeon_restore_bios_scratch_regs(rdev); | 950 | radeon_restore_bios_scratch_regs(rdev); |
948 | drm_helper_resume_force_mode(rdev->ddev); | 951 | drm_helper_resume_force_mode(rdev->ddev); |
952 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | ||
949 | return 0; | 953 | return 0; |
950 | } | 954 | } |
951 | /* bad news, how to tell it to userspace ? */ | 955 | /* bad news, how to tell it to userspace ? */ |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d26dabf878d9..3e7e7f9eb781 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -780,6 +780,125 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
780 | return ret; | 780 | return ret; |
781 | } | 781 | } |
782 | 782 | ||
783 | /* avivo */ | ||
784 | static void avivo_get_fb_div(struct radeon_pll *pll, | ||
785 | u32 target_clock, | ||
786 | u32 post_div, | ||
787 | u32 ref_div, | ||
788 | u32 *fb_div, | ||
789 | u32 *frac_fb_div) | ||
790 | { | ||
791 | u32 tmp = post_div * ref_div; | ||
792 | |||
793 | tmp *= target_clock; | ||
794 | *fb_div = tmp / pll->reference_freq; | ||
795 | *frac_fb_div = tmp % pll->reference_freq; | ||
796 | |||
797 | if (*fb_div > pll->max_feedback_div) | ||
798 | *fb_div = pll->max_feedback_div; | ||
799 | else if (*fb_div < pll->min_feedback_div) | ||
800 | *fb_div = pll->min_feedback_div; | ||
801 | } | ||
802 | |||
803 | static u32 avivo_get_post_div(struct radeon_pll *pll, | ||
804 | u32 target_clock) | ||
805 | { | ||
806 | u32 vco, post_div, tmp; | ||
807 | |||
808 | if (pll->flags & RADEON_PLL_USE_POST_DIV) | ||
809 | return pll->post_div; | ||
810 | |||
811 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { | ||
812 | if (pll->flags & RADEON_PLL_IS_LCD) | ||
813 | vco = pll->lcd_pll_out_min; | ||
814 | else | ||
815 | vco = pll->pll_out_min; | ||
816 | } else { | ||
817 | if (pll->flags & RADEON_PLL_IS_LCD) | ||
818 | vco = pll->lcd_pll_out_max; | ||
819 | else | ||
820 | vco = pll->pll_out_max; | ||
821 | } | ||
822 | |||
823 | post_div = vco / target_clock; | ||
824 | tmp = vco % target_clock; | ||
825 | |||
826 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { | ||
827 | if (tmp) | ||
828 | post_div++; | ||
829 | } else { | ||
830 | if (!tmp) | ||
831 | post_div--; | ||
832 | } | ||
833 | |||
834 | if (post_div > pll->max_post_div) | ||
835 | post_div = pll->max_post_div; | ||
836 | else if (post_div < pll->min_post_div) | ||
837 | post_div = pll->min_post_div; | ||
838 | |||
839 | return post_div; | ||
840 | } | ||
841 | |||
842 | #define MAX_TOLERANCE 10 | ||
843 | |||
844 | void radeon_compute_pll_avivo(struct radeon_pll *pll, | ||
845 | u32 freq, | ||
846 | u32 *dot_clock_p, | ||
847 | u32 *fb_div_p, | ||
848 | u32 *frac_fb_div_p, | ||
849 | u32 *ref_div_p, | ||
850 | u32 *post_div_p) | ||
851 | { | ||
852 | u32 target_clock = freq / 10; | ||
853 | u32 post_div = avivo_get_post_div(pll, target_clock); | ||
854 | u32 ref_div = pll->min_ref_div; | ||
855 | u32 fb_div = 0, frac_fb_div = 0, tmp; | ||
856 | |||
857 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | ||
858 | ref_div = pll->reference_div; | ||
859 | |||
860 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | ||
861 | avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); | ||
862 | frac_fb_div = (100 * frac_fb_div) / pll->reference_freq; | ||
863 | if (frac_fb_div >= 5) { | ||
864 | frac_fb_div -= 5; | ||
865 | frac_fb_div = frac_fb_div / 10; | ||
866 | frac_fb_div++; | ||
867 | } | ||
868 | if (frac_fb_div >= 10) { | ||
869 | fb_div++; | ||
870 | frac_fb_div = 0; | ||
871 | } | ||
872 | } else { | ||
873 | while (ref_div <= pll->max_ref_div) { | ||
874 | avivo_get_fb_div(pll, target_clock, post_div, ref_div, | ||
875 | &fb_div, &frac_fb_div); | ||
876 | if (frac_fb_div >= (pll->reference_freq / 2)) | ||
877 | fb_div++; | ||
878 | frac_fb_div = 0; | ||
879 | tmp = (pll->reference_freq * fb_div) / (post_div * ref_div); | ||
880 | tmp = (tmp * 10000) / target_clock; | ||
881 | |||
882 | if (tmp > (10000 + MAX_TOLERANCE)) | ||
883 | ref_div++; | ||
884 | else if (tmp >= (10000 - MAX_TOLERANCE)) | ||
885 | break; | ||
886 | else | ||
887 | ref_div++; | ||
888 | } | ||
889 | } | ||
890 | |||
891 | *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) / | ||
892 | (ref_div * post_div * 10); | ||
893 | *fb_div_p = fb_div; | ||
894 | *frac_fb_div_p = frac_fb_div; | ||
895 | *ref_div_p = ref_div; | ||
896 | *post_div_p = post_div; | ||
897 | DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n", | ||
898 | *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div); | ||
899 | } | ||
900 | |||
901 | /* pre-avivo */ | ||
783 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) | 902 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) |
784 | { | 903 | { |
785 | uint64_t mod; | 904 | uint64_t mod; |
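The new avivo helper splits target_clock * post_div * ref_div / reference_freq into an integer feedback divider plus a one-digit fractional part, then recomputes the dot clock from the chosen dividers. A standalone worked example with assumed numbers (27 MHz reference and a 154 MHz target, both in the driver's 10 kHz units, fixed ref_div = 1 and post_div = 4):

/* Worked example of the feedback-divider split used by avivo_get_fb_div().
 * All numbers are assumptions for illustration. */
#include <stdio.h>

int main(void)
{
        unsigned int reference_freq = 2700;   /* 27 MHz, 10 kHz units */
        unsigned int target_clock   = 15400;  /* 154 MHz, 10 kHz units */
        unsigned int ref_div = 1, post_div = 4;

        unsigned int tmp = post_div * ref_div * target_clock;
        unsigned int fb_div = tmp / reference_freq;   /* integer part */
        unsigned int frac = tmp % reference_freq;     /* remainder */

        /* scale the remainder to one decimal digit, as the driver does */
        unsigned int frac_fb_div = (100 * frac) / reference_freq;
        if (frac_fb_div >= 5) {
                frac_fb_div -= 5;
                frac_fb_div /= 10;
                frac_fb_div++;
        }
        if (frac_fb_div >= 10) {
                fb_div++;
                frac_fb_div = 0;
        }

        unsigned int dot_clock =
                (reference_freq * fb_div * 10 + reference_freq * frac_fb_div) /
                (ref_div * post_div * 10);

        printf("fb: %u.%u ref: %u post: %u -> dot clock %u (10 kHz units)\n",
               fb_div, frac_fb_div, ref_div, post_div, dot_clock);
        return 0;
}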
@@ -790,13 +909,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) | |||
790 | return n; | 909 | return n; |
791 | } | 910 | } |
792 | 911 | ||
793 | void radeon_compute_pll(struct radeon_pll *pll, | 912 | void radeon_compute_pll_legacy(struct radeon_pll *pll, |
794 | uint64_t freq, | 913 | uint64_t freq, |
795 | uint32_t *dot_clock_p, | 914 | uint32_t *dot_clock_p, |
796 | uint32_t *fb_div_p, | 915 | uint32_t *fb_div_p, |
797 | uint32_t *frac_fb_div_p, | 916 | uint32_t *frac_fb_div_p, |
798 | uint32_t *ref_div_p, | 917 | uint32_t *ref_div_p, |
799 | uint32_t *post_div_p) | 918 | uint32_t *post_div_p) |
800 | { | 919 | { |
801 | uint32_t min_ref_div = pll->min_ref_div; | 920 | uint32_t min_ref_div = pll->min_ref_div; |
802 | uint32_t max_ref_div = pll->max_ref_div; | 921 | uint32_t max_ref_div = pll->max_ref_div; |
@@ -826,6 +945,9 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
826 | pll_out_max = pll->pll_out_max; | 945 | pll_out_max = pll->pll_out_max; |
827 | } | 946 | } |
828 | 947 | ||
948 | if (pll_out_min > 64800) | ||
949 | pll_out_min = 64800; | ||
950 | |||
829 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | 951 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
830 | min_ref_div = max_ref_div = pll->reference_div; | 952 | min_ref_div = max_ref_div = pll->reference_div; |
831 | else { | 953 | else { |
@@ -965,6 +1087,10 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
965 | *frac_fb_div_p = best_frac_feedback_div; | 1087 | *frac_fb_div_p = best_frac_feedback_div; |
966 | *ref_div_p = best_ref_div; | 1088 | *ref_div_p = best_ref_div; |
967 | *post_div_p = best_post_div; | 1089 | *post_div_p = best_post_div; |
1090 | DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", | ||
1091 | freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, | ||
1092 | best_ref_div, best_post_div); | ||
1093 | |||
968 | } | 1094 | } |
969 | 1095 | ||
970 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | 1096 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index d5680a0c87af..275b26a708d6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -48,7 +48,7 @@ | |||
48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen | 48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen |
49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) | 49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) |
50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs | 50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs |
51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK | 51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query |
52 | */ | 52 | */ |
53 | #define KMS_DRIVER_MAJOR 2 | 53 | #define KMS_DRIVER_MAJOR 2 |
54 | #define KMS_DRIVER_MINOR 8 | 54 | #define KMS_DRIVER_MINOR 8 |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 448eba89d1e6..5cba46b9779a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); | |||
1524 | #define R600_CP_RB_CNTL 0xc104 | 1524 | #define R600_CP_RB_CNTL 0xc104 |
1525 | # define R600_RB_BUFSZ(x) ((x) << 0) | 1525 | # define R600_RB_BUFSZ(x) ((x) << 0) |
1526 | # define R600_RB_BLKSZ(x) ((x) << 8) | 1526 | # define R600_RB_BLKSZ(x) ((x) << 8) |
1527 | # define R600_BUF_SWAP_32BIT (2 << 16) | ||
1527 | # define R600_RB_NO_UPDATE (1 << 27) | 1528 | # define R600_RB_NO_UPDATE (1 << 27) |
1528 | # define R600_RB_RPTR_WR_ENA (1 << 31) | 1529 | # define R600_RB_RPTR_WR_ENA (1 << 31) |
1529 | #define R600_CP_RB_RPTR_WR 0xc108 | 1530 | #define R600_CP_RB_RPTR_WR 0xc108 |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 8fd184286c0b..b4274883227f 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
641 | switch (connector->connector_type) { | 641 | switch (connector->connector_type) { |
642 | case DRM_MODE_CONNECTOR_DVII: | 642 | case DRM_MODE_CONNECTOR_DVII: |
643 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | 643 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
644 | if (drm_detect_monitor_audio(radeon_connector->edid)) { | 644 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
645 | /* fix me */ | 645 | /* fix me */ |
646 | if (ASIC_IS_DCE4(rdev)) | 646 | if (ASIC_IS_DCE4(rdev)) |
647 | return ATOM_ENCODER_MODE_DVI; | 647 | return ATOM_ENCODER_MODE_DVI; |
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
655 | case DRM_MODE_CONNECTOR_DVID: | 655 | case DRM_MODE_CONNECTOR_DVID: |
656 | case DRM_MODE_CONNECTOR_HDMIA: | 656 | case DRM_MODE_CONNECTOR_HDMIA: |
657 | default: | 657 | default: |
658 | if (drm_detect_monitor_audio(radeon_connector->edid)) { | 658 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
659 | /* fix me */ | 659 | /* fix me */ |
660 | if (ASIC_IS_DCE4(rdev)) | 660 | if (ASIC_IS_DCE4(rdev)) |
661 | return ATOM_ENCODER_MODE_DVI; | 661 | return ATOM_ENCODER_MODE_DVI; |
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
673 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 673 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
674 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 674 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
675 | return ATOM_ENCODER_MODE_DP; | 675 | return ATOM_ENCODER_MODE_DP; |
676 | else if (drm_detect_monitor_audio(radeon_connector->edid)) { | 676 | else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
677 | /* fix me */ | 677 | /* fix me */ |
678 | if (ASIC_IS_DCE4(rdev)) | 678 | if (ASIC_IS_DCE4(rdev)) |
679 | return ATOM_ENCODER_MODE_DVI; | 679 | return ATOM_ENCODER_MODE_DVI; |
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
910 | 910 | ||
911 | args.v1.ucAction = action; | 911 | args.v1.ucAction = action; |
912 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 912 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
913 | args.v1.usInitInfo = connector_object_id; | 913 | args.v1.usInitInfo = cpu_to_le16(connector_object_id); |
914 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | 914 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { |
915 | args.v1.asMode.ucLaneSel = lane_num; | 915 | args.v1.asMode.ucLaneSel = lane_num; |
916 | args.v1.asMode.ucLaneSet = lane_set; | 916 | args.v1.asMode.ucLaneSet = lane_set; |
@@ -1063,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action) | |||
1063 | if (!ASIC_IS_DCE4(rdev)) | 1063 | if (!ASIC_IS_DCE4(rdev)) |
1064 | return; | 1064 | return; |
1065 | 1065 | ||
1066 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) || | 1066 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && |
1067 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | 1067 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) |
1068 | return; | 1068 | return; |
1069 | 1069 | ||
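The guard above is meant to bail out unless the action is POWER_ON or POWER_OFF; with '||' it bailed out for every action, so the negation needs '&&'. A two-function truth check (constants are stand-ins):

/* Tiny check of the corrected guard: "neither POWER_ON nor POWER_OFF"
 * must be (a != ON) && (a != OFF). */
#include <stdio.h>

enum { ACTION_POWER_ON = 1, ACTION_POWER_OFF = 2, ACTION_OTHER = 3 };

static int rejected_broken(int a) { return a != ACTION_POWER_ON || a != ACTION_POWER_OFF; }
static int rejected_fixed(int a)  { return a != ACTION_POWER_ON && a != ACTION_POWER_OFF; }

int main(void)
{
        int actions[] = { ACTION_POWER_ON, ACTION_POWER_OFF, ACTION_OTHER };
        for (int i = 0; i < 3; i++)
                printf("action %d: broken rejects=%d, fixed rejects=%d\n",
                       actions[i], rejected_broken(actions[i]),
                       rejected_fixed(actions[i]));
        return 0;
}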
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, | |||
1140 | case 3: | 1140 | case 3: |
1141 | args.v3.sExtEncoder.ucAction = action; | 1141 | args.v3.sExtEncoder.ucAction = action; |
1142 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) | 1142 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) |
1143 | args.v3.sExtEncoder.usConnectorId = connector_object_id; | 1143 | args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id); |
1144 | else | 1144 | else |
1145 | args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 1145 | args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
1146 | args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); | 1146 | args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); |
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, | |||
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | /* set scaler clears this on some chips */ | 1572 | /* set scaler clears this on some chips */ |
1573 | /* XXX check DCE4 */ | 1573 | if (ASIC_IS_AVIVO(rdev) && |
1574 | if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { | 1574 | (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { |
1575 | if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 1575 | if (ASIC_IS_DCE4(rdev)) { |
1576 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | 1576 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
1577 | AVIVO_D1MODE_INTERLEAVE_EN); | 1577 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, |
1578 | EVERGREEN_INTERLEAVE_EN); | ||
1579 | else | ||
1580 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1581 | } else { | ||
1582 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1583 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1584 | AVIVO_D1MODE_INTERLEAVE_EN); | ||
1585 | else | ||
1586 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1587 | } | ||
1578 | } | 1588 | } |
1579 | } | 1589 | } |
1580 | 1590 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 66324b5bb5ba..cc44bdfec80f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
113 | u32 tiling_flags = 0; | 113 | u32 tiling_flags = 0; |
114 | int ret; | 114 | int ret; |
115 | int aligned_size, size; | 115 | int aligned_size, size; |
116 | int height = mode_cmd->height; | ||
116 | 117 | ||
117 | /* need to align pitch with crtc limits */ | 118 | /* need to align pitch with crtc limits */ |
118 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); | 119 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); |
119 | 120 | ||
120 | size = mode_cmd->pitch * mode_cmd->height; | 121 | if (rdev->family >= CHIP_R600) |
122 | height = ALIGN(mode_cmd->height, 8); | ||
123 | size = mode_cmd->pitch * height; | ||
121 | aligned_size = ALIGN(size, PAGE_SIZE); | 124 | aligned_size = ALIGN(size, PAGE_SIZE); |
122 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 125 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
123 | RADEON_GEM_DOMAIN_VRAM, | 126 | RADEON_GEM_DOMAIN_VRAM, |
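On R600 and newer the fbdev object height is rounded up to 8 scanlines before sizing, and the byte size is then page-aligned. A sketch of the arithmetic for an assumed 1680x1050, 32 bpp mode; the real pitch additionally goes through the chip-specific radeon_align_pitch():

/* Sizing sketch for the fbdev pinned object: align height to 8 lines on
 * R600+, then round the byte size up to a page. */
#include <stdio.h>

#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE     4096

int main(void)
{
        unsigned int width = 1680, height = 1050, bpp = 32;
        unsigned int pitch = width * (bpp / 8);          /* before chip alignment */
        unsigned int aligned_height = ALIGN(height, 8);  /* R600+ rule */
        unsigned int size = pitch * aligned_height;
        unsigned int aligned_size = ALIGN(size, PAGE_SIZE);

        printf("pitch=%u height=%u->%u size=%u aligned=%u\n",
               pitch, height, aligned_height, size, aligned_size);
        return 0;
}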
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a289646e8aa4..9ec830c77af0 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -110,11 +110,14 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
110 | 110 | ||
111 | int radeon_irq_kms_init(struct radeon_device *rdev) | 111 | int radeon_irq_kms_init(struct radeon_device *rdev) |
112 | { | 112 | { |
113 | int i; | ||
113 | int r = 0; | 114 | int r = 0; |
114 | 115 | ||
115 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | 116 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); |
116 | 117 | ||
117 | spin_lock_init(&rdev->irq.sw_lock); | 118 | spin_lock_init(&rdev->irq.sw_lock); |
119 | for (i = 0; i < rdev->num_crtc; i++) | ||
120 | spin_lock_init(&rdev->irq.pflip_lock[i]); | ||
118 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); | 121 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
119 | if (r) { | 122 | if (r) { |
120 | return r; | 123 | return r; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 28a53e4a925f..8387d32caaa7 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -201,6 +201,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
201 | } | 201 | } |
202 | radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); | 202 | radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); |
203 | break; | 203 | break; |
204 | case RADEON_INFO_CLOCK_CRYSTAL_FREQ: | ||
205 | /* return clock value in KHz */ | ||
206 | value = rdev->clock.spll.reference_freq * 10; | ||
207 | break; | ||
204 | default: | 208 | default: |
205 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 209 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
206 | return -EINVAL; | 210 | return -EINVAL; |
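Userspace can read the new crystal value through the radeon info ioctl. A hedged sketch: the request constant and the struct layout (the value field carries a user pointer) are taken from the radeon_drm.h matching this interface version, and header locations vary between the kernel uapi and libdrm installs:

/* Hedged sketch: query RADEON_INFO_CLOCK_CRYSTAL_FREQ via the info ioctl.
 * Adjust the include path to wherever drm/radeon_drm.h is installed. */
#include <drm/radeon_drm.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        uint32_t khz = 0;
        struct drm_radeon_info info = { 0 };
        info.request = RADEON_INFO_CLOCK_CRYSTAL_FREQ;
        info.value = (uint64_t)(uintptr_t)&khz;  /* kernel copies the result here */

        if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) == 0)
                printf("reference crystal: %u kHz\n", khz);
        else
                perror("DRM_IOCTL_RADEON_INFO");

        close(fd);
        return 0;
}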
@@ -243,6 +247,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev, | |||
243 | struct radeon_device *rdev = dev->dev_private; | 247 | struct radeon_device *rdev = dev->dev_private; |
244 | if (rdev->hyperz_filp == file_priv) | 248 | if (rdev->hyperz_filp == file_priv) |
245 | rdev->hyperz_filp = NULL; | 249 | rdev->hyperz_filp = NULL; |
250 | if (rdev->cmask_filp == file_priv) | ||
251 | rdev->cmask_filp = NULL; | ||
246 | } | 252 | } |
247 | 253 | ||
248 | /* | 254 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index ace2e6384d40..cf0638c3b7c7 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
778 | DRM_DEBUG_KMS("\n"); | 778 | DRM_DEBUG_KMS("\n"); |
779 | 779 | ||
780 | if (!use_bios_divs) { | 780 | if (!use_bios_divs) { |
781 | radeon_compute_pll(pll, mode->clock, | 781 | radeon_compute_pll_legacy(pll, mode->clock, |
782 | &freq, &feedback_div, &frac_fb_div, | 782 | &freq, &feedback_div, &frac_fb_div, |
783 | &reference_div, &post_divider); | 783 | &reference_div, &post_divider); |
784 | 784 | ||
785 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { | 785 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { |
786 | if (post_div->divider == post_divider) | 786 | if (post_div->divider == post_divider) |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 12bdeab91c86..a670caaee29e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -149,6 +149,7 @@ struct radeon_tmds_pll { | |||
149 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | 149 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
150 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | 150 | #define RADEON_PLL_USE_POST_DIV (1 << 12) |
151 | #define RADEON_PLL_IS_LCD (1 << 13) | 151 | #define RADEON_PLL_IS_LCD (1 << 13) |
152 | #define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14) | ||
152 | 153 | ||
153 | struct radeon_pll { | 154 | struct radeon_pll { |
154 | /* reference frequency */ | 155 | /* reference frequency */ |
@@ -208,6 +209,7 @@ enum radeon_connector_table { | |||
208 | CT_EMAC, | 209 | CT_EMAC, |
209 | CT_RN50_POWER, | 210 | CT_RN50_POWER, |
210 | CT_MAC_X800, | 211 | CT_MAC_X800, |
212 | CT_MAC_G5_9600, | ||
211 | }; | 213 | }; |
212 | 214 | ||
213 | enum radeon_dvo_chip { | 215 | enum radeon_dvo_chip { |
@@ -510,13 +512,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
510 | struct radeon_atom_ss *ss, | 512 | struct radeon_atom_ss *ss, |
511 | int id, u32 clock); | 513 | int id, u32 clock); |
512 | 514 | ||
513 | extern void radeon_compute_pll(struct radeon_pll *pll, | 515 | extern void radeon_compute_pll_legacy(struct radeon_pll *pll, |
514 | uint64_t freq, | 516 | uint64_t freq, |
515 | uint32_t *dot_clock_p, | 517 | uint32_t *dot_clock_p, |
516 | uint32_t *fb_div_p, | 518 | uint32_t *fb_div_p, |
517 | uint32_t *frac_fb_div_p, | 519 | uint32_t *frac_fb_div_p, |
518 | uint32_t *ref_div_p, | 520 | uint32_t *ref_div_p, |
519 | uint32_t *post_div_p); | 521 | uint32_t *post_div_p); |
522 | |||
523 | extern void radeon_compute_pll_avivo(struct radeon_pll *pll, | ||
524 | u32 freq, | ||
525 | u32 *dot_clock_p, | ||
526 | u32 *fb_div_p, | ||
527 | u32 *frac_fb_div_p, | ||
528 | u32 *ref_div_p, | ||
529 | u32 *post_div_p); | ||
520 | 530 | ||
521 | extern void radeon_setup_encoder_clones(struct drm_device *dev); | 531 | extern void radeon_setup_encoder_clones(struct drm_device *dev); |
522 | 532 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 3b1b2bf9cdd5..2aed03bde4b2 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -430,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
430 | { | 430 | { |
431 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 431 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); |
432 | struct radeon_device *rdev = ddev->dev_private; | 432 | struct radeon_device *rdev = ddev->dev_private; |
433 | u32 temp; | 433 | int temp; |
434 | 434 | ||
435 | switch (rdev->pm.int_thermal_type) { | 435 | switch (rdev->pm.int_thermal_type) { |
436 | case THERMAL_TYPE_RV6XX: | 436 | case THERMAL_TYPE_RV6XX: |
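The temperature variable becomes signed because the per-ASIC get_temp helpers can now return negative millidegrees; holding the value in a u32 makes comparisons and any unsigned reinterpretation go wrong. A tiny illustration of the same bit pattern read both ways:

/* Why the hwmon temp variable became signed: a -2000 millidegree reading
 * stored in a u32 no longer means what it says when treated as unsigned. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int temp_signed = -2000;                   /* -2 degC in millidegrees */
        uint32_t temp_unsigned = (uint32_t)-2000;

        printf("signed:   %d\n", temp_signed);
        printf("unsigned: %u\n", temp_unsigned);   /* 4294965296 */
        return 0;
}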
@@ -646,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
646 | #endif | 646 | #endif |
647 | } | 647 | } |
648 | 648 | ||
649 | if (rdev->pm.power_state) | ||
650 | kfree(rdev->pm.power_state); | ||
651 | |||
649 | radeon_hwmon_fini(rdev); | 652 | radeon_hwmon_fini(rdev); |
650 | } | 653 | } |
651 | 654 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 3cd4dace57c7..ec93a75369e6 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -375,6 +375,8 @@ | |||
375 | #define RADEON_CONFIG_APER_SIZE 0x0108 | 375 | #define RADEON_CONFIG_APER_SIZE 0x0108 |
376 | #define RADEON_CONFIG_BONDS 0x00e8 | 376 | #define RADEON_CONFIG_BONDS 0x00e8 |
377 | #define RADEON_CONFIG_CNTL 0x00e0 | 377 | #define RADEON_CONFIG_CNTL 0x00e0 |
378 | # define RADEON_CFG_VGA_RAM_EN (1 << 8) | ||
379 | # define RADEON_CFG_VGA_IO_DIS (1 << 9) | ||
378 | # define RADEON_CFG_ATI_REV_A11 (0 << 16) | 380 | # define RADEON_CFG_ATI_REV_A11 (0 << 16) |
379 | # define RADEON_CFG_ATI_REV_A12 (1 << 16) | 381 | # define RADEON_CFG_ATI_REV_A12 (1 << 16) |
380 | # define RADEON_CFG_ATI_REV_A13 (2 << 16) | 382 | # define RADEON_CFG_ATI_REV_A13 (2 << 16) |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1272e4b6a1d4..e5b2cf10cbf4 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -787,9 +787,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | |||
787 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; | 787 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; |
788 | radeon_mem_types_list[i].driver_features = 0; | 788 | radeon_mem_types_list[i].driver_features = 0; |
789 | if (i == 0) | 789 | if (i == 0) |
790 | radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; | 790 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; |
791 | else | 791 | else |
792 | radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; | 792 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; |
793 | 793 | ||
794 | } | 794 | } |
795 | /* Add ttm page pool to debugfs */ | 795 | /* Add ttm page pool to debugfs */ |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index b506ec1cab4b..e8a1786b6426 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 | |||
@@ -683,9 +683,7 @@ r300 0x4f60 | |||
683 | 0x4DF4 US_ALU_CONST_G_31 | 683 | 0x4DF4 US_ALU_CONST_G_31 |
684 | 0x4DF8 US_ALU_CONST_B_31 | 684 | 0x4DF8 US_ALU_CONST_B_31 |
685 | 0x4DFC US_ALU_CONST_A_31 | 685 | 0x4DFC US_ALU_CONST_A_31 |
686 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
687 | 0x4E08 RB3D_ABLENDCNTL_R3 | 686 | 0x4E08 RB3D_ABLENDCNTL_R3 |
688 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
689 | 0x4E10 RB3D_CONSTANT_COLOR | 687 | 0x4E10 RB3D_CONSTANT_COLOR |
690 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 688 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
691 | 0x4E18 RB3D_ROPCNTL_R3 | 689 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -706,13 +704,11 @@ r300 0x4f60 | |||
706 | 0x4E74 RB3D_CMASK_WRINDEX | 704 | 0x4E74 RB3D_CMASK_WRINDEX |
707 | 0x4E78 RB3D_CMASK_DWORD | 705 | 0x4E78 RB3D_CMASK_DWORD |
708 | 0x4E7C RB3D_CMASK_RDINDEX | 706 | 0x4E7C RB3D_CMASK_RDINDEX |
709 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
710 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
711 | 0x4E88 RB3D_AARESOLVE_CTL | ||
712 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 707 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
713 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 708 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
714 | 0x4F04 ZB_ZSTENCILCNTL | 709 | 0x4F04 ZB_ZSTENCILCNTL |
715 | 0x4F08 ZB_STENCILREFMASK | 710 | 0x4F08 ZB_STENCILREFMASK |
716 | 0x4F14 ZB_ZTOP | 711 | 0x4F14 ZB_ZTOP |
717 | 0x4F18 ZB_ZCACHE_CTLSTAT | 712 | 0x4F18 ZB_ZCACHE_CTLSTAT |
713 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
718 | 0x4F58 ZB_ZPASS_DATA | 714 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 8c1214c2390f..722074e21e2f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 | |||
@@ -130,7 +130,6 @@ r420 0x4f60 | |||
130 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
131 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
132 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
133 | 0x4028 GB_Z_PEQ_CONFIG | ||
134 | 0x4100 TX_INVALTAGS | 133 | 0x4100 TX_INVALTAGS |
135 | 0x4200 GA_POINT_S0 | 134 | 0x4200 GA_POINT_S0 |
136 | 0x4204 GA_POINT_T0 | 135 | 0x4204 GA_POINT_T0 |
@@ -750,9 +749,7 @@ r420 0x4f60 | |||
750 | 0x4DF4 US_ALU_CONST_G_31 | 749 | 0x4DF4 US_ALU_CONST_G_31 |
751 | 0x4DF8 US_ALU_CONST_B_31 | 750 | 0x4DF8 US_ALU_CONST_B_31 |
752 | 0x4DFC US_ALU_CONST_A_31 | 751 | 0x4DFC US_ALU_CONST_A_31 |
753 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
754 | 0x4E08 RB3D_ABLENDCNTL_R3 | 752 | 0x4E08 RB3D_ABLENDCNTL_R3 |
755 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
756 | 0x4E10 RB3D_CONSTANT_COLOR | 753 | 0x4E10 RB3D_CONSTANT_COLOR |
757 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 754 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
758 | 0x4E18 RB3D_ROPCNTL_R3 | 755 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -773,13 +770,11 @@ r420 0x4f60 | |||
773 | 0x4E74 RB3D_CMASK_WRINDEX | 770 | 0x4E74 RB3D_CMASK_WRINDEX |
774 | 0x4E78 RB3D_CMASK_DWORD | 771 | 0x4E78 RB3D_CMASK_DWORD |
775 | 0x4E7C RB3D_CMASK_RDINDEX | 772 | 0x4E7C RB3D_CMASK_RDINDEX |
776 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
777 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
778 | 0x4E88 RB3D_AARESOLVE_CTL | ||
779 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 773 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
780 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 774 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
781 | 0x4F04 ZB_ZSTENCILCNTL | 775 | 0x4F04 ZB_ZSTENCILCNTL |
782 | 0x4F08 ZB_STENCILREFMASK | 776 | 0x4F08 ZB_STENCILREFMASK |
783 | 0x4F14 ZB_ZTOP | 777 | 0x4F14 ZB_ZTOP |
784 | 0x4F18 ZB_ZCACHE_CTLSTAT | 778 | 0x4F18 ZB_ZCACHE_CTLSTAT |
779 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
785 | 0x4F58 ZB_ZPASS_DATA | 780 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 0828d80396f2..d9f62866bbc1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 | |||
@@ -749,9 +749,7 @@ rs600 0x6d40 | |||
749 | 0x4DF4 US_ALU_CONST_G_31 | 749 | 0x4DF4 US_ALU_CONST_G_31 |
750 | 0x4DF8 US_ALU_CONST_B_31 | 750 | 0x4DF8 US_ALU_CONST_B_31 |
751 | 0x4DFC US_ALU_CONST_A_31 | 751 | 0x4DFC US_ALU_CONST_A_31 |
752 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
753 | 0x4E08 RB3D_ABLENDCNTL_R3 | 752 | 0x4E08 RB3D_ABLENDCNTL_R3 |
754 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
755 | 0x4E10 RB3D_CONSTANT_COLOR | 753 | 0x4E10 RB3D_CONSTANT_COLOR |
756 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 754 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
757 | 0x4E18 RB3D_ROPCNTL_R3 | 755 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -772,13 +770,11 @@ rs600 0x6d40 | |||
772 | 0x4E74 RB3D_CMASK_WRINDEX | 770 | 0x4E74 RB3D_CMASK_WRINDEX |
773 | 0x4E78 RB3D_CMASK_DWORD | 771 | 0x4E78 RB3D_CMASK_DWORD |
774 | 0x4E7C RB3D_CMASK_RDINDEX | 772 | 0x4E7C RB3D_CMASK_RDINDEX |
775 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
776 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
777 | 0x4E88 RB3D_AARESOLVE_CTL | ||
778 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 773 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
779 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 774 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
780 | 0x4F04 ZB_ZSTENCILCNTL | 775 | 0x4F04 ZB_ZSTENCILCNTL |
781 | 0x4F08 ZB_STENCILREFMASK | 776 | 0x4F08 ZB_STENCILREFMASK |
782 | 0x4F14 ZB_ZTOP | 777 | 0x4F14 ZB_ZTOP |
783 | 0x4F18 ZB_ZCACHE_CTLSTAT | 778 | 0x4F18 ZB_ZCACHE_CTLSTAT |
779 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
784 | 0x4F58 ZB_ZPASS_DATA | 780 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index ef422bbacfc1..911a8fbd32bb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
@@ -164,7 +164,6 @@ rv515 0x6d40 | |||
164 | 0x401C GB_SELECT | 164 | 0x401C GB_SELECT |
165 | 0x4020 GB_AA_CONFIG | 165 | 0x4020 GB_AA_CONFIG |
166 | 0x4024 GB_FIFO_SIZE | 166 | 0x4024 GB_FIFO_SIZE |
167 | 0x4028 GB_Z_PEQ_CONFIG | ||
168 | 0x4100 TX_INVALTAGS | 167 | 0x4100 TX_INVALTAGS |
169 | 0x4114 SU_TEX_WRAP_PS3 | 168 | 0x4114 SU_TEX_WRAP_PS3 |
170 | 0x4118 PS3_ENABLE | 169 | 0x4118 PS3_ENABLE |
@@ -461,9 +460,7 @@ rv515 0x6d40 | |||
461 | 0x4DF4 US_ALU_CONST_G_31 | 460 | 0x4DF4 US_ALU_CONST_G_31 |
462 | 0x4DF8 US_ALU_CONST_B_31 | 461 | 0x4DF8 US_ALU_CONST_B_31 |
463 | 0x4DFC US_ALU_CONST_A_31 | 462 | 0x4DFC US_ALU_CONST_A_31 |
464 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
465 | 0x4E08 RB3D_ABLENDCNTL_R3 | 463 | 0x4E08 RB3D_ABLENDCNTL_R3 |
466 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
467 | 0x4E10 RB3D_CONSTANT_COLOR | 464 | 0x4E10 RB3D_CONSTANT_COLOR |
468 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 465 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
469 | 0x4E18 RB3D_ROPCNTL_R3 | 466 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -484,9 +481,6 @@ rv515 0x6d40 | |||
484 | 0x4E74 RB3D_CMASK_WRINDEX | 481 | 0x4E74 RB3D_CMASK_WRINDEX |
485 | 0x4E78 RB3D_CMASK_DWORD | 482 | 0x4E78 RB3D_CMASK_DWORD |
486 | 0x4E7C RB3D_CMASK_RDINDEX | 483 | 0x4E7C RB3D_CMASK_RDINDEX |
487 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
488 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
489 | 0x4E88 RB3D_AARESOLVE_CTL | ||
490 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 484 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
491 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 485 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
492 | 0x4EF8 RB3D_CONSTANT_COLOR_AR | 486 | 0x4EF8 RB3D_CONSTANT_COLOR_AR |
@@ -496,4 +490,5 @@ rv515 0x6d40 | |||
496 | 0x4F14 ZB_ZTOP | 490 | 0x4F14 ZB_ZTOP |
497 | 0x4F18 ZB_ZCACHE_CTLSTAT | 491 | 0x4F18 ZB_ZCACHE_CTLSTAT |
498 | 0x4F58 ZB_ZPASS_DATA | 492 | 0x4F58 ZB_ZPASS_DATA |
493 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
499 | 0x4FD4 ZB_STENCILREFMASK_BF | 494 | 0x4FD4 ZB_STENCILREFMASK_BF |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 5512e4e5e636..c76283d9eb3d 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -203,6 +203,9 @@ void rs400_gart_fini(struct radeon_device *rdev) | |||
203 | radeon_gart_table_ram_free(rdev); | 203 | radeon_gart_table_ram_free(rdev); |
204 | } | 204 | } |
205 | 205 | ||
206 | #define RS400_PTE_WRITEABLE (1 << 2) | ||
207 | #define RS400_PTE_READABLE (1 << 3) | ||
208 | |||
206 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 209 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
207 | { | 210 | { |
208 | uint32_t entry; | 211 | uint32_t entry; |
@@ -213,7 +216,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
213 | 216 | ||
214 | entry = (lower_32_bits(addr) & PAGE_MASK) | | 217 | entry = (lower_32_bits(addr) & PAGE_MASK) | |
215 | ((upper_32_bits(addr) & 0xff) << 4) | | 218 | ((upper_32_bits(addr) & 0xff) << 4) | |
216 | 0xc; | 219 | RS400_PTE_WRITEABLE | RS400_PTE_READABLE; |
217 | entry = cpu_to_le32(entry); | 220 | entry = cpu_to_le32(entry); |
218 | rdev->gart.table.ram.ptr[i] = entry; | 221 | rdev->gart.table.ram.ptr[i] = entry; |
219 | return 0; | 222 | return 0; |
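The bare 0xc in the GART entry is now spelled out as the readable/writeable PTE bits. An illustration composing an entry the same way for a sample 40-bit DMA address, assuming 4 KiB pages:

/* Composing an RS400 GART PTE as in rs400_gart_set_page(): low address bits
 * page-masked in, the high 8 bits packed at bits 4..11, plus the flags that
 * used to be the bare 0xc constant. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK            (~0xfffUL)      /* 4 KiB pages assumed */
#define RS400_PTE_WRITEABLE  (1u << 2)
#define RS400_PTE_READABLE   (1u << 3)

static uint32_t rs400_pte(uint64_t addr)
{
        return (uint32_t)(addr & 0xffffffffUL & PAGE_MASK) |
               (((uint32_t)(addr >> 32) & 0xff) << 4) |
               RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
}

int main(void)
{
        uint64_t addr = 0x1234567000ULL;     /* sample 40-bit address */
        printf("pte = 0x%08x\n", rs400_pte(addr));
        return 0;
}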
@@ -226,8 +229,8 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) | |||
226 | 229 | ||
227 | for (i = 0; i < rdev->usec_timeout; i++) { | 230 | for (i = 0; i < rdev->usec_timeout; i++) { |
228 | /* read MC_STATUS */ | 231 | /* read MC_STATUS */ |
229 | tmp = RREG32(0x0150); | 232 | tmp = RREG32(RADEON_MC_STATUS); |
230 | if (tmp & (1 << 2)) { | 233 | if (tmp & RADEON_MC_IDLE) { |
231 | return 0; | 234 | return 0; |
232 | } | 235 | } |
233 | DRM_UDELAY(1); | 236 | DRM_UDELAY(1); |
@@ -241,7 +244,7 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
241 | r420_pipes_init(rdev); | 244 | r420_pipes_init(rdev); |
242 | if (rs400_mc_wait_for_idle(rdev)) { | 245 | if (rs400_mc_wait_for_idle(rdev)) { |
243 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " | 246 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " |
244 | "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); | 247 | "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS)); |
245 | } | 248 | } |
246 | } | 249 | } |
247 | 250 | ||
@@ -300,9 +303,9 @@ static int rs400_debugfs_gart_info(struct seq_file *m, void *data) | |||
300 | seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); | 303 | seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); |
301 | tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); | 304 | tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); |
302 | seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); | 305 | seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); |
303 | tmp = RREG32_MC(0x100); | 306 | tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION); |
304 | seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); | 307 | seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); |
305 | tmp = RREG32(0x134); | 308 | tmp = RREG32(RS690_HDP_FB_LOCATION); |
306 | seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); | 309 | seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); |
307 | } else { | 310 | } else { |
308 | tmp = RREG32(RADEON_AGP_BASE); | 311 | tmp = RREG32(RADEON_AGP_BASE); |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 0137d3e3728d..6638c8e4c81b 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
77 | switch (crev) { | 77 | switch (crev) { |
78 | case 1: | 78 | case 1: |
79 | tmp.full = dfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock)); |
81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
82 | if (info->info.usK8MemoryClock) | 82 | if (le16_to_cpu(info->info.usK8MemoryClock)) |
83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
84 | else if (rdev->clock.default_mclk) { | 84 | else if (rdev->clock.default_mclk) { |
85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | 85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
91 | break; | 91 | break; |
92 | case 2: | 92 | case 2: |
93 | tmp.full = dfixed_const(100); | 93 | tmp.full = dfixed_const(100); |
94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); | 94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock)); |
95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
96 | if (info->info_v2.ulBootUpUMAClock) | 96 | if (le32_to_cpu(info->info_v2.ulBootUpUMAClock)) |
97 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | 97 | rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock)); |
98 | else if (rdev->clock.default_mclk) | 98 | else if (rdev->clock.default_mclk) |
99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | 99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
100 | else | 100 | else |
101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); | 101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); |
102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | 102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); | 103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq)); |
104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
106 | break; | 106 | break; |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 5d569f41f4ae..64b57af93714 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -69,13 +69,13 @@ void rv515_ring_start(struct radeon_device *rdev) | |||
69 | ISYNC_CPSCRATCH_IDLEGUI); | 69 | ISYNC_CPSCRATCH_IDLEGUI); |
70 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); | 70 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
71 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | 71 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
72 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); | 72 | radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0)); |
73 | radeon_ring_write(rdev, 1 << 31); | 73 | radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG); |
74 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); | 74 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); |
75 | radeon_ring_write(rdev, 0); | 75 | radeon_ring_write(rdev, 0); |
76 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); | 76 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); |
77 | radeon_ring_write(rdev, 0); | 77 | radeon_ring_write(rdev, 0); |
78 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); | 78 | radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0)); |
79 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); | 79 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); |
80 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); | 80 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); |
81 | radeon_ring_write(rdev, 0); | 81 | radeon_ring_write(rdev, 0); |
@@ -153,8 +153,8 @@ void rv515_gpu_init(struct radeon_device *rdev) | |||
153 | } | 153 | } |
154 | rv515_vga_render_disable(rdev); | 154 | rv515_vga_render_disable(rdev); |
155 | r420_pipes_init(rdev); | 155 | r420_pipes_init(rdev); |
156 | gb_pipe_select = RREG32(0x402C); | 156 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
157 | tmp = RREG32(0x170C); | 157 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
158 | pipe_select_current = (tmp >> 2) & 3; | 158 | pipe_select_current = (tmp >> 2) & 3; |
159 | tmp = (1 << pipe_select_current) | | 159 | tmp = (1 << pipe_select_current) | |
160 | (((gb_pipe_select >> 8) & 0xF) << 4); | 160 | (((gb_pipe_select >> 8) & 0xF) << 4); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 491dc9000655..d8ba67690656 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -78,18 +78,23 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | /* get temperature in millidegrees */ | 80 | /* get temperature in millidegrees */ |
81 | u32 rv770_get_temp(struct radeon_device *rdev) | 81 | int rv770_get_temp(struct radeon_device *rdev) |
82 | { | 82 | { |
83 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> | 83 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> |
84 | ASIC_T_SHIFT; | 84 | ASIC_T_SHIFT; |
85 | u32 actual_temp = 0; | 85 | int actual_temp; |
86 | 86 | ||
87 | if ((temp >> 9) & 1) | 87 | if (temp & 0x400) |
88 | actual_temp = 0; | 88 | actual_temp = -256; |
89 | else | 89 | else if (temp & 0x200) |
90 | actual_temp = (temp >> 1) & 0xff; | 90 | actual_temp = 255; |
91 | 91 | else if (temp & 0x100) { | |
92 | return actual_temp * 1000; | 92 | actual_temp = temp & 0x1ff; |
93 | actual_temp |= ~0x1ff; | ||
94 | } else | ||
95 | actual_temp = temp & 0xff; | ||
96 | |||
97 | return (actual_temp * 1000) / 2; | ||
93 | } | 98 | } |
94 | 99 | ||
95 | void rv770_pm_misc(struct radeon_device *rdev) | 100 | void rv770_pm_misc(struct radeon_device *rdev) |
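The reworked decode treats the raw ASIC_T field as a signed reading in half-degree steps: clamp the extremes, sign-extend small negatives, and scale to millidegrees. A standalone replica of the new logic with a few sample raw values:

/* Standalone replica of the reworked rv770_get_temp() decode: temp is the
 * raw ASIC_T field, the result is millidegrees Celsius. */
#include <stdio.h>

static int decode_temp(unsigned int temp)
{
        int actual_temp;

        if (temp & 0x400)                 /* deep negative: clamp */
                actual_temp = -256;
        else if (temp & 0x200)            /* overflow: clamp to max */
                actual_temp = 255;
        else if (temp & 0x100) {          /* small negative: sign-extend */
                actual_temp = temp & 0x1ff;
                actual_temp |= ~0x1ff;
        } else                            /* ordinary positive reading */
                actual_temp = temp & 0xff;

        return (actual_temp * 1000) / 2;  /* half-degree steps -> mC */
}

int main(void)
{
        unsigned int samples[] = { 0x50, 0x1fc, 0x200, 0x400 };
        for (int i = 0; i < 4; i++)
                printf("raw 0x%03x -> %d mC\n", samples[i], decode_temp(samples[i]));
        return 0;
}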
@@ -316,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) | |||
316 | return -EINVAL; | 321 | return -EINVAL; |
317 | 322 | ||
318 | r700_cp_stop(rdev); | 323 | r700_cp_stop(rdev); |
319 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); | 324 | WREG32(CP_RB_CNTL, |
325 | #ifdef __BIG_ENDIAN | ||
326 | BUF_SWAP_32BIT | | ||
327 | #endif | ||
328 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
320 | 329 | ||
321 | /* Reset cp */ | 330 | /* Reset cp */ |
322 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | 331 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); |
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index abc8cf5a3672..79fa588e9ed5 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -76,10 +76,10 @@ | |||
76 | #define ROQ_IB1_START(x) ((x) << 0) | 76 | #define ROQ_IB1_START(x) ((x) << 0) |
77 | #define ROQ_IB2_START(x) ((x) << 8) | 77 | #define ROQ_IB2_START(x) ((x) << 8) |
78 | #define CP_RB_CNTL 0xC104 | 78 | #define CP_RB_CNTL 0xC104 |
79 | #define RB_BUFSZ(x) ((x)<<0) | 79 | #define RB_BUFSZ(x) ((x) << 0) |
80 | #define RB_BLKSZ(x) ((x)<<8) | 80 | #define RB_BLKSZ(x) ((x) << 8) |
81 | #define RB_NO_UPDATE (1<<27) | 81 | #define RB_NO_UPDATE (1 << 27) |
82 | #define RB_RPTR_WR_ENA (1<<31) | 82 | #define RB_RPTR_WR_ENA (1 << 31) |
83 | #define BUF_SWAP_32BIT (2 << 16) | 83 | #define BUF_SWAP_32BIT (2 << 16) |
84 | #define CP_RB_RPTR 0x8700 | 84 | #define CP_RB_RPTR 0x8700 |
85 | #define CP_RB_RPTR_ADDR 0xC10C | 85 | #define CP_RB_RPTR_ADDR 0xC10C |
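CP_RB_CNTL is composed from bit fields, and on big-endian hosts the ring fetcher is additionally told to byte-swap each dword. A small sketch building the same value written in rv770_cp_load_microcode() above, with the macro values copied from this header; the build-time guard here is a stand-in for the kernel's __BIG_ENDIAN:

/* Illustration: composing CP_RB_CNTL from its bit fields, mirroring the
 * rv770d.h definitions shown above. */
#include <stdint.h>
#include <stdio.h>

#define RB_BUFSZ(x)      ((uint32_t)(x) << 0)
#define RB_BLKSZ(x)      ((uint32_t)(x) << 8)
#define RB_NO_UPDATE     (1u << 27)
#define BUF_SWAP_32BIT   (2u << 16)

int main(void)
{
        uint32_t cntl = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);
#ifdef ASSUME_BIG_ENDIAN_HOST   /* stand-in for the kernel's __BIG_ENDIAN */
        cntl |= BUF_SWAP_32BIT;
#endif
        printf("CP_RB_CNTL = 0x%08x\n", cntl);
        return 0;
}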