Diffstat (limited to 'drivers/gpu'): 77 files changed, 5206 insertions, 2378 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8e7b0ebece0c..5cae0b3eee9b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1556,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
1556 | struct drm_crtc *crtc; | 1556 | struct drm_crtc *crtc; |
1557 | int ret = 0; | 1557 | int ret = 0; |
1558 | 1558 | ||
1559 | DRM_DEBUG_KMS("\n"); | ||
1560 | |||
1561 | if (!req->flags) { | 1559 | if (!req->flags) { |
1562 | DRM_ERROR("no operation set\n"); | 1560 | DRM_ERROR("no operation set\n"); |
1563 | return -EINVAL; | 1561 | return -EINVAL; |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1fe4e1d344fd..bbfd110a7168 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -331,6 +331,7 @@ create_mode:
331 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, | 331 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, |
332 | cmdline_mode->rb, cmdline_mode->interlace, | 332 | cmdline_mode->rb, cmdline_mode->interlace, |
333 | cmdline_mode->margins); | 333 | cmdline_mode->margins); |
334 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
334 | list_add(&mode->head, &connector->modes); | 335 | list_add(&mode->head, &connector->modes); |
335 | return mode; | 336 | return mode; |
336 | } | 337 | } |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3c0d2b3aed76..cea665d86dd3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -626,6 +626,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
626 | return NULL; | 626 | return NULL; |
627 | } | 627 | } |
628 | 628 | ||
629 | /* it is incorrect if hsync/vsync width is zero */ | ||
630 | if (!hsync_pulse_width || !vsync_pulse_width) { | ||
631 | DRM_DEBUG_KMS("Incorrect Detailed timing. " | ||
632 | "Wrong Hsync/Vsync pulse width\n"); | ||
633 | return NULL; | ||
634 | } | ||
629 | mode = drm_mode_create(dev); | 635 | mode = drm_mode_create(dev); |
630 | if (!mode) | 636 | if (!mode) |
631 | return NULL; | 637 | return NULL; |
@@ -647,6 +653,15 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
647 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; | 653 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; |
648 | mode->vtotal = mode->vdisplay + vblank; | 654 | mode->vtotal = mode->vdisplay + vblank; |
649 | 655 | ||
656 | /* perform the basic check for the detailed timing */ | ||
657 | if (mode->hsync_end > mode->htotal || | ||
658 | mode->vsync_end > mode->vtotal) { | ||
659 | drm_mode_destroy(dev, mode); | ||
660 | DRM_DEBUG_KMS("Incorrect detailed timing. " | ||
661 | "Sync is beyond the blank.\n"); | ||
662 | return NULL; | ||
663 | } | ||
664 | |||
650 | drm_mode_set_name(mode); | 665 | drm_mode_set_name(mode); |
651 | 666 | ||
652 | if (pt->misc & DRM_EDID_PT_INTERLACED) | 667 | if (pt->misc & DRM_EDID_PT_INTERLACED) |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 819ddcbfcce5..dc8e374a0b55 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -454,6 +454,109 @@ out_free:
454 | } | 454 | } |
455 | EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); | 455 | EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); |
456 | 456 | ||
457 | static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, | ||
458 | u16 blue, u16 regno, struct fb_info *info) | ||
459 | { | ||
460 | struct drm_fb_helper *fb_helper = info->par; | ||
461 | struct drm_framebuffer *fb = fb_helper->fb; | ||
462 | int pindex; | ||
463 | |||
464 | if (info->fix.visual == FB_VISUAL_TRUECOLOR) { | ||
465 | u32 *palette; | ||
466 | u32 value; | ||
467 | /* place color in psuedopalette */ | ||
468 | if (regno > 16) | ||
469 | return -EINVAL; | ||
470 | palette = (u32 *)info->pseudo_palette; | ||
471 | red >>= (16 - info->var.red.length); | ||
472 | green >>= (16 - info->var.green.length); | ||
473 | blue >>= (16 - info->var.blue.length); | ||
474 | value = (red << info->var.red.offset) | | ||
475 | (green << info->var.green.offset) | | ||
476 | (blue << info->var.blue.offset); | ||
477 | palette[regno] = value; | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | pindex = regno; | ||
482 | |||
483 | if (fb->bits_per_pixel == 16) { | ||
484 | pindex = regno << 3; | ||
485 | |||
486 | if (fb->depth == 16 && regno > 63) | ||
487 | return -EINVAL; | ||
488 | if (fb->depth == 15 && regno > 31) | ||
489 | return -EINVAL; | ||
490 | |||
491 | if (fb->depth == 16) { | ||
492 | u16 r, g, b; | ||
493 | int i; | ||
494 | if (regno < 32) { | ||
495 | for (i = 0; i < 8; i++) | ||
496 | fb_helper->funcs->gamma_set(crtc, red, | ||
497 | green, blue, pindex + i); | ||
498 | } | ||
499 | |||
500 | fb_helper->funcs->gamma_get(crtc, &r, | ||
501 | &g, &b, | ||
502 | pindex >> 1); | ||
503 | |||
504 | for (i = 0; i < 4; i++) | ||
505 | fb_helper->funcs->gamma_set(crtc, r, | ||
506 | green, b, | ||
507 | (pindex >> 1) + i); | ||
508 | } | ||
509 | } | ||
510 | |||
511 | if (fb->depth != 16) | ||
512 | fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | ||
517 | { | ||
518 | struct drm_fb_helper *fb_helper = info->par; | ||
519 | struct drm_device *dev = fb_helper->dev; | ||
520 | u16 *red, *green, *blue, *transp; | ||
521 | struct drm_crtc *crtc; | ||
522 | int i, rc = 0; | ||
523 | int start; | ||
524 | |||
525 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
526 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
527 | for (i = 0; i < fb_helper->crtc_count; i++) { | ||
528 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) | ||
529 | break; | ||
530 | } | ||
531 | if (i == fb_helper->crtc_count) | ||
532 | continue; | ||
533 | |||
534 | red = cmap->red; | ||
535 | green = cmap->green; | ||
536 | blue = cmap->blue; | ||
537 | transp = cmap->transp; | ||
538 | start = cmap->start; | ||
539 | |||
540 | for (i = 0; i < cmap->len; i++) { | ||
541 | u16 hred, hgreen, hblue, htransp = 0xffff; | ||
542 | |||
543 | hred = *red++; | ||
544 | hgreen = *green++; | ||
545 | hblue = *blue++; | ||
546 | |||
547 | if (transp) | ||
548 | htransp = *transp++; | ||
549 | |||
550 | rc = setcolreg(crtc, hred, hgreen, hblue, start++, info); | ||
551 | if (rc) | ||
552 | return rc; | ||
553 | } | ||
554 | crtc_funcs->load_lut(crtc); | ||
555 | } | ||
556 | return rc; | ||
557 | } | ||
558 | EXPORT_SYMBOL(drm_fb_helper_setcmap); | ||
559 | |||
457 | int drm_fb_helper_setcolreg(unsigned regno, | 560 | int drm_fb_helper_setcolreg(unsigned regno, |
458 | unsigned red, | 561 | unsigned red, |
459 | unsigned green, | 562 | unsigned green, |
@@ -465,10 +568,13 @@ int drm_fb_helper_setcolreg(unsigned regno,
465 | struct drm_device *dev = fb_helper->dev; | 568 | struct drm_device *dev = fb_helper->dev; |
466 | struct drm_crtc *crtc; | 569 | struct drm_crtc *crtc; |
467 | int i; | 570 | int i; |
571 | int ret; | ||
468 | 572 | ||
469 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 573 | if (regno > 255) |
470 | struct drm_framebuffer *fb = fb_helper->fb; | 574 | return 1; |
471 | 575 | ||
576 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
577 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
472 | for (i = 0; i < fb_helper->crtc_count; i++) { | 578 | for (i = 0; i < fb_helper->crtc_count; i++) { |
473 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) | 579 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) |
474 | break; | 580 | break; |
@@ -476,35 +582,11 @@ int drm_fb_helper_setcolreg(unsigned regno,
476 | if (i == fb_helper->crtc_count) | 582 | if (i == fb_helper->crtc_count) |
477 | continue; | 583 | continue; |
478 | 584 | ||
479 | if (regno > 255) | 585 | ret = setcolreg(crtc, red, green, blue, regno, info); |
480 | return 1; | 586 | if (ret) |
481 | 587 | return ret; | |
482 | if (fb->depth == 8) { | ||
483 | fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); | ||
484 | return 0; | ||
485 | } | ||
486 | 588 | ||
487 | if (regno < 16) { | 589 | crtc_funcs->load_lut(crtc); |
488 | switch (fb->depth) { | ||
489 | case 15: | ||
490 | fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | | ||
491 | ((green & 0xf800) >> 6) | | ||
492 | ((blue & 0xf800) >> 11); | ||
493 | break; | ||
494 | case 16: | ||
495 | fb->pseudo_palette[regno] = (red & 0xf800) | | ||
496 | ((green & 0xfc00) >> 5) | | ||
497 | ((blue & 0xf800) >> 11); | ||
498 | break; | ||
499 | case 24: | ||
500 | case 32: | ||
501 | fb->pseudo_palette[regno] = | ||
502 | (((red >> 8) & 0xff) << info->var.red.offset) | | ||
503 | (((green >> 8) & 0xff) << info->var.green.offset) | | ||
504 | (((blue >> 8) & 0xff) << info->var.blue.offset); | ||
505 | break; | ||
506 | } | ||
507 | } | ||
508 | } | 590 | } |
509 | return 0; | 591 | return 0; |
510 | } | 592 | } |
@@ -625,7 +707,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
625 | 707 | ||
626 | if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) { | 708 | if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) { |
627 | mutex_lock(&dev->mode_config.mutex); | 709 | mutex_lock(&dev->mode_config.mutex); |
628 | ret = crtc->funcs->set_config(&fb_helper->crtc_info->mode_set); | 710 | ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); |
629 | mutex_unlock(&dev->mode_config.mutex); | 711 | mutex_unlock(&dev->mode_config.mutex); |
630 | if (ret) | 712 | if (ret) |
631 | return ret; | 713 | return ret; |
@@ -674,6 +756,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
674 | EXPORT_SYMBOL(drm_fb_helper_pan_display); | 756 | EXPORT_SYMBOL(drm_fb_helper_pan_display); |
675 | 757 | ||
676 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, | 758 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, |
759 | int preferred_bpp, | ||
677 | int (*fb_create)(struct drm_device *dev, | 760 | int (*fb_create)(struct drm_device *dev, |
678 | uint32_t fb_width, | 761 | uint32_t fb_width, |
679 | uint32_t fb_height, | 762 | uint32_t fb_height, |
@@ -696,6 +779,11 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
696 | struct drm_fb_helper *fb_helper; | 779 | struct drm_fb_helper *fb_helper; |
697 | uint32_t surface_depth = 24, surface_bpp = 32; | 780 | uint32_t surface_depth = 24, surface_bpp = 32; |
698 | 781 | ||
782 | /* if driver picks 8 or 16 by default use that | ||
783 | for both depth/bpp */ | ||
784 | if (preferred_bpp != surface_bpp) { | ||
785 | surface_depth = surface_bpp = preferred_bpp; | ||
786 | } | ||
699 | /* first up get a count of crtcs now in use and new min/maxes width/heights */ | 787 | /* first up get a count of crtcs now in use and new min/maxes width/heights */ |
700 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 788 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
701 | struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; | 789 | struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; |
@@ -851,10 +939,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
851 | } | 939 | } |
852 | EXPORT_SYMBOL(drm_fb_helper_free); | 940 | EXPORT_SYMBOL(drm_fb_helper_free); |
853 | 941 | ||
854 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) | 942 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, |
943 | uint32_t depth) | ||
855 | { | 944 | { |
856 | info->fix.type = FB_TYPE_PACKED_PIXELS; | 945 | info->fix.type = FB_TYPE_PACKED_PIXELS; |
857 | info->fix.visual = FB_VISUAL_TRUECOLOR; | 946 | info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR : |
947 | FB_VISUAL_TRUECOLOR; | ||
858 | info->fix.type_aux = 0; | 948 | info->fix.type_aux = 0; |
859 | info->fix.xpanstep = 1; /* doing it in hw */ | 949 | info->fix.xpanstep = 1; /* doing it in hw */ |
860 | info->fix.ypanstep = 1; /* doing it in hw */ | 950 | info->fix.ypanstep = 1; /* doing it in hw */ |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 45d507ebd3ff..e5b138be45fa 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1227,8 +1227,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1227 | goto out; | 1227 | goto out; |
1228 | 1228 | ||
1229 | /* Try to set up FBC with a reasonable compressed buffer size */ | 1229 | /* Try to set up FBC with a reasonable compressed buffer size */ |
1230 | if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) && | 1230 | if (I915_HAS_FBC(dev) && i915_powersave) { |
1231 | i915_powersave) { | ||
1232 | int cfb_size; | 1231 | int cfb_size; |
1233 | 1232 | ||
1234 | /* Try to get an 8M buffer... */ | 1233 | /* Try to get an 8M buffer... */ |
@@ -1468,6 +1467,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1468 | spin_lock_init(&dev_priv->user_irq_lock); | 1467 | spin_lock_init(&dev_priv->user_irq_lock); |
1469 | spin_lock_init(&dev_priv->error_lock); | 1468 | spin_lock_init(&dev_priv->error_lock); |
1470 | dev_priv->user_irq_refcount = 0; | 1469 | dev_priv->user_irq_refcount = 0; |
1470 | dev_priv->trace_irq_seqno = 0; | ||
1471 | 1471 | ||
1472 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | 1472 | ret = drm_vblank_init(dev, I915_NUM_PIPE); |
1473 | 1473 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b93814c0d3e2..7f436ec075f6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -89,7 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
89 | pci_set_power_state(dev->pdev, PCI_D3hot); | 89 | pci_set_power_state(dev->pdev, PCI_D3hot); |
90 | } | 90 | } |
91 | 91 | ||
92 | dev_priv->suspended = 1; | 92 | /* Modeset on resume, not lid events */ |
93 | dev_priv->modeset_on_lid = 0; | ||
93 | 94 | ||
94 | return 0; | 95 | return 0; |
95 | } | 96 | } |
@@ -124,7 +125,7 @@ static int i915_resume(struct drm_device *dev)
124 | drm_helper_resume_force_mode(dev); | 125 | drm_helper_resume_force_mode(dev); |
125 | } | 126 | } |
126 | 127 | ||
127 | dev_priv->suspended = 0; | 128 | dev_priv->modeset_on_lid = 0; |
128 | 129 | ||
129 | return ret; | 130 | return ret; |
130 | } | 131 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b24b2d145b75..57204e298975 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -202,6 +202,7 @@ typedef struct drm_i915_private {
202 | spinlock_t user_irq_lock; | 202 | spinlock_t user_irq_lock; |
203 | /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ | 203 | /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ |
204 | int user_irq_refcount; | 204 | int user_irq_refcount; |
205 | u32 trace_irq_seqno; | ||
205 | /** Cached value of IMR to avoid reads in updating the bitfield */ | 206 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
206 | u32 irq_mask_reg; | 207 | u32 irq_mask_reg; |
207 | u32 pipestat[2]; | 208 | u32 pipestat[2]; |
@@ -273,7 +274,7 @@ typedef struct drm_i915_private {
273 | struct drm_i915_display_funcs display; | 274 | struct drm_i915_display_funcs display; |
274 | 275 | ||
275 | /* Register state */ | 276 | /* Register state */ |
276 | bool suspended; | 277 | bool modeset_on_lid; |
277 | u8 saveLBB; | 278 | u8 saveLBB; |
278 | u32 saveDSPACNTR; | 279 | u32 saveDSPACNTR; |
279 | u32 saveDSPBCNTR; | 280 | u32 saveDSPBCNTR; |
@@ -295,6 +296,12 @@ typedef struct drm_i915_private {
295 | u32 saveVBLANK_A; | 296 | u32 saveVBLANK_A; |
296 | u32 saveVSYNC_A; | 297 | u32 saveVSYNC_A; |
297 | u32 saveBCLRPAT_A; | 298 | u32 saveBCLRPAT_A; |
299 | u32 saveTRANS_HTOTAL_A; | ||
300 | u32 saveTRANS_HBLANK_A; | ||
301 | u32 saveTRANS_HSYNC_A; | ||
302 | u32 saveTRANS_VTOTAL_A; | ||
303 | u32 saveTRANS_VBLANK_A; | ||
304 | u32 saveTRANS_VSYNC_A; | ||
298 | u32 savePIPEASTAT; | 305 | u32 savePIPEASTAT; |
299 | u32 saveDSPASTRIDE; | 306 | u32 saveDSPASTRIDE; |
300 | u32 saveDSPASIZE; | 307 | u32 saveDSPASIZE; |
@@ -303,8 +310,11 @@ typedef struct drm_i915_private {
303 | u32 saveDSPASURF; | 310 | u32 saveDSPASURF; |
304 | u32 saveDSPATILEOFF; | 311 | u32 saveDSPATILEOFF; |
305 | u32 savePFIT_PGM_RATIOS; | 312 | u32 savePFIT_PGM_RATIOS; |
313 | u32 saveBLC_HIST_CTL; | ||
306 | u32 saveBLC_PWM_CTL; | 314 | u32 saveBLC_PWM_CTL; |
307 | u32 saveBLC_PWM_CTL2; | 315 | u32 saveBLC_PWM_CTL2; |
316 | u32 saveBLC_CPU_PWM_CTL; | ||
317 | u32 saveBLC_CPU_PWM_CTL2; | ||
308 | u32 saveFPB0; | 318 | u32 saveFPB0; |
309 | u32 saveFPB1; | 319 | u32 saveFPB1; |
310 | u32 saveDPLL_B; | 320 | u32 saveDPLL_B; |
@@ -316,6 +326,12 @@ typedef struct drm_i915_private {
316 | u32 saveVBLANK_B; | 326 | u32 saveVBLANK_B; |
317 | u32 saveVSYNC_B; | 327 | u32 saveVSYNC_B; |
318 | u32 saveBCLRPAT_B; | 328 | u32 saveBCLRPAT_B; |
329 | u32 saveTRANS_HTOTAL_B; | ||
330 | u32 saveTRANS_HBLANK_B; | ||
331 | u32 saveTRANS_HSYNC_B; | ||
332 | u32 saveTRANS_VTOTAL_B; | ||
333 | u32 saveTRANS_VBLANK_B; | ||
334 | u32 saveTRANS_VSYNC_B; | ||
319 | u32 savePIPEBSTAT; | 335 | u32 savePIPEBSTAT; |
320 | u32 saveDSPBSTRIDE; | 336 | u32 saveDSPBSTRIDE; |
321 | u32 saveDSPBSIZE; | 337 | u32 saveDSPBSIZE; |
@@ -341,6 +357,7 @@ typedef struct drm_i915_private {
341 | u32 savePFIT_CONTROL; | 357 | u32 savePFIT_CONTROL; |
342 | u32 save_palette_a[256]; | 358 | u32 save_palette_a[256]; |
343 | u32 save_palette_b[256]; | 359 | u32 save_palette_b[256]; |
360 | u32 saveDPFC_CB_BASE; | ||
344 | u32 saveFBC_CFB_BASE; | 361 | u32 saveFBC_CFB_BASE; |
345 | u32 saveFBC_LL_BASE; | 362 | u32 saveFBC_LL_BASE; |
346 | u32 saveFBC_CONTROL; | 363 | u32 saveFBC_CONTROL; |
@@ -348,6 +365,12 @@ typedef struct drm_i915_private {
348 | u32 saveIER; | 365 | u32 saveIER; |
349 | u32 saveIIR; | 366 | u32 saveIIR; |
350 | u32 saveIMR; | 367 | u32 saveIMR; |
368 | u32 saveDEIER; | ||
369 | u32 saveDEIMR; | ||
370 | u32 saveGTIER; | ||
371 | u32 saveGTIMR; | ||
372 | u32 saveFDI_RXA_IMR; | ||
373 | u32 saveFDI_RXB_IMR; | ||
351 | u32 saveCACHE_MODE_0; | 374 | u32 saveCACHE_MODE_0; |
352 | u32 saveD_STATE; | 375 | u32 saveD_STATE; |
353 | u32 saveDSPCLK_GATE_D; | 376 | u32 saveDSPCLK_GATE_D; |
@@ -381,6 +404,16 @@ typedef struct drm_i915_private {
381 | u32 savePIPEB_DP_LINK_M; | 404 | u32 savePIPEB_DP_LINK_M; |
382 | u32 savePIPEA_DP_LINK_N; | 405 | u32 savePIPEA_DP_LINK_N; |
383 | u32 savePIPEB_DP_LINK_N; | 406 | u32 savePIPEB_DP_LINK_N; |
407 | u32 saveFDI_RXA_CTL; | ||
408 | u32 saveFDI_TXA_CTL; | ||
409 | u32 saveFDI_RXB_CTL; | ||
410 | u32 saveFDI_TXB_CTL; | ||
411 | u32 savePFA_CTL_1; | ||
412 | u32 savePFB_CTL_1; | ||
413 | u32 savePFA_WIN_SZ; | ||
414 | u32 savePFB_WIN_SZ; | ||
415 | u32 savePFA_WIN_POS; | ||
416 | u32 savePFB_WIN_POS; | ||
384 | 417 | ||
385 | struct { | 418 | struct { |
386 | struct drm_mm gtt_space; | 419 | struct drm_mm gtt_space; |
@@ -491,6 +524,8 @@ typedef struct drm_i915_private {
491 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | 524 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
492 | } mm; | 525 | } mm; |
493 | struct sdvo_device_mapping sdvo_mappings[2]; | 526 | struct sdvo_device_mapping sdvo_mappings[2]; |
527 | /* indicate whether the LVDS_BORDER should be enabled or not */ | ||
528 | unsigned int lvds_border_bits; | ||
494 | 529 | ||
495 | /* Reclocking support */ | 530 | /* Reclocking support */ |
496 | bool render_reclock_avail; | 531 | bool render_reclock_avail; |
@@ -665,6 +700,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
665 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 700 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
666 | struct drm_file *file_priv); | 701 | struct drm_file *file_priv); |
667 | void i915_user_irq_get(struct drm_device *dev); | 702 | void i915_user_irq_get(struct drm_device *dev); |
703 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno); | ||
668 | void i915_user_irq_put(struct drm_device *dev); | 704 | void i915_user_irq_put(struct drm_device *dev); |
669 | extern void i915_enable_interrupt (struct drm_device *dev); | 705 | extern void i915_enable_interrupt (struct drm_device *dev); |
670 | 706 | ||
@@ -979,7 +1015,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
979 | 1015 | ||
980 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) | 1016 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) |
981 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 1017 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
982 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev))) | 1018 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ |
1019 | (IS_I9XX(dev) || IS_GM45(dev)) && \ | ||
1020 | !IS_IGD(dev) && \ | ||
1021 | !IS_IGDNG(dev)) | ||
983 | 1022 | ||
984 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1023 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
985 | 1024 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 40727d4c2919..abfc27b0c2ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1770,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1770 | drm_i915_private_t *dev_priv = dev->dev_private; | 1770 | drm_i915_private_t *dev_priv = dev->dev_private; |
1771 | uint32_t seqno; | 1771 | uint32_t seqno; |
1772 | 1772 | ||
1773 | if (!dev_priv->hw_status_page) | 1773 | if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) |
1774 | return; | 1774 | return; |
1775 | 1775 | ||
1776 | seqno = i915_get_gem_seqno(dev); | 1776 | seqno = i915_get_gem_seqno(dev); |
@@ -1794,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev)
1794 | } else | 1794 | } else |
1795 | break; | 1795 | break; |
1796 | } | 1796 | } |
1797 | |||
1798 | if (unlikely (dev_priv->trace_irq_seqno && | ||
1799 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { | ||
1800 | i915_user_irq_put(dev); | ||
1801 | dev_priv->trace_irq_seqno = 0; | ||
1802 | } | ||
1797 | } | 1803 | } |
1798 | 1804 | ||
1799 | void | 1805 | void |
@@ -3352,7 +3358,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3352 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 3358 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; |
3353 | exec_len = (uint32_t) exec->batch_len; | 3359 | exec_len = (uint32_t) exec->batch_len; |
3354 | 3360 | ||
3355 | trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno); | 3361 | trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); |
3356 | 3362 | ||
3357 | count = nbox ? nbox : 1; | 3363 | count = nbox ? nbox : 1; |
3358 | 3364 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4dfeec7cdd42..c3ceffa46ea0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -725,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev)
725 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 725 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); |
726 | } | 726 | } |
727 | 727 | ||
728 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) | ||
729 | { | ||
730 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
731 | |||
732 | if (dev_priv->trace_irq_seqno == 0) | ||
733 | i915_user_irq_get(dev); | ||
734 | |||
735 | dev_priv->trace_irq_seqno = seqno; | ||
736 | } | ||
737 | |||
728 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 738 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
729 | { | 739 | { |
730 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 740 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0466ddbeba32..1687edf68795 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -968,6 +968,8 @@
968 | #define LVDS_PORT_EN (1 << 31) | 968 | #define LVDS_PORT_EN (1 << 31) |
969 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 969 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
970 | #define LVDS_PIPEB_SELECT (1 << 30) | 970 | #define LVDS_PIPEB_SELECT (1 << 30) |
971 | /* Enable border for unscaled (or aspect-scaled) display */ | ||
972 | #define LVDS_BORDER_ENABLE (1 << 15) | ||
971 | /* | 973 | /* |
972 | * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per | 974 | * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per |
973 | * pixel. | 975 | * pixel. |
@@ -1078,6 +1080,8 @@
1078 | #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) | 1080 | #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) |
1079 | #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) | 1081 | #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) |
1080 | 1082 | ||
1083 | #define BLC_HIST_CTL 0x61260 | ||
1084 | |||
1081 | /* TV port control */ | 1085 | /* TV port control */ |
1082 | #define TV_CTL 0x68000 | 1086 | #define TV_CTL 0x68000 |
1083 | /** Enables the TV encoder */ | 1087 | /** Enables the TV encoder */ |
@@ -1780,6 +1784,11 @@
1780 | #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ | 1784 | #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ |
1781 | #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) | 1785 | #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) |
1782 | #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) | 1786 | #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) |
1787 | #define PIPE_BPC_MASK (7 << 5) /* Ironlake */ | ||
1788 | #define PIPE_8BPC (0 << 5) | ||
1789 | #define PIPE_10BPC (1 << 5) | ||
1790 | #define PIPE_6BPC (2 << 5) | ||
1791 | #define PIPE_12BPC (3 << 5) | ||
1783 | 1792 | ||
1784 | #define DSPARB 0x70030 | 1793 | #define DSPARB 0x70030 |
1785 | #define DSPARB_CSTART_MASK (0x7f << 7) | 1794 | #define DSPARB_CSTART_MASK (0x7f << 7) |
@@ -1790,17 +1799,29 @@
1790 | #define DSPARB_AEND_SHIFT 0 | 1799 | #define DSPARB_AEND_SHIFT 0 |
1791 | 1800 | ||
1792 | #define DSPFW1 0x70034 | 1801 | #define DSPFW1 0x70034 |
1802 | #define DSPFW_SR_SHIFT 23 | ||
1803 | #define DSPFW_CURSORB_SHIFT 16 | ||
1804 | #define DSPFW_PLANEB_SHIFT 8 | ||
1793 | #define DSPFW2 0x70038 | 1805 | #define DSPFW2 0x70038 |
1806 | #define DSPFW_CURSORA_MASK 0x00003f00 | ||
1807 | #define DSPFW_CURSORA_SHIFT 16 | ||
1794 | #define DSPFW3 0x7003c | 1808 | #define DSPFW3 0x7003c |
1809 | #define DSPFW_HPLL_SR_EN (1<<31) | ||
1810 | #define DSPFW_CURSOR_SR_SHIFT 24 | ||
1795 | #define IGD_SELF_REFRESH_EN (1<<30) | 1811 | #define IGD_SELF_REFRESH_EN (1<<30) |
1796 | 1812 | ||
1797 | /* FIFO watermark sizes etc */ | 1813 | /* FIFO watermark sizes etc */ |
1814 | #define G4X_FIFO_LINE_SIZE 64 | ||
1798 | #define I915_FIFO_LINE_SIZE 64 | 1815 | #define I915_FIFO_LINE_SIZE 64 |
1799 | #define I830_FIFO_LINE_SIZE 32 | 1816 | #define I830_FIFO_LINE_SIZE 32 |
1817 | |||
1818 | #define G4X_FIFO_SIZE 127 | ||
1800 | #define I945_FIFO_SIZE 127 /* 945 & 965 */ | 1819 | #define I945_FIFO_SIZE 127 /* 945 & 965 */ |
1801 | #define I915_FIFO_SIZE 95 | 1820 | #define I915_FIFO_SIZE 95 |
1802 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ | 1821 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ |
1803 | #define I830_FIFO_SIZE 95 | 1822 | #define I830_FIFO_SIZE 95 |
1823 | |||
1824 | #define G4X_MAX_WM 0x3f | ||
1804 | #define I915_MAX_WM 0x3f | 1825 | #define I915_MAX_WM 0x3f |
1805 | 1826 | ||
1806 | #define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ | 1827 | #define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ |
@@ -2030,6 +2051,11 @@
2030 | #define PFA_CTL_1 0x68080 | 2051 | #define PFA_CTL_1 0x68080 |
2031 | #define PFB_CTL_1 0x68880 | 2052 | #define PFB_CTL_1 0x68880 |
2032 | #define PF_ENABLE (1<<31) | 2053 | #define PF_ENABLE (1<<31) |
2054 | #define PF_FILTER_MASK (3<<23) | ||
2055 | #define PF_FILTER_PROGRAMMED (0<<23) | ||
2056 | #define PF_FILTER_MED_3x3 (1<<23) | ||
2057 | #define PF_FILTER_EDGE_ENHANCE (2<<23) | ||
2058 | #define PF_FILTER_EDGE_SOFTEN (3<<23) | ||
2033 | #define PFA_WIN_SZ 0x68074 | 2059 | #define PFA_WIN_SZ 0x68074 |
2034 | #define PFB_WIN_SZ 0x68874 | 2060 | #define PFB_WIN_SZ 0x68874 |
2035 | #define PFA_WIN_POS 0x68070 | 2061 | #define PFA_WIN_POS 0x68070 |
@@ -2149,11 +2175,11 @@
2149 | #define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13) | 2175 | #define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13) |
2150 | #define DREF_SSC_SOURCE_DISABLE (0<<11) | 2176 | #define DREF_SSC_SOURCE_DISABLE (0<<11) |
2151 | #define DREF_SSC_SOURCE_ENABLE (2<<11) | 2177 | #define DREF_SSC_SOURCE_ENABLE (2<<11) |
2152 | #define DREF_SSC_SOURCE_MASK (2<<11) | 2178 | #define DREF_SSC_SOURCE_MASK (3<<11) |
2153 | #define DREF_NONSPREAD_SOURCE_DISABLE (0<<9) | 2179 | #define DREF_NONSPREAD_SOURCE_DISABLE (0<<9) |
2154 | #define DREF_NONSPREAD_CK505_ENABLE (1<<9) | 2180 | #define DREF_NONSPREAD_CK505_ENABLE (1<<9) |
2155 | #define DREF_NONSPREAD_SOURCE_ENABLE (2<<9) | 2181 | #define DREF_NONSPREAD_SOURCE_ENABLE (2<<9) |
2156 | #define DREF_NONSPREAD_SOURCE_MASK (2<<9) | 2182 | #define DREF_NONSPREAD_SOURCE_MASK (3<<9) |
2157 | #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) | 2183 | #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) |
2158 | #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) | 2184 | #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) |
2159 | #define DREF_SSC4_DOWNSPREAD (0<<6) | 2185 | #define DREF_SSC4_DOWNSPREAD (0<<6) |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index bd6d8d91ca9f..992d5617e798 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -32,11 +32,15 @@
32 | static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | 32 | static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) |
33 | { | 33 | { |
34 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
35 | u32 dpll_reg; | ||
35 | 36 | ||
36 | if (pipe == PIPE_A) | 37 | if (IS_IGDNG(dev)) { |
37 | return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); | 38 | dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; |
38 | else | 39 | } else { |
39 | return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); | 40 | dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; |
41 | } | ||
42 | |||
43 | return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); | ||
40 | } | 44 | } |
41 | 45 | ||
42 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | 46 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) |
@@ -49,6 +53,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
49 | if (!i915_pipe_enabled(dev, pipe)) | 53 | if (!i915_pipe_enabled(dev, pipe)) |
50 | return; | 54 | return; |
51 | 55 | ||
56 | if (IS_IGDNG(dev)) | ||
57 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | ||
58 | |||
52 | if (pipe == PIPE_A) | 59 | if (pipe == PIPE_A) |
53 | array = dev_priv->save_palette_a; | 60 | array = dev_priv->save_palette_a; |
54 | else | 61 | else |
@@ -68,6 +75,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
68 | if (!i915_pipe_enabled(dev, pipe)) | 75 | if (!i915_pipe_enabled(dev, pipe)) |
69 | return; | 76 | return; |
70 | 77 | ||
78 | if (IS_IGDNG(dev)) | ||
79 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | ||
80 | |||
71 | if (pipe == PIPE_A) | 81 | if (pipe == PIPE_A) |
72 | array = dev_priv->save_palette_a; | 82 | array = dev_priv->save_palette_a; |
73 | else | 83 | else |
@@ -232,10 +242,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
232 | /* Pipe & plane A info */ | 242 | /* Pipe & plane A info */ |
233 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 243 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); |
234 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 244 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); |
235 | dev_priv->saveFPA0 = I915_READ(FPA0); | 245 | if (IS_IGDNG(dev)) { |
236 | dev_priv->saveFPA1 = I915_READ(FPA1); | 246 | dev_priv->saveFPA0 = I915_READ(PCH_FPA0); |
237 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); | 247 | dev_priv->saveFPA1 = I915_READ(PCH_FPA1); |
238 | if (IS_I965G(dev)) | 248 | dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); |
249 | } else { | ||
250 | dev_priv->saveFPA0 = I915_READ(FPA0); | ||
251 | dev_priv->saveFPA1 = I915_READ(FPA1); | ||
252 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); | ||
253 | } | ||
254 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | ||
239 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); | 255 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); |
240 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); | 256 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); |
241 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); | 257 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); |
@@ -243,7 +259,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
243 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); | 259 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); |
244 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); | 260 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); |
245 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); | 261 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); |
246 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | 262 | if (!IS_IGDNG(dev)) |
263 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | ||
264 | |||
265 | if (IS_IGDNG(dev)) { | ||
266 | dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); | ||
267 | dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); | ||
268 | |||
269 | dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1); | ||
270 | dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); | ||
271 | dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); | ||
272 | |||
273 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); | ||
274 | dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); | ||
275 | dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); | ||
276 | dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A); | ||
277 | dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A); | ||
278 | dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A); | ||
279 | } | ||
247 | 280 | ||
248 | dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); | 281 | dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); |
249 | dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); | 282 | dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); |
@@ -260,10 +293,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
260 | /* Pipe & plane B info */ | 293 | /* Pipe & plane B info */ |
261 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); | 294 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); |
262 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); | 295 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); |
263 | dev_priv->saveFPB0 = I915_READ(FPB0); | 296 | if (IS_IGDNG(dev)) { |
264 | dev_priv->saveFPB1 = I915_READ(FPB1); | 297 | dev_priv->saveFPB0 = I915_READ(PCH_FPB0); |
265 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); | 298 | dev_priv->saveFPB1 = I915_READ(PCH_FPB1); |
266 | if (IS_I965G(dev)) | 299 | dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); |
300 | } else { | ||
301 | dev_priv->saveFPB0 = I915_READ(FPB0); | ||
302 | dev_priv->saveFPB1 = I915_READ(FPB1); | ||
303 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); | ||
304 | } | ||
305 | if (IS_I965G(dev) && !IS_IGDNG(dev)) | ||
267 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); | 306 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); |
268 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); | 307 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); |
269 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); | 308 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); |
@@ -271,7 +310,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
271 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); | 310 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); |
272 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); | 311 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); |
273 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); | 312 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); |
274 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | 313 | if (!IS_IGDNG(dev)) |
314 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); | ||
315 | |||
316 | if (IS_IGDNG(dev)) { | ||
317 | dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); | ||
318 | dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); | ||
319 | |||
320 | dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1); | ||
321 | dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); | ||
322 | dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); | ||
323 | |||
324 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); | ||
325 | dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); | ||
326 | dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); | ||
327 | dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B); | ||
328 | dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B); | ||
329 | dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B); | ||
330 | } | ||
275 | 331 | ||
276 | dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); | 332 | dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); |
277 | dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); | 333 | dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); |
@@ -290,23 +346,41 @@ static void i915_save_modeset_reg(struct drm_device *dev)
290 | static void i915_restore_modeset_reg(struct drm_device *dev) | 346 | static void i915_restore_modeset_reg(struct drm_device *dev) |
291 | { | 347 | { |
292 | struct drm_i915_private *dev_priv = dev->dev_private; | 348 | struct drm_i915_private *dev_priv = dev->dev_private; |
349 | int dpll_a_reg, fpa0_reg, fpa1_reg; | ||
350 | int dpll_b_reg, fpb0_reg, fpb1_reg; | ||
293 | 351 | ||
294 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 352 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
295 | return; | 353 | return; |
296 | 354 | ||
355 | if (IS_IGDNG(dev)) { | ||
356 | dpll_a_reg = PCH_DPLL_A; | ||
357 | dpll_b_reg = PCH_DPLL_B; | ||
358 | fpa0_reg = PCH_FPA0; | ||
359 | fpb0_reg = PCH_FPB0; | ||
360 | fpa1_reg = PCH_FPA1; | ||
361 | fpb1_reg = PCH_FPB1; | ||
362 | } else { | ||
363 | dpll_a_reg = DPLL_A; | ||
364 | dpll_b_reg = DPLL_B; | ||
365 | fpa0_reg = FPA0; | ||
366 | fpb0_reg = FPB0; | ||
367 | fpa1_reg = FPA1; | ||
368 | fpb1_reg = FPB1; | ||
369 | } | ||
370 | |||
297 | /* Pipe & plane A info */ | 371 | /* Pipe & plane A info */ |
298 | /* Prime the clock */ | 372 | /* Prime the clock */ |
299 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { | 373 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { |
300 | I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & | 374 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & |
301 | ~DPLL_VCO_ENABLE); | 375 | ~DPLL_VCO_ENABLE); |
302 | DRM_UDELAY(150); | 376 | DRM_UDELAY(150); |
303 | } | 377 | } |
304 | I915_WRITE(FPA0, dev_priv->saveFPA0); | 378 | I915_WRITE(fpa0_reg, dev_priv->saveFPA0); |
305 | I915_WRITE(FPA1, dev_priv->saveFPA1); | 379 | I915_WRITE(fpa1_reg, dev_priv->saveFPA1); |
306 | /* Actually enable it */ | 380 | /* Actually enable it */ |
307 | I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); | 381 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); |
308 | DRM_UDELAY(150); | 382 | DRM_UDELAY(150); |
309 | if (IS_I965G(dev)) | 383 | if (IS_I965G(dev) && !IS_IGDNG(dev)) |
310 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); | 384 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); |
311 | DRM_UDELAY(150); | 385 | DRM_UDELAY(150); |
312 | 386 | ||
@@ -317,7 +391,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
317 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); | 391 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); |
318 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); | 392 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); |
319 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); | 393 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); |
320 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | 394 | if (!IS_IGDNG(dev)) |
395 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | ||
396 | |||
397 | if (IS_IGDNG(dev)) { | ||
398 | I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); | ||
399 | I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); | ||
400 | |||
401 | I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1); | ||
402 | I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); | ||
403 | I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); | ||
404 | |||
405 | I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); | ||
406 | I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); | ||
407 | I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); | ||
408 | I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); | ||
409 | I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); | ||
410 | I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); | ||
411 | } | ||
321 | 412 | ||
322 | /* Restore plane info */ | 413 | /* Restore plane info */ |
323 | I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); | 414 | I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); |
@@ -339,14 +430,14 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
339 | 430 | ||
340 | /* Pipe & plane B info */ | 431 | /* Pipe & plane B info */ |
341 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { | 432 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { |
342 | I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & | 433 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & |
343 | ~DPLL_VCO_ENABLE); | 434 | ~DPLL_VCO_ENABLE); |
344 | DRM_UDELAY(150); | 435 | DRM_UDELAY(150); |
345 | } | 436 | } |
346 | I915_WRITE(FPB0, dev_priv->saveFPB0); | 437 | I915_WRITE(fpb0_reg, dev_priv->saveFPB0); |
347 | I915_WRITE(FPB1, dev_priv->saveFPB1); | 438 | I915_WRITE(fpb1_reg, dev_priv->saveFPB1); |
348 | /* Actually enable it */ | 439 | /* Actually enable it */ |
349 | I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); | 440 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); |
350 | DRM_UDELAY(150); | 441 | DRM_UDELAY(150); |
351 | if (IS_I965G(dev)) | 442 | if (IS_I965G(dev)) |
352 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | 443 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); |
@@ -359,7 +450,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
359 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); | 450 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); |
360 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); | 451 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); |
361 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); | 452 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); |
362 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | 453 | if (!IS_IGDNG(dev)) |
454 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | ||
455 | |||
456 | if (IS_IGDNG(dev)) { | ||
457 | I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); | ||
458 | I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); | ||
459 | |||
460 | I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1); | ||
461 | I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); | ||
462 | I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); | ||
463 | |||
464 | I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); | ||
465 | I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); | ||
466 | I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); | ||
467 | I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); | ||
468 | I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); | ||
469 | I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); | ||
470 | } | ||
363 | 471 | ||
364 | /* Restore plane info */ | 472 | /* Restore plane info */ |
365 | I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); | 473 | I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); |
@@ -404,21 +512,43 @@ void i915_save_display(struct drm_device *dev)
404 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | 512 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); |
405 | 513 | ||
406 | /* CRT state */ | 514 | /* CRT state */ |
407 | dev_priv->saveADPA = I915_READ(ADPA); | 515 | if (IS_IGDNG(dev)) { |
516 | dev_priv->saveADPA = I915_READ(PCH_ADPA); | ||
517 | } else { | ||
518 | dev_priv->saveADPA = I915_READ(ADPA); | ||
519 | } | ||
408 | 520 | ||
409 | /* LVDS state */ | 521 | /* LVDS state */ |
410 | dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); | 522 | if (IS_IGDNG(dev)) { |
411 | dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); | 523 | dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); |
412 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | 524 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); |
413 | if (IS_I965G(dev)) | 525 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); |
414 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | 526 | dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); |
415 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 527 | dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); |
416 | dev_priv->saveLVDS = I915_READ(LVDS); | 528 | dev_priv->saveLVDS = I915_READ(PCH_LVDS); |
417 | if (!IS_I830(dev) && !IS_845G(dev)) | 529 | } else { |
530 | dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); | ||
531 | dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); | ||
532 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | ||
533 | dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); | ||
534 | if (IS_I965G(dev)) | ||
535 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | ||
536 | if (IS_MOBILE(dev) && !IS_I830(dev)) | ||
537 | dev_priv->saveLVDS = I915_READ(LVDS); | ||
538 | } | ||
539 | |||
540 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) | ||
418 | dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); | 541 | dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); |
419 | dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); | 542 | |
420 | dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); | 543 | if (IS_IGDNG(dev)) { |
421 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); | 544 | dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); |
545 | dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); | ||
546 | dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); | ||
547 | } else { | ||
548 | dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); | ||
549 | dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); | ||
550 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); | ||
551 | } | ||
422 | 552 | ||
423 | /* Display Port state */ | 553 | /* Display Port state */ |
424 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 554 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
@@ -437,16 +567,23 @@ void i915_save_display(struct drm_device *dev)
437 | /* FIXME: save TV & SDVO state */ | 567 | /* FIXME: save TV & SDVO state */ |
438 | 568 | ||
439 | /* FBC state */ | 569 | /* FBC state */ |
440 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); | 570 | if (IS_GM45(dev)) { |
441 | dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); | 571 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); |
442 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); | 572 | } else { |
443 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); | 573 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); |
574 | dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); | ||
575 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); | ||
576 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); | ||
577 | } | ||
444 | 578 | ||
445 | /* VGA state */ | 579 | /* VGA state */ |
446 | dev_priv->saveVGA0 = I915_READ(VGA0); | 580 | dev_priv->saveVGA0 = I915_READ(VGA0); |
447 | dev_priv->saveVGA1 = I915_READ(VGA1); | 581 | dev_priv->saveVGA1 = I915_READ(VGA1); |
448 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); | 582 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); |
449 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | 583 | if (IS_IGDNG(dev)) |
584 | dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); | ||
585 | else | ||
586 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | ||
450 | 587 | ||
451 | i915_save_vga(dev); | 588 | i915_save_vga(dev); |
452 | } | 589 | } |
@@ -485,22 +622,41 @@ void i915_restore_display(struct drm_device *dev)
485 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | 622 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); |
486 | 623 | ||
487 | /* CRT state */ | 624 | /* CRT state */ |
488 | I915_WRITE(ADPA, dev_priv->saveADPA); | 625 | if (IS_IGDNG(dev)) |
626 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); | ||
627 | else | ||
628 | I915_WRITE(ADPA, dev_priv->saveADPA); | ||
489 | 629 | ||
490 | /* LVDS state */ | 630 | /* LVDS state */ |
491 | if (IS_I965G(dev)) | 631 | if (IS_I965G(dev) && !IS_IGDNG(dev)) |
492 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); | 632 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); |
493 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 633 | |
634 | if (IS_IGDNG(dev)) { | ||
635 | I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); | ||
636 | } else if (IS_MOBILE(dev) && !IS_I830(dev)) | ||
494 | I915_WRITE(LVDS, dev_priv->saveLVDS); | 637 | I915_WRITE(LVDS, dev_priv->saveLVDS); |
495 | if (!IS_I830(dev) && !IS_845G(dev)) | 638 | |
639 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) | ||
496 | I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); | 640 | I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); |
497 | 641 | ||
498 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); | 642 | if (IS_IGDNG(dev)) { |
499 | I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); | 643 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); |
500 | I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); | 644 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); |
501 | I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | 645 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); |
502 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); | 646 | I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); |
503 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); | 647 | I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); |
648 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | ||
649 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); | ||
650 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); | ||
651 | } else { | ||
652 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); | ||
653 | I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); | ||
654 | I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); | ||
655 | I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); | ||
656 | I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | ||
657 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); | ||
658 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); | ||
659 | } | ||
504 | 660 | ||
505 | /* Display Port state */ | 661 | /* Display Port state */ |
506 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 662 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
@@ -511,13 +667,22 @@ void i915_restore_display(struct drm_device *dev)
511 | /* FIXME: restore TV & SDVO state */ | 667 | /* FIXME: restore TV & SDVO state */ |
512 | 668 | ||
513 | /* FBC info */ | 669 | /* FBC info */ |
514 | I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); | 670 | if (IS_GM45(dev)) { |
515 | I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); | 671 | g4x_disable_fbc(dev); |
516 | I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); | 672 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); |
517 | I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); | 673 | } else { |
674 | i8xx_disable_fbc(dev); | ||
675 | I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); | ||
676 | I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); | ||
677 | I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); | ||
678 | I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); | ||
679 | } | ||
518 | 680 | ||
519 | /* VGA state */ | 681 | /* VGA state */ |
520 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); | 682 | if (IS_IGDNG(dev)) |
683 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); | ||
684 | else | ||
685 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); | ||
521 | I915_WRITE(VGA0, dev_priv->saveVGA0); | 686 | I915_WRITE(VGA0, dev_priv->saveVGA0); |
522 | I915_WRITE(VGA1, dev_priv->saveVGA1); | 687 | I915_WRITE(VGA1, dev_priv->saveVGA1); |
523 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); | 688 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); |
@@ -543,8 +708,17 @@ int i915_save_state(struct drm_device *dev)
543 | i915_save_display(dev); | 708 | i915_save_display(dev); |
544 | 709 | ||
545 | /* Interrupt state */ | 710 | /* Interrupt state */ |
546 | dev_priv->saveIER = I915_READ(IER); | 711 | if (IS_IGDNG(dev)) { |
547 | dev_priv->saveIMR = I915_READ(IMR); | 712 | dev_priv->saveDEIER = I915_READ(DEIER); |
713 | dev_priv->saveDEIMR = I915_READ(DEIMR); | ||
714 | dev_priv->saveGTIER = I915_READ(GTIER); | ||
715 | dev_priv->saveGTIMR = I915_READ(GTIMR); | ||
716 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); | ||
717 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); | ||
718 | } else { | ||
719 | dev_priv->saveIER = I915_READ(IER); | ||
720 | dev_priv->saveIMR = I915_READ(IMR); | ||
721 | } | ||
548 | 722 | ||
549 | /* Clock gating state */ | 723 | /* Clock gating state */ |
550 | dev_priv->saveD_STATE = I915_READ(D_STATE); | 724 | dev_priv->saveD_STATE = I915_READ(D_STATE); |
@@ -609,8 +783,17 @@ int i915_restore_state(struct drm_device *dev)
609 | i915_restore_display(dev); | 783 | i915_restore_display(dev); |
610 | 784 | ||
611 | /* Interrupt state */ | 785 | /* Interrupt state */ |
612 | I915_WRITE (IER, dev_priv->saveIER); | 786 | if (IS_IGDNG(dev)) { |
613 | I915_WRITE (IMR, dev_priv->saveIMR); | 787 | I915_WRITE(DEIER, dev_priv->saveDEIER); |
788 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); | ||
789 | I915_WRITE(GTIER, dev_priv->saveGTIER); | ||
790 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); | ||
791 | I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); | ||
792 | I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); | ||
793 | } else { | ||
794 | I915_WRITE (IER, dev_priv->saveIER); | ||
795 | I915_WRITE (IMR, dev_priv->saveIMR); | ||
796 | } | ||
614 | 797 | ||
615 | /* Clock gating state */ | 798 | /* Clock gating state */ |
616 | I915_WRITE (D_STATE, dev_priv->saveD_STATE); | 799 | I915_WRITE (D_STATE, dev_priv->saveD_STATE); |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5567a40816f3..01840d9bc38f 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -158,16 +158,17 @@ TRACE_EVENT(i915_gem_request_submit,
158 | TP_ARGS(dev, seqno), | 158 | TP_ARGS(dev, seqno), |
159 | 159 | ||
160 | TP_STRUCT__entry( | 160 | TP_STRUCT__entry( |
161 | __field(struct drm_device *, dev) | 161 | __field(u32, dev) |
162 | __field(u32, seqno) | 162 | __field(u32, seqno) |
163 | ), | 163 | ), |
164 | 164 | ||
165 | TP_fast_assign( | 165 | TP_fast_assign( |
166 | __entry->dev = dev; | 166 | __entry->dev = dev->primary->index; |
167 | __entry->seqno = seqno; | 167 | __entry->seqno = seqno; |
168 | i915_trace_irq_get(dev, seqno); | ||
168 | ), | 169 | ), |
169 | 170 | ||
170 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | 171 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) |
171 | ); | 172 | ); |
172 | 173 | ||
173 | TRACE_EVENT(i915_gem_request_flush, | 174 | TRACE_EVENT(i915_gem_request_flush, |
@@ -178,20 +179,20 @@ TRACE_EVENT(i915_gem_request_flush,
178 | TP_ARGS(dev, seqno, flush_domains, invalidate_domains), | 179 | TP_ARGS(dev, seqno, flush_domains, invalidate_domains), |
179 | 180 | ||
180 | TP_STRUCT__entry( | 181 | TP_STRUCT__entry( |
181 | __field(struct drm_device *, dev) | 182 | __field(u32, dev) |
182 | __field(u32, seqno) | 183 | __field(u32, seqno) |
183 | __field(u32, flush_domains) | 184 | __field(u32, flush_domains) |
184 | __field(u32, invalidate_domains) | 185 | __field(u32, invalidate_domains) |
185 | ), | 186 | ), |
186 | 187 | ||
187 | TP_fast_assign( | 188 | TP_fast_assign( |
188 | __entry->dev = dev; | 189 | __entry->dev = dev->primary->index; |
189 | __entry->seqno = seqno; | 190 | __entry->seqno = seqno; |
190 | __entry->flush_domains = flush_domains; | 191 | __entry->flush_domains = flush_domains; |
191 | __entry->invalidate_domains = invalidate_domains; | 192 | __entry->invalidate_domains = invalidate_domains; |
192 | ), | 193 | ), |
193 | 194 | ||
194 | TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x", | 195 | TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x", |
195 | __entry->dev, __entry->seqno, | 196 | __entry->dev, __entry->seqno, |
196 | __entry->flush_domains, __entry->invalidate_domains) | 197 | __entry->flush_domains, __entry->invalidate_domains) |
197 | ); | 198 | ); |
@@ -204,16 +205,16 @@ TRACE_EVENT(i915_gem_request_complete, | |||
204 | TP_ARGS(dev, seqno), | 205 | TP_ARGS(dev, seqno), |
205 | 206 | ||
206 | TP_STRUCT__entry( | 207 | TP_STRUCT__entry( |
207 | __field(struct drm_device *, dev) | 208 | __field(u32, dev) |
208 | __field(u32, seqno) | 209 | __field(u32, seqno) |
209 | ), | 210 | ), |
210 | 211 | ||
211 | TP_fast_assign( | 212 | TP_fast_assign( |
212 | __entry->dev = dev; | 213 | __entry->dev = dev->primary->index; |
213 | __entry->seqno = seqno; | 214 | __entry->seqno = seqno; |
214 | ), | 215 | ), |
215 | 216 | ||
216 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | 217 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) |
217 | ); | 218 | ); |
218 | 219 | ||
219 | TRACE_EVENT(i915_gem_request_retire, | 220 | TRACE_EVENT(i915_gem_request_retire, |
@@ -223,16 +224,16 @@ TRACE_EVENT(i915_gem_request_retire, | |||
223 | TP_ARGS(dev, seqno), | 224 | TP_ARGS(dev, seqno), |
224 | 225 | ||
225 | TP_STRUCT__entry( | 226 | TP_STRUCT__entry( |
226 | __field(struct drm_device *, dev) | 227 | __field(u32, dev) |
227 | __field(u32, seqno) | 228 | __field(u32, seqno) |
228 | ), | 229 | ), |
229 | 230 | ||
230 | TP_fast_assign( | 231 | TP_fast_assign( |
231 | __entry->dev = dev; | 232 | __entry->dev = dev->primary->index; |
232 | __entry->seqno = seqno; | 233 | __entry->seqno = seqno; |
233 | ), | 234 | ), |
234 | 235 | ||
235 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | 236 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) |
236 | ); | 237 | ); |
237 | 238 | ||
238 | TRACE_EVENT(i915_gem_request_wait_begin, | 239 | TRACE_EVENT(i915_gem_request_wait_begin, |
@@ -242,16 +243,16 @@ TRACE_EVENT(i915_gem_request_wait_begin, | |||
242 | TP_ARGS(dev, seqno), | 243 | TP_ARGS(dev, seqno), |
243 | 244 | ||
244 | TP_STRUCT__entry( | 245 | TP_STRUCT__entry( |
245 | __field(struct drm_device *, dev) | 246 | __field(u32, dev) |
246 | __field(u32, seqno) | 247 | __field(u32, seqno) |
247 | ), | 248 | ), |
248 | 249 | ||
249 | TP_fast_assign( | 250 | TP_fast_assign( |
250 | __entry->dev = dev; | 251 | __entry->dev = dev->primary->index; |
251 | __entry->seqno = seqno; | 252 | __entry->seqno = seqno; |
252 | ), | 253 | ), |
253 | 254 | ||
254 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | 255 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) |
255 | ); | 256 | ); |
256 | 257 | ||
257 | TRACE_EVENT(i915_gem_request_wait_end, | 258 | TRACE_EVENT(i915_gem_request_wait_end, |
@@ -261,16 +262,16 @@ TRACE_EVENT(i915_gem_request_wait_end, | |||
261 | TP_ARGS(dev, seqno), | 262 | TP_ARGS(dev, seqno), |
262 | 263 | ||
263 | TP_STRUCT__entry( | 264 | TP_STRUCT__entry( |
264 | __field(struct drm_device *, dev) | 265 | __field(u32, dev) |
265 | __field(u32, seqno) | 266 | __field(u32, seqno) |
266 | ), | 267 | ), |
267 | 268 | ||
268 | TP_fast_assign( | 269 | TP_fast_assign( |
269 | __entry->dev = dev; | 270 | __entry->dev = dev->primary->index; |
270 | __entry->seqno = seqno; | 271 | __entry->seqno = seqno; |
271 | ), | 272 | ), |
272 | 273 | ||
273 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | 274 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) |
274 | ); | 275 | ); |
275 | 276 | ||
276 | TRACE_EVENT(i915_ring_wait_begin, | 277 | TRACE_EVENT(i915_ring_wait_begin, |
@@ -280,14 +281,14 @@ TRACE_EVENT(i915_ring_wait_begin, | |||
280 | TP_ARGS(dev), | 281 | TP_ARGS(dev), |
281 | 282 | ||
282 | TP_STRUCT__entry( | 283 | TP_STRUCT__entry( |
283 | __field(struct drm_device *, dev) | 284 | __field(u32, dev) |
284 | ), | 285 | ), |
285 | 286 | ||
286 | TP_fast_assign( | 287 | TP_fast_assign( |
287 | __entry->dev = dev; | 288 | __entry->dev = dev->primary->index; |
288 | ), | 289 | ), |
289 | 290 | ||
290 | TP_printk("dev=%p", __entry->dev) | 291 | TP_printk("dev=%u", __entry->dev) |
291 | ); | 292 | ); |
292 | 293 | ||
293 | TRACE_EVENT(i915_ring_wait_end, | 294 | TRACE_EVENT(i915_ring_wait_end, |
@@ -297,14 +298,14 @@ TRACE_EVENT(i915_ring_wait_end, | |||
297 | TP_ARGS(dev), | 298 | TP_ARGS(dev), |
298 | 299 | ||
299 | TP_STRUCT__entry( | 300 | TP_STRUCT__entry( |
300 | __field(struct drm_device *, dev) | 301 | __field(u32, dev) |
301 | ), | 302 | ), |
302 | 303 | ||
303 | TP_fast_assign( | 304 | TP_fast_assign( |
304 | __entry->dev = dev; | 305 | __entry->dev = dev->primary->index; |
305 | ), | 306 | ), |
306 | 307 | ||
307 | TP_printk("dev=%p", __entry->dev) | 308 | TP_printk("dev=%u", __entry->dev) |
308 | ); | 309 | ); |
309 | 310 | ||
310 | #endif /* _I915_TRACE_H_ */ | 311 | #endif /* _I915_TRACE_H_ */ |
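Every tracepoint above switches its dev field from a raw struct drm_device pointer to the DRM minor index (dev->primary->index) stored as a u32, so trace output carries a small, stable number instead of a kernel address. For readers unfamiliar with the tracepoint machinery, a minimal trace-header skeleton showing where such a definition lives; the names here are illustrative, not the driver's header, and it assumes the DRM types are already included:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_EXAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _EXAMPLE_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_request,
	TP_PROTO(struct drm_device *dev, u32 seqno),
	TP_ARGS(dev, seqno),

	TP_STRUCT__entry(
		__field(u32, dev)	/* minor index, not a pointer */
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->seqno = seqno;
	),

	TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);

#endif /* _EXAMPLE_TRACE_H */

/* This must be the last line of the trace header. */
#include <trace/define_trace.h>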
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 4337414846b6..96cd256e60e6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -351,20 +351,18 @@ parse_driver_features(struct drm_i915_private *dev_priv, | |||
351 | struct drm_device *dev = dev_priv->dev; | 351 | struct drm_device *dev = dev_priv->dev; |
352 | struct bdb_driver_features *driver; | 352 | struct bdb_driver_features *driver; |
353 | 353 | ||
354 | /* set default for chips without eDP */ | ||
355 | if (!SUPPORTS_EDP(dev)) { | ||
356 | dev_priv->edp_support = 0; | ||
357 | return; | ||
358 | } | ||
359 | |||
360 | driver = find_section(bdb, BDB_DRIVER_FEATURES); | 354 | driver = find_section(bdb, BDB_DRIVER_FEATURES); |
361 | if (!driver) | 355 | if (!driver) |
362 | return; | 356 | return; |
363 | 357 | ||
364 | if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) | 358 | if (driver && SUPPORTS_EDP(dev) && |
359 | driver->lvds_config == BDB_DRIVER_FEATURE_EDP) { | ||
365 | dev_priv->edp_support = 1; | 360 | dev_priv->edp_support = 1; |
361 | } else { | ||
362 | dev_priv->edp_support = 0; | ||
363 | } | ||
366 | 364 | ||
367 | if (driver->dual_frequency) | 365 | if (driver && driver->dual_frequency) |
368 | dev_priv->render_reclock_avail = true; | 366 | dev_priv->render_reclock_avail = true; |
369 | } | 367 | } |
370 | 368 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 93ff6c03733e..3ba6546b7c7f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -943,6 +943,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
943 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); | 943 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); |
944 | clock.p = (clock.p1 * clock.p2); | 944 | clock.p = (clock.p1 * clock.p2); |
945 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; | 945 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; |
946 | clock.vco = 0; | ||
946 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | 947 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
947 | return true; | 948 | return true; |
948 | } | 949 | } |
@@ -1260,9 +1261,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1260 | return ret; | 1261 | return ret; |
1261 | } | 1262 | } |
1262 | 1263 | ||
1263 | /* Pre-i965 needs to install a fence for tiled scan-out */ | 1264 | /* Install a fence for tiled scan-out. Pre-i965 always needs a fence, |
1264 | if (!IS_I965G(dev) && | 1265 | * whereas 965+ only requires a fence if using framebuffer compression. |
1265 | obj_priv->fence_reg == I915_FENCE_REG_NONE && | 1266 | * For simplicity, we always install a fence as the cost is not that onerous. |
1267 | */ | ||
1268 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | ||
1266 | obj_priv->tiling_mode != I915_TILING_NONE) { | 1269 | obj_priv->tiling_mode != I915_TILING_NONE) { |
1267 | ret = i915_gem_object_get_fence_reg(obj); | 1270 | ret = i915_gem_object_get_fence_reg(obj); |
1268 | if (ret != 0) { | 1271 | if (ret != 0) { |
@@ -1513,7 +1516,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1513 | /* Enable panel fitting for LVDS */ | 1516 | /* Enable panel fitting for LVDS */ |
1514 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 1517 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
1515 | temp = I915_READ(pf_ctl_reg); | 1518 | temp = I915_READ(pf_ctl_reg); |
1516 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE); | 1519 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); |
1517 | 1520 | ||
1518 | /* currently full aspect */ | 1521 | /* currently full aspect */ |
1519 | I915_WRITE(pf_win_pos, 0); | 1522 | I915_WRITE(pf_win_pos, 0); |
@@ -1801,6 +1804,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1801 | case DRM_MODE_DPMS_ON: | 1804 | case DRM_MODE_DPMS_ON: |
1802 | case DRM_MODE_DPMS_STANDBY: | 1805 | case DRM_MODE_DPMS_STANDBY: |
1803 | case DRM_MODE_DPMS_SUSPEND: | 1806 | case DRM_MODE_DPMS_SUSPEND: |
1807 | intel_update_watermarks(dev); | ||
1808 | |||
1804 | /* Enable the DPLL */ | 1809 | /* Enable the DPLL */ |
1805 | temp = I915_READ(dpll_reg); | 1810 | temp = I915_READ(dpll_reg); |
1806 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 1811 | if ((temp & DPLL_VCO_ENABLE) == 0) { |
@@ -1838,7 +1843,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1838 | 1843 | ||
1839 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | 1844 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
1840 | //intel_crtc_dpms_video(crtc, true); TODO | 1845 | //intel_crtc_dpms_video(crtc, true); TODO |
1841 | intel_update_watermarks(dev); | ||
1842 | break; | 1846 | break; |
1843 | case DRM_MODE_DPMS_OFF: | 1847 | case DRM_MODE_DPMS_OFF: |
1844 | intel_update_watermarks(dev); | 1848 | intel_update_watermarks(dev); |
@@ -2082,7 +2086,7 @@ fdi_reduce_ratio(u32 *num, u32 *den) | |||
2082 | #define LINK_N 0x80000 | 2086 | #define LINK_N 0x80000 |
2083 | 2087 | ||
2084 | static void | 2088 | static void |
2085 | igdng_compute_m_n(int bytes_per_pixel, int nlanes, | 2089 | igdng_compute_m_n(int bits_per_pixel, int nlanes, |
2086 | int pixel_clock, int link_clock, | 2090 | int pixel_clock, int link_clock, |
2087 | struct fdi_m_n *m_n) | 2091 | struct fdi_m_n *m_n) |
2088 | { | 2092 | { |
@@ -2092,7 +2096,8 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes, | |||
2092 | 2096 | ||
2093 | temp = (u64) DATA_N * pixel_clock; | 2097 | temp = (u64) DATA_N * pixel_clock; |
2094 | temp = div_u64(temp, link_clock); | 2098 | temp = div_u64(temp, link_clock); |
2095 | m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes); | 2099 | m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); |
2100 | m_n->gmch_m >>= 3; /* convert bits to bytes per pixel */ | ||
2096 | m_n->gmch_n = DATA_N; | 2101 | m_n->gmch_n = DATA_N; |
2097 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 2102 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
2098 | 2103 | ||
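The M value is now computed from bits per pixel and only reduced to bytes at the end with the >>= 3, which keeps the intermediate multiplication exact instead of rounding a fractional byte count first. A standalone sketch of the arithmetic (the DATA_N value below is a placeholder constant, not the driver's register value):

#include <stdint.h>
#include <stdio.h>

#define DATA_N 0x800000ULL	/* placeholder; the driver defines its own */

int main(void)
{
	uint64_t pixel_clock = 148500;	/* kHz, e.g. a 1080p60 panel */
	uint64_t link_clock  = 270000;	/* kHz FDI link */
	uint64_t bpp         = 24;	/* bits per pixel at 8 bpc */
	uint64_t nlanes      = 4;

	uint64_t temp   = DATA_N * pixel_clock / link_clock;
	uint64_t gmch_m = temp * bpp / nlanes;
	gmch_m >>= 3;			/* bits -> bytes, as in the patch */

	printf("gmch_m=%llu gmch_n=%llu\n",
	       (unsigned long long)gmch_m, (unsigned long long)DATA_N);
	return 0;
}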
@@ -2140,6 +2145,13 @@ static struct intel_watermark_params igd_cursor_hplloff_wm = { | |||
2140 | IGD_CURSOR_GUARD_WM, | 2145 | IGD_CURSOR_GUARD_WM, |
2141 | IGD_FIFO_LINE_SIZE | 2146 | IGD_FIFO_LINE_SIZE |
2142 | }; | 2147 | }; |
2148 | static struct intel_watermark_params g4x_wm_info = { | ||
2149 | G4X_FIFO_SIZE, | ||
2150 | G4X_MAX_WM, | ||
2151 | G4X_MAX_WM, | ||
2152 | 2, | ||
2153 | G4X_FIFO_LINE_SIZE, | ||
2154 | }; | ||
2143 | static struct intel_watermark_params i945_wm_info = { | 2155 | static struct intel_watermark_params i945_wm_info = { |
2144 | I945_FIFO_SIZE, | 2156 | I945_FIFO_SIZE, |
2145 | I915_MAX_WM, | 2157 | I915_MAX_WM, |
@@ -2430,17 +2442,74 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
2430 | return size; | 2442 | return size; |
2431 | } | 2443 | } |
2432 | 2444 | ||
2433 | static void g4x_update_wm(struct drm_device *dev, int unused, int unused2, | 2445 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, |
2434 | int unused3, int unused4) | 2446 | int planeb_clock, int sr_hdisplay, int pixel_size) |
2435 | { | 2447 | { |
2436 | struct drm_i915_private *dev_priv = dev->dev_private; | 2448 | struct drm_i915_private *dev_priv = dev->dev_private; |
2437 | u32 fw_blc_self = I915_READ(FW_BLC_SELF); | 2449 | int total_size, cacheline_size; |
2450 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr; | ||
2451 | struct intel_watermark_params planea_params, planeb_params; | ||
2452 | unsigned long line_time_us; | ||
2453 | int sr_clock, sr_entries = 0, entries_required; | ||
2438 | 2454 | ||
2439 | if (i915_powersave) | 2455 | /* Create copies of the base settings for each pipe */ |
2440 | fw_blc_self |= FW_BLC_SELF_EN; | 2456 | planea_params = planeb_params = g4x_wm_info; |
2441 | else | 2457 | |
2442 | fw_blc_self &= ~FW_BLC_SELF_EN; | 2458 | /* Grab a couple of global values before we overwrite them */ |
2443 | I915_WRITE(FW_BLC_SELF, fw_blc_self); | 2459 | total_size = planea_params.fifo_size; |
2460 | cacheline_size = planea_params.cacheline_size; | ||
2461 | |||
2462 | /* | ||
2463 | * Note: we need to make sure we don't overflow for various clock & | ||
2464 | * latency values. | ||
2465 | * clocks go from a few thousand to several hundred thousand. | ||
2466 | * latency is usually a few thousand | ||
2467 | */ | ||
2468 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / | ||
2469 | 1000; | ||
2470 | entries_required /= G4X_FIFO_LINE_SIZE; | ||
2471 | planea_wm = entries_required + planea_params.guard_size; | ||
2472 | |||
2473 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / | ||
2474 | 1000; | ||
2475 | entries_required /= G4X_FIFO_LINE_SIZE; | ||
2476 | planeb_wm = entries_required + planeb_params.guard_size; | ||
2477 | |||
2478 | cursora_wm = cursorb_wm = 16; | ||
2479 | cursor_sr = 32; | ||
2480 | |||
2481 | DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | ||
2482 | |||
2483 | /* Calc sr entries for one plane configs */ | ||
2484 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | ||
2485 | /* self-refresh has much higher latency */ | ||
2486 | static const int sr_latency_ns = 12000; | ||
2487 | |||
2488 | sr_clock = planea_clock ? planea_clock : planeb_clock; | ||
2489 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | ||
2490 | |||
2491 | /* Use ns/us then divide to preserve precision */ | ||
2492 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | ||
2493 | pixel_size * sr_hdisplay) / 1000; | ||
2494 | sr_entries = roundup(sr_entries / cacheline_size, 1); | ||
2495 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | ||
2496 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
2497 | } | ||
2498 | |||
2499 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | ||
2500 | planea_wm, planeb_wm, sr_entries); | ||
2501 | |||
2502 | planea_wm &= 0x3f; | ||
2503 | planeb_wm &= 0x3f; | ||
2504 | |||
2505 | I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) | | ||
2506 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | | ||
2507 | (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm); | ||
2508 | I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | ||
2509 | (cursora_wm << DSPFW_CURSORA_SHIFT)); | ||
2510 | /* HPLL off in SR has some issues on G4x... disable it */ | ||
2511 | I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | ||
2512 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | ||
2444 | } | 2513 | } |
2445 | 2514 | ||
2446 | static void i965_update_wm(struct drm_device *dev, int unused, int unused2, | 2515 | static void i965_update_wm(struct drm_device *dev, int unused, int unused2, |
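g4x_update_wm() now derives real per-plane FIFO watermarks from the plane clock, pixel size and memory latency instead of only toggling self-refresh. A rough standalone walk-through of the entries calculation; the latency, guard size and FIFO line size below are placeholders, not the driver's constants:

#include <stdio.h>

int main(void)
{
	int planea_clock = 135000;	/* kHz */
	int pixel_size   = 4;		/* bytes per pixel */
	int latency_ns   = 5000;	/* placeholder memory latency */
	int guard_size   = 2;
	int fifo_line    = 64;		/* placeholder FIFO line size */

	int entries = ((planea_clock / 1000) * pixel_size * latency_ns) / 1000;
	entries /= fifo_line;

	printf("planea_wm = %d\n", entries + guard_size);	/* well under the 0x3f mask */
	return 0;
}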
@@ -2586,6 +2655,9 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
2586 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | 2655 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; |
2587 | int enabled = 0, pixel_size = 0; | 2656 | int enabled = 0, pixel_size = 0; |
2588 | 2657 | ||
2658 | if (!dev_priv->display.update_wm) | ||
2659 | return; | ||
2660 | |||
2589 | /* Get the clock config from both planes */ | 2661 | /* Get the clock config from both planes */ |
2590 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 2662 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
2591 | intel_crtc = to_intel_crtc(crtc); | 2663 | intel_crtc = to_intel_crtc(crtc); |
@@ -2763,7 +2835,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2763 | 2835 | ||
2764 | /* FDI link */ | 2836 | /* FDI link */ |
2765 | if (IS_IGDNG(dev)) { | 2837 | if (IS_IGDNG(dev)) { |
2766 | int lane, link_bw; | 2838 | int lane, link_bw, bpp; |
2767 | /* eDP doesn't require FDI link, so just set DP M/N | 2839 | /* eDP doesn't require FDI link, so just set DP M/N |
2768 | according to current link config */ | 2840 | according to current link config */ |
2769 | if (is_edp) { | 2841 | if (is_edp) { |
@@ -2782,10 +2854,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2782 | lane = 4; | 2854 | lane = 4; |
2783 | link_bw = 270000; | 2855 | link_bw = 270000; |
2784 | } | 2856 | } |
2785 | igdng_compute_m_n(3, lane, target_clock, | 2857 | |
2858 | /* determine panel color depth */ | ||
2859 | temp = I915_READ(pipeconf_reg); | ||
2860 | |||
2861 | switch (temp & PIPE_BPC_MASK) { | ||
2862 | case PIPE_8BPC: | ||
2863 | bpp = 24; | ||
2864 | break; | ||
2865 | case PIPE_10BPC: | ||
2866 | bpp = 30; | ||
2867 | break; | ||
2868 | case PIPE_6BPC: | ||
2869 | bpp = 18; | ||
2870 | break; | ||
2871 | case PIPE_12BPC: | ||
2872 | bpp = 36; | ||
2873 | break; | ||
2874 | default: | ||
2875 | DRM_ERROR("unknown pipe bpc value\n"); | ||
2876 | bpp = 24; | ||
2877 | } | ||
2878 | |||
2879 | igdng_compute_m_n(bpp, lane, target_clock, | ||
2786 | link_bw, &m_n); | 2880 | link_bw, &m_n); |
2787 | } | 2881 | } |
2788 | 2882 | ||
2883 | /* Ironlake: try to set up the display reference clock before | ||
2884 | * enabling the DPLL. This is only under the driver's control | ||
2885 | * from PCH stepping B onwards; earlier steppings ignore | ||
2886 | * this setting. | ||
2887 | */ | ||
2888 | if (IS_IGDNG(dev)) { | ||
2889 | temp = I915_READ(PCH_DREF_CONTROL); | ||
2890 | /* Always enable nonspread source */ | ||
2891 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
2892 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
2893 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
2894 | POSTING_READ(PCH_DREF_CONTROL); | ||
2895 | |||
2896 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
2897 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
2898 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
2899 | POSTING_READ(PCH_DREF_CONTROL); | ||
2900 | |||
2901 | udelay(200); | ||
2902 | |||
2903 | if (is_edp) { | ||
2904 | if (dev_priv->lvds_use_ssc) { | ||
2905 | temp |= DREF_SSC1_ENABLE; | ||
2906 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
2907 | POSTING_READ(PCH_DREF_CONTROL); | ||
2908 | |||
2909 | udelay(200); | ||
2910 | |||
2911 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
2912 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
2913 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
2914 | POSTING_READ(PCH_DREF_CONTROL); | ||
2915 | } else { | ||
2916 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
2917 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
2918 | POSTING_READ(PCH_DREF_CONTROL); | ||
2919 | } | ||
2920 | } | ||
2921 | } | ||
2922 | |||
2789 | if (IS_IGD(dev)) { | 2923 | if (IS_IGD(dev)) { |
2790 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | 2924 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
2791 | if (has_reduced_clock) | 2925 | if (has_reduced_clock) |
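The panel colour depth is now read back from the pipe configuration and converted to bits per pixel before the FDI M/N computation, rather than hard-coding 3 bytes. The mapping is simply three channels times the per-channel depth; a standalone illustration (the PIPE_*BPC register encodings themselves are not reproduced here):

#include <stdio.h>

static int bpc_to_bpp(int bpc)
{
	switch (bpc) {
	case 6:  return 18;
	case 8:  return 24;
	case 10: return 30;
	case 12: return 36;
	default: return 24;	/* same fallback as the patch */
	}
}

int main(void)
{
	printf("8 bpc -> %d bpp\n", bpc_to_bpp(8));
	return 0;
}

The PCH_DREF_CONTROL block that follows relies on write, POSTING_READ, then udelay sequencing so that each reference-clock field change settles before the next one is touched.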
@@ -2936,6 +3070,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2936 | 3070 | ||
2937 | lvds = I915_READ(lvds_reg); | 3071 | lvds = I915_READ(lvds_reg); |
2938 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; | 3072 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; |
3073 | /* set the corresponding LVDS_BORDER bit */ | ||
3074 | lvds |= dev_priv->lvds_border_bits; | ||
2939 | /* Set the B0-B3 data pairs corresponding to whether we're going to | 3075 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
2940 | * set the DPLLs for dual-channel mode or not. | 3076 | * set the DPLLs for dual-channel mode or not. |
2941 | */ | 3077 | */ |
@@ -3095,7 +3231,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3095 | struct drm_gem_object *bo; | 3231 | struct drm_gem_object *bo; |
3096 | struct drm_i915_gem_object *obj_priv; | 3232 | struct drm_i915_gem_object *obj_priv; |
3097 | int pipe = intel_crtc->pipe; | 3233 | int pipe = intel_crtc->pipe; |
3098 | int plane = intel_crtc->plane; | ||
3099 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; | 3234 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; |
3100 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; | 3235 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; |
3101 | uint32_t temp = I915_READ(control); | 3236 | uint32_t temp = I915_READ(control); |
@@ -3182,9 +3317,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3182 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 3317 | drm_gem_object_unreference(intel_crtc->cursor_bo); |
3183 | } | 3318 | } |
3184 | 3319 | ||
3185 | if ((IS_I965G(dev) || plane == 0)) | ||
3186 | intel_update_fbc(crtc, &crtc->mode); | ||
3187 | |||
3188 | mutex_unlock(&dev->struct_mutex); | 3320 | mutex_unlock(&dev->struct_mutex); |
3189 | 3321 | ||
3190 | intel_crtc->cursor_addr = addr; | 3322 | intel_crtc->cursor_addr = addr; |
@@ -3244,6 +3376,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
3244 | intel_crtc->lut_b[regno] = blue >> 8; | 3376 | intel_crtc->lut_b[regno] = blue >> 8; |
3245 | } | 3377 | } |
3246 | 3378 | ||
3379 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
3380 | u16 *blue, int regno) | ||
3381 | { | ||
3382 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3383 | |||
3384 | *red = intel_crtc->lut_r[regno] << 8; | ||
3385 | *green = intel_crtc->lut_g[regno] << 8; | ||
3386 | *blue = intel_crtc->lut_b[regno] << 8; | ||
3387 | } | ||
3388 | |||
3247 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 3389 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
3248 | u16 *blue, uint32_t size) | 3390 | u16 *blue, uint32_t size) |
3249 | { | 3391 | { |
@@ -3835,6 +3977,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = { | |||
3835 | .mode_set_base = intel_pipe_set_base, | 3977 | .mode_set_base = intel_pipe_set_base, |
3836 | .prepare = intel_crtc_prepare, | 3978 | .prepare = intel_crtc_prepare, |
3837 | .commit = intel_crtc_commit, | 3979 | .commit = intel_crtc_commit, |
3980 | .load_lut = intel_crtc_load_lut, | ||
3838 | }; | 3981 | }; |
3839 | 3982 | ||
3840 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 3983 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
@@ -4117,7 +4260,9 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4117 | * Disable clock gating reported to work incorrectly according to the | 4260 | * Disable clock gating reported to work incorrectly according to the |
4118 | * specs, but enable as much else as we can. | 4261 | * specs, but enable as much else as we can. |
4119 | */ | 4262 | */ |
4120 | if (IS_G4X(dev)) { | 4263 | if (IS_IGDNG(dev)) { |
4264 | return; | ||
4265 | } else if (IS_G4X(dev)) { | ||
4121 | uint32_t dspclk_gate; | 4266 | uint32_t dspclk_gate; |
4122 | I915_WRITE(RENCLK_GATE_D1, 0); | 4267 | I915_WRITE(RENCLK_GATE_D1, 0); |
4123 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | 4268 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | |
@@ -4205,7 +4350,9 @@ static void intel_init_display(struct drm_device *dev) | |||
4205 | i830_get_display_clock_speed; | 4350 | i830_get_display_clock_speed; |
4206 | 4351 | ||
4207 | /* For FIFO watermark updates */ | 4352 | /* For FIFO watermark updates */ |
4208 | if (IS_G4X(dev)) | 4353 | if (IS_IGDNG(dev)) |
4354 | dev_priv->display.update_wm = NULL; | ||
4355 | else if (IS_G4X(dev)) | ||
4209 | dev_priv->display.update_wm = g4x_update_wm; | 4356 | dev_priv->display.update_wm = g4x_update_wm; |
4210 | else if (IS_I965G(dev)) | 4357 | else if (IS_I965G(dev)) |
4211 | dev_priv->display.update_wm = i965_update_wm; | 4358 | dev_priv->display.update_wm = i965_update_wm; |
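Ironlake gets no FIFO watermark code yet: intel_init_display() leaves display.update_wm NULL and intel_update_watermarks() now bails out early instead of dereferencing it. A minimal standalone sketch of that optional-hook pattern (names are illustrative, not the driver's):

#include <stdio.h>

struct display_funcs {
	void (*update_wm)(void *dev);	/* may legitimately be NULL */
};

static void g4x_wm(void *dev) { puts("g4x watermarks"); }

static void update_watermarks(struct display_funcs *f, void *dev)
{
	if (!f->update_wm)		/* platform has no watermark hook */
		return;
	f->update_wm(dev);
}

int main(void)
{
	struct display_funcs ironlake = { .update_wm = NULL };
	struct display_funcs g4x      = { .update_wm = g4x_wm };

	update_watermarks(&ironlake, NULL);	/* silently skipped */
	update_watermarks(&g4x, NULL);		/* runs the hook */
	return 0;
}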
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f4856a510476..d83447557f9b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -400,7 +400,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | |||
400 | { | 400 | { |
401 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 401 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; |
402 | 402 | ||
403 | DRM_ERROR("i2c_init %s\n", name); | 403 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
404 | dp_priv->algo.running = false; | 404 | dp_priv->algo.running = false; |
405 | dp_priv->algo.address = 0; | 405 | dp_priv->algo.address = 0; |
406 | dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; | 406 | dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8aa4b7f30daa..ef61fe9507e2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); | |||
175 | extern void intelfb_restore(void); | 175 | extern void intelfb_restore(void); |
176 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 176 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
177 | u16 blue, int regno); | 177 | u16 blue, int regno); |
178 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
179 | u16 *blue, int regno); | ||
178 | 180 | ||
179 | extern int intel_framebuffer_create(struct drm_device *dev, | 181 | extern int intel_framebuffer_create(struct drm_device *dev, |
180 | struct drm_mode_fb_cmd *mode_cmd, | 182 | struct drm_mode_fb_cmd *mode_cmd, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index e85d7e9eed7d..2b0fe54cd92c 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = { | |||
60 | .fb_imageblit = cfb_imageblit, | 60 | .fb_imageblit = cfb_imageblit, |
61 | .fb_pan_display = drm_fb_helper_pan_display, | 61 | .fb_pan_display = drm_fb_helper_pan_display, |
62 | .fb_blank = drm_fb_helper_blank, | 62 | .fb_blank = drm_fb_helper_blank, |
63 | .fb_setcmap = drm_fb_helper_setcmap, | ||
63 | }; | 64 | }; |
64 | 65 | ||
65 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { | 66 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { |
66 | .gamma_set = intel_crtc_fb_gamma_set, | 67 | .gamma_set = intel_crtc_fb_gamma_set, |
68 | .gamma_get = intel_crtc_fb_gamma_get, | ||
67 | }; | 69 | }; |
68 | 70 | ||
69 | 71 | ||
@@ -123,6 +125,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
123 | struct device *device = &dev->pdev->dev; | 125 | struct device *device = &dev->pdev->dev; |
124 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; | 126 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; |
125 | 127 | ||
128 | /* we don't do packed 24bpp */ | ||
129 | if (surface_bpp == 24) | ||
130 | surface_bpp = 32; | ||
131 | |||
126 | mode_cmd.width = surface_width; | 132 | mode_cmd.width = surface_width; |
127 | mode_cmd.height = surface_height; | 133 | mode_cmd.height = surface_height; |
128 | 134 | ||
@@ -206,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
206 | 212 | ||
207 | // memset(info->screen_base, 0, size); | 213 | // memset(info->screen_base, 0, size); |
208 | 214 | ||
209 | drm_fb_helper_fill_fix(info, fb->pitch); | 215 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
210 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); | 216 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); |
211 | 217 | ||
212 | /* FIXME: we really shouldn't expose mmio space at all */ | 218 | /* FIXME: we really shouldn't expose mmio space at all */ |
@@ -244,7 +250,7 @@ int intelfb_probe(struct drm_device *dev) | |||
244 | int ret; | 250 | int ret; |
245 | 251 | ||
246 | DRM_DEBUG("\n"); | 252 | DRM_DEBUG("\n"); |
247 | ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); | 253 | ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); |
248 | return ret; | 254 | return ret; |
249 | } | 255 | } |
250 | EXPORT_SYMBOL(intelfb_probe); | 256 | EXPORT_SYMBOL(intelfb_probe); |
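With .fb_setcmap wired to drm_fb_helper_setcmap, the helper needs both directions of the CRTC palette, hence the new gamma_get hook next to gamma_set. The fbdev cmap is 16 bits per channel while the LUT stored here is 8 bits, so values are truncated on the way in and re-expanded on the way out; a small standalone illustration of that scaling:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t red16 = 0xabcd;		/* channel value from FBIOPUTCMAP */
	uint8_t  red8  = red16 >> 8;		/* what gamma_set stores in lut_r[] */
	uint16_t back  = (uint16_t)red8 << 8;	/* what gamma_get reports */

	printf("stored 0x%02x, reported 0x%04x\n", red8, back);
	return 0;
}

The surface_bpp == 24 check similarly promotes packed 24bpp requests to 32bpp before allocation, matching the "we don't do packed 24bpp" comment in the hunk.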
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index fa304e136010..663ab6de0b58 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -223,7 +223,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
223 | 223 | ||
224 | connector = &intel_output->base; | 224 | connector = &intel_output->base; |
225 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 225 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
226 | DRM_MODE_CONNECTOR_DVID); | 226 | DRM_MODE_CONNECTOR_HDMIA); |
227 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 227 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
228 | 228 | ||
229 | intel_output->type = INTEL_OUTPUT_HDMI; | 229 | intel_output->type = INTEL_OUTPUT_HDMI; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 98ae3d73577e..05598ae10c4b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -380,7 +380,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
380 | adjusted_mode->crtc_vblank_start + vsync_pos; | 380 | adjusted_mode->crtc_vblank_start + vsync_pos; |
381 | /* keep the vsync width constant */ | 381 | /* keep the vsync width constant */ |
382 | adjusted_mode->crtc_vsync_end = | 382 | adjusted_mode->crtc_vsync_end = |
383 | adjusted_mode->crtc_vblank_start + vsync_width; | 383 | adjusted_mode->crtc_vsync_start + vsync_width; |
384 | border = 1; | 384 | border = 1; |
385 | break; | 385 | break; |
386 | case DRM_MODE_SCALE_ASPECT: | 386 | case DRM_MODE_SCALE_ASPECT: |
@@ -526,6 +526,14 @@ out: | |||
526 | lvds_priv->pfit_control = pfit_control; | 526 | lvds_priv->pfit_control = pfit_control; |
527 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; | 527 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; |
528 | /* | 528 | /* |
529 | * If a border is needed, the LVDS_BORDER bit | ||
530 | * should be enabled. | ||
531 | */ | ||
532 | if (border) | ||
533 | dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE; | ||
534 | else | ||
535 | dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE); | ||
536 | /* | ||
529 | * XXX: It would be nice to support lower refresh rates on the | 537 | * XXX: It would be nice to support lower refresh rates on the |
530 | * panels to reduce power consumption, and perhaps match the | 538 | * panels to reduce power consumption, and perhaps match the |
531 | * user's requested refresh rate. | 539 | * user's requested refresh rate. |
@@ -656,6 +664,15 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
656 | return 0; | 664 | return 0; |
657 | } | 665 | } |
658 | 666 | ||
667 | /* | ||
668 | * Lid events. Note the use of 'modeset_on_lid': | ||
669 | * - we set it on lid close, and reset it on open | ||
670 | * - we use it as an "only once" bit (i.e. we ignore | ||
671 | * duplicate events where it was already properly | ||
672 | * set/reset) | ||
673 | * - the suspend/resume paths will also set it to | ||
674 | * zero, since they restore the mode ("lid open"). | ||
675 | */ | ||
659 | static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | 676 | static int intel_lid_notify(struct notifier_block *nb, unsigned long val, |
660 | void *unused) | 677 | void *unused) |
661 | { | 678 | { |
@@ -663,13 +680,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
663 | container_of(nb, struct drm_i915_private, lid_notifier); | 680 | container_of(nb, struct drm_i915_private, lid_notifier); |
664 | struct drm_device *dev = dev_priv->dev; | 681 | struct drm_device *dev = dev_priv->dev; |
665 | 682 | ||
666 | if (acpi_lid_open() && !dev_priv->suspended) { | 683 | if (!acpi_lid_open()) { |
667 | mutex_lock(&dev->mode_config.mutex); | 684 | dev_priv->modeset_on_lid = 1; |
668 | drm_helper_resume_force_mode(dev); | 685 | return NOTIFY_OK; |
669 | mutex_unlock(&dev->mode_config.mutex); | ||
670 | } | 686 | } |
671 | 687 | ||
672 | drm_sysfs_hotplug_event(dev_priv->dev); | 688 | if (!dev_priv->modeset_on_lid) |
689 | return NOTIFY_OK; | ||
690 | |||
691 | dev_priv->modeset_on_lid = 0; | ||
692 | |||
693 | mutex_lock(&dev->mode_config.mutex); | ||
694 | drm_helper_resume_force_mode(dev); | ||
695 | mutex_unlock(&dev->mode_config.mutex); | ||
673 | 696 | ||
674 | return NOTIFY_OK; | 697 | return NOTIFY_OK; |
675 | } | 698 | } |
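Per the comment block added above, the notifier now only forces a modeset on an open event that follows a close, so duplicate ACPI lid events no longer trigger redundant mode restores. A standalone sketch of that "only once" latch (names illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static bool modeset_on_lid;

static void lid_event(bool lid_open)
{
	if (!lid_open) {		/* close: arm the latch */
		modeset_on_lid = true;
		return;
	}
	if (!modeset_on_lid)		/* duplicate open: ignore */
		return;
	modeset_on_lid = false;		/* open after close: restore mode once */
	puts("force modeset");
}

int main(void)
{
	lid_event(false);	/* close */
	lid_event(true);	/* open -> "force modeset" */
	lid_event(true);	/* duplicate open -> ignored */
	return 0;
}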
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index c64eab493fb0..9ca917931afb 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1082,7 +1082,8 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo | |||
1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); |
1083 | 1083 | ||
1084 | /* Ensure TV refresh is close to desired refresh */ | 1084 | /* Ensure TV refresh is close to desired refresh */ |
1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10) | 1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) |
1086 | < 1000) | ||
1086 | return MODE_OK; | 1087 | return MODE_OK; |
1087 | return MODE_CLOCK_RANGE; | 1088 | return MODE_CLOCK_RANGE; |
1088 | } | 1089 | } |
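The TV mode table stores refresh rates in millihertz, so the old test compared millihertz against the whole-hertz value returned by drm_mode_vrefresh() and could essentially never pass; scaling the vrefresh by 1000 and widening the window to 1000 restores a 1 Hz tolerance. Worked example for a table entry stored as 59940 mHz:

	|59940 - 60 * 1000| = 60    -> within 1000, MODE_OK
	|59940 - 50 * 1000| = 9940  -> outside 1000, MODE_CLOCK_RANGE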
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 09a28923f46e..b5713eedd6e1 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \ | |||
49 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ | 49 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ |
50 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ | 50 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ |
51 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 51 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
52 | r600_blit_kms.o | 52 | r600_blit_kms.o radeon_pm.o |
53 | 53 | ||
54 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 54 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
55 | 55 | ||
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 5d402086bc47..c11ddddfb3b6 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -2314,7 +2314,7 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { | |||
2314 | UCHAR ucSS_Step; | 2314 | UCHAR ucSS_Step; |
2315 | UCHAR ucSS_Delay; | 2315 | UCHAR ucSS_Delay; |
2316 | UCHAR ucSS_Id; | 2316 | UCHAR ucSS_Id; |
2317 | UCHAR ucRecommandedRef_Div; | 2317 | UCHAR ucRecommendedRef_Div; |
2318 | UCHAR ucSS_Range; /* it was reserved for V11 */ | 2318 | UCHAR ucSS_Range; /* it was reserved for V11 */ |
2319 | } ATOM_SPREAD_SPECTRUM_ASSIGNMENT; | 2319 | } ATOM_SPREAD_SPECTRUM_ASSIGNMENT; |
2320 | 2320 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 6a015929deee..c15287a590ff 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -31,10 +31,6 @@ | |||
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
33 | 33 | ||
34 | /* evil but including atombios.h is much worse */ | ||
35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | ||
36 | SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing, | ||
37 | int32_t *pixel_clock); | ||
38 | static void atombios_overscan_setup(struct drm_crtc *crtc, | 34 | static void atombios_overscan_setup(struct drm_crtc *crtc, |
39 | struct drm_display_mode *mode, | 35 | struct drm_display_mode *mode, |
40 | struct drm_display_mode *adjusted_mode) | 36 | struct drm_display_mode *adjusted_mode) |
@@ -248,18 +244,18 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
248 | 244 | ||
249 | switch (mode) { | 245 | switch (mode) { |
250 | case DRM_MODE_DPMS_ON: | 246 | case DRM_MODE_DPMS_ON: |
247 | atombios_enable_crtc(crtc, 1); | ||
251 | if (ASIC_IS_DCE3(rdev)) | 248 | if (ASIC_IS_DCE3(rdev)) |
252 | atombios_enable_crtc_memreq(crtc, 1); | 249 | atombios_enable_crtc_memreq(crtc, 1); |
253 | atombios_enable_crtc(crtc, 1); | ||
254 | atombios_blank_crtc(crtc, 0); | 250 | atombios_blank_crtc(crtc, 0); |
255 | break; | 251 | break; |
256 | case DRM_MODE_DPMS_STANDBY: | 252 | case DRM_MODE_DPMS_STANDBY: |
257 | case DRM_MODE_DPMS_SUSPEND: | 253 | case DRM_MODE_DPMS_SUSPEND: |
258 | case DRM_MODE_DPMS_OFF: | 254 | case DRM_MODE_DPMS_OFF: |
259 | atombios_blank_crtc(crtc, 1); | 255 | atombios_blank_crtc(crtc, 1); |
260 | atombios_enable_crtc(crtc, 0); | ||
261 | if (ASIC_IS_DCE3(rdev)) | 256 | if (ASIC_IS_DCE3(rdev)) |
262 | atombios_enable_crtc_memreq(crtc, 0); | 257 | atombios_enable_crtc_memreq(crtc, 0); |
258 | atombios_enable_crtc(crtc, 0); | ||
263 | break; | 259 | break; |
264 | } | 260 | } |
265 | 261 | ||
@@ -270,59 +266,147 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
270 | 266 | ||
271 | static void | 267 | static void |
272 | atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, | 268 | atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, |
273 | SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param) | 269 | struct drm_display_mode *mode) |
274 | { | 270 | { |
271 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
275 | struct drm_device *dev = crtc->dev; | 272 | struct drm_device *dev = crtc->dev; |
276 | struct radeon_device *rdev = dev->dev_private; | 273 | struct radeon_device *rdev = dev->dev_private; |
277 | SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param; | 274 | SET_CRTC_USING_DTD_TIMING_PARAMETERS args; |
278 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); | 275 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); |
276 | u16 misc = 0; | ||
279 | 277 | ||
280 | conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size); | 278 | memset(&args, 0, sizeof(args)); |
281 | conv_param.usH_Blanking_Time = | 279 | args.usH_Size = cpu_to_le16(mode->crtc_hdisplay); |
282 | cpu_to_le16(crtc_param->usH_Blanking_Time); | 280 | args.usH_Blanking_Time = |
283 | conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size); | 281 | cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay); |
284 | conv_param.usV_Blanking_Time = | 282 | args.usV_Size = cpu_to_le16(mode->crtc_vdisplay); |
285 | cpu_to_le16(crtc_param->usV_Blanking_Time); | 283 | args.usV_Blanking_Time = |
286 | conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset); | 284 | cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay); |
287 | conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth); | 285 | args.usH_SyncOffset = |
288 | conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset); | 286 | cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay); |
289 | conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth); | 287 | args.usH_SyncWidth = |
290 | conv_param.susModeMiscInfo.usAccess = | 288 | cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); |
291 | cpu_to_le16(crtc_param->susModeMiscInfo.usAccess); | 289 | args.usV_SyncOffset = |
292 | conv_param.ucCRTC = crtc_param->ucCRTC; | 290 | cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay); |
291 | args.usV_SyncWidth = | ||
292 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); | ||
293 | /*args.ucH_Border = mode->hborder;*/ | ||
294 | /*args.ucV_Border = mode->vborder;*/ | ||
295 | |||
296 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
297 | misc |= ATOM_VSYNC_POLARITY; | ||
298 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
299 | misc |= ATOM_HSYNC_POLARITY; | ||
300 | if (mode->flags & DRM_MODE_FLAG_CSYNC) | ||
301 | misc |= ATOM_COMPOSITESYNC; | ||
302 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
303 | misc |= ATOM_INTERLACE; | ||
304 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
305 | misc |= ATOM_DOUBLE_CLOCK_MODE; | ||
306 | |||
307 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); | ||
308 | args.ucCRTC = radeon_crtc->crtc_id; | ||
293 | 309 | ||
294 | printk("executing set crtc dtd timing\n"); | 310 | printk("executing set crtc dtd timing\n"); |
295 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param); | 311 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
296 | } | 312 | } |
297 | 313 | ||
298 | void atombios_crtc_set_timing(struct drm_crtc *crtc, | 314 | static void atombios_crtc_set_timing(struct drm_crtc *crtc, |
299 | SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION * | 315 | struct drm_display_mode *mode) |
300 | crtc_param) | ||
301 | { | 316 | { |
317 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
302 | struct drm_device *dev = crtc->dev; | 318 | struct drm_device *dev = crtc->dev; |
303 | struct radeon_device *rdev = dev->dev_private; | 319 | struct radeon_device *rdev = dev->dev_private; |
304 | SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param; | 320 | SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args; |
305 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing); | 321 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing); |
322 | u16 misc = 0; | ||
306 | 323 | ||
307 | conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total); | 324 | memset(&args, 0, sizeof(args)); |
308 | conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp); | 325 | args.usH_Total = cpu_to_le16(mode->crtc_htotal); |
309 | conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart); | 326 | args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay); |
310 | conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth); | 327 | args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start); |
311 | conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total); | 328 | args.usH_SyncWidth = |
312 | conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp); | 329 | cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); |
313 | conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart); | 330 | args.usV_Total = cpu_to_le16(mode->crtc_vtotal); |
314 | conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth); | 331 | args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay); |
315 | conv_param.susModeMiscInfo.usAccess = | 332 | args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start); |
316 | cpu_to_le16(crtc_param->susModeMiscInfo.usAccess); | 333 | args.usV_SyncWidth = |
317 | conv_param.ucCRTC = crtc_param->ucCRTC; | 334 | cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); |
318 | conv_param.ucOverscanRight = crtc_param->ucOverscanRight; | 335 | |
319 | conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft; | 336 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
320 | conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom; | 337 | misc |= ATOM_VSYNC_POLARITY; |
321 | conv_param.ucOverscanTop = crtc_param->ucOverscanTop; | 338 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
322 | conv_param.ucReserved = crtc_param->ucReserved; | 339 | misc |= ATOM_HSYNC_POLARITY; |
340 | if (mode->flags & DRM_MODE_FLAG_CSYNC) | ||
341 | misc |= ATOM_COMPOSITESYNC; | ||
342 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
343 | misc |= ATOM_INTERLACE; | ||
344 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
345 | misc |= ATOM_DOUBLE_CLOCK_MODE; | ||
346 | |||
347 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); | ||
348 | args.ucCRTC = radeon_crtc->crtc_id; | ||
323 | 349 | ||
324 | printk("executing set crtc timing\n"); | 350 | printk("executing set crtc timing\n"); |
325 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param); | 351 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
352 | } | ||
353 | |||
354 | static void atombios_set_ss(struct drm_crtc *crtc, int enable) | ||
355 | { | ||
356 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
357 | struct drm_device *dev = crtc->dev; | ||
358 | struct radeon_device *rdev = dev->dev_private; | ||
359 | struct drm_encoder *encoder = NULL; | ||
360 | struct radeon_encoder *radeon_encoder = NULL; | ||
361 | struct radeon_encoder_atom_dig *dig = NULL; | ||
362 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); | ||
363 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args; | ||
364 | ENABLE_LVDS_SS_PARAMETERS legacy_args; | ||
365 | uint16_t percentage = 0; | ||
366 | uint8_t type = 0, step = 0, delay = 0, range = 0; | ||
367 | |||
368 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
369 | if (encoder->crtc == crtc) { | ||
370 | radeon_encoder = to_radeon_encoder(encoder); | ||
371 | /* only enable spread spectrum on LVDS */ | ||
372 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
373 | dig = radeon_encoder->enc_priv; | ||
374 | if (dig && dig->ss) { | ||
375 | percentage = dig->ss->percentage; | ||
376 | type = dig->ss->type; | ||
377 | step = dig->ss->step; | ||
378 | delay = dig->ss->delay; | ||
379 | range = dig->ss->range; | ||
380 | } else if (enable) | ||
381 | return; | ||
382 | } else if (enable) | ||
383 | return; | ||
384 | break; | ||
385 | } | ||
386 | } | ||
387 | |||
388 | if (!radeon_encoder) | ||
389 | return; | ||
390 | |||
391 | if (ASIC_IS_AVIVO(rdev)) { | ||
392 | memset(&args, 0, sizeof(args)); | ||
393 | args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | ||
394 | args.ucSpreadSpectrumType = type; | ||
395 | args.ucSpreadSpectrumStep = step; | ||
396 | args.ucSpreadSpectrumDelay = delay; | ||
397 | args.ucSpreadSpectrumRange = range; | ||
398 | args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | ||
399 | args.ucEnable = enable; | ||
400 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
401 | } else { | ||
402 | memset(&legacy_args, 0, sizeof(legacy_args)); | ||
403 | legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | ||
404 | legacy_args.ucSpreadSpectrumType = type; | ||
405 | legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; | ||
406 | legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; | ||
407 | legacy_args.ucEnable = enable; | ||
408 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args); | ||
409 | } | ||
326 | } | 410 | } |
327 | 411 | ||
328 | void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | 412 | void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) |
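The timing helpers now take a struct drm_display_mode directly and derive the Atom table fields from its crtc_* values (for typical modes crtc_hblank_end equals crtc_htotal). Standalone arithmetic for the horizontal DTD fields, using the standard 1920x1080@60 timings as the example mode:

#include <stdio.h>

int main(void)
{
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;

	printf("usH_Blanking_Time = %d\n", htotal - hdisplay);		/* 280 */
	printf("usH_SyncOffset    = %d\n", hsync_start - hdisplay);	/* 88  */
	printf("usH_SyncWidth     = %d\n", hsync_end - hsync_start);	/* 44  */
	return 0;
}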
@@ -333,12 +417,13 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
333 | struct drm_encoder *encoder = NULL; | 417 | struct drm_encoder *encoder = NULL; |
334 | struct radeon_encoder *radeon_encoder = NULL; | 418 | struct radeon_encoder *radeon_encoder = NULL; |
335 | uint8_t frev, crev; | 419 | uint8_t frev, crev; |
336 | int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | 420 | int index; |
337 | SET_PIXEL_CLOCK_PS_ALLOCATION args; | 421 | SET_PIXEL_CLOCK_PS_ALLOCATION args; |
338 | PIXEL_CLOCK_PARAMETERS *spc1_ptr; | 422 | PIXEL_CLOCK_PARAMETERS *spc1_ptr; |
339 | PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr; | 423 | PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr; |
340 | PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr; | 424 | PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr; |
341 | uint32_t sclock = mode->clock; | 425 | uint32_t pll_clock = mode->clock; |
426 | uint32_t adjusted_clock; | ||
342 | uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; | 427 | uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; |
343 | struct radeon_pll *pll; | 428 | struct radeon_pll *pll; |
344 | int pll_flags = 0; | 429 | int pll_flags = 0; |
@@ -346,8 +431,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
346 | memset(&args, 0, sizeof(args)); | 431 | memset(&args, 0, sizeof(args)); |
347 | 432 | ||
348 | if (ASIC_IS_AVIVO(rdev)) { | 433 | if (ASIC_IS_AVIVO(rdev)) { |
349 | uint32_t ss_cntl; | ||
350 | |||
351 | if ((rdev->family == CHIP_RS600) || | 434 | if ((rdev->family == CHIP_RS600) || |
352 | (rdev->family == CHIP_RS690) || | 435 | (rdev->family == CHIP_RS690) || |
353 | (rdev->family == CHIP_RS740)) | 436 | (rdev->family == CHIP_RS740)) |
@@ -358,15 +441,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
358 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 441 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
359 | else | 442 | else |
360 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 443 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
361 | |||
362 | /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */ | ||
363 | if (radeon_crtc->crtc_id == 0) { | ||
364 | ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL); | ||
365 | WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1); | ||
366 | } else { | ||
367 | ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL); | ||
368 | WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1); | ||
369 | } | ||
370 | } else { | 444 | } else { |
371 | pll_flags |= RADEON_PLL_LEGACY; | 445 | pll_flags |= RADEON_PLL_LEGACY; |
372 | 446 | ||
@@ -393,14 +467,43 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
393 | } | 467 | } |
394 | } | 468 | } |
395 | 469 | ||
470 | /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock | ||
471 | * accordingly based on the encoder/transmitter to work around | ||
472 | * special hw requirements. | ||
473 | */ | ||
474 | if (ASIC_IS_DCE3(rdev)) { | ||
475 | ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; | ||
476 | |||
477 | if (!encoder) | ||
478 | return; | ||
479 | |||
480 | memset(&adjust_pll_args, 0, sizeof(adjust_pll_args)); | ||
481 | adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10); | ||
482 | adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id; | ||
483 | adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
484 | |||
485 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); | ||
486 | atom_execute_table(rdev->mode_info.atom_context, | ||
487 | index, (uint32_t *)&adjust_pll_args); | ||
488 | adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; | ||
489 | } else { | ||
490 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | ||
491 | if (ASIC_IS_AVIVO(rdev) && | ||
492 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) | ||
493 | adjusted_clock = mode->clock * 2; | ||
494 | else | ||
495 | adjusted_clock = mode->clock; | ||
496 | } | ||
497 | |||
396 | if (radeon_crtc->crtc_id == 0) | 498 | if (radeon_crtc->crtc_id == 0) |
397 | pll = &rdev->clock.p1pll; | 499 | pll = &rdev->clock.p1pll; |
398 | else | 500 | else |
399 | pll = &rdev->clock.p2pll; | 501 | pll = &rdev->clock.p2pll; |
400 | 502 | ||
401 | radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div, | 503 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, |
402 | &ref_div, &post_div, pll_flags); | 504 | &ref_div, &post_div, pll_flags); |
403 | 505 | ||
506 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | ||
404 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 507 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
405 | &crev); | 508 | &crev); |
406 | 509 | ||
@@ -409,7 +512,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
409 | switch (crev) { | 512 | switch (crev) { |
410 | case 1: | 513 | case 1: |
411 | spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; | 514 | spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; |
412 | spc1_ptr->usPixelClock = cpu_to_le16(sclock); | 515 | spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); |
413 | spc1_ptr->usRefDiv = cpu_to_le16(ref_div); | 516 | spc1_ptr->usRefDiv = cpu_to_le16(ref_div); |
414 | spc1_ptr->usFbDiv = cpu_to_le16(fb_div); | 517 | spc1_ptr->usFbDiv = cpu_to_le16(fb_div); |
415 | spc1_ptr->ucFracFbDiv = frac_fb_div; | 518 | spc1_ptr->ucFracFbDiv = frac_fb_div; |
@@ -422,7 +525,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
422 | case 2: | 525 | case 2: |
423 | spc2_ptr = | 526 | spc2_ptr = |
424 | (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; | 527 | (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; |
425 | spc2_ptr->usPixelClock = cpu_to_le16(sclock); | 528 | spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); |
426 | spc2_ptr->usRefDiv = cpu_to_le16(ref_div); | 529 | spc2_ptr->usRefDiv = cpu_to_le16(ref_div); |
427 | spc2_ptr->usFbDiv = cpu_to_le16(fb_div); | 530 | spc2_ptr->usFbDiv = cpu_to_le16(fb_div); |
428 | spc2_ptr->ucFracFbDiv = frac_fb_div; | 531 | spc2_ptr->ucFracFbDiv = frac_fb_div; |
@@ -437,7 +540,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
437 | return; | 540 | return; |
438 | spc3_ptr = | 541 | spc3_ptr = |
439 | (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; | 542 | (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; |
440 | spc3_ptr->usPixelClock = cpu_to_le16(sclock); | 543 | spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); |
441 | spc3_ptr->usRefDiv = cpu_to_le16(ref_div); | 544 | spc3_ptr->usRefDiv = cpu_to_le16(ref_div); |
442 | spc3_ptr->usFbDiv = cpu_to_le16(fb_div); | 545 | spc3_ptr->usFbDiv = cpu_to_le16(fb_div); |
443 | spc3_ptr->ucFracFbDiv = frac_fb_div; | 546 | spc3_ptr->ucFracFbDiv = frac_fb_div; |
@@ -527,6 +630,16 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
527 | WREG32(AVIVO_D1VGA_CONTROL, 0); | 630 | WREG32(AVIVO_D1VGA_CONTROL, 0); |
528 | else | 631 | else |
529 | WREG32(AVIVO_D2VGA_CONTROL, 0); | 632 | WREG32(AVIVO_D2VGA_CONTROL, 0); |
633 | |||
634 | if (rdev->family >= CHIP_RV770) { | ||
635 | if (radeon_crtc->crtc_id) { | ||
636 | WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | ||
637 | WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | ||
638 | } else { | ||
639 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); | ||
640 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); | ||
641 | } | ||
642 | } | ||
530 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 643 | WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
531 | (u32) fb_location); | 644 | (u32) fb_location); |
532 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + | 645 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + |
@@ -563,6 +676,10 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
563 | radeon_fb = to_radeon_framebuffer(old_fb); | 676 | radeon_fb = to_radeon_framebuffer(old_fb); |
564 | radeon_gem_object_unpin(radeon_fb->obj); | 677 | radeon_gem_object_unpin(radeon_fb->obj); |
565 | } | 678 | } |
679 | |||
680 | /* Bytes per pixel may have changed */ | ||
681 | radeon_bandwidth_update(rdev); | ||
682 | |||
566 | return 0; | 683 | return 0; |
567 | } | 684 | } |
568 | 685 | ||
@@ -574,134 +691,24 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
574 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 691 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
575 | struct drm_device *dev = crtc->dev; | 692 | struct drm_device *dev = crtc->dev; |
576 | struct radeon_device *rdev = dev->dev_private; | 693 | struct radeon_device *rdev = dev->dev_private; |
577 | struct drm_encoder *encoder; | ||
578 | SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing; | ||
579 | int need_tv_timings = 0; | ||
580 | bool ret; | ||
581 | 694 | ||
582 | /* TODO color tiling */ | 695 | /* TODO color tiling */ |
583 | memset(&crtc_timing, 0, sizeof(crtc_timing)); | ||
584 | |||
585 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
586 | /* find tv std */ | ||
587 | if (encoder->crtc == crtc) { | ||
588 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
589 | |||
590 | if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { | ||
591 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; | ||
592 | if (tv_dac) { | ||
593 | if (tv_dac->tv_std == TV_STD_NTSC || | ||
594 | tv_dac->tv_std == TV_STD_NTSC_J || | ||
595 | tv_dac->tv_std == TV_STD_PAL_M) | ||
596 | need_tv_timings = 1; | ||
597 | else | ||
598 | need_tv_timings = 2; | ||
599 | break; | ||
600 | } | ||
601 | } | ||
602 | } | ||
603 | } | ||
604 | |||
605 | crtc_timing.ucCRTC = radeon_crtc->crtc_id; | ||
606 | if (need_tv_timings) { | ||
607 | ret = radeon_atom_get_tv_timings(rdev, need_tv_timings - 1, | ||
608 | &crtc_timing, &adjusted_mode->clock); | ||
609 | if (ret == false) | ||
610 | need_tv_timings = 0; | ||
611 | } | ||
612 | |||
613 | if (!need_tv_timings) { | ||
614 | crtc_timing.usH_Total = adjusted_mode->crtc_htotal; | ||
615 | crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay; | ||
616 | crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start; | ||
617 | crtc_timing.usH_SyncWidth = | ||
618 | adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; | ||
619 | |||
620 | crtc_timing.usV_Total = adjusted_mode->crtc_vtotal; | ||
621 | crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay; | ||
622 | crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start; | ||
623 | crtc_timing.usV_SyncWidth = | ||
624 | adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; | ||
625 | |||
626 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
627 | crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY; | ||
628 | |||
629 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
630 | crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY; | ||
631 | |||
632 | if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC) | ||
633 | crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC; | ||
634 | |||
635 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
636 | crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE; | ||
637 | |||
638 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
639 | crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE; | ||
640 | } | ||
641 | 696 | ||
697 | atombios_set_ss(crtc, 0); | ||
642 | atombios_crtc_set_pll(crtc, adjusted_mode); | 698 | atombios_crtc_set_pll(crtc, adjusted_mode); |
643 | atombios_crtc_set_timing(crtc, &crtc_timing); | 699 | atombios_set_ss(crtc, 1); |
700 | atombios_crtc_set_timing(crtc, adjusted_mode); | ||
644 | 701 | ||
645 | if (ASIC_IS_AVIVO(rdev)) | 702 | if (ASIC_IS_AVIVO(rdev)) |
646 | atombios_crtc_set_base(crtc, x, y, old_fb); | 703 | atombios_crtc_set_base(crtc, x, y, old_fb); |
647 | else { | 704 | else { |
648 | if (radeon_crtc->crtc_id == 0) { | 705 | if (radeon_crtc->crtc_id == 0) |
649 | SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing; | 706 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
650 | memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing)); | ||
651 | |||
652 | /* setup FP shadow regs on R4xx */ | ||
653 | crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id; | ||
654 | crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay; | ||
655 | crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay; | ||
656 | crtc_dtd_timing.usH_Blanking_Time = | ||
657 | adjusted_mode->crtc_hblank_end - | ||
658 | adjusted_mode->crtc_hdisplay; | ||
659 | crtc_dtd_timing.usV_Blanking_Time = | ||
660 | adjusted_mode->crtc_vblank_end - | ||
661 | adjusted_mode->crtc_vdisplay; | ||
662 | crtc_dtd_timing.usH_SyncOffset = | ||
663 | adjusted_mode->crtc_hsync_start - | ||
664 | adjusted_mode->crtc_hdisplay; | ||
665 | crtc_dtd_timing.usV_SyncOffset = | ||
666 | adjusted_mode->crtc_vsync_start - | ||
667 | adjusted_mode->crtc_vdisplay; | ||
668 | crtc_dtd_timing.usH_SyncWidth = | ||
669 | adjusted_mode->crtc_hsync_end - | ||
670 | adjusted_mode->crtc_hsync_start; | ||
671 | crtc_dtd_timing.usV_SyncWidth = | ||
672 | adjusted_mode->crtc_vsync_end - | ||
673 | adjusted_mode->crtc_vsync_start; | ||
674 | /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */ | ||
675 | /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */ | ||
676 | |||
677 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
678 | crtc_dtd_timing.susModeMiscInfo.usAccess |= | ||
679 | ATOM_VSYNC_POLARITY; | ||
680 | |||
681 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
682 | crtc_dtd_timing.susModeMiscInfo.usAccess |= | ||
683 | ATOM_HSYNC_POLARITY; | ||
684 | |||
685 | if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC) | ||
686 | crtc_dtd_timing.susModeMiscInfo.usAccess |= | ||
687 | ATOM_COMPOSITESYNC; | ||
688 | |||
689 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
690 | crtc_dtd_timing.susModeMiscInfo.usAccess |= | ||
691 | ATOM_INTERLACE; | ||
692 | |||
693 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
694 | crtc_dtd_timing.susModeMiscInfo.usAccess |= | ||
695 | ATOM_DOUBLE_CLOCK_MODE; | ||
696 | |||
697 | atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing); | ||
698 | } | ||
699 | radeon_crtc_set_base(crtc, x, y, old_fb); | 707 | radeon_crtc_set_base(crtc, x, y, old_fb); |
700 | radeon_legacy_atom_set_surface(crtc); | 708 | radeon_legacy_atom_set_surface(crtc); |
701 | } | 709 | } |
702 | atombios_overscan_setup(crtc, mode, adjusted_mode); | 710 | atombios_overscan_setup(crtc, mode, adjusted_mode); |
703 | atombios_scaler_setup(crtc); | 711 | atombios_scaler_setup(crtc); |
704 | radeon_bandwidth_update(rdev); | ||
705 | return 0; | 712 | return 0; |
706 | } | 713 | } |
707 | 714 | ||
@@ -733,6 +740,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { | |||
733 | .mode_set_base = atombios_crtc_set_base, | 740 | .mode_set_base = atombios_crtc_set_base, |
734 | .prepare = atombios_crtc_prepare, | 741 | .prepare = atombios_crtc_prepare, |
735 | .commit = atombios_crtc_commit, | 742 | .commit = atombios_crtc_commit, |
743 | .load_lut = radeon_crtc_load_lut, | ||
736 | }; | 744 | }; |
737 | 745 | ||
738 | void radeon_atombios_init_crtc(struct drm_device *dev, | 746 | void radeon_atombios_init_crtc(struct drm_device *dev, |
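Taken together, the atombios_crtc.c hunks replace the hand-packed SET_CRTC_TIMING_PARAMETERS and SET_CRTC_USING_DTD_TIMING_PARAMETERS structures with atombios_crtc_set_timing() and atombios_set_crtc_dtd_timing() helpers that take the drm_display_mode directly, bracket the PLL programming with spread-spectrum disable/enable, and hook up .load_lut. The translation that the removed blocks performed, DRM sync-polarity and scan flags into ATOM misc bits, still has to happen inside those helpers; a compilable sketch of that mapping, with invented bit values rather than the real drm_mode.h/atombios.h constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values; the real ones come from drm_mode.h and atombios.h. */
#define MODE_FLAG_NHSYNC     (1u << 0)
#define MODE_FLAG_NVSYNC     (1u << 1)
#define MODE_FLAG_INTERLACE  (1u << 2)
#define ATOM_HSYNC_POLARITY  (1u << 0)
#define ATOM_VSYNC_POLARITY  (1u << 1)
#define ATOM_INTERLACE       (1u << 2)

/* Fold DRM mode flags into an ATOM "misc info" word, as the removed
 * open-coded blocks did before each table call. */
static uint16_t atom_misc_from_flags(uint32_t flags)
{
        uint16_t misc = 0;

        if (flags & MODE_FLAG_NHSYNC)
                misc |= ATOM_HSYNC_POLARITY;
        if (flags & MODE_FLAG_NVSYNC)
                misc |= ATOM_VSYNC_POLARITY;
        if (flags & MODE_FLAG_INTERLACE)
                misc |= ATOM_INTERLACE;
        return misc;
}

int main(void)
{
        printf("misc = 0x%04x\n",
               atom_misc_from_flags(MODE_FLAG_NVSYNC | MODE_FLAG_INTERLACE));
        return 0;
}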
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index fb211e585dea..0d79577c1576 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c | |||
@@ -561,7 +561,7 @@ struct table { | |||
561 | char *gpu_prefix; | 561 | char *gpu_prefix; |
562 | }; | 562 | }; |
563 | 563 | ||
564 | struct offset *offset_new(unsigned o) | 564 | static struct offset *offset_new(unsigned o) |
565 | { | 565 | { |
566 | struct offset *offset; | 566 | struct offset *offset; |
567 | 567 | ||
@@ -573,12 +573,12 @@ struct offset *offset_new(unsigned o) | |||
573 | return offset; | 573 | return offset; |
574 | } | 574 | } |
575 | 575 | ||
576 | void table_offset_add(struct table *t, struct offset *offset) | 576 | static void table_offset_add(struct table *t, struct offset *offset) |
577 | { | 577 | { |
578 | list_add_tail(&offset->list, &t->offsets); | 578 | list_add_tail(&offset->list, &t->offsets); |
579 | } | 579 | } |
580 | 580 | ||
581 | void table_init(struct table *t) | 581 | static void table_init(struct table *t) |
582 | { | 582 | { |
583 | INIT_LIST_HEAD(&t->offsets); | 583 | INIT_LIST_HEAD(&t->offsets); |
584 | t->offset_max = 0; | 584 | t->offset_max = 0; |
@@ -586,7 +586,7 @@ void table_init(struct table *t) | |||
586 | t->table = NULL; | 586 | t->table = NULL; |
587 | } | 587 | } |
588 | 588 | ||
589 | void table_print(struct table *t) | 589 | static void table_print(struct table *t) |
590 | { | 590 | { |
591 | unsigned nlloop, i, j, n, c, id; | 591 | unsigned nlloop, i, j, n, c, id; |
592 | 592 | ||
@@ -611,7 +611,7 @@ void table_print(struct table *t) | |||
611 | printf("};\n"); | 611 | printf("};\n"); |
612 | } | 612 | } |
613 | 613 | ||
614 | int table_build(struct table *t) | 614 | static int table_build(struct table *t) |
615 | { | 615 | { |
616 | struct offset *offset; | 616 | struct offset *offset; |
617 | unsigned i, m; | 617 | unsigned i, m; |
@@ -631,7 +631,7 @@ int table_build(struct table *t) | |||
631 | } | 631 | } |
632 | 632 | ||
633 | static char gpu_name[10]; | 633 | static char gpu_name[10]; |
634 | int parser_auth(struct table *t, const char *filename) | 634 | static int parser_auth(struct table *t, const char *filename) |
635 | { | 635 | { |
636 | FILE *file; | 636 | FILE *file; |
637 | regex_t mask_rex; | 637 | regex_t mask_rex; |
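mkregtable is a build-time host tool compiled from this single file, so every helper that is not used outside it gains static linkage; that keeps the symbols file-local, silences missing-prototype warnings, and lets the compiler inline or discard unused helpers. A trivial illustration of the same pattern:

#include <stdio.h>

/* File-local helper: internal linkage, so no prototype in a header is
 * needed and the compiler is free to inline or drop it. */
static unsigned table_bytes(unsigned entries, unsigned entry_size)
{
        return entries * entry_size;
}

int main(void)
{
        printf("%u bytes\n", table_bytes(64, 4));
        return 0;
}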
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e6cce24de802..c9e93eabcf16 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -32,6 +32,9 @@ | |||
32 | #include "radeon_reg.h" | 32 | #include "radeon_reg.h" |
33 | #include "radeon.h" | 33 | #include "radeon.h" |
34 | #include "r100d.h" | 34 | #include "r100d.h" |
35 | #include "rs100d.h" | ||
36 | #include "rv200d.h" | ||
37 | #include "rv250d.h" | ||
35 | 38 | ||
36 | #include <linux/firmware.h> | 39 | #include <linux/firmware.h> |
37 | #include <linux/platform_device.h> | 40 | #include <linux/platform_device.h> |
@@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
60 | 63 | ||
61 | /* This file gathers functions specific to: | 64 | /* This file gathers functions specific to: |
62 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 65 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
63 | * | ||
64 | * Some of these functions might be used by newer ASICs. | ||
65 | */ | 66 | */ |
66 | int r200_init(struct radeon_device *rdev); | ||
67 | void r100_hdp_reset(struct radeon_device *rdev); | ||
68 | void r100_gpu_init(struct radeon_device *rdev); | ||
69 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
70 | int r100_mc_wait_for_idle(struct radeon_device *rdev); | ||
71 | void r100_gpu_wait_for_vsync(struct radeon_device *rdev); | ||
72 | void r100_gpu_wait_for_vsync2(struct radeon_device *rdev); | ||
73 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); | ||
74 | |||
75 | 67 | ||
76 | /* | 68 | /* |
77 | * PCI GART | 69 | * PCI GART |
@@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev) | |||
152 | radeon_gart_fini(rdev); | 144 | radeon_gart_fini(rdev); |
153 | } | 145 | } |
154 | 146 | ||
155 | |||
156 | /* | ||
157 | * MC | ||
158 | */ | ||
159 | void r100_mc_disable_clients(struct radeon_device *rdev) | ||
160 | { | ||
161 | uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl; | ||
162 | |||
163 | /* FIXME: is this function correct for rs100,rs200,rs300 ? */ | ||
164 | if (r100_gui_wait_for_idle(rdev)) { | ||
165 | printk(KERN_WARNING "Failed to wait GUI idle while " | ||
166 | "programming pipes. Bad things might happen.\n"); | ||
167 | } | ||
168 | |||
169 | /* stop display and memory access */ | ||
170 | ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL); | ||
171 | WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE); | ||
172 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | ||
173 | WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS); | ||
174 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); | ||
175 | |||
176 | r100_gpu_wait_for_vsync(rdev); | ||
177 | |||
178 | WREG32(RADEON_CRTC_GEN_CNTL, | ||
179 | (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) | | ||
180 | RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN); | ||
181 | |||
182 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | ||
183 | crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); | ||
184 | |||
185 | r100_gpu_wait_for_vsync2(rdev); | ||
186 | WREG32(RADEON_CRTC2_GEN_CNTL, | ||
187 | (crtc2_gen_cntl & | ||
188 | ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) | | ||
189 | RADEON_CRTC2_DISP_REQ_EN_B); | ||
190 | } | ||
191 | |||
192 | udelay(500); | ||
193 | } | ||
194 | |||
195 | void r100_mc_setup(struct radeon_device *rdev) | ||
196 | { | ||
197 | uint32_t tmp; | ||
198 | int r; | ||
199 | |||
200 | r = r100_debugfs_mc_info_init(rdev); | ||
201 | if (r) { | ||
202 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); | ||
203 | } | ||
204 | /* Write VRAM size in case we are limiting it */ | ||
205 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
206 | /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, | ||
207 | * if the aperture is 64MB but we have 32MB VRAM | ||
208 | * we report only 32MB VRAM but we have to set MC_FB_LOCATION | ||
209 | * to 64MB, otherwise the gpu accidentially dies */ | ||
210 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
211 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | ||
212 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | ||
213 | WREG32(RADEON_MC_FB_LOCATION, tmp); | ||
214 | |||
215 | /* Enable bus mastering */ | ||
216 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
217 | WREG32(RADEON_BUS_CNTL, tmp); | ||
218 | |||
219 | if (rdev->flags & RADEON_IS_AGP) { | ||
220 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | ||
221 | tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16); | ||
222 | tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16); | ||
223 | WREG32(RADEON_MC_AGP_LOCATION, tmp); | ||
224 | WREG32(RADEON_AGP_BASE, rdev->mc.agp_base); | ||
225 | } else { | ||
226 | WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
227 | WREG32(RADEON_AGP_BASE, 0); | ||
228 | } | ||
229 | |||
230 | tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; | ||
231 | tmp |= (7 << 28); | ||
232 | WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
233 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
234 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
235 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
236 | } | ||
237 | |||
238 | int r100_mc_init(struct radeon_device *rdev) | ||
239 | { | ||
240 | int r; | ||
241 | |||
242 | if (r100_debugfs_rbbm_init(rdev)) { | ||
243 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
244 | } | ||
245 | |||
246 | r100_gpu_init(rdev); | ||
247 | /* Disable gart which also disable out of gart access */ | ||
248 | r100_pci_gart_disable(rdev); | ||
249 | |||
250 | /* Setup GPU memory space */ | ||
251 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
252 | if (rdev->flags & RADEON_IS_AGP) { | ||
253 | r = radeon_agp_init(rdev); | ||
254 | if (r) { | ||
255 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
256 | rdev->flags &= ~RADEON_IS_AGP; | ||
257 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
258 | } else { | ||
259 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
260 | } | ||
261 | } | ||
262 | r = radeon_mc_setup(rdev); | ||
263 | if (r) { | ||
264 | return r; | ||
265 | } | ||
266 | |||
267 | r100_mc_disable_clients(rdev); | ||
268 | if (r100_mc_wait_for_idle(rdev)) { | ||
269 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
270 | "programming pipes. Bad things might happen.\n"); | ||
271 | } | ||
272 | |||
273 | r100_mc_setup(rdev); | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | void r100_mc_fini(struct radeon_device *rdev) | ||
278 | { | ||
279 | } | ||
280 | |||
281 | |||
282 | /* | ||
283 | * Interrupts | ||
284 | */ | ||
285 | int r100_irq_set(struct radeon_device *rdev) | 147 | int r100_irq_set(struct radeon_device *rdev) |
286 | { | 148 | { |
287 | uint32_t tmp = 0; | 149 | uint32_t tmp = 0; |
@@ -324,7 +186,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev) | |||
324 | 186 | ||
325 | int r100_irq_process(struct radeon_device *rdev) | 187 | int r100_irq_process(struct radeon_device *rdev) |
326 | { | 188 | { |
327 | uint32_t status; | 189 | uint32_t status, msi_rearm; |
328 | 190 | ||
329 | status = r100_irq_ack(rdev); | 191 | status = r100_irq_ack(rdev); |
330 | if (!status) { | 192 | if (!status) { |
@@ -347,6 +209,21 @@ int r100_irq_process(struct radeon_device *rdev) | |||
347 | } | 209 | } |
348 | status = r100_irq_ack(rdev); | 210 | status = r100_irq_ack(rdev); |
349 | } | 211 | } |
212 | if (rdev->msi_enabled) { | ||
213 | switch (rdev->family) { | ||
214 | case CHIP_RS400: | ||
215 | case CHIP_RS480: | ||
216 | msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM; | ||
217 | WREG32(RADEON_AIC_CNTL, msi_rearm); | ||
218 | WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); | ||
219 | break; | ||
220 | default: | ||
221 | msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; | ||
222 | WREG32(RADEON_MSI_REARM_EN, msi_rearm); | ||
223 | WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); | ||
224 | break; | ||
225 | } | ||
226 | } | ||
350 | return IRQ_HANDLED; | 227 | return IRQ_HANDLED; |
351 | } | 228 | } |
352 | 229 | ||
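The block added at the end of r100_irq_process() re-arms message-signalled interrupts: on these chips the re-arm bit has to be pulsed low and then high again after the handler has run, or no further MSI is generated; RS400/RS480 keep the bit in AIC_CNTL while the other parts use MSI_REARM_EN. A stripped-down sketch of the toggle, using a fake register variable instead of MMIO:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;                   /* stand-in for the MSI re-arm register */
#define MSI_REARM_BIT (1u << 0)             /* illustrative bit position */

static uint32_t rreg32(void) { return fake_reg; }
static void wreg32(uint32_t v) { fake_reg = v; printf("write 0x%08x\n", v); }

/* Pulse the re-arm bit low, then high, so the next MSI can fire. */
static void msi_rearm(void)
{
        uint32_t v = rreg32() & ~MSI_REARM_BIT;

        wreg32(v);
        wreg32(v | MSI_REARM_BIT);
}

int main(void)
{
        fake_reg = MSI_REARM_BIT;
        msi_rearm();
        return 0;
}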
@@ -358,10 +235,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
358 | return RREG32(RADEON_CRTC2_CRNT_FRAME); | 235 | return RREG32(RADEON_CRTC2_CRNT_FRAME); |
359 | } | 236 | } |
360 | 237 | ||
361 | |||
362 | /* | ||
363 | * Fence emission | ||
364 | */ | ||
365 | void r100_fence_ring_emit(struct radeon_device *rdev, | 238 | void r100_fence_ring_emit(struct radeon_device *rdev, |
366 | struct radeon_fence *fence) | 239 | struct radeon_fence *fence) |
367 | { | 240 | { |
@@ -377,16 +250,12 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
377 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 250 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
378 | } | 251 | } |
379 | 252 | ||
380 | |||
381 | /* | ||
382 | * Writeback | ||
383 | */ | ||
384 | int r100_wb_init(struct radeon_device *rdev) | 253 | int r100_wb_init(struct radeon_device *rdev) |
385 | { | 254 | { |
386 | int r; | 255 | int r; |
387 | 256 | ||
388 | if (rdev->wb.wb_obj == NULL) { | 257 | if (rdev->wb.wb_obj == NULL) { |
389 | r = radeon_object_create(rdev, NULL, 4096, | 258 | r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, |
390 | true, | 259 | true, |
391 | RADEON_GEM_DOMAIN_GTT, | 260 | RADEON_GEM_DOMAIN_GTT, |
392 | false, &rdev->wb.wb_obj); | 261 | false, &rdev->wb.wb_obj); |
@@ -504,10 +373,6 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
504 | return r; | 373 | return r; |
505 | } | 374 | } |
506 | 375 | ||
507 | |||
508 | /* | ||
509 | * CP | ||
510 | */ | ||
511 | static int r100_cp_wait_for_idle(struct radeon_device *rdev) | 376 | static int r100_cp_wait_for_idle(struct radeon_device *rdev) |
512 | { | 377 | { |
513 | unsigned i; | 378 | unsigned i; |
@@ -612,6 +477,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) | |||
612 | } | 477 | } |
613 | return err; | 478 | return err; |
614 | } | 479 | } |
480 | |||
615 | static void r100_cp_load_microcode(struct radeon_device *rdev) | 481 | static void r100_cp_load_microcode(struct radeon_device *rdev) |
616 | { | 482 | { |
617 | const __be32 *fw_data; | 483 | const __be32 *fw_data; |
@@ -712,19 +578,19 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
712 | indirect1_start = 16; | 578 | indirect1_start = 16; |
713 | /* cp setup */ | 579 | /* cp setup */ |
714 | WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); | 580 | WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); |
715 | WREG32(RADEON_CP_RB_CNTL, | 581 | tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | |
716 | #ifdef __BIG_ENDIAN | ||
717 | RADEON_BUF_SWAP_32BIT | | ||
718 | #endif | ||
719 | REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | | ||
720 | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | | 582 | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | |
721 | REG_SET(RADEON_MAX_FETCH, max_fetch) | | 583 | REG_SET(RADEON_MAX_FETCH, max_fetch) | |
722 | RADEON_RB_NO_UPDATE); | 584 | RADEON_RB_NO_UPDATE); |
585 | #ifdef __BIG_ENDIAN | ||
586 | tmp |= RADEON_BUF_SWAP_32BIT; | ||
587 | #endif | ||
588 | WREG32(RADEON_CP_RB_CNTL, tmp); | ||
589 | |||
723 | /* Set ring address */ | 590 | /* Set ring address */ |
724 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); | 591 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); |
725 | WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); | 592 | WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); |
726 | /* Force read & write ptr to 0 */ | 593 | /* Force read & write ptr to 0 */ |
727 | tmp = RREG32(RADEON_CP_RB_CNTL); | ||
728 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); | 594 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); |
729 | WREG32(RADEON_CP_RB_RPTR_WR, 0); | 595 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
730 | WREG32(RADEON_CP_RB_WPTR, 0); | 596 | WREG32(RADEON_CP_RB_WPTR, 0); |
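The CP setup hunk builds the RB_CNTL value in the local tmp first, which lets the big-endian byte-swap flag be OR'd in under #ifdef without splitting one large macro expression, and lets the later RPTR_WR_ENA write reuse tmp instead of reading the register back. A sketch of the same build-then-write idiom; the REG_SET shape follows the driver's convention, but the shift, mask and swap values here are invented:

#include <stdint.h>
#include <stdio.h>

/* Field macros in the driver's REG_SET style (values are illustrative). */
#define RB_BUFSZ_SHIFT     0
#define RB_BUFSZ_MASK      (0x3fu << 0)
#define BUF_SWAP_32BIT     (2u << 16)
#define REG_SET(FIELD, v)  (((v) << FIELD##_SHIFT) & FIELD##_MASK)

static uint32_t build_rb_cntl(uint32_t rb_bufsz, int big_endian)
{
        uint32_t tmp = REG_SET(RB_BUFSZ, rb_bufsz);

        if (big_endian)             /* stands in for the #ifdef __BIG_ENDIAN OR-in */
                tmp |= BUF_SWAP_32BIT;
        return tmp;                 /* caller writes tmp, then reuses it later */
}

int main(void)
{
        printf("RB_CNTL = 0x%08x\n", build_rb_cntl(10, 1));
        return 0;
}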
@@ -978,7 +844,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
978 | 844 | ||
979 | header = radeon_get_ib_value(p, h_idx); | 845 | header = radeon_get_ib_value(p, h_idx); |
980 | crtc_id = radeon_get_ib_value(p, h_idx + 5); | 846 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
981 | reg = header >> 2; | 847 | reg = CP_PACKET0_GET_REG(header); |
982 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 848 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
983 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 849 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
984 | if (!obj) { | 850 | if (!obj) { |
@@ -1990,7 +1856,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
1990 | r100_pll_errata_after_data(rdev); | 1856 | r100_pll_errata_after_data(rdev); |
1991 | } | 1857 | } |
1992 | 1858 | ||
1993 | int r100_init(struct radeon_device *rdev) | 1859 | void r100_set_safe_registers(struct radeon_device *rdev) |
1994 | { | 1860 | { |
1995 | if (ASIC_IS_RN50(rdev)) { | 1861 | if (ASIC_IS_RN50(rdev)) { |
1996 | rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; | 1862 | rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; |
@@ -1999,9 +1865,8 @@ int r100_init(struct radeon_device *rdev) | |||
1999 | rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; | 1865 | rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; |
2000 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); | 1866 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); |
2001 | } else { | 1867 | } else { |
2002 | return r200_init(rdev); | 1868 | r200_set_safe_registers(rdev); |
2003 | } | 1869 | } |
2004 | return 0; | ||
2005 | } | 1870 | } |
2006 | 1871 | ||
2007 | /* | 1872 | /* |
@@ -2299,9 +2164,11 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
2299 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | 2164 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; |
2300 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | 2165 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; |
2301 | } | 2166 | } |
2302 | if (rdev->mode_info.crtcs[1]->base.enabled) { | 2167 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
2303 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; | 2168 | if (rdev->mode_info.crtcs[1]->base.enabled) { |
2304 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | 2169 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; |
2170 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | ||
2171 | } | ||
2305 | } | 2172 | } |
2306 | 2173 | ||
2307 | min_mem_eff.full = rfixed_const_8(0); | 2174 | min_mem_eff.full = rfixed_const_8(0); |
@@ -2512,7 +2379,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
2512 | /* | 2379 | /* |
2513 | Find the total latency for the display data. | 2380 | Find the total latency for the display data. |
2514 | */ | 2381 | */ |
2515 | disp_latency_overhead.full = rfixed_const(80); | 2382 | disp_latency_overhead.full = rfixed_const(8); |
2516 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | 2383 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); |
2517 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | 2384 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; |
2518 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | 2385 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; |
@@ -2710,8 +2577,11 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
2710 | static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) | 2577 | static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) |
2711 | { | 2578 | { |
2712 | DRM_ERROR("pitch %d\n", t->pitch); | 2579 | DRM_ERROR("pitch %d\n", t->pitch); |
2580 | DRM_ERROR("use_pitch %d\n", t->use_pitch); | ||
2713 | DRM_ERROR("width %d\n", t->width); | 2581 | DRM_ERROR("width %d\n", t->width); |
2582 | DRM_ERROR("width_11 %d\n", t->width_11); | ||
2714 | DRM_ERROR("height %d\n", t->height); | 2583 | DRM_ERROR("height %d\n", t->height); |
2584 | DRM_ERROR("height_11 %d\n", t->height_11); | ||
2715 | DRM_ERROR("num levels %d\n", t->num_levels); | 2585 | DRM_ERROR("num levels %d\n", t->num_levels); |
2716 | DRM_ERROR("depth %d\n", t->txdepth); | 2586 | DRM_ERROR("depth %d\n", t->txdepth); |
2717 | DRM_ERROR("bpp %d\n", t->cpp); | 2587 | DRM_ERROR("bpp %d\n", t->cpp); |
@@ -2771,15 +2641,17 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
2771 | else | 2641 | else |
2772 | w = track->textures[u].pitch / (1 << i); | 2642 | w = track->textures[u].pitch / (1 << i); |
2773 | } else { | 2643 | } else { |
2774 | w = track->textures[u].width / (1 << i); | 2644 | w = track->textures[u].width; |
2775 | if (rdev->family >= CHIP_RV515) | 2645 | if (rdev->family >= CHIP_RV515) |
2776 | w |= track->textures[u].width_11; | 2646 | w |= track->textures[u].width_11; |
2647 | w = w / (1 << i); | ||
2777 | if (track->textures[u].roundup_w) | 2648 | if (track->textures[u].roundup_w) |
2778 | w = roundup_pow_of_two(w); | 2649 | w = roundup_pow_of_two(w); |
2779 | } | 2650 | } |
2780 | h = track->textures[u].height / (1 << i); | 2651 | h = track->textures[u].height; |
2781 | if (rdev->family >= CHIP_RV515) | 2652 | if (rdev->family >= CHIP_RV515) |
2782 | h |= track->textures[u].height_11; | 2653 | h |= track->textures[u].height_11; |
2654 | h = h / (1 << i); | ||
2783 | if (track->textures[u].roundup_h) | 2655 | if (track->textures[u].roundup_h) |
2784 | h = roundup_pow_of_two(h); | 2656 | h = roundup_pow_of_two(h); |
2785 | size += w * h; | 2657 | size += w * h; |
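The texture-check hunk fixes an ordering bug for RV515 and newer parts, where texture width and height carry an extra eleventh bit in a separate field: the extension bit must be merged into the base size before dividing by the mip-level factor, not OR'd in afterwards, otherwise the computed mip sizes and the resulting bounds check are wrong. A small sketch of the corrected order, assuming a hypothetical layout where the extension bit is stored unshifted:

#include <stdio.h>

/* The extension bit is merged first, then the size is scaled per mip level;
 * the field layout here is illustrative. */
static unsigned mip_width(unsigned width_lo, unsigned width_11, unsigned level)
{
        unsigned w = width_lo | (width_11 << 11);  /* merge the extra bit first */

        return w >> level;                         /* then divide for the mip level */
}

int main(void)
{
        /* 2048-texel texture: low bits 0, extension bit set. */
        printf("level 1: %u texels wide\n", mip_width(0, 1, 1)); /* prints 1024 */
        return 0;
}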
@@ -3114,7 +2986,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | |||
3114 | WREG32(R_000740_CP_CSQ_CNTL, 0); | 2986 | WREG32(R_000740_CP_CSQ_CNTL, 0); |
3115 | 2987 | ||
3116 | /* Save few CRTC registers */ | 2988 | /* Save few CRTC registers */ |
3117 | save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); | 2989 | save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); |
3118 | save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); | 2990 | save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); |
3119 | save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); | 2991 | save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); |
3120 | save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); | 2992 | save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); |
@@ -3124,7 +2996,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | |||
3124 | } | 2996 | } |
3125 | 2997 | ||
3126 | /* Disable VGA aperture access */ | 2998 | /* Disable VGA aperture access */ |
3127 | WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); | 2999 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); |
3128 | /* Disable cursor, overlay, crtc */ | 3000 | /* Disable cursor, overlay, crtc */ |
3129 | WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); | 3001 | WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); |
3130 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | | 3002 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | |
@@ -3156,10 +3028,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) | |||
3156 | rdev->mc.vram_location); | 3028 | rdev->mc.vram_location); |
3157 | } | 3029 | } |
3158 | /* Restore CRTC registers */ | 3030 | /* Restore CRTC registers */ |
3159 | WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); | 3031 | WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); |
3160 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); | 3032 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); |
3161 | WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); | 3033 | WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); |
3162 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | 3034 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
3163 | WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); | 3035 | WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); |
3164 | } | 3036 | } |
3165 | } | 3037 | } |
3038 | |||
3039 | void r100_vga_render_disable(struct radeon_device *rdev) | ||
3040 | { | ||
3041 | u32 tmp; | ||
3042 | |||
3043 | tmp = RREG8(R_0003C2_GENMO_WT); | ||
3044 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); | ||
3045 | } | ||
3046 | |||
3047 | static void r100_debugfs(struct radeon_device *rdev) | ||
3048 | { | ||
3049 | int r; | ||
3050 | |||
3051 | r = r100_debugfs_mc_info_init(rdev); | ||
3052 | if (r) | ||
3053 | dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n"); | ||
3054 | } | ||
3055 | |||
3056 | static void r100_mc_program(struct radeon_device *rdev) | ||
3057 | { | ||
3058 | struct r100_mc_save save; | ||
3059 | |||
3060 | /* Stops all mc clients */ | ||
3061 | r100_mc_stop(rdev, &save); | ||
3062 | if (rdev->flags & RADEON_IS_AGP) { | ||
3063 | WREG32(R_00014C_MC_AGP_LOCATION, | ||
3064 | S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | | ||
3065 | S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); | ||
3066 | WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); | ||
3067 | if (rdev->family > CHIP_RV200) | ||
3068 | WREG32(R_00015C_AGP_BASE_2, | ||
3069 | upper_32_bits(rdev->mc.agp_base) & 0xff); | ||
3070 | } else { | ||
3071 | WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
3072 | WREG32(R_000170_AGP_BASE, 0); | ||
3073 | if (rdev->family > CHIP_RV200) | ||
3074 | WREG32(R_00015C_AGP_BASE_2, 0); | ||
3075 | } | ||
3076 | /* Wait for mc idle */ | ||
3077 | if (r100_mc_wait_for_idle(rdev)) | ||
3078 | dev_warn(rdev->dev, "Wait for MC idle timeout.\n"); | ||
3079 | /* Program MC, should be a 32bits limited address space */ | ||
3080 | WREG32(R_000148_MC_FB_LOCATION, | ||
3081 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
3082 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
3083 | r100_mc_resume(rdev, &save); | ||
3084 | } | ||
3085 | |||
3086 | void r100_clock_startup(struct radeon_device *rdev) | ||
3087 | { | ||
3088 | u32 tmp; | ||
3089 | |||
3090 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
3091 | radeon_legacy_set_clock_gating(rdev, 1); | ||
3092 | /* We need to force on some of the block */ | ||
3093 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); | ||
3094 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | ||
3095 | if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) | ||
3096 | tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); | ||
3097 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); | ||
3098 | } | ||
3099 | |||
3100 | static int r100_startup(struct radeon_device *rdev) | ||
3101 | { | ||
3102 | int r; | ||
3103 | |||
3104 | r100_mc_program(rdev); | ||
3105 | /* Resume clock */ | ||
3106 | r100_clock_startup(rdev); | ||
3107 | /* Initialize GPU configuration (# pipes, ...) */ | ||
3108 | r100_gpu_init(rdev); | ||
3109 | /* Initialize GART (initialize after TTM so we can allocate | ||
3110 | * memory through TTM but finalize after TTM) */ | ||
3111 | if (rdev->flags & RADEON_IS_PCI) { | ||
3112 | r = r100_pci_gart_enable(rdev); | ||
3113 | if (r) | ||
3114 | return r; | ||
3115 | } | ||
3116 | /* Enable IRQ */ | ||
3117 | rdev->irq.sw_int = true; | ||
3118 | r100_irq_set(rdev); | ||
3119 | /* 1M ring buffer */ | ||
3120 | r = r100_cp_init(rdev, 1024 * 1024); | ||
3121 | if (r) { | ||
3122 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
3123 | return r; | ||
3124 | } | ||
3125 | r = r100_wb_init(rdev); | ||
3126 | if (r) | ||
3127 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
3128 | r = r100_ib_init(rdev); | ||
3129 | if (r) { | ||
3130 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
3131 | return r; | ||
3132 | } | ||
3133 | return 0; | ||
3134 | } | ||
3135 | |||
3136 | int r100_resume(struct radeon_device *rdev) | ||
3137 | { | ||
3138 | /* Make sure GART is not working */ | ||
3139 | if (rdev->flags & RADEON_IS_PCI) | ||
3140 | r100_pci_gart_disable(rdev); | ||
3141 | /* Resume clock before doing reset */ | ||
3142 | r100_clock_startup(rdev); | ||
3143 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
3144 | if (radeon_gpu_reset(rdev)) { | ||
3145 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
3146 | RREG32(R_000E40_RBBM_STATUS), | ||
3147 | RREG32(R_0007C0_CP_STAT)); | ||
3148 | } | ||
3149 | /* post */ | ||
3150 | radeon_combios_asic_init(rdev->ddev); | ||
3151 | /* Resume clock after posting */ | ||
3152 | r100_clock_startup(rdev); | ||
3153 | return r100_startup(rdev); | ||
3154 | } | ||
3155 | |||
3156 | int r100_suspend(struct radeon_device *rdev) | ||
3157 | { | ||
3158 | r100_cp_disable(rdev); | ||
3159 | r100_wb_disable(rdev); | ||
3160 | r100_irq_disable(rdev); | ||
3161 | if (rdev->flags & RADEON_IS_PCI) | ||
3162 | r100_pci_gart_disable(rdev); | ||
3163 | return 0; | ||
3164 | } | ||
3165 | |||
3166 | void r100_fini(struct radeon_device *rdev) | ||
3167 | { | ||
3168 | r100_suspend(rdev); | ||
3169 | r100_cp_fini(rdev); | ||
3170 | r100_wb_fini(rdev); | ||
3171 | r100_ib_fini(rdev); | ||
3172 | radeon_gem_fini(rdev); | ||
3173 | if (rdev->flags & RADEON_IS_PCI) | ||
3174 | r100_pci_gart_fini(rdev); | ||
3175 | radeon_irq_kms_fini(rdev); | ||
3176 | radeon_fence_driver_fini(rdev); | ||
3177 | radeon_object_fini(rdev); | ||
3178 | radeon_atombios_fini(rdev); | ||
3179 | kfree(rdev->bios); | ||
3180 | rdev->bios = NULL; | ||
3181 | } | ||
3182 | |||
3183 | int r100_mc_init(struct radeon_device *rdev) | ||
3184 | { | ||
3185 | int r; | ||
3186 | u32 tmp; | ||
3187 | |||
3188 | /* Setup GPU memory space */ | ||
3189 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
3190 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
3191 | if (rdev->flags & RADEON_IS_IGP) { | ||
3192 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | ||
3193 | rdev->mc.vram_location = tmp << 16; | ||
3194 | } | ||
3195 | if (rdev->flags & RADEON_IS_AGP) { | ||
3196 | r = radeon_agp_init(rdev); | ||
3197 | if (r) { | ||
3198 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
3199 | rdev->flags &= ~RADEON_IS_AGP; | ||
3200 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
3201 | } else { | ||
3202 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
3203 | } | ||
3204 | } | ||
3205 | r = radeon_mc_setup(rdev); | ||
3206 | if (r) | ||
3207 | return r; | ||
3208 | return 0; | ||
3209 | } | ||
3210 | |||
3211 | int r100_init(struct radeon_device *rdev) | ||
3212 | { | ||
3213 | int r; | ||
3214 | |||
3215 | /* Register debugfs file specific to this group of asics */ | ||
3216 | r100_debugfs(rdev); | ||
3217 | /* Disable VGA */ | ||
3218 | r100_vga_render_disable(rdev); | ||
3219 | /* Initialize scratch registers */ | ||
3220 | radeon_scratch_init(rdev); | ||
3221 | /* Initialize surface registers */ | ||
3222 | radeon_surface_init(rdev); | ||
3223 | /* TODO: disabling VGA needs to use the VGA request */ | ||
3224 | /* BIOS*/ | ||
3225 | if (!radeon_get_bios(rdev)) { | ||
3226 | if (ASIC_IS_AVIVO(rdev)) | ||
3227 | return -EINVAL; | ||
3228 | } | ||
3229 | if (rdev->is_atom_bios) { | ||
3230 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); | ||
3231 | return -EINVAL; | ||
3232 | } else { | ||
3233 | r = radeon_combios_init(rdev); | ||
3234 | if (r) | ||
3235 | return r; | ||
3236 | } | ||
3237 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
3238 | if (radeon_gpu_reset(rdev)) { | ||
3239 | dev_warn(rdev->dev, | ||
3240 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
3241 | RREG32(R_000E40_RBBM_STATUS), | ||
3242 | RREG32(R_0007C0_CP_STAT)); | ||
3243 | } | ||
3244 | /* check if cards are posted or not */ | ||
3245 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
3246 | DRM_INFO("GPU not posted. posting now...\n"); | ||
3247 | radeon_combios_asic_init(rdev->ddev); | ||
3248 | } | ||
3249 | /* Set asic errata */ | ||
3250 | r100_errata(rdev); | ||
3251 | /* Initialize clocks */ | ||
3252 | radeon_get_clock_info(rdev->ddev); | ||
3253 | /* Get vram informations */ | ||
3254 | r100_vram_info(rdev); | ||
3255 | /* Initialize memory controller (also test AGP) */ | ||
3256 | r = r100_mc_init(rdev); | ||
3257 | if (r) | ||
3258 | return r; | ||
3259 | /* Fence driver */ | ||
3260 | r = radeon_fence_driver_init(rdev); | ||
3261 | if (r) | ||
3262 | return r; | ||
3263 | r = radeon_irq_kms_init(rdev); | ||
3264 | if (r) | ||
3265 | return r; | ||
3266 | /* Memory manager */ | ||
3267 | r = radeon_object_init(rdev); | ||
3268 | if (r) | ||
3269 | return r; | ||
3270 | if (rdev->flags & RADEON_IS_PCI) { | ||
3271 | r = r100_pci_gart_init(rdev); | ||
3272 | if (r) | ||
3273 | return r; | ||
3274 | } | ||
3275 | r100_set_safe_registers(rdev); | ||
3276 | rdev->accel_working = true; | ||
3277 | r = r100_startup(rdev); | ||
3278 | if (r) { | ||
3279 | /* Something went wrong with the accel init, stop accel */ | ||
3280 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
3281 | r100_suspend(rdev); | ||
3282 | r100_cp_fini(rdev); | ||
3283 | r100_wb_fini(rdev); | ||
3284 | r100_ib_fini(rdev); | ||
3285 | if (rdev->flags & RADEON_IS_PCI) | ||
3286 | r100_pci_gart_fini(rdev); | ||
3287 | radeon_irq_kms_fini(rdev); | ||
3288 | rdev->accel_working = false; | ||
3289 | } | ||
3290 | return 0; | ||
3291 | } | ||
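The large block appended to r100.c moves R1xx over to the driver's newer init layout: r100_init() performs the one-time discovery work (BIOS, clocks, VRAM, MC layout, fence/IRQ/memory-manager setup, GART tables) and then calls r100_startup(), which programs the MC and enables GART, interrupts, the CP ring, writeback and the IB pool; r100_resume() resets and re-posts the card and reuses the same r100_startup(), while r100_suspend() and r100_fini() tear things down. A schematic of that shared-startup shape, not the driver's code:

#include <stdio.h>

/* Shared hardware bring-up used both at first init and on resume. */
static int hw_startup(void)
{
        puts("program MC, enable GART, IRQ, ring, writeback, IB pool");
        return 0;
}

static int hw_init(void)
{
        puts("one-time setup: BIOS, clocks, VRAM, MC layout, memory manager");
        return hw_startup();
}

static int hw_resume(void)
{
        puts("reset and re-post the card");
        return hw_startup();
}

int main(void)
{
        return hw_init() || hw_resume();
}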
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index c4b257ec920e..df29a630c466 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h | |||
@@ -381,6 +381,24 @@ | |||
381 | #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) | 381 | #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) |
382 | #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) | 382 | #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) |
383 | #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF | 383 | #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF |
384 | #define R_000148_MC_FB_LOCATION 0x000148 | ||
385 | #define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
386 | #define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
387 | #define C_000148_MC_FB_START 0xFFFF0000 | ||
388 | #define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
389 | #define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
390 | #define C_000148_MC_FB_TOP 0x0000FFFF | ||
391 | #define R_00014C_MC_AGP_LOCATION 0x00014C | ||
392 | #define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
393 | #define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
394 | #define C_00014C_MC_AGP_START 0xFFFF0000 | ||
395 | #define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
396 | #define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
397 | #define C_00014C_MC_AGP_TOP 0x0000FFFF | ||
398 | #define R_000170_AGP_BASE 0x000170 | ||
399 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
400 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
401 | #define C_000170_AGP_BASE_ADDR 0x00000000 | ||
384 | #define R_00023C_DISPLAY_BASE_ADDR 0x00023C | 402 | #define R_00023C_DISPLAY_BASE_ADDR 0x00023C |
385 | #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | 403 | #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) |
386 | #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | 404 | #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) |
@@ -403,25 +421,25 @@ | |||
403 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) | 421 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) |
404 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) | 422 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) |
405 | #define C_000360_CUR2_LOCK 0x7FFFFFFF | 423 | #define C_000360_CUR2_LOCK 0x7FFFFFFF |
406 | #define R_0003C0_GENMO_WT 0x0003C0 | 424 | #define R_0003C2_GENMO_WT 0x0003C2 |
407 | #define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) | 425 | #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) |
408 | #define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) | 426 | #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) |
409 | #define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE | 427 | #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE |
410 | #define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) | 428 | #define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1) |
411 | #define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) | 429 | #define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1) |
412 | #define C_0003C0_VGA_RAM_EN 0xFFFFFFFD | 430 | #define C_0003C2_VGA_RAM_EN 0xFD |
413 | #define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) | 431 | #define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2) |
414 | #define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) | 432 | #define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3) |
415 | #define C_0003C0_VGA_CKSEL 0xFFFFFFF3 | 433 | #define C_0003C2_VGA_CKSEL 0xF3 |
416 | #define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) | 434 | #define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) |
417 | #define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) | 435 | #define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) |
418 | #define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF | 436 | #define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF |
419 | #define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) | 437 | #define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) |
420 | #define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) | 438 | #define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) |
421 | #define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF | 439 | #define C_0003C2_VGA_HSYNC_POL 0xBF |
422 | #define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) | 440 | #define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) |
423 | #define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) | 441 | #define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) |
424 | #define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F | 442 | #define C_0003C2_VGA_VSYNC_POL 0x7F |
425 | #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 | 443 | #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 |
426 | #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) | 444 | #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) |
427 | #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) | 445 | #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) |
@@ -545,6 +563,46 @@ | |||
545 | #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) | 563 | #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) |
546 | #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) | 564 | #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) |
547 | #define C_000774_SCRATCH_ADDR 0x0000001F | 565 | #define C_000774_SCRATCH_ADDR 0x0000001F |
566 | #define R_0007C0_CP_STAT 0x0007C0 | ||
567 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
568 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
569 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
570 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
571 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
572 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
573 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
574 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
575 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
576 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
577 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
578 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
579 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
580 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
581 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
582 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
583 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
584 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
585 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
586 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
587 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
588 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
589 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
590 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
591 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
592 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
593 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
594 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
595 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
596 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
597 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
598 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
599 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
600 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
601 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
602 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
603 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
604 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
605 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
548 | #define R_000E40_RBBM_STATUS 0x000E40 | 606 | #define R_000E40_RBBM_STATUS 0x000E40 |
549 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | 607 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) |
550 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | 608 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) |
@@ -604,4 +662,53 @@ | |||
604 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | 662 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) |
605 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | 663 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF |
606 | 664 | ||
665 | |||
666 | #define R_00000D_SCLK_CNTL 0x00000D | ||
667 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
668 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
669 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
670 | #define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8) | ||
671 | #define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7) | ||
672 | #define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF | ||
673 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
674 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
675 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
676 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
677 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
678 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
679 | #define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18) | ||
680 | #define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1) | ||
681 | #define C_00000D_FORCE_DISP 0xFFFBFFFF | ||
682 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
683 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
684 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
685 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
686 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
687 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
688 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
689 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
690 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
691 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
692 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
693 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
694 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
695 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
696 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
697 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
698 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
699 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
700 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
701 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
702 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
703 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
704 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
705 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
706 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
707 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
708 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
709 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
710 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
711 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
712 | |||
713 | |||
607 | #endif | 714 | #endif |
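All of the new r100d.h entries follow the file's S_/G_/C_ convention: S_FIELD(x) places a value at the field's bit position, G_FIELD(x) extracts it, and C_FIELD is the mask with the field cleared, so a read-modify-write update is written as (reg & C_FIELD) | S_FIELD(v). A compilable illustration with an invented field:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the r100d.h macros, for an invented 16-bit field at bit 16. */
#define S_EXAMPLE_FB_TOP(x)  (((x) & 0xFFFF) << 16)
#define G_EXAMPLE_FB_TOP(x)  (((x) >> 16) & 0xFFFF)
#define C_EXAMPLE_FB_TOP     0x0000FFFF

int main(void)
{
        uint32_t reg = 0x1234ABCDu;

        /* read-modify-write: clear the field, then OR in the new value */
        reg = (reg & C_EXAMPLE_FB_TOP) | S_EXAMPLE_FB_TOP(0x7FFF);
        printf("field now 0x%04x, low half untouched: 0x%04x\n",
               G_EXAMPLE_FB_TOP(reg), reg & 0xFFFF);
        return 0;
}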
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index cf7fea5ff2e5..eb740fc3549f 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -447,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
447 | return 0; | 447 | return 0; |
448 | } | 448 | } |
449 | 449 | ||
450 | int r200_init(struct radeon_device *rdev) | 450 | void r200_set_safe_registers(struct radeon_device *rdev) |
451 | { | 451 | { |
452 | rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; | 452 | rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; |
453 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); | 453 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); |
454 | return 0; | ||
455 | } | 454 | } |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 1ebea8cc8c93..2f43ee8e4048 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -33,43 +33,16 @@ | |||
33 | #include "radeon_drm.h" | 33 | #include "radeon_drm.h" |
34 | #include "r100_track.h" | 34 | #include "r100_track.h" |
35 | #include "r300d.h" | 35 | #include "r300d.h" |
36 | 36 | #include "rv350d.h" | |
37 | #include "r300_reg_safe.h" | 37 | #include "r300_reg_safe.h" |
38 | 38 | ||
39 | /* r300,r350,rv350,rv370,rv380 depends on : */ | 39 | /* This file gathers functions specific to: r300,r350,rv350,rv370,rv380 */ |
40 | void r100_hdp_reset(struct radeon_device *rdev); | ||
41 | int r100_cp_reset(struct radeon_device *rdev); | ||
42 | int r100_rb2d_reset(struct radeon_device *rdev); | ||
43 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
44 | int r100_pci_gart_enable(struct radeon_device *rdev); | ||
45 | void r100_mc_setup(struct radeon_device *rdev); | ||
46 | void r100_mc_disable_clients(struct radeon_device *rdev); | ||
47 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
48 | int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
49 | struct radeon_cs_packet *pkt, | ||
50 | unsigned idx); | ||
51 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); | ||
52 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
53 | struct radeon_cs_packet *pkt, | ||
54 | const unsigned *auth, unsigned n, | ||
55 | radeon_packet0_check_t check); | ||
56 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
57 | struct radeon_cs_packet *pkt, | ||
58 | struct radeon_object *robj); | ||
59 | |||
60 | /* This files gather functions specifics to: | ||
61 | * r300,r350,rv350,rv370,rv380 | ||
62 | * | ||
63 | * Some of these functions might be used by newer ASICs. | ||
64 | */ | ||
65 | void r300_gpu_init(struct radeon_device *rdev); | ||
66 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
67 | int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
68 | |||
69 | 40 | ||
70 | /* | 41 | /* |
71 | * rv370,rv380 PCIE GART | 42 | * rv370,rv380 PCIE GART |
72 | */ | 43 | */ |
44 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
45 | |||
73 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | 46 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) |
74 | { | 47 | { |
75 | uint32_t tmp; | 48 | uint32_t tmp; |
@@ -140,7 +113,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
140 | tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 113 | tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
141 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); | 114 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
142 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location); | 115 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location); |
143 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096; | 116 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE; |
144 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); | 117 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); |
145 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); | 118 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); |
146 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); | 119 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); |
@@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev) | |||
182 | radeon_gart_fini(rdev); | 155 | radeon_gart_fini(rdev); |
183 | } | 156 | } |
184 | 157 | ||
185 | /* | ||
186 | * MC | ||
187 | */ | ||
188 | int r300_mc_init(struct radeon_device *rdev) | ||
189 | { | ||
190 | int r; | ||
191 | |||
192 | if (r100_debugfs_rbbm_init(rdev)) { | ||
193 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
194 | } | ||
195 | |||
196 | r300_gpu_init(rdev); | ||
197 | r100_pci_gart_disable(rdev); | ||
198 | if (rdev->flags & RADEON_IS_PCIE) { | ||
199 | rv370_pcie_gart_disable(rdev); | ||
200 | } | ||
201 | |||
202 | /* Setup GPU memory space */ | ||
203 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
204 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
205 | if (rdev->flags & RADEON_IS_AGP) { | ||
206 | r = radeon_agp_init(rdev); | ||
207 | if (r) { | ||
208 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
209 | rdev->flags &= ~RADEON_IS_AGP; | ||
210 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
211 | } else { | ||
212 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
213 | } | ||
214 | } | ||
215 | r = radeon_mc_setup(rdev); | ||
216 | if (r) { | ||
217 | return r; | ||
218 | } | ||
219 | |||
220 | /* Program GPU memory space */ | ||
221 | r100_mc_disable_clients(rdev); | ||
222 | if (r300_mc_wait_for_idle(rdev)) { | ||
223 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
224 | "programming pipes. Bad things might happen.\n"); | ||
225 | } | ||
226 | r100_mc_setup(rdev); | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | void r300_mc_fini(struct radeon_device *rdev) | ||
231 | { | ||
232 | } | ||
233 | |||
234 | |||
235 | /* | ||
236 | * Fence emission | ||
237 | */ | ||
238 | void r300_fence_ring_emit(struct radeon_device *rdev, | 158 | void r300_fence_ring_emit(struct radeon_device *rdev, |
239 | struct radeon_fence *fence) | 159 | struct radeon_fence *fence) |
240 | { | 160 | { |
@@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev, | |||
260 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 180 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
261 | } | 181 | } |
262 | 182 | ||
263 | |||
264 | /* | ||
265 | * Global GPU functions | ||
266 | */ | ||
267 | int r300_copy_dma(struct radeon_device *rdev, | 183 | int r300_copy_dma(struct radeon_device *rdev, |
268 | uint64_t src_offset, | 184 | uint64_t src_offset, |
269 | uint64_t dst_offset, | 185 | uint64_t dst_offset, |
@@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev) | |||
582 | r100_vram_init_sizes(rdev); | 498 | r100_vram_init_sizes(rdev); |
583 | } | 499 | } |
584 | 500 | ||
585 | |||
586 | /* | ||
587 | * PCIE Lanes | ||
588 | */ | ||
589 | |||
590 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | 501 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) |
591 | { | 502 | { |
592 | uint32_t link_width_cntl, mask; | 503 | uint32_t link_width_cntl, mask; |
@@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | |||
646 | 557 | ||
647 | } | 558 | } |
648 | 559 | ||
649 | |||
650 | /* | ||
651 | * Debugfs info | ||
652 | */ | ||
653 | #if defined(CONFIG_DEBUG_FS) | 560 | #if defined(CONFIG_DEBUG_FS) |
654 | static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) | 561 | static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) |
655 | { | 562 | { |
@@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = { | |||
680 | }; | 587 | }; |
681 | #endif | 588 | #endif |
682 | 589 | ||
683 | int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | 590 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
684 | { | 591 | { |
685 | #if defined(CONFIG_DEBUG_FS) | 592 | #if defined(CONFIG_DEBUG_FS) |
686 | return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); | 593 | return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); |
@@ -689,10 +596,6 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | |||
689 | #endif | 596 | #endif |
690 | } | 597 | } |
691 | 598 | ||
692 | |||
693 | /* | ||
694 | * CS functions | ||
695 | */ | ||
696 | static int r300_packet0_check(struct radeon_cs_parser *p, | 599 | static int r300_packet0_check(struct radeon_cs_parser *p, |
697 | struct radeon_cs_packet *pkt, | 600 | struct radeon_cs_packet *pkt, |
698 | unsigned idx, unsigned reg) | 601 | unsigned idx, unsigned reg) |
@@ -1226,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev) | |||
1226 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); | 1129 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); |
1227 | } | 1130 | } |
1228 | 1131 | ||
1229 | int r300_init(struct radeon_device *rdev) | ||
1230 | { | ||
1231 | r300_set_reg_safe(rdev); | ||
1232 | return 0; | ||
1233 | } | ||
1234 | |||
1235 | void r300_mc_program(struct radeon_device *rdev) | 1132 | void r300_mc_program(struct radeon_device *rdev) |
1236 | { | 1133 | { |
1237 | struct r100_mc_save save; | 1134 | struct r100_mc_save save; |
@@ -1265,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev) | |||
1265 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 1162 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
1266 | r100_mc_resume(rdev, &save); | 1163 | r100_mc_resume(rdev, &save); |
1267 | } | 1164 | } |
1165 | |||
1166 | void r300_clock_startup(struct radeon_device *rdev) | ||
1167 | { | ||
1168 | u32 tmp; | ||
1169 | |||
1170 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
1171 | radeon_legacy_set_clock_gating(rdev, 1); | ||
1172 | /* We need to force some of the blocks on */ | ||
1173 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); | ||
1174 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | ||
1175 | if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) | ||
1176 | tmp |= S_00000D_FORCE_VAP(1); | ||
1177 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); | ||
1178 | } | ||
1179 | |||
1180 | static int r300_startup(struct radeon_device *rdev) | ||
1181 | { | ||
1182 | int r; | ||
1183 | |||
1184 | r300_mc_program(rdev); | ||
1185 | /* Resume clock */ | ||
1186 | r300_clock_startup(rdev); | ||
1187 | /* Initialize GPU configuration (# pipes, ...) */ | ||
1188 | r300_gpu_init(rdev); | ||
1189 | /* Initialize GART (initialize after TTM so we can allocate | ||
1190 | * memory through TTM but finalize after TTM) */ | ||
1191 | if (rdev->flags & RADEON_IS_PCIE) { | ||
1192 | r = rv370_pcie_gart_enable(rdev); | ||
1193 | if (r) | ||
1194 | return r; | ||
1195 | } | ||
1196 | if (rdev->flags & RADEON_IS_PCI) { | ||
1197 | r = r100_pci_gart_enable(rdev); | ||
1198 | if (r) | ||
1199 | return r; | ||
1200 | } | ||
1201 | /* Enable IRQ */ | ||
1202 | rdev->irq.sw_int = true; | ||
1203 | r100_irq_set(rdev); | ||
1204 | /* 1M ring buffer */ | ||
1205 | r = r100_cp_init(rdev, 1024 * 1024); | ||
1206 | if (r) { | ||
1207 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
1208 | return r; | ||
1209 | } | ||
1210 | r = r100_wb_init(rdev); | ||
1211 | if (r) | ||
1212 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
1213 | r = r100_ib_init(rdev); | ||
1214 | if (r) { | ||
1215 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
1216 | return r; | ||
1217 | } | ||
1218 | return 0; | ||
1219 | } | ||
1220 | |||
1221 | int r300_resume(struct radeon_device *rdev) | ||
1222 | { | ||
1223 | /* Make sure GART is not working */ | ||
1224 | if (rdev->flags & RADEON_IS_PCIE) | ||
1225 | rv370_pcie_gart_disable(rdev); | ||
1226 | if (rdev->flags & RADEON_IS_PCI) | ||
1227 | r100_pci_gart_disable(rdev); | ||
1228 | /* Resume clock before doing reset */ | ||
1229 | r300_clock_startup(rdev); | ||
1230 | /* Reset GPU before posting, otherwise ATOM will enter an infinite loop */ | ||
1231 | if (radeon_gpu_reset(rdev)) { | ||
1232 | dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
1233 | RREG32(R_000E40_RBBM_STATUS), | ||
1234 | RREG32(R_0007C0_CP_STAT)); | ||
1235 | } | ||
1236 | /* post */ | ||
1237 | radeon_combios_asic_init(rdev->ddev); | ||
1238 | /* Resume clock after posting */ | ||
1239 | r300_clock_startup(rdev); | ||
1240 | return r300_startup(rdev); | ||
1241 | } | ||
1242 | |||
1243 | int r300_suspend(struct radeon_device *rdev) | ||
1244 | { | ||
1245 | r100_cp_disable(rdev); | ||
1246 | r100_wb_disable(rdev); | ||
1247 | r100_irq_disable(rdev); | ||
1248 | if (rdev->flags & RADEON_IS_PCIE) | ||
1249 | rv370_pcie_gart_disable(rdev); | ||
1250 | if (rdev->flags & RADEON_IS_PCI) | ||
1251 | r100_pci_gart_disable(rdev); | ||
1252 | return 0; | ||
1253 | } | ||
1254 | |||
1255 | void r300_fini(struct radeon_device *rdev) | ||
1256 | { | ||
1257 | r300_suspend(rdev); | ||
1258 | r100_cp_fini(rdev); | ||
1259 | r100_wb_fini(rdev); | ||
1260 | r100_ib_fini(rdev); | ||
1261 | radeon_gem_fini(rdev); | ||
1262 | if (rdev->flags & RADEON_IS_PCIE) | ||
1263 | rv370_pcie_gart_fini(rdev); | ||
1264 | if (rdev->flags & RADEON_IS_PCI) | ||
1265 | r100_pci_gart_fini(rdev); | ||
1266 | radeon_irq_kms_fini(rdev); | ||
1267 | radeon_fence_driver_fini(rdev); | ||
1268 | radeon_object_fini(rdev); | ||
1269 | radeon_atombios_fini(rdev); | ||
1270 | kfree(rdev->bios); | ||
1271 | rdev->bios = NULL; | ||
1272 | } | ||
1273 | |||
1274 | int r300_init(struct radeon_device *rdev) | ||
1275 | { | ||
1276 | int r; | ||
1277 | |||
1278 | /* Disable VGA */ | ||
1279 | r100_vga_render_disable(rdev); | ||
1280 | /* Initialize scratch registers */ | ||
1281 | radeon_scratch_init(rdev); | ||
1282 | /* Initialize surface registers */ | ||
1283 | radeon_surface_init(rdev); | ||
1284 | /* TODO: disable VGA, need to use VGA request */ | ||
1285 | /* BIOS */ | ||
1286 | if (!radeon_get_bios(rdev)) { | ||
1287 | if (ASIC_IS_AVIVO(rdev)) | ||
1288 | return -EINVAL; | ||
1289 | } | ||
1290 | if (rdev->is_atom_bios) { | ||
1291 | dev_err(rdev->dev, "Expecting combios for R300 GPU\n"); | ||
1292 | return -EINVAL; | ||
1293 | } else { | ||
1294 | r = radeon_combios_init(rdev); | ||
1295 | if (r) | ||
1296 | return r; | ||
1297 | } | ||
1298 | /* Reset GPU before posting, otherwise ATOM will enter an infinite loop */ | ||
1299 | if (radeon_gpu_reset(rdev)) { | ||
1300 | dev_warn(rdev->dev, | ||
1301 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
1302 | RREG32(R_000E40_RBBM_STATUS), | ||
1303 | RREG32(R_0007C0_CP_STAT)); | ||
1304 | } | ||
1305 | /* check if cards are posted or not */ | ||
1306 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
1307 | DRM_INFO("GPU not posted. Posting now...\n"); | ||
1308 | radeon_combios_asic_init(rdev->ddev); | ||
1309 | } | ||
1310 | /* Set asic errata */ | ||
1311 | r300_errata(rdev); | ||
1312 | /* Initialize clocks */ | ||
1313 | radeon_get_clock_info(rdev->ddev); | ||
1315 | /* Get VRAM information */ | ||
1315 | r300_vram_info(rdev); | ||
1316 | /* Initialize memory controller (also test AGP) */ | ||
1317 | r = r420_mc_init(rdev); | ||
1318 | if (r) | ||
1319 | return r; | ||
1320 | /* Fence driver */ | ||
1321 | r = radeon_fence_driver_init(rdev); | ||
1322 | if (r) | ||
1323 | return r; | ||
1324 | r = radeon_irq_kms_init(rdev); | ||
1325 | if (r) | ||
1326 | return r; | ||
1327 | /* Memory manager */ | ||
1328 | r = radeon_object_init(rdev); | ||
1329 | if (r) | ||
1330 | return r; | ||
1331 | if (rdev->flags & RADEON_IS_PCIE) { | ||
1332 | r = rv370_pcie_gart_init(rdev); | ||
1333 | if (r) | ||
1334 | return r; | ||
1335 | } | ||
1336 | if (rdev->flags & RADEON_IS_PCI) { | ||
1337 | r = r100_pci_gart_init(rdev); | ||
1338 | if (r) | ||
1339 | return r; | ||
1340 | } | ||
1341 | r300_set_reg_safe(rdev); | ||
1342 | rdev->accel_working = true; | ||
1343 | r = r300_startup(rdev); | ||
1344 | if (r) { | ||
1345 | /* Something went wrong with the accel init, stop accel */ | ||
1346 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
1347 | r300_suspend(rdev); | ||
1348 | r100_cp_fini(rdev); | ||
1349 | r100_wb_fini(rdev); | ||
1350 | r100_ib_fini(rdev); | ||
1351 | if (rdev->flags & RADEON_IS_PCIE) | ||
1352 | rv370_pcie_gart_fini(rdev); | ||
1353 | if (rdev->flags & RADEON_IS_PCI) | ||
1354 | r100_pci_gart_fini(rdev); | ||
1355 | radeon_irq_kms_fini(rdev); | ||
1356 | rdev->accel_working = false; | ||
1357 | } | ||
1358 | return 0; | ||
1359 | } | ||
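
The r300.c changes above move R3xx to the split init/startup/suspend/resume/fini flow, where a startup failure only disables acceleration instead of failing the whole driver load. A stand-alone sketch of that pattern, with stubbed helpers that are not code from this patch:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool accel_working; };

static int startup(struct dev *d) { (void)d; return -1; /* pretend CP init failed */ }
static void teardown_accel(struct dev *d) { (void)d; /* cp/wb/ib/gart fini would go here */ }

static int init(struct dev *d)
{
	d->accel_working = true;
	if (startup(d)) {
		fprintf(stderr, "Disabling GPU acceleration\n");
		teardown_accel(d);
		d->accel_working = false;
	}
	return 0;	/* init still succeeds; modesetting keeps working without accel */
}

int main(void)
{
	struct dev d;
	return init(&d);
}
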
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index d4fa3eb1074f..4c73114f0de9 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h | |||
@@ -96,6 +96,211 @@ | |||
96 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | 96 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) |
97 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | 97 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) |
98 | #define C_000170_AGP_BASE_ADDR 0x00000000 | 98 | #define C_000170_AGP_BASE_ADDR 0x00000000 |
99 | #define R_0007C0_CP_STAT 0x0007C0 | ||
100 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
101 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
102 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
103 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
104 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
105 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
106 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
107 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
108 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
109 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
110 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
111 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
112 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
113 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
114 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
115 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
116 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
117 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
118 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
119 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
120 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
121 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
122 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
123 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
124 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
125 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
126 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
127 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
128 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
129 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
130 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
131 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
132 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
133 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
134 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
135 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
136 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
137 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
138 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
139 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
140 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
141 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
142 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
143 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
144 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
145 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
146 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
147 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
148 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
149 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
150 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
151 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
152 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
153 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
154 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
155 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
156 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
157 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
158 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
159 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
160 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
161 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
162 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
163 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
164 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
165 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
166 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
167 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
168 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
169 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
170 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
171 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
172 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
173 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
174 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
175 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
176 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
177 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
178 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
179 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
180 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
181 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
182 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
183 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
184 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
185 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
186 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
187 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
188 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
189 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
190 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
191 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
192 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
193 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
194 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
195 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
196 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
197 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
198 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
199 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
200 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
201 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
202 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
203 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
204 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
205 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
206 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
207 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
208 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
209 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
210 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
211 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
99 | 212 | ||
100 | 213 | ||
214 | #define R_00000D_SCLK_CNTL 0x00000D | ||
215 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
216 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
217 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
218 | #define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) | ||
219 | #define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) | ||
220 | #define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 | ||
221 | #define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) | ||
222 | #define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) | ||
223 | #define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF | ||
224 | #define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) | ||
225 | #define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) | ||
226 | #define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF | ||
227 | #define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) | ||
228 | #define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) | ||
229 | #define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF | ||
230 | #define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) | ||
231 | #define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) | ||
232 | #define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F | ||
233 | #define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) | ||
234 | #define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) | ||
235 | #define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF | ||
236 | #define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) | ||
237 | #define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) | ||
238 | #define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF | ||
239 | #define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) | ||
240 | #define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) | ||
241 | #define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF | ||
242 | #define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) | ||
243 | #define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) | ||
244 | #define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF | ||
245 | #define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) | ||
246 | #define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) | ||
247 | #define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF | ||
248 | #define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) | ||
249 | #define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) | ||
250 | #define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF | ||
251 | #define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) | ||
252 | #define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) | ||
253 | #define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF | ||
254 | #define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) | ||
255 | #define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) | ||
256 | #define C_00000D_FORCE_DISP2 0xFFFF7FFF | ||
257 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
258 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
259 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
260 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
261 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
262 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
263 | #define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) | ||
264 | #define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) | ||
265 | #define C_00000D_FORCE_DISP1 0xFFFBFFFF | ||
266 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
267 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
268 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
269 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
270 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
271 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
272 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
273 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
274 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
275 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
276 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
277 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
278 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
279 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
280 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
281 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
282 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
283 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
284 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
285 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
286 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
287 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
288 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
289 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
290 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
291 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
292 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
293 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
294 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
295 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
296 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | ||
297 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | ||
298 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | ||
299 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | ||
300 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | ||
301 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | ||
302 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | ||
303 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | ||
304 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | ||
305 | |||
101 | #endif | 306 | #endif |
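
The S_/G_/C_ defines added above follow the usual generated-register-header pattern: S_ shifts a field value into place, G_ extracts it, and C_ masks the field out. A stand-alone sketch of a read-modify-write on the FORCE_CP field (macros copied from the hunk; the starting register value is made up):

#include <stdint.h>
#include <stdio.h>

#define S_00000D_FORCE_CP(x)	(((x) & 0x1) << 16)
#define G_00000D_FORCE_CP(x)	(((x) >> 16) & 0x1)
#define C_00000D_FORCE_CP	0xFFFEFFFF

int main(void)
{
	uint32_t sclk_cntl = 0x00020000;	/* stand-in for RREG32_PLL(R_00000D_SCLK_CNTL) */

	sclk_cntl &= C_00000D_FORCE_CP;		/* clear the field */
	sclk_cntl |= S_00000D_FORCE_CP(1);	/* force the CP clock on */
	printf("FORCE_CP=%u reg=0x%08X\n", G_00000D_FORCE_CP(sclk_cntl), sclk_cntl);
	return 0;
}
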
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 49a2fdc57d27..1cefdbcc0850 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev) | |||
155 | static void r420_clock_resume(struct radeon_device *rdev) | 155 | static void r420_clock_resume(struct radeon_device *rdev) |
156 | { | 156 | { |
157 | u32 sclk_cntl; | 157 | u32 sclk_cntl; |
158 | |||
159 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
160 | radeon_atom_set_clock_gating(rdev, 1); | ||
158 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); | 161 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); |
159 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | 162 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
160 | if (rdev->family == CHIP_R420) | 163 | if (rdev->family == CHIP_R420) |
@@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev) | |||
167 | int r; | 170 | int r; |
168 | 171 | ||
169 | r300_mc_program(rdev); | 172 | r300_mc_program(rdev); |
173 | /* Resume clock */ | ||
174 | r420_clock_resume(rdev); | ||
170 | /* Initialize GART (initialize after TTM so we can allocate | 175 | /* Initialize GART (initialize after TTM so we can allocate |
171 | * memory through TTM but finalize after TTM) */ | 176 | * memory through TTM but finalize after TTM) */ |
172 | if (rdev->flags & RADEON_IS_PCIE) { | 177 | if (rdev->flags & RADEON_IS_PCIE) { |
@@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev) | |||
267 | { | 272 | { |
268 | int r; | 273 | int r; |
269 | 274 | ||
270 | rdev->new_init_path = true; | ||
271 | /* Initialize scratch registers */ | 275 | /* Initialize scratch registers */ |
272 | radeon_scratch_init(rdev); | 276 | radeon_scratch_init(rdev); |
273 | /* Initialize surface registers */ | 277 | /* Initialize surface registers */ |
@@ -307,6 +311,8 @@ int r420_init(struct radeon_device *rdev) | |||
307 | } | 311 | } |
308 | /* Initialize clocks */ | 312 | /* Initialize clocks */ |
309 | radeon_get_clock_info(rdev->ddev); | 313 | radeon_get_clock_info(rdev->ddev); |
314 | /* Initialize power management */ | ||
315 | radeon_pm_init(rdev); | ||
310 | /* Get vram informations */ | 316 | /* Get vram informations */ |
311 | r300_vram_info(rdev); | 317 | r300_vram_info(rdev); |
312 | /* Initialize memory controller (also test AGP) */ | 318 | /* Initialize memory controller (also test AGP) */ |
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h index a48a7db1e2aa..fc78d31a0b4a 100644 --- a/drivers/gpu/drm/radeon/r420d.h +++ b/drivers/gpu/drm/radeon/r420d.h | |||
@@ -212,9 +212,9 @@ | |||
212 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | 212 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) |
213 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | 213 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) |
214 | #define C_00000D_FORCE_E2 0xFFEFFFFF | 214 | #define C_00000D_FORCE_E2 0xFFEFFFFF |
215 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | 215 | #define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) |
216 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | 216 | #define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) |
217 | #define C_00000D_FORCE_SE 0xFFDFFFFF | 217 | #define C_00000D_FORCE_VAP 0xFFDFFFFF |
218 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | 218 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) |
219 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | 219 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) |
220 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | 220 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF |
@@ -224,24 +224,24 @@ | |||
224 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | 224 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) |
225 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | 225 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) |
226 | #define C_00000D_FORCE_RE 0xFEFFFFFF | 226 | #define C_00000D_FORCE_RE 0xFEFFFFFF |
227 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | 227 | #define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) |
228 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | 228 | #define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) |
229 | #define C_00000D_FORCE_PB 0xFDFFFFFF | 229 | #define C_00000D_FORCE_SR 0xFDFFFFFF |
230 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) | 230 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) |
231 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) | 231 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) |
232 | #define C_00000D_FORCE_PX 0xFBFFFFFF | 232 | #define C_00000D_FORCE_PX 0xFBFFFFFF |
233 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) | 233 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) |
234 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) | 234 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) |
235 | #define C_00000D_FORCE_TX 0xF7FFFFFF | 235 | #define C_00000D_FORCE_TX 0xF7FFFFFF |
236 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | 236 | #define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) |
237 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | 237 | #define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) |
238 | #define C_00000D_FORCE_RB 0xEFFFFFFF | 238 | #define C_00000D_FORCE_US 0xEFFFFFFF |
239 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | 239 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) |
240 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | 240 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) |
241 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | 241 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF |
242 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | 242 | #define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) |
243 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | 243 | #define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) |
244 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | 244 | #define C_00000D_FORCE_SU 0xBFFFFFFF |
245 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | 245 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) |
246 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | 246 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) |
247 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | 247 | #define C_00000D_FORCE_OV0 0x7FFFFFFF |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 868add6e166d..7baa73955563 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -384,9 +384,16 @@ | |||
384 | # define AVIVO_D1GRPH_TILED (1 << 20) | 384 | # define AVIVO_D1GRPH_TILED (1 << 20) |
385 | # define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21) | 385 | # define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21) |
386 | 386 | ||
387 | /* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2 | ||
388 | * block and vice versa. This applies to GRPH, CUR, etc. | ||
389 | */ | ||
387 | #define AVIVO_D1GRPH_LUT_SEL 0x6108 | 390 | #define AVIVO_D1GRPH_LUT_SEL 0x6108 |
388 | #define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 | 391 | #define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 |
392 | #define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 | ||
393 | #define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 | ||
389 | #define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 | 394 | #define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 |
395 | #define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c | ||
396 | #define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c | ||
390 | #define AVIVO_D1GRPH_PITCH 0x6120 | 397 | #define AVIVO_D1GRPH_PITCH 0x6120 |
391 | #define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124 | 398 | #define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124 |
392 | #define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128 | 399 | #define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128 |
@@ -404,6 +411,8 @@ | |||
404 | # define AVIVO_D1CURSOR_MODE_MASK (3 << 8) | 411 | # define AVIVO_D1CURSOR_MODE_MASK (3 << 8) |
405 | # define AVIVO_D1CURSOR_MODE_24BPP 2 | 412 | # define AVIVO_D1CURSOR_MODE_24BPP 2 |
406 | #define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408 | 413 | #define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408 |
414 | #define R700_D1CUR_SURFACE_ADDRESS_HIGH 0x6c0c | ||
415 | #define R700_D2CUR_SURFACE_ADDRESS_HIGH 0x640c | ||
407 | #define AVIVO_D1CUR_SIZE 0x6410 | 416 | #define AVIVO_D1CUR_SIZE 0x6410 |
408 | #define AVIVO_D1CUR_POSITION 0x6414 | 417 | #define AVIVO_D1CUR_POSITION 0x6414 |
409 | #define AVIVO_D1CUR_HOT_SPOT 0x6418 | 418 | #define AVIVO_D1CUR_HOT_SPOT 0x6418 |
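
The new comment in r500_reg.h warns that the R7xx *_HIGH surface registers are cross-wired: the define named for D1 carries an offset in the D2 block's range and vice versa. A stand-alone sketch that only makes the swap visible (offsets copied from the hunk):

#include <stdio.h>

#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS		0x6110	/* D1 (0x61xx) block */
#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH	0x6114	/* sits in the D1 block range */
#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH	0x6914	/* sits in the D2 (0x69xx) block range */

int main(void)
{
	/* The D2-named HIGH register is 4 bytes above the D1 low-address register,
	 * while the D1-named HIGH register lives up in the D2 range. */
	printf("D1 low 0x%04X, D2 high 0x%04X, D1 high 0x%04X\n",
	       AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS,
	       R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
	       R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH);
	return 0;
}
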
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 0bf13fccdaf2..f7435185c0a6 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -186,7 +186,7 @@ static int r520_startup(struct radeon_device *rdev) | |||
186 | } | 186 | } |
187 | /* Enable IRQ */ | 187 | /* Enable IRQ */ |
188 | rdev->irq.sw_int = true; | 188 | rdev->irq.sw_int = true; |
189 | r100_irq_set(rdev); | 189 | rs600_irq_set(rdev); |
190 | /* 1M ring buffer */ | 190 | /* 1M ring buffer */ |
191 | r = r100_cp_init(rdev, 1024 * 1024); | 191 | r = r100_cp_init(rdev, 1024 * 1024); |
192 | if (r) { | 192 | if (r) { |
@@ -228,7 +228,6 @@ int r520_init(struct radeon_device *rdev) | |||
228 | { | 228 | { |
229 | int r; | 229 | int r; |
230 | 230 | ||
231 | rdev->new_init_path = true; | ||
232 | /* Initialize scratch registers */ | 231 | /* Initialize scratch registers */ |
233 | radeon_scratch_init(rdev); | 232 | radeon_scratch_init(rdev); |
234 | /* Initialize surface registers */ | 233 | /* Initialize surface registers */ |
@@ -261,6 +260,8 @@ int r520_init(struct radeon_device *rdev) | |||
261 | } | 260 | } |
262 | /* Initialize clocks */ | 261 | /* Initialize clocks */ |
263 | radeon_get_clock_info(rdev->ddev); | 262 | radeon_get_clock_info(rdev->ddev); |
263 | /* Initialize power management */ | ||
264 | radeon_pm_init(rdev); | ||
264 | /* Get vram informations */ | 265 | /* Get vram informations */ |
265 | r520_vram_info(rdev); | 266 | r520_vram_info(rdev); |
266 | /* Initialize memory controller (also test AGP) */ | 267 | /* Initialize memory controller (also test AGP) */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2e4e60edbff4..278f646bc18e 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin"); | |||
65 | 65 | ||
66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | 66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
67 | 67 | ||
68 | /* This files gather functions specifics to: | 68 | /* r600,rv610,rv630,rv620,rv635,rv670 */ |
69 | * r600,rv610,rv630,rv620,rv635,rv670 | ||
70 | * | ||
71 | * Some of these functions might be used by newer ASICs. | ||
72 | */ | ||
73 | int r600_mc_wait_for_idle(struct radeon_device *rdev); | 69 | int r600_mc_wait_for_idle(struct radeon_device *rdev); |
74 | void r600_gpu_init(struct radeon_device *rdev); | 70 | void r600_gpu_init(struct radeon_device *rdev); |
75 | void r600_fini(struct radeon_device *rdev); | 71 | void r600_fini(struct radeon_device *rdev); |
76 | 72 | ||
77 | |||
78 | /* | 73 | /* |
79 | * R600 PCIE GART | 74 | * R600 PCIE GART |
80 | */ | 75 | */ |
@@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
168 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 163 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
169 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 164 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
170 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 165 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
171 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); | 166 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
172 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 167 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
173 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 168 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
174 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 169 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
@@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev) | |||
225 | radeon_gart_fini(rdev); | 220 | radeon_gart_fini(rdev); |
226 | } | 221 | } |
227 | 222 | ||
223 | void r600_agp_enable(struct radeon_device *rdev) | ||
224 | { | ||
225 | u32 tmp; | ||
226 | int i; | ||
227 | |||
228 | /* Setup L2 cache */ | ||
229 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | | ||
230 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | ||
231 | EFFECTIVE_L2_QUEUE_SIZE(7)); | ||
232 | WREG32(VM_L2_CNTL2, 0); | ||
233 | WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); | ||
234 | /* Setup TLB control */ | ||
235 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | ||
236 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | ||
237 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | | ||
238 | ENABLE_WAIT_L2_QUERY; | ||
239 | WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); | ||
240 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); | ||
241 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); | ||
242 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | ||
243 | WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); | ||
244 | WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); | ||
245 | WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); | ||
246 | WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); | ||
247 | WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); | ||
248 | WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); | ||
249 | WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); | ||
250 | WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); | ||
251 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | ||
252 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | ||
253 | for (i = 0; i < 7; i++) | ||
254 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | ||
255 | } | ||
256 | |||
228 | int r600_mc_wait_for_idle(struct radeon_device *rdev) | 257 | int r600_mc_wait_for_idle(struct radeon_device *rdev) |
229 | { | 258 | { |
230 | unsigned i; | 259 | unsigned i; |
@@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) | |||
240 | return -1; | 269 | return -1; |
241 | } | 270 | } |
242 | 271 | ||
243 | static void r600_mc_resume(struct radeon_device *rdev) | 272 | static void r600_mc_program(struct radeon_device *rdev) |
244 | { | 273 | { |
245 | u32 d1vga_control, d2vga_control; | 274 | struct rv515_mc_save save; |
246 | u32 vga_render_control, vga_hdp_control; | ||
247 | u32 d1crtc_control, d2crtc_control; | ||
248 | u32 new_d1grph_primary, new_d1grph_secondary; | ||
249 | u32 new_d2grph_primary, new_d2grph_secondary; | ||
250 | u64 old_vram_start; | ||
251 | u32 tmp; | 275 | u32 tmp; |
252 | int i, j; | 276 | int i, j; |
253 | 277 | ||
@@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev) | |||
261 | } | 285 | } |
262 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | 286 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); |
263 | 287 | ||
264 | d1vga_control = RREG32(D1VGA_CONTROL); | 288 | rv515_mc_stop(rdev, &save); |
265 | d2vga_control = RREG32(D2VGA_CONTROL); | ||
266 | vga_render_control = RREG32(VGA_RENDER_CONTROL); | ||
267 | vga_hdp_control = RREG32(VGA_HDP_CONTROL); | ||
268 | d1crtc_control = RREG32(D1CRTC_CONTROL); | ||
269 | d2crtc_control = RREG32(D2CRTC_CONTROL); | ||
270 | old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | ||
271 | new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); | ||
272 | new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); | ||
273 | new_d1grph_primary += rdev->mc.vram_start - old_vram_start; | ||
274 | new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
275 | new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); | ||
276 | new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); | ||
277 | new_d2grph_primary += rdev->mc.vram_start - old_vram_start; | ||
278 | new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
279 | |||
280 | /* Stop all video */ | ||
281 | WREG32(D1VGA_CONTROL, 0); | ||
282 | WREG32(D2VGA_CONTROL, 0); | ||
283 | WREG32(VGA_RENDER_CONTROL, 0); | ||
284 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
285 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
286 | WREG32(D1CRTC_CONTROL, 0); | ||
287 | WREG32(D2CRTC_CONTROL, 0); | ||
288 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
289 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
290 | |||
291 | mdelay(1); | ||
292 | if (r600_mc_wait_for_idle(rdev)) { | 289 | if (r600_mc_wait_for_idle(rdev)) { |
293 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 290 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
294 | } | 291 | } |
295 | 292 | /* Lockout access through VGA aperture (doesn't exist before R600) */ | |
296 | /* Lockout access through VGA aperture*/ | ||
297 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | 293 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); |
298 | |||
299 | /* Update configuration */ | 294 | /* Update configuration */ |
300 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | 295 | if (rdev->flags & RADEON_IS_AGP) { |
301 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); | 296 | if (rdev->mc.vram_start < rdev->mc.gtt_start) { |
297 | /* VRAM before AGP */ | ||
298 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
299 | rdev->mc.vram_start >> 12); | ||
300 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
301 | rdev->mc.gtt_end >> 12); | ||
302 | } else { | ||
303 | /* VRAM after AGP */ | ||
304 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
305 | rdev->mc.gtt_start >> 12); | ||
306 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
307 | rdev->mc.vram_end >> 12); | ||
308 | } | ||
309 | } else { | ||
310 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | ||
311 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); | ||
312 | } | ||
302 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 313 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
303 | tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; | 314 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
304 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 315 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
305 | WREG32(MC_VM_FB_LOCATION, tmp); | 316 | WREG32(MC_VM_FB_LOCATION, tmp); |
306 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 317 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
307 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 318 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
308 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 319 | WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); |
309 | if (rdev->flags & RADEON_IS_AGP) { | 320 | if (rdev->flags & RADEON_IS_AGP) { |
310 | WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); | 321 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); |
311 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 322 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); |
312 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); | 323 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); |
313 | } else { | 324 | } else { |
314 | WREG32(MC_VM_AGP_BASE, 0); | 325 | WREG32(MC_VM_AGP_BASE, 0); |
315 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | 326 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); |
316 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | 327 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); |
317 | } | 328 | } |
318 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); | ||
319 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); | ||
320 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); | ||
321 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); | ||
322 | WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); | ||
323 | |||
324 | /* Unlock host access */ | ||
325 | WREG32(VGA_HDP_CONTROL, vga_hdp_control); | ||
326 | |||
327 | mdelay(1); | ||
328 | if (r600_mc_wait_for_idle(rdev)) { | 329 | if (r600_mc_wait_for_idle(rdev)) { |
329 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 330 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
330 | } | 331 | } |
331 | 332 | rv515_mc_resume(rdev, &save); | |
332 | /* Restore video state */ | ||
333 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
334 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
335 | WREG32(D1CRTC_CONTROL, d1crtc_control); | ||
336 | WREG32(D2CRTC_CONTROL, d2crtc_control); | ||
337 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
338 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
339 | WREG32(D1VGA_CONTROL, d1vga_control); | ||
340 | WREG32(D2VGA_CONTROL, d2vga_control); | ||
341 | WREG32(VGA_RENDER_CONTROL, vga_render_control); | ||
342 | |||
343 | /* we need to own VRAM, so turn off the VGA renderer here | 333 | /* we need to own VRAM, so turn off the VGA renderer here |
344 | * to stop it overwriting our objects */ | 334 | * to stop it overwriting our objects */ |
345 | rv515_vga_render_disable(rdev); | 335 | rv515_vga_render_disable(rdev); |
@@ -349,11 +339,10 @@ int r600_mc_init(struct radeon_device *rdev) | |||
349 | { | 339 | { |
350 | fixed20_12 a; | 340 | fixed20_12 a; |
351 | u32 tmp; | 341 | u32 tmp; |
352 | int chansize; | 342 | int chansize, numchan; |
353 | int r; | 343 | int r; |
354 | 344 | ||
355 | /* Get VRAM informations */ | 345 | /* Get VRAM informations */ |
356 | rdev->mc.vram_width = 128; | ||
357 | rdev->mc.vram_is_ddr = true; | 346 | rdev->mc.vram_is_ddr = true; |
358 | tmp = RREG32(RAMCFG); | 347 | tmp = RREG32(RAMCFG); |
359 | if (tmp & CHANSIZE_OVERRIDE) { | 348 | if (tmp & CHANSIZE_OVERRIDE) { |
@@ -363,17 +352,23 @@ int r600_mc_init(struct radeon_device *rdev) | |||
363 | } else { | 352 | } else { |
364 | chansize = 32; | 353 | chansize = 32; |
365 | } | 354 | } |
366 | if (rdev->family == CHIP_R600) { | 355 | tmp = RREG32(CHMAP); |
367 | rdev->mc.vram_width = 8 * chansize; | 356 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { |
368 | } else if (rdev->family == CHIP_RV670) { | 357 | case 0: |
369 | rdev->mc.vram_width = 4 * chansize; | 358 | default: |
370 | } else if ((rdev->family == CHIP_RV610) || | 359 | numchan = 1; |
371 | (rdev->family == CHIP_RV620)) { | 360 | break; |
372 | rdev->mc.vram_width = chansize; | 361 | case 1: |
373 | } else if ((rdev->family == CHIP_RV630) || | 362 | numchan = 2; |
374 | (rdev->family == CHIP_RV635)) { | 363 | break; |
375 | rdev->mc.vram_width = 2 * chansize; | 364 | case 2: |
365 | numchan = 4; | ||
366 | break; | ||
367 | case 3: | ||
368 | numchan = 8; | ||
369 | break; | ||
376 | } | 370 | } |
371 | rdev->mc.vram_width = numchan * chansize; | ||
377 | /* Could aper size report 0 ? */ | 372 | /* Could aper size report 0 ? */ |
378 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 373 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
379 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 374 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
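
The hunk above replaces the per-family VRAM width table with a computed width: number of memory channels (decoded from CHMAP) times the channel size (from RAMCFG). A stand-alone sketch with assumed register values; the NOOFCHAN shift/mask here are assumptions for the demo, the real definitions live in r600d.h:

#include <stdint.h>
#include <stdio.h>

#define NOOFCHAN_SHIFT	12		/* assumption for the demo */
#define NOOFCHAN_MASK	0x00003000	/* assumption for the demo */

int main(void)
{
	uint32_t chmap = 2 << NOOFCHAN_SHIFT;	/* stand-in for RREG32(CHMAP): field = 2 */
	int chansize = 64;			/* pretend RAMCFG reported 64-bit channels */
	int numchan;

	switch ((chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 1:  numchan = 2; break;
	case 2:  numchan = 4; break;
	case 3:  numchan = 8; break;
	default: numchan = 1; break;
	}
	printf("vram_width = %d bits\n", numchan * chansize);	/* 4 * 64 = 256 */
	return 0;
}
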
@@ -414,40 +409,34 @@ int r600_mc_init(struct radeon_device *rdev) | |||
414 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | 409 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
415 | } | 410 | } |
416 | } else { | 411 | } else { |
417 | if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { | 412 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
418 | rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) & | 413 | rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) & |
419 | 0xFFFF) << 24; | 414 | 0xFFFF) << 24; |
420 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 415 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; |
421 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; | 416 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { |
422 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { | 417 | /* Enough place after vram */ |
423 | /* Enough place after vram */ | 418 | rdev->mc.gtt_location = tmp; |
424 | rdev->mc.gtt_location = tmp; | 419 | } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) { |
425 | } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) { | 420 | /* Enough place before vram */ |
426 | /* Enough place before vram */ | 421 | rdev->mc.gtt_location = 0; |
422 | } else { | ||
423 | /* Not enough place after or before shrink | ||
424 | * gart size | ||
425 | */ | ||
426 | if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) { | ||
427 | rdev->mc.gtt_location = 0; | 427 | rdev->mc.gtt_location = 0; |
428 | rdev->mc.gtt_size = rdev->mc.vram_location; | ||
428 | } else { | 429 | } else { |
429 | /* Not enough place after or before shrink | 430 | rdev->mc.gtt_location = tmp; |
430 | * gart size | 431 | rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp; |
431 | */ | ||
432 | if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) { | ||
433 | rdev->mc.gtt_location = 0; | ||
434 | rdev->mc.gtt_size = rdev->mc.vram_location; | ||
435 | } else { | ||
436 | rdev->mc.gtt_location = tmp; | ||
437 | rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp; | ||
438 | } | ||
439 | } | 432 | } |
440 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
441 | } else { | ||
442 | rdev->mc.vram_location = 0x00000000UL; | ||
443 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
444 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
445 | } | 433 | } |
434 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
446 | } | 435 | } |
447 | rdev->mc.vram_start = rdev->mc.vram_location; | 436 | rdev->mc.vram_start = rdev->mc.vram_location; |
448 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; | 437 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
449 | rdev->mc.gtt_start = rdev->mc.gtt_location; | 438 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
450 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; | 439 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
451 | /* FIXME: we should enforce default clock in case GPU is not in | 440 | /* FIXME: we should enforce default clock in case GPU is not in |
452 | * default setup | 441 | * default setup |
453 | */ | 442 | */ |
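
The same hunk switches vram_end/gtt_end to inclusive end addresses (start + size - 1), which is what FB_LOCATION and the AGP_TOP/BOT registers expect. A stand-alone sketch with made-up sizes showing how those ends get packed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_location = 0, mc_vram_size = 256ULL << 20;	/* 256 MB VRAM at 0 */
	uint64_t gtt_location = mc_vram_size, gtt_size = 512ULL << 20;	/* GTT right after it */
	uint64_t vram_end = vram_location + mc_vram_size - 1;		/* 0x0FFFFFFF, inclusive */
	uint64_t gtt_end  = gtt_location + gtt_size - 1;		/* 0x2FFFFFFF, inclusive */

	printf("FB_LOCATION top=0x%04llX bot=0x%04llX\n",
	       (unsigned long long)((vram_end >> 24) & 0xFFFF),
	       (unsigned long long)((vram_location >> 24) & 0xFFFF));
	printf("AGP_TOP=0x%06llX AGP_BOT=0x%06llX\n",
	       (unsigned long long)(gtt_end >> 22),
	       (unsigned long long)(gtt_location >> 22));
	return 0;
}
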
@@ -463,6 +452,7 @@ int r600_mc_init(struct radeon_device *rdev) | |||
463 | */ | 452 | */ |
464 | int r600_gpu_soft_reset(struct radeon_device *rdev) | 453 | int r600_gpu_soft_reset(struct radeon_device *rdev) |
465 | { | 454 | { |
455 | struct rv515_mc_save save; | ||
466 | u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | | 456 | u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | |
467 | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | | 457 | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | |
468 | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | | 458 | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | |
@@ -480,13 +470,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
480 | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | | 470 | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | |
481 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); | 471 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); |
482 | u32 srbm_reset = 0; | 472 | u32 srbm_reset = 0; |
473 | u32 tmp; | ||
483 | 474 | ||
475 | dev_info(rdev->dev, "GPU softreset \n"); | ||
476 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | ||
477 | RREG32(R_008010_GRBM_STATUS)); | ||
478 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | ||
479 | RREG32(R_008014_GRBM_STATUS2)); | ||
480 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | ||
481 | RREG32(R_000E50_SRBM_STATUS)); | ||
482 | rv515_mc_stop(rdev, &save); | ||
483 | if (r600_mc_wait_for_idle(rdev)) { | ||
484 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); | ||
485 | } | ||
484 | /* Disable CP parsing/prefetching */ | 486 | /* Disable CP parsing/prefetching */ |
485 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); | 487 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); |
486 | /* Check if any of the rendering block is busy and reset it */ | 488 | /* Check if any of the rendering block is busy and reset it */ |
487 | if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || | 489 | if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || |
488 | (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { | 490 | (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { |
489 | WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) | | 491 | tmp = S_008020_SOFT_RESET_CR(1) | |
490 | S_008020_SOFT_RESET_DB(1) | | 492 | S_008020_SOFT_RESET_DB(1) | |
491 | S_008020_SOFT_RESET_CB(1) | | 493 | S_008020_SOFT_RESET_CB(1) | |
492 | S_008020_SOFT_RESET_PA(1) | | 494 | S_008020_SOFT_RESET_PA(1) | |
@@ -498,14 +500,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
498 | S_008020_SOFT_RESET_TC(1) | | 500 | S_008020_SOFT_RESET_TC(1) | |
499 | S_008020_SOFT_RESET_TA(1) | | 501 | S_008020_SOFT_RESET_TA(1) | |
500 | S_008020_SOFT_RESET_VC(1) | | 502 | S_008020_SOFT_RESET_VC(1) | |
501 | S_008020_SOFT_RESET_VGT(1)); | 503 | S_008020_SOFT_RESET_VGT(1); |
504 | dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
505 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | ||
502 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 506 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
503 | udelay(50); | 507 | udelay(50); |
504 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | 508 | WREG32(R_008020_GRBM_SOFT_RESET, 0); |
505 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 509 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
506 | } | 510 | } |
507 | /* Reset CP (we always reset CP) */ | 511 | /* Reset CP (we always reset CP) */ |
508 | WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1)); | 512 | tmp = S_008020_SOFT_RESET_CP(1); |
513 | dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
514 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | ||
509 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 515 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
510 | udelay(50); | 516 | udelay(50); |
511 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | 517 | WREG32(R_008020_GRBM_SOFT_RESET, 0); |
@@ -533,6 +539,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
533 | srbm_reset |= S_000E60_SOFT_RESET_RLC(1); | 539 | srbm_reset |= S_000E60_SOFT_RESET_RLC(1); |
534 | if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) | 540 | if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
535 | srbm_reset |= S_000E60_SOFT_RESET_SEM(1); | 541 | srbm_reset |= S_000E60_SOFT_RESET_SEM(1); |
542 | if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) | ||
543 | srbm_reset |= S_000E60_SOFT_RESET_BIF(1); | ||
544 | dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); | ||
545 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); | ||
546 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | ||
547 | udelay(50); | ||
548 | WREG32(R_000E60_SRBM_SOFT_RESET, 0); | ||
549 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | ||
536 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); | 550 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); |
537 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | 551 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); |
538 | udelay(50); | 552 | udelay(50); |
@@ -540,6 +554,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
540 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | 554 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); |
541 | /* Wait a little for things to settle down */ | 555 | /* Wait a little for things to settle down */ |
542 | udelay(50); | 556 | udelay(50); |
557 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | ||
558 | RREG32(R_008010_GRBM_STATUS)); | ||
559 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | ||
560 | RREG32(R_008014_GRBM_STATUS2)); | ||
561 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | ||
562 | RREG32(R_000E50_SRBM_STATUS)); | ||
563 | /* After reset we need to reinit the asic as the GPU often ends up in an | ||
564 | * incoherent state. | ||
565 | */ | ||
566 | atom_asic_init(rdev->mode_info.atom_context); | ||
567 | rv515_mc_resume(rdev, &save); | ||
543 | return 0; | 568 | return 0; |
544 | } | 569 | } |
545 | 570 | ||
@@ -833,7 +858,8 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
833 | ((rdev->family) == CHIP_RV630) || | 858 | ((rdev->family) == CHIP_RV630) || |
834 | ((rdev->family) == CHIP_RV610) || | 859 | ((rdev->family) == CHIP_RV610) || |
835 | ((rdev->family) == CHIP_RV620) || | 860 | ((rdev->family) == CHIP_RV620) || |
836 | ((rdev->family) == CHIP_RS780)) { | 861 | ((rdev->family) == CHIP_RS780) || |
862 | ((rdev->family) == CHIP_RS880)) { | ||
837 | WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); | 863 | WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); |
838 | } else { | 864 | } else { |
839 | WREG32(DB_DEBUG, 0); | 865 | WREG32(DB_DEBUG, 0); |
@@ -850,7 +876,8 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
850 | tmp = RREG32(SQ_MS_FIFO_SIZES); | 876 | tmp = RREG32(SQ_MS_FIFO_SIZES); |
851 | if (((rdev->family) == CHIP_RV610) || | 877 | if (((rdev->family) == CHIP_RV610) || |
852 | ((rdev->family) == CHIP_RV620) || | 878 | ((rdev->family) == CHIP_RV620) || |
853 | ((rdev->family) == CHIP_RS780)) { | 879 | ((rdev->family) == CHIP_RS780) || |
880 | ((rdev->family) == CHIP_RS880)) { | ||
854 | tmp = (CACHE_FIFO_SIZE(0xa) | | 881 | tmp = (CACHE_FIFO_SIZE(0xa) | |
855 | FETCH_FIFO_HIWATER(0xa) | | 882 | FETCH_FIFO_HIWATER(0xa) | |
856 | DONE_FIFO_HIWATER(0xe0) | | 883 | DONE_FIFO_HIWATER(0xe0) | |
@@ -893,7 +920,8 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
893 | NUM_ES_STACK_ENTRIES(0)); | 920 | NUM_ES_STACK_ENTRIES(0)); |
894 | } else if (((rdev->family) == CHIP_RV610) || | 921 | } else if (((rdev->family) == CHIP_RV610) || |
895 | ((rdev->family) == CHIP_RV620) || | 922 | ((rdev->family) == CHIP_RV620) || |
896 | ((rdev->family) == CHIP_RS780)) { | 923 | ((rdev->family) == CHIP_RS780) || |
924 | ((rdev->family) == CHIP_RS880)) { | ||
897 | /* no vertex cache */ | 925 | /* no vertex cache */ |
898 | sq_config &= ~VC_ENABLE; | 926 | sq_config &= ~VC_ENABLE; |
899 | 927 | ||
@@ -950,7 +978,8 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
950 | 978 | ||
951 | if (((rdev->family) == CHIP_RV610) || | 979 | if (((rdev->family) == CHIP_RV610) || |
952 | ((rdev->family) == CHIP_RV620) || | 980 | ((rdev->family) == CHIP_RV620) || |
953 | ((rdev->family) == CHIP_RS780)) { | 981 | ((rdev->family) == CHIP_RS780) || |
982 | ((rdev->family) == CHIP_RS880)) { | ||
954 | WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); | 983 | WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); |
955 | } else { | 984 | } else { |
956 | WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); | 985 | WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); |
@@ -976,8 +1005,9 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
976 | tmp = rdev->config.r600.max_pipes * 16; | 1005 | tmp = rdev->config.r600.max_pipes * 16; |
977 | switch (rdev->family) { | 1006 | switch (rdev->family) { |
978 | case CHIP_RV610: | 1007 | case CHIP_RV610: |
979 | case CHIP_RS780: | ||
980 | case CHIP_RV620: | 1008 | case CHIP_RV620: |
1009 | case CHIP_RS780: | ||
1010 | case CHIP_RS880: | ||
981 | tmp += 32; | 1011 | tmp += 32; |
982 | break; | 1012 | break; |
983 | case CHIP_RV670: | 1013 | case CHIP_RV670: |
@@ -1018,8 +1048,9 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1018 | 1048 | ||
1019 | switch (rdev->family) { | 1049 | switch (rdev->family) { |
1020 | case CHIP_RV610: | 1050 | case CHIP_RV610: |
1021 | case CHIP_RS780: | ||
1022 | case CHIP_RV620: | 1051 | case CHIP_RV620: |
1052 | case CHIP_RS780: | ||
1053 | case CHIP_RS880: | ||
1023 | tmp = TC_L2_SIZE(8); | 1054 | tmp = TC_L2_SIZE(8); |
1024 | break; | 1055 | break; |
1025 | case CHIP_RV630: | 1056 | case CHIP_RV630: |
@@ -1241,19 +1272,17 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
1241 | 1272 | ||
1242 | /* Set ring buffer size */ | 1273 | /* Set ring buffer size */ |
1243 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); | 1274 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); |
1275 | tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | ||
1244 | #ifdef __BIG_ENDIAN | 1276 | #ifdef __BIG_ENDIAN |
1245 | WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE | | 1277 | tmp |= BUF_SWAP_32BIT; |
1246 | (drm_order(4096/8) << 8) | rb_bufsz); | ||
1247 | #else | ||
1248 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz); | ||
1249 | #endif | 1278 | #endif |
1279 | WREG32(CP_RB_CNTL, tmp); | ||
1250 | WREG32(CP_SEM_WAIT_TIMER, 0x4); | 1280 | WREG32(CP_SEM_WAIT_TIMER, 0x4); |
1251 | 1281 | ||
1252 | /* Set the write pointer delay */ | 1282 | /* Set the write pointer delay */ |
1253 | WREG32(CP_RB_WPTR_DELAY, 0); | 1283 | WREG32(CP_RB_WPTR_DELAY, 0); |
1254 | 1284 | ||
1255 | /* Initialize the ring buffer's read and write pointers */ | 1285 | /* Initialize the ring buffer's read and write pointers */ |
1256 | tmp = RREG32(CP_RB_CNTL); | ||
1257 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 1286 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
1258 | WREG32(CP_RB_RPTR_WR, 0); | 1287 | WREG32(CP_RB_RPTR_WR, 0); |
1259 | WREG32(CP_RB_WPTR, 0); | 1288 | WREG32(CP_RB_WPTR, 0); |
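Reading note for the hunk above: the resume path now builds the CP_RB_CNTL value once in tmp and reuses it for the later RB_RPTR_WR_ENA write, so the duplicated big-/little-endian WREG32 calls and the RREG32 readback disappear. A minimal sketch of that encoding, assuming the r600d.h field macros and drm_order() (a log2 helper) behave as in the hunk; the helper name and the big_endian parameter are made up for illustration:

	/* Illustrative only: the CP_RB_CNTL value programmed above. */
	static u32 r600_sketch_rb_cntl(u32 ring_size, bool big_endian)
	{
		u32 tmp = RB_NO_UPDATE |
			  (drm_order(RADEON_GPU_PAGE_SIZE / 8) << 8) |	/* rptr report block size */
			  drm_order(ring_size / 8);			/* log2 of ring size / 8 */
		if (big_endian)
			tmp |= BUF_SWAP_32BIT;	/* the driver does this under #ifdef __BIG_ENDIAN */
		return tmp;
	}

The same tmp is then ORed with RB_RPTR_WR_ENA before resetting the read/write pointers, which is why the old RREG32(CP_RB_CNTL) readback could be dropped.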
@@ -1350,32 +1379,47 @@ int r600_ring_test(struct radeon_device *rdev) | |||
1350 | return r; | 1379 | return r; |
1351 | } | 1380 | } |
1352 | 1381 | ||
1353 | /* | 1382 | void r600_wb_disable(struct radeon_device *rdev) |
1354 | * Writeback | 1383 | { |
1355 | */ | 1384 | WREG32(SCRATCH_UMSK, 0); |
1356 | int r600_wb_init(struct radeon_device *rdev) | 1385 | if (rdev->wb.wb_obj) { |
1386 | radeon_object_kunmap(rdev->wb.wb_obj); | ||
1387 | radeon_object_unpin(rdev->wb.wb_obj); | ||
1388 | } | ||
1389 | } | ||
1390 | |||
1391 | void r600_wb_fini(struct radeon_device *rdev) | ||
1392 | { | ||
1393 | r600_wb_disable(rdev); | ||
1394 | if (rdev->wb.wb_obj) { | ||
1395 | radeon_object_unref(&rdev->wb.wb_obj); | ||
1396 | rdev->wb.wb = NULL; | ||
1397 | rdev->wb.wb_obj = NULL; | ||
1398 | } | ||
1399 | } | ||
1400 | |||
1401 | int r600_wb_enable(struct radeon_device *rdev) | ||
1357 | { | 1402 | { |
1358 | int r; | 1403 | int r; |
1359 | 1404 | ||
1360 | if (rdev->wb.wb_obj == NULL) { | 1405 | if (rdev->wb.wb_obj == NULL) { |
1361 | r = radeon_object_create(rdev, NULL, 4096, | 1406 | r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, |
1362 | true, | 1407 | RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); |
1363 | RADEON_GEM_DOMAIN_GTT, | ||
1364 | false, &rdev->wb.wb_obj); | ||
1365 | if (r) { | 1408 | if (r) { |
1366 | DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); | 1409 | dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); |
1367 | return r; | 1410 | return r; |
1368 | } | 1411 | } |
1369 | r = radeon_object_pin(rdev->wb.wb_obj, | 1412 | r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, |
1370 | RADEON_GEM_DOMAIN_GTT, | 1413 | &rdev->wb.gpu_addr); |
1371 | &rdev->wb.gpu_addr); | ||
1372 | if (r) { | 1414 | if (r) { |
1373 | DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); | 1415 | dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); |
1416 | r600_wb_fini(rdev); | ||
1374 | return r; | 1417 | return r; |
1375 | } | 1418 | } |
1376 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 1419 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); |
1377 | if (r) { | 1420 | if (r) { |
1378 | DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); | 1421 | dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); |
1422 | r600_wb_fini(rdev); | ||
1379 | return r; | 1423 | return r; |
1380 | } | 1424 | } |
1381 | } | 1425 | } |
@@ -1386,21 +1430,6 @@ int r600_wb_init(struct radeon_device *rdev) | |||
1386 | return 0; | 1430 | return 0; |
1387 | } | 1431 | } |
1388 | 1432 | ||
1389 | void r600_wb_fini(struct radeon_device *rdev) | ||
1390 | { | ||
1391 | if (rdev->wb.wb_obj) { | ||
1392 | radeon_object_kunmap(rdev->wb.wb_obj); | ||
1393 | radeon_object_unpin(rdev->wb.wb_obj); | ||
1394 | radeon_object_unref(&rdev->wb.wb_obj); | ||
1395 | rdev->wb.wb = NULL; | ||
1396 | rdev->wb.wb_obj = NULL; | ||
1397 | } | ||
1398 | } | ||
1399 | |||
1400 | |||
1401 | /* | ||
1402 | * CS | ||
1403 | */ | ||
1404 | void r600_fence_ring_emit(struct radeon_device *rdev, | 1433 | void r600_fence_ring_emit(struct radeon_device *rdev, |
1405 | struct radeon_fence *fence) | 1434 | struct radeon_fence *fence) |
1406 | { | 1435 | { |
@@ -1424,8 +1453,8 @@ int r600_copy_blit(struct radeon_device *rdev, | |||
1424 | uint64_t src_offset, uint64_t dst_offset, | 1453 | uint64_t src_offset, uint64_t dst_offset, |
1425 | unsigned num_pages, struct radeon_fence *fence) | 1454 | unsigned num_pages, struct radeon_fence *fence) |
1426 | { | 1455 | { |
1427 | r600_blit_prepare_copy(rdev, num_pages * 4096); | 1456 | r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); |
1428 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096); | 1457 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); |
1429 | r600_blit_done_copy(rdev, fence); | 1458 | r600_blit_done_copy(rdev, fence); |
1430 | return 0; | 1459 | return 0; |
1431 | } | 1460 | } |
@@ -1477,11 +1506,14 @@ int r600_startup(struct radeon_device *rdev) | |||
1477 | { | 1506 | { |
1478 | int r; | 1507 | int r; |
1479 | 1508 | ||
1480 | r600_gpu_reset(rdev); | 1509 | r600_mc_program(rdev); |
1481 | r600_mc_resume(rdev); | 1510 | if (rdev->flags & RADEON_IS_AGP) { |
1482 | r = r600_pcie_gart_enable(rdev); | 1511 | r600_agp_enable(rdev); |
1483 | if (r) | 1512 | } else { |
1484 | return r; | 1513 | r = r600_pcie_gart_enable(rdev); |
1514 | if (r) | ||
1515 | return r; | ||
1516 | } | ||
1485 | r600_gpu_init(rdev); | 1517 | r600_gpu_init(rdev); |
1486 | 1518 | ||
1487 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 1519 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
@@ -1500,9 +1532,8 @@ int r600_startup(struct radeon_device *rdev) | |||
1500 | r = r600_cp_resume(rdev); | 1532 | r = r600_cp_resume(rdev); |
1501 | if (r) | 1533 | if (r) |
1502 | return r; | 1534 | return r; |
1503 | r = r600_wb_init(rdev); | 1535 | /* write back buffers are not vital so don't worry about failure */ |
1504 | if (r) | 1536 | r600_wb_enable(rdev); |
1505 | return r; | ||
1506 | return 0; | 1537 | return 0; |
1507 | } | 1538 | } |
1508 | 1539 | ||
@@ -1524,15 +1555,12 @@ int r600_resume(struct radeon_device *rdev) | |||
1524 | { | 1555 | { |
1525 | int r; | 1556 | int r; |
1526 | 1557 | ||
1527 | if (radeon_gpu_reset(rdev)) { | 1558 | /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw, |
1528 | /* FIXME: what do we want to do here ? */ | 1559 | * posting performs the tasks needed to bring the GPU back into |
1529 | } | 1560 | * good shape. |
1561 | */ | ||
1530 | /* post card */ | 1562 | /* post card */ |
1531 | if (rdev->is_atom_bios) { | 1563 | atom_asic_init(rdev->mode_info.atom_context); |
1532 | atom_asic_init(rdev->mode_info.atom_context); | ||
1533 | } else { | ||
1534 | radeon_combios_asic_init(rdev->ddev); | ||
1535 | } | ||
1536 | /* Initialize clocks */ | 1564 | /* Initialize clocks */ |
1537 | r = radeon_clocks_init(rdev); | 1565 | r = radeon_clocks_init(rdev); |
1538 | if (r) { | 1566 | if (r) { |
@@ -1545,7 +1573,7 @@ int r600_resume(struct radeon_device *rdev) | |||
1545 | return r; | 1573 | return r; |
1546 | } | 1574 | } |
1547 | 1575 | ||
1548 | r = radeon_ib_test(rdev); | 1576 | r = r600_ib_test(rdev); |
1549 | if (r) { | 1577 | if (r) { |
1550 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 1578 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
1551 | return r; | 1579 | return r; |
@@ -1553,13 +1581,12 @@ int r600_resume(struct radeon_device *rdev) | |||
1553 | return r; | 1581 | return r; |
1554 | } | 1582 | } |
1555 | 1583 | ||
1556 | |||
1557 | int r600_suspend(struct radeon_device *rdev) | 1584 | int r600_suspend(struct radeon_device *rdev) |
1558 | { | 1585 | { |
1559 | /* FIXME: we should wait for ring to be empty */ | 1586 | /* FIXME: we should wait for ring to be empty */ |
1560 | r600_cp_stop(rdev); | 1587 | r600_cp_stop(rdev); |
1561 | rdev->cp.ready = false; | 1588 | rdev->cp.ready = false; |
1562 | 1589 | r600_wb_disable(rdev); | |
1563 | r600_pcie_gart_disable(rdev); | 1590 | r600_pcie_gart_disable(rdev); |
1564 | /* unpin shaders bo */ | 1591 | /* unpin shaders bo */ |
1565 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 1592 | radeon_object_unpin(rdev->r600_blit.shader_obj); |
@@ -1576,7 +1603,6 @@ int r600_init(struct radeon_device *rdev) | |||
1576 | { | 1603 | { |
1577 | int r; | 1604 | int r; |
1578 | 1605 | ||
1579 | rdev->new_init_path = true; | ||
1580 | r = radeon_dummy_page_init(rdev); | 1606 | r = radeon_dummy_page_init(rdev); |
1581 | if (r) | 1607 | if (r) |
1582 | return r; | 1608 | return r; |
@@ -1593,8 +1619,10 @@ int r600_init(struct radeon_device *rdev) | |||
1593 | return -EINVAL; | 1619 | return -EINVAL; |
1594 | } | 1620 | } |
1595 | /* Must be an ATOMBIOS */ | 1621 | /* Must be an ATOMBIOS */ |
1596 | if (!rdev->is_atom_bios) | 1622 | if (!rdev->is_atom_bios) { |
1623 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | ||
1597 | return -EINVAL; | 1624 | return -EINVAL; |
1625 | } | ||
1598 | r = radeon_atombios_init(rdev); | 1626 | r = radeon_atombios_init(rdev); |
1599 | if (r) | 1627 | if (r) |
1600 | return r; | 1628 | return r; |
@@ -1607,24 +1635,20 @@ int r600_init(struct radeon_device *rdev) | |||
1607 | r600_scratch_init(rdev); | 1635 | r600_scratch_init(rdev); |
1608 | /* Initialize surface registers */ | 1636 | /* Initialize surface registers */ |
1609 | radeon_surface_init(rdev); | 1637 | radeon_surface_init(rdev); |
1638 | /* Initialize clocks */ | ||
1610 | radeon_get_clock_info(rdev->ddev); | 1639 | radeon_get_clock_info(rdev->ddev); |
1611 | r = radeon_clocks_init(rdev); | 1640 | r = radeon_clocks_init(rdev); |
1612 | if (r) | 1641 | if (r) |
1613 | return r; | 1642 | return r; |
1643 | /* Initialize power management */ | ||
1644 | radeon_pm_init(rdev); | ||
1614 | /* Fence driver */ | 1645 | /* Fence driver */ |
1615 | r = radeon_fence_driver_init(rdev); | 1646 | r = radeon_fence_driver_init(rdev); |
1616 | if (r) | 1647 | if (r) |
1617 | return r; | 1648 | return r; |
1618 | r = r600_mc_init(rdev); | 1649 | r = r600_mc_init(rdev); |
1619 | if (r) { | 1650 | if (r) |
1620 | if (rdev->flags & RADEON_IS_AGP) { | ||
1621 | /* Retry with disabling AGP */ | ||
1622 | r600_fini(rdev); | ||
1623 | rdev->flags &= ~RADEON_IS_AGP; | ||
1624 | return r600_init(rdev); | ||
1625 | } | ||
1626 | return r; | 1651 | return r; |
1627 | } | ||
1628 | /* Memory manager */ | 1652 | /* Memory manager */ |
1629 | r = radeon_object_init(rdev); | 1653 | r = radeon_object_init(rdev); |
1630 | if (r) | 1654 | if (r) |
@@ -1653,12 +1677,10 @@ int r600_init(struct radeon_device *rdev) | |||
1653 | 1677 | ||
1654 | r = r600_startup(rdev); | 1678 | r = r600_startup(rdev); |
1655 | if (r) { | 1679 | if (r) { |
1656 | if (rdev->flags & RADEON_IS_AGP) { | 1680 | r600_suspend(rdev); |
1657 | /* Retry with disabling AGP */ | 1681 | r600_wb_fini(rdev); |
1658 | r600_fini(rdev); | 1682 | radeon_ring_fini(rdev); |
1659 | rdev->flags &= ~RADEON_IS_AGP; | 1683 | r600_pcie_gart_fini(rdev); |
1660 | return r600_init(rdev); | ||
1661 | } | ||
1662 | rdev->accel_working = false; | 1684 | rdev->accel_working = false; |
1663 | } | 1685 | } |
1664 | if (rdev->accel_working) { | 1686 | if (rdev->accel_working) { |
@@ -1667,7 +1689,7 @@ int r600_init(struct radeon_device *rdev) | |||
1667 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | 1689 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); |
1668 | rdev->accel_working = false; | 1690 | rdev->accel_working = false; |
1669 | } | 1691 | } |
1670 | r = radeon_ib_test(rdev); | 1692 | r = r600_ib_test(rdev); |
1671 | if (r) { | 1693 | if (r) { |
1672 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 1694 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
1673 | rdev->accel_working = false; | 1695 | rdev->accel_working = false; |
@@ -1683,19 +1705,15 @@ void r600_fini(struct radeon_device *rdev) | |||
1683 | 1705 | ||
1684 | r600_blit_fini(rdev); | 1706 | r600_blit_fini(rdev); |
1685 | radeon_ring_fini(rdev); | 1707 | radeon_ring_fini(rdev); |
1708 | r600_wb_fini(rdev); | ||
1686 | r600_pcie_gart_fini(rdev); | 1709 | r600_pcie_gart_fini(rdev); |
1687 | radeon_gem_fini(rdev); | 1710 | radeon_gem_fini(rdev); |
1688 | radeon_fence_driver_fini(rdev); | 1711 | radeon_fence_driver_fini(rdev); |
1689 | radeon_clocks_fini(rdev); | 1712 | radeon_clocks_fini(rdev); |
1690 | #if __OS_HAS_AGP | ||
1691 | if (rdev->flags & RADEON_IS_AGP) | 1713 | if (rdev->flags & RADEON_IS_AGP) |
1692 | radeon_agp_fini(rdev); | 1714 | radeon_agp_fini(rdev); |
1693 | #endif | ||
1694 | radeon_object_fini(rdev); | 1715 | radeon_object_fini(rdev); |
1695 | if (rdev->is_atom_bios) | 1716 | radeon_atombios_fini(rdev); |
1696 | radeon_atombios_fini(rdev); | ||
1697 | else | ||
1698 | radeon_combios_fini(rdev); | ||
1699 | kfree(rdev->bios); | 1717 | kfree(rdev->bios); |
1700 | rdev->bios = NULL; | 1718 | rdev->bios = NULL; |
1701 | radeon_dummy_page_fini(rdev); | 1719 | radeon_dummy_page_fini(rdev); |
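The largest behavioural change in this file is the writeback rework: r600_wb_init() is split into r600_wb_enable()/r600_wb_disable(), r600_wb_fini() now funnels through the disable path, and r600_startup() treats writeback as optional. A hedged sketch of the resulting lifecycle, drawn only from the hunks above (the wrapper function itself is hypothetical):

	/* Sketch of the intended writeback lifecycle after this rework. */
	static void r600_sketch_wb_lifecycle(struct radeon_device *rdev)
	{
		/* startup: writeback is not vital, so the result is ignored */
		(void)r600_wb_enable(rdev);	/* creates, pins and maps the WB object on first use */

		/* suspend: clear SCRATCH_UMSK and unmap/unpin, but keep the object */
		r600_wb_disable(rdev);

		/* teardown: disable, then drop the last reference to the object */
		r600_wb_fini(rdev);
	}

Note that r600_wb_enable() now cleans up after itself via r600_wb_fini() on pin or map failure, so callers no longer have to unwind a half-initialised writeback buffer.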
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index d988eece0187..5ea432347589 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
@@ -774,11 +774,10 @@ r600_blit_swap(struct drm_device *dev, | |||
774 | { | 774 | { |
775 | drm_radeon_private_t *dev_priv = dev->dev_private; | 775 | drm_radeon_private_t *dev_priv = dev->dev_private; |
776 | int cb_format, tex_format; | 776 | int cb_format, tex_format; |
777 | int sx2, sy2, dx2, dy2; | ||
777 | u64 vb_addr; | 778 | u64 vb_addr; |
778 | u32 *vb; | 779 | u32 *vb; |
779 | 780 | ||
780 | vb = r600_nomm_get_vb_ptr(dev); | ||
781 | |||
782 | if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { | 781 | if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { |
783 | 782 | ||
784 | r600_nomm_put_vb(dev); | 783 | r600_nomm_put_vb(dev); |
@@ -787,19 +786,13 @@ r600_blit_swap(struct drm_device *dev, | |||
787 | return; | 786 | return; |
788 | 787 | ||
789 | set_shaders(dev); | 788 | set_shaders(dev); |
790 | vb = r600_nomm_get_vb_ptr(dev); | ||
791 | } | 789 | } |
790 | vb = r600_nomm_get_vb_ptr(dev); | ||
792 | 791 | ||
793 | if (cpp == 4) { | 792 | sx2 = sx + w; |
794 | cb_format = COLOR_8_8_8_8; | 793 | sy2 = sy + h; |
795 | tex_format = FMT_8_8_8_8; | 794 | dx2 = dx + w; |
796 | } else if (cpp == 2) { | 795 | dy2 = dy + h; |
797 | cb_format = COLOR_5_6_5; | ||
798 | tex_format = FMT_5_6_5; | ||
799 | } else { | ||
800 | cb_format = COLOR_8; | ||
801 | tex_format = FMT_8; | ||
802 | } | ||
803 | 796 | ||
804 | vb[0] = i2f(dx); | 797 | vb[0] = i2f(dx); |
805 | vb[1] = i2f(dy); | 798 | vb[1] = i2f(dy); |
@@ -807,31 +800,46 @@ r600_blit_swap(struct drm_device *dev, | |||
807 | vb[3] = i2f(sy); | 800 | vb[3] = i2f(sy); |
808 | 801 | ||
809 | vb[4] = i2f(dx); | 802 | vb[4] = i2f(dx); |
810 | vb[5] = i2f(dy + h); | 803 | vb[5] = i2f(dy2); |
811 | vb[6] = i2f(sx); | 804 | vb[6] = i2f(sx); |
812 | vb[7] = i2f(sy + h); | 805 | vb[7] = i2f(sy2); |
806 | |||
807 | vb[8] = i2f(dx2); | ||
808 | vb[9] = i2f(dy2); | ||
809 | vb[10] = i2f(sx2); | ||
810 | vb[11] = i2f(sy2); | ||
813 | 811 | ||
814 | vb[8] = i2f(dx + w); | 812 | switch(cpp) { |
815 | vb[9] = i2f(dy + h); | 813 | case 4: |
816 | vb[10] = i2f(sx + w); | 814 | cb_format = COLOR_8_8_8_8; |
817 | vb[11] = i2f(sy + h); | 815 | tex_format = FMT_8_8_8_8; |
816 | break; | ||
817 | case 2: | ||
818 | cb_format = COLOR_5_6_5; | ||
819 | tex_format = FMT_5_6_5; | ||
820 | break; | ||
821 | default: | ||
822 | cb_format = COLOR_8; | ||
823 | tex_format = FMT_8; | ||
824 | break; | ||
825 | } | ||
818 | 826 | ||
819 | /* src */ | 827 | /* src */ |
820 | set_tex_resource(dev_priv, tex_format, | 828 | set_tex_resource(dev_priv, tex_format, |
821 | src_pitch / cpp, | 829 | src_pitch / cpp, |
822 | sy + h, src_pitch / cpp, | 830 | sy2, src_pitch / cpp, |
823 | src_gpu_addr); | 831 | src_gpu_addr); |
824 | 832 | ||
825 | cp_set_surface_sync(dev_priv, | 833 | cp_set_surface_sync(dev_priv, |
826 | R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr); | 834 | R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr); |
827 | 835 | ||
828 | /* dst */ | 836 | /* dst */ |
829 | set_render_target(dev_priv, cb_format, | 837 | set_render_target(dev_priv, cb_format, |
830 | dst_pitch / cpp, dy + h, | 838 | dst_pitch / cpp, dy2, |
831 | dst_gpu_addr); | 839 | dst_gpu_addr); |
832 | 840 | ||
833 | /* scissors */ | 841 | /* scissors */ |
834 | set_scissors(dev_priv, dx, dy, dx + w, dy + h); | 842 | set_scissors(dev_priv, dx, dy, dx2, dy2); |
835 | 843 | ||
836 | /* Vertex buffer setup */ | 844 | /* Vertex buffer setup */ |
837 | vb_addr = dev_priv->gart_buffers_offset + | 845 | vb_addr = dev_priv->gart_buffers_offset + |
@@ -844,7 +852,7 @@ r600_blit_swap(struct drm_device *dev, | |||
844 | 852 | ||
845 | cp_set_surface_sync(dev_priv, | 853 | cp_set_surface_sync(dev_priv, |
846 | R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, | 854 | R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, |
847 | dst_pitch * (dy + h), dst_gpu_addr); | 855 | dst_pitch * dy2, dst_gpu_addr); |
848 | 856 | ||
849 | dev_priv->blit_vb->used += 12 * 4; | 857 | dev_priv->blit_vb->used += 12 * 4; |
850 | } | 858 | } |
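The r600_blit_swap() rework above mostly reorders existing logic: the vertex pointer is fetched after any flush, the rectangle corners are computed once, and the cpp-to-format mapping becomes a switch. A sketch of the 12-dword vertex layout it emits, assuming i2f() is the int-to-float helper already defined in this file (the wrapper name is illustrative):

	/* Three vertices, each (dst_x, dst_y, src_x, src_y) as IEEE floats. */
	static void r600_sketch_blit_vb(u32 *vb, int sx, int sy, int dx, int dy,
					int w, int h)
	{
		int sx2 = sx + w, sy2 = sy + h;
		int dx2 = dx + w, dy2 = dy + h;

		vb[0] = i2f(dx);  vb[1] = i2f(dy);  vb[2]  = i2f(sx);  vb[3]  = i2f(sy);	/* top-left */
		vb[4] = i2f(dx);  vb[5] = i2f(dy2); vb[6]  = i2f(sx);  vb[7]  = i2f(sy2);	/* bottom-left */
		vb[8] = i2f(dx2); vb[9] = i2f(dy2); vb[10] = i2f(sx2); vb[11] = i2f(sy2);	/* bottom-right */
	}

The same sx2/sy2/dx2/dy2 values also size the source texture, the render target, the scissor and the surface-sync ranges, which keeps all of them consistent with the emitted quad.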
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index acae33e2ad51..dbf716e1fbf3 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -368,7 +368,7 @@ set_default_state(struct radeon_device *rdev) | |||
368 | if ((rdev->family == CHIP_RV610) || | 368 | if ((rdev->family == CHIP_RV610) || |
369 | (rdev->family == CHIP_RV620) || | 369 | (rdev->family == CHIP_RV620) || |
370 | (rdev->family == CHIP_RS780) || | 370 | (rdev->family == CHIP_RS780) || |
371 | (rdev->family == CHIP_RS780) || | 371 | (rdev->family == CHIP_RS880) || |
372 | (rdev->family == CHIP_RV710)) | 372 | (rdev->family == CHIP_RV710)) |
373 | sq_config = 0; | 373 | sq_config = 0; |
374 | else | 374 | else |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d28970db6a2d..0d820764f340 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -252,7 +252,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
252 | 252 | ||
253 | header = radeon_get_ib_value(p, h_idx); | 253 | header = radeon_get_ib_value(p, h_idx); |
254 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | 254 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); |
255 | reg = header >> 2; | 255 | reg = CP_PACKET0_GET_REG(header); |
256 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 256 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
257 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 257 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
258 | if (!obj) { | 258 | if (!obj) { |
@@ -466,6 +466,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
466 | for (i = 0; i < pkt->count; i++) { | 466 | for (i = 0; i < pkt->count; i++) { |
467 | reg = start_reg + (4 * i); | 467 | reg = start_reg + (4 * i); |
468 | switch (reg) { | 468 | switch (reg) { |
469 | case SQ_ESGS_RING_BASE: | ||
470 | case SQ_GSVS_RING_BASE: | ||
471 | case SQ_ESTMP_RING_BASE: | ||
472 | case SQ_GSTMP_RING_BASE: | ||
473 | case SQ_VSTMP_RING_BASE: | ||
474 | case SQ_PSTMP_RING_BASE: | ||
475 | case SQ_FBUF_RING_BASE: | ||
476 | case SQ_REDUC_RING_BASE: | ||
477 | case SX_MEMORY_EXPORT_BASE: | ||
478 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
479 | if (r) { | ||
480 | DRM_ERROR("bad SET_CONFIG_REG " | ||
481 | "0x%04X\n", reg); | ||
482 | return -EINVAL; | ||
483 | } | ||
484 | ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
485 | break; | ||
469 | case CP_COHER_BASE: | 486 | case CP_COHER_BASE: |
470 | /* use PACKET3_SURFACE_SYNC */ | 487 | /* use PACKET3_SURFACE_SYNC */ |
471 | return -EINVAL; | 488 | return -EINVAL; |
@@ -487,6 +504,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
487 | reg = start_reg + (4 * i); | 504 | reg = start_reg + (4 * i); |
488 | switch (reg) { | 505 | switch (reg) { |
489 | case DB_DEPTH_BASE: | 506 | case DB_DEPTH_BASE: |
507 | case DB_HTILE_DATA_BASE: | ||
490 | case CB_COLOR0_BASE: | 508 | case CB_COLOR0_BASE: |
491 | case CB_COLOR1_BASE: | 509 | case CB_COLOR1_BASE: |
492 | case CB_COLOR2_BASE: | 510 | case CB_COLOR2_BASE: |
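For context on the new SET_CONFIG_REG cases: the SQ ring base registers and SX_MEMORY_EXPORT_BASE hold 256-byte-aligned GPU addresses, so the checker now rejects writes that carry no relocation and otherwise folds the relocated buffer offset into the packet value, just as DB_DEPTH_BASE and the CB_COLORn_BASE registers are handled. A tiny illustrative helper (not in the commit) for the address-to-register conversion:

	/* Convert a buffer object's GPU address into the 256-byte-aligned
	 * value these base registers expect. */
	static u32 r600_sketch_base_reg(u64 gpu_offset)
	{
		return (u32)((gpu_offset >> 8) & 0xffffffff);	/* e.g. 0x10000000 -> 0x00100000 */
	}

The checker adds this value to whatever offset userspace supplied in the command stream, so the untrusted offset ends up relative to the validated buffer object.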
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 4a9028a85c9b..27ab428b149b 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -119,6 +119,7 @@ | |||
119 | #define DB_DEBUG 0x9830 | 119 | #define DB_DEBUG 0x9830 |
120 | #define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) | 120 | #define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) |
121 | #define DB_DEPTH_BASE 0x2800C | 121 | #define DB_DEPTH_BASE 0x2800C |
122 | #define DB_HTILE_DATA_BASE 0x28014 | ||
122 | #define DB_WATERMARKS 0x9838 | 123 | #define DB_WATERMARKS 0x9838 |
123 | #define DEPTH_FREE(x) ((x) << 0) | 124 | #define DEPTH_FREE(x) ((x) << 0) |
124 | #define DEPTH_FLUSH(x) ((x) << 5) | 125 | #define DEPTH_FLUSH(x) ((x) << 5) |
@@ -171,6 +172,14 @@ | |||
171 | #define SQ_STACK_RESOURCE_MGMT_2 0x8c14 | 172 | #define SQ_STACK_RESOURCE_MGMT_2 0x8c14 |
172 | # define NUM_GS_STACK_ENTRIES(x) ((x) << 0) | 173 | # define NUM_GS_STACK_ENTRIES(x) ((x) << 0) |
173 | # define NUM_ES_STACK_ENTRIES(x) ((x) << 16) | 174 | # define NUM_ES_STACK_ENTRIES(x) ((x) << 16) |
175 | #define SQ_ESGS_RING_BASE 0x8c40 | ||
176 | #define SQ_GSVS_RING_BASE 0x8c48 | ||
177 | #define SQ_ESTMP_RING_BASE 0x8c50 | ||
178 | #define SQ_GSTMP_RING_BASE 0x8c58 | ||
179 | #define SQ_VSTMP_RING_BASE 0x8c60 | ||
180 | #define SQ_PSTMP_RING_BASE 0x8c68 | ||
181 | #define SQ_FBUF_RING_BASE 0x8c70 | ||
182 | #define SQ_REDUC_RING_BASE 0x8c78 | ||
174 | 183 | ||
175 | #define GRBM_CNTL 0x8000 | 184 | #define GRBM_CNTL 0x8000 |
176 | # define GRBM_READ_TIMEOUT(x) ((x) << 0) | 185 | # define GRBM_READ_TIMEOUT(x) ((x) << 0) |
@@ -271,6 +280,10 @@ | |||
271 | #define PCIE_PORT_INDEX 0x0038 | 280 | #define PCIE_PORT_INDEX 0x0038 |
272 | #define PCIE_PORT_DATA 0x003C | 281 | #define PCIE_PORT_DATA 0x003C |
273 | 282 | ||
283 | #define CHMAP 0x2004 | ||
284 | #define NOOFCHAN_SHIFT 12 | ||
285 | #define NOOFCHAN_MASK 0x00003000 | ||
286 | |||
274 | #define RAMCFG 0x2408 | 287 | #define RAMCFG 0x2408 |
275 | #define NOOFBANK_SHIFT 0 | 288 | #define NOOFBANK_SHIFT 0 |
276 | #define NOOFBANK_MASK 0x00000001 | 289 | #define NOOFBANK_MASK 0x00000001 |
@@ -352,6 +365,7 @@ | |||
352 | 365 | ||
353 | 366 | ||
354 | #define SX_MISC 0x28350 | 367 | #define SX_MISC 0x28350 |
368 | #define SX_MEMORY_EXPORT_BASE 0x9010 | ||
355 | #define SX_DEBUG_1 0x9054 | 369 | #define SX_DEBUG_1 0x9054 |
356 | #define SMX_EVENT_RELEASE (1 << 0) | 370 | #define SMX_EVENT_RELEASE (1 << 0) |
357 | #define ENABLE_NEW_SMX_ADDRESS (1 << 16) | 371 | #define ENABLE_NEW_SMX_ADDRESS (1 << 16) |
@@ -643,6 +657,7 @@ | |||
643 | #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) | 657 | #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) |
644 | #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) | 658 | #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) |
645 | #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) | 659 | #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) |
660 | #define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1) | ||
646 | #define R_000E60_SRBM_SOFT_RESET 0x0E60 | 661 | #define R_000E60_SRBM_SOFT_RESET 0x0E60 |
647 | #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) | 662 | #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) |
648 | #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) | 663 | #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) |
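The new CHMAP/NOOFCHAN definitions are presumably consumed by the memory-controller setup to derive the number of memory channels. The decode below is an assumption for illustration only; the 1/2/4/8 mapping and the helper are not part of this diff:

	/* Hypothetical use of the new CHMAP field. */
	static unsigned r600_sketch_numchan(struct radeon_device *rdev)
	{
		switch ((RREG32(CHMAP) & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
		case 0:
		default:
			return 1;
		case 1:
			return 2;
		case 2:
			return 4;
		case 3:
			return 8;
		}
	}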
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 950b346e343f..757f5cd37744 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -139,6 +139,10 @@ struct radeon_clock { | |||
139 | uint32_t default_sclk; | 139 | uint32_t default_sclk; |
140 | }; | 140 | }; |
141 | 141 | ||
142 | /* | ||
143 | * Power management | ||
144 | */ | ||
145 | int radeon_pm_init(struct radeon_device *rdev); | ||
142 | 146 | ||
143 | /* | 147 | /* |
144 | * Fences. | 148 | * Fences. |
@@ -276,6 +280,8 @@ union radeon_gart_table { | |||
276 | struct radeon_gart_table_vram vram; | 280 | struct radeon_gart_table_vram vram; |
277 | }; | 281 | }; |
278 | 282 | ||
283 | #define RADEON_GPU_PAGE_SIZE 4096 | ||
284 | |||
279 | struct radeon_gart { | 285 | struct radeon_gart { |
280 | dma_addr_t table_addr; | 286 | dma_addr_t table_addr; |
281 | unsigned num_gpu_pages; | 287 | unsigned num_gpu_pages; |
@@ -590,18 +596,8 @@ struct radeon_asic { | |||
590 | void (*fini)(struct radeon_device *rdev); | 596 | void (*fini)(struct radeon_device *rdev); |
591 | int (*resume)(struct radeon_device *rdev); | 597 | int (*resume)(struct radeon_device *rdev); |
592 | int (*suspend)(struct radeon_device *rdev); | 598 | int (*suspend)(struct radeon_device *rdev); |
593 | void (*errata)(struct radeon_device *rdev); | ||
594 | void (*vram_info)(struct radeon_device *rdev); | ||
595 | void (*vga_set_state)(struct radeon_device *rdev, bool state); | 599 | void (*vga_set_state)(struct radeon_device *rdev, bool state); |
596 | int (*gpu_reset)(struct radeon_device *rdev); | 600 | int (*gpu_reset)(struct radeon_device *rdev); |
597 | int (*mc_init)(struct radeon_device *rdev); | ||
598 | void (*mc_fini)(struct radeon_device *rdev); | ||
599 | int (*wb_init)(struct radeon_device *rdev); | ||
600 | void (*wb_fini)(struct radeon_device *rdev); | ||
601 | int (*gart_init)(struct radeon_device *rdev); | ||
602 | void (*gart_fini)(struct radeon_device *rdev); | ||
603 | int (*gart_enable)(struct radeon_device *rdev); | ||
604 | void (*gart_disable)(struct radeon_device *rdev); | ||
605 | void (*gart_tlb_flush)(struct radeon_device *rdev); | 601 | void (*gart_tlb_flush)(struct radeon_device *rdev); |
606 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); | 602 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); |
607 | int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); | 603 | int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); |
@@ -611,7 +607,6 @@ struct radeon_asic { | |||
611 | void (*ring_start)(struct radeon_device *rdev); | 607 | void (*ring_start)(struct radeon_device *rdev); |
612 | int (*ring_test)(struct radeon_device *rdev); | 608 | int (*ring_test)(struct radeon_device *rdev); |
613 | void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); | 609 | void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); |
614 | int (*ib_test)(struct radeon_device *rdev); | ||
615 | int (*irq_set)(struct radeon_device *rdev); | 610 | int (*irq_set)(struct radeon_device *rdev); |
616 | int (*irq_process)(struct radeon_device *rdev); | 611 | int (*irq_process)(struct radeon_device *rdev); |
617 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); | 612 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); |
@@ -632,7 +627,9 @@ struct radeon_asic { | |||
632 | uint64_t dst_offset, | 627 | uint64_t dst_offset, |
633 | unsigned num_pages, | 628 | unsigned num_pages, |
634 | struct radeon_fence *fence); | 629 | struct radeon_fence *fence); |
630 | uint32_t (*get_engine_clock)(struct radeon_device *rdev); | ||
635 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); | 631 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); |
632 | uint32_t (*get_memory_clock)(struct radeon_device *rdev); | ||
636 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); | 633 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); |
637 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); | 634 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); |
638 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); | 635 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); |
@@ -789,12 +786,12 @@ struct radeon_device { | |||
789 | bool shutdown; | 786 | bool shutdown; |
790 | bool suspend; | 787 | bool suspend; |
791 | bool need_dma32; | 788 | bool need_dma32; |
792 | bool new_init_path; | ||
793 | bool accel_working; | 789 | bool accel_working; |
794 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | 790 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; |
795 | const struct firmware *me_fw; /* all family ME firmware */ | 791 | const struct firmware *me_fw; /* all family ME firmware */ |
796 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 792 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
797 | struct r600_blit r600_blit; | 793 | struct r600_blit r600_blit; |
794 | int msi_enabled; /* msi enabled */ | ||
798 | }; | 795 | }; |
799 | 796 | ||
800 | int radeon_device_init(struct radeon_device *rdev, | 797 | int radeon_device_init(struct radeon_device *rdev, |
@@ -949,28 +946,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
949 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) | 946 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) |
950 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) | 947 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) |
951 | #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) | 948 | #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) |
952 | #define radeon_errata(rdev) (rdev)->asic->errata((rdev)) | ||
953 | #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) | ||
954 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) | 949 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
955 | #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) | 950 | #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) |
956 | #define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) | ||
957 | #define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) | ||
958 | #define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev)) | ||
959 | #define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev)) | ||
960 | #define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev)) | ||
961 | #define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev)) | ||
962 | #define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev)) | ||
963 | #define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev)) | ||
964 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) | 951 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) |
965 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) | 952 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) |
966 | #define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize)) | ||
967 | #define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev)) | ||
968 | #define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev)) | ||
969 | #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) | 953 | #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) |
970 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) | 954 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) |
971 | #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) | 955 | #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) |
972 | #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) | 956 | #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) |
973 | #define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev)) | ||
974 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) | 957 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) |
975 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) | 958 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) |
976 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) | 959 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) |
@@ -978,7 +961,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
978 | #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) | 961 | #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) |
979 | #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) | 962 | #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) |
980 | #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) | 963 | #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) |
964 | #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) | ||
981 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 965 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
966 | #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) | ||
982 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) | 967 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) |
983 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) | 968 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
984 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) | 969 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
@@ -996,6 +981,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev); | |||
996 | extern void radeon_scratch_init(struct radeon_device *rdev); | 981 | extern void radeon_scratch_init(struct radeon_device *rdev); |
997 | extern void radeon_surface_init(struct radeon_device *rdev); | 982 | extern void radeon_surface_init(struct radeon_device *rdev); |
998 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 983 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
984 | extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); | ||
999 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | 985 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); |
1000 | 986 | ||
1001 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 987 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ |
@@ -1031,11 +1017,27 @@ extern int r100_wb_init(struct radeon_device *rdev); | |||
1031 | extern void r100_hdp_reset(struct radeon_device *rdev); | 1017 | extern void r100_hdp_reset(struct radeon_device *rdev); |
1032 | extern int r100_rb2d_reset(struct radeon_device *rdev); | 1018 | extern int r100_rb2d_reset(struct radeon_device *rdev); |
1033 | extern int r100_cp_reset(struct radeon_device *rdev); | 1019 | extern int r100_cp_reset(struct radeon_device *rdev); |
1020 | extern void r100_vga_render_disable(struct radeon_device *rdev); | ||
1021 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
1022 | struct radeon_cs_packet *pkt, | ||
1023 | struct radeon_object *robj); | ||
1024 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
1025 | struct radeon_cs_packet *pkt, | ||
1026 | const unsigned *auth, unsigned n, | ||
1027 | radeon_packet0_check_t check); | ||
1028 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
1029 | struct radeon_cs_packet *pkt, | ||
1030 | unsigned idx); | ||
1031 | |||
1032 | /* rv200,rv250,rv280 */ | ||
1033 | extern void r200_set_safe_registers(struct radeon_device *rdev); | ||
1034 | 1034 | ||
1035 | /* r300,r350,rv350,rv370,rv380 */ | 1035 | /* r300,r350,rv350,rv370,rv380 */ |
1036 | extern void r300_set_reg_safe(struct radeon_device *rdev); | 1036 | extern void r300_set_reg_safe(struct radeon_device *rdev); |
1037 | extern void r300_mc_program(struct radeon_device *rdev); | 1037 | extern void r300_mc_program(struct radeon_device *rdev); |
1038 | extern void r300_vram_info(struct radeon_device *rdev); | 1038 | extern void r300_vram_info(struct radeon_device *rdev); |
1039 | extern void r300_clock_startup(struct radeon_device *rdev); | ||
1040 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
1039 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); | 1041 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); |
1040 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | 1042 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); |
1041 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | 1043 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); |
@@ -1066,6 +1068,18 @@ extern void rv515_clock_startup(struct radeon_device *rdev); | |||
1066 | extern void rv515_debugfs(struct radeon_device *rdev); | 1068 | extern void rv515_debugfs(struct radeon_device *rdev); |
1067 | extern int rv515_suspend(struct radeon_device *rdev); | 1069 | extern int rv515_suspend(struct radeon_device *rdev); |
1068 | 1070 | ||
1071 | /* rs400 */ | ||
1072 | extern int rs400_gart_init(struct radeon_device *rdev); | ||
1073 | extern int rs400_gart_enable(struct radeon_device *rdev); | ||
1074 | extern void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
1075 | extern void rs400_gart_disable(struct radeon_device *rdev); | ||
1076 | extern void rs400_gart_fini(struct radeon_device *rdev); | ||
1077 | |||
1078 | /* rs600 */ | ||
1079 | extern void rs600_set_safe_registers(struct radeon_device *rdev); | ||
1080 | extern int rs600_irq_set(struct radeon_device *rdev); | ||
1081 | extern void rs600_irq_disable(struct radeon_device *rdev); | ||
1082 | |||
1069 | /* rs690, rs740 */ | 1083 | /* rs690, rs740 */ |
1070 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, | 1084 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, |
1071 | struct drm_display_mode *mode1, | 1085 | struct drm_display_mode *mode1, |
@@ -1083,8 +1097,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); | |||
1083 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); | 1097 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); |
1084 | extern int r600_ib_test(struct radeon_device *rdev); | 1098 | extern int r600_ib_test(struct radeon_device *rdev); |
1085 | extern int r600_ring_test(struct radeon_device *rdev); | 1099 | extern int r600_ring_test(struct radeon_device *rdev); |
1086 | extern int r600_wb_init(struct radeon_device *rdev); | ||
1087 | extern void r600_wb_fini(struct radeon_device *rdev); | 1100 | extern void r600_wb_fini(struct radeon_device *rdev); |
1101 | extern int r600_wb_enable(struct radeon_device *rdev); | ||
1102 | extern void r600_wb_disable(struct radeon_device *rdev); | ||
1088 | extern void r600_scratch_init(struct radeon_device *rdev); | 1103 | extern void r600_scratch_init(struct radeon_device *rdev); |
1089 | extern int r600_blit_init(struct radeon_device *rdev); | 1104 | extern int r600_blit_init(struct radeon_device *rdev); |
1090 | extern void r600_blit_fini(struct radeon_device *rdev); | 1105 | extern void r600_blit_fini(struct radeon_device *rdev); |
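Alongside the slimmed-down asic vtable, radeon.h gains get_engine_clock/get_memory_clock hooks and matching wrapper macros. A hedged usage sketch; the reporting function is hypothetical, and only the macros and the fact that the legacy asic tables leave get_memory_clock NULL come from the diff:

	/* Hypothetical consumer of the new clock-query hooks. */
	static void radeon_sketch_report_clocks(struct radeon_device *rdev)
	{
		DRM_INFO("engine clock: %u\n", radeon_get_engine_clock(rdev));

		/* get_memory_clock is NULL on legacy asics, so guard the call */
		if (rdev->asic->get_memory_clock)
			DRM_INFO("memory clock: %u\n", radeon_get_memory_clock(rdev));
	}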
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c8a4e7b5663d..c18fbee387d7 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -31,38 +31,30 @@ | |||
31 | /* | 31 | /* |
32 | * common functions | 32 | * common functions |
33 | */ | 33 | */ |
34 | uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev); | ||
34 | void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); | 35 | void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); |
35 | void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); | 36 | void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); |
36 | 37 | ||
38 | uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev); | ||
37 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); | 39 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); |
40 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev); | ||
38 | void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock); | 41 | void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock); |
39 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | 42 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); |
40 | 43 | ||
41 | /* | 44 | /* |
42 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 45 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
43 | */ | 46 | */ |
44 | int r100_init(struct radeon_device *rdev); | 47 | extern int r100_init(struct radeon_device *rdev); |
45 | int r200_init(struct radeon_device *rdev); | 48 | extern void r100_fini(struct radeon_device *rdev); |
49 | extern int r100_suspend(struct radeon_device *rdev); | ||
50 | extern int r100_resume(struct radeon_device *rdev); | ||
46 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); | 51 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
47 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 52 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
48 | void r100_errata(struct radeon_device *rdev); | ||
49 | void r100_vram_info(struct radeon_device *rdev); | ||
50 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 53 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
51 | int r100_gpu_reset(struct radeon_device *rdev); | 54 | int r100_gpu_reset(struct radeon_device *rdev); |
52 | int r100_mc_init(struct radeon_device *rdev); | ||
53 | void r100_mc_fini(struct radeon_device *rdev); | ||
54 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 55 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
55 | int r100_wb_init(struct radeon_device *rdev); | ||
56 | void r100_wb_fini(struct radeon_device *rdev); | ||
57 | int r100_pci_gart_init(struct radeon_device *rdev); | ||
58 | void r100_pci_gart_fini(struct radeon_device *rdev); | ||
59 | int r100_pci_gart_enable(struct radeon_device *rdev); | ||
60 | void r100_pci_gart_disable(struct radeon_device *rdev); | ||
61 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 56 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
62 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 57 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
63 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
64 | void r100_cp_fini(struct radeon_device *rdev); | ||
65 | void r100_cp_disable(struct radeon_device *rdev); | ||
66 | void r100_cp_commit(struct radeon_device *rdev); | 58 | void r100_cp_commit(struct radeon_device *rdev); |
67 | void r100_ring_start(struct radeon_device *rdev); | 59 | void r100_ring_start(struct radeon_device *rdev); |
68 | int r100_irq_set(struct radeon_device *rdev); | 60 | int r100_irq_set(struct radeon_device *rdev); |
@@ -83,33 +75,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
83 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | 75 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); |
84 | void r100_bandwidth_update(struct radeon_device *rdev); | 76 | void r100_bandwidth_update(struct radeon_device *rdev); |
85 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 77 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
86 | int r100_ib_test(struct radeon_device *rdev); | ||
87 | int r100_ring_test(struct radeon_device *rdev); | 78 | int r100_ring_test(struct radeon_device *rdev); |
88 | 79 | ||
89 | static struct radeon_asic r100_asic = { | 80 | static struct radeon_asic r100_asic = { |
90 | .init = &r100_init, | 81 | .init = &r100_init, |
91 | .errata = &r100_errata, | 82 | .fini = &r100_fini, |
92 | .vram_info = &r100_vram_info, | 83 | .suspend = &r100_suspend, |
84 | .resume = &r100_resume, | ||
93 | .vga_set_state = &r100_vga_set_state, | 85 | .vga_set_state = &r100_vga_set_state, |
94 | .gpu_reset = &r100_gpu_reset, | 86 | .gpu_reset = &r100_gpu_reset, |
95 | .mc_init = &r100_mc_init, | ||
96 | .mc_fini = &r100_mc_fini, | ||
97 | .wb_init = &r100_wb_init, | ||
98 | .wb_fini = &r100_wb_fini, | ||
99 | .gart_init = &r100_pci_gart_init, | ||
100 | .gart_fini = &r100_pci_gart_fini, | ||
101 | .gart_enable = &r100_pci_gart_enable, | ||
102 | .gart_disable = &r100_pci_gart_disable, | ||
103 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 87 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
104 | .gart_set_page = &r100_pci_gart_set_page, | 88 | .gart_set_page = &r100_pci_gart_set_page, |
105 | .cp_init = &r100_cp_init, | ||
106 | .cp_fini = &r100_cp_fini, | ||
107 | .cp_disable = &r100_cp_disable, | ||
108 | .cp_commit = &r100_cp_commit, | 89 | .cp_commit = &r100_cp_commit, |
109 | .ring_start = &r100_ring_start, | 90 | .ring_start = &r100_ring_start, |
110 | .ring_test = &r100_ring_test, | 91 | .ring_test = &r100_ring_test, |
111 | .ring_ib_execute = &r100_ring_ib_execute, | 92 | .ring_ib_execute = &r100_ring_ib_execute, |
112 | .ib_test = &r100_ib_test, | ||
113 | .irq_set = &r100_irq_set, | 93 | .irq_set = &r100_irq_set, |
114 | .irq_process = &r100_irq_process, | 94 | .irq_process = &r100_irq_process, |
115 | .get_vblank_counter = &r100_get_vblank_counter, | 95 | .get_vblank_counter = &r100_get_vblank_counter, |
@@ -118,7 +98,9 @@ static struct radeon_asic r100_asic = { | |||
118 | .copy_blit = &r100_copy_blit, | 98 | .copy_blit = &r100_copy_blit, |
119 | .copy_dma = NULL, | 99 | .copy_dma = NULL, |
120 | .copy = &r100_copy_blit, | 100 | .copy = &r100_copy_blit, |
101 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
121 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 102 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
103 | .get_memory_clock = NULL, | ||
122 | .set_memory_clock = NULL, | 104 | .set_memory_clock = NULL, |
123 | .set_pcie_lanes = NULL, | 105 | .set_pcie_lanes = NULL, |
124 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 106 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
@@ -131,55 +113,38 @@ static struct radeon_asic r100_asic = { | |||
131 | /* | 113 | /* |
132 | * r300,r350,rv350,rv380 | 114 | * r300,r350,rv350,rv380 |
133 | */ | 115 | */ |
134 | int r300_init(struct radeon_device *rdev); | 116 | extern int r300_init(struct radeon_device *rdev); |
135 | void r300_errata(struct radeon_device *rdev); | 117 | extern void r300_fini(struct radeon_device *rdev); |
136 | void r300_vram_info(struct radeon_device *rdev); | 118 | extern int r300_suspend(struct radeon_device *rdev); |
137 | int r300_gpu_reset(struct radeon_device *rdev); | 119 | extern int r300_resume(struct radeon_device *rdev); |
138 | int r300_mc_init(struct radeon_device *rdev); | 120 | extern int r300_gpu_reset(struct radeon_device *rdev); |
139 | void r300_mc_fini(struct radeon_device *rdev); | 121 | extern void r300_ring_start(struct radeon_device *rdev); |
140 | void r300_ring_start(struct radeon_device *rdev); | 122 | extern void r300_fence_ring_emit(struct radeon_device *rdev, |
141 | void r300_fence_ring_emit(struct radeon_device *rdev, | 123 | struct radeon_fence *fence); |
142 | struct radeon_fence *fence); | 124 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
143 | int r300_cs_parse(struct radeon_cs_parser *p); | 125 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
144 | int rv370_pcie_gart_init(struct radeon_device *rdev); | 126 | extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
145 | void rv370_pcie_gart_fini(struct radeon_device *rdev); | 127 | extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
146 | int rv370_pcie_gart_enable(struct radeon_device *rdev); | 128 | extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
147 | void rv370_pcie_gart_disable(struct radeon_device *rdev); | 129 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
148 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 130 | extern int r300_copy_dma(struct radeon_device *rdev, |
149 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 131 | uint64_t src_offset, |
150 | uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | 132 | uint64_t dst_offset, |
151 | void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 133 | unsigned num_pages, |
152 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 134 | struct radeon_fence *fence); |
153 | int r300_copy_dma(struct radeon_device *rdev, | ||
154 | uint64_t src_offset, | ||
155 | uint64_t dst_offset, | ||
156 | unsigned num_pages, | ||
157 | struct radeon_fence *fence); | ||
158 | |||
159 | static struct radeon_asic r300_asic = { | 135 | static struct radeon_asic r300_asic = { |
160 | .init = &r300_init, | 136 | .init = &r300_init, |
161 | .errata = &r300_errata, | 137 | .fini = &r300_fini, |
162 | .vram_info = &r300_vram_info, | 138 | .suspend = &r300_suspend, |
139 | .resume = &r300_resume, | ||
163 | .vga_set_state = &r100_vga_set_state, | 140 | .vga_set_state = &r100_vga_set_state, |
164 | .gpu_reset = &r300_gpu_reset, | 141 | .gpu_reset = &r300_gpu_reset, |
165 | .mc_init = &r300_mc_init, | ||
166 | .mc_fini = &r300_mc_fini, | ||
167 | .wb_init = &r100_wb_init, | ||
168 | .wb_fini = &r100_wb_fini, | ||
169 | .gart_init = &r100_pci_gart_init, | ||
170 | .gart_fini = &r100_pci_gart_fini, | ||
171 | .gart_enable = &r100_pci_gart_enable, | ||
172 | .gart_disable = &r100_pci_gart_disable, | ||
173 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 142 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
174 | .gart_set_page = &r100_pci_gart_set_page, | 143 | .gart_set_page = &r100_pci_gart_set_page, |
175 | .cp_init = &r100_cp_init, | ||
176 | .cp_fini = &r100_cp_fini, | ||
177 | .cp_disable = &r100_cp_disable, | ||
178 | .cp_commit = &r100_cp_commit, | 144 | .cp_commit = &r100_cp_commit, |
179 | .ring_start = &r300_ring_start, | 145 | .ring_start = &r300_ring_start, |
180 | .ring_test = &r100_ring_test, | 146 | .ring_test = &r100_ring_test, |
181 | .ring_ib_execute = &r100_ring_ib_execute, | 147 | .ring_ib_execute = &r100_ring_ib_execute, |
182 | .ib_test = &r100_ib_test, | ||
183 | .irq_set = &r100_irq_set, | 148 | .irq_set = &r100_irq_set, |
184 | .irq_process = &r100_irq_process, | 149 | .irq_process = &r100_irq_process, |
185 | .get_vblank_counter = &r100_get_vblank_counter, | 150 | .get_vblank_counter = &r100_get_vblank_counter, |
@@ -188,7 +153,9 @@ static struct radeon_asic r300_asic = { | |||
188 | .copy_blit = &r100_copy_blit, | 153 | .copy_blit = &r100_copy_blit, |
189 | .copy_dma = &r300_copy_dma, | 154 | .copy_dma = &r300_copy_dma, |
190 | .copy = &r100_copy_blit, | 155 | .copy = &r100_copy_blit, |
156 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
191 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 157 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
158 | .get_memory_clock = NULL, | ||
192 | .set_memory_clock = NULL, | 159 | .set_memory_clock = NULL, |
193 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 160 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
194 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 161 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
@@ -209,26 +176,14 @@ static struct radeon_asic r420_asic = { | |||
209 | .fini = &r420_fini, | 176 | .fini = &r420_fini, |
210 | .suspend = &r420_suspend, | 177 | .suspend = &r420_suspend, |
211 | .resume = &r420_resume, | 178 | .resume = &r420_resume, |
212 | .errata = NULL, | ||
213 | .vram_info = NULL, | ||
214 | .vga_set_state = &r100_vga_set_state, | 179 | .vga_set_state = &r100_vga_set_state, |
215 | .gpu_reset = &r300_gpu_reset, | 180 | .gpu_reset = &r300_gpu_reset, |
216 | .mc_init = NULL, | ||
217 | .mc_fini = NULL, | ||
218 | .wb_init = NULL, | ||
219 | .wb_fini = NULL, | ||
220 | .gart_enable = NULL, | ||
221 | .gart_disable = NULL, | ||
222 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 181 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
223 | .gart_set_page = &rv370_pcie_gart_set_page, | 182 | .gart_set_page = &rv370_pcie_gart_set_page, |
224 | .cp_init = NULL, | ||
225 | .cp_fini = NULL, | ||
226 | .cp_disable = NULL, | ||
227 | .cp_commit = &r100_cp_commit, | 183 | .cp_commit = &r100_cp_commit, |
228 | .ring_start = &r300_ring_start, | 184 | .ring_start = &r300_ring_start, |
229 | .ring_test = &r100_ring_test, | 185 | .ring_test = &r100_ring_test, |
230 | .ring_ib_execute = &r100_ring_ib_execute, | 186 | .ring_ib_execute = &r100_ring_ib_execute, |
231 | .ib_test = NULL, | ||
232 | .irq_set = &r100_irq_set, | 187 | .irq_set = &r100_irq_set, |
233 | .irq_process = &r100_irq_process, | 188 | .irq_process = &r100_irq_process, |
234 | .get_vblank_counter = &r100_get_vblank_counter, | 189 | .get_vblank_counter = &r100_get_vblank_counter, |
@@ -237,7 +192,9 @@ static struct radeon_asic r420_asic = { | |||
237 | .copy_blit = &r100_copy_blit, | 192 | .copy_blit = &r100_copy_blit, |
238 | .copy_dma = &r300_copy_dma, | 193 | .copy_dma = &r300_copy_dma, |
239 | .copy = &r100_copy_blit, | 194 | .copy = &r100_copy_blit, |
195 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
240 | .set_engine_clock = &radeon_atom_set_engine_clock, | 196 | .set_engine_clock = &radeon_atom_set_engine_clock, |
197 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
241 | .set_memory_clock = &radeon_atom_set_memory_clock, | 198 | .set_memory_clock = &radeon_atom_set_memory_clock, |
242 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 199 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
243 | .set_clock_gating = &radeon_atom_set_clock_gating, | 200 | .set_clock_gating = &radeon_atom_set_clock_gating, |
@@ -250,42 +207,27 @@ static struct radeon_asic r420_asic = { | |||
250 | /* | 207 | /* |
251 | * rs400,rs480 | 208 | * rs400,rs480 |
252 | */ | 209 | */ |
253 | void rs400_errata(struct radeon_device *rdev); | 210 | extern int rs400_init(struct radeon_device *rdev); |
254 | void rs400_vram_info(struct radeon_device *rdev); | 211 | extern void rs400_fini(struct radeon_device *rdev); |
255 | int rs400_mc_init(struct radeon_device *rdev); | 212 | extern int rs400_suspend(struct radeon_device *rdev); |
256 | void rs400_mc_fini(struct radeon_device *rdev); | 213 | extern int rs400_resume(struct radeon_device *rdev); |
257 | int rs400_gart_init(struct radeon_device *rdev); | ||
258 | void rs400_gart_fini(struct radeon_device *rdev); | ||
259 | int rs400_gart_enable(struct radeon_device *rdev); | ||
260 | void rs400_gart_disable(struct radeon_device *rdev); | ||
261 | void rs400_gart_tlb_flush(struct radeon_device *rdev); | 214 | void rs400_gart_tlb_flush(struct radeon_device *rdev); |
262 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 215 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
263 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 216 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
264 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 217 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
265 | static struct radeon_asic rs400_asic = { | 218 | static struct radeon_asic rs400_asic = { |
266 | .init = &r300_init, | 219 | .init = &rs400_init, |
267 | .errata = &rs400_errata, | 220 | .fini = &rs400_fini, |
268 | .vram_info = &rs400_vram_info, | 221 | .suspend = &rs400_suspend, |
222 | .resume = &rs400_resume, | ||
269 | .vga_set_state = &r100_vga_set_state, | 223 | .vga_set_state = &r100_vga_set_state, |
270 | .gpu_reset = &r300_gpu_reset, | 224 | .gpu_reset = &r300_gpu_reset, |
271 | .mc_init = &rs400_mc_init, | ||
272 | .mc_fini = &rs400_mc_fini, | ||
273 | .wb_init = &r100_wb_init, | ||
274 | .wb_fini = &r100_wb_fini, | ||
275 | .gart_init = &rs400_gart_init, | ||
276 | .gart_fini = &rs400_gart_fini, | ||
277 | .gart_enable = &rs400_gart_enable, | ||
278 | .gart_disable = &rs400_gart_disable, | ||
279 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 225 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
280 | .gart_set_page = &rs400_gart_set_page, | 226 | .gart_set_page = &rs400_gart_set_page, |
281 | .cp_init = &r100_cp_init, | ||
282 | .cp_fini = &r100_cp_fini, | ||
283 | .cp_disable = &r100_cp_disable, | ||
284 | .cp_commit = &r100_cp_commit, | 227 | .cp_commit = &r100_cp_commit, |
285 | .ring_start = &r300_ring_start, | 228 | .ring_start = &r300_ring_start, |
286 | .ring_test = &r100_ring_test, | 229 | .ring_test = &r100_ring_test, |
287 | .ring_ib_execute = &r100_ring_ib_execute, | 230 | .ring_ib_execute = &r100_ring_ib_execute, |
288 | .ib_test = &r100_ib_test, | ||
289 | .irq_set = &r100_irq_set, | 231 | .irq_set = &r100_irq_set, |
290 | .irq_process = &r100_irq_process, | 232 | .irq_process = &r100_irq_process, |
291 | .get_vblank_counter = &r100_get_vblank_counter, | 233 | .get_vblank_counter = &r100_get_vblank_counter, |
@@ -294,7 +236,9 @@ static struct radeon_asic rs400_asic = { | |||
294 | .copy_blit = &r100_copy_blit, | 236 | .copy_blit = &r100_copy_blit, |
295 | .copy_dma = &r300_copy_dma, | 237 | .copy_dma = &r300_copy_dma, |
296 | .copy = &r100_copy_blit, | 238 | .copy = &r100_copy_blit, |
239 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
297 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 240 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
241 | .get_memory_clock = NULL, | ||
298 | .set_memory_clock = NULL, | 242 | .set_memory_clock = NULL, |
299 | .set_pcie_lanes = NULL, | 243 | .set_pcie_lanes = NULL, |
300 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 244 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
@@ -307,18 +251,13 @@ static struct radeon_asic rs400_asic = { | |||
307 | /* | 251 | /* |
308 | * rs600. | 252 | * rs600. |
309 | */ | 253 | */ |
310 | int rs600_init(struct radeon_device *rdev); | 254 | extern int rs600_init(struct radeon_device *rdev); |
311 | void rs600_errata(struct radeon_device *rdev); | 255 | extern void rs600_fini(struct radeon_device *rdev); |
312 | void rs600_vram_info(struct radeon_device *rdev); | 256 | extern int rs600_suspend(struct radeon_device *rdev); |
313 | int rs600_mc_init(struct radeon_device *rdev); | 257 | extern int rs600_resume(struct radeon_device *rdev); |
314 | void rs600_mc_fini(struct radeon_device *rdev); | ||
315 | int rs600_irq_set(struct radeon_device *rdev); | 258 | int rs600_irq_set(struct radeon_device *rdev); |
316 | int rs600_irq_process(struct radeon_device *rdev); | 259 | int rs600_irq_process(struct radeon_device *rdev); |
317 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 260 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
318 | int rs600_gart_init(struct radeon_device *rdev); | ||
319 | void rs600_gart_fini(struct radeon_device *rdev); | ||
320 | int rs600_gart_enable(struct radeon_device *rdev); | ||
321 | void rs600_gart_disable(struct radeon_device *rdev); | ||
322 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 261 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
323 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 262 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
324 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 263 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
@@ -326,28 +265,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
326 | void rs600_bandwidth_update(struct radeon_device *rdev); | 265 | void rs600_bandwidth_update(struct radeon_device *rdev); |
327 | static struct radeon_asic rs600_asic = { | 266 | static struct radeon_asic rs600_asic = { |
328 | .init = &rs600_init, | 267 | .init = &rs600_init, |
329 | .errata = &rs600_errata, | 268 | .fini = &rs600_fini, |
330 | .vram_info = &rs600_vram_info, | 269 | .suspend = &rs600_suspend, |
270 | .resume = &rs600_resume, | ||
331 | .vga_set_state = &r100_vga_set_state, | 271 | .vga_set_state = &r100_vga_set_state, |
332 | .gpu_reset = &r300_gpu_reset, | 272 | .gpu_reset = &r300_gpu_reset, |
333 | .mc_init = &rs600_mc_init, | ||
334 | .mc_fini = &rs600_mc_fini, | ||
335 | .wb_init = &r100_wb_init, | ||
336 | .wb_fini = &r100_wb_fini, | ||
337 | .gart_init = &rs600_gart_init, | ||
338 | .gart_fini = &rs600_gart_fini, | ||
339 | .gart_enable = &rs600_gart_enable, | ||
340 | .gart_disable = &rs600_gart_disable, | ||
341 | .gart_tlb_flush = &rs600_gart_tlb_flush, | 273 | .gart_tlb_flush = &rs600_gart_tlb_flush, |
342 | .gart_set_page = &rs600_gart_set_page, | 274 | .gart_set_page = &rs600_gart_set_page, |
343 | .cp_init = &r100_cp_init, | ||
344 | .cp_fini = &r100_cp_fini, | ||
345 | .cp_disable = &r100_cp_disable, | ||
346 | .cp_commit = &r100_cp_commit, | 275 | .cp_commit = &r100_cp_commit, |
347 | .ring_start = &r300_ring_start, | 276 | .ring_start = &r300_ring_start, |
348 | .ring_test = &r100_ring_test, | 277 | .ring_test = &r100_ring_test, |
349 | .ring_ib_execute = &r100_ring_ib_execute, | 278 | .ring_ib_execute = &r100_ring_ib_execute, |
350 | .ib_test = &r100_ib_test, | ||
351 | .irq_set = &rs600_irq_set, | 279 | .irq_set = &rs600_irq_set, |
352 | .irq_process = &rs600_irq_process, | 280 | .irq_process = &rs600_irq_process, |
353 | .get_vblank_counter = &rs600_get_vblank_counter, | 281 | .get_vblank_counter = &rs600_get_vblank_counter, |
@@ -356,7 +284,9 @@ static struct radeon_asic rs600_asic = { | |||
356 | .copy_blit = &r100_copy_blit, | 284 | .copy_blit = &r100_copy_blit, |
357 | .copy_dma = &r300_copy_dma, | 285 | .copy_dma = &r300_copy_dma, |
358 | .copy = &r100_copy_blit, | 286 | .copy = &r100_copy_blit, |
287 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
359 | .set_engine_clock = &radeon_atom_set_engine_clock, | 288 | .set_engine_clock = &radeon_atom_set_engine_clock, |
289 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
360 | .set_memory_clock = &radeon_atom_set_memory_clock, | 290 | .set_memory_clock = &radeon_atom_set_memory_clock, |
361 | .set_pcie_lanes = NULL, | 291 | .set_pcie_lanes = NULL, |
362 | .set_clock_gating = &radeon_atom_set_clock_gating, | 292 | .set_clock_gating = &radeon_atom_set_clock_gating, |
@@ -367,37 +297,26 @@ static struct radeon_asic rs600_asic = { | |||
367 | /* | 297 | /* |
368 | * rs690,rs740 | 298 | * rs690,rs740 |
369 | */ | 299 | */ |
370 | void rs690_errata(struct radeon_device *rdev); | 300 | int rs690_init(struct radeon_device *rdev); |
371 | void rs690_vram_info(struct radeon_device *rdev); | 301 | void rs690_fini(struct radeon_device *rdev); |
372 | int rs690_mc_init(struct radeon_device *rdev); | 302 | int rs690_resume(struct radeon_device *rdev); |
373 | void rs690_mc_fini(struct radeon_device *rdev); | 303 | int rs690_suspend(struct radeon_device *rdev); |
374 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 304 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
375 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 305 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
376 | void rs690_bandwidth_update(struct radeon_device *rdev); | 306 | void rs690_bandwidth_update(struct radeon_device *rdev); |
377 | static struct radeon_asic rs690_asic = { | 307 | static struct radeon_asic rs690_asic = { |
378 | .init = &rs600_init, | 308 | .init = &rs690_init, |
379 | .errata = &rs690_errata, | 309 | .fini = &rs690_fini, |
380 | .vram_info = &rs690_vram_info, | 310 | .suspend = &rs690_suspend, |
311 | .resume = &rs690_resume, | ||
381 | .vga_set_state = &r100_vga_set_state, | 312 | .vga_set_state = &r100_vga_set_state, |
382 | .gpu_reset = &r300_gpu_reset, | 313 | .gpu_reset = &r300_gpu_reset, |
383 | .mc_init = &rs690_mc_init, | ||
384 | .mc_fini = &rs690_mc_fini, | ||
385 | .wb_init = &r100_wb_init, | ||
386 | .wb_fini = &r100_wb_fini, | ||
387 | .gart_init = &rs400_gart_init, | ||
388 | .gart_fini = &rs400_gart_fini, | ||
389 | .gart_enable = &rs400_gart_enable, | ||
390 | .gart_disable = &rs400_gart_disable, | ||
391 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 314 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
392 | .gart_set_page = &rs400_gart_set_page, | 315 | .gart_set_page = &rs400_gart_set_page, |
393 | .cp_init = &r100_cp_init, | ||
394 | .cp_fini = &r100_cp_fini, | ||
395 | .cp_disable = &r100_cp_disable, | ||
396 | .cp_commit = &r100_cp_commit, | 316 | .cp_commit = &r100_cp_commit, |
397 | .ring_start = &r300_ring_start, | 317 | .ring_start = &r300_ring_start, |
398 | .ring_test = &r100_ring_test, | 318 | .ring_test = &r100_ring_test, |
399 | .ring_ib_execute = &r100_ring_ib_execute, | 319 | .ring_ib_execute = &r100_ring_ib_execute, |
400 | .ib_test = &r100_ib_test, | ||
401 | .irq_set = &rs600_irq_set, | 320 | .irq_set = &rs600_irq_set, |
402 | .irq_process = &rs600_irq_process, | 321 | .irq_process = &rs600_irq_process, |
403 | .get_vblank_counter = &rs600_get_vblank_counter, | 322 | .get_vblank_counter = &rs600_get_vblank_counter, |
@@ -406,7 +325,9 @@ static struct radeon_asic rs690_asic = { | |||
406 | .copy_blit = &r100_copy_blit, | 325 | .copy_blit = &r100_copy_blit, |
407 | .copy_dma = &r300_copy_dma, | 326 | .copy_dma = &r300_copy_dma, |
408 | .copy = &r300_copy_dma, | 327 | .copy = &r300_copy_dma, |
328 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
409 | .set_engine_clock = &radeon_atom_set_engine_clock, | 329 | .set_engine_clock = &radeon_atom_set_engine_clock, |
330 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
410 | .set_memory_clock = &radeon_atom_set_memory_clock, | 331 | .set_memory_clock = &radeon_atom_set_memory_clock, |
411 | .set_pcie_lanes = NULL, | 332 | .set_pcie_lanes = NULL, |
412 | .set_clock_gating = &radeon_atom_set_clock_gating, | 333 | .set_clock_gating = &radeon_atom_set_clock_gating, |
@@ -435,28 +356,14 @@ static struct radeon_asic rv515_asic = { | |||
435 | .fini = &rv515_fini, | 356 | .fini = &rv515_fini, |
436 | .suspend = &rv515_suspend, | 357 | .suspend = &rv515_suspend, |
437 | .resume = &rv515_resume, | 358 | .resume = &rv515_resume, |
438 | .errata = NULL, | ||
439 | .vram_info = NULL, | ||
440 | .vga_set_state = &r100_vga_set_state, | 359 | .vga_set_state = &r100_vga_set_state, |
441 | .gpu_reset = &rv515_gpu_reset, | 360 | .gpu_reset = &rv515_gpu_reset, |
442 | .mc_init = NULL, | ||
443 | .mc_fini = NULL, | ||
444 | .wb_init = NULL, | ||
445 | .wb_fini = NULL, | ||
446 | .gart_init = &rv370_pcie_gart_init, | ||
447 | .gart_fini = &rv370_pcie_gart_fini, | ||
448 | .gart_enable = NULL, | ||
449 | .gart_disable = NULL, | ||
450 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 361 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
451 | .gart_set_page = &rv370_pcie_gart_set_page, | 362 | .gart_set_page = &rv370_pcie_gart_set_page, |
452 | .cp_init = NULL, | ||
453 | .cp_fini = NULL, | ||
454 | .cp_disable = NULL, | ||
455 | .cp_commit = &r100_cp_commit, | 363 | .cp_commit = &r100_cp_commit, |
456 | .ring_start = &rv515_ring_start, | 364 | .ring_start = &rv515_ring_start, |
457 | .ring_test = &r100_ring_test, | 365 | .ring_test = &r100_ring_test, |
458 | .ring_ib_execute = &r100_ring_ib_execute, | 366 | .ring_ib_execute = &r100_ring_ib_execute, |
459 | .ib_test = NULL, | ||
460 | .irq_set = &rs600_irq_set, | 367 | .irq_set = &rs600_irq_set, |
461 | .irq_process = &rs600_irq_process, | 368 | .irq_process = &rs600_irq_process, |
462 | .get_vblank_counter = &rs600_get_vblank_counter, | 369 | .get_vblank_counter = &rs600_get_vblank_counter, |
@@ -465,7 +372,9 @@ static struct radeon_asic rv515_asic = { | |||
465 | .copy_blit = &r100_copy_blit, | 372 | .copy_blit = &r100_copy_blit, |
466 | .copy_dma = &r300_copy_dma, | 373 | .copy_dma = &r300_copy_dma, |
467 | .copy = &r100_copy_blit, | 374 | .copy = &r100_copy_blit, |
375 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
468 | .set_engine_clock = &radeon_atom_set_engine_clock, | 376 | .set_engine_clock = &radeon_atom_set_engine_clock, |
377 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
469 | .set_memory_clock = &radeon_atom_set_memory_clock, | 378 | .set_memory_clock = &radeon_atom_set_memory_clock, |
470 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 379 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
471 | .set_clock_gating = &radeon_atom_set_clock_gating, | 380 | .set_clock_gating = &radeon_atom_set_clock_gating, |
@@ -485,28 +394,14 @@ static struct radeon_asic r520_asic = { | |||
485 | .fini = &rv515_fini, | 394 | .fini = &rv515_fini, |
486 | .suspend = &rv515_suspend, | 395 | .suspend = &rv515_suspend, |
487 | .resume = &r520_resume, | 396 | .resume = &r520_resume, |
488 | .errata = NULL, | ||
489 | .vram_info = NULL, | ||
490 | .vga_set_state = &r100_vga_set_state, | 397 | .vga_set_state = &r100_vga_set_state, |
491 | .gpu_reset = &rv515_gpu_reset, | 398 | .gpu_reset = &rv515_gpu_reset, |
492 | .mc_init = NULL, | ||
493 | .mc_fini = NULL, | ||
494 | .wb_init = NULL, | ||
495 | .wb_fini = NULL, | ||
496 | .gart_init = NULL, | ||
497 | .gart_fini = NULL, | ||
498 | .gart_enable = NULL, | ||
499 | .gart_disable = NULL, | ||
500 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 399 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
501 | .gart_set_page = &rv370_pcie_gart_set_page, | 400 | .gart_set_page = &rv370_pcie_gart_set_page, |
502 | .cp_init = NULL, | ||
503 | .cp_fini = NULL, | ||
504 | .cp_disable = NULL, | ||
505 | .cp_commit = &r100_cp_commit, | 401 | .cp_commit = &r100_cp_commit, |
506 | .ring_start = &rv515_ring_start, | 402 | .ring_start = &rv515_ring_start, |
507 | .ring_test = &r100_ring_test, | 403 | .ring_test = &r100_ring_test, |
508 | .ring_ib_execute = &r100_ring_ib_execute, | 404 | .ring_ib_execute = &r100_ring_ib_execute, |
509 | .ib_test = NULL, | ||
510 | .irq_set = &rs600_irq_set, | 405 | .irq_set = &rs600_irq_set, |
511 | .irq_process = &rs600_irq_process, | 406 | .irq_process = &rs600_irq_process, |
512 | .get_vblank_counter = &rs600_get_vblank_counter, | 407 | .get_vblank_counter = &rs600_get_vblank_counter, |
@@ -515,7 +410,9 @@ static struct radeon_asic r520_asic = { | |||
515 | .copy_blit = &r100_copy_blit, | 410 | .copy_blit = &r100_copy_blit, |
516 | .copy_dma = &r300_copy_dma, | 411 | .copy_dma = &r300_copy_dma, |
517 | .copy = &r100_copy_blit, | 412 | .copy = &r100_copy_blit, |
413 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
518 | .set_engine_clock = &radeon_atom_set_engine_clock, | 414 | .set_engine_clock = &radeon_atom_set_engine_clock, |
415 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
519 | .set_memory_clock = &radeon_atom_set_memory_clock, | 416 | .set_memory_clock = &radeon_atom_set_memory_clock, |
520 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 417 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
521 | .set_clock_gating = &radeon_atom_set_clock_gating, | 418 | .set_clock_gating = &radeon_atom_set_clock_gating, |
@@ -554,37 +451,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, | |||
554 | uint32_t offset, uint32_t obj_size); | 451 | uint32_t offset, uint32_t obj_size); |
555 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 452 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
556 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 453 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
557 | int r600_ib_test(struct radeon_device *rdev); | ||
558 | int r600_ring_test(struct radeon_device *rdev); | 454 | int r600_ring_test(struct radeon_device *rdev); |
559 | int r600_copy_blit(struct radeon_device *rdev, | 455 | int r600_copy_blit(struct radeon_device *rdev, |
560 | uint64_t src_offset, uint64_t dst_offset, | 456 | uint64_t src_offset, uint64_t dst_offset, |
561 | unsigned num_pages, struct radeon_fence *fence); | 457 | unsigned num_pages, struct radeon_fence *fence); |
562 | 458 | ||
563 | static struct radeon_asic r600_asic = { | 459 | static struct radeon_asic r600_asic = { |
564 | .errata = NULL, | ||
565 | .init = &r600_init, | 460 | .init = &r600_init, |
566 | .fini = &r600_fini, | 461 | .fini = &r600_fini, |
567 | .suspend = &r600_suspend, | 462 | .suspend = &r600_suspend, |
568 | .resume = &r600_resume, | 463 | .resume = &r600_resume, |
569 | .cp_commit = &r600_cp_commit, | 464 | .cp_commit = &r600_cp_commit, |
570 | .vram_info = NULL, | ||
571 | .vga_set_state = &r600_vga_set_state, | 465 | .vga_set_state = &r600_vga_set_state, |
572 | .gpu_reset = &r600_gpu_reset, | 466 | .gpu_reset = &r600_gpu_reset, |
573 | .mc_init = NULL, | ||
574 | .mc_fini = NULL, | ||
575 | .wb_init = &r600_wb_init, | ||
576 | .wb_fini = &r600_wb_fini, | ||
577 | .gart_enable = NULL, | ||
578 | .gart_disable = NULL, | ||
579 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 467 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
580 | .gart_set_page = &rs600_gart_set_page, | 468 | .gart_set_page = &rs600_gart_set_page, |
581 | .cp_init = NULL, | ||
582 | .cp_fini = NULL, | ||
583 | .cp_disable = NULL, | ||
584 | .ring_start = NULL, | ||
585 | .ring_test = &r600_ring_test, | 469 | .ring_test = &r600_ring_test, |
586 | .ring_ib_execute = &r600_ring_ib_execute, | 470 | .ring_ib_execute = &r600_ring_ib_execute, |
587 | .ib_test = &r600_ib_test, | ||
588 | .irq_set = &r600_irq_set, | 471 | .irq_set = &r600_irq_set, |
589 | .irq_process = &r600_irq_process, | 472 | .irq_process = &r600_irq_process, |
590 | .fence_ring_emit = &r600_fence_ring_emit, | 473 | .fence_ring_emit = &r600_fence_ring_emit, |
@@ -592,7 +475,9 @@ static struct radeon_asic r600_asic = { | |||
592 | .copy_blit = &r600_copy_blit, | 475 | .copy_blit = &r600_copy_blit, |
593 | .copy_dma = &r600_copy_blit, | 476 | .copy_dma = &r600_copy_blit, |
594 | .copy = &r600_copy_blit, | 477 | .copy = &r600_copy_blit, |
478 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
595 | .set_engine_clock = &radeon_atom_set_engine_clock, | 479 | .set_engine_clock = &radeon_atom_set_engine_clock, |
480 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
596 | .set_memory_clock = &radeon_atom_set_memory_clock, | 481 | .set_memory_clock = &radeon_atom_set_memory_clock, |
597 | .set_pcie_lanes = NULL, | 482 | .set_pcie_lanes = NULL, |
598 | .set_clock_gating = &radeon_atom_set_clock_gating, | 483 | .set_clock_gating = &radeon_atom_set_clock_gating, |
@@ -611,30 +496,17 @@ int rv770_resume(struct radeon_device *rdev); | |||
611 | int rv770_gpu_reset(struct radeon_device *rdev); | 496 | int rv770_gpu_reset(struct radeon_device *rdev); |
612 | 497 | ||
613 | static struct radeon_asic rv770_asic = { | 498 | static struct radeon_asic rv770_asic = { |
614 | .errata = NULL, | ||
615 | .init = &rv770_init, | 499 | .init = &rv770_init, |
616 | .fini = &rv770_fini, | 500 | .fini = &rv770_fini, |
617 | .suspend = &rv770_suspend, | 501 | .suspend = &rv770_suspend, |
618 | .resume = &rv770_resume, | 502 | .resume = &rv770_resume, |
619 | .cp_commit = &r600_cp_commit, | 503 | .cp_commit = &r600_cp_commit, |
620 | .vram_info = NULL, | ||
621 | .gpu_reset = &rv770_gpu_reset, | 504 | .gpu_reset = &rv770_gpu_reset, |
622 | .vga_set_state = &r600_vga_set_state, | 505 | .vga_set_state = &r600_vga_set_state, |
623 | .mc_init = NULL, | ||
624 | .mc_fini = NULL, | ||
625 | .wb_init = &r600_wb_init, | ||
626 | .wb_fini = &r600_wb_fini, | ||
627 | .gart_enable = NULL, | ||
628 | .gart_disable = NULL, | ||
629 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 506 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
630 | .gart_set_page = &rs600_gart_set_page, | 507 | .gart_set_page = &rs600_gart_set_page, |
631 | .cp_init = NULL, | ||
632 | .cp_fini = NULL, | ||
633 | .cp_disable = NULL, | ||
634 | .ring_start = NULL, | ||
635 | .ring_test = &r600_ring_test, | 508 | .ring_test = &r600_ring_test, |
636 | .ring_ib_execute = &r600_ring_ib_execute, | 509 | .ring_ib_execute = &r600_ring_ib_execute, |
637 | .ib_test = &r600_ib_test, | ||
638 | .irq_set = &r600_irq_set, | 510 | .irq_set = &r600_irq_set, |
639 | .irq_process = &r600_irq_process, | 511 | .irq_process = &r600_irq_process, |
640 | .fence_ring_emit = &r600_fence_ring_emit, | 512 | .fence_ring_emit = &r600_fence_ring_emit, |
@@ -642,7 +514,9 @@ static struct radeon_asic rv770_asic = { | |||
642 | .copy_blit = &r600_copy_blit, | 514 | .copy_blit = &r600_copy_blit, |
643 | .copy_dma = &r600_copy_blit, | 515 | .copy_dma = &r600_copy_blit, |
644 | .copy = &r600_copy_blit, | 516 | .copy = &r600_copy_blit, |
517 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
645 | .set_engine_clock = &radeon_atom_set_engine_clock, | 518 | .set_engine_clock = &radeon_atom_set_engine_clock, |
519 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
646 | .set_memory_clock = &radeon_atom_set_memory_clock, | 520 | .set_memory_clock = &radeon_atom_set_memory_clock, |
647 | .set_pcie_lanes = NULL, | 521 | .set_pcie_lanes = NULL, |
648 | .set_clock_gating = &radeon_atom_set_clock_gating, | 522 | .set_clock_gating = &radeon_atom_set_clock_gating, |
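Note on how the tables above are consumed: the driver calls these per-ASIC hooks through thin dispatch wrappers rather than calling the r100/r300/r600 functions directly, which is why the new get_engine_clock/get_memory_clock entries only need to be filled in once per chip family. The wrapper macros themselves are not part of this diff, so the following is only a simplified, hypothetical sketch of that dispatch pattern (the struct name, helper names, and the NULL-hook policy shown here are illustrative, not the kernel's exact definitions):

	#include <stdint.h>
	#include <stddef.h>

	struct radeon_device;                   /* opaque here; defined in radeon.h */

	/* Cut-down stand-in for struct radeon_asic: only the clock query hooks. */
	struct asic_clock_hooks {
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
	};

	/* Dispatch helper: a NULL hook (e.g. rs400's get_memory_clock above)
	 * simply means the query is not supported on that family. */
	static uint32_t query_memory_clock(const struct asic_clock_hooks *asic,
					   struct radeon_device *rdev)
	{
		if (asic->get_memory_clock == NULL)
			return 0;
		return asic->get_memory_clock(rdev);
	}

	static uint32_t dummy_engine_clock(struct radeon_device *rdev)
	{
		(void)rdev;
		return 30000;                   /* 300 MHz in the driver's 10 kHz units */
	}

	int main(void)
	{
		struct asic_clock_hooks hooks = {
			.get_engine_clock = dummy_engine_clock,
			.get_memory_clock = NULL,   /* as on rs400 above */
		};
		/* Engine clock is answered by the hook; memory clock falls back to 0. */
		return query_memory_clock(&hooks, NULL) == 0 ? 0 : 1;
	}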
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5b6c08cee40e..2ed88a820935 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -46,7 +46,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
46 | uint32_t supported_device, | 46 | uint32_t supported_device, |
47 | int connector_type, | 47 | int connector_type, |
48 | struct radeon_i2c_bus_rec *i2c_bus, | 48 | struct radeon_i2c_bus_rec *i2c_bus, |
49 | bool linkb, uint32_t igp_lane_info); | 49 | bool linkb, uint32_t igp_lane_info, |
50 | uint16_t connector_object_id); | ||
50 | 51 | ||
51 | /* from radeon_legacy_encoder.c */ | 52 | /* from radeon_legacy_encoder.c */ |
52 | extern void | 53 | extern void |
@@ -193,6 +194,23 @@ const int supported_devices_connector_convert[] = { | |||
193 | DRM_MODE_CONNECTOR_DisplayPort | 194 | DRM_MODE_CONNECTOR_DisplayPort |
194 | }; | 195 | }; |
195 | 196 | ||
197 | const uint16_t supported_devices_connector_object_id_convert[] = { | ||
198 | CONNECTOR_OBJECT_ID_NONE, | ||
199 | CONNECTOR_OBJECT_ID_VGA, | ||
200 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */ | ||
201 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */ | ||
202 | CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */ | ||
203 | CONNECTOR_OBJECT_ID_COMPOSITE, | ||
204 | CONNECTOR_OBJECT_ID_SVIDEO, | ||
205 | CONNECTOR_OBJECT_ID_LVDS, | ||
206 | CONNECTOR_OBJECT_ID_9PIN_DIN, | ||
207 | CONNECTOR_OBJECT_ID_9PIN_DIN, | ||
208 | CONNECTOR_OBJECT_ID_DISPLAYPORT, | ||
209 | CONNECTOR_OBJECT_ID_HDMI_TYPE_A, | ||
210 | CONNECTOR_OBJECT_ID_HDMI_TYPE_B, | ||
211 | CONNECTOR_OBJECT_ID_SVIDEO | ||
212 | }; | ||
213 | |||
196 | const int object_connector_convert[] = { | 214 | const int object_connector_convert[] = { |
197 | DRM_MODE_CONNECTOR_Unknown, | 215 | DRM_MODE_CONNECTOR_Unknown, |
198 | DRM_MODE_CONNECTOR_DVII, | 216 | DRM_MODE_CONNECTOR_DVII, |
@@ -229,7 +247,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
229 | ATOM_OBJECT_HEADER *obj_header; | 247 | ATOM_OBJECT_HEADER *obj_header; |
230 | int i, j, path_size, device_support; | 248 | int i, j, path_size, device_support; |
231 | int connector_type; | 249 | int connector_type; |
232 | uint16_t igp_lane_info, conn_id; | 250 | uint16_t igp_lane_info, conn_id, connector_object_id; |
233 | bool linkb; | 251 | bool linkb; |
234 | struct radeon_i2c_bus_rec ddc_bus; | 252 | struct radeon_i2c_bus_rec ddc_bus; |
235 | 253 | ||
@@ -277,7 +295,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
277 | ATOM_DEVICE_CV_SUPPORT) | 295 | ATOM_DEVICE_CV_SUPPORT) |
278 | continue; | 296 | continue; |
279 | 297 | ||
280 | if ((rdev->family == CHIP_RS780) && | 298 | /* IGP chips */ |
299 | if ((rdev->flags & RADEON_IS_IGP) && | ||
281 | (con_obj_id == | 300 | (con_obj_id == |
282 | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) { | 301 | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) { |
283 | uint16_t igp_offset = 0; | 302 | uint16_t igp_offset = 0; |
@@ -311,6 +330,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
311 | connector_type = | 330 | connector_type = |
312 | object_connector_convert | 331 | object_connector_convert |
313 | [ct]; | 332 | [ct]; |
333 | connector_object_id = ct; | ||
314 | igp_lane_info = | 334 | igp_lane_info = |
315 | slot_config & 0xffff; | 335 | slot_config & 0xffff; |
316 | } else | 336 | } else |
@@ -321,6 +341,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
321 | igp_lane_info = 0; | 341 | igp_lane_info = 0; |
322 | connector_type = | 342 | connector_type = |
323 | object_connector_convert[con_obj_id]; | 343 | object_connector_convert[con_obj_id]; |
344 | connector_object_id = con_obj_id; | ||
324 | } | 345 | } |
325 | 346 | ||
326 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 347 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
@@ -425,7 +446,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
425 | le16_to_cpu(path-> | 446 | le16_to_cpu(path-> |
426 | usDeviceTag), | 447 | usDeviceTag), |
427 | connector_type, &ddc_bus, | 448 | connector_type, &ddc_bus, |
428 | linkb, igp_lane_info); | 449 | linkb, igp_lane_info, |
450 | connector_object_id); | ||
429 | 451 | ||
430 | } | 452 | } |
431 | } | 453 | } |
@@ -435,6 +457,45 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
435 | return true; | 457 | return true; |
436 | } | 458 | } |
437 | 459 | ||
460 | static uint16_t atombios_get_connector_object_id(struct drm_device *dev, | ||
461 | int connector_type, | ||
462 | uint16_t devices) | ||
463 | { | ||
464 | struct radeon_device *rdev = dev->dev_private; | ||
465 | |||
466 | if (rdev->flags & RADEON_IS_IGP) { | ||
467 | return supported_devices_connector_object_id_convert | ||
468 | [connector_type]; | ||
469 | } else if (((connector_type == DRM_MODE_CONNECTOR_DVII) || | ||
470 | (connector_type == DRM_MODE_CONNECTOR_DVID)) && | ||
471 | (devices & ATOM_DEVICE_DFP2_SUPPORT)) { | ||
472 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
473 | struct atom_context *ctx = mode_info->atom_context; | ||
474 | int index = GetIndexIntoMasterTable(DATA, XTMDS_Info); | ||
475 | uint16_t size, data_offset; | ||
476 | uint8_t frev, crev; | ||
477 | ATOM_XTMDS_INFO *xtmds; | ||
478 | |||
479 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | ||
480 | xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); | ||
481 | |||
482 | if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { | ||
483 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | ||
484 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; | ||
485 | else | ||
486 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; | ||
487 | } else { | ||
488 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | ||
489 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | ||
490 | else | ||
491 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; | ||
492 | } | ||
493 | } else { | ||
494 | return supported_devices_connector_object_id_convert | ||
495 | [connector_type]; | ||
496 | } | ||
497 | } | ||
498 | |||
438 | struct bios_connector { | 499 | struct bios_connector { |
439 | bool valid; | 500 | bool valid; |
440 | uint16_t line_mux; | 501 | uint16_t line_mux; |
@@ -593,14 +654,20 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
593 | 654 | ||
594 | /* add the connectors */ | 655 | /* add the connectors */ |
595 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | 656 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { |
596 | if (bios_connectors[i].valid) | 657 | if (bios_connectors[i].valid) { |
658 | uint16_t connector_object_id = | ||
659 | atombios_get_connector_object_id(dev, | ||
660 | bios_connectors[i].connector_type, | ||
661 | bios_connectors[i].devices); | ||
597 | radeon_add_atom_connector(dev, | 662 | radeon_add_atom_connector(dev, |
598 | bios_connectors[i].line_mux, | 663 | bios_connectors[i].line_mux, |
599 | bios_connectors[i].devices, | 664 | bios_connectors[i].devices, |
600 | bios_connectors[i]. | 665 | bios_connectors[i]. |
601 | connector_type, | 666 | connector_type, |
602 | &bios_connectors[i].ddc_bus, | 667 | &bios_connectors[i].ddc_bus, |
603 | false, 0); | 668 | false, 0, |
669 | connector_object_id); | ||
670 | } | ||
604 | } | 671 | } |
605 | 672 | ||
606 | radeon_link_encoder_connector(dev); | 673 | radeon_link_encoder_connector(dev); |
@@ -641,8 +708,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
641 | le16_to_cpu(firmware_info->info.usReferenceClock); | 708 | le16_to_cpu(firmware_info->info.usReferenceClock); |
642 | p1pll->reference_div = 0; | 709 | p1pll->reference_div = 0; |
643 | 710 | ||
644 | p1pll->pll_out_min = | 711 | if (crev < 2) |
645 | le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); | 712 | p1pll->pll_out_min = |
713 | le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); | ||
714 | else | ||
715 | p1pll->pll_out_min = | ||
716 | le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); | ||
646 | p1pll->pll_out_max = | 717 | p1pll->pll_out_max = |
647 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); | 718 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); |
648 | 719 | ||
@@ -651,6 +722,16 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
651 | p1pll->pll_out_min = 64800; | 722 | p1pll->pll_out_min = 64800; |
652 | else | 723 | else |
653 | p1pll->pll_out_min = 20000; | 724 | p1pll->pll_out_min = 20000; |
725 | } else if (p1pll->pll_out_min > 64800) { | ||
726 | /* Limiting the pll output range is a good thing generally as | ||
727 | * it limits the number of possible pll combinations for a given | ||
728 | * frequency presumably to the ones that work best on each card. | ||
729 | * However, certain duallink DVI monitors seem to like | ||
730 | * pll combinations that would be limited by this at least on | ||
731 | * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per | ||
732 | * family. | ||
733 | */ | ||
734 | p1pll->pll_out_min = 64800; | ||
654 | } | 735 | } |
655 | 736 | ||
656 | p1pll->pll_in_min = | 737 | p1pll->pll_in_min = |
@@ -767,6 +848,46 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | |||
767 | return false; | 848 | return false; |
768 | } | 849 | } |
769 | 850 | ||
851 | static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | ||
852 | radeon_encoder | ||
853 | *encoder, | ||
854 | int id) | ||
855 | { | ||
856 | struct drm_device *dev = encoder->base.dev; | ||
857 | struct radeon_device *rdev = dev->dev_private; | ||
858 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
859 | int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); | ||
860 | uint16_t data_offset; | ||
861 | struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; | ||
862 | uint8_t frev, crev; | ||
863 | struct radeon_atom_ss *ss = NULL; | ||
864 | |||
865 | if (id > ATOM_MAX_SS_ENTRY) | ||
866 | return NULL; | ||
867 | |||
868 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | ||
869 | &crev, &data_offset); | ||
870 | |||
871 | ss_info = | ||
872 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); | ||
873 | |||
874 | if (ss_info) { | ||
875 | ss = | ||
876 | kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); | ||
877 | |||
878 | if (!ss) | ||
879 | return NULL; | ||
880 | |||
881 | ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage); | ||
882 | ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType; | ||
883 | ss->step = ss_info->asSS_Info[id].ucSS_Step; | ||
884 | ss->delay = ss_info->asSS_Info[id].ucSS_Delay; | ||
885 | ss->range = ss_info->asSS_Info[id].ucSS_Range; | ||
886 | ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div; | ||
887 | } | ||
888 | return ss; | ||
889 | } | ||
890 | |||
770 | union lvds_info { | 891 | union lvds_info { |
771 | struct _ATOM_LVDS_INFO info; | 892 | struct _ATOM_LVDS_INFO info; |
772 | struct _ATOM_LVDS_INFO_V12 info_12; | 893 | struct _ATOM_LVDS_INFO_V12 info_12; |
@@ -798,27 +919,31 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
798 | if (!lvds) | 919 | if (!lvds) |
799 | return NULL; | 920 | return NULL; |
800 | 921 | ||
801 | lvds->native_mode.dotclock = | 922 | lvds->native_mode.clock = |
802 | le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10; | 923 | le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10; |
803 | lvds->native_mode.panel_xres = | 924 | lvds->native_mode.hdisplay = |
804 | le16_to_cpu(lvds_info->info.sLCDTiming.usHActive); | 925 | le16_to_cpu(lvds_info->info.sLCDTiming.usHActive); |
805 | lvds->native_mode.panel_yres = | 926 | lvds->native_mode.vdisplay = |
806 | le16_to_cpu(lvds_info->info.sLCDTiming.usVActive); | 927 | le16_to_cpu(lvds_info->info.sLCDTiming.usVActive); |
807 | lvds->native_mode.hblank = | 928 | lvds->native_mode.htotal = lvds->native_mode.hdisplay + |
808 | le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time); | 929 | le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time); |
809 | lvds->native_mode.hoverplus = | 930 | lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + |
810 | le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset); | 931 | le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset); |
811 | lvds->native_mode.hsync_width = | 932 | lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + |
812 | le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth); | 933 | le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth); |
813 | lvds->native_mode.vblank = | 934 | lvds->native_mode.vtotal = lvds->native_mode.vdisplay + |
814 | le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); | 935 | le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); |
815 | lvds->native_mode.voverplus = | 936 | lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + |
816 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); | 937 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); |
817 | lvds->native_mode.vsync_width = | 938 | lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + |
818 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); | 939 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); |
819 | lvds->panel_pwr_delay = | 940 | lvds->panel_pwr_delay = |
820 | le16_to_cpu(lvds_info->info.usOffDelayInMs); | 941 | le16_to_cpu(lvds_info->info.usOffDelayInMs); |
821 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; | 942 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; |
943 | /* set crtc values */ | ||
944 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | ||
945 | |||
946 | lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); | ||
822 | 947 | ||
823 | encoder->native_mode = lvds->native_mode; | 948 | encoder->native_mode = lvds->native_mode; |
824 | } | 949 | } |
@@ -857,8 +982,7 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) | |||
857 | } | 982 | } |
858 | 983 | ||
859 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | 984 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, |
860 | SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing, | 985 | struct drm_display_mode *mode) |
861 | int32_t *pixel_clock) | ||
862 | { | 986 | { |
863 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 987 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
864 | ATOM_ANALOG_TV_INFO *tv_info; | 988 | ATOM_ANALOG_TV_INFO *tv_info; |
@@ -866,7 +990,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
866 | ATOM_DTD_FORMAT *dtd_timings; | 990 | ATOM_DTD_FORMAT *dtd_timings; |
867 | int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); | 991 | int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); |
868 | u8 frev, crev; | 992 | u8 frev, crev; |
869 | uint16_t data_offset; | 993 | u16 data_offset, misc; |
870 | 994 | ||
871 | atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); | 995 | atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); |
872 | 996 | ||
@@ -876,28 +1000,37 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
876 | if (index > MAX_SUPPORTED_TV_TIMING) | 1000 | if (index > MAX_SUPPORTED_TV_TIMING) |
877 | return false; | 1001 | return false; |
878 | 1002 | ||
879 | crtc_timing->usH_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); | 1003 | mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); |
880 | crtc_timing->usH_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp); | 1004 | mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp); |
881 | crtc_timing->usH_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart); | 1005 | mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart); |
882 | crtc_timing->usH_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth); | 1006 | mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) + |
883 | 1007 | le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth); | |
884 | crtc_timing->usV_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total); | 1008 | |
885 | crtc_timing->usV_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp); | 1009 | mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total); |
886 | crtc_timing->usV_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart); | 1010 | mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp); |
887 | crtc_timing->usV_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth); | 1011 | mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart); |
888 | 1012 | mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) + | |
889 | crtc_timing->susModeMiscInfo = tv_info->aModeTimings[index].susModeMiscInfo; | 1013 | le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth); |
890 | 1014 | ||
891 | crtc_timing->ucOverscanRight = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanRight); | 1015 | mode->flags = 0; |
892 | crtc_timing->ucOverscanLeft = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanLeft); | 1016 | misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess); |
893 | crtc_timing->ucOverscanBottom = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanBottom); | 1017 | if (misc & ATOM_VSYNC_POLARITY) |
894 | crtc_timing->ucOverscanTop = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanTop); | 1018 | mode->flags |= DRM_MODE_FLAG_NVSYNC; |
895 | *pixel_clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; | 1019 | if (misc & ATOM_HSYNC_POLARITY) |
1020 | mode->flags |= DRM_MODE_FLAG_NHSYNC; | ||
1021 | if (misc & ATOM_COMPOSITESYNC) | ||
1022 | mode->flags |= DRM_MODE_FLAG_CSYNC; | ||
1023 | if (misc & ATOM_INTERLACE) | ||
1024 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
1025 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | ||
1026 | mode->flags |= DRM_MODE_FLAG_DBLSCAN; | ||
1027 | |||
1028 | mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; | ||
896 | 1029 | ||
897 | if (index == 1) { | 1030 | if (index == 1) { |
898 | /* PAL timings appear to have wrong values for totals */ | 1031 | /* PAL timings appear to have wrong values for totals */ |
899 | crtc_timing->usH_Total -= 1; | 1032 | mode->crtc_htotal -= 1; |
900 | crtc_timing->usV_Total -= 1; | 1033 | mode->crtc_vtotal -= 1; |
901 | } | 1034 | } |
902 | break; | 1035 | break; |
903 | case 2: | 1036 | case 2: |
@@ -906,17 +1039,36 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
906 | return false; | 1039 | return false; |
907 | 1040 | ||
908 | dtd_timings = &tv_info_v1_2->aModeTimings[index]; | 1041 | dtd_timings = &tv_info_v1_2->aModeTimings[index]; |
909 | crtc_timing->usH_Total = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time); | 1042 | mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) + |
910 | crtc_timing->usH_Disp = le16_to_cpu(dtd_timings->usHActive); | 1043 | le16_to_cpu(dtd_timings->usHBlanking_Time); |
911 | crtc_timing->usH_SyncStart = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset); | 1044 | mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive); |
912 | crtc_timing->usH_SyncWidth = le16_to_cpu(dtd_timings->usHSyncWidth); | 1045 | mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) + |
913 | crtc_timing->usV_Total = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time); | 1046 | le16_to_cpu(dtd_timings->usHSyncOffset); |
914 | crtc_timing->usV_Disp = le16_to_cpu(dtd_timings->usVActive); | 1047 | mode->crtc_hsync_end = mode->crtc_hsync_start + |
915 | crtc_timing->usV_SyncStart = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset); | 1048 | le16_to_cpu(dtd_timings->usHSyncWidth); |
916 | crtc_timing->usV_SyncWidth = le16_to_cpu(dtd_timings->usVSyncWidth); | 1049 | |
917 | 1050 | mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) + | |
918 | crtc_timing->susModeMiscInfo.usAccess = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess); | 1051 | le16_to_cpu(dtd_timings->usVBlanking_Time); |
919 | *pixel_clock = le16_to_cpu(dtd_timings->usPixClk) * 10; | 1052 | mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive); |
1053 | mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) + | ||
1054 | le16_to_cpu(dtd_timings->usVSyncOffset); | ||
1055 | mode->crtc_vsync_end = mode->crtc_vsync_start + | ||
1056 | le16_to_cpu(dtd_timings->usVSyncWidth); | ||
1057 | |||
1058 | mode->flags = 0; | ||
1059 | misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess); | ||
1060 | if (misc & ATOM_VSYNC_POLARITY) | ||
1061 | mode->flags |= DRM_MODE_FLAG_NVSYNC; | ||
1062 | if (misc & ATOM_HSYNC_POLARITY) | ||
1063 | mode->flags |= DRM_MODE_FLAG_NHSYNC; | ||
1064 | if (misc & ATOM_COMPOSITESYNC) | ||
1065 | mode->flags |= DRM_MODE_FLAG_CSYNC; | ||
1066 | if (misc & ATOM_INTERLACE) | ||
1067 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
1068 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | ||
1069 | mode->flags |= DRM_MODE_FLAG_DBLSCAN; | ||
1070 | |||
1071 | mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10; | ||
920 | break; | 1072 | break; |
921 | } | 1073 | } |
922 | return true; | 1074 | return true; |
@@ -981,6 +1133,24 @@ void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable) | |||
981 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1133 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
982 | } | 1134 | } |
983 | 1135 | ||
1136 | uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) | ||
1137 | { | ||
1138 | GET_ENGINE_CLOCK_PS_ALLOCATION args; | ||
1139 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); | ||
1140 | |||
1141 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1142 | return args.ulReturnEngineClock; | ||
1143 | } | ||
1144 | |||
1145 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) | ||
1146 | { | ||
1147 | GET_MEMORY_CLOCK_PS_ALLOCATION args; | ||
1148 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); | ||
1149 | |||
1150 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1151 | return args.ulReturnMemoryClock; | ||
1152 | } | ||
1153 | |||
984 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, | 1154 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, |
985 | uint32_t eng_clock) | 1155 | uint32_t eng_clock) |
986 | { | 1156 | { |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 2e938f7496fb..10bd50a7db87 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
63 | if (r) { | 63 | if (r) { |
64 | goto out_cleanup; | 64 | goto out_cleanup; |
65 | } | 65 | } |
66 | r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence); | 66 | r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); |
67 | if (r) { | 67 | if (r) { |
68 | goto out_cleanup; | 68 | goto out_cleanup; |
69 | } | 69 | } |
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
88 | if (r) { | 88 | if (r) { |
89 | goto out_cleanup; | 89 | goto out_cleanup; |
90 | } | 90 | } |
91 | r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence); | 91 | r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); |
92 | if (r) { | 92 | if (r) { |
93 | goto out_cleanup; | 93 | goto out_cleanup; |
94 | } | 94 | } |
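The radeon_benchmark.c change above only swaps the literal 4096 for RADEON_GPU_PAGE_SIZE: the size handed to radeon_copy_dma/radeon_copy_blit is a count of GPU pages, not bytes. A quick standalone check of that unit conversion, assuming RADEON_GPU_PAGE_SIZE is 4096 bytes (the literal it replaces):

	#include <stdio.h>

	#define RADEON_GPU_PAGE_SIZE 4096   /* assumed value; matches the hard-coded literal it replaces */

	int main(void)
	{
		unsigned bsize = 1024 * 1024;                    /* example 1 MiB benchmark buffer */
		unsigned num_pages = bsize / RADEON_GPU_PAGE_SIZE;

		/* copy size is expressed in GPU pages: 1 MiB -> 256 pages */
		printf("%u bytes -> %u GPU pages\n", bsize, num_pages);
		return 0;
	}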
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 96e37a6e7ce4..906921740c60 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -33,12 +33,47 @@ | |||
33 | /* | 33 | /* |
34 | * BIOS. | 34 | * BIOS. |
35 | */ | 35 | */ |
36 | |||
37 | /* If you boot an IGP board with a discrete card as the primary, | ||
38 | * the IGP rom is not accessible via the rom bar as the IGP rom is | ||
39 | * part of the system bios. On boot, the system bios puts a | ||
40 | * copy of the igp rom at the start of vram if a discrete card is | ||
41 | * present. | ||
42 | */ | ||
43 | static bool igp_read_bios_from_vram(struct radeon_device *rdev) | ||
44 | { | ||
45 | uint8_t __iomem *bios; | ||
46 | resource_size_t vram_base; | ||
47 | resource_size_t size = 256 * 1024; /* ??? */ | ||
48 | |||
49 | rdev->bios = NULL; | ||
50 | vram_base = drm_get_resource_start(rdev->ddev, 0); | ||
51 | bios = ioremap(vram_base, size); | ||
52 | if (!bios) { | ||
53 | return false; | ||
54 | } | ||
55 | |||
56 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
57 | iounmap(bios); | ||
58 | return false; | ||
59 | } | ||
60 | rdev->bios = kmalloc(size, GFP_KERNEL); | ||
61 | if (rdev->bios == NULL) { | ||
62 | iounmap(bios); | ||
63 | return false; | ||
64 | } | ||
65 | memcpy(rdev->bios, bios, size); | ||
66 | iounmap(bios); | ||
67 | return true; | ||
68 | } | ||
69 | |||
36 | static bool radeon_read_bios(struct radeon_device *rdev) | 70 | static bool radeon_read_bios(struct radeon_device *rdev) |
37 | { | 71 | { |
38 | uint8_t __iomem *bios; | 72 | uint8_t __iomem *bios; |
39 | size_t size; | 73 | size_t size; |
40 | 74 | ||
41 | rdev->bios = NULL; | 75 | rdev->bios = NULL; |
76 | /* XXX: some cards may return 0 for rom size? ddx has a workaround */ | ||
42 | bios = pci_map_rom(rdev->pdev, &size); | 77 | bios = pci_map_rom(rdev->pdev, &size); |
43 | if (!bios) { | 78 | if (!bios) { |
44 | return false; | 79 | return false; |
@@ -341,7 +376,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
341 | 376 | ||
342 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) | 377 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) |
343 | { | 378 | { |
344 | if (rdev->family >= CHIP_RV770) | 379 | if (rdev->flags & RADEON_IS_IGP) |
380 | return igp_read_bios_from_vram(rdev); | ||
381 | else if (rdev->family >= CHIP_RV770) | ||
345 | return r700_read_disabled_bios(rdev); | 382 | return r700_read_disabled_bios(rdev); |
346 | else if (rdev->family >= CHIP_R600) | 383 | else if (rdev->family >= CHIP_R600) |
347 | return r600_read_disabled_bios(rdev); | 384 | return r600_read_disabled_bios(rdev); |
@@ -356,7 +393,12 @@ bool radeon_get_bios(struct radeon_device *rdev) | |||
356 | bool r; | 393 | bool r; |
357 | uint16_t tmp; | 394 | uint16_t tmp; |
358 | 395 | ||
359 | r = radeon_read_bios(rdev); | 396 | if (rdev->flags & RADEON_IS_IGP) { |
397 | r = igp_read_bios_from_vram(rdev); | ||
398 | if (r == false) | ||
399 | r = radeon_read_bios(rdev); | ||
400 | } else | ||
401 | r = radeon_read_bios(rdev); | ||
360 | if (r == false) { | 402 | if (r == false) { |
361 | r = radeon_read_disabled_bios(rdev); | 403 | r = radeon_read_disabled_bios(rdev); |
362 | } | 404 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 152eef13197a..a81354167621 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include "atom.h" | 32 | #include "atom.h" |
33 | 33 | ||
34 | /* 10 khz */ | 34 | /* 10 khz */ |
35 | static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) | 35 | uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) |
36 | { | 36 | { |
37 | struct radeon_pll *spll = &rdev->clock.spll; | 37 | struct radeon_pll *spll = &rdev->clock.spll; |
38 | uint32_t fb_div, ref_div, post_div, sclk; | 38 | uint32_t fb_div, ref_div, post_div, sclk; |
@@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
411 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 411 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
412 | R300_PIXCLK_TVO_ALWAYS_ONb | | 412 | R300_PIXCLK_TVO_ALWAYS_ONb | |
413 | R300_P2G2CLK_ALWAYS_ONb | | 413 | R300_P2G2CLK_ALWAYS_ONb | |
414 | R300_P2G2CLK_ALWAYS_ONb); | 414 | R300_P2G2CLK_DAC_ALWAYS_ONb); |
415 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 415 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
416 | } else if (rdev->family >= CHIP_RV350) { | 416 | } else if (rdev->family >= CHIP_RV350) { |
417 | tmp = RREG32_PLL(R300_SCLK_CNTL2); | 417 | tmp = RREG32_PLL(R300_SCLK_CNTL2); |
@@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
464 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 464 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
465 | R300_PIXCLK_TVO_ALWAYS_ONb | | 465 | R300_PIXCLK_TVO_ALWAYS_ONb | |
466 | R300_P2G2CLK_ALWAYS_ONb | | 466 | R300_P2G2CLK_ALWAYS_ONb | |
467 | R300_P2G2CLK_ALWAYS_ONb); | 467 | R300_P2G2CLK_DAC_ALWAYS_ONb); |
468 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 468 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
469 | 469 | ||
470 | tmp = RREG32_PLL(RADEON_MCLK_MISC); | 470 | tmp = RREG32_PLL(RADEON_MCLK_MISC); |
@@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
654 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 654 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
655 | R300_PIXCLK_TVO_ALWAYS_ONb | | 655 | R300_PIXCLK_TVO_ALWAYS_ONb | |
656 | R300_P2G2CLK_ALWAYS_ONb | | 656 | R300_P2G2CLK_ALWAYS_ONb | |
657 | R300_P2G2CLK_ALWAYS_ONb | | 657 | R300_P2G2CLK_DAC_ALWAYS_ONb | |
658 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); | 658 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); |
659 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 659 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
660 | } else if (rdev->family >= CHIP_RV350) { | 660 | } else if (rdev->family >= CHIP_RV350) { |
@@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
705 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 705 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
706 | R300_PIXCLK_TVO_ALWAYS_ONb | | 706 | R300_PIXCLK_TVO_ALWAYS_ONb | |
707 | R300_P2G2CLK_ALWAYS_ONb | | 707 | R300_P2G2CLK_ALWAYS_ONb | |
708 | R300_P2G2CLK_ALWAYS_ONb | | 708 | R300_P2G2CLK_DAC_ALWAYS_ONb | |
709 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); | 709 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); |
710 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 710 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
711 | } else { | 711 | } else { |
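The radeon_clocks.c hunks above are all the same class of fix: R300_P2G2CLK_ALWAYS_ONb was listed twice in the OR-mask, so the intended R300_P2G2CLK_DAC_ALWAYS_ONb bit was never set. A tiny standalone illustration of why the duplicate was a silent no-op (the bit positions here are made up for the example; the real values live in the radeon register headers, not in this diff):

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative bit values only. */
	#define R300_P2G2CLK_ALWAYS_ONb      (1u << 5)
	#define R300_P2G2CLK_DAC_ALWAYS_ONb  (1u << 6)

	int main(void)
	{
		uint32_t mask_old = R300_P2G2CLK_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb;
		uint32_t mask_new = R300_P2G2CLK_ALWAYS_ONb | R300_P2G2CLK_DAC_ALWAYS_ONb;

		/* OR-ing the same bit twice changes nothing, so the old mask never
		 * enabled the DAC clock bit; the fix adds the bit actually intended. */
		assert(mask_old == R300_P2G2CLK_ALWAYS_ONb);
		assert(mask_new != mask_old);
		return 0;
	}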
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 748265a105b3..5253cbf6db1f 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -49,7 +49,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
49 | uint32_t connector_id, | 49 | uint32_t connector_id, |
50 | uint32_t supported_device, | 50 | uint32_t supported_device, |
51 | int connector_type, | 51 | int connector_type, |
52 | struct radeon_i2c_bus_rec *i2c_bus); | 52 | struct radeon_i2c_bus_rec *i2c_bus, |
53 | uint16_t connector_object_id); | ||
53 | 54 | ||
54 | /* from radeon_legacy_encoder.c */ | 55 | /* from radeon_legacy_encoder.c */ |
55 | extern void | 56 | extern void |
@@ -808,25 +809,25 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct | |||
808 | lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf; | 809 | lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf; |
809 | 810 | ||
810 | if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE) | 811 | if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE) |
811 | lvds->native_mode.panel_yres = | 812 | lvds->native_mode.vdisplay = |
812 | ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >> | 813 | ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >> |
813 | RADEON_VERT_PANEL_SHIFT) + 1; | 814 | RADEON_VERT_PANEL_SHIFT) + 1; |
814 | else | 815 | else |
815 | lvds->native_mode.panel_yres = | 816 | lvds->native_mode.vdisplay = |
816 | (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1; | 817 | (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1; |
817 | 818 | ||
818 | if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE) | 819 | if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE) |
819 | lvds->native_mode.panel_xres = | 820 | lvds->native_mode.hdisplay = |
820 | (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >> | 821 | (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >> |
821 | RADEON_HORZ_PANEL_SHIFT) + 1) * 8; | 822 | RADEON_HORZ_PANEL_SHIFT) + 1) * 8; |
822 | else | 823 | else |
823 | lvds->native_mode.panel_xres = | 824 | lvds->native_mode.hdisplay = |
824 | ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8; | 825 | ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8; |
825 | 826 | ||
826 | if ((lvds->native_mode.panel_xres < 640) || | 827 | if ((lvds->native_mode.hdisplay < 640) || |
827 | (lvds->native_mode.panel_yres < 480)) { | 828 | (lvds->native_mode.vdisplay < 480)) { |
828 | lvds->native_mode.panel_xres = 640; | 829 | lvds->native_mode.hdisplay = 640; |
829 | lvds->native_mode.panel_yres = 480; | 830 | lvds->native_mode.vdisplay = 480; |
830 | } | 831 | } |
831 | 832 | ||
832 | ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3; | 833 | ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3; |
@@ -846,8 +847,8 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct | |||
846 | lvds->panel_vcc_delay = 200; | 847 | lvds->panel_vcc_delay = 200; |
847 | 848 | ||
848 | DRM_INFO("Panel info derived from registers\n"); | 849 | DRM_INFO("Panel info derived from registers\n"); |
849 | DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres, | 850 | DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay, |
850 | lvds->native_mode.panel_yres); | 851 | lvds->native_mode.vdisplay); |
851 | 852 | ||
852 | return lvds; | 853 | return lvds; |
853 | } | 854 | } |
@@ -882,11 +883,11 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder | |||
882 | 883 | ||
883 | DRM_INFO("Panel ID String: %s\n", stmp); | 884 | DRM_INFO("Panel ID String: %s\n", stmp); |
884 | 885 | ||
885 | lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19); | 886 | lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19); |
886 | lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b); | 887 | lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b); |
887 | 888 | ||
888 | DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres, | 889 | DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay, |
889 | lvds->native_mode.panel_yres); | 890 | lvds->native_mode.vdisplay); |
890 | 891 | ||
891 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); | 892 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); |
892 | if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) | 893 | if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) |
@@ -944,27 +945,25 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder | |||
944 | if (tmp == 0) | 945 | if (tmp == 0) |
945 | break; | 946 | break; |
946 | 947 | ||
947 | if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) && | 948 | if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && |
948 | (RBIOS16(tmp + 2) == | 949 | (RBIOS16(tmp + 2) == |
949 | lvds->native_mode.panel_yres)) { | 950 | lvds->native_mode.vdisplay)) { |
950 | lvds->native_mode.hblank = | 951 | lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8; |
951 | (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; | 952 | lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8; |
952 | lvds->native_mode.hoverplus = | 953 | lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) + |
953 | (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - | 954 | RBIOS16(tmp + 21)) * 8; |
954 | 1) * 8; | 955 | |
955 | lvds->native_mode.hsync_width = | 956 | lvds->native_mode.vtotal = RBIOS16(tmp + 24); |
956 | RBIOS8(tmp + 23) * 8; | 957 | lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff; |
957 | 958 | lvds->native_mode.vsync_end = | |
958 | lvds->native_mode.vblank = (RBIOS16(tmp + 24) - | 959 | ((RBIOS16(tmp + 28) & 0xf800) >> 11) + |
959 | RBIOS16(tmp + 26)); | 960 | (RBIOS16(tmp + 28) & 0x7ff); |
960 | lvds->native_mode.voverplus = | 961 | |
961 | ((RBIOS16(tmp + 28) & 0x7ff) - | 962 | lvds->native_mode.clock = RBIOS16(tmp + 9) * 10; |
962 | RBIOS16(tmp + 26)); | ||
963 | lvds->native_mode.vsync_width = | ||
964 | ((RBIOS16(tmp + 28) & 0xf800) >> 11); | ||
965 | lvds->native_mode.dotclock = | ||
966 | RBIOS16(tmp + 9) * 10; | ||
967 | lvds->native_mode.flags = 0; | 963 | lvds->native_mode.flags = 0; |
964 | /* set crtc values */ | ||
965 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | ||
966 | |||
968 | } | 967 | } |
969 | } | 968 | } |
970 | } else { | 969 | } else { |
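
The ComBIOS LCD timing hunk above stops computing blank/overplus/sync-width deltas and instead fills standard drm_display_mode fields straight from the BIOS table. A rough standalone model of that mapping, with a byte-buffer reader standing in for RBIOS16/RBIOS8 (the table offsets mirror the code above; the helper names and the sample data are illustrative):

#include <stdint.h>
#include <stdio.h>

struct mode_timings {
	int clock;                       /* pixel clock, kHz */
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

/* Stand-ins for the BIOS accessors; the driver reads the mapped ROM image. */
static uint16_t rbios16(const uint8_t *bios, unsigned off)
{
	return (uint16_t)(bios[off] | (bios[off + 1] << 8));
}

static uint8_t rbios8(const uint8_t *bios, unsigned off)
{
	return bios[off];
}

/* Decode one LCD timing entry at offset 'tmp', mirroring the offsets above. */
static void decode_lcd_timing(const uint8_t *bios, unsigned tmp,
			      struct mode_timings *m)
{
	m->hdisplay    = rbios16(bios, tmp);
	m->vdisplay    = rbios16(bios, tmp + 2);
	m->clock       = rbios16(bios, tmp + 9) * 10;
	m->htotal      = rbios16(bios, tmp + 17) * 8;
	m->hsync_start = rbios16(bios, tmp + 21) * 8;
	m->hsync_end   = (rbios8(bios, tmp + 23) + rbios16(bios, tmp + 21)) * 8;
	m->vtotal      = rbios16(bios, tmp + 24);
	m->vsync_start = rbios16(bios, tmp + 28) & 0x7ff;
	m->vsync_end   = ((rbios16(bios, tmp + 28) & 0xf800) >> 11) +
			 m->vsync_start;
}

int main(void)
{
	uint8_t bios[32] = {0};
	struct mode_timings m;

	bios[0] = 0x00; bios[1] = 0x04;   /* hdisplay = 1024 */
	bios[2] = 0x00; bios[3] = 0x03;   /* vdisplay = 768 */
	decode_lcd_timing(bios, 0, &m);
	printf("%dx%d\n", m.hdisplay, m.vdisplay);
	return 0;
}
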
@@ -1178,7 +1177,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1178 | radeon_add_legacy_connector(dev, 0, | 1177 | radeon_add_legacy_connector(dev, 0, |
1179 | ATOM_DEVICE_CRT1_SUPPORT, | 1178 | ATOM_DEVICE_CRT1_SUPPORT, |
1180 | DRM_MODE_CONNECTOR_VGA, | 1179 | DRM_MODE_CONNECTOR_VGA, |
1181 | &ddc_i2c); | 1180 | &ddc_i2c, |
1181 | CONNECTOR_OBJECT_ID_VGA); | ||
1182 | } else if (rdev->flags & RADEON_IS_MOBILITY) { | 1182 | } else if (rdev->flags & RADEON_IS_MOBILITY) { |
1183 | /* LVDS */ | 1183 | /* LVDS */ |
1184 | ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); | 1184 | ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); |
@@ -1190,7 +1190,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1190 | radeon_add_legacy_connector(dev, 0, | 1190 | radeon_add_legacy_connector(dev, 0, |
1191 | ATOM_DEVICE_LCD1_SUPPORT, | 1191 | ATOM_DEVICE_LCD1_SUPPORT, |
1192 | DRM_MODE_CONNECTOR_LVDS, | 1192 | DRM_MODE_CONNECTOR_LVDS, |
1193 | &ddc_i2c); | 1193 | &ddc_i2c, |
1194 | CONNECTOR_OBJECT_ID_LVDS); | ||
1194 | 1195 | ||
1195 | /* VGA - primary dac */ | 1196 | /* VGA - primary dac */ |
1196 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1197 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); |
@@ -1202,7 +1203,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1202 | radeon_add_legacy_connector(dev, 1, | 1203 | radeon_add_legacy_connector(dev, 1, |
1203 | ATOM_DEVICE_CRT1_SUPPORT, | 1204 | ATOM_DEVICE_CRT1_SUPPORT, |
1204 | DRM_MODE_CONNECTOR_VGA, | 1205 | DRM_MODE_CONNECTOR_VGA, |
1205 | &ddc_i2c); | 1206 | &ddc_i2c, |
1207 | CONNECTOR_OBJECT_ID_VGA); | ||
1206 | } else { | 1208 | } else { |
1207 | /* DVI-I - tv dac, int tmds */ | 1209 | /* DVI-I - tv dac, int tmds */ |
1208 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1210 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); |
@@ -1220,7 +1222,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1220 | ATOM_DEVICE_DFP1_SUPPORT | | 1222 | ATOM_DEVICE_DFP1_SUPPORT | |
1221 | ATOM_DEVICE_CRT2_SUPPORT, | 1223 | ATOM_DEVICE_CRT2_SUPPORT, |
1222 | DRM_MODE_CONNECTOR_DVII, | 1224 | DRM_MODE_CONNECTOR_DVII, |
1223 | &ddc_i2c); | 1225 | &ddc_i2c, |
1226 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | ||
1224 | 1227 | ||
1225 | /* VGA - primary dac */ | 1228 | /* VGA - primary dac */ |
1226 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1229 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); |
@@ -1232,7 +1235,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1232 | radeon_add_legacy_connector(dev, 1, | 1235 | radeon_add_legacy_connector(dev, 1, |
1233 | ATOM_DEVICE_CRT1_SUPPORT, | 1236 | ATOM_DEVICE_CRT1_SUPPORT, |
1234 | DRM_MODE_CONNECTOR_VGA, | 1237 | DRM_MODE_CONNECTOR_VGA, |
1235 | &ddc_i2c); | 1238 | &ddc_i2c, |
1239 | CONNECTOR_OBJECT_ID_VGA); | ||
1236 | } | 1240 | } |
1237 | 1241 | ||
1238 | if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { | 1242 | if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { |
@@ -1245,7 +1249,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1245 | radeon_add_legacy_connector(dev, 2, | 1249 | radeon_add_legacy_connector(dev, 2, |
1246 | ATOM_DEVICE_TV1_SUPPORT, | 1250 | ATOM_DEVICE_TV1_SUPPORT, |
1247 | DRM_MODE_CONNECTOR_SVIDEO, | 1251 | DRM_MODE_CONNECTOR_SVIDEO, |
1248 | &ddc_i2c); | 1252 | &ddc_i2c, |
1253 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1249 | } | 1254 | } |
1250 | break; | 1255 | break; |
1251 | case CT_IBOOK: | 1256 | case CT_IBOOK: |
@@ -1259,7 +1264,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1259 | 0), | 1264 | 0), |
1260 | ATOM_DEVICE_LCD1_SUPPORT); | 1265 | ATOM_DEVICE_LCD1_SUPPORT); |
1261 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1266 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1262 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); | 1267 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1268 | CONNECTOR_OBJECT_ID_LVDS); | ||
1263 | /* VGA - TV DAC */ | 1269 | /* VGA - TV DAC */ |
1264 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1270 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); |
1265 | radeon_add_legacy_encoder(dev, | 1271 | radeon_add_legacy_encoder(dev, |
@@ -1268,7 +1274,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1268 | 2), | 1274 | 2), |
1269 | ATOM_DEVICE_CRT2_SUPPORT); | 1275 | ATOM_DEVICE_CRT2_SUPPORT); |
1270 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | 1276 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, |
1271 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c); | 1277 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1278 | CONNECTOR_OBJECT_ID_VGA); | ||
1272 | /* TV - TV DAC */ | 1279 | /* TV - TV DAC */ |
1273 | radeon_add_legacy_encoder(dev, | 1280 | radeon_add_legacy_encoder(dev, |
1274 | radeon_get_encoder_id(dev, | 1281 | radeon_get_encoder_id(dev, |
@@ -1277,7 +1284,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1277 | ATOM_DEVICE_TV1_SUPPORT); | 1284 | ATOM_DEVICE_TV1_SUPPORT); |
1278 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1285 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1279 | DRM_MODE_CONNECTOR_SVIDEO, | 1286 | DRM_MODE_CONNECTOR_SVIDEO, |
1280 | &ddc_i2c); | 1287 | &ddc_i2c, |
1288 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1281 | break; | 1289 | break; |
1282 | case CT_POWERBOOK_EXTERNAL: | 1290 | case CT_POWERBOOK_EXTERNAL: |
1283 | DRM_INFO("Connector Table: %d (powerbook external tmds)\n", | 1291 | DRM_INFO("Connector Table: %d (powerbook external tmds)\n", |
@@ -1290,7 +1298,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1290 | 0), | 1298 | 0), |
1291 | ATOM_DEVICE_LCD1_SUPPORT); | 1299 | ATOM_DEVICE_LCD1_SUPPORT); |
1292 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1300 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1293 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); | 1301 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1302 | CONNECTOR_OBJECT_ID_LVDS); | ||
1294 | /* DVI-I - primary dac, ext tmds */ | 1303 | /* DVI-I - primary dac, ext tmds */ |
1295 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1304 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); |
1296 | radeon_add_legacy_encoder(dev, | 1305 | radeon_add_legacy_encoder(dev, |
@@ -1303,10 +1312,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1303 | ATOM_DEVICE_CRT1_SUPPORT, | 1312 | ATOM_DEVICE_CRT1_SUPPORT, |
1304 | 1), | 1313 | 1), |
1305 | ATOM_DEVICE_CRT1_SUPPORT); | 1314 | ATOM_DEVICE_CRT1_SUPPORT); |
1315 | /* XXX some are SL */ | ||
1306 | radeon_add_legacy_connector(dev, 1, | 1316 | radeon_add_legacy_connector(dev, 1, |
1307 | ATOM_DEVICE_DFP2_SUPPORT | | 1317 | ATOM_DEVICE_DFP2_SUPPORT | |
1308 | ATOM_DEVICE_CRT1_SUPPORT, | 1318 | ATOM_DEVICE_CRT1_SUPPORT, |
1309 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c); | 1319 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1320 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I); | ||
1310 | /* TV - TV DAC */ | 1321 | /* TV - TV DAC */ |
1311 | radeon_add_legacy_encoder(dev, | 1322 | radeon_add_legacy_encoder(dev, |
1312 | radeon_get_encoder_id(dev, | 1323 | radeon_get_encoder_id(dev, |
@@ -1315,7 +1326,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1315 | ATOM_DEVICE_TV1_SUPPORT); | 1326 | ATOM_DEVICE_TV1_SUPPORT); |
1316 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1327 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1317 | DRM_MODE_CONNECTOR_SVIDEO, | 1328 | DRM_MODE_CONNECTOR_SVIDEO, |
1318 | &ddc_i2c); | 1329 | &ddc_i2c, |
1330 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1319 | break; | 1331 | break; |
1320 | case CT_POWERBOOK_INTERNAL: | 1332 | case CT_POWERBOOK_INTERNAL: |
1321 | DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", | 1333 | DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", |
@@ -1328,7 +1340,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1328 | 0), | 1340 | 0), |
1329 | ATOM_DEVICE_LCD1_SUPPORT); | 1341 | ATOM_DEVICE_LCD1_SUPPORT); |
1330 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1342 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1331 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); | 1343 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1344 | CONNECTOR_OBJECT_ID_LVDS); | ||
1332 | /* DVI-I - primary dac, int tmds */ | 1345 | /* DVI-I - primary dac, int tmds */ |
1333 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1346 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); |
1334 | radeon_add_legacy_encoder(dev, | 1347 | radeon_add_legacy_encoder(dev, |
@@ -1344,7 +1357,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1344 | radeon_add_legacy_connector(dev, 1, | 1357 | radeon_add_legacy_connector(dev, 1, |
1345 | ATOM_DEVICE_DFP1_SUPPORT | | 1358 | ATOM_DEVICE_DFP1_SUPPORT | |
1346 | ATOM_DEVICE_CRT1_SUPPORT, | 1359 | ATOM_DEVICE_CRT1_SUPPORT, |
1347 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c); | 1360 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1361 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | ||
1348 | /* TV - TV DAC */ | 1362 | /* TV - TV DAC */ |
1349 | radeon_add_legacy_encoder(dev, | 1363 | radeon_add_legacy_encoder(dev, |
1350 | radeon_get_encoder_id(dev, | 1364 | radeon_get_encoder_id(dev, |
@@ -1353,7 +1367,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1353 | ATOM_DEVICE_TV1_SUPPORT); | 1367 | ATOM_DEVICE_TV1_SUPPORT); |
1354 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1368 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1355 | DRM_MODE_CONNECTOR_SVIDEO, | 1369 | DRM_MODE_CONNECTOR_SVIDEO, |
1356 | &ddc_i2c); | 1370 | &ddc_i2c, |
1371 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1357 | break; | 1372 | break; |
1358 | case CT_POWERBOOK_VGA: | 1373 | case CT_POWERBOOK_VGA: |
1359 | DRM_INFO("Connector Table: %d (powerbook vga)\n", | 1374 | DRM_INFO("Connector Table: %d (powerbook vga)\n", |
@@ -1366,7 +1381,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1366 | 0), | 1381 | 0), |
1367 | ATOM_DEVICE_LCD1_SUPPORT); | 1382 | ATOM_DEVICE_LCD1_SUPPORT); |
1368 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1383 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1369 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c); | 1384 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1385 | CONNECTOR_OBJECT_ID_LVDS); | ||
1370 | /* VGA - primary dac */ | 1386 | /* VGA - primary dac */ |
1371 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1387 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); |
1372 | radeon_add_legacy_encoder(dev, | 1388 | radeon_add_legacy_encoder(dev, |
@@ -1375,7 +1391,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1375 | 1), | 1391 | 1), |
1376 | ATOM_DEVICE_CRT1_SUPPORT); | 1392 | ATOM_DEVICE_CRT1_SUPPORT); |
1377 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, | 1393 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, |
1378 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c); | 1394 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1395 | CONNECTOR_OBJECT_ID_VGA); | ||
1379 | /* TV - TV DAC */ | 1396 | /* TV - TV DAC */ |
1380 | radeon_add_legacy_encoder(dev, | 1397 | radeon_add_legacy_encoder(dev, |
1381 | radeon_get_encoder_id(dev, | 1398 | radeon_get_encoder_id(dev, |
@@ -1384,7 +1401,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1384 | ATOM_DEVICE_TV1_SUPPORT); | 1401 | ATOM_DEVICE_TV1_SUPPORT); |
1385 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1402 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1386 | DRM_MODE_CONNECTOR_SVIDEO, | 1403 | DRM_MODE_CONNECTOR_SVIDEO, |
1387 | &ddc_i2c); | 1404 | &ddc_i2c, |
1405 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1388 | break; | 1406 | break; |
1389 | case CT_MINI_EXTERNAL: | 1407 | case CT_MINI_EXTERNAL: |
1390 | DRM_INFO("Connector Table: %d (mini external tmds)\n", | 1408 | DRM_INFO("Connector Table: %d (mini external tmds)\n", |
@@ -1401,10 +1419,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1401 | ATOM_DEVICE_CRT2_SUPPORT, | 1419 | ATOM_DEVICE_CRT2_SUPPORT, |
1402 | 2), | 1420 | 2), |
1403 | ATOM_DEVICE_CRT2_SUPPORT); | 1421 | ATOM_DEVICE_CRT2_SUPPORT); |
1422 | /* XXX are any DL? */ | ||
1404 | radeon_add_legacy_connector(dev, 0, | 1423 | radeon_add_legacy_connector(dev, 0, |
1405 | ATOM_DEVICE_DFP2_SUPPORT | | 1424 | ATOM_DEVICE_DFP2_SUPPORT | |
1406 | ATOM_DEVICE_CRT2_SUPPORT, | 1425 | ATOM_DEVICE_CRT2_SUPPORT, |
1407 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c); | 1426 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1427 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | ||
1408 | /* TV - TV DAC */ | 1428 | /* TV - TV DAC */ |
1409 | radeon_add_legacy_encoder(dev, | 1429 | radeon_add_legacy_encoder(dev, |
1410 | radeon_get_encoder_id(dev, | 1430 | radeon_get_encoder_id(dev, |
@@ -1413,7 +1433,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1413 | ATOM_DEVICE_TV1_SUPPORT); | 1433 | ATOM_DEVICE_TV1_SUPPORT); |
1414 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, | 1434 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, |
1415 | DRM_MODE_CONNECTOR_SVIDEO, | 1435 | DRM_MODE_CONNECTOR_SVIDEO, |
1416 | &ddc_i2c); | 1436 | &ddc_i2c, |
1437 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1417 | break; | 1438 | break; |
1418 | case CT_MINI_INTERNAL: | 1439 | case CT_MINI_INTERNAL: |
1419 | DRM_INFO("Connector Table: %d (mini internal tmds)\n", | 1440 | DRM_INFO("Connector Table: %d (mini internal tmds)\n", |
@@ -1433,7 +1454,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1433 | radeon_add_legacy_connector(dev, 0, | 1454 | radeon_add_legacy_connector(dev, 0, |
1434 | ATOM_DEVICE_DFP1_SUPPORT | | 1455 | ATOM_DEVICE_DFP1_SUPPORT | |
1435 | ATOM_DEVICE_CRT2_SUPPORT, | 1456 | ATOM_DEVICE_CRT2_SUPPORT, |
1436 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c); | 1457 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1458 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | ||
1437 | /* TV - TV DAC */ | 1459 | /* TV - TV DAC */ |
1438 | radeon_add_legacy_encoder(dev, | 1460 | radeon_add_legacy_encoder(dev, |
1439 | radeon_get_encoder_id(dev, | 1461 | radeon_get_encoder_id(dev, |
@@ -1442,7 +1464,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1442 | ATOM_DEVICE_TV1_SUPPORT); | 1464 | ATOM_DEVICE_TV1_SUPPORT); |
1443 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, | 1465 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, |
1444 | DRM_MODE_CONNECTOR_SVIDEO, | 1466 | DRM_MODE_CONNECTOR_SVIDEO, |
1445 | &ddc_i2c); | 1467 | &ddc_i2c, |
1468 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1446 | break; | 1469 | break; |
1447 | case CT_IMAC_G5_ISIGHT: | 1470 | case CT_IMAC_G5_ISIGHT: |
1448 | DRM_INFO("Connector Table: %d (imac g5 isight)\n", | 1471 | DRM_INFO("Connector Table: %d (imac g5 isight)\n", |
@@ -1455,7 +1478,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1455 | 0), | 1478 | 0), |
1456 | ATOM_DEVICE_DFP1_SUPPORT); | 1479 | ATOM_DEVICE_DFP1_SUPPORT); |
1457 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, | 1480 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, |
1458 | DRM_MODE_CONNECTOR_DVID, &ddc_i2c); | 1481 | DRM_MODE_CONNECTOR_DVID, &ddc_i2c, |
1482 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); | ||
1459 | /* VGA - tv dac */ | 1483 | /* VGA - tv dac */ |
1460 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1484 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); |
1461 | radeon_add_legacy_encoder(dev, | 1485 | radeon_add_legacy_encoder(dev, |
@@ -1464,7 +1488,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1464 | 2), | 1488 | 2), |
1465 | ATOM_DEVICE_CRT2_SUPPORT); | 1489 | ATOM_DEVICE_CRT2_SUPPORT); |
1466 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | 1490 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, |
1467 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c); | 1491 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1492 | CONNECTOR_OBJECT_ID_VGA); | ||
1468 | /* TV - TV DAC */ | 1493 | /* TV - TV DAC */ |
1469 | radeon_add_legacy_encoder(dev, | 1494 | radeon_add_legacy_encoder(dev, |
1470 | radeon_get_encoder_id(dev, | 1495 | radeon_get_encoder_id(dev, |
@@ -1473,7 +1498,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1473 | ATOM_DEVICE_TV1_SUPPORT); | 1498 | ATOM_DEVICE_TV1_SUPPORT); |
1474 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1499 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1475 | DRM_MODE_CONNECTOR_SVIDEO, | 1500 | DRM_MODE_CONNECTOR_SVIDEO, |
1476 | &ddc_i2c); | 1501 | &ddc_i2c, |
1502 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1477 | break; | 1503 | break; |
1478 | case CT_EMAC: | 1504 | case CT_EMAC: |
1479 | DRM_INFO("Connector Table: %d (emac)\n", | 1505 | DRM_INFO("Connector Table: %d (emac)\n", |
@@ -1486,7 +1512,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1486 | 1), | 1512 | 1), |
1487 | ATOM_DEVICE_CRT1_SUPPORT); | 1513 | ATOM_DEVICE_CRT1_SUPPORT); |
1488 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, | 1514 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, |
1489 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c); | 1515 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1516 | CONNECTOR_OBJECT_ID_VGA); | ||
1490 | /* VGA - tv dac */ | 1517 | /* VGA - tv dac */ |
1491 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); | 1518 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); |
1492 | radeon_add_legacy_encoder(dev, | 1519 | radeon_add_legacy_encoder(dev, |
@@ -1495,7 +1522,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1495 | 2), | 1522 | 2), |
1496 | ATOM_DEVICE_CRT2_SUPPORT); | 1523 | ATOM_DEVICE_CRT2_SUPPORT); |
1497 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | 1524 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, |
1498 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c); | 1525 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1526 | CONNECTOR_OBJECT_ID_VGA); | ||
1499 | /* TV - TV DAC */ | 1527 | /* TV - TV DAC */ |
1500 | radeon_add_legacy_encoder(dev, | 1528 | radeon_add_legacy_encoder(dev, |
1501 | radeon_get_encoder_id(dev, | 1529 | radeon_get_encoder_id(dev, |
@@ -1504,7 +1532,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1504 | ATOM_DEVICE_TV1_SUPPORT); | 1532 | ATOM_DEVICE_TV1_SUPPORT); |
1505 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1533 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1506 | DRM_MODE_CONNECTOR_SVIDEO, | 1534 | DRM_MODE_CONNECTOR_SVIDEO, |
1507 | &ddc_i2c); | 1535 | &ddc_i2c, |
1536 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1508 | break; | 1537 | break; |
1509 | default: | 1538 | default: |
1510 | DRM_INFO("Connector table: %d (invalid)\n", | 1539 | DRM_INFO("Connector table: %d (invalid)\n", |
@@ -1581,11 +1610,63 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, | |||
1581 | return true; | 1610 | return true; |
1582 | } | 1611 | } |
1583 | 1612 | ||
1613 | static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev) | ||
1614 | { | ||
1615 | /* Acer 5102 has non-existent TV port */ | ||
1616 | if (dev->pdev->device == 0x5975 && | ||
1617 | dev->pdev->subsystem_vendor == 0x1025 && | ||
1618 | dev->pdev->subsystem_device == 0x009f) | ||
1619 | return false; | ||
1620 | |||
1621 | /* HP dc5750 has non-existent TV port */ | ||
1622 | if (dev->pdev->device == 0x5974 && | ||
1623 | dev->pdev->subsystem_vendor == 0x103c && | ||
1624 | dev->pdev->subsystem_device == 0x280a) | ||
1625 | return false; | ||
1626 | |||
1627 | return true; | ||
1628 | } | ||
1629 | |||
1630 | static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d) | ||
1631 | { | ||
1632 | struct radeon_device *rdev = dev->dev_private; | ||
1633 | uint32_t ext_tmds_info; | ||
1634 | |||
1635 | if (rdev->flags & RADEON_IS_IGP) { | ||
1636 | if (is_dvi_d) | ||
1637 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; | ||
1638 | else | ||
1639 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | ||
1640 | } | ||
1641 | ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | ||
1642 | if (ext_tmds_info) { | ||
1643 | uint8_t rev = RBIOS8(ext_tmds_info); | ||
1644 | uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5); | ||
1645 | if (rev >= 3) { | ||
1646 | if (is_dvi_d) | ||
1647 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; | ||
1648 | else | ||
1649 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; | ||
1650 | } else { | ||
1651 | if (flags & 1) { | ||
1652 | if (is_dvi_d) | ||
1653 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; | ||
1654 | else | ||
1655 | return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; | ||
1656 | } | ||
1657 | } | ||
1658 | } | ||
1659 | if (is_dvi_d) | ||
1660 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; | ||
1661 | else | ||
1662 | return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | ||
1663 | } | ||
1664 | |||
1584 | bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | 1665 | bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) |
1585 | { | 1666 | { |
1586 | struct radeon_device *rdev = dev->dev_private; | 1667 | struct radeon_device *rdev = dev->dev_private; |
1587 | uint32_t conn_info, entry, devices; | 1668 | uint32_t conn_info, entry, devices; |
1588 | uint16_t tmp; | 1669 | uint16_t tmp, connector_object_id; |
1589 | enum radeon_combios_ddc ddc_type; | 1670 | enum radeon_combios_ddc ddc_type; |
1590 | enum radeon_combios_connector connector; | 1671 | enum radeon_combios_connector connector; |
1591 | int i = 0; | 1672 | int i = 0; |
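
radeon_apply_legacy_tv_quirks above blacklists boards whose BIOS advertises a TV-out connector that was never wired up, matching on PCI device and subsystem IDs. The same idea as a table-driven standalone sketch (IDs copied from the hunk; the struct layout and lookup are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tv_quirk {
	uint16_t device;
	uint16_t subsystem_vendor;
	uint16_t subsystem_device;
};

/* Boards whose BIOS advertises a TV-out connector that does not exist. */
static const struct tv_quirk no_tv_boards[] = {
	{ 0x5975, 0x1025, 0x009f },	/* Acer 5102 */
	{ 0x5974, 0x103c, 0x280a },	/* HP dc5750 */
};

static bool board_has_tv(uint16_t dev, uint16_t sv, uint16_t sd)
{
	for (size_t i = 0; i < sizeof(no_tv_boards) / sizeof(no_tv_boards[0]); i++)
		if (no_tv_boards[i].device == dev &&
		    no_tv_boards[i].subsystem_vendor == sv &&
		    no_tv_boards[i].subsystem_device == sd)
			return false;
	return true;
}

int main(void)
{
	printf("Acer 5102 TV port: %s\n",
	       board_has_tv(0x5975, 0x1025, 0x009f) ? "yes" : "no");
	return 0;
}
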
@@ -1628,8 +1709,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1628 | break; | 1709 | break; |
1629 | } | 1710 | } |
1630 | 1711 | ||
1631 | radeon_apply_legacy_quirks(dev, i, &connector, | 1712 | if (!radeon_apply_legacy_quirks(dev, i, &connector, |
1632 | &ddc_i2c); | 1713 | &ddc_i2c)) |
1714 | continue; | ||
1633 | 1715 | ||
1634 | switch (connector) { | 1716 | switch (connector) { |
1635 | case CONNECTOR_PROPRIETARY_LEGACY: | 1717 | case CONNECTOR_PROPRIETARY_LEGACY: |
@@ -1644,7 +1726,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1644 | radeon_add_legacy_connector(dev, i, devices, | 1726 | radeon_add_legacy_connector(dev, i, devices, |
1645 | legacy_connector_convert | 1727 | legacy_connector_convert |
1646 | [connector], | 1728 | [connector], |
1647 | &ddc_i2c); | 1729 | &ddc_i2c, |
1730 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); | ||
1648 | break; | 1731 | break; |
1649 | case CONNECTOR_CRT_LEGACY: | 1732 | case CONNECTOR_CRT_LEGACY: |
1650 | if (tmp & 0x1) { | 1733 | if (tmp & 0x1) { |
@@ -1669,7 +1752,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1669 | devices, | 1752 | devices, |
1670 | legacy_connector_convert | 1753 | legacy_connector_convert |
1671 | [connector], | 1754 | [connector], |
1672 | &ddc_i2c); | 1755 | &ddc_i2c, |
1756 | CONNECTOR_OBJECT_ID_VGA); | ||
1673 | break; | 1757 | break; |
1674 | case CONNECTOR_DVI_I_LEGACY: | 1758 | case CONNECTOR_DVI_I_LEGACY: |
1675 | devices = 0; | 1759 | devices = 0; |
@@ -1698,6 +1782,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1698 | ATOM_DEVICE_DFP2_SUPPORT, | 1782 | ATOM_DEVICE_DFP2_SUPPORT, |
1699 | 0), | 1783 | 0), |
1700 | ATOM_DEVICE_DFP2_SUPPORT); | 1784 | ATOM_DEVICE_DFP2_SUPPORT); |
1785 | connector_object_id = combios_check_dl_dvi(dev, 0); | ||
1701 | } else { | 1786 | } else { |
1702 | devices |= ATOM_DEVICE_DFP1_SUPPORT; | 1787 | devices |= ATOM_DEVICE_DFP1_SUPPORT; |
1703 | radeon_add_legacy_encoder(dev, | 1788 | radeon_add_legacy_encoder(dev, |
@@ -1706,19 +1791,24 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1706 | ATOM_DEVICE_DFP1_SUPPORT, | 1791 | ATOM_DEVICE_DFP1_SUPPORT, |
1707 | 0), | 1792 | 0), |
1708 | ATOM_DEVICE_DFP1_SUPPORT); | 1793 | ATOM_DEVICE_DFP1_SUPPORT); |
1794 | connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | ||
1709 | } | 1795 | } |
1710 | radeon_add_legacy_connector(dev, | 1796 | radeon_add_legacy_connector(dev, |
1711 | i, | 1797 | i, |
1712 | devices, | 1798 | devices, |
1713 | legacy_connector_convert | 1799 | legacy_connector_convert |
1714 | [connector], | 1800 | [connector], |
1715 | &ddc_i2c); | 1801 | &ddc_i2c, |
1802 | connector_object_id); | ||
1716 | break; | 1803 | break; |
1717 | case CONNECTOR_DVI_D_LEGACY: | 1804 | case CONNECTOR_DVI_D_LEGACY: |
1718 | if ((tmp >> 4) & 0x1) | 1805 | if ((tmp >> 4) & 0x1) { |
1719 | devices = ATOM_DEVICE_DFP2_SUPPORT; | 1806 | devices = ATOM_DEVICE_DFP2_SUPPORT; |
1720 | else | 1807 | connector_object_id = combios_check_dl_dvi(dev, 1); |
1808 | } else { | ||
1721 | devices = ATOM_DEVICE_DFP1_SUPPORT; | 1809 | devices = ATOM_DEVICE_DFP1_SUPPORT; |
1810 | connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | ||
1811 | } | ||
1722 | radeon_add_legacy_encoder(dev, | 1812 | radeon_add_legacy_encoder(dev, |
1723 | radeon_get_encoder_id | 1813 | radeon_get_encoder_id |
1724 | (dev, devices, 0), | 1814 | (dev, devices, 0), |
@@ -1726,7 +1816,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1726 | radeon_add_legacy_connector(dev, i, devices, | 1816 | radeon_add_legacy_connector(dev, i, devices, |
1727 | legacy_connector_convert | 1817 | legacy_connector_convert |
1728 | [connector], | 1818 | [connector], |
1729 | &ddc_i2c); | 1819 | &ddc_i2c, |
1820 | connector_object_id); | ||
1730 | break; | 1821 | break; |
1731 | case CONNECTOR_CTV_LEGACY: | 1822 | case CONNECTOR_CTV_LEGACY: |
1732 | case CONNECTOR_STV_LEGACY: | 1823 | case CONNECTOR_STV_LEGACY: |
@@ -1740,7 +1831,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1740 | ATOM_DEVICE_TV1_SUPPORT, | 1831 | ATOM_DEVICE_TV1_SUPPORT, |
1741 | legacy_connector_convert | 1832 | legacy_connector_convert |
1742 | [connector], | 1833 | [connector], |
1743 | &ddc_i2c); | 1834 | &ddc_i2c, |
1835 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
1744 | break; | 1836 | break; |
1745 | default: | 1837 | default: |
1746 | DRM_ERROR("Unknown connector type: %d\n", | 1838 | DRM_ERROR("Unknown connector type: %d\n", |
@@ -1772,10 +1864,29 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1772 | ATOM_DEVICE_CRT1_SUPPORT | | 1864 | ATOM_DEVICE_CRT1_SUPPORT | |
1773 | ATOM_DEVICE_DFP1_SUPPORT, | 1865 | ATOM_DEVICE_DFP1_SUPPORT, |
1774 | DRM_MODE_CONNECTOR_DVII, | 1866 | DRM_MODE_CONNECTOR_DVII, |
1775 | &ddc_i2c); | 1867 | &ddc_i2c, |
1868 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | ||
1776 | } else { | 1869 | } else { |
1777 | DRM_DEBUG("No connector info found\n"); | 1870 | uint16_t crt_info = |
1778 | return false; | 1871 | combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); |
1872 | DRM_DEBUG("Found CRT table, assuming VGA connector\n"); | ||
1873 | if (crt_info) { | ||
1874 | radeon_add_legacy_encoder(dev, | ||
1875 | radeon_get_encoder_id(dev, | ||
1876 | ATOM_DEVICE_CRT1_SUPPORT, | ||
1877 | 1), | ||
1878 | ATOM_DEVICE_CRT1_SUPPORT); | ||
1879 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | ||
1880 | radeon_add_legacy_connector(dev, | ||
1881 | 0, | ||
1882 | ATOM_DEVICE_CRT1_SUPPORT, | ||
1883 | DRM_MODE_CONNECTOR_VGA, | ||
1884 | &ddc_i2c, | ||
1885 | CONNECTOR_OBJECT_ID_VGA); | ||
1886 | } else { | ||
1887 | DRM_DEBUG("No connector info found\n"); | ||
1888 | return false; | ||
1889 | } | ||
1779 | } | 1890 | } |
1780 | } | 1891 | } |
1781 | 1892 | ||
@@ -1870,7 +1981,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1870 | 5, | 1981 | 5, |
1871 | ATOM_DEVICE_LCD1_SUPPORT, | 1982 | ATOM_DEVICE_LCD1_SUPPORT, |
1872 | DRM_MODE_CONNECTOR_LVDS, | 1983 | DRM_MODE_CONNECTOR_LVDS, |
1873 | &ddc_i2c); | 1984 | &ddc_i2c, |
1985 | CONNECTOR_OBJECT_ID_LVDS); | ||
1874 | } | 1986 | } |
1875 | } | 1987 | } |
1876 | 1988 | ||
@@ -1880,16 +1992,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1880 | combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); | 1992 | combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); |
1881 | if (tv_info) { | 1993 | if (tv_info) { |
1882 | if (RBIOS8(tv_info + 6) == 'T') { | 1994 | if (RBIOS8(tv_info + 6) == 'T') { |
1883 | radeon_add_legacy_encoder(dev, | 1995 | if (radeon_apply_legacy_tv_quirks(dev)) { |
1884 | radeon_get_encoder_id | 1996 | radeon_add_legacy_encoder(dev, |
1885 | (dev, | 1997 | radeon_get_encoder_id |
1886 | ATOM_DEVICE_TV1_SUPPORT, | 1998 | (dev, |
1887 | 2), | 1999 | ATOM_DEVICE_TV1_SUPPORT, |
1888 | ATOM_DEVICE_TV1_SUPPORT); | 2000 | 2), |
1889 | radeon_add_legacy_connector(dev, 6, | 2001 | ATOM_DEVICE_TV1_SUPPORT); |
1890 | ATOM_DEVICE_TV1_SUPPORT, | 2002 | radeon_add_legacy_connector(dev, 6, |
1891 | DRM_MODE_CONNECTOR_SVIDEO, | 2003 | ATOM_DEVICE_TV1_SUPPORT, |
1892 | &ddc_i2c); | 2004 | DRM_MODE_CONNECTOR_SVIDEO, |
2005 | &ddc_i2c, | ||
2006 | CONNECTOR_OBJECT_ID_SVIDEO); | ||
2007 | } | ||
1893 | } | 2008 | } |
1894 | } | 2009 | } |
1895 | } | 2010 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index e376be47a4a0..fce4c4087fda 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -178,25 +178,12 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode | |||
178 | struct drm_device *dev = encoder->dev; | 178 | struct drm_device *dev = encoder->dev; |
179 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 179 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
180 | struct drm_display_mode *mode = NULL; | 180 | struct drm_display_mode *mode = NULL; |
181 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; | 181 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
182 | |||
183 | if (native_mode->panel_xres != 0 && | ||
184 | native_mode->panel_yres != 0 && | ||
185 | native_mode->dotclock != 0) { | ||
186 | mode = drm_mode_create(dev); | ||
187 | |||
188 | mode->hdisplay = native_mode->panel_xres; | ||
189 | mode->vdisplay = native_mode->panel_yres; | ||
190 | |||
191 | mode->htotal = mode->hdisplay + native_mode->hblank; | ||
192 | mode->hsync_start = mode->hdisplay + native_mode->hoverplus; | ||
193 | mode->hsync_end = mode->hsync_start + native_mode->hsync_width; | ||
194 | mode->vtotal = mode->vdisplay + native_mode->vblank; | ||
195 | mode->vsync_start = mode->vdisplay + native_mode->voverplus; | ||
196 | mode->vsync_end = mode->vsync_start + native_mode->vsync_width; | ||
197 | mode->clock = native_mode->dotclock; | ||
198 | mode->flags = 0; | ||
199 | 182 | ||
183 | if (native_mode->hdisplay != 0 && | ||
184 | native_mode->vdisplay != 0 && | ||
185 | native_mode->clock != 0) { | ||
186 | mode = drm_mode_duplicate(dev, native_mode); | ||
200 | mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; | 187 | mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; |
201 | drm_mode_set_name(mode); | 188 | drm_mode_set_name(mode); |
202 | 189 | ||
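
radeon_fp_native_mode now keeps the panel's native timing as a plain drm_display_mode and hands out a duplicate marked preferred, instead of rebuilding the mode field by field. A standalone model of that shape, with an allocation-failure check and illustrative stand-ins for drm_mode_duplicate() and the DRM type flags:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Cut-down stand-in for drm_display_mode: only the fields the check uses. */
struct disp_mode {
	int clock;			/* pixel clock, kHz */
	int hdisplay, vdisplay;
	unsigned type;
};

#define MODE_TYPE_PREFERRED  (1u << 0)	/* illustrative flag values */
#define MODE_TYPE_DRIVER     (1u << 1)

/* Return a heap copy of the stored native mode, marked preferred, or NULL
 * when no native mode is known -- the shape of radeon_fp_native_mode above. */
static struct disp_mode *dup_native_mode(const struct disp_mode *native)
{
	struct disp_mode *mode;

	if (!native->hdisplay || !native->vdisplay || !native->clock)
		return NULL;

	mode = malloc(sizeof(*mode));
	if (!mode)
		return NULL;
	memcpy(mode, native, sizeof(*mode));
	mode->type = MODE_TYPE_PREFERRED | MODE_TYPE_DRIVER;
	return mode;
}

int main(void)
{
	struct disp_mode native = { 71000, 1280, 800, 0 };
	struct disp_mode *mode = dup_native_mode(&native);

	if (mode)
		printf("native %dx%d @ %d kHz\n",
		       mode->hdisplay, mode->vdisplay, mode->clock);
	free(mode);
	return 0;
}
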
@@ -210,7 +197,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn | |||
210 | struct drm_device *dev = encoder->dev; | 197 | struct drm_device *dev = encoder->dev; |
211 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 198 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
212 | struct drm_display_mode *mode = NULL; | 199 | struct drm_display_mode *mode = NULL; |
213 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; | 200 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
214 | int i; | 201 | int i; |
215 | struct mode_size { | 202 | struct mode_size { |
216 | int w; | 203 | int w; |
@@ -236,11 +223,16 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn | |||
236 | }; | 223 | }; |
237 | 224 | ||
238 | for (i = 0; i < 17; i++) { | 225 | for (i = 0; i < 17; i++) { |
226 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
227 | if (common_modes[i].w > 1024 || | ||
228 | common_modes[i].h > 768) | ||
229 | continue; | ||
230 | } | ||
239 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 231 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
240 | if (common_modes[i].w > native_mode->panel_xres || | 232 | if (common_modes[i].w > native_mode->hdisplay || |
241 | common_modes[i].h > native_mode->panel_yres || | 233 | common_modes[i].h > native_mode->vdisplay || |
242 | (common_modes[i].w == native_mode->panel_xres && | 234 | (common_modes[i].w == native_mode->hdisplay && |
243 | common_modes[i].h == native_mode->panel_yres)) | 235 | common_modes[i].h == native_mode->vdisplay)) |
244 | continue; | 236 | continue; |
245 | } | 237 | } |
246 | if (common_modes[i].w < 320 || common_modes[i].h < 200) | 238 | if (common_modes[i].w < 320 || common_modes[i].h < 200) |
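
The common-mode loop above gains a 1024x768 cap for TV encoders on top of the existing LCD filtering, where modes larger than the panel, or exactly the native size, are skipped because the native mode is added separately. A standalone model of the combined filter (thresholds copied from the hunk):

#include <stdbool.h>
#include <stdio.h>

struct mode_size { int w, h; };

/* Keep a candidate mode only if it fits the constraints above: TV encoders
 * are capped at 1024x768, LCDs must stay strictly inside the native panel
 * size, and anything below 320x200 is dropped. */
static bool keep_common_mode(struct mode_size m, bool is_tv, bool is_lcd,
			     int panel_w, int panel_h)
{
	if (is_tv && (m.w > 1024 || m.h > 768))
		return false;
	if (is_lcd && (m.w > panel_w || m.h > panel_h ||
		       (m.w == panel_w && m.h == panel_h)))
		return false;
	if (m.w < 320 || m.h < 200)
		return false;
	return true;
}

int main(void)
{
	struct mode_size candidates[] = { {1920, 1080}, {1024, 768}, {320, 240} };

	for (int i = 0; i < 3; i++)
		printf("%dx%d on TV: %s\n", candidates[i].w, candidates[i].h,
		       keep_common_mode(candidates[i], true, false, 0, 0)
		       ? "kept" : "skipped");
	return 0;
}
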
@@ -344,28 +336,23 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
344 | struct drm_connector *connector) | 336 | struct drm_connector *connector) |
345 | { | 337 | { |
346 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 338 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
347 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; | 339 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
348 | 340 | ||
349 | /* Try to get native mode details from EDID if necessary */ | 341 | /* Try to get native mode details from EDID if necessary */ |
350 | if (!native_mode->dotclock) { | 342 | if (!native_mode->clock) { |
351 | struct drm_display_mode *t, *mode; | 343 | struct drm_display_mode *t, *mode; |
352 | 344 | ||
353 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | 345 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { |
354 | if (mode->hdisplay == native_mode->panel_xres && | 346 | if (mode->hdisplay == native_mode->hdisplay && |
355 | mode->vdisplay == native_mode->panel_yres) { | 347 | mode->vdisplay == native_mode->vdisplay) { |
356 | native_mode->hblank = mode->htotal - mode->hdisplay; | 348 | *native_mode = *mode; |
357 | native_mode->hoverplus = mode->hsync_start - mode->hdisplay; | 349 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); |
358 | native_mode->hsync_width = mode->hsync_end - mode->hsync_start; | ||
359 | native_mode->vblank = mode->vtotal - mode->vdisplay; | ||
360 | native_mode->voverplus = mode->vsync_start - mode->vdisplay; | ||
361 | native_mode->vsync_width = mode->vsync_end - mode->vsync_start; | ||
362 | native_mode->dotclock = mode->clock; | ||
363 | DRM_INFO("Determined LVDS native mode details from EDID\n"); | 350 | DRM_INFO("Determined LVDS native mode details from EDID\n"); |
364 | break; | 351 | break; |
365 | } | 352 | } |
366 | } | 353 | } |
367 | } | 354 | } |
368 | if (!native_mode->dotclock) { | 355 | if (!native_mode->clock) { |
369 | DRM_INFO("No LVDS native mode details, disabling RMX\n"); | 356 | DRM_INFO("No LVDS native mode details, disabling RMX\n"); |
370 | radeon_encoder->rmx_type = RMX_OFF; | 357 | radeon_encoder->rmx_type = RMX_OFF; |
371 | } | 358 | } |
@@ -410,13 +397,64 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) | |||
410 | static int radeon_lvds_mode_valid(struct drm_connector *connector, | 397 | static int radeon_lvds_mode_valid(struct drm_connector *connector, |
411 | struct drm_display_mode *mode) | 398 | struct drm_display_mode *mode) |
412 | { | 399 | { |
400 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
401 | |||
402 | if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) | ||
403 | return MODE_PANEL; | ||
404 | |||
405 | if (encoder) { | ||
406 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
407 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
408 | |||
409 | /* AVIVO hardware supports downscaling modes larger than the panel | ||
410 | * to the panel size, but I'm not sure this is desirable. | ||
411 | */ | ||
412 | if ((mode->hdisplay > native_mode->hdisplay) || | ||
413 | (mode->vdisplay > native_mode->vdisplay)) | ||
414 | return MODE_PANEL; | ||
415 | |||
416 | /* if scaling is disabled, block non-native modes */ | ||
417 | if (radeon_encoder->rmx_type == RMX_OFF) { | ||
418 | if ((mode->hdisplay != native_mode->hdisplay) || | ||
419 | (mode->vdisplay != native_mode->vdisplay)) | ||
420 | return MODE_PANEL; | ||
421 | } | ||
422 | } | ||
423 | |||
413 | return MODE_OK; | 424 | return MODE_OK; |
414 | } | 425 | } |
415 | 426 | ||
416 | static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) | 427 | static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) |
417 | { | 428 | { |
418 | enum drm_connector_status ret = connector_status_connected; | 429 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
430 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
431 | enum drm_connector_status ret = connector_status_disconnected; | ||
432 | |||
433 | if (encoder) { | ||
434 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
435 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
436 | |||
437 | /* check if panel is valid */ | ||
438 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) | ||
439 | ret = connector_status_connected; | ||
440 | |||
441 | } | ||
442 | |||
443 | /* check for edid as well */ | ||
444 | if (radeon_connector->edid) | ||
445 | ret = connector_status_connected; | ||
446 | else { | ||
447 | if (radeon_connector->ddc_bus) { | ||
448 | radeon_i2c_do_lock(radeon_connector, 1); | ||
449 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, | ||
450 | &radeon_connector->ddc_bus->adapter); | ||
451 | radeon_i2c_do_lock(radeon_connector, 0); | ||
452 | if (radeon_connector->edid) | ||
453 | ret = connector_status_connected; | ||
454 | } | ||
455 | } | ||
419 | /* check acpi lid status ??? */ | 456 | /* check acpi lid status ??? */ |
457 | |||
420 | radeon_connector_update_scratch_regs(connector, ret); | 458 | radeon_connector_update_scratch_regs(connector, ret); |
421 | return ret; | 459 | return ret; |
422 | } | 460 | } |
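
radeon_lvds_mode_valid now rejects modes the panel cannot show: anything under 320x240, anything larger than the native panel size, and, when RMX scaling is disabled, anything that is not exactly native. A standalone model of those checks (thresholds copied from the hunk; treat them as that driver's policy):

#include <stdio.h>

enum mode_status { MODE_OK, MODE_PANEL };

struct simple_mode { int hdisplay, vdisplay; };

/* Mirrors the LVDS checks added above. */
static enum mode_status lvds_mode_valid(struct simple_mode m,
					struct simple_mode native, int rmx_off)
{
	if (m.hdisplay < 320 || m.vdisplay < 240)
		return MODE_PANEL;
	if (m.hdisplay > native.hdisplay || m.vdisplay > native.vdisplay)
		return MODE_PANEL;
	if (rmx_off && (m.hdisplay != native.hdisplay ||
			m.vdisplay != native.vdisplay))
		return MODE_PANEL;
	return MODE_OK;
}

int main(void)
{
	struct simple_mode native = { 1280, 800 };
	struct simple_mode req = { 1024, 768 };

	printf("1024x768 with RMX off: %s\n",
	       lvds_mode_valid(req, native, 1) == MODE_OK ? "OK" : "PANEL");
	return 0;
}
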
@@ -427,6 +465,8 @@ static void radeon_connector_destroy(struct drm_connector *connector) | |||
427 | 465 | ||
428 | if (radeon_connector->ddc_bus) | 466 | if (radeon_connector->ddc_bus) |
429 | radeon_i2c_destroy(radeon_connector->ddc_bus); | 467 | radeon_i2c_destroy(radeon_connector->ddc_bus); |
468 | if (radeon_connector->edid) | ||
469 | kfree(radeon_connector->edid); | ||
430 | kfree(radeon_connector->con_priv); | 470 | kfree(radeon_connector->con_priv); |
431 | drm_sysfs_connector_remove(connector); | 471 | drm_sysfs_connector_remove(connector); |
432 | drm_connector_cleanup(connector); | 472 | drm_connector_cleanup(connector); |
@@ -496,6 +536,8 @@ static int radeon_vga_get_modes(struct drm_connector *connector) | |||
496 | static int radeon_vga_mode_valid(struct drm_connector *connector, | 536 | static int radeon_vga_mode_valid(struct drm_connector *connector, |
497 | struct drm_display_mode *mode) | 537 | struct drm_display_mode *mode) |
498 | { | 538 | { |
539 | /* XXX check mode bandwidth */ | ||
540 | /* XXX verify against max DAC output frequency */ | ||
499 | return MODE_OK; | 541 | return MODE_OK; |
500 | } | 542 | } |
501 | 543 | ||
@@ -514,9 +556,32 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect | |||
514 | radeon_i2c_do_lock(radeon_connector, 1); | 556 | radeon_i2c_do_lock(radeon_connector, 1); |
515 | dret = radeon_ddc_probe(radeon_connector); | 557 | dret = radeon_ddc_probe(radeon_connector); |
516 | radeon_i2c_do_lock(radeon_connector, 0); | 558 | radeon_i2c_do_lock(radeon_connector, 0); |
517 | if (dret) | 559 | if (dret) { |
518 | ret = connector_status_connected; | 560 | if (radeon_connector->edid) { |
519 | else { | 561 | kfree(radeon_connector->edid); |
562 | radeon_connector->edid = NULL; | ||
563 | } | ||
564 | radeon_i2c_do_lock(radeon_connector, 1); | ||
565 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | ||
566 | radeon_i2c_do_lock(radeon_connector, 0); | ||
567 | |||
568 | if (!radeon_connector->edid) { | ||
569 | DRM_ERROR("DDC responded but not EDID found for %s\n", | ||
570 | drm_get_connector_name(connector)); | ||
571 | } else { | ||
572 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); | ||
573 | |||
574 | /* some oems have boards with separate digital and analog connectors | ||
575 | * with a shared ddc line (often vga + hdmi) | ||
576 | */ | ||
577 | if (radeon_connector->use_digital && radeon_connector->shared_ddc) { | ||
578 | kfree(radeon_connector->edid); | ||
579 | radeon_connector->edid = NULL; | ||
580 | ret = connector_status_disconnected; | ||
581 | } else | ||
582 | ret = connector_status_connected; | ||
583 | } | ||
584 | } else { | ||
520 | if (radeon_connector->dac_load_detect) { | 585 | if (radeon_connector->dac_load_detect) { |
521 | encoder_funcs = encoder->helper_private; | 586 | encoder_funcs = encoder->helper_private; |
522 | ret = encoder_funcs->detect(encoder, connector); | 587 | ret = encoder_funcs->detect(encoder, connector); |
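
Both detect paths now fetch the EDID themselves and then decide whether it really belongs to this connector: on boards that route one DDC line to both a VGA and a digital connector, an EDID reporting the wrong sink type is discarded and that connector is reported disconnected. A standalone sketch of that rule:

#include <stdbool.h>
#include <stdio.h>

/* Decide whether an EDID read over a possibly shared DDC line belongs to
 * this connector: an analog (VGA) connector keeps it only if it reports an
 * analog sink, a DVI connector only if it reports a digital one. Otherwise
 * the EDID came from the other connector sharing the wire. */
static bool edid_belongs_here(bool connector_is_digital,
			      bool edid_is_digital, bool shared_ddc)
{
	if (!shared_ddc)
		return true;
	return connector_is_digital == edid_is_digital;
}

int main(void)
{
	/* HDMI monitor answering on a VGA+HDMI board's shared DDC line. */
	printf("VGA side keeps EDID: %s\n",
	       edid_belongs_here(false, true, true) ? "yes" : "no");
	return 0;
}
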
@@ -570,6 +635,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector) | |||
570 | static int radeon_tv_mode_valid(struct drm_connector *connector, | 635 | static int radeon_tv_mode_valid(struct drm_connector *connector, |
571 | struct drm_display_mode *mode) | 636 | struct drm_display_mode *mode) |
572 | { | 637 | { |
638 | if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) | ||
639 | return MODE_CLOCK_RANGE; | ||
573 | return MODE_OK; | 640 | return MODE_OK; |
574 | } | 641 | } |
575 | 642 | ||
@@ -644,6 +711,10 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
644 | dret = radeon_ddc_probe(radeon_connector); | 711 | dret = radeon_ddc_probe(radeon_connector); |
645 | radeon_i2c_do_lock(radeon_connector, 0); | 712 | radeon_i2c_do_lock(radeon_connector, 0); |
646 | if (dret) { | 713 | if (dret) { |
714 | if (radeon_connector->edid) { | ||
715 | kfree(radeon_connector->edid); | ||
716 | radeon_connector->edid = NULL; | ||
717 | } | ||
647 | radeon_i2c_do_lock(radeon_connector, 1); | 718 | radeon_i2c_do_lock(radeon_connector, 1); |
648 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 719 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
649 | radeon_i2c_do_lock(radeon_connector, 0); | 720 | radeon_i2c_do_lock(radeon_connector, 0); |
@@ -654,10 +725,15 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
654 | } else { | 725 | } else { |
655 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); | 726 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); |
656 | 727 | ||
657 | /* if this isn't a digital monitor | 728 | /* some oems have boards with separate digital and analog connectors |
658 | then we need to make sure we don't have any | 729 | * with a shared ddc line (often vga + hdmi) |
659 | TV conflicts */ | 730 | */ |
660 | ret = connector_status_connected; | 731 | if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) { |
732 | kfree(radeon_connector->edid); | ||
733 | radeon_connector->edid = NULL; | ||
734 | ret = connector_status_disconnected; | ||
735 | } else | ||
736 | ret = connector_status_connected; | ||
661 | } | 737 | } |
662 | } | 738 | } |
663 | 739 | ||
@@ -753,9 +829,27 @@ static void radeon_dvi_force(struct drm_connector *connector) | |||
753 | radeon_connector->use_digital = true; | 829 | radeon_connector->use_digital = true; |
754 | } | 830 | } |
755 | 831 | ||
832 | static int radeon_dvi_mode_valid(struct drm_connector *connector, | ||
833 | struct drm_display_mode *mode) | ||
834 | { | ||
835 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
836 | |||
837 | /* XXX check mode bandwidth */ | ||
838 | |||
839 | if (radeon_connector->use_digital && (mode->clock > 165000)) { | ||
840 | if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || | ||
841 | (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || | ||
842 | (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) | ||
843 | return MODE_OK; | ||
844 | else | ||
845 | return MODE_CLOCK_HIGH; | ||
846 | } | ||
847 | return MODE_OK; | ||
848 | } | ||
849 | |||
756 | struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { | 850 | struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { |
757 | .get_modes = radeon_dvi_get_modes, | 851 | .get_modes = radeon_dvi_get_modes, |
758 | .mode_valid = radeon_vga_mode_valid, | 852 | .mode_valid = radeon_dvi_mode_valid, |
759 | .best_encoder = radeon_dvi_encoder, | 853 | .best_encoder = radeon_dvi_encoder, |
760 | }; | 854 | }; |
761 | 855 | ||
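
Single-link TMDS tops out at a 165 MHz pixel clock, so the new radeon_dvi_mode_valid only passes faster modes on connectors recorded as dual-link DVI (or HDMI type B) via the connector object IDs added throughout this series. A compact standalone model of that gate, with the connector IDs reduced to an enum for illustration:

#include <stdbool.h>
#include <stdio.h>

enum dvi_link { DVI_SINGLE_LINK, DVI_DUAL_LINK, HDMI_TYPE_B };

/* mode_clock_khz is the pixel clock in kHz, as drm_display_mode stores it. */
static bool dvi_clock_ok(int mode_clock_khz, bool digital, enum dvi_link link)
{
	if (!digital || mode_clock_khz <= 165000)
		return true;            /* within single-link TMDS limits */
	return link == DVI_DUAL_LINK || link == HDMI_TYPE_B;
}

int main(void)
{
	/* 2560x1600@60 needs roughly a 268 MHz pixel clock. */
	printf("268 MHz on single link: %s\n",
	       dvi_clock_ok(268500, true, DVI_SINGLE_LINK) ? "OK" : "too fast");
	printf("268 MHz on dual link: %s\n",
	       dvi_clock_ok(268500, true, DVI_DUAL_LINK) ? "OK" : "too fast");
	return 0;
}
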
@@ -775,13 +869,15 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
775 | int connector_type, | 869 | int connector_type, |
776 | struct radeon_i2c_bus_rec *i2c_bus, | 870 | struct radeon_i2c_bus_rec *i2c_bus, |
777 | bool linkb, | 871 | bool linkb, |
778 | uint32_t igp_lane_info) | 872 | uint32_t igp_lane_info, |
873 | uint16_t connector_object_id) | ||
779 | { | 874 | { |
780 | struct radeon_device *rdev = dev->dev_private; | 875 | struct radeon_device *rdev = dev->dev_private; |
781 | struct drm_connector *connector; | 876 | struct drm_connector *connector; |
782 | struct radeon_connector *radeon_connector; | 877 | struct radeon_connector *radeon_connector; |
783 | struct radeon_connector_atom_dig *radeon_dig_connector; | 878 | struct radeon_connector_atom_dig *radeon_dig_connector; |
784 | uint32_t subpixel_order = SubPixelNone; | 879 | uint32_t subpixel_order = SubPixelNone; |
880 | bool shared_ddc = false; | ||
785 | int ret; | 881 | int ret; |
786 | 882 | ||
787 | /* fixme - tv/cv/din */ | 883 | /* fixme - tv/cv/din */ |
@@ -795,6 +891,13 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
795 | radeon_connector->devices |= supported_device; | 891 | radeon_connector->devices |= supported_device; |
796 | return; | 892 | return; |
797 | } | 893 | } |
894 | if (radeon_connector->ddc_bus && i2c_bus->valid) { | ||
895 | if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, | ||
896 | sizeof(struct radeon_i2c_bus_rec)) == 0) { | ||
897 | radeon_connector->shared_ddc = true; | ||
898 | shared_ddc = true; | ||
899 | } | ||
900 | } | ||
798 | } | 901 | } |
799 | 902 | ||
800 | radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); | 903 | radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); |
@@ -805,6 +908,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
805 | 908 | ||
806 | radeon_connector->connector_id = connector_id; | 909 | radeon_connector->connector_id = connector_id; |
807 | radeon_connector->devices = supported_device; | 910 | radeon_connector->devices = supported_device; |
911 | radeon_connector->shared_ddc = shared_ddc; | ||
912 | radeon_connector->connector_object_id = connector_object_id; | ||
808 | switch (connector_type) { | 913 | switch (connector_type) { |
809 | case DRM_MODE_CONNECTOR_VGA: | 914 | case DRM_MODE_CONNECTOR_VGA: |
810 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 915 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
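
radeon_add_atom_connector now notices when a new connector's DDC record is byte-identical to one already registered and flags both as sharing the bus. A standalone sketch of that comparison (the struct is a stand-in for radeon_i2c_bus_rec; since the driver compares the whole record with memcmp, padding bytes have to be cleared too):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for radeon_i2c_bus_rec: only enough fields to illustrate. */
struct i2c_rec {
	unsigned mask_clk_reg;
	unsigned mask_data_reg;
	bool valid;
};

/* Two connectors share a DDC line if their bus records compare equal. */
static bool same_ddc(const struct i2c_rec *a, const struct i2c_rec *b)
{
	return a->valid && b->valid && memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
	struct i2c_rec vga, hdmi;

	/* memcmp also sees padding bytes, so clear the whole records first. */
	memset(&vga, 0, sizeof(vga));
	memset(&hdmi, 0, sizeof(hdmi));
	vga.mask_clk_reg = hdmi.mask_clk_reg = 0x60;
	vga.mask_data_reg = hdmi.mask_data_reg = 0x64;
	vga.valid = hdmi.valid = true;

	printf("shared ddc: %s\n", same_ddc(&vga, &hdmi) ? "yes" : "no");
	return 0;
}
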
@@ -956,7 +1061,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
956 | uint32_t connector_id, | 1061 | uint32_t connector_id, |
957 | uint32_t supported_device, | 1062 | uint32_t supported_device, |
958 | int connector_type, | 1063 | int connector_type, |
959 | struct radeon_i2c_bus_rec *i2c_bus) | 1064 | struct radeon_i2c_bus_rec *i2c_bus, |
1065 | uint16_t connector_object_id) | ||
960 | { | 1066 | { |
961 | struct radeon_device *rdev = dev->dev_private; | 1067 | struct radeon_device *rdev = dev->dev_private; |
962 | struct drm_connector *connector; | 1068 | struct drm_connector *connector; |
@@ -985,6 +1091,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
985 | 1091 | ||
986 | radeon_connector->connector_id = connector_id; | 1092 | radeon_connector->connector_id = connector_id; |
987 | radeon_connector->devices = supported_device; | 1093 | radeon_connector->devices = supported_device; |
1094 | radeon_connector->connector_object_id = connector_object_id; | ||
988 | switch (connector_type) { | 1095 | switch (connector_type) { |
989 | case DRM_MODE_CONNECTOR_VGA: | 1096 | case DRM_MODE_CONNECTOR_VGA: |
990 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1097 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index b13c79e38bc0..28772a37009c 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -109,9 +109,15 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | |||
109 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 109 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
110 | struct radeon_device *rdev = crtc->dev->dev_private; | 110 | struct radeon_device *rdev = crtc->dev->dev_private; |
111 | 111 | ||
112 | if (ASIC_IS_AVIVO(rdev)) | 112 | if (ASIC_IS_AVIVO(rdev)) { |
113 | if (rdev->family >= CHIP_RV770) { | ||
114 | if (radeon_crtc->crtc_id) | ||
115 | WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); | ||
116 | else | ||
117 | WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0); | ||
118 | } | ||
113 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 119 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); |
114 | else { | 120 | } else { |
115 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; | 121 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; |
116 | /* offset is from DISP(2)_BASE_ADDRESS */ | 122 | /* offset is from DISP(2)_BASE_ADDRESS */ |
117 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); | 123 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); |
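
On RV770 and newer the cursor surface address is a low/high register pair; the hunk above zeroes the high half before writing the low word as before, presumably because the cursor buffer sits in the low 4 GB on those parts. A generic split of a GPU address into such a pair, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Split a GPU address into the low/high words a 64-bit register pair takes. */
static void split_gpu_addr(uint64_t gpu_addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(gpu_addr & 0xffffffffULL);
	*hi = (uint32_t)(gpu_addr >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_gpu_addr(0x100001000ULL, &lo, &hi);
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}
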
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ec835d56d30a..e3f9edfa40fe 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
322 | case CHIP_RV380: | 322 | case CHIP_RV380: |
323 | rdev->asic = &r300_asic; | 323 | rdev->asic = &r300_asic; |
324 | if (rdev->flags & RADEON_IS_PCIE) { | 324 | if (rdev->flags & RADEON_IS_PCIE) { |
325 | rdev->asic->gart_init = &rv370_pcie_gart_init; | ||
326 | rdev->asic->gart_fini = &rv370_pcie_gart_fini; | ||
327 | rdev->asic->gart_enable = &rv370_pcie_gart_enable; | ||
328 | rdev->asic->gart_disable = &rv370_pcie_gart_disable; | ||
329 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | 325 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
330 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | 326 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; |
331 | } | 327 | } |
@@ -448,20 +444,24 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) | |||
448 | return r; | 444 | return r; |
449 | } | 445 | } |
450 | 446 | ||
451 | static struct card_info atom_card_info = { | ||
452 | .dev = NULL, | ||
453 | .reg_read = cail_reg_read, | ||
454 | .reg_write = cail_reg_write, | ||
455 | .mc_read = cail_mc_read, | ||
456 | .mc_write = cail_mc_write, | ||
457 | .pll_read = cail_pll_read, | ||
458 | .pll_write = cail_pll_write, | ||
459 | }; | ||
460 | |||
461 | int radeon_atombios_init(struct radeon_device *rdev) | 447 | int radeon_atombios_init(struct radeon_device *rdev) |
462 | { | 448 | { |
463 | atom_card_info.dev = rdev->ddev; | 449 | struct card_info *atom_card_info = |
464 | rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios); | 450 | kzalloc(sizeof(struct card_info), GFP_KERNEL); |
451 | |||
452 | if (!atom_card_info) | ||
453 | return -ENOMEM; | ||
454 | |||
455 | rdev->mode_info.atom_card_info = atom_card_info; | ||
456 | atom_card_info->dev = rdev->ddev; | ||
457 | atom_card_info->reg_read = cail_reg_read; | ||
458 | atom_card_info->reg_write = cail_reg_write; | ||
459 | atom_card_info->mc_read = cail_mc_read; | ||
460 | atom_card_info->mc_write = cail_mc_write; | ||
461 | atom_card_info->pll_read = cail_pll_read; | ||
462 | atom_card_info->pll_write = cail_pll_write; | ||
463 | |||
464 | rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); | ||
465 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); | 465 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); |
466 | return 0; | 466 | return 0; |
467 | } | 467 | } |
@@ -469,6 +469,7 @@ int radeon_atombios_init(struct radeon_device *rdev) | |||
469 | void radeon_atombios_fini(struct radeon_device *rdev) | 469 | void radeon_atombios_fini(struct radeon_device *rdev) |
470 | { | 470 | { |
471 | kfree(rdev->mode_info.atom_context); | 471 | kfree(rdev->mode_info.atom_context); |
472 | kfree(rdev->mode_info.atom_card_info); | ||
472 | } | 473 | } |
473 | 474 | ||
474 | int radeon_combios_init(struct radeon_device *rdev) | 475 | int radeon_combios_init(struct radeon_device *rdev) |
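
radeon_atombios_init used to point a single static card_info at whichever device initialized last, which cannot work with more than one radeon card in the system; it now allocates one callback table per device and frees it in radeon_atombios_fini. The pattern in miniature (names and members are illustrative, not the AtomBIOS interface):

#include <stdio.h>
#include <stdlib.h>

struct card_info {
	void *dev;
	unsigned (*reg_read)(void *dev, unsigned reg);
	void (*reg_write)(void *dev, unsigned reg, unsigned val);
};

/* Allocate a per-device table instead of reusing one static instance. */
static struct card_info *card_info_create(void *dev)
{
	struct card_info *info = calloc(1, sizeof(*info));

	if (!info)
		return NULL;
	info->dev = dev;
	/* reg_read/reg_write hooks would be filled in here */
	return info;
}

static void card_info_destroy(struct card_info *info)
{
	free(info);
}

int main(void)
{
	int gpu0, gpu1;
	struct card_info *a = card_info_create(&gpu0);
	struct card_info *b = card_info_create(&gpu1);

	printf("independent tables: %s\n",
	       (a && b && a->dev != b->dev) ? "yes" : "no");
	card_info_destroy(a);
	card_info_destroy(b);
	return 0;
}
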
@@ -485,7 +486,6 @@ void radeon_combios_fini(struct radeon_device *rdev) | |||
485 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) | 486 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) |
486 | { | 487 | { |
487 | struct radeon_device *rdev = cookie; | 488 | struct radeon_device *rdev = cookie; |
488 | |||
489 | radeon_vga_set_state(rdev, state); | 489 | radeon_vga_set_state(rdev, state); |
490 | if (state) | 490 | if (state) |
491 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 491 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
@@ -493,6 +493,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) | |||
493 | else | 493 | else |
494 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 494 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
495 | } | 495 | } |
496 | |||
497 | void radeon_agp_disable(struct radeon_device *rdev) | ||
498 | { | ||
499 | rdev->flags &= ~RADEON_IS_AGP; | ||
500 | if (rdev->family >= CHIP_R600) { | ||
501 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
502 | rdev->flags |= RADEON_IS_PCIE; | ||
503 | } else if (rdev->family >= CHIP_RV515 || | ||
504 | rdev->family == CHIP_RV380 || | ||
505 | rdev->family == CHIP_RV410 || | ||
506 | rdev->family == CHIP_R423) { | ||
507 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
508 | rdev->flags |= RADEON_IS_PCIE; | ||
509 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
510 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
511 | } else { | ||
512 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
513 | rdev->flags |= RADEON_IS_PCI; | ||
514 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
515 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
516 | } | ||
517 | } | ||
518 | |||
496 | /* | 519 | /* |
497 | * Radeon device. | 520 | * Radeon device. |
498 | */ | 521 | */ |
@@ -531,32 +554,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
531 | } | 554 | } |
532 | 555 | ||
533 | if (radeon_agpmode == -1) { | 556 | if (radeon_agpmode == -1) { |
534 | rdev->flags &= ~RADEON_IS_AGP; | 557 | radeon_agp_disable(rdev); |
535 | if (rdev->family >= CHIP_R600) { | ||
536 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
537 | rdev->flags |= RADEON_IS_PCIE; | ||
538 | } else if (rdev->family >= CHIP_RV515 || | ||
539 | rdev->family == CHIP_RV380 || | ||
540 | rdev->family == CHIP_RV410 || | ||
541 | rdev->family == CHIP_R423) { | ||
542 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
543 | rdev->flags |= RADEON_IS_PCIE; | ||
544 | rdev->asic->gart_init = &rv370_pcie_gart_init; | ||
545 | rdev->asic->gart_fini = &rv370_pcie_gart_fini; | ||
546 | rdev->asic->gart_enable = &rv370_pcie_gart_enable; | ||
547 | rdev->asic->gart_disable = &rv370_pcie_gart_disable; | ||
548 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
549 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
550 | } else { | ||
551 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
552 | rdev->flags |= RADEON_IS_PCI; | ||
553 | rdev->asic->gart_init = &r100_pci_gart_init; | ||
554 | rdev->asic->gart_fini = &r100_pci_gart_fini; | ||
555 | rdev->asic->gart_enable = &r100_pci_gart_enable; | ||
556 | rdev->asic->gart_disable = &r100_pci_gart_disable; | ||
557 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
558 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
559 | } | ||
560 | } | 558 | } |
561 | 559 | ||
562 | /* set DMA mask + need_dma32 flags. | 560 | /* set DMA mask + need_dma32 flags. |
@@ -588,111 +586,26 @@ int radeon_device_init(struct radeon_device *rdev, | |||
588 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); | 586 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); |
589 | DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); | 587 | DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); |
590 | 588 | ||
591 | rdev->new_init_path = false; | ||
592 | r = radeon_init(rdev); | ||
593 | if (r) { | ||
594 | return r; | ||
595 | } | ||
596 | |||
597 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 589 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ |
598 | r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); | 590 | /* this will fail for cards that aren't VGA class devices, just |
599 | if (r) { | 591 | * ignore it */ |
600 | return -EINVAL; | 592 | vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); |
601 | } | ||
602 | 593 | ||
603 | if (!rdev->new_init_path) { | 594 | r = radeon_init(rdev); |
604 | /* Setup errata flags */ | 595 | if (r) |
605 | radeon_errata(rdev); | 596 | return r; |
606 | /* Initialize scratch registers */ | ||
607 | radeon_scratch_init(rdev); | ||
608 | /* Initialize surface registers */ | ||
609 | radeon_surface_init(rdev); | ||
610 | |||
611 | /* BIOS*/ | ||
612 | if (!radeon_get_bios(rdev)) { | ||
613 | if (ASIC_IS_AVIVO(rdev)) | ||
614 | return -EINVAL; | ||
615 | } | ||
616 | if (rdev->is_atom_bios) { | ||
617 | r = radeon_atombios_init(rdev); | ||
618 | if (r) { | ||
619 | return r; | ||
620 | } | ||
621 | } else { | ||
622 | r = radeon_combios_init(rdev); | ||
623 | if (r) { | ||
624 | return r; | ||
625 | } | ||
626 | } | ||
627 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
628 | if (radeon_gpu_reset(rdev)) { | ||
629 | /* FIXME: what do we want to do here ? */ | ||
630 | } | ||
631 | /* check if cards are posted or not */ | ||
632 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
633 | DRM_INFO("GPU not posted. posting now...\n"); | ||
634 | if (rdev->is_atom_bios) { | ||
635 | atom_asic_init(rdev->mode_info.atom_context); | ||
636 | } else { | ||
637 | radeon_combios_asic_init(rdev->ddev); | ||
638 | } | ||
639 | } | ||
640 | /* Get clock & vram information */ | ||
641 | radeon_get_clock_info(rdev->ddev); | ||
642 | radeon_vram_info(rdev); | ||
643 | /* Initialize clocks */ | ||
644 | r = radeon_clocks_init(rdev); | ||
645 | if (r) { | ||
646 | return r; | ||
647 | } | ||
648 | 597 | ||
649 | /* Initialize memory controller (also test AGP) */ | 598 | if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { |
650 | r = radeon_mc_init(rdev); | 599 | /* Acceleration not working on AGP card try again |
651 | if (r) { | 600 | * with fallback to PCI or PCIE GART |
652 | return r; | 601 | */ |
653 | } | 602 | radeon_gpu_reset(rdev); |
654 | /* Fence driver */ | 603 | radeon_fini(rdev); |
655 | r = radeon_fence_driver_init(rdev); | 604 | radeon_agp_disable(rdev); |
656 | if (r) { | 605 | r = radeon_init(rdev); |
657 | return r; | ||
658 | } | ||
659 | r = radeon_irq_kms_init(rdev); | ||
660 | if (r) { | ||
661 | return r; | ||
662 | } | ||
663 | /* Memory manager */ | ||
664 | r = radeon_object_init(rdev); | ||
665 | if (r) { | ||
666 | return r; | ||
667 | } | ||
668 | r = radeon_gpu_gart_init(rdev); | ||
669 | if (r) | 606 | if (r) |
670 | return r; | 607 | return r; |
671 | /* Initialize GART (initialize after TTM so we can allocate | ||
672 | * memory through TTM but finalize after TTM) */ | ||
673 | r = radeon_gart_enable(rdev); | ||
674 | if (r) | ||
675 | return 0; | ||
676 | r = radeon_gem_init(rdev); | ||
677 | if (r) | ||
678 | return 0; | ||
679 | |||
680 | /* 1M ring buffer */ | ||
681 | r = radeon_cp_init(rdev, 1024 * 1024); | ||
682 | if (r) | ||
683 | return 0; | ||
684 | r = radeon_wb_init(rdev); | ||
685 | if (r) | ||
686 | DRM_ERROR("radeon: failled initializing WB (%d).\n", r); | ||
687 | r = radeon_ib_pool_init(rdev); | ||
688 | if (r) | ||
689 | return 0; | ||
690 | r = radeon_ib_test(rdev); | ||
691 | if (r) | ||
692 | return 0; | ||
693 | rdev->accel_working = true; | ||
694 | } | 608 | } |
695 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); | ||
696 | if (radeon_testing) { | 609 | if (radeon_testing) { |
697 | radeon_test_moves(rdev); | 610 | radeon_test_moves(rdev); |
698 | } | 611 | } |
@@ -706,32 +619,8 @@ void radeon_device_fini(struct radeon_device *rdev) | |||
706 | { | 619 | { |
707 | DRM_INFO("radeon: finishing device.\n"); | 620 | DRM_INFO("radeon: finishing device.\n"); |
708 | rdev->shutdown = true; | 621 | rdev->shutdown = true; |
709 | /* Order matter so becarefull if you rearrange anythings */ | 622 | radeon_fini(rdev); |
710 | if (!rdev->new_init_path) { | 623 | vga_client_register(rdev->pdev, NULL, NULL, NULL); |
711 | radeon_ib_pool_fini(rdev); | ||
712 | radeon_cp_fini(rdev); | ||
713 | radeon_wb_fini(rdev); | ||
714 | radeon_gpu_gart_fini(rdev); | ||
715 | radeon_gem_fini(rdev); | ||
716 | radeon_mc_fini(rdev); | ||
717 | #if __OS_HAS_AGP | ||
718 | radeon_agp_fini(rdev); | ||
719 | #endif | ||
720 | radeon_irq_kms_fini(rdev); | ||
721 | vga_client_register(rdev->pdev, NULL, NULL, NULL); | ||
722 | radeon_fence_driver_fini(rdev); | ||
723 | radeon_clocks_fini(rdev); | ||
724 | radeon_object_fini(rdev); | ||
725 | if (rdev->is_atom_bios) { | ||
726 | radeon_atombios_fini(rdev); | ||
727 | } else { | ||
728 | radeon_combios_fini(rdev); | ||
729 | } | ||
730 | kfree(rdev->bios); | ||
731 | rdev->bios = NULL; | ||
732 | } else { | ||
733 | radeon_fini(rdev); | ||
734 | } | ||
735 | iounmap(rdev->rmmio); | 624 | iounmap(rdev->rmmio); |
736 | rdev->rmmio = NULL; | 625 | rdev->rmmio = NULL; |
737 | } | 626 | } |
@@ -771,14 +660,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
771 | 660 | ||
772 | radeon_save_bios_scratch_regs(rdev); | 661 | radeon_save_bios_scratch_regs(rdev); |
773 | 662 | ||
774 | if (!rdev->new_init_path) { | 663 | radeon_suspend(rdev); |
775 | radeon_cp_disable(rdev); | ||
776 | radeon_gart_disable(rdev); | ||
777 | rdev->irq.sw_int = false; | ||
778 | radeon_irq_set(rdev); | ||
779 | } else { | ||
780 | radeon_suspend(rdev); | ||
781 | } | ||
782 | /* evict remaining vram memory */ | 664 | /* evict remaining vram memory */ |
783 | radeon_object_evict_vram(rdev); | 665 | radeon_object_evict_vram(rdev); |
784 | 666 | ||
@@ -797,7 +679,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
797 | int radeon_resume_kms(struct drm_device *dev) | 679 | int radeon_resume_kms(struct drm_device *dev) |
798 | { | 680 | { |
799 | struct radeon_device *rdev = dev->dev_private; | 681 | struct radeon_device *rdev = dev->dev_private; |
800 | int r; | ||
801 | 682 | ||
802 | acquire_console_sem(); | 683 | acquire_console_sem(); |
803 | pci_set_power_state(dev->pdev, PCI_D0); | 684 | pci_set_power_state(dev->pdev, PCI_D0); |
@@ -807,43 +688,7 @@ int radeon_resume_kms(struct drm_device *dev) | |||
807 | return -1; | 688 | return -1; |
808 | } | 689 | } |
809 | pci_set_master(dev->pdev); | 690 | pci_set_master(dev->pdev); |
810 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | 691 | radeon_resume(rdev); |
811 | if (!rdev->new_init_path) { | ||
812 | if (radeon_gpu_reset(rdev)) { | ||
813 | /* FIXME: what do we want to do here ? */ | ||
814 | } | ||
815 | /* post card */ | ||
816 | if (rdev->is_atom_bios) { | ||
817 | atom_asic_init(rdev->mode_info.atom_context); | ||
818 | } else { | ||
819 | radeon_combios_asic_init(rdev->ddev); | ||
820 | } | ||
821 | /* Initialize clocks */ | ||
822 | r = radeon_clocks_init(rdev); | ||
823 | if (r) { | ||
824 | release_console_sem(); | ||
825 | return r; | ||
826 | } | ||
827 | /* Enable IRQ */ | ||
828 | rdev->irq.sw_int = true; | ||
829 | radeon_irq_set(rdev); | ||
830 | /* Initialize GPU Memory Controller */ | ||
831 | r = radeon_mc_init(rdev); | ||
832 | if (r) { | ||
833 | goto out; | ||
834 | } | ||
835 | r = radeon_gart_enable(rdev); | ||
836 | if (r) { | ||
837 | goto out; | ||
838 | } | ||
839 | r = radeon_cp_init(rdev, rdev->cp.ring_size); | ||
840 | if (r) { | ||
841 | goto out; | ||
842 | } | ||
843 | } else { | ||
844 | radeon_resume(rdev); | ||
845 | } | ||
846 | out: | ||
847 | radeon_restore_bios_scratch_regs(rdev); | 692 | radeon_restore_bios_scratch_regs(rdev); |
848 | fb_set_suspend(rdev->fbdev_info, 0); | 693 | fb_set_suspend(rdev->fbdev_info, 0); |
849 | release_console_sem(); | 694 | release_console_sem(); |
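
The radeon_device.c hunks above fold the old "!rdev->new_init_path" code into the per-ASIC radeon_init()/radeon_fini()/radeon_suspend()/radeon_resume() hooks, move the GART override pointers into the new radeon_agp_disable() helper, and add a fallback in radeon_device_init(): if acceleration does not come up on an AGP card, the device is reset, torn down, forced to PCI/PCIE GART and initialized again. A minimal, self-contained sketch of that retry pattern follows; the types and helpers are stand-ins for illustration only, not the kernel API.

	#include <stdbool.h>
	#include <stdio.h>

	struct dev_state { bool is_agp; bool accel_working; };

	static int hw_init(struct dev_state *d)
	{
		/* pretend AGP bring-up fails while PCI(E) GART works */
		d->accel_working = !d->is_agp;
		return 0;
	}

	static void hw_fini(struct dev_state *d)     { d->accel_working = false; }
	static void agp_disable(struct dev_state *d) { d->is_agp = false; }

	static int device_init(struct dev_state *d)
	{
		int r = hw_init(d);
		if (r)
			return r;
		if (d->is_agp && !d->accel_working) {
			/* acceleration broken on AGP: tear down, force PCI(E) GART, retry */
			hw_fini(d);
			agp_disable(d);
			r = hw_init(d);
		}
		return r;
	}

	int main(void)
	{
		struct dev_state d = { .is_agp = true };
		printf("init=%d accel=%d agp=%d\n", device_init(&d), d.accel_working, d.is_agp);
		return 0;
	}
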
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5d8141b13765..c85df4afcb7a 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -106,51 +106,44 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) | |||
106 | legacy_crtc_load_lut(crtc); | 106 | legacy_crtc_load_lut(crtc); |
107 | } | 107 | } |
108 | 108 | ||
109 | /** Sets the color ramps on behalf of RandR */ | 109 | /** Sets the color ramps on behalf of fbcon */ |
110 | void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 110 | void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
111 | u16 blue, int regno) | 111 | u16 blue, int regno) |
112 | { | 112 | { |
113 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 113 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
114 | 114 | ||
115 | if (regno == 0) | ||
116 | DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id); | ||
117 | radeon_crtc->lut_r[regno] = red >> 6; | 115 | radeon_crtc->lut_r[regno] = red >> 6; |
118 | radeon_crtc->lut_g[regno] = green >> 6; | 116 | radeon_crtc->lut_g[regno] = green >> 6; |
119 | radeon_crtc->lut_b[regno] = blue >> 6; | 117 | radeon_crtc->lut_b[regno] = blue >> 6; |
120 | } | 118 | } |
121 | 119 | ||
120 | /** Gets the color ramps on behalf of fbcon */ | ||
121 | void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
122 | u16 *blue, int regno) | ||
123 | { | ||
124 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
125 | |||
126 | *red = radeon_crtc->lut_r[regno] << 6; | ||
127 | *green = radeon_crtc->lut_g[regno] << 6; | ||
128 | *blue = radeon_crtc->lut_b[regno] << 6; | ||
129 | } | ||
130 | |||
122 | static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 131 | static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
123 | u16 *blue, uint32_t size) | 132 | u16 *blue, uint32_t size) |
124 | { | 133 | { |
125 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 134 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
126 | int i, j; | 135 | int i; |
127 | 136 | ||
128 | if (size != 256) { | 137 | if (size != 256) { |
129 | return; | 138 | return; |
130 | } | 139 | } |
131 | if (crtc->fb == NULL) { | ||
132 | return; | ||
133 | } | ||
134 | 140 | ||
135 | if (crtc->fb->depth == 16) { | 141 | /* userspace palettes are always correct as is */ |
136 | for (i = 0; i < 64; i++) { | 142 | for (i = 0; i < 256; i++) { |
137 | if (i <= 31) { | 143 | radeon_crtc->lut_r[i] = red[i] >> 6; |
138 | for (j = 0; j < 8; j++) { | 144 | radeon_crtc->lut_g[i] = green[i] >> 6; |
139 | radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6; | 145 | radeon_crtc->lut_b[i] = blue[i] >> 6; |
140 | radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6; | ||
141 | } | ||
142 | } | ||
143 | for (j = 0; j < 4; j++) | ||
144 | radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6; | ||
145 | } | ||
146 | } else { | ||
147 | for (i = 0; i < 256; i++) { | ||
148 | radeon_crtc->lut_r[i] = red[i] >> 6; | ||
149 | radeon_crtc->lut_g[i] = green[i] >> 6; | ||
150 | radeon_crtc->lut_b[i] = blue[i] >> 6; | ||
151 | } | ||
152 | } | 146 | } |
153 | |||
154 | radeon_crtc_load_lut(crtc); | 147 | radeon_crtc_load_lut(crtc); |
155 | } | 148 | } |
156 | 149 | ||
@@ -341,27 +334,19 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) | |||
341 | 334 | ||
342 | int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | 335 | int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) |
343 | { | 336 | { |
344 | struct edid *edid; | ||
345 | int ret = 0; | 337 | int ret = 0; |
346 | 338 | ||
347 | if (!radeon_connector->ddc_bus) | 339 | if (!radeon_connector->ddc_bus) |
348 | return -1; | 340 | return -1; |
349 | if (!radeon_connector->edid) { | 341 | if (!radeon_connector->edid) { |
350 | radeon_i2c_do_lock(radeon_connector, 1); | 342 | radeon_i2c_do_lock(radeon_connector, 1); |
351 | edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 343 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
352 | radeon_i2c_do_lock(radeon_connector, 0); | 344 | radeon_i2c_do_lock(radeon_connector, 0); |
353 | } else | 345 | } |
354 | edid = radeon_connector->edid; | ||
355 | 346 | ||
356 | if (edid) { | 347 | if (radeon_connector->edid) { |
357 | /* update digital bits here */ | 348 | drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); |
358 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | 349 | ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); |
359 | radeon_connector->use_digital = 1; | ||
360 | else | ||
361 | radeon_connector->use_digital = 0; | ||
362 | drm_mode_connector_update_edid_property(&radeon_connector->base, edid); | ||
363 | ret = drm_add_edid_modes(&radeon_connector->base, edid); | ||
364 | kfree(edid); | ||
365 | return ret; | 350 | return ret; |
366 | } | 351 | } |
367 | drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); | 352 | drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); |
@@ -724,7 +709,11 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
724 | if (ret) { | 709 | if (ret) { |
725 | return ret; | 710 | return ret; |
726 | } | 711 | } |
727 | /* allocate crtcs - TODO single crtc */ | 712 | |
713 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
714 | num_crtc = 1; | ||
715 | |||
716 | /* allocate crtcs */ | ||
728 | for (i = 0; i < num_crtc; i++) { | 717 | for (i = 0; i < num_crtc; i++) { |
729 | radeon_crtc_init(rdev->ddev, i); | 718 | radeon_crtc_init(rdev->ddev, i); |
730 | } | 719 | } |
@@ -764,7 +753,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
764 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | 753 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; |
765 | memcpy(&radeon_crtc->native_mode, | 754 | memcpy(&radeon_crtc->native_mode, |
766 | &radeon_encoder->native_mode, | 755 | &radeon_encoder->native_mode, |
767 | sizeof(struct radeon_native_mode)); | 756 | sizeof(struct drm_display_mode)); |
768 | first = false; | 757 | first = false; |
769 | } else { | 758 | } else { |
770 | if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { | 759 | if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { |
@@ -782,10 +771,10 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
782 | if (radeon_crtc->rmx_type != RMX_OFF) { | 771 | if (radeon_crtc->rmx_type != RMX_OFF) { |
783 | fixed20_12 a, b; | 772 | fixed20_12 a, b; |
784 | a.full = rfixed_const(crtc->mode.vdisplay); | 773 | a.full = rfixed_const(crtc->mode.vdisplay); |
785 | b.full = rfixed_const(radeon_crtc->native_mode.panel_xres); | 774 | b.full = rfixed_const(radeon_crtc->native_mode.hdisplay); |
786 | radeon_crtc->vsc.full = rfixed_div(a, b); | 775 | radeon_crtc->vsc.full = rfixed_div(a, b); |
787 | a.full = rfixed_const(crtc->mode.hdisplay); | 776 | a.full = rfixed_const(crtc->mode.hdisplay); |
788 | b.full = rfixed_const(radeon_crtc->native_mode.panel_yres); | 777 | b.full = rfixed_const(radeon_crtc->native_mode.vdisplay); |
789 | radeon_crtc->hsc.full = rfixed_div(a, b); | 778 | radeon_crtc->hsc.full = rfixed_div(a, b); |
790 | } else { | 779 | } else { |
791 | radeon_crtc->vsc.full = rfixed_const(1); | 780 | radeon_crtc->vsc.full = rfixed_const(1); |
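
In the radeon_display.c hunks above, radeon_crtc_gamma_set() now treats userspace palettes as already correct and copies all 256 entries, and the new radeon_crtc_fb_gamma_get() mirrors gamma_set for fbcon: 16-bit colour components are stored in the 10-bit hardware LUT by shifting right 6 bits and recovered by shifting left, so the low 6 bits are lost on the round trip. A small stand-alone illustration (not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t to_lut(uint16_t c16)   { return c16 >> 6; }   /* 0..1023 */
	static uint16_t from_lut(uint16_t c10) { return c10 << 6; }   /* low 6 bits lost */

	int main(void)
	{
		uint16_t red = 0xFFFF;
		uint16_t lut = to_lut(red);                 /* 0x03FF */
		printf("cmap 0x%04X -> lut 0x%03X -> cmap 0x%04X\n",
		       red, lut, from_lut(lut));            /* 0xFFFF -> 0x3FF -> 0xFFC0 */
		return 0;
	}
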
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 621646752cd2..d42bc512d75a 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -31,6 +31,10 @@ | |||
31 | 31 | ||
32 | extern int atom_debug; | 32 | extern int atom_debug; |
33 | 33 | ||
34 | /* evil but including atombios.h is much worse */ | ||
35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | ||
36 | struct drm_display_mode *mode); | ||
37 | |||
34 | uint32_t | 38 | uint32_t |
35 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) | 39 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) |
36 | { | 40 | { |
@@ -167,49 +171,17 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | |||
167 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 171 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
168 | struct drm_device *dev = encoder->dev; | 172 | struct drm_device *dev = encoder->dev; |
169 | struct radeon_device *rdev = dev->dev_private; | 173 | struct radeon_device *rdev = dev->dev_private; |
170 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; | 174 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
171 | 175 | ||
172 | if (mode->hdisplay < native_mode->panel_xres || | 176 | if (mode->hdisplay < native_mode->hdisplay || |
173 | mode->vdisplay < native_mode->panel_yres) { | 177 | mode->vdisplay < native_mode->vdisplay) { |
174 | if (ASIC_IS_AVIVO(rdev)) { | 178 | int mode_id = adjusted_mode->base.id; |
175 | adjusted_mode->hdisplay = native_mode->panel_xres; | 179 | *adjusted_mode = *native_mode; |
176 | adjusted_mode->vdisplay = native_mode->panel_yres; | 180 | if (!ASIC_IS_AVIVO(rdev)) { |
177 | adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank; | 181 | adjusted_mode->hdisplay = mode->hdisplay; |
178 | adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus; | 182 | adjusted_mode->vdisplay = mode->vdisplay; |
179 | adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width; | ||
180 | adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank; | ||
181 | adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus; | ||
182 | adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width; | ||
183 | /* update crtc values */ | ||
184 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
185 | /* adjust crtc values */ | ||
186 | adjusted_mode->crtc_hdisplay = native_mode->panel_xres; | ||
187 | adjusted_mode->crtc_vdisplay = native_mode->panel_yres; | ||
188 | adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank; | ||
189 | adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus; | ||
190 | adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width; | ||
191 | adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank; | ||
192 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus; | ||
193 | adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width; | ||
194 | } else { | ||
195 | adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank; | ||
196 | adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus; | ||
197 | adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width; | ||
198 | adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank; | ||
199 | adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus; | ||
200 | adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width; | ||
201 | /* update crtc values */ | ||
202 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
203 | /* adjust crtc values */ | ||
204 | adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank; | ||
205 | adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus; | ||
206 | adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width; | ||
207 | adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank; | ||
208 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus; | ||
209 | adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width; | ||
210 | } | 183 | } |
211 | adjusted_mode->flags = native_mode->flags; | 184 | adjusted_mode->base.id = mode_id; |
212 | adjusted_mode->clock = native_mode->dotclock; | ||
213 | } | 185 | } |
214 | } | 186 | } |
215 | 187 | ||
@@ -219,7 +191,11 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
219 | struct drm_display_mode *adjusted_mode) | 191 | struct drm_display_mode *adjusted_mode) |
220 | { | 192 | { |
221 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
194 | struct drm_device *dev = encoder->dev; | ||
195 | struct radeon_device *rdev = dev->dev_private; | ||
222 | 196 | ||
197 | /* set the active encoder to connector routing */ | ||
198 | radeon_encoder_set_active_device(encoder); | ||
223 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 199 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
224 | 200 | ||
225 | if (radeon_encoder->rmx_type != RMX_OFF) | 201 | if (radeon_encoder->rmx_type != RMX_OFF) |
@@ -230,6 +206,18 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
230 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | 206 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) |
231 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | 207 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; |
232 | 208 | ||
209 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | ||
210 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; | ||
211 | if (tv_dac) { | ||
212 | if (tv_dac->tv_std == TV_STD_NTSC || | ||
213 | tv_dac->tv_std == TV_STD_NTSC_J || | ||
214 | tv_dac->tv_std == TV_STD_PAL_M) | ||
215 | radeon_atom_get_tv_timings(rdev, 0, adjusted_mode); | ||
216 | else | ||
217 | radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); | ||
218 | } | ||
219 | } | ||
220 | |||
233 | return true; | 221 | return true; |
234 | } | 222 | } |
235 | 223 | ||
@@ -461,7 +449,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
461 | case 1: | 449 | case 1: |
462 | args.v1.ucMisc = 0; | 450 | args.v1.ucMisc = 0; |
463 | args.v1.ucAction = action; | 451 | args.v1.ucAction = action; |
464 | if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) | 452 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) |
465 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | 453 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; |
466 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 454 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
467 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 455 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
@@ -486,7 +474,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
486 | if (dig->coherent_mode) | 474 | if (dig->coherent_mode) |
487 | args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; | 475 | args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; |
488 | } | 476 | } |
489 | if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) | 477 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) |
490 | args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | 478 | args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; |
491 | args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 479 | args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
492 | args.v2.ucTruncate = 0; | 480 | args.v2.ucTruncate = 0; |
@@ -544,7 +532,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
544 | switch (connector->connector_type) { | 532 | switch (connector->connector_type) { |
545 | case DRM_MODE_CONNECTOR_DVII: | 533 | case DRM_MODE_CONNECTOR_DVII: |
546 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | 534 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
547 | if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) | 535 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) |
548 | return ATOM_ENCODER_MODE_HDMI; | 536 | return ATOM_ENCODER_MODE_HDMI; |
549 | else if (radeon_connector->use_digital) | 537 | else if (radeon_connector->use_digital) |
550 | return ATOM_ENCODER_MODE_DVI; | 538 | return ATOM_ENCODER_MODE_DVI; |
@@ -554,7 +542,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
554 | case DRM_MODE_CONNECTOR_DVID: | 542 | case DRM_MODE_CONNECTOR_DVID: |
555 | case DRM_MODE_CONNECTOR_HDMIA: | 543 | case DRM_MODE_CONNECTOR_HDMIA: |
556 | default: | 544 | default: |
557 | if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) | 545 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) |
558 | return ATOM_ENCODER_MODE_HDMI; | 546 | return ATOM_ENCODER_MODE_HDMI; |
559 | else | 547 | else |
560 | return ATOM_ENCODER_MODE_DVI; | 548 | return ATOM_ENCODER_MODE_DVI; |
@@ -566,7 +554,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
566 | /*if (radeon_output->MonType == MT_DP) | 554 | /*if (radeon_output->MonType == MT_DP) |
567 | return ATOM_ENCODER_MODE_DP; | 555 | return ATOM_ENCODER_MODE_DP; |
568 | else*/ | 556 | else*/ |
569 | if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) | 557 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) |
570 | return ATOM_ENCODER_MODE_HDMI; | 558 | return ATOM_ENCODER_MODE_HDMI; |
571 | else | 559 | else |
572 | return ATOM_ENCODER_MODE_DVI; | 560 | return ATOM_ENCODER_MODE_DVI; |
@@ -734,14 +722,17 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
734 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 722 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); |
735 | 723 | ||
736 | args.v1.ucAction = action; | 724 | args.v1.ucAction = action; |
737 | 725 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | |
726 | args.v1.usInitInfo = radeon_connector->connector_object_id; | ||
727 | } else { | ||
728 | if (radeon_encoder->pixel_clock > 165000) | ||
729 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); | ||
730 | else | ||
731 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
732 | } | ||
738 | if (ASIC_IS_DCE32(rdev)) { | 733 | if (ASIC_IS_DCE32(rdev)) { |
739 | if (radeon_encoder->pixel_clock > 165000) { | 734 | if (radeon_encoder->pixel_clock > 165000) |
740 | args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100); | 735 | args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); |
741 | args.v2.acConfig.fDualLinkConnector = 1; | ||
742 | } else { | ||
743 | args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100); | ||
744 | } | ||
745 | if (dig->dig_block) | 736 | if (dig->dig_block) |
746 | args.v2.acConfig.ucEncoderSel = 1; | 737 | args.v2.acConfig.ucEncoderSel = 1; |
747 | 738 | ||
@@ -766,7 +757,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
766 | } | 757 | } |
767 | } else { | 758 | } else { |
768 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; | 759 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; |
769 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10); | ||
770 | 760 | ||
771 | switch (radeon_encoder->encoder_id) { | 761 | switch (radeon_encoder->encoder_id) { |
772 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 762 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
@@ -874,16 +864,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
874 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; | 864 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; |
875 | int index = 0; | 865 | int index = 0; |
876 | bool is_dig = false; | 866 | bool is_dig = false; |
877 | int devices; | ||
878 | 867 | ||
879 | memset(&args, 0, sizeof(args)); | 868 | memset(&args, 0, sizeof(args)); |
880 | 869 | ||
881 | /* on DPMS off we have no idea if active device is meaningful */ | ||
882 | if (mode != DRM_MODE_DPMS_ON && !radeon_encoder->active_device) | ||
883 | devices = radeon_encoder->devices; | ||
884 | else | ||
885 | devices = radeon_encoder->active_device; | ||
886 | |||
887 | DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", | 870 | DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
888 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, | 871 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
889 | radeon_encoder->active_device); | 872 | radeon_encoder->active_device); |
@@ -914,18 +897,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
914 | break; | 897 | break; |
915 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 898 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
916 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 899 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
917 | if (devices & (ATOM_DEVICE_TV_SUPPORT)) | 900 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
918 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); | 901 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); |
919 | else if (devices & (ATOM_DEVICE_CV_SUPPORT)) | 902 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
920 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); | 903 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); |
921 | else | 904 | else |
922 | index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); | 905 | index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); |
923 | break; | 906 | break; |
924 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 907 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
925 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 908 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
926 | if (devices & (ATOM_DEVICE_TV_SUPPORT)) | 909 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
927 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); | 910 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); |
928 | else if (devices & (ATOM_DEVICE_CV_SUPPORT)) | 911 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
929 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); | 912 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); |
930 | else | 913 | else |
931 | index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); | 914 | index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); |
@@ -1104,8 +1087,11 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, | |||
1104 | } | 1087 | } |
1105 | 1088 | ||
1106 | /* set scaler clears this on some chips */ | 1089 | /* set scaler clears this on some chips */ |
1107 | if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 1090 | if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { |
1108 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN); | 1091 | if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) |
1092 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1093 | AVIVO_D1MODE_INTERLEAVE_EN); | ||
1094 | } | ||
1109 | } | 1095 | } |
1110 | 1096 | ||
1111 | static void | 1097 | static void |
@@ -1153,6 +1139,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1153 | 1139 | ||
1154 | /* setup and enable the encoder and transmitter */ | 1140 | /* setup and enable the encoder and transmitter */ |
1155 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE); | 1141 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE); |
1142 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT); | ||
1156 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); | 1143 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); |
1157 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); | 1144 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); |
1158 | break; | 1145 | break; |
@@ -1268,8 +1255,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
1268 | { | 1255 | { |
1269 | radeon_atom_output_lock(encoder, true); | 1256 | radeon_atom_output_lock(encoder, true); |
1270 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1257 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1271 | |||
1272 | radeon_encoder_set_active_device(encoder); | ||
1273 | } | 1258 | } |
1274 | 1259 | ||
1275 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) | 1260 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) |
@@ -1345,6 +1330,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
1345 | void | 1330 | void |
1346 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1331 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) |
1347 | { | 1332 | { |
1333 | struct radeon_device *rdev = dev->dev_private; | ||
1348 | struct drm_encoder *encoder; | 1334 | struct drm_encoder *encoder; |
1349 | struct radeon_encoder *radeon_encoder; | 1335 | struct radeon_encoder *radeon_encoder; |
1350 | 1336 | ||
@@ -1364,7 +1350,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1364 | return; | 1350 | return; |
1365 | 1351 | ||
1366 | encoder = &radeon_encoder->base; | 1352 | encoder = &radeon_encoder->base; |
1367 | encoder->possible_crtcs = 0x3; | 1353 | if (rdev->flags & RADEON_SINGLE_CRTC) |
1354 | encoder->possible_crtcs = 0x1; | ||
1355 | else | ||
1356 | encoder->possible_crtcs = 0x3; | ||
1368 | encoder->possible_clones = 0; | 1357 | encoder->possible_clones = 0; |
1369 | 1358 | ||
1370 | radeon_encoder->enc_priv = NULL; | 1359 | radeon_encoder->enc_priv = NULL; |
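
In the atombios_dig_transmitter_setup() hunk above, usPixelClock is expressed in 10 kHz units and is halved once the clock exceeds the 165 MHz single-link TMDS limit, on both the generic and DCE32 paths. A stand-alone sketch of that calculation (plain C, not the AtomBIOS argument structures):

	#include <stdio.h>

	static unsigned short xmit_pixel_clock(unsigned int clock_khz)
	{
		if (clock_khz > 165000)               /* dual link: each link carries half */
			return (unsigned short)((clock_khz / 2) / 10);
		return (unsigned short)(clock_khz / 10);
	}

	int main(void)
	{
		printf("108000 kHz -> %u\n", xmit_pixel_clock(108000));  /* 10800 */
		printf("269500 kHz -> %u\n", xmit_pixel_clock(269500));  /* 13475 */
		return 0;
	}
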
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 1ba704eedefb..b38c4c8e2c61 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = { | |||
55 | .fb_imageblit = cfb_imageblit, | 55 | .fb_imageblit = cfb_imageblit, |
56 | .fb_pan_display = drm_fb_helper_pan_display, | 56 | .fb_pan_display = drm_fb_helper_pan_display, |
57 | .fb_blank = drm_fb_helper_blank, | 57 | .fb_blank = drm_fb_helper_blank, |
58 | .fb_setcmap = drm_fb_helper_setcmap, | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | /** | 61 | /** |
@@ -123,6 +124,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo | |||
123 | 124 | ||
124 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { | 125 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { |
125 | .gamma_set = radeon_crtc_fb_gamma_set, | 126 | .gamma_set = radeon_crtc_fb_gamma_set, |
127 | .gamma_get = radeon_crtc_fb_gamma_get, | ||
126 | }; | 128 | }; |
127 | 129 | ||
128 | int radeonfb_create(struct drm_device *dev, | 130 | int radeonfb_create(struct drm_device *dev, |
@@ -146,9 +148,15 @@ int radeonfb_create(struct drm_device *dev, | |||
146 | unsigned long tmp; | 148 | unsigned long tmp; |
147 | bool fb_tiled = false; /* useful for testing */ | 149 | bool fb_tiled = false; /* useful for testing */ |
148 | u32 tiling_flags = 0; | 150 | u32 tiling_flags = 0; |
151 | int crtc_count; | ||
149 | 152 | ||
150 | mode_cmd.width = surface_width; | 153 | mode_cmd.width = surface_width; |
151 | mode_cmd.height = surface_height; | 154 | mode_cmd.height = surface_height; |
155 | |||
156 | /* avivo can't scanout real 24bpp */ | ||
157 | if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) | ||
158 | surface_bpp = 32; | ||
159 | |||
152 | mode_cmd.bpp = surface_bpp; | 160 | mode_cmd.bpp = surface_bpp; |
153 | /* need to align pitch with crtc limits */ | 161 | /* need to align pitch with crtc limits */ |
154 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); | 162 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
@@ -217,7 +225,11 @@ int radeonfb_create(struct drm_device *dev, | |||
217 | rfbdev = info->par; | 225 | rfbdev = info->par; |
218 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; | 226 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; |
219 | rfbdev->helper.dev = dev; | 227 | rfbdev->helper.dev = dev; |
220 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, | 228 | if (rdev->flags & RADEON_SINGLE_CRTC) |
229 | crtc_count = 1; | ||
230 | else | ||
231 | crtc_count = 2; | ||
232 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, | ||
221 | RADEONFB_CONN_LIMIT); | 233 | RADEONFB_CONN_LIMIT); |
222 | if (ret) | 234 | if (ret) |
223 | goto out_unref; | 235 | goto out_unref; |
@@ -234,7 +246,7 @@ int radeonfb_create(struct drm_device *dev, | |||
234 | 246 | ||
235 | strcpy(info->fix.id, "radeondrmfb"); | 247 | strcpy(info->fix.id, "radeondrmfb"); |
236 | 248 | ||
237 | drm_fb_helper_fill_fix(info, fb->pitch); | 249 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
238 | 250 | ||
239 | info->flags = FBINFO_DEFAULT; | 251 | info->flags = FBINFO_DEFAULT; |
240 | info->fbops = &radeonfb_ops; | 252 | info->fbops = &radeonfb_ops; |
@@ -309,7 +321,7 @@ int radeon_parse_options(char *options) | |||
309 | 321 | ||
310 | int radeonfb_probe(struct drm_device *dev) | 322 | int radeonfb_probe(struct drm_device *dev) |
311 | { | 323 | { |
312 | return drm_fb_helper_single_fb_probe(dev, &radeonfb_create); | 324 | return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); |
313 | } | 325 | } |
314 | 326 | ||
315 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | 327 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
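
The radeonfb_create() changes above bump a requested 24 bpp console to 32 bpp on AVIVO parts (which cannot scan out packed 24 bpp) and keep the pitch formula radeon_align_pitch(...) * ((bpp + 1) / 8); the second factor is simply bytes per pixel with 15 bpp rounded up, as this small stand-alone check shows:

	#include <stdio.h>

	int main(void)
	{
		int bpps[] = { 15, 16, 24, 32 };
		for (int i = 0; i < 4; i++)
			printf("bpp %2d -> %d bytes/pixel\n", bpps[i], (bpps[i] + 1) / 8);
		/* 15 -> 2, 16 -> 2, 24 -> 3, 32 -> 4 */
		return 0;
	}
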
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a931af065dd4..a68d7566178c 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -140,15 +140,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
140 | WARN(1, "trying to unbind memory to unitialized GART !\n"); | 140 | WARN(1, "trying to unbind memory to unitialized GART !\n"); |
141 | return; | 141 | return; |
142 | } | 142 | } |
143 | t = offset / 4096; | 143 | t = offset / RADEON_GPU_PAGE_SIZE; |
144 | p = t / (PAGE_SIZE / 4096); | 144 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
145 | for (i = 0; i < pages; i++, p++) { | 145 | for (i = 0; i < pages; i++, p++) { |
146 | if (rdev->gart.pages[p]) { | 146 | if (rdev->gart.pages[p]) { |
147 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], | 147 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], |
148 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 148 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
149 | rdev->gart.pages[p] = NULL; | 149 | rdev->gart.pages[p] = NULL; |
150 | rdev->gart.pages_addr[p] = 0; | 150 | rdev->gart.pages_addr[p] = 0; |
151 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { | 151 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
152 | radeon_gart_set_page(rdev, t, 0); | 152 | radeon_gart_set_page(rdev, t, 0); |
153 | } | 153 | } |
154 | } | 154 | } |
@@ -169,8 +169,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
169 | DRM_ERROR("trying to bind memory to unitialized GART !\n"); | 169 | DRM_ERROR("trying to bind memory to unitialized GART !\n"); |
170 | return -EINVAL; | 170 | return -EINVAL; |
171 | } | 171 | } |
172 | t = offset / 4096; | 172 | t = offset / RADEON_GPU_PAGE_SIZE; |
173 | p = t / (PAGE_SIZE / 4096); | 173 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
174 | 174 | ||
175 | for (i = 0; i < pages; i++, p++) { | 175 | for (i = 0; i < pages; i++, p++) { |
176 | /* we need to support large memory configurations */ | 176 | /* we need to support large memory configurations */ |
@@ -185,9 +185,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
185 | } | 185 | } |
186 | rdev->gart.pages[p] = pagelist[i]; | 186 | rdev->gart.pages[p] = pagelist[i]; |
187 | page_base = rdev->gart.pages_addr[p]; | 187 | page_base = rdev->gart.pages_addr[p]; |
188 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { | 188 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
189 | radeon_gart_set_page(rdev, t, page_base); | 189 | radeon_gart_set_page(rdev, t, page_base); |
190 | page_base += 4096; | 190 | page_base += RADEON_GPU_PAGE_SIZE; |
191 | } | 191 | } |
192 | } | 192 | } |
193 | mb(); | 193 | mb(); |
@@ -200,14 +200,14 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
200 | if (rdev->gart.pages) { | 200 | if (rdev->gart.pages) { |
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
203 | /* We need PAGE_SIZE >= 4096 */ | 203 | /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */ |
204 | if (PAGE_SIZE < 4096) { | 204 | if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) { |
205 | DRM_ERROR("Page size is smaller than GPU page size!\n"); | 205 | DRM_ERROR("Page size is smaller than GPU page size!\n"); |
206 | return -EINVAL; | 206 | return -EINVAL; |
207 | } | 207 | } |
208 | /* Compute table size */ | 208 | /* Compute table size */ |
209 | rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; | 209 | rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; |
210 | rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096; | 210 | rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE; |
211 | DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", | 211 | DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", |
212 | rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); | 212 | rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); |
213 | /* Allocate pages table */ | 213 | /* Allocate pages table */ |
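
The radeon_gart.c hunks replace the hard-coded 4096 with RADEON_GPU_PAGE_SIZE, which keeps the translation between GPU page-table entries and the backing CPU pages correct when PAGE_SIZE is larger than the GPU page. A stand-alone illustration of the two indices computed above (the constants here are chosen for the example, not taken from the driver):

	#include <stdio.h>

	#define RADEON_GPU_PAGE_SIZE 4096u
	#define CPU_PAGE_SIZE        16384u   /* e.g. a 16 KiB-page architecture */

	int main(void)
	{
		unsigned int offset = 0x14000;                   /* 80 KiB into the GART */
		unsigned int t = offset / RADEON_GPU_PAGE_SIZE;  /* GPU page-table entry */
		unsigned int p = t / (CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE); /* backing CPU page */

		printf("offset 0x%X -> gpu entry %u, cpu page %u (%u entries per cpu page)\n",
		       offset, t, p, CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
		/* 0x14000 -> gpu entry 20, cpu page 5, 4 entries per cpu page */
		return 0;
	}
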
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 1841145a7c4f..a0fe6232dcb6 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -83,11 +83,22 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
83 | int radeon_irq_kms_init(struct radeon_device *rdev) | 83 | int radeon_irq_kms_init(struct radeon_device *rdev) |
84 | { | 84 | { |
85 | int r = 0; | 85 | int r = 0; |
86 | int num_crtc = 2; | ||
86 | 87 | ||
87 | r = drm_vblank_init(rdev->ddev, 2); | 88 | if (rdev->flags & RADEON_SINGLE_CRTC) |
89 | num_crtc = 1; | ||
90 | |||
91 | r = drm_vblank_init(rdev->ddev, num_crtc); | ||
88 | if (r) { | 92 | if (r) { |
89 | return r; | 93 | return r; |
90 | } | 94 | } |
95 | /* enable msi */ | ||
96 | rdev->msi_enabled = 0; | ||
97 | if (rdev->family >= CHIP_RV380) { | ||
98 | int ret = pci_enable_msi(rdev->pdev); | ||
99 | if (!ret) | ||
100 | rdev->msi_enabled = 1; | ||
101 | } | ||
91 | drm_irq_install(rdev->ddev); | 102 | drm_irq_install(rdev->ddev); |
92 | rdev->irq.installed = true; | 103 | rdev->irq.installed = true; |
93 | DRM_INFO("radeon: irq initialized.\n"); | 104 | DRM_INFO("radeon: irq initialized.\n"); |
@@ -99,5 +110,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
99 | if (rdev->irq.installed) { | 110 | if (rdev->irq.installed) { |
100 | rdev->irq.installed = false; | 111 | rdev->irq.installed = false; |
101 | drm_irq_uninstall(rdev->ddev); | 112 | drm_irq_uninstall(rdev->ddev); |
113 | if (rdev->msi_enabled) | ||
114 | pci_disable_msi(rdev->pdev); | ||
102 | } | 115 | } |
103 | } | 116 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 2b997a15fb1f..8d0b7aa87fa4 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -48,7 +48,7 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | |||
48 | u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; | 48 | u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; |
49 | u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; | 49 | u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; |
50 | u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; | 50 | u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; |
51 | struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; | 51 | struct drm_display_mode *native_mode = &radeon_crtc->native_mode; |
52 | 52 | ||
53 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & | 53 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & |
54 | (RADEON_VERT_STRETCH_RESERVED | | 54 | (RADEON_VERT_STRETCH_RESERVED | |
@@ -95,19 +95,19 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | |||
95 | 95 | ||
96 | fp_horz_vert_active = 0; | 96 | fp_horz_vert_active = 0; |
97 | 97 | ||
98 | if (native_mode->panel_xres == 0 || | 98 | if (native_mode->hdisplay == 0 || |
99 | native_mode->panel_yres == 0) { | 99 | native_mode->vdisplay == 0) { |
100 | hscale = false; | 100 | hscale = false; |
101 | vscale = false; | 101 | vscale = false; |
102 | } else { | 102 | } else { |
103 | if (xres > native_mode->panel_xres) | 103 | if (xres > native_mode->hdisplay) |
104 | xres = native_mode->panel_xres; | 104 | xres = native_mode->hdisplay; |
105 | if (yres > native_mode->panel_yres) | 105 | if (yres > native_mode->vdisplay) |
106 | yres = native_mode->panel_yres; | 106 | yres = native_mode->vdisplay; |
107 | 107 | ||
108 | if (xres == native_mode->panel_xres) | 108 | if (xres == native_mode->hdisplay) |
109 | hscale = false; | 109 | hscale = false; |
110 | if (yres == native_mode->panel_yres) | 110 | if (yres == native_mode->vdisplay) |
111 | vscale = false; | 111 | vscale = false; |
112 | } | 112 | } |
113 | 113 | ||
@@ -119,11 +119,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | |||
119 | else { | 119 | else { |
120 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; | 120 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; |
121 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) | 121 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) |
122 | / native_mode->panel_xres + 1; | 122 | / native_mode->hdisplay + 1; |
123 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | | 123 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | |
124 | RADEON_HORZ_STRETCH_BLEND | | 124 | RADEON_HORZ_STRETCH_BLEND | |
125 | RADEON_HORZ_STRETCH_ENABLE | | 125 | RADEON_HORZ_STRETCH_ENABLE | |
126 | ((native_mode->panel_xres/8-1) << 16)); | 126 | ((native_mode->hdisplay/8-1) << 16)); |
127 | } | 127 | } |
128 | 128 | ||
129 | if (!vscale) | 129 | if (!vscale) |
@@ -131,11 +131,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | |||
131 | else { | 131 | else { |
132 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; | 132 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; |
133 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) | 133 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) |
134 | / native_mode->panel_yres + 1; | 134 | / native_mode->vdisplay + 1; |
135 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | | 135 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | |
136 | RADEON_VERT_STRETCH_ENABLE | | 136 | RADEON_VERT_STRETCH_ENABLE | |
137 | RADEON_VERT_STRETCH_BLEND | | 137 | RADEON_VERT_STRETCH_BLEND | |
138 | ((native_mode->panel_yres-1) << 12)); | 138 | ((native_mode->vdisplay-1) << 12)); |
139 | } | 139 | } |
140 | break; | 140 | break; |
141 | case RMX_CENTER: | 141 | case RMX_CENTER: |
@@ -175,8 +175,8 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | |||
175 | ? RADEON_CRTC_V_SYNC_POL | 175 | ? RADEON_CRTC_V_SYNC_POL |
176 | : 0))); | 176 | : 0))); |
177 | 177 | ||
178 | fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | | 178 | fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) | |
179 | (((native_mode->panel_xres / 8) & 0x1ff) << 16)); | 179 | (((native_mode->hdisplay / 8) & 0x1ff) << 16)); |
180 | break; | 180 | break; |
181 | case RMX_OFF: | 181 | case RMX_OFF: |
182 | default: | 182 | default: |
@@ -532,6 +532,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
532 | radeon_fb = to_radeon_framebuffer(old_fb); | 532 | radeon_fb = to_radeon_framebuffer(old_fb); |
533 | radeon_gem_object_unpin(radeon_fb->obj); | 533 | radeon_gem_object_unpin(radeon_fb->obj); |
534 | } | 534 | } |
535 | |||
536 | /* Bytes per pixel may have changed */ | ||
537 | radeon_bandwidth_update(rdev); | ||
538 | |||
535 | return 0; | 539 | return 0; |
536 | } | 540 | } |
537 | 541 | ||
@@ -664,6 +668,9 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
664 | 668 | ||
665 | WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl); | 669 | WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl); |
666 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); | 670 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); |
671 | |||
672 | WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid); | ||
673 | WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid); | ||
667 | } else { | 674 | } else { |
668 | uint32_t crtc_gen_cntl; | 675 | uint32_t crtc_gen_cntl; |
669 | uint32_t crtc_ext_cntl; | 676 | uint32_t crtc_ext_cntl; |
@@ -1015,14 +1022,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
1015 | int x, int y, struct drm_framebuffer *old_fb) | 1022 | int x, int y, struct drm_framebuffer *old_fb) |
1016 | { | 1023 | { |
1017 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 1024 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1018 | struct drm_device *dev = crtc->dev; | ||
1019 | struct radeon_device *rdev = dev->dev_private; | ||
1020 | 1025 | ||
1021 | /* TODO TV */ | 1026 | /* TODO TV */ |
1022 | radeon_crtc_set_base(crtc, x, y, old_fb); | 1027 | radeon_crtc_set_base(crtc, x, y, old_fb); |
1023 | radeon_set_crtc_timing(crtc, adjusted_mode); | 1028 | radeon_set_crtc_timing(crtc, adjusted_mode); |
1024 | radeon_set_pll(crtc, adjusted_mode); | 1029 | radeon_set_pll(crtc, adjusted_mode); |
1025 | radeon_bandwidth_update(rdev); | ||
1026 | if (radeon_crtc->crtc_id == 0) { | 1030 | if (radeon_crtc->crtc_id == 0) { |
1027 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); | 1031 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); |
1028 | } else { | 1032 | } else { |
@@ -1053,6 +1057,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = { | |||
1053 | .mode_set_base = radeon_crtc_set_base, | 1057 | .mode_set_base = radeon_crtc_set_base, |
1054 | .prepare = radeon_crtc_prepare, | 1058 | .prepare = radeon_crtc_prepare, |
1055 | .commit = radeon_crtc_commit, | 1059 | .commit = radeon_crtc_commit, |
1060 | .load_lut = radeon_crtc_load_lut, | ||
1056 | }; | 1061 | }; |
1057 | 1062 | ||
1058 | 1063 | ||
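
The radeon_legacy_crtc.c changes above mostly swap the native-mode field names (panel_xres/panel_yres become hdisplay/vdisplay on struct drm_display_mode); the RMX stretch math itself is unchanged. For reference, a stand-alone version of the horizontal ratio computation, with the fixed-point base (RADEON_HORZ_STRETCH_RATIO_MAX) assumed to be 4096 purely for the sake of the example:

	#include <stdio.h>

	#define RATIO_MAX 4096u   /* assumed value for illustration */

	int main(void)
	{
		unsigned int xres = 1024, native_xres = 1280, inc = 0;
		unsigned int scale = ((xres + inc) * RATIO_MAX) / native_xres + 1;

		printf("scale = %u/%u of native width -> register ratio %u\n",
		       xres, native_xres, scale);   /* 1024/1280 -> 3277 (~0.8 * 4096) */
		return 0;
	}
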
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index b1547f700d73..00382122869b 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -107,8 +107,6 @@ static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) | |||
107 | else | 107 | else |
108 | radeon_combios_output_lock(encoder, true); | 108 | radeon_combios_output_lock(encoder, true); |
109 | radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); | 109 | radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); |
110 | |||
111 | radeon_encoder_set_active_device(encoder); | ||
112 | } | 110 | } |
113 | 111 | ||
114 | static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) | 112 | static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) |
@@ -192,6 +190,8 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | |||
192 | { | 190 | { |
193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 191 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
194 | 192 | ||
193 | /* set the active encoder to connector routing */ | ||
194 | radeon_encoder_set_active_device(encoder); | ||
195 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 195 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
196 | 196 | ||
197 | if (radeon_encoder->rmx_type != RMX_OFF) | 197 | if (radeon_encoder->rmx_type != RMX_OFF) |
@@ -218,7 +218,8 @@ static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder, | |||
218 | struct drm_display_mode *mode, | 218 | struct drm_display_mode *mode, |
219 | struct drm_display_mode *adjusted_mode) | 219 | struct drm_display_mode *adjusted_mode) |
220 | { | 220 | { |
221 | 221 | /* set the active encoder to connector routing */ | |
222 | radeon_encoder_set_active_device(encoder); | ||
222 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 223 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
223 | 224 | ||
224 | return true; | 225 | return true; |
@@ -272,7 +273,6 @@ static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) | |||
272 | else | 273 | else |
273 | radeon_combios_output_lock(encoder, true); | 274 | radeon_combios_output_lock(encoder, true); |
274 | radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); | 275 | radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); |
275 | radeon_encoder_set_active_device(encoder); | ||
276 | } | 276 | } |
277 | 277 | ||
278 | static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) | 278 | static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) |
@@ -468,7 +468,6 @@ static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) | |||
468 | else | 468 | else |
469 | radeon_combios_output_lock(encoder, true); | 469 | radeon_combios_output_lock(encoder, true); |
470 | radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); | 470 | radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); |
471 | radeon_encoder_set_active_device(encoder); | ||
472 | } | 471 | } |
473 | 472 | ||
474 | static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) | 473 | static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) |
@@ -543,6 +542,14 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
543 | 542 | ||
544 | fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); | 543 | fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); |
545 | 544 | ||
545 | fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN | | ||
546 | RADEON_FP_DFP_SYNC_SEL | | ||
547 | RADEON_FP_CRT_SYNC_SEL | | ||
548 | RADEON_FP_CRTC_LOCK_8DOT | | ||
549 | RADEON_FP_USE_SHADOW_EN | | ||
550 | RADEON_FP_CRTC_USE_SHADOW_VEND | | ||
551 | RADEON_FP_CRT_SYNC_ALT); | ||
552 | |||
546 | if (1) /* FIXME rgbBits == 8 */ | 553 | if (1) /* FIXME rgbBits == 8 */ |
547 | fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ | 554 | fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ |
548 | else | 555 | else |
@@ -556,7 +563,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
556 | else | 563 | else |
557 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; | 564 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; |
558 | } else | 565 | } else |
559 | fp_gen_cntl |= RADEON_FP_SEL_CRTC1; | 566 | fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2; |
560 | } else { | 567 | } else { |
561 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { | 568 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
562 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; | 569 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
@@ -593,7 +600,8 @@ static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder, | |||
593 | struct drm_display_mode *mode, | 600 | struct drm_display_mode *mode, |
594 | struct drm_display_mode *adjusted_mode) | 601 | struct drm_display_mode *adjusted_mode) |
595 | { | 602 | { |
596 | 603 | /* set the active encoder to connector routing */ | |
604 | radeon_encoder_set_active_device(encoder); | ||
597 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 605 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
598 | 606 | ||
599 | return true; | 607 | return true; |
@@ -636,7 +644,6 @@ static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) | |||
636 | else | 644 | else |
637 | radeon_combios_output_lock(encoder, true); | 645 | radeon_combios_output_lock(encoder, true); |
638 | radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); | 646 | radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); |
639 | radeon_encoder_set_active_device(encoder); | ||
640 | } | 647 | } |
641 | 648 | ||
642 | static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) | 649 | static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) |
@@ -735,7 +742,8 @@ static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder, | |||
735 | struct drm_display_mode *mode, | 742 | struct drm_display_mode *mode, |
736 | struct drm_display_mode *adjusted_mode) | 743 | struct drm_display_mode *adjusted_mode) |
737 | { | 744 | { |
738 | 745 | /* set the active encoder to connector routing */ | |
746 | radeon_encoder_set_active_device(encoder); | ||
739 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 747 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
740 | 748 | ||
741 | return true; | 749 | return true; |
@@ -839,7 +847,6 @@ static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) | |||
839 | else | 847 | else |
840 | radeon_combios_output_lock(encoder, true); | 848 | radeon_combios_output_lock(encoder, true); |
841 | radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); | 849 | radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); |
842 | radeon_encoder_set_active_device(encoder); | ||
843 | } | 850 | } |
844 | 851 | ||
845 | static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) | 852 | static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) |
@@ -881,7 +888,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
881 | R420_TV_DAC_DACADJ_MASK | | 888 | R420_TV_DAC_DACADJ_MASK | |
882 | R420_TV_DAC_RDACPD | | 889 | R420_TV_DAC_RDACPD | |
883 | R420_TV_DAC_GDACPD | | 890 | R420_TV_DAC_GDACPD | |
884 | R420_TV_DAC_GDACPD | | 891 | R420_TV_DAC_BDACPD | |
885 | R420_TV_DAC_TVENABLE); | 892 | R420_TV_DAC_TVENABLE); |
886 | } else { | 893 | } else { |
887 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | | 894 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | |
@@ -889,7 +896,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
889 | RADEON_TV_DAC_DACADJ_MASK | | 896 | RADEON_TV_DAC_DACADJ_MASK | |
890 | RADEON_TV_DAC_RDACPD | | 897 | RADEON_TV_DAC_RDACPD | |
891 | RADEON_TV_DAC_GDACPD | | 898 | RADEON_TV_DAC_GDACPD | |
892 | RADEON_TV_DAC_GDACPD); | 899 | RADEON_TV_DAC_BDACPD); |
893 | } | 900 | } |
894 | 901 | ||
895 | /* FIXME TV */ | 902 | /* FIXME TV */ |
@@ -1318,7 +1325,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1318 | return; | 1325 | return; |
1319 | 1326 | ||
1320 | encoder = &radeon_encoder->base; | 1327 | encoder = &radeon_encoder->base; |
1321 | encoder->possible_crtcs = 0x3; | 1328 | if (rdev->flags & RADEON_SINGLE_CRTC) |
1329 | encoder->possible_crtcs = 0x1; | ||
1330 | else | ||
1331 | encoder->possible_crtcs = 0x3; | ||
1322 | encoder->possible_clones = 0; | 1332 | encoder->possible_clones = 0; |
1323 | 1333 | ||
1324 | radeon_encoder->enc_priv = NULL; | 1334 | radeon_encoder->enc_priv = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 570a58729daf..ace726aa0d76 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -172,6 +172,7 @@ enum radeon_connector_table { | |||
172 | 172 | ||
173 | struct radeon_mode_info { | 173 | struct radeon_mode_info { |
174 | struct atom_context *atom_context; | 174 | struct atom_context *atom_context; |
175 | struct card_info *atom_card_info; | ||
175 | enum radeon_connector_table connector_table; | 176 | enum radeon_connector_table connector_table; |
176 | bool mode_config_initialized; | 177 | bool mode_config_initialized; |
177 | struct radeon_crtc *crtcs[2]; | 178 | struct radeon_crtc *crtcs[2]; |
@@ -186,17 +187,6 @@ struct radeon_mode_info { | |||
186 | 187 | ||
187 | }; | 188 | }; |
188 | 189 | ||
189 | struct radeon_native_mode { | ||
190 | /* preferred mode */ | ||
191 | uint32_t panel_xres, panel_yres; | ||
192 | uint32_t hoverplus, hsync_width; | ||
193 | uint32_t hblank; | ||
194 | uint32_t voverplus, vsync_width; | ||
195 | uint32_t vblank; | ||
196 | uint32_t dotclock; | ||
197 | uint32_t flags; | ||
198 | }; | ||
199 | |||
200 | #define MAX_H_CODE_TIMING_LEN 32 | 190 | #define MAX_H_CODE_TIMING_LEN 32 |
201 | #define MAX_V_CODE_TIMING_LEN 32 | 191 | #define MAX_V_CODE_TIMING_LEN 32 |
202 | 192 | ||
@@ -228,7 +218,7 @@ struct radeon_crtc { | |||
228 | enum radeon_rmx_type rmx_type; | 218 | enum radeon_rmx_type rmx_type; |
229 | fixed20_12 vsc; | 219 | fixed20_12 vsc; |
230 | fixed20_12 hsc; | 220 | fixed20_12 hsc; |
231 | struct radeon_native_mode native_mode; | 221 | struct drm_display_mode native_mode; |
232 | }; | 222 | }; |
233 | 223 | ||
234 | struct radeon_encoder_primary_dac { | 224 | struct radeon_encoder_primary_dac { |
@@ -248,7 +238,7 @@ struct radeon_encoder_lvds { | |||
248 | bool use_bios_dividers; | 238 | bool use_bios_dividers; |
249 | uint32_t lvds_gen_cntl; | 239 | uint32_t lvds_gen_cntl; |
250 | /* panel mode */ | 240 | /* panel mode */ |
251 | struct radeon_native_mode native_mode; | 241 | struct drm_display_mode native_mode; |
252 | }; | 242 | }; |
253 | 243 | ||
254 | struct radeon_encoder_tv_dac { | 244 | struct radeon_encoder_tv_dac { |
@@ -271,6 +261,16 @@ struct radeon_encoder_int_tmds { | |||
271 | struct radeon_tmds_pll tmds_pll[4]; | 261 | struct radeon_tmds_pll tmds_pll[4]; |
272 | }; | 262 | }; |
273 | 263 | ||
264 | /* spread spectrum */ | ||
265 | struct radeon_atom_ss { | ||
266 | uint16_t percentage; | ||
267 | uint8_t type; | ||
268 | uint8_t step; | ||
269 | uint8_t delay; | ||
270 | uint8_t range; | ||
271 | uint8_t refdiv; | ||
272 | }; | ||
273 | |||
274 | struct radeon_encoder_atom_dig { | 274 | struct radeon_encoder_atom_dig { |
275 | /* atom dig */ | 275 | /* atom dig */ |
276 | bool coherent_mode; | 276 | bool coherent_mode; |
@@ -278,8 +278,9 @@ struct radeon_encoder_atom_dig { | |||
278 | /* atom lvds */ | 278 | /* atom lvds */ |
279 | uint32_t lvds_misc; | 279 | uint32_t lvds_misc; |
280 | uint16_t panel_pwr_delay; | 280 | uint16_t panel_pwr_delay; |
281 | struct radeon_atom_ss *ss; | ||
281 | /* panel mode */ | 282 | /* panel mode */ |
282 | struct radeon_native_mode native_mode; | 283 | struct drm_display_mode native_mode; |
283 | }; | 284 | }; |
284 | 285 | ||
285 | struct radeon_encoder_atom_dac { | 286 | struct radeon_encoder_atom_dac { |
@@ -294,7 +295,7 @@ struct radeon_encoder { | |||
294 | uint32_t flags; | 295 | uint32_t flags; |
295 | uint32_t pixel_clock; | 296 | uint32_t pixel_clock; |
296 | enum radeon_rmx_type rmx_type; | 297 | enum radeon_rmx_type rmx_type; |
297 | struct radeon_native_mode native_mode; | 298 | struct drm_display_mode native_mode; |
298 | void *enc_priv; | 299 | void *enc_priv; |
299 | }; | 300 | }; |
300 | 301 | ||
@@ -308,12 +309,15 @@ struct radeon_connector { | |||
308 | uint32_t connector_id; | 309 | uint32_t connector_id; |
309 | uint32_t devices; | 310 | uint32_t devices; |
310 | struct radeon_i2c_chan *ddc_bus; | 311 | struct radeon_i2c_chan *ddc_bus; |
312 | /* some systems have an hdmi and a vga port with a shared ddc line */ | ||
313 | bool shared_ddc; | ||
311 | bool use_digital; | 314 | bool use_digital; |
312 | /* we need to mind the EDID between detect | 315 | /* we need to mind the EDID between detect |
313 | and get modes due to analog/digital/tvencoder */ | 316 | and get modes due to analog/digital/tvencoder */ |
314 | struct edid *edid; | 317 | struct edid *edid; |
315 | void *con_priv; | 318 | void *con_priv; |
316 | bool dac_load_detect; | 319 | bool dac_load_detect; |
320 | uint16_t connector_object_id; | ||
317 | }; | 321 | }; |
318 | 322 | ||
319 | struct radeon_framebuffer { | 323 | struct radeon_framebuffer { |
@@ -407,6 +411,8 @@ extern void | |||
407 | radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); | 411 | radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); |
408 | extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 412 | extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
409 | u16 blue, int regno); | 413 | u16 blue, int regno); |
414 | extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
415 | u16 *blue, int regno); | ||
410 | struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, | 416 | struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, |
411 | struct drm_mode_fb_cmd *mode_cmd, | 417 | struct drm_mode_fb_cmd *mode_cmd, |
412 | struct drm_gem_object *obj); | 418 | struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 73af463b7a59..1f056dadc5c2 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, | |||
400 | int radeon_object_list_reserve(struct list_head *head) | 400 | int radeon_object_list_reserve(struct list_head *head) |
401 | { | 401 | { |
402 | struct radeon_object_list *lobj; | 402 | struct radeon_object_list *lobj; |
403 | struct list_head *i; | ||
404 | int r; | 403 | int r; |
405 | 404 | ||
406 | list_for_each(i, head) { | 405 | list_for_each_entry(lobj, head, list){ |
407 | lobj = list_entry(i, struct radeon_object_list, list); | ||
408 | if (!lobj->robj->pin_count) { | 406 | if (!lobj->robj->pin_count) { |
409 | r = radeon_object_reserve(lobj->robj, true); | 407 | r = radeon_object_reserve(lobj->robj, true); |
410 | if (unlikely(r != 0)) { | 408 | if (unlikely(r != 0)) { |
@@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head) | |||
420 | void radeon_object_list_unreserve(struct list_head *head) | 418 | void radeon_object_list_unreserve(struct list_head *head) |
421 | { | 419 | { |
422 | struct radeon_object_list *lobj; | 420 | struct radeon_object_list *lobj; |
423 | struct list_head *i; | ||
424 | 421 | ||
425 | list_for_each(i, head) { | 422 | list_for_each_entry(lobj, head, list) { |
426 | lobj = list_entry(i, struct radeon_object_list, list); | ||
427 | if (!lobj->robj->pin_count) { | 423 | if (!lobj->robj->pin_count) { |
428 | radeon_object_unreserve(lobj->robj); | 424 | radeon_object_unreserve(lobj->robj); |
429 | } else { | ||
430 | } | 425 | } |
431 | } | 426 | } |
432 | } | 427 | } |
@@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
436 | struct radeon_object_list *lobj; | 431 | struct radeon_object_list *lobj; |
437 | struct radeon_object *robj; | 432 | struct radeon_object *robj; |
438 | struct radeon_fence *old_fence = NULL; | 433 | struct radeon_fence *old_fence = NULL; |
439 | struct list_head *i; | ||
440 | int r; | 434 | int r; |
441 | 435 | ||
442 | r = radeon_object_list_reserve(head); | 436 | r = radeon_object_list_reserve(head); |
@@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
444 | radeon_object_list_unreserve(head); | 438 | radeon_object_list_unreserve(head); |
445 | return r; | 439 | return r; |
446 | } | 440 | } |
447 | list_for_each(i, head) { | 441 | list_for_each_entry(lobj, head, list) { |
448 | lobj = list_entry(i, struct radeon_object_list, list); | ||
449 | robj = lobj->robj; | 442 | robj = lobj->robj; |
450 | if (!robj->pin_count) { | 443 | if (!robj->pin_count) { |
451 | if (lobj->wdomain) { | 444 | if (lobj->wdomain) { |
@@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head) | |||
482 | { | 475 | { |
483 | struct radeon_object_list *lobj; | 476 | struct radeon_object_list *lobj; |
484 | struct radeon_fence *old_fence = NULL; | 477 | struct radeon_fence *old_fence = NULL; |
485 | struct list_head *i; | ||
486 | 478 | ||
487 | list_for_each(i, head) { | 479 | list_for_each_entry(lobj, head, list) { |
488 | lobj = list_entry(i, struct radeon_object_list, list); | ||
489 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; | 480 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; |
490 | lobj->robj->tobj.sync_obj = NULL; | 481 | lobj->robj->tobj.sync_obj = NULL; |
491 | if (old_fence) { | 482 | if (old_fence) { |
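All of the radeon_object.c hunks above are the same mechanical conversion: an open-coded list_for_each() plus list_entry() pair becomes a single list_for_each_entry(), which drops the temporary struct list_head cursor and does the container_of() step itself. A minimal sketch of the pattern, using a placeholder item type rather than the driver's radeon_object_list:

#include <linux/list.h>

struct item {
	struct list_head list;
	int value;
};

static int sum_items(struct list_head *head)
{
	struct item *it;
	int sum = 0;

	/*
	 * Old style:
	 *	struct list_head *i;
	 *	list_for_each(i, head) {
	 *		it = list_entry(i, struct item, list);
	 *		sum += it->value;
	 *	}
	 */

	/* New style: the macro walks the list and converts each node */
	list_for_each_entry(it, head, list)
		sum += it->value;
	return sum;
}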
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c new file mode 100644 index 000000000000..46146c6a2a06 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
3 | * copy of this software and associated documentation files (the "Software"), | ||
4 | * to deal in the Software without restriction, including without limitation | ||
5 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
6 | * and/or sell copies of the Software, and to permit persons to whom the | ||
7 | * Software is furnished to do so, subject to the following conditions: | ||
8 | * | ||
9 | * The above copyright notice and this permission notice shall be included in | ||
10 | * all copies or substantial portions of the Software. | ||
11 | * | ||
12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
15 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
16 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
17 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
18 | * OTHER DEALINGS IN THE SOFTWARE. | ||
19 | * | ||
20 | * Authors: Rafał Miłecki <zajec5@gmail.com> | ||
21 | */ | ||
22 | #include "drmP.h" | ||
23 | #include "radeon.h" | ||
24 | |||
25 | int radeon_debugfs_pm_init(struct radeon_device *rdev); | ||
26 | |||
27 | int radeon_pm_init(struct radeon_device *rdev) | ||
28 | { | ||
29 | if (radeon_debugfs_pm_init(rdev)) { | ||
30 | DRM_ERROR("Failed to register debugfs file for CP !\n"); | ||
31 | } | ||
32 | |||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * Debugfs info | ||
38 | */ | ||
39 | #if defined(CONFIG_DEBUG_FS) | ||
40 | |||
41 | static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | ||
42 | { | ||
43 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
44 | struct drm_device *dev = node->minor->dev; | ||
45 | struct radeon_device *rdev = dev->dev_private; | ||
46 | |||
47 | seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev)); | ||
48 | seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev)); | ||
49 | |||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | static struct drm_info_list radeon_pm_info_list[] = { | ||
54 | {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, | ||
55 | }; | ||
56 | #endif | ||
57 | |||
58 | int radeon_debugfs_pm_init(struct radeon_device *rdev) | ||
59 | { | ||
60 | #if defined(CONFIG_DEBUG_FS) | ||
61 | return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); | ||
62 | #else | ||
63 | return 0; | ||
64 | #endif | ||
65 | } | ||
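The new radeon_pm.c uses the driver's standard debugfs pattern: a drm_info_list entry whose callback prints through a seq_file, registered with radeon_debugfs_add_files() only when CONFIG_DEBUG_FS is enabled. As a sketch of the same pattern applied to another read-only info file (the file name and the field printed here are illustrative, not part of this patch):

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_example_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* the drm debugfs core handles open/read/release around this */
	seq_printf(m, "accel working: %d\n", rdev->accel_working);
	return 0;
}

static struct drm_info_list radeon_example_info_list[] = {
	{"radeon_example_info", radeon_debugfs_example_info, 0, NULL},
};
#endif

Registration then mirrors radeon_debugfs_pm_init() above, guarded the same way on CONFIG_DEBUG_FS.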
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index bfa1ab9c93e1..29ab75903ec1 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -290,6 +290,8 @@ | |||
290 | #define RADEON_BUS_CNTL 0x0030 | 290 | #define RADEON_BUS_CNTL 0x0030 |
291 | # define RADEON_BUS_MASTER_DIS (1 << 6) | 291 | # define RADEON_BUS_MASTER_DIS (1 << 6) |
292 | # define RADEON_BUS_BIOS_DIS_ROM (1 << 12) | 292 | # define RADEON_BUS_BIOS_DIS_ROM (1 << 12) |
293 | # define RS600_BUS_MASTER_DIS (1 << 14) | ||
294 | # define RS600_MSI_REARM (1 << 20) /* rs600/rs690/rs740 */ | ||
293 | # define RADEON_BUS_RD_DISCARD_EN (1 << 24) | 295 | # define RADEON_BUS_RD_DISCARD_EN (1 << 24) |
294 | # define RADEON_BUS_RD_ABORT_EN (1 << 25) | 296 | # define RADEON_BUS_RD_ABORT_EN (1 << 25) |
295 | # define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28) | 297 | # define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28) |
@@ -297,6 +299,9 @@ | |||
297 | # define RADEON_BUS_READ_BURST (1 << 30) | 299 | # define RADEON_BUS_READ_BURST (1 << 30) |
298 | #define RADEON_BUS_CNTL1 0x0034 | 300 | #define RADEON_BUS_CNTL1 0x0034 |
299 | # define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) | 301 | # define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) |
302 | /* rv370/rv380, rv410, r423/r430/r480, r5xx */ | ||
303 | #define RADEON_MSI_REARM_EN 0x0160 | ||
304 | # define RV370_MSI_REARM_EN (1 << 0) | ||
300 | 305 | ||
301 | /* #define RADEON_PCIE_INDEX 0x0030 */ | 306 | /* #define RADEON_PCIE_INDEX 0x0030 */ |
302 | /* #define RADEON_PCIE_DATA 0x0034 */ | 307 | /* #define RADEON_PCIE_DATA 0x0034 */ |
@@ -3311,6 +3316,7 @@ | |||
3311 | #define RADEON_AIC_CNTL 0x01d0 | 3316 | #define RADEON_AIC_CNTL 0x01d0 |
3312 | # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) | 3317 | # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) |
3313 | # define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1) | 3318 | # define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1) |
3319 | # define RS400_MSI_REARM (1 << 3) /* rs400/rs480 */ | ||
3314 | #define RADEON_AIC_LO_ADDR 0x01dc | 3320 | #define RADEON_AIC_LO_ADDR 0x01dc |
3315 | #define RADEON_AIC_PT_BASE 0x01d8 | 3321 | #define RADEON_AIC_PT_BASE 0x01d8 |
3316 | #define RADEON_AIC_HI_ADDR 0x01e0 | 3322 | #define RADEON_AIC_HI_ADDR 0x01e0 |
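The new BUS_CNTL, AIC_CNTL and MSI_REARM_EN bits all serve one purpose: re-arming MSI delivery on chipsets that can lose an MSI edge. A hedged sketch of how an interrupt-processing path could use them per family; the helper name and the exact call site are assumptions, not part of this header change:

static void radeon_msi_rearm(struct radeon_device *rdev)
{
	u32 tmp;

	switch (rdev->family) {
	case CHIP_RS400:
	case CHIP_RS480:
		/* rs400/rs480: the rearm bit lives in AIC_CNTL */
		tmp = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
		WREG32(RADEON_AIC_CNTL, tmp);
		WREG32(RADEON_AIC_CNTL, tmp | RS400_MSI_REARM);
		break;
	case CHIP_RS600:
	case CHIP_RS690:
	case CHIP_RS740:
		/* rs600/rs690/rs740: the rearm bit lives in BUS_CNTL */
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
		WREG32(RADEON_BUS_CNTL, tmp);
		WREG32(RADEON_BUS_CNTL, tmp | RS600_MSI_REARM);
		break;
	default:
		/* rv370 and newer PCIE asics have a dedicated register */
		WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
		break;
	}
}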
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 03c33cf4e14c..f8a465d9a1cf 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -42,7 +42,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
42 | /* Number of tests = | 42 | /* Number of tests = |
43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | 43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size |
44 | */ | 44 | */ |
45 | n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - | 45 | n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - |
46 | rdev->cp.ring_size) / size; | 46 | rdev->cp.ring_size) / size; |
47 | 47 | ||
48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | 48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); |
@@ -102,7 +102,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
102 | goto out_cleanup; | 102 | goto out_cleanup; |
103 | } | 103 | } |
104 | 104 | ||
105 | r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); | 105 | r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence); |
106 | if (r) { | 106 | if (r) { |
107 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); | 107 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); |
108 | goto out_cleanup; | 108 | goto out_cleanup; |
@@ -145,7 +145,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
145 | goto out_cleanup; | 145 | goto out_cleanup; |
146 | } | 146 | } |
147 | 147 | ||
148 | r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); | 148 | r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence); |
149 | if (r) { | 149 | if (r) { |
150 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); | 150 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); |
151 | goto out_cleanup; | 151 | goto out_cleanup; |
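Replacing the literal 4096 with RADEON_GPU_PAGE_SIZE keeps the test-count and blit-size arithmetic tied to a single definition. Assuming the constant is defined as 4096 elsewhere in the driver, the per-object copy size works out as:

#define RADEON_GPU_PAGE_SIZE 4096		/* assumed value */

/* a 1 MiB test buffer is blitted as this many GPU pages */
static unsigned radeon_test_pages(unsigned size)
{
	return size / RADEON_GPU_PAGE_SIZE;	/* 1024 * 1024 / 4096 = 256 */
}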
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 765bd184b6fc..1381e06d6af3 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -295,6 +295,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, | |||
295 | if (unlikely(r)) { | 295 | if (unlikely(r)) { |
296 | return r; | 296 | return r; |
297 | } | 297 | } |
298 | |||
299 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); | ||
300 | if (unlikely(r)) { | ||
301 | goto out_cleanup; | ||
302 | } | ||
303 | |||
298 | r = ttm_tt_bind(bo->ttm, &tmp_mem); | 304 | r = ttm_tt_bind(bo->ttm, &tmp_mem); |
299 | if (unlikely(r)) { | 305 | if (unlikely(r)) { |
300 | goto out_cleanup; | 306 | goto out_cleanup; |
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h new file mode 100644 index 000000000000..48a913a06cfd --- /dev/null +++ b/drivers/gpu/drm/radeon/rs100d.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RS100D_H__ | ||
29 | #define __RS100D_H__ | ||
30 | |||
31 | /* Registers */ | ||
32 | #define R_00015C_NB_TOM 0x00015C | ||
33 | #define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
34 | #define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
35 | #define C_00015C_MC_FB_START 0xFFFF0000 | ||
36 | #define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
37 | #define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
38 | #define C_00015C_MC_FB_TOP 0x0000FFFF | ||
39 | |||
40 | #endif | ||
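Every new per-ASIC *d.h header follows the same naming scheme: S_<offset>_<FIELD>(x) shifts a value into the field, G_<offset>_<FIELD>(x) extracts the field from a register read, and C_<offset>_<FIELD> is the AND-mask that clears it. A small sketch of reading the framebuffer window out of NB_TOM with the macros above (the fields encode addresses shifted right by 16 bits; the helper itself is illustrative):

static void rs100_read_fb_window(struct radeon_device *rdev,
				 u64 *fb_start, u64 *fb_end)
{
	u32 tom = RREG32(R_00015C_NB_TOM);

	/* G_ extracts the 16-bit field, which holds address >> 16 */
	*fb_start = (u64)G_00015C_MC_FB_START(tom) << 16;
	*fb_end = ((u64)G_00015C_MC_FB_TOP(tom) << 16) | 0xFFFF;
}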
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index a3fbdad938c7..ca037160a582 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -27,27 +27,12 @@ | |||
27 | */ | 27 | */ |
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon_reg.h" | ||
31 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "rs400d.h" | ||
32 | 32 | ||
33 | /* rs400,rs480 depends on : */ | 33 | /* This file gathers functions specific to rs400,rs480 */ |
34 | void r100_hdp_reset(struct radeon_device *rdev); | 34 | static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); |
35 | void r100_mc_disable_clients(struct radeon_device *rdev); | ||
36 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
37 | void r420_pipes_init(struct radeon_device *rdev); | ||
38 | 35 | ||
39 | /* This files gather functions specifics to : | ||
40 | * rs400,rs480 | ||
41 | * | ||
42 | * Some of these functions might be used by newer ASICs. | ||
43 | */ | ||
44 | void rs400_gpu_init(struct radeon_device *rdev); | ||
45 | int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
46 | |||
47 | |||
48 | /* | ||
49 | * GART functions. | ||
50 | */ | ||
51 | void rs400_gart_adjust_size(struct radeon_device *rdev) | 36 | void rs400_gart_adjust_size(struct radeon_device *rdev) |
52 | { | 37 | { |
53 | /* Check gart size */ | 38 | /* Check gart size */ |
@@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
238 | return 0; | 223 | return 0; |
239 | } | 224 | } |
240 | 225 | ||
241 | |||
242 | /* | ||
243 | * MC functions. | ||
244 | */ | ||
245 | int rs400_mc_init(struct radeon_device *rdev) | ||
246 | { | ||
247 | uint32_t tmp; | ||
248 | int r; | ||
249 | |||
250 | if (r100_debugfs_rbbm_init(rdev)) { | ||
251 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
252 | } | ||
253 | |||
254 | rs400_gpu_init(rdev); | ||
255 | rs400_gart_disable(rdev); | ||
256 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
257 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | ||
258 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | ||
259 | r = radeon_mc_setup(rdev); | ||
260 | if (r) { | ||
261 | return r; | ||
262 | } | ||
263 | |||
264 | r100_mc_disable_clients(rdev); | ||
265 | if (r300_mc_wait_for_idle(rdev)) { | ||
266 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
267 | "programming pipes. Bad things might happen.\n"); | ||
268 | } | ||
269 | |||
270 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
271 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | ||
272 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | ||
273 | WREG32(RADEON_MC_FB_LOCATION, tmp); | ||
274 | tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS; | ||
275 | WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
276 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
277 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
278 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | void rs400_mc_fini(struct radeon_device *rdev) | ||
284 | { | ||
285 | } | ||
286 | |||
287 | |||
288 | /* | ||
289 | * Global GPU functions | ||
290 | */ | ||
291 | void rs400_errata(struct radeon_device *rdev) | ||
292 | { | ||
293 | rdev->pll_errata = 0; | ||
294 | } | ||
295 | |||
296 | void rs400_gpu_init(struct radeon_device *rdev) | 226 | void rs400_gpu_init(struct radeon_device *rdev) |
297 | { | 227 | { |
298 | /* FIXME: HDP same place on rs400 ? */ | 228 | /* FIXME: HDP same place on rs400 ? */ |
@@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
305 | } | 235 | } |
306 | } | 236 | } |
307 | 237 | ||
308 | |||
309 | /* | ||
310 | * VRAM info. | ||
311 | */ | ||
312 | void rs400_vram_info(struct radeon_device *rdev) | 238 | void rs400_vram_info(struct radeon_device *rdev) |
313 | { | 239 | { |
314 | rs400_gart_adjust_size(rdev); | 240 | rs400_gart_adjust_size(rdev); |
@@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev) | |||
319 | r100_vram_init_sizes(rdev); | 245 | r100_vram_init_sizes(rdev); |
320 | } | 246 | } |
321 | 247 | ||
322 | |||
323 | /* | ||
324 | * Indirect registers accessor | ||
325 | */ | ||
326 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 248 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
327 | { | 249 | { |
328 | uint32_t r; | 250 | uint32_t r; |
@@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
340 | WREG32(RS480_NB_MC_INDEX, 0xff); | 262 | WREG32(RS480_NB_MC_INDEX, 0xff); |
341 | } | 263 | } |
342 | 264 | ||
343 | |||
344 | /* | ||
345 | * Debugfs info | ||
346 | */ | ||
347 | #if defined(CONFIG_DEBUG_FS) | 265 | #if defined(CONFIG_DEBUG_FS) |
348 | static int rs400_debugfs_gart_info(struct seq_file *m, void *data) | 266 | static int rs400_debugfs_gart_info(struct seq_file *m, void *data) |
349 | { | 267 | { |
@@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = { | |||
419 | }; | 337 | }; |
420 | #endif | 338 | #endif |
421 | 339 | ||
422 | int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | 340 | static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
423 | { | 341 | { |
424 | #if defined(CONFIG_DEBUG_FS) | 342 | #if defined(CONFIG_DEBUG_FS) |
425 | return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); | 343 | return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); |
@@ -427,3 +345,190 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | |||
427 | return 0; | 345 | return 0; |
428 | #endif | 346 | #endif |
429 | } | 347 | } |
348 | |||
349 | static int rs400_mc_init(struct radeon_device *rdev) | ||
350 | { | ||
351 | int r; | ||
352 | u32 tmp; | ||
353 | |||
354 | /* Setup GPU memory space */ | ||
355 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | ||
356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; | ||
357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
358 | r = radeon_mc_setup(rdev); | ||
359 | if (r) | ||
360 | return r; | ||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | void rs400_mc_program(struct radeon_device *rdev) | ||
365 | { | ||
366 | struct r100_mc_save save; | ||
367 | |||
368 | /* Stops all mc clients */ | ||
369 | r100_mc_stop(rdev, &save); | ||
370 | |||
371 | /* Wait for mc idle */ | ||
372 | if (r300_mc_wait_for_idle(rdev)) | ||
373 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | ||
374 | WREG32(R_000148_MC_FB_LOCATION, | ||
375 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
376 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
377 | |||
378 | r100_mc_resume(rdev, &save); | ||
379 | } | ||
380 | |||
381 | static int rs400_startup(struct radeon_device *rdev) | ||
382 | { | ||
383 | int r; | ||
384 | |||
385 | rs400_mc_program(rdev); | ||
386 | /* Resume clock */ | ||
387 | r300_clock_startup(rdev); | ||
388 | /* Initialize GPU configuration (# pipes, ...) */ | ||
389 | rs400_gpu_init(rdev); | ||
390 | /* Initialize GART (initialize after TTM so we can allocate | ||
391 | * memory through TTM but finalize after TTM) */ | ||
392 | r = rs400_gart_enable(rdev); | ||
393 | if (r) | ||
394 | return r; | ||
395 | /* Enable IRQ */ | ||
396 | rdev->irq.sw_int = true; | ||
397 | r100_irq_set(rdev); | ||
398 | /* 1M ring buffer */ | ||
399 | r = r100_cp_init(rdev, 1024 * 1024); | ||
400 | if (r) { | ||
401 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
402 | return r; | ||
403 | } | ||
404 | r = r100_wb_init(rdev); | ||
405 | if (r) | ||
406 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
407 | r = r100_ib_init(rdev); | ||
408 | if (r) { | ||
409 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
410 | return r; | ||
411 | } | ||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | int rs400_resume(struct radeon_device *rdev) | ||
416 | { | ||
417 | /* Make sure the GART is not working */ | ||
418 | rs400_gart_disable(rdev); | ||
419 | /* Resume clock before doing reset */ | ||
420 | r300_clock_startup(rdev); | ||
421 | /* setup MC before calling post tables */ | ||
422 | rs400_mc_program(rdev); | ||
423 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
424 | if (radeon_gpu_reset(rdev)) { | ||
425 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
426 | RREG32(R_000E40_RBBM_STATUS), | ||
427 | RREG32(R_0007C0_CP_STAT)); | ||
428 | } | ||
429 | /* post */ | ||
430 | radeon_combios_asic_init(rdev->ddev); | ||
431 | /* Resume clock after posting */ | ||
432 | r300_clock_startup(rdev); | ||
433 | return rs400_startup(rdev); | ||
434 | } | ||
435 | |||
436 | int rs400_suspend(struct radeon_device *rdev) | ||
437 | { | ||
438 | r100_cp_disable(rdev); | ||
439 | r100_wb_disable(rdev); | ||
440 | r100_irq_disable(rdev); | ||
441 | rs400_gart_disable(rdev); | ||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | void rs400_fini(struct radeon_device *rdev) | ||
446 | { | ||
447 | rs400_suspend(rdev); | ||
448 | r100_cp_fini(rdev); | ||
449 | r100_wb_fini(rdev); | ||
450 | r100_ib_fini(rdev); | ||
451 | radeon_gem_fini(rdev); | ||
452 | rs400_gart_fini(rdev); | ||
453 | radeon_irq_kms_fini(rdev); | ||
454 | radeon_fence_driver_fini(rdev); | ||
455 | radeon_object_fini(rdev); | ||
456 | radeon_atombios_fini(rdev); | ||
457 | kfree(rdev->bios); | ||
458 | rdev->bios = NULL; | ||
459 | } | ||
460 | |||
461 | int rs400_init(struct radeon_device *rdev) | ||
462 | { | ||
463 | int r; | ||
464 | |||
465 | /* Disable VGA */ | ||
466 | r100_vga_render_disable(rdev); | ||
467 | /* Initialize scratch registers */ | ||
468 | radeon_scratch_init(rdev); | ||
469 | /* Initialize surface registers */ | ||
470 | radeon_surface_init(rdev); | ||
471 | /* TODO: disabling VGA needs to go through a VGA request */ | ||
472 | /* BIOS */ | ||
473 | if (!radeon_get_bios(rdev)) { | ||
474 | if (ASIC_IS_AVIVO(rdev)) | ||
475 | return -EINVAL; | ||
476 | } | ||
477 | if (rdev->is_atom_bios) { | ||
478 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); | ||
479 | return -EINVAL; | ||
480 | } else { | ||
481 | r = radeon_combios_init(rdev); | ||
482 | if (r) | ||
483 | return r; | ||
484 | } | ||
485 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
486 | if (radeon_gpu_reset(rdev)) { | ||
487 | dev_warn(rdev->dev, | ||
488 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
489 | RREG32(R_000E40_RBBM_STATUS), | ||
490 | RREG32(R_0007C0_CP_STAT)); | ||
491 | } | ||
492 | /* check if cards are posted or not */ | ||
493 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
494 | DRM_INFO("GPU not posted. posting now...\n"); | ||
495 | radeon_combios_asic_init(rdev->ddev); | ||
496 | } | ||
497 | /* Initialize clocks */ | ||
498 | radeon_get_clock_info(rdev->ddev); | ||
499 | /* Get vram information */ | ||
500 | rs400_vram_info(rdev); | ||
501 | /* Initialize memory controller (also test AGP) */ | ||
502 | r = rs400_mc_init(rdev); | ||
503 | if (r) | ||
504 | return r; | ||
505 | /* Fence driver */ | ||
506 | r = radeon_fence_driver_init(rdev); | ||
507 | if (r) | ||
508 | return r; | ||
509 | r = radeon_irq_kms_init(rdev); | ||
510 | if (r) | ||
511 | return r; | ||
512 | /* Memory manager */ | ||
513 | r = radeon_object_init(rdev); | ||
514 | if (r) | ||
515 | return r; | ||
516 | r = rs400_gart_init(rdev); | ||
517 | if (r) | ||
518 | return r; | ||
519 | r300_set_reg_safe(rdev); | ||
520 | rdev->accel_working = true; | ||
521 | r = rs400_startup(rdev); | ||
522 | if (r) { | ||
523 | /* Something went wrong with the accel init, stop accel */ | ||
524 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
525 | rs400_suspend(rdev); | ||
526 | r100_cp_fini(rdev); | ||
527 | r100_wb_fini(rdev); | ||
528 | r100_ib_fini(rdev); | ||
529 | rs400_gart_fini(rdev); | ||
530 | radeon_irq_kms_fini(rdev); | ||
531 | rdev->accel_working = false; | ||
532 | } | ||
533 | return 0; | ||
534 | } | ||
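The rewritten rs400.c follows the newer per-ASIC lifecycle split: rs400_init() does one-time setup and ends by calling rs400_startup(), rs400_resume() re-posts the card and runs the same startup path, rs400_suspend() tears the engines down, and rs400_fini() frees everything. These entry points are reached through the radeon_asic function table; a hedged sketch of what the table entry could look like (the field names follow struct radeon_asic as used elsewhere in the driver, but the exact member list shown here is an assumption):

static struct radeon_asic rs400_asic = {
	.init = &rs400_init,
	.fini = &rs400_fini,
	.suspend = &rs400_suspend,
	.resume = &rs400_resume,
	.gart_tlb_flush = &rs400_gart_tlb_flush,
	.gart_set_page = &rs400_gart_set_page,
	/* CP, IRQ, fence and copy hooks reuse the r100/r300 helpers */
};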
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h new file mode 100644 index 000000000000..6d8bac58ced9 --- /dev/null +++ b/drivers/gpu/drm/radeon/rs400d.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RS400D_H__ | ||
29 | #define __RS400D_H__ | ||
30 | |||
31 | /* Registers */ | ||
32 | #define R_000148_MC_FB_LOCATION 0x000148 | ||
33 | #define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
34 | #define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
35 | #define C_000148_MC_FB_START 0xFFFF0000 | ||
36 | #define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
37 | #define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
38 | #define C_000148_MC_FB_TOP 0x0000FFFF | ||
39 | #define R_00015C_NB_TOM 0x00015C | ||
40 | #define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
41 | #define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
42 | #define C_00015C_MC_FB_START 0xFFFF0000 | ||
43 | #define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
44 | #define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
45 | #define C_00015C_MC_FB_TOP 0x0000FFFF | ||
46 | #define R_0007C0_CP_STAT 0x0007C0 | ||
47 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
48 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
49 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
50 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
51 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
52 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
53 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
54 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
55 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
56 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
57 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
58 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
59 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
60 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
61 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
62 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
63 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
64 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
65 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
66 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
67 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
68 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
69 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
70 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
71 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
72 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
73 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
74 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
75 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
76 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
77 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
78 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
79 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
80 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
81 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
82 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
83 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
84 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
85 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
86 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
87 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
88 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
89 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
90 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
91 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
92 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
93 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
94 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
95 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
96 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
97 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
98 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
99 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
100 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
101 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
102 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
103 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
104 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
105 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
106 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
107 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
108 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
109 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
110 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
111 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
112 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
113 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
114 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
115 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
116 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
117 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
118 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
119 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
120 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
121 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
122 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
123 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
124 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
125 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
126 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
127 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
128 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
129 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
130 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
131 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
132 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
133 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
134 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
135 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
136 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
137 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
138 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
139 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
140 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
141 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
142 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
143 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
144 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
145 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
146 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
147 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
148 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
149 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
150 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
151 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
152 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
153 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
154 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
155 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
156 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
157 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
158 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
159 | |||
160 | #endif | ||
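The RBBM_STATUS and CP_STAT fields above are what rs400_init() and rs400_resume() dump when a GPU reset fails, and they are also the bits a wait-for-idle loop polls. A hedged sketch of such a poll built on the GUI_ACTIVE field (the loop shape mirrors the existing r100/r300 idle helpers; this particular function is illustrative):

static int rs400_wait_gui_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* GUI_ACTIVE (bit 31) clears once the 2D/3D engines drain */
		if (!G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}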
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4a4fe1cb131c..5f117cd8736a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -25,27 +25,26 @@ | |||
25 | * Alex Deucher | 25 | * Alex Deucher |
26 | * Jerome Glisse | 26 | * Jerome Glisse |
27 | */ | 27 | */ |
28 | /* RS600 / Radeon X1250/X1270 integrated GPU | ||
29 | * | ||
30 | * This file gathers functions specific to RS600, the IGP of the | ||
31 | * X1250/X1270 family supporting Intel CPUs (while RS690/RS740 is the | ||
32 | * X1250/X1270 IGP supporting AMD CPUs). The display engine is the | ||
33 | * AVIVO one, the BIOS is an ATOM BIOS, and the 3D block is the one | ||
34 | * from the R4XX family. The GART is different from the RS400 one and | ||
35 | * is very close to that of the R600 family (R600 likely being an | ||
36 | * evolution of the RS600 GART block). | ||
37 | */ | ||
28 | #include "drmP.h" | 38 | #include "drmP.h" |
29 | #include "radeon_reg.h" | ||
30 | #include "radeon.h" | 39 | #include "radeon.h" |
40 | #include "atom.h" | ||
41 | #include "rs600d.h" | ||
31 | 42 | ||
32 | #include "rs600_reg_safe.h" | 43 | #include "rs600_reg_safe.h" |
33 | 44 | ||
34 | /* rs600 depends on : */ | ||
35 | void r100_hdp_reset(struct radeon_device *rdev); | ||
36 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
37 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
38 | void r420_pipes_init(struct radeon_device *rdev); | ||
39 | |||
40 | /* This files gather functions specifics to : | ||
41 | * rs600 | ||
42 | * | ||
43 | * Some of these functions might be used by newer ASICs. | ||
44 | */ | ||
45 | void rs600_gpu_init(struct radeon_device *rdev); | 45 | void rs600_gpu_init(struct radeon_device *rdev); |
46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
47 | 47 | ||
48 | |||
49 | /* | 48 | /* |
50 | * GART. | 49 | * GART. |
51 | */ | 50 | */ |
@@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) | |||
53 | { | 52 | { |
54 | uint32_t tmp; | 53 | uint32_t tmp; |
55 | 54 | ||
56 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 55 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
57 | tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | 56 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
58 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 57 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
59 | 58 | ||
60 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 59 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
61 | tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; | 60 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); |
62 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 61 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
63 | 62 | ||
64 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 63 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
65 | tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | 64 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
66 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 65 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
67 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 66 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
68 | } | 67 | } |
69 | 68 | ||
70 | int rs600_gart_init(struct radeon_device *rdev) | 69 | int rs600_gart_init(struct radeon_device *rdev) |
@@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
86 | 85 | ||
87 | int rs600_gart_enable(struct radeon_device *rdev) | 86 | int rs600_gart_enable(struct radeon_device *rdev) |
88 | { | 87 | { |
89 | uint32_t tmp; | 88 | u32 tmp; |
90 | int r, i; | 89 | int r, i; |
91 | 90 | ||
92 | if (rdev->gart.table.vram.robj == NULL) { | 91 | if (rdev->gart.table.vram.robj == NULL) { |
@@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
96 | r = radeon_gart_table_vram_pin(rdev); | 95 | r = radeon_gart_table_vram_pin(rdev); |
97 | if (r) | 96 | if (r) |
98 | return r; | 97 | return r; |
98 | /* Enable bus master */ | ||
99 | tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; | ||
100 | WREG32(R_00004C_BUS_CNTL, tmp); | ||
99 | /* FIXME: setup default page */ | 101 | /* FIXME: setup default page */ |
100 | WREG32_MC(RS600_MC_PT0_CNTL, | 102 | WREG32_MC(R_000100_MC_PT0_CNTL, |
101 | (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | | 103 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
102 | RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); | 104 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); |
103 | for (i = 0; i < 19; i++) { | 105 | for (i = 0; i < 19; i++) { |
104 | WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, | 106 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, |
105 | (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | | 107 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | |
106 | RS600_SYSTEM_ACCESS_MODE_IN_SYS | | 108 | S_00016C_SYSTEM_ACCESS_MODE_MASK( |
107 | RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | | 109 | V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | |
108 | RS600_EFFECTIVE_L1_CACHE_SIZE(3) | | 110 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( |
109 | RS600_ENABLE_FRAGMENT_PROCESSING | | 111 | V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | |
110 | RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); | 112 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | |
113 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | | ||
114 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); | ||
111 | } | 115 | } |
112 | 116 | ||
113 | /* System context map to GART space */ | 117 | /* System context map to GART space */ |
114 | WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); | 118 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); |
115 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 119 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); |
116 | WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp); | ||
117 | 120 | ||
118 | /* enable first context */ | 121 | /* enable first context */ |
119 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); | 122 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); |
120 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 123 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); |
121 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); | 124 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, |
122 | WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, | 125 | S_000102_ENABLE_PAGE_TABLE(1) | |
123 | (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); | 126 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); |
124 | /* disable all other contexts */ | 127 | /* disable all other contexts */ |
125 | for (i = 1; i < 8; i++) { | 128 | for (i = 1; i < 8; i++) { |
126 | WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); | 129 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); |
127 | } | 130 | } |
128 | 131 | ||
129 | /* setup the page table */ | 132 | /* setup the page table */ |
130 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, | 133 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, |
131 | rdev->gart.table_addr); | 134 | rdev->gart.table_addr); |
132 | WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); | 135 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); |
133 | 136 | ||
134 | /* enable page tables */ | 137 | /* enable page tables */ |
135 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 138 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
136 | WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); | 139 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); |
137 | tmp = RREG32_MC(RS600_MC_CNTL1); | 140 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
138 | WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); | 141 | WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); |
139 | rs600_gart_tlb_flush(rdev); | 142 | rs600_gart_tlb_flush(rdev); |
140 | rdev->gart.ready = true; | 143 | rdev->gart.ready = true; |
141 | return 0; | 144 | return 0; |
@@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev) | |||
146 | uint32_t tmp; | 149 | uint32_t tmp; |
147 | 150 | ||
148 | /* FIXME: disable out of gart access */ | 151 | /* FIXME: disable out of gart access */ |
149 | WREG32_MC(RS600_MC_PT0_CNTL, 0); | 152 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
150 | tmp = RREG32_MC(RS600_MC_CNTL1); | 153 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
151 | tmp &= ~RS600_ENABLE_PAGE_TABLES; | 154 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
152 | WREG32_MC(RS600_MC_CNTL1, tmp); | ||
153 | if (rdev->gart.table.vram.robj) { | 155 | if (rdev->gart.table.vram.robj) { |
154 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 156 | radeon_object_kunmap(rdev->gart.table.vram.robj); |
155 | radeon_object_unpin(rdev->gart.table.vram.robj); | 157 | radeon_object_unpin(rdev->gart.table.vram.robj); |
@@ -183,132 +185,64 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
183 | return 0; | 185 | return 0; |
184 | } | 186 | } |
185 | 187 | ||
186 | |||
187 | /* | ||
188 | * MC. | ||
189 | */ | ||
190 | void rs600_mc_disable_clients(struct radeon_device *rdev) | ||
191 | { | ||
192 | unsigned tmp; | ||
193 | |||
194 | if (r100_gui_wait_for_idle(rdev)) { | ||
195 | printk(KERN_WARNING "Failed to wait GUI idle while " | ||
196 | "programming pipes. Bad things might happen.\n"); | ||
197 | } | ||
198 | |||
199 | rv515_vga_render_disable(rdev); | ||
200 | |||
201 | tmp = RREG32(AVIVO_D1VGA_CONTROL); | ||
202 | WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); | ||
203 | tmp = RREG32(AVIVO_D2VGA_CONTROL); | ||
204 | WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); | ||
205 | |||
206 | tmp = RREG32(AVIVO_D1CRTC_CONTROL); | ||
207 | WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); | ||
208 | tmp = RREG32(AVIVO_D2CRTC_CONTROL); | ||
209 | WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); | ||
210 | |||
211 | /* make sure all previous write got through */ | ||
212 | tmp = RREG32(AVIVO_D2CRTC_CONTROL); | ||
213 | |||
214 | mdelay(1); | ||
215 | } | ||
216 | |||
217 | int rs600_mc_init(struct radeon_device *rdev) | ||
218 | { | ||
219 | uint32_t tmp; | ||
220 | int r; | ||
221 | |||
222 | if (r100_debugfs_rbbm_init(rdev)) { | ||
223 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
224 | } | ||
225 | |||
226 | rs600_gpu_init(rdev); | ||
227 | rs600_gart_disable(rdev); | ||
228 | |||
229 | /* Setup GPU memory space */ | ||
230 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
231 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
232 | r = radeon_mc_setup(rdev); | ||
233 | if (r) { | ||
234 | return r; | ||
235 | } | ||
236 | |||
237 | /* Program GPU memory space */ | ||
238 | /* Enable bus master */ | ||
239 | tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; | ||
240 | WREG32(RADEON_BUS_CNTL, tmp); | ||
241 | /* FIXME: What does AGP means for such chipset ? */ | ||
242 | WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
243 | /* FIXME: are this AGP reg in indirect MC range ? */ | ||
244 | WREG32_MC(RS600_MC_AGP_BASE, 0); | ||
245 | WREG32_MC(RS600_MC_AGP_BASE_2, 0); | ||
246 | rs600_mc_disable_clients(rdev); | ||
247 | if (rs600_mc_wait_for_idle(rdev)) { | ||
248 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
249 | "programming pipes. Bad things might happen.\n"); | ||
250 | } | ||
251 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
252 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); | ||
253 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); | ||
254 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); | ||
255 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | void rs600_mc_fini(struct radeon_device *rdev) | ||
260 | { | ||
261 | } | ||
262 | |||
263 | |||
264 | /* | ||
265 | * Interrupts | ||
266 | */ | ||
267 | int rs600_irq_set(struct radeon_device *rdev) | 188 | int rs600_irq_set(struct radeon_device *rdev) |
268 | { | 189 | { |
269 | uint32_t tmp = 0; | 190 | uint32_t tmp = 0; |
270 | uint32_t mode_int = 0; | 191 | uint32_t mode_int = 0; |
271 | 192 | ||
272 | if (rdev->irq.sw_int) { | 193 | if (rdev->irq.sw_int) { |
273 | tmp |= RADEON_SW_INT_ENABLE; | 194 | tmp |= S_000040_SW_INT_EN(1); |
274 | } | 195 | } |
275 | if (rdev->irq.crtc_vblank_int[0]) { | 196 | if (rdev->irq.crtc_vblank_int[0]) { |
276 | mode_int |= AVIVO_D1MODE_INT_MASK; | 197 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); |
277 | } | 198 | } |
278 | if (rdev->irq.crtc_vblank_int[1]) { | 199 | if (rdev->irq.crtc_vblank_int[1]) { |
279 | mode_int |= AVIVO_D2MODE_INT_MASK; | 200 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); |
280 | } | 201 | } |
281 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 202 | WREG32(R_000040_GEN_INT_CNTL, tmp); |
282 | WREG32(AVIVO_DxMODE_INT_MASK, mode_int); | 203 | WREG32(R_006540_DxMODE_INT_MASK, mode_int); |
283 | return 0; | 204 | return 0; |
284 | } | 205 | } |
285 | 206 | ||
286 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | 207 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) |
287 | { | 208 | { |
288 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | 209 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
289 | uint32_t irq_mask = RADEON_SW_INT_TEST; | 210 | uint32_t irq_mask = ~C_000044_SW_INT; |
290 | 211 | ||
291 | if (irqs & AVIVO_DISPLAY_INT_STATUS) { | 212 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
292 | *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); | 213 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
293 | if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | 214 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { |
294 | WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | 215 | WREG32(R_006534_D1MODE_VBLANK_STATUS, |
216 | S_006534_D1MODE_VBLANK_ACK(1)); | ||
295 | } | 217 | } |
296 | if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | 218 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { |
297 | WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | 219 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
220 | S_006D34_D2MODE_VBLANK_ACK(1)); | ||
298 | } | 221 | } |
299 | } else { | 222 | } else { |
300 | *r500_disp_int = 0; | 223 | *r500_disp_int = 0; |
301 | } | 224 | } |
302 | 225 | ||
303 | if (irqs) { | 226 | if (irqs) { |
304 | WREG32(RADEON_GEN_INT_STATUS, irqs); | 227 | WREG32(R_000044_GEN_INT_STATUS, irqs); |
305 | } | 228 | } |
306 | return irqs & irq_mask; | 229 | return irqs & irq_mask; |
307 | } | 230 | } |
308 | 231 | ||
232 | void rs600_irq_disable(struct radeon_device *rdev) | ||
233 | { | ||
234 | u32 tmp; | ||
235 | |||
236 | WREG32(R_000040_GEN_INT_CNTL, 0); | ||
237 | WREG32(R_006540_DxMODE_INT_MASK, 0); | ||
238 | /* Wait and acknowledge irq */ | ||
239 | mdelay(1); | ||
240 | rs600_irq_ack(rdev, &tmp); | ||
241 | } | ||
242 | |||
309 | int rs600_irq_process(struct radeon_device *rdev) | 243 | int rs600_irq_process(struct radeon_device *rdev) |
310 | { | 244 | { |
311 | uint32_t status; | 245 | uint32_t status, msi_rearm; |
312 | uint32_t r500_disp_int; | 246 | uint32_t r500_disp_int; |
313 | 247 | ||
314 | status = rs600_irq_ack(rdev, &r500_disp_int); | 248 | status = rs600_irq_ack(rdev, &r500_disp_int); |
@@ -317,71 +251,65 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
317 | } | 251 | } |
318 | while (status || r500_disp_int) { | 252 | while (status || r500_disp_int) { |
319 | /* SW interrupt */ | 253 | /* SW interrupt */ |
320 | if (status & RADEON_SW_INT_TEST) { | 254 | if (G_000040_SW_INT_EN(status)) |
321 | radeon_fence_process(rdev); | 255 | radeon_fence_process(rdev); |
322 | } | ||
323 | /* Vertical blank interrupts */ | 256 | /* Vertical blank interrupts */ |
324 | if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | 257 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) |
325 | drm_handle_vblank(rdev->ddev, 0); | 258 | drm_handle_vblank(rdev->ddev, 0); |
326 | } | 259 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) |
327 | if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | ||
328 | drm_handle_vblank(rdev->ddev, 1); | 260 | drm_handle_vblank(rdev->ddev, 1); |
329 | } | ||
330 | status = rs600_irq_ack(rdev, &r500_disp_int); | 261 | status = rs600_irq_ack(rdev, &r500_disp_int); |
331 | } | 262 | } |
263 | if (rdev->msi_enabled) { | ||
264 | switch (rdev->family) { | ||
265 | case CHIP_RS600: | ||
266 | case CHIP_RS690: | ||
267 | case CHIP_RS740: | ||
268 | msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM; | ||
269 | WREG32(RADEON_BUS_CNTL, msi_rearm); | ||
270 | WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM); | ||
271 | break; | ||
272 | default: | ||
273 | msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; | ||
274 | WREG32(RADEON_MSI_REARM_EN, msi_rearm); | ||
275 | WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); | ||
276 | break; | ||
277 | } | ||
278 | } | ||
332 | return IRQ_HANDLED; | 279 | return IRQ_HANDLED; |
333 | } | 280 | } |
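
The MSI handling added at the end of rs600_irq_process re-arms message-signalled interrupts by pulsing a re-arm bit low and then high. A minimal sketch of that pulse, assuming the BUS_CNTL field layout from the rs600d.h header below (R_00004C_BUS_CNTL with BUS_MSI_REARM at bit 20); the pointer-based mmio access is a stand-in for the driver's RREG32/WREG32:

#include <stdint.h>

#define BUS_MSI_REARM	(1u << 20)	/* S_00004C_BUS_MSI_REARM(1) in rs600d.h */

/* 'bus_cntl' points at the mapped BUS_CNTL (0x4C) register. */
static void msi_rearm_pulse(volatile uint32_t *bus_cntl)
{
	uint32_t tmp = *bus_cntl & ~BUS_MSI_REARM;

	*bus_cntl = tmp;			/* drive the re-arm bit low ... */
	*bus_cntl = tmp | BUS_MSI_REARM;	/* ... then high, so the next MSI can fire */
}
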
334 | 281 | ||
335 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) | 282 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) |
336 | { | 283 | { |
337 | if (crtc == 0) | 284 | if (crtc == 0) |
338 | return RREG32(AVIVO_D1CRTC_FRAME_COUNT); | 285 | return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); |
339 | else | 286 | else |
340 | return RREG32(AVIVO_D2CRTC_FRAME_COUNT); | 287 | return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); |
341 | } | 288 | } |
342 | 289 | ||
343 | |||
344 | /* | ||
345 | * Global GPU functions | ||
346 | */ | ||
347 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) | 290 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) |
348 | { | 291 | { |
349 | unsigned i; | 292 | unsigned i; |
350 | uint32_t tmp; | ||
351 | 293 | ||
352 | for (i = 0; i < rdev->usec_timeout; i++) { | 294 | for (i = 0; i < rdev->usec_timeout; i++) { |
353 | /* read MC_STATUS */ | 295 | if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) |
354 | tmp = RREG32_MC(RS600_MC_STATUS); | ||
355 | if (tmp & RS600_MC_STATUS_IDLE) { | ||
356 | return 0; | 296 | return 0; |
357 | } | 297 | udelay(1); |
358 | DRM_UDELAY(1); | ||
359 | } | 298 | } |
360 | return -1; | 299 | return -1; |
361 | } | 300 | } |
362 | 301 | ||
363 | void rs600_errata(struct radeon_device *rdev) | ||
364 | { | ||
365 | rdev->pll_errata = 0; | ||
366 | } | ||
367 | |||
368 | void rs600_gpu_init(struct radeon_device *rdev) | 302 | void rs600_gpu_init(struct radeon_device *rdev) |
369 | { | 303 | { |
370 | /* FIXME: HDP same place on rs600 ? */ | 304 | /* FIXME: HDP same place on rs600 ? */ |
371 | r100_hdp_reset(rdev); | 305 | r100_hdp_reset(rdev); |
372 | rv515_vga_render_disable(rdev); | ||
373 | /* FIXME: is this correct ? */ | 306 | /* FIXME: is this correct ? */ |
374 | r420_pipes_init(rdev); | 307 | r420_pipes_init(rdev); |
375 | if (rs600_mc_wait_for_idle(rdev)) { | 308 | /* Wait for mc idle */ |
376 | printk(KERN_WARNING "Failed to wait MC idle while " | 309 | if (rs600_mc_wait_for_idle(rdev)) |
377 | "programming pipes. Bad things might happen.\n"); | 310 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
378 | } | ||
379 | } | 311 | } |
380 | 312 | ||
381 | |||
382 | /* | ||
383 | * VRAM info. | ||
384 | */ | ||
385 | void rs600_vram_info(struct radeon_device *rdev) | 313 | void rs600_vram_info(struct radeon_device *rdev) |
386 | { | 314 | { |
387 | /* FIXME: to do or are these values sane ? */ | 315 | /* FIXME: to do or are these values sane ? */ |
@@ -394,31 +322,208 @@ void rs600_bandwidth_update(struct radeon_device *rdev) | |||
394 | /* FIXME: implement, should this be like rs690 ? */ | 322 | /* FIXME: implement, should this be like rs690 ? */ |
395 | } | 323 | } |
396 | 324 | ||
397 | |||
398 | /* | ||
399 | * Indirect registers accessor | ||
400 | */ | ||
401 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 325 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
402 | { | 326 | { |
403 | uint32_t r; | 327 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
404 | 328 | S_000070_MC_IND_CITF_ARB0(1)); | |
405 | WREG32(RS600_MC_INDEX, | 329 | return RREG32(R_000074_MC_IND_DATA); |
406 | ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); | ||
407 | r = RREG32(RS600_MC_DATA); | ||
408 | return r; | ||
409 | } | 330 | } |
410 | 331 | ||
411 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 332 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
412 | { | 333 | { |
413 | WREG32(RS600_MC_INDEX, | 334 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
414 | RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | | 335 | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); |
415 | ((reg) & RS600_MC_ADDR_MASK)); | 336 | WREG32(R_000074_MC_IND_DATA, v); |
416 | WREG32(RS600_MC_DATA, v); | ||
417 | } | 337 | } |
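
Both accessors use the classic index/data pair for the indirect MC register file: the target register number is written to MC_IND_INDEX together with the arbiter-select bit (plus WR_EN for writes), and the payload then moves through MC_IND_DATA. A minimal stand-alone sketch of that pattern, using the field encodings from the rs600d.h header added below and plain mmio helpers in place of the driver's RREG32/WREG32:

#include <stdint.h>

/* Offsets and field encodings from rs600d.h below. */
#define MC_IND_INDEX		0x000070
#define MC_IND_DATA		0x000074
#define MC_IND_ADDR(x)		((x) & 0xFFFF)		/* S_000070_MC_IND_ADDR */
#define MC_IND_CITF_ARB0	(1u << 21)		/* S_000070_MC_IND_CITF_ARB0(1) */
#define MC_IND_WR_EN		(1u << 23)		/* S_000070_MC_IND_WR_EN(1) */

/* Stand-ins for the driver's mmio accessors. */
static uint32_t reg_read(volatile uint32_t *mmio, uint32_t reg)
{
	return mmio[reg / 4];
}

static void reg_write(volatile uint32_t *mmio, uint32_t reg, uint32_t v)
{
	mmio[reg / 4] = v;
}

static uint32_t mc_read(volatile uint32_t *mmio, uint32_t reg)
{
	/* Select the MC register, then fetch its value through the data port. */
	reg_write(mmio, MC_IND_INDEX, MC_IND_ADDR(reg) | MC_IND_CITF_ARB0);
	return reg_read(mmio, MC_IND_DATA);
}

static void mc_write(volatile uint32_t *mmio, uint32_t reg, uint32_t v)
{
	/* Same selection, with WR_EN set so the data write lands in the MC. */
	reg_write(mmio, MC_IND_INDEX,
		  MC_IND_ADDR(reg) | MC_IND_CITF_ARB0 | MC_IND_WR_EN);
	reg_write(mmio, MC_IND_DATA, v);
}
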
418 | 338 | ||
419 | int rs600_init(struct radeon_device *rdev) | 339 | void rs600_debugfs(struct radeon_device *rdev) |
340 | { | ||
341 | if (r100_debugfs_rbbm_init(rdev)) | ||
342 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
343 | } | ||
344 | |||
345 | void rs600_set_safe_registers(struct radeon_device *rdev) | ||
420 | { | 346 | { |
421 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; | 347 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; |
422 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); | 348 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); |
349 | } | ||
350 | |||
351 | static void rs600_mc_program(struct radeon_device *rdev) | ||
352 | { | ||
353 | struct rv515_mc_save save; | ||
354 | |||
355 | /* Stops all mc clients */ | ||
356 | rv515_mc_stop(rdev, &save); | ||
357 | |||
358 | /* Wait for mc idle */ | ||
359 | if (rs600_mc_wait_for_idle(rdev)) | ||
360 | dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n"); | ||
361 | |||
362 | /* FIXME: What does AGP mean for such a chipset ? */ | ||
363 | WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
364 | WREG32_MC(R_000006_AGP_BASE, 0); | ||
365 | WREG32_MC(R_000007_AGP_BASE_2, 0); | ||
366 | /* Program MC */ | ||
367 | WREG32_MC(R_000004_MC_FB_LOCATION, | ||
368 | S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
369 | S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
370 | WREG32(R_000134_HDP_FB_LOCATION, | ||
371 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
372 | |||
373 | rv515_mc_resume(rdev, &save); | ||
374 | } | ||
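
The FB location write above packs two 16-bit fields into one register: per the S_000004_MC_FB_START/MC_FB_TOP encodings in rs600d.h, the low half holds the start of the VRAM window and the high half its top, both taken as the address shifted right by 16. A small worked example with a hypothetical 256 MiB window at GPU address 0 (the vram_start/vram_end values are made up for illustration, not taken from any specific board):

#include <stdint.h>
#include <stdio.h>

/* Field packing from rs600d.h: FB_START in bits 15:0, FB_TOP in bits 31:16. */
#define MC_FB_START(x)	(((x) & 0xFFFF) << 0)
#define MC_FB_TOP(x)	(((x) & 0xFFFF) << 16)

int main(void)
{
	/* Hypothetical 256 MiB VRAM window placed at GPU address 0. */
	uint64_t vram_start = 0x00000000;
	uint64_t vram_end   = 0x0FFFFFFF;

	uint32_t fb_location = MC_FB_START(vram_start >> 16) |
			       MC_FB_TOP(vram_end >> 16);

	/* Prints 0x0FFF0000: start page 0x0000, top page 0x0FFF. */
	printf("MC_FB_LOCATION = 0x%08X\n", (unsigned int)fb_location);
	return 0;
}
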
375 | |||
376 | static int rs600_startup(struct radeon_device *rdev) | ||
377 | { | ||
378 | int r; | ||
379 | |||
380 | rs600_mc_program(rdev); | ||
381 | /* Resume clock */ | ||
382 | rv515_clock_startup(rdev); | ||
383 | /* Initialize GPU configuration (# pipes, ...) */ | ||
384 | rs600_gpu_init(rdev); | ||
385 | /* Initialize GART (initialize after TTM so we can allocate | ||
386 | * memory through TTM but finalize after TTM) */ | ||
387 | r = rs600_gart_enable(rdev); | ||
388 | if (r) | ||
389 | return r; | ||
390 | /* Enable IRQ */ | ||
391 | rdev->irq.sw_int = true; | ||
392 | rs600_irq_set(rdev); | ||
393 | /* 1M ring buffer */ | ||
394 | r = r100_cp_init(rdev, 1024 * 1024); | ||
395 | if (r) { | ||
396 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
397 | return r; | ||
398 | } | ||
399 | r = r100_wb_init(rdev); | ||
400 | if (r) | ||
401 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
402 | r = r100_ib_init(rdev); | ||
403 | if (r) { | ||
404 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
405 | return r; | ||
406 | } | ||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | int rs600_resume(struct radeon_device *rdev) | ||
411 | { | ||
412 | /* Make sure GART is not working */ | ||
413 | rs600_gart_disable(rdev); | ||
414 | /* Resume clock before doing reset */ | ||
415 | rv515_clock_startup(rdev); | ||
416 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
417 | if (radeon_gpu_reset(rdev)) { | ||
418 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
419 | RREG32(R_000E40_RBBM_STATUS), | ||
420 | RREG32(R_0007C0_CP_STAT)); | ||
421 | } | ||
422 | /* post */ | ||
423 | atom_asic_init(rdev->mode_info.atom_context); | ||
424 | /* Resume clock after posting */ | ||
425 | rv515_clock_startup(rdev); | ||
426 | return rs600_startup(rdev); | ||
427 | } | ||
428 | |||
429 | int rs600_suspend(struct radeon_device *rdev) | ||
430 | { | ||
431 | r100_cp_disable(rdev); | ||
432 | r100_wb_disable(rdev); | ||
433 | rs600_irq_disable(rdev); | ||
434 | rs600_gart_disable(rdev); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | void rs600_fini(struct radeon_device *rdev) | ||
439 | { | ||
440 | rs600_suspend(rdev); | ||
441 | r100_cp_fini(rdev); | ||
442 | r100_wb_fini(rdev); | ||
443 | r100_ib_fini(rdev); | ||
444 | radeon_gem_fini(rdev); | ||
445 | rs600_gart_fini(rdev); | ||
446 | radeon_irq_kms_fini(rdev); | ||
447 | radeon_fence_driver_fini(rdev); | ||
448 | radeon_object_fini(rdev); | ||
449 | radeon_atombios_fini(rdev); | ||
450 | kfree(rdev->bios); | ||
451 | rdev->bios = NULL; | ||
452 | } | ||
453 | |||
454 | int rs600_init(struct radeon_device *rdev) | ||
455 | { | ||
456 | int r; | ||
457 | |||
458 | /* Disable VGA */ | ||
459 | rv515_vga_render_disable(rdev); | ||
460 | /* Initialize scratch registers */ | ||
461 | radeon_scratch_init(rdev); | ||
462 | /* Initialize surface registers */ | ||
463 | radeon_surface_init(rdev); | ||
464 | /* BIOS */ | ||
465 | if (!radeon_get_bios(rdev)) { | ||
466 | if (ASIC_IS_AVIVO(rdev)) | ||
467 | return -EINVAL; | ||
468 | } | ||
469 | if (rdev->is_atom_bios) { | ||
470 | r = radeon_atombios_init(rdev); | ||
471 | if (r) | ||
472 | return r; | ||
473 | } else { | ||
474 | dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); | ||
475 | return -EINVAL; | ||
476 | } | ||
477 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
478 | if (radeon_gpu_reset(rdev)) { | ||
479 | dev_warn(rdev->dev, | ||
480 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
481 | RREG32(R_000E40_RBBM_STATUS), | ||
482 | RREG32(R_0007C0_CP_STAT)); | ||
483 | } | ||
484 | /* check if cards are posted or not */ | ||
485 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
486 | DRM_INFO("GPU not posted. posting now...\n"); | ||
487 | atom_asic_init(rdev->mode_info.atom_context); | ||
488 | } | ||
489 | /* Initialize clocks */ | ||
490 | radeon_get_clock_info(rdev->ddev); | ||
491 | /* Initialize power management */ | ||
492 | radeon_pm_init(rdev); | ||
493 | /* Get vram information */ | ||
494 | rs600_vram_info(rdev); | ||
495 | /* Initialize memory controller (also test AGP) */ | ||
496 | r = r420_mc_init(rdev); | ||
497 | if (r) | ||
498 | return r; | ||
499 | rs600_debugfs(rdev); | ||
500 | /* Fence driver */ | ||
501 | r = radeon_fence_driver_init(rdev); | ||
502 | if (r) | ||
503 | return r; | ||
504 | r = radeon_irq_kms_init(rdev); | ||
505 | if (r) | ||
506 | return r; | ||
507 | /* Memory manager */ | ||
508 | r = radeon_object_init(rdev); | ||
509 | if (r) | ||
510 | return r; | ||
511 | r = rs600_gart_init(rdev); | ||
512 | if (r) | ||
513 | return r; | ||
514 | rs600_set_safe_registers(rdev); | ||
515 | rdev->accel_working = true; | ||
516 | r = rs600_startup(rdev); | ||
517 | if (r) { | ||
518 | /* Something went wrong with the accel init, stop accel */ | ||
519 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
520 | rs600_suspend(rdev); | ||
521 | r100_cp_fini(rdev); | ||
522 | r100_wb_fini(rdev); | ||
523 | r100_ib_fini(rdev); | ||
524 | rs600_gart_fini(rdev); | ||
525 | radeon_irq_kms_fini(rdev); | ||
526 | rdev->accel_working = false; | ||
527 | } | ||
423 | return 0; | 528 | return 0; |
424 | } | 529 | } |
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h new file mode 100644 index 000000000000..81308924859a --- /dev/null +++ b/drivers/gpu/drm/radeon/rs600d.h | |||
@@ -0,0 +1,470 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RS600D_H__ | ||
29 | #define __RS600D_H__ | ||
30 | |||
31 | /* Registers */ | ||
32 | #define R_000040_GEN_INT_CNTL 0x000040 | ||
33 | #define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) | ||
34 | #define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) | ||
35 | #define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE | ||
36 | #define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) | ||
37 | #define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) | ||
38 | #define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF | ||
39 | #define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) | ||
40 | #define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) | ||
41 | #define C_000040_CRTC2_VSYNC 0xFFFFFFBF | ||
42 | #define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) | ||
43 | #define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) | ||
44 | #define C_000040_SNAPSHOT2 0xFFFFFF7F | ||
45 | #define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) | ||
46 | #define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) | ||
47 | #define C_000040_CRTC2_VBLANK 0xFFFFFDFF | ||
48 | #define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) | ||
49 | #define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) | ||
50 | #define C_000040_FP2_DETECT 0xFFFFFBFF | ||
51 | #define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) | ||
52 | #define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1) | ||
53 | #define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF | ||
54 | #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) | ||
55 | #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) | ||
56 | #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF | ||
57 | #define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14) | ||
58 | #define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1) | ||
59 | #define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF | ||
60 | #define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15) | ||
61 | #define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1) | ||
62 | #define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF | ||
63 | #define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17) | ||
64 | #define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1) | ||
65 | #define C_000040_I2C_INT_EN 0xFFFDFFFF | ||
66 | #define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19) | ||
67 | #define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1) | ||
68 | #define C_000040_GUI_IDLE 0xFFF7FFFF | ||
69 | #define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24) | ||
70 | #define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1) | ||
71 | #define C_000040_VIPH_INT_EN 0xFEFFFFFF | ||
72 | #define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25) | ||
73 | #define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1) | ||
74 | #define C_000040_SW_INT_EN 0xFDFFFFFF | ||
75 | #define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27) | ||
76 | #define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1) | ||
77 | #define C_000040_GEYSERVILLE 0xF7FFFFFF | ||
78 | #define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28) | ||
79 | #define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1) | ||
80 | #define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF | ||
81 | #define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29) | ||
82 | #define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1) | ||
83 | #define C_000040_DVI_I2C_INT 0xDFFFFFFF | ||
84 | #define S_000040_GUIDMA(x) (((x) & 0x1) << 30) | ||
85 | #define G_000040_GUIDMA(x) (((x) >> 30) & 0x1) | ||
86 | #define C_000040_GUIDMA 0xBFFFFFFF | ||
87 | #define S_000040_VIDDMA(x) (((x) & 0x1) << 31) | ||
88 | #define G_000040_VIDDMA(x) (((x) >> 31) & 0x1) | ||
89 | #define C_000040_VIDDMA 0x7FFFFFFF | ||
90 | #define R_000044_GEN_INT_STATUS 0x000044 | ||
91 | #define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0) | ||
92 | #define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1) | ||
93 | #define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE | ||
94 | #define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1) | ||
95 | #define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1) | ||
96 | #define C_000044_VGA_INT_STAT 0xFFFFFFFD | ||
97 | #define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8) | ||
98 | #define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1) | ||
99 | #define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF | ||
100 | #define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12) | ||
101 | #define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1) | ||
102 | #define C_000044_DMA_VIPH0_INT 0xFFFFEFFF | ||
103 | #define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13) | ||
104 | #define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1) | ||
105 | #define C_000044_DMA_VIPH1_INT 0xFFFFDFFF | ||
106 | #define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14) | ||
107 | #define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1) | ||
108 | #define C_000044_DMA_VIPH2_INT 0xFFFFBFFF | ||
109 | #define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15) | ||
110 | #define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1) | ||
111 | #define C_000044_DMA_VIPH3_INT 0xFFFF7FFF | ||
112 | #define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16) | ||
113 | #define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1) | ||
114 | #define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF | ||
115 | #define S_000044_I2C_INT(x) (((x) & 0x1) << 17) | ||
116 | #define G_000044_I2C_INT(x) (((x) >> 17) & 0x1) | ||
117 | #define C_000044_I2C_INT 0xFFFDFFFF | ||
118 | #define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18) | ||
119 | #define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1) | ||
120 | #define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF | ||
121 | #define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19) | ||
122 | #define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1) | ||
123 | #define C_000044_GUI_IDLE_STAT 0xFFF7FFFF | ||
124 | #define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20) | ||
125 | #define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1) | ||
126 | #define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF | ||
127 | #define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21) | ||
128 | #define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1) | ||
129 | #define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF | ||
130 | #define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22) | ||
131 | #define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1) | ||
132 | #define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF | ||
133 | #define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23) | ||
134 | #define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1) | ||
135 | #define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF | ||
136 | #define S_000044_VIPH_INT(x) (((x) & 0x1) << 24) | ||
137 | #define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1) | ||
138 | #define C_000044_VIPH_INT 0xFEFFFFFF | ||
139 | #define S_000044_SW_INT(x) (((x) & 0x1) << 25) | ||
140 | #define G_000044_SW_INT(x) (((x) >> 25) & 0x1) | ||
141 | #define C_000044_SW_INT 0xFDFFFFFF | ||
142 | #define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26) | ||
143 | #define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1) | ||
144 | #define C_000044_SW_INT_SET 0xFBFFFFFF | ||
145 | #define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27) | ||
146 | #define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 0x1) | ||
147 | #define C_000044_IDCT_INT_STAT 0xF7FFFFFF | ||
148 | #define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30) | ||
149 | #define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1) | ||
150 | #define C_000044_GUIDMA_STAT 0xBFFFFFFF | ||
151 | #define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31) | ||
152 | #define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1) | ||
153 | #define C_000044_VIDDMA_STAT 0x7FFFFFFF | ||
154 | #define R_00004C_BUS_CNTL 0x00004C | ||
155 | #define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14) | ||
156 | #define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1) | ||
157 | #define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF | ||
158 | #define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20) | ||
159 | #define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1) | ||
160 | #define C_00004C_BUS_MSI_REARM 0xFFEFFFFF | ||
161 | #define R_000070_MC_IND_INDEX 0x000070 | ||
162 | #define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0) | ||
163 | #define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF) | ||
164 | #define C_000070_MC_IND_ADDR 0xFFFF0000 | ||
165 | #define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16) | ||
166 | #define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1) | ||
167 | #define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF | ||
168 | #define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17) | ||
169 | #define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1) | ||
170 | #define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF | ||
171 | #define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18) | ||
172 | #define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1) | ||
173 | #define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF | ||
174 | #define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19) | ||
175 | #define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1) | ||
176 | #define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF | ||
177 | #define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20) | ||
178 | #define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1) | ||
179 | #define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF | ||
180 | #define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21) | ||
181 | #define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1) | ||
182 | #define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF | ||
183 | #define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22) | ||
184 | #define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1) | ||
185 | #define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF | ||
186 | #define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23) | ||
187 | #define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1) | ||
188 | #define C_000070_MC_IND_WR_EN 0xFF7FFFFF | ||
189 | #define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24) | ||
190 | #define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1) | ||
191 | #define C_000070_MC_IND_RD_INV 0xFEFFFFFF | ||
192 | #define R_000074_MC_IND_DATA 0x000074 | ||
193 | #define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) | ||
194 | #define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) | ||
195 | #define C_000074_MC_IND_DATA 0x00000000 | ||
196 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
197 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
198 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
199 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
200 | #define R_0007C0_CP_STAT 0x0007C0 | ||
201 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
202 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
203 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
204 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
205 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
206 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
207 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
208 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
209 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
210 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
211 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
212 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
213 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
214 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
215 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
216 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
217 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
218 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
219 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
220 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
221 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
222 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
223 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
224 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
225 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
226 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
227 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
228 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
229 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
230 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
231 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
232 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
233 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
234 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
235 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
236 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
237 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
238 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
239 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
240 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
241 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
242 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
243 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
244 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
245 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
246 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
247 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
248 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
249 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
250 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
251 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
252 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
253 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
254 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
255 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
256 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
257 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
258 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
259 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
260 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
261 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
262 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
263 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
264 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
265 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
266 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
267 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
268 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
269 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
270 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
271 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
272 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
273 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
274 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
275 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
276 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
277 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
278 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
279 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
280 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
281 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
282 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
283 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
284 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
285 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
286 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
287 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
288 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
289 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
290 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
291 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
292 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
293 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
294 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
295 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
296 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
297 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
298 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
299 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
300 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
301 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
302 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
303 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
304 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
305 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
306 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
307 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
308 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
309 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
310 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
311 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
312 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
313 | #define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4 | ||
314 | #define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) | ||
315 | #define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) | ||
316 | #define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000 | ||
317 | #define R_006534_D1MODE_VBLANK_STATUS 0x006534 | ||
318 | #define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) | ||
319 | #define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) | ||
320 | #define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE | ||
321 | #define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) | ||
322 | #define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) | ||
323 | #define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF | ||
324 | #define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) | ||
325 | #define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) | ||
326 | #define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF | ||
327 | #define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) | ||
328 | #define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) | ||
329 | #define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF | ||
330 | #define R_006540_DxMODE_INT_MASK 0x006540 | ||
331 | #define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0) | ||
332 | #define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1) | ||
333 | #define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE | ||
334 | #define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4) | ||
335 | #define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1) | ||
336 | #define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF | ||
337 | #define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8) | ||
338 | #define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1) | ||
339 | #define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF | ||
340 | #define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12) | ||
341 | #define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1) | ||
342 | #define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF | ||
343 | #define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30) | ||
344 | #define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1) | ||
345 | #define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF | ||
346 | #define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31) | ||
347 | #define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1) | ||
348 | #define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF | ||
349 | #define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4 | ||
350 | #define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) | ||
351 | #define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) | ||
352 | #define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000 | ||
353 | #define R_006D34_D2MODE_VBLANK_STATUS 0x006D34 | ||
354 | #define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) | ||
355 | #define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) | ||
356 | #define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE | ||
357 | #define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) | ||
358 | #define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) | ||
359 | #define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF | ||
360 | #define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) | ||
361 | #define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) | ||
362 | #define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF | ||
363 | #define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) | ||
364 | #define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) | ||
365 | #define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF | ||
366 | #define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC | ||
367 | #define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4) | ||
368 | #define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1) | ||
369 | #define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF | ||
370 | #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) | ||
371 | #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) | ||
372 | #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF | ||
373 | |||
374 | |||
375 | /* MC registers */ | ||
376 | #define R_000000_MC_STATUS 0x000000 | ||
377 | #define S_000000_MC_IDLE(x) (((x) & 0x1) << 0) | ||
378 | #define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1) | ||
379 | #define C_000000_MC_IDLE 0xFFFFFFFE | ||
380 | #define R_000004_MC_FB_LOCATION 0x000004 | ||
381 | #define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
382 | #define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
383 | #define C_000004_MC_FB_START 0xFFFF0000 | ||
384 | #define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
385 | #define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
386 | #define C_000004_MC_FB_TOP 0x0000FFFF | ||
387 | #define R_000005_MC_AGP_LOCATION 0x000005 | ||
388 | #define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
389 | #define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
390 | #define C_000005_MC_AGP_START 0xFFFF0000 | ||
391 | #define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
392 | #define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
393 | #define C_000005_MC_AGP_TOP 0x0000FFFF | ||
394 | #define R_000006_AGP_BASE 0x000006 | ||
395 | #define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
396 | #define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
397 | #define C_000006_AGP_BASE_ADDR 0x00000000 | ||
398 | #define R_000007_AGP_BASE_2 0x000007 | ||
399 | #define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
400 | #define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
401 | #define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
402 | #define R_000009_MC_CNTL1 0x000009 | ||
403 | #define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26) | ||
404 | #define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1) | ||
405 | #define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF | ||
406 | /* FIXME: don't know the various field sizes, need feedback from AMD */ | ||
407 | #define R_000100_MC_PT0_CNTL 0x000100 | ||
408 | #define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0) | ||
409 | #define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1) | ||
410 | #define C_000100_ENABLE_PT 0xFFFFFFFE | ||
411 | #define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15) | ||
412 | #define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7) | ||
413 | #define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF | ||
414 | #define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21) | ||
415 | #define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7) | ||
416 | #define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF | ||
417 | #define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28) | ||
418 | #define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1) | ||
419 | #define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF | ||
420 | #define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29) | ||
421 | #define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1) | ||
422 | #define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF | ||
423 | #define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102 | ||
424 | #define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0) | ||
425 | #define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1) | ||
426 | #define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE | ||
427 | #define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1) | ||
428 | #define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3) | ||
429 | #define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9 | ||
430 | #define V_000102_PAGE_TABLE_FLAT 0 | ||
431 | /* R600 documentation suggests that this should be a number of pages */ | ||
432 | #define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112 | ||
433 | #define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114 | ||
434 | #define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C | ||
435 | #define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C | ||
436 | #define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C | ||
437 | #define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C | ||
438 | #define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C | ||
439 | #define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0) | ||
440 | #define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1) | ||
441 | #define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE | ||
442 | #define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1) | ||
443 | #define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1) | ||
444 | #define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD | ||
445 | #define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8) | ||
446 | #define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3) | ||
447 | #define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF | ||
448 | #define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0 | ||
449 | #define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1 | ||
450 | #define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2 | ||
451 | #define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3 | ||
452 | #define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10) | ||
453 | #define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1) | ||
454 | #define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF | ||
455 | #define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0 | ||
456 | #define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1 | ||
457 | #define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11) | ||
458 | #define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7) | ||
459 | #define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF | ||
460 | #define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14) | ||
461 | #define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1) | ||
462 | #define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF | ||
463 | #define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15) | ||
464 | #define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7) | ||
465 | #define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF | ||
466 | #define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20) | ||
467 | #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) | ||
468 | #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF | ||
469 | |||
470 | #endif | ||
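
The header follows the S_/G_/C_ convention used by the other per-ASIC register headers in this series: for every register R_<offset>_<NAME>, S_<offset>_<FIELD>(x) shifts a value into the field, G_<offset>_<FIELD>(x) extracts it, C_<offset>_<FIELD> is the mask that clears it, and V_<offset>_* names specific field values. A short sketch of the read-modify-write idiom this enables, built from the MC_CNTL1 definitions above (the helper names are illustrative, not part of the driver):

#include <stdint.h>

/* Copied from the header above. */
#define S_000009_ENABLE_PAGE_TABLES(x)	(((x) & 0x1) << 26)
#define G_000009_ENABLE_PAGE_TABLES(x)	(((x) >> 26) & 0x1)
#define C_000009_ENABLE_PAGE_TABLES	0xFBFFFFFF

/* Read-modify-write on a register image: clear the field, then set it. */
static uint32_t enable_page_tables(uint32_t mc_cntl1, int enable)
{
	mc_cntl1 &= C_000009_ENABLE_PAGE_TABLES;
	mc_cntl1 |= S_000009_ENABLE_PAGE_TABLES(enable ? 1 : 0);
	return mc_cntl1;
}

/* Testing the field on a value read back from the hardware. */
static int page_tables_enabled(uint32_t mc_cntl1)
{
	return G_000009_ENABLE_PAGE_TABLES(mc_cntl1);
}
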
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 7a0098ddf977..27547175cf93 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -26,105 +26,29 @@ | |||
26 | * Jerome Glisse | 26 | * Jerome Glisse |
27 | */ | 27 | */ |
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | ||
30 | #include "radeon.h" | 29 | #include "radeon.h" |
31 | #include "rs690r.h" | ||
32 | #include "atom.h" | 30 | #include "atom.h" |
33 | #include "atom-bits.h" | 31 | #include "rs690d.h" |
34 | |||
35 | /* rs690,rs740 depends on : */ | ||
36 | void r100_hdp_reset(struct radeon_device *rdev); | ||
37 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
38 | void r420_pipes_init(struct radeon_device *rdev); | ||
39 | void rs400_gart_disable(struct radeon_device *rdev); | ||
40 | int rs400_gart_enable(struct radeon_device *rdev); | ||
41 | void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
42 | void rs600_mc_disable_clients(struct radeon_device *rdev); | ||
43 | |||
44 | /* This files gather functions specifics to : | ||
45 | * rs690,rs740 | ||
46 | * | ||
47 | * Some of these functions might be used by newer ASICs. | ||
48 | */ | ||
49 | void rs690_gpu_init(struct radeon_device *rdev); | ||
50 | int rs690_mc_wait_for_idle(struct radeon_device *rdev); | ||
51 | |||
52 | |||
53 | /* | ||
54 | * MC functions. | ||
55 | */ | ||
56 | int rs690_mc_init(struct radeon_device *rdev) | ||
57 | { | ||
58 | uint32_t tmp; | ||
59 | int r; | ||
60 | |||
61 | if (r100_debugfs_rbbm_init(rdev)) { | ||
62 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
63 | } | ||
64 | |||
65 | rs690_gpu_init(rdev); | ||
66 | rs400_gart_disable(rdev); | ||
67 | |||
68 | /* Setup GPU memory space */ | ||
69 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
70 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | ||
71 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | ||
72 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
73 | r = radeon_mc_setup(rdev); | ||
74 | if (r) { | ||
75 | return r; | ||
76 | } | ||
77 | |||
78 | /* Program GPU memory space */ | ||
79 | rs600_mc_disable_clients(rdev); | ||
80 | if (rs690_mc_wait_for_idle(rdev)) { | ||
81 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
82 | "programming pipes. Bad things might happen.\n"); | ||
83 | } | ||
84 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
85 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); | ||
86 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); | ||
87 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); | ||
88 | /* FIXME: Does this reg exist on RS480,RS740 ? */ | ||
89 | WREG32(0x310, rdev->mc.vram_location); | ||
90 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | void rs690_mc_fini(struct radeon_device *rdev) | ||
95 | { | ||
96 | } | ||
97 | |||
98 | 32 | ||
99 | /* | 33 | static int rs690_mc_wait_for_idle(struct radeon_device *rdev) |
100 | * Global GPU functions | ||
101 | */ | ||
102 | int rs690_mc_wait_for_idle(struct radeon_device *rdev) | ||
103 | { | 34 | { |
104 | unsigned i; | 35 | unsigned i; |
105 | uint32_t tmp; | 36 | uint32_t tmp; |
106 | 37 | ||
107 | for (i = 0; i < rdev->usec_timeout; i++) { | 38 | for (i = 0; i < rdev->usec_timeout; i++) { |
108 | /* read MC_STATUS */ | 39 | /* read MC_STATUS */ |
109 | tmp = RREG32_MC(RS690_MC_STATUS); | 40 | tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS); |
110 | if (tmp & RS690_MC_STATUS_IDLE) { | 41 | if (G_000090_MC_SYSTEM_IDLE(tmp)) |
111 | return 0; | 42 | return 0; |
112 | } | 43 | udelay(1); |
113 | DRM_UDELAY(1); | ||
114 | } | 44 | } |
115 | return -1; | 45 | return -1; |
116 | } | 46 | } |
117 | 47 | ||
118 | void rs690_errata(struct radeon_device *rdev) | 48 | static void rs690_gpu_init(struct radeon_device *rdev) |
119 | { | ||
120 | rdev->pll_errata = 0; | ||
121 | } | ||
122 | |||
123 | void rs690_gpu_init(struct radeon_device *rdev) | ||
124 | { | 49 | { |
125 | /* FIXME: HDP same place on rs690 ? */ | 50 | /* FIXME: HDP same place on rs690 ? */ |
126 | r100_hdp_reset(rdev); | 51 | r100_hdp_reset(rdev); |
127 | rv515_vga_render_disable(rdev); | ||
128 | /* FIXME: is this correct ? */ | 52 | /* FIXME: is this correct ? */ |
129 | r420_pipes_init(rdev); | 53 | r420_pipes_init(rdev); |
130 | if (rs690_mc_wait_for_idle(rdev)) { | 54 | if (rs690_mc_wait_for_idle(rdev)) { |
@@ -133,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev) | |||
133 | } | 57 | } |
134 | } | 58 | } |
135 | 59 | ||
136 | |||
137 | /* | ||
138 | * VRAM info. | ||
139 | */ | ||
140 | void rs690_pm_info(struct radeon_device *rdev) | 60 | void rs690_pm_info(struct radeon_device *rdev) |
141 | { | 61 | { |
142 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | 62 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
@@ -250,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
250 | /* | 170 | /* |
251 | * Line Buffer Setup | 171 | * Line Buffer Setup |
252 | * There is a single line buffer shared by both display controllers. | 172 | * There is a single line buffer shared by both display controllers. |
253 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | 173 | * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between |
254 | * the display controllers. The partitioning can either be done | 174 | * the display controllers. The partitioning can either be done |
255 | * manually or via one of four preset allocations specified in bits 1:0: | 175 | * manually or via one of four preset allocations specified in bits 1:0: |
256 | * 0 - line buffer is divided in half and shared between crtc | 176 | * 0 - line buffer is divided in half and shared between crtc |
257 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | 177 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 |
258 | * 2 - D1 gets the whole buffer | 178 | * 2 - D1 gets the whole buffer |
259 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | 179 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 |
260 | * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual | 180 | * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual |
261 | * allocation mode. In manual allocation mode, D1 always starts at 0, | 181 | * allocation mode. In manual allocation mode, D1 always starts at 0, |
262 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. | 182 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. |
263 | */ | 183 | */ |
264 | tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; | 184 | tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT; |
265 | tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; | 185 | tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE; |
266 | /* auto */ | 186 | /* auto */ |
267 | if (mode1 && mode2) { | 187 | if (mode1 && mode2) { |
268 | if (mode1->hdisplay > mode2->hdisplay) { | 188 | if (mode1->hdisplay > mode2->hdisplay) { |
269 | if (mode1->hdisplay > 2560) | 189 | if (mode1->hdisplay > 2560) |
270 | tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | 190 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; |
271 | else | 191 | else |
272 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 192 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
273 | } else if (mode2->hdisplay > mode1->hdisplay) { | 193 | } else if (mode2->hdisplay > mode1->hdisplay) { |
274 | if (mode2->hdisplay > 2560) | 194 | if (mode2->hdisplay > 2560) |
275 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 195 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
276 | else | 196 | else |
277 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 197 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
278 | } else | 198 | } else |
279 | tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 199 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
280 | } else if (mode1) { | 200 | } else if (mode1) { |
281 | tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; | 201 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY; |
282 | } else if (mode2) { | 202 | } else if (mode2) { |
283 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 203 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
284 | } | 204 | } |
285 | WREG32(DC_LB_MEMORY_SPLIT, tmp); | 205 | WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); |
286 | } | 206 | } |
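
The block comment above also describes a manual allocation mode: bit 2 of DC_LB_MEMORY_SPLIT selects it, D1 always starts at 0, D1's end divided by two goes in bits 14:4, and D2 takes whatever is left. A tiny sketch of composing such a value, with the bit positions taken from that comment only (treat them as illustrative rather than authoritative):

#include <stdint.h>

/* Bit layout per the comment above: bit 2 = manual mode, bits 14:4 = D1 end / 2. */
static uint32_t lb_manual_split(uint32_t d1_end)
{
	uint32_t val = 1u << 2;			/* switch to manual allocation */

	val |= ((d1_end / 2) & 0x7FF) << 4;	/* D1 end / 2 in bits 14:4 */
	return val;				/* D2 allocation follows D1 */
}
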
287 | 207 | ||
288 | struct rs690_watermark { | 208 | struct rs690_watermark { |
@@ -487,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
487 | * option. | 407 | * option. |
488 | */ | 408 | */ |
489 | if (rdev->disp_priority == 2) { | 409 | if (rdev->disp_priority == 2) { |
490 | tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); | 410 | tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); |
491 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | 411 | tmp &= C_000104_MC_DISP0R_INIT_LAT; |
492 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | 412 | tmp &= C_000104_MC_DISP1R_INIT_LAT; |
493 | if (mode1) | ||
494 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
495 | if (mode0) | 413 | if (mode0) |
496 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | 414 | tmp |= S_000104_MC_DISP0R_INIT_LAT(1); |
497 | WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); | 415 | if (mode1) |
416 | tmp |= S_000104_MC_DISP1R_INIT_LAT(1); | ||
417 | WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp); | ||
498 | } | 418 | } |
499 | rs690_line_buffer_adjust(rdev, mode0, mode1); | 419 | rs690_line_buffer_adjust(rdev, mode0, mode1); |
500 | 420 | ||
501 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) | 421 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) |
502 | WREG32(DCP_CONTROL, 0); | 422 | WREG32(R_006C9C_DCP_CONTROL, 0); |
503 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | 423 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) |
504 | WREG32(DCP_CONTROL, 2); | 424 | WREG32(R_006C9C_DCP_CONTROL, 2); |
505 | 425 | ||
506 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | 426 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); |
507 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | 427 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); |
508 | 428 | ||
509 | tmp = (wm0.lb_request_fifo_depth - 1); | 429 | tmp = (wm0.lb_request_fifo_depth - 1); |
510 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; | 430 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; |
511 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | 431 | WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); |
512 | 432 | ||
513 | if (mode0 && mode1) { | 433 | if (mode0 && mode1) { |
514 | if (rfixed_trunc(wm0.dbpp) > 64) | 434 | if (rfixed_trunc(wm0.dbpp) > 64) |
@@ -561,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
561 | priority_mark12.full = 0; | 481 | priority_mark12.full = 0; |
562 | if (wm1.priority_mark_max.full > priority_mark12.full) | 482 | if (wm1.priority_mark_max.full > priority_mark12.full) |
563 | priority_mark12.full = wm1.priority_mark_max.full; | 483 | priority_mark12.full = wm1.priority_mark_max.full; |
564 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 484 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
565 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 485 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
566 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 486 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); |
567 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 487 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); |
568 | } else if (mode0) { | 488 | } else if (mode0) { |
569 | if (rfixed_trunc(wm0.dbpp) > 64) | 489 | if (rfixed_trunc(wm0.dbpp) > 64) |
570 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 490 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); |
@@ -591,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
591 | priority_mark02.full = 0; | 511 | priority_mark02.full = 0; |
592 | if (wm0.priority_mark_max.full > priority_mark02.full) | 512 | if (wm0.priority_mark_max.full > priority_mark02.full) |
593 | priority_mark02.full = wm0.priority_mark_max.full; | 513 | priority_mark02.full = wm0.priority_mark_max.full; |
594 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 514 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
595 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 515 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
596 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 516 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, |
597 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 517 | S_006D48_D2MODE_PRIORITY_A_OFF(1)); |
518 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, | ||
519 | S_006D4C_D2MODE_PRIORITY_B_OFF(1)); | ||
598 | } else { | 520 | } else { |
599 | if (rfixed_trunc(wm1.dbpp) > 64) | 521 | if (rfixed_trunc(wm1.dbpp) > 64) |
600 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | 522 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); |
@@ -621,30 +543,205 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
621 | priority_mark12.full = 0; | 543 | priority_mark12.full = 0; |
622 | if (wm1.priority_mark_max.full > priority_mark12.full) | 544 | if (wm1.priority_mark_max.full > priority_mark12.full) |
623 | priority_mark12.full = wm1.priority_mark_max.full; | 545 | priority_mark12.full = wm1.priority_mark_max.full; |
624 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 546 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, |
625 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 547 | S_006548_D1MODE_PRIORITY_A_OFF(1)); |
626 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 548 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, |
627 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 549 | S_00654C_D1MODE_PRIORITY_B_OFF(1)); |
550 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
551 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
628 | } | 552 | } |
629 | } | 553 | } |
630 | 554 | ||
631 | /* | ||
632 | * Indirect registers accessor | ||
633 | */ | ||
634 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 555 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
635 | { | 556 | { |
636 | uint32_t r; | 557 | uint32_t r; |
637 | 558 | ||
638 | WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK)); | 559 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); |
639 | r = RREG32(RS690_MC_DATA); | 560 | r = RREG32(R_00007C_MC_DATA); |
640 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK); | 561 | WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); |
641 | return r; | 562 | return r; |
642 | } | 563 | } |
643 | 564 | ||
644 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 565 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
645 | { | 566 | { |
646 | WREG32(RS690_MC_INDEX, | 567 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | |
647 | RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK)); | 568 | S_000078_MC_IND_WR_EN(1)); |
648 | WREG32(RS690_MC_DATA, v); | 569 | WREG32(R_00007C_MC_DATA, v); |
649 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); | 570 | WREG32(R_000078_MC_INDEX, 0x7F); |
571 | } | ||
572 | |||
573 | void rs690_mc_program(struct radeon_device *rdev) | ||
574 | { | ||
575 | struct rv515_mc_save save; | ||
576 | |||
577 | /* Stops all mc clients */ | ||
578 | rv515_mc_stop(rdev, &save); | ||
579 | |||
580 | /* Wait for mc idle */ | ||
581 | if (rs690_mc_wait_for_idle(rdev)) | ||
582 | dev_warn(rdev->dev, "Wait for MC idle timed out before updating MC.\n"); | ||
583 | /* Program MC, should be a 32-bit limited address space */ | ||
584 | WREG32_MC(R_000100_MCCFG_FB_LOCATION, | ||
585 | S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
586 | S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
587 | WREG32(R_000134_HDP_FB_LOCATION, | ||
588 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
589 | |||
590 | rv515_mc_resume(rdev, &save); | ||
591 | } | ||
592 | |||
593 | static int rs690_startup(struct radeon_device *rdev) | ||
594 | { | ||
595 | int r; | ||
596 | |||
597 | rs690_mc_program(rdev); | ||
598 | /* Resume clock */ | ||
599 | rv515_clock_startup(rdev); | ||
600 | /* Initialize GPU configuration (# pipes, ...) */ | ||
601 | rs690_gpu_init(rdev); | ||
602 | /* Initialize GART (initialize after TTM so we can allocate | ||
603 | * memory through TTM but finalize after TTM) */ | ||
604 | r = rs400_gart_enable(rdev); | ||
605 | if (r) | ||
606 | return r; | ||
607 | /* Enable IRQ */ | ||
608 | rdev->irq.sw_int = true; | ||
609 | rs600_irq_set(rdev); | ||
610 | /* 1M ring buffer */ | ||
611 | r = r100_cp_init(rdev, 1024 * 1024); | ||
612 | if (r) { | ||
613 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
614 | return r; | ||
615 | } | ||
616 | r = r100_wb_init(rdev); | ||
617 | if (r) | ||
618 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
619 | r = r100_ib_init(rdev); | ||
620 | if (r) { | ||
621 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
622 | return r; | ||
623 | } | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | int rs690_resume(struct radeon_device *rdev) | ||
628 | { | ||
629 | /* Make sure GART is not working */ | ||
630 | rs400_gart_disable(rdev); | ||
631 | /* Resume clock before doing reset */ | ||
632 | rv515_clock_startup(rdev); | ||
633 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
634 | if (radeon_gpu_reset(rdev)) { | ||
635 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
636 | RREG32(R_000E40_RBBM_STATUS), | ||
637 | RREG32(R_0007C0_CP_STAT)); | ||
638 | } | ||
639 | /* post */ | ||
640 | atom_asic_init(rdev->mode_info.atom_context); | ||
641 | /* Resume clock after posting */ | ||
642 | rv515_clock_startup(rdev); | ||
643 | return rs690_startup(rdev); | ||
644 | } | ||
645 | |||
646 | int rs690_suspend(struct radeon_device *rdev) | ||
647 | { | ||
648 | r100_cp_disable(rdev); | ||
649 | r100_wb_disable(rdev); | ||
650 | rs600_irq_disable(rdev); | ||
651 | rs400_gart_disable(rdev); | ||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | void rs690_fini(struct radeon_device *rdev) | ||
656 | { | ||
657 | rs690_suspend(rdev); | ||
658 | r100_cp_fini(rdev); | ||
659 | r100_wb_fini(rdev); | ||
660 | r100_ib_fini(rdev); | ||
661 | radeon_gem_fini(rdev); | ||
662 | rs400_gart_fini(rdev); | ||
663 | radeon_irq_kms_fini(rdev); | ||
664 | radeon_fence_driver_fini(rdev); | ||
665 | radeon_object_fini(rdev); | ||
666 | radeon_atombios_fini(rdev); | ||
667 | kfree(rdev->bios); | ||
668 | rdev->bios = NULL; | ||
669 | } | ||
670 | |||
671 | int rs690_init(struct radeon_device *rdev) | ||
672 | { | ||
673 | int r; | ||
674 | |||
675 | /* Disable VGA */ | ||
676 | rv515_vga_render_disable(rdev); | ||
677 | /* Initialize scratch registers */ | ||
678 | radeon_scratch_init(rdev); | ||
679 | /* Initialize surface registers */ | ||
680 | radeon_surface_init(rdev); | ||
681 | /* TODO: disable VGA; need to use VGA request */ | ||
682 | /* BIOS */ | ||
683 | if (!radeon_get_bios(rdev)) { | ||
684 | if (ASIC_IS_AVIVO(rdev)) | ||
685 | return -EINVAL; | ||
686 | } | ||
687 | if (rdev->is_atom_bios) { | ||
688 | r = radeon_atombios_init(rdev); | ||
689 | if (r) | ||
690 | return r; | ||
691 | } else { | ||
692 | dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n"); | ||
693 | return -EINVAL; | ||
694 | } | ||
695 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
696 | if (radeon_gpu_reset(rdev)) { | ||
697 | dev_warn(rdev->dev, | ||
698 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
699 | RREG32(R_000E40_RBBM_STATUS), | ||
700 | RREG32(R_0007C0_CP_STAT)); | ||
701 | } | ||
702 | /* check if cards are posted or not */ | ||
703 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
704 | DRM_INFO("GPU not posted. posting now...\n"); | ||
705 | atom_asic_init(rdev->mode_info.atom_context); | ||
706 | } | ||
707 | /* Initialize clocks */ | ||
708 | radeon_get_clock_info(rdev->ddev); | ||
709 | /* Initialize power management */ | ||
710 | radeon_pm_init(rdev); | ||
711 | /* Get vram information */ | ||
712 | rs690_vram_info(rdev); | ||
713 | /* Initialize memory controller (also test AGP) */ | ||
714 | r = r420_mc_init(rdev); | ||
715 | if (r) | ||
716 | return r; | ||
717 | rv515_debugfs(rdev); | ||
718 | /* Fence driver */ | ||
719 | r = radeon_fence_driver_init(rdev); | ||
720 | if (r) | ||
721 | return r; | ||
722 | r = radeon_irq_kms_init(rdev); | ||
723 | if (r) | ||
724 | return r; | ||
725 | /* Memory manager */ | ||
726 | r = radeon_object_init(rdev); | ||
727 | if (r) | ||
728 | return r; | ||
729 | r = rs400_gart_init(rdev); | ||
730 | if (r) | ||
731 | return r; | ||
732 | rs600_set_safe_registers(rdev); | ||
733 | rdev->accel_working = true; | ||
734 | r = rs690_startup(rdev); | ||
735 | if (r) { | ||
736 | /* Something went wrong with the accel init, stop accel */ | ||
737 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
738 | rs690_suspend(rdev); | ||
739 | r100_cp_fini(rdev); | ||
740 | r100_wb_fini(rdev); | ||
741 | r100_ib_fini(rdev); | ||
742 | rs400_gart_fini(rdev); | ||
743 | radeon_irq_kms_fini(rdev); | ||
744 | rdev->accel_working = false; | ||
745 | } | ||
746 | return 0; | ||
650 | } | 747 | } |
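The rs690_mc_rreg()/rs690_mc_wreg() pair above keeps the usual Radeon index/data idiom for the memory-controller block: the register offset is latched into MC_INDEX (with a write-enable bit for stores), the payload then moves through MC_DATA, and the index is parked again afterwards. A minimal sketch of that flow, assuming the R_000078_*/S_000078_*/C_000078_* macros introduced in rs690d.h below and hypothetical mmio_read32()/mmio_write32() helpers standing in for the driver's RREG32()/WREG32():

	/* Illustrative sketch only; mmio_read32()/mmio_write32() are
	 * hypothetical stand-ins for the driver's RREG32()/WREG32(). */
	static uint32_t mc_indirect_read(uint32_t reg)
	{
		uint32_t val;

		/* select the indirect register; MC_IND_WR_EN stays clear for reads */
		mmio_write32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
		val = mmio_read32(R_00007C_MC_DATA);
		/* park the index again (all address bits set, as the patch does) */
		mmio_write32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
		return val;
	}

	static void mc_indirect_write(uint32_t reg, uint32_t val)
	{
		/* stores need the write-enable bit alongside the index */
		mmio_write32(R_000078_MC_INDEX,
			     S_000078_MC_IND_ADDR(reg) | S_000078_MC_IND_WR_EN(1));
		mmio_write32(R_00007C_MC_DATA, val);
		mmio_write32(R_000078_MC_INDEX, 0x7F);	/* write-ack value used above */
	}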
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h new file mode 100644 index 000000000000..62d31e7a897f --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690d.h | |||
@@ -0,0 +1,307 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RS690D_H__ | ||
29 | #define __RS690D_H__ | ||
30 | |||
31 | /* Registers */ | ||
32 | #define R_000078_MC_INDEX 0x000078 | ||
33 | #define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) | ||
34 | #define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF) | ||
35 | #define C_000078_MC_IND_ADDR 0xFFFFFE00 | ||
36 | #define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9) | ||
37 | #define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1) | ||
38 | #define C_000078_MC_IND_WR_EN 0xFFFFFDFF | ||
39 | #define R_00007C_MC_DATA 0x00007C | ||
40 | #define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0) | ||
41 | #define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF) | ||
42 | #define C_00007C_MC_DATA 0x00000000 | ||
43 | #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 | ||
44 | #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) | ||
45 | #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) | ||
46 | #define C_0000F8_CONFIG_MEMSIZE 0x00000000 | ||
47 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
48 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
49 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
50 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
51 | #define R_0007C0_CP_STAT 0x0007C0 | ||
52 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
53 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
54 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
55 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
56 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
57 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
58 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
59 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
60 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
61 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
62 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
63 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
64 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
65 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
66 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
67 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
68 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
69 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
70 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
71 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
72 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
73 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
74 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
75 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
76 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
77 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
78 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
79 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
80 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
81 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
82 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
83 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
84 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
85 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
86 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
87 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
88 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
89 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
90 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
91 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
92 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
93 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
94 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
95 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
96 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
97 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
98 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
99 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
100 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
101 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
102 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
103 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
104 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
105 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
106 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
107 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
108 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
109 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
110 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
111 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
112 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
113 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
114 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
115 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
116 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
117 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
118 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
119 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
120 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
121 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
122 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
123 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
124 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
125 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
126 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
127 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
128 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
129 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
130 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
131 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
132 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
133 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
134 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
135 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
136 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
137 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
138 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
139 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
140 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
141 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
142 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
143 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
144 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
145 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
146 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
147 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
148 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
149 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
150 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
151 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
152 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
153 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
154 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
155 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
156 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
157 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
158 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
159 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
160 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
161 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
162 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
163 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
164 | #define R_006520_DC_LB_MEMORY_SPLIT 0x006520 | ||
165 | #define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0) | ||
166 | #define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3) | ||
167 | #define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC | ||
168 | #define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2) | ||
169 | #define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1) | ||
170 | #define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB | ||
171 | #define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
172 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
173 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
174 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
175 | #define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4) | ||
176 | #define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF) | ||
177 | #define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F | ||
178 | #define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 | ||
179 | #define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
180 | #define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
181 | #define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
182 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
183 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
184 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
185 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
186 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
187 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
188 | #define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C | ||
189 | #define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
190 | #define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
191 | #define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
192 | #define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
193 | #define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
194 | #define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
195 | #define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
196 | #define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
197 | #define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
198 | #define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
199 | #define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
200 | #define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
201 | #define R_006C9C_DCP_CONTROL 0x006C9C | ||
202 | #define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 | ||
203 | #define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
204 | #define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
205 | #define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
206 | #define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
207 | #define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
208 | #define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
209 | #define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
210 | #define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
211 | #define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
212 | #define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
213 | #define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
214 | #define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
215 | #define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C | ||
216 | #define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
217 | #define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
218 | #define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
219 | #define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
220 | #define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
221 | #define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
222 | #define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
223 | #define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
224 | #define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
225 | #define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
226 | #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
227 | #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
228 | #define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58 | ||
229 | #define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0) | ||
230 | #define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF) | ||
231 | #define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0 | ||
232 | #define S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16) | ||
233 | #define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF) | ||
234 | #define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF | ||
235 | |||
236 | |||
237 | #define R_000090_MC_SYSTEM_STATUS 0x000090 | ||
238 | #define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0) | ||
239 | #define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1) | ||
240 | #define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE | ||
241 | #define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1) | ||
242 | #define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1) | ||
243 | #define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD | ||
244 | #define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2) | ||
245 | #define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1) | ||
246 | #define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB | ||
247 | #define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3) | ||
248 | #define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1) | ||
249 | #define C_000090_MC_SELECT_PM 0xFFFFFFF7 | ||
250 | #define S_000090_RESERVED4(x) (((x) & 0xF) << 4) | ||
251 | #define G_000090_RESERVED4(x) (((x) >> 4) & 0xF) | ||
252 | #define C_000090_RESERVED4 0xFFFFFF0F | ||
253 | #define S_000090_RESERVED8(x) (((x) & 0xF) << 8) | ||
254 | #define G_000090_RESERVED8(x) (((x) >> 8) & 0xF) | ||
255 | #define C_000090_RESERVED8 0xFFFFF0FF | ||
256 | #define S_000090_RESERVED12(x) (((x) & 0xF) << 12) | ||
257 | #define G_000090_RESERVED12(x) (((x) >> 12) & 0xF) | ||
258 | #define C_000090_RESERVED12 0xFFFF0FFF | ||
259 | #define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16) | ||
260 | #define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1) | ||
261 | #define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF | ||
262 | #define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17) | ||
263 | #define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1) | ||
264 | #define C_000090_MCA_IDLE 0xFFFDFFFF | ||
265 | #define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18) | ||
266 | #define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1) | ||
267 | #define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF | ||
268 | #define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19) | ||
269 | #define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1) | ||
270 | #define C_000090_MCA_ARB_IDLE 0xFFF7FFFF | ||
271 | #define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20) | ||
272 | #define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF) | ||
273 | #define C_000090_RESERVED20 0x000FFFFF | ||
274 | #define R_000100_MCCFG_FB_LOCATION 0x000100 | ||
275 | #define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
276 | #define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
277 | #define C_000100_MC_FB_START 0xFFFF0000 | ||
278 | #define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
279 | #define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
280 | #define C_000100_MC_FB_TOP 0x0000FFFF | ||
281 | #define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104 | ||
282 | #define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0) | ||
283 | #define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF) | ||
284 | #define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0 | ||
285 | #define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4) | ||
286 | #define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF) | ||
287 | #define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F | ||
288 | #define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8) | ||
289 | #define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF) | ||
290 | #define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF | ||
291 | #define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12) | ||
292 | #define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF) | ||
293 | #define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF | ||
294 | #define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16) | ||
295 | #define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF) | ||
296 | #define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF | ||
297 | #define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20) | ||
298 | #define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF) | ||
299 | #define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF | ||
300 | #define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24) | ||
301 | #define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF) | ||
302 | #define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF | ||
303 | #define S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28) | ||
304 | #define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF) | ||
305 | #define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF | ||
306 | |||
307 | #endif | ||
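All of the new *d.h register headers in this series follow the same generated naming scheme: R_<offset>_<REG> is the register offset, S_<offset>_<FIELD>(x) shifts a value into the field's position, G_<offset>_<FIELD>(x) extracts the field from a register word, and C_<offset>_<FIELD> is the inverted field mask used to clear it before programming a new value. A read-modify-write of a single field therefore looks like the MC_INIT_MISC_LAT_TIMER update earlier in the patch; a short sketch (the latency value of 1 is only an example):

	uint32_t tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);

	tmp &= C_000104_MC_DISP0R_INIT_LAT;	/* clear the DISP0R latency field */
	tmp |= S_000104_MC_DISP0R_INIT_LAT(1);	/* program the new value */
	WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
	/* reading back: G_000104_MC_DISP0R_INIT_LAT(tmp) now yields 1 */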
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h deleted file mode 100644 index c0d9faa2175b..000000000000 --- a/drivers/gpu/drm/radeon/rs690r.h +++ /dev/null | |||
@@ -1,99 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef RS690R_H | ||
29 | #define RS690R_H | ||
30 | |||
31 | /* RS690/RS740 registers */ | ||
32 | #define MC_INDEX 0x0078 | ||
33 | # define MC_INDEX_MASK 0x1FF | ||
34 | # define MC_INDEX_WR_EN (1 << 9) | ||
35 | # define MC_INDEX_WR_ACK 0x7F | ||
36 | #define MC_DATA 0x007C | ||
37 | #define HDP_FB_LOCATION 0x0134 | ||
38 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
39 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
40 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
41 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
42 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
43 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
44 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
45 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
46 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
47 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
48 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
49 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
50 | #define MODE_PRIORITY_OFF (1 << 16) | ||
51 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
52 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
53 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
54 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
55 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
56 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
57 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
58 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
59 | #define DCP_CONTROL 0x6C9C | ||
60 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
61 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
62 | |||
63 | /* MC indirect registers */ | ||
64 | #define MC_STATUS_IDLE (1 << 0) | ||
65 | #define MC_MISC_CNTL 0x18 | ||
66 | #define DISABLE_GTW (1 << 1) | ||
67 | #define GART_INDEX_REG_EN (1 << 12) | ||
68 | #define BLOCK_GFX_D3_EN (1 << 14) | ||
69 | #define GART_FEATURE_ID 0x2B | ||
70 | #define HANG_EN (1 << 11) | ||
71 | #define TLB_ENABLE (1 << 18) | ||
72 | #define P2P_ENABLE (1 << 19) | ||
73 | #define GTW_LAC_EN (1 << 25) | ||
74 | #define LEVEL2_GART (0 << 30) | ||
75 | #define LEVEL1_GART (1 << 30) | ||
76 | #define PDC_EN (1 << 31) | ||
77 | #define GART_BASE 0x2C | ||
78 | #define GART_CACHE_CNTRL 0x2E | ||
79 | # define GART_CACHE_INVALIDATE (1 << 0) | ||
80 | #define MC_STATUS 0x90 | ||
81 | #define MCCFG_FB_LOCATION 0x100 | ||
82 | #define MC_FB_START_MASK 0x0000FFFF | ||
83 | #define MC_FB_START_SHIFT 0 | ||
84 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
85 | #define MC_FB_TOP_SHIFT 16 | ||
86 | #define MCCFG_AGP_LOCATION 0x101 | ||
87 | #define MC_AGP_START_MASK 0x0000FFFF | ||
88 | #define MC_AGP_START_SHIFT 0 | ||
89 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
90 | #define MC_AGP_TOP_SHIFT 16 | ||
91 | #define MCCFG_AGP_BASE 0x102 | ||
92 | #define MCCFG_AGP_BASE_2 0x103 | ||
93 | #define MC_INIT_MISC_LAT_TIMER 0x104 | ||
94 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
95 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
96 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
97 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
98 | |||
99 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h new file mode 100644 index 000000000000..c5b398330c26 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv200d.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RV200D_H__ | ||
29 | #define __RV200D_H__ | ||
30 | |||
31 | #define R_00015C_AGP_BASE_2 0x00015C | ||
32 | #define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
33 | #define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
34 | #define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
35 | |||
36 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h new file mode 100644 index 000000000000..e5a70b06fe1f --- /dev/null +++ b/drivers/gpu/drm/radeon/rv250d.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RV250D_H__ | ||
29 | #define __RV250D_H__ | ||
30 | |||
31 | #define R_00000D_SCLK_CNTL_M6 0x00000D | ||
32 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
33 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
34 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
35 | #define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) | ||
36 | #define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) | ||
37 | #define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 | ||
38 | #define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) | ||
39 | #define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) | ||
40 | #define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF | ||
41 | #define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) | ||
42 | #define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) | ||
43 | #define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF | ||
44 | #define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) | ||
45 | #define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) | ||
46 | #define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF | ||
47 | #define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) | ||
48 | #define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) | ||
49 | #define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F | ||
50 | #define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) | ||
51 | #define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) | ||
52 | #define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF | ||
53 | #define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) | ||
54 | #define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) | ||
55 | #define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF | ||
56 | #define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) | ||
57 | #define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) | ||
58 | #define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF | ||
59 | #define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) | ||
60 | #define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) | ||
61 | #define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF | ||
62 | #define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) | ||
63 | #define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) | ||
64 | #define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF | ||
65 | #define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) | ||
66 | #define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) | ||
67 | #define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF | ||
68 | #define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) | ||
69 | #define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) | ||
70 | #define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF | ||
71 | #define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) | ||
72 | #define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) | ||
73 | #define C_00000D_FORCE_DISP2 0xFFFF7FFF | ||
74 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
75 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
76 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
77 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
78 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
79 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
80 | #define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) | ||
81 | #define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) | ||
82 | #define C_00000D_FORCE_DISP1 0xFFFBFFFF | ||
83 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
84 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
85 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
86 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
87 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
88 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
89 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
90 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
91 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
92 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
93 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
94 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
95 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
96 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
97 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
98 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
99 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
100 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
101 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
102 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
103 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
104 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
105 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
106 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
107 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
108 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
109 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
110 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
111 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
112 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
113 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | ||
114 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | ||
115 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | ||
116 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | ||
117 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | ||
118 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | ||
119 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | ||
120 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | ||
121 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | ||
122 | |||
123 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h new file mode 100644 index 000000000000..c75c5ed9e654 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv350d.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RV350D_H__ | ||
29 | #define __RV350D_H__ | ||
30 | |||
31 | /* RV350, RV380 registers */ | ||
32 | /* #define R_00000D_SCLK_CNTL 0x00000D */ | ||
33 | #define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) | ||
34 | #define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) | ||
35 | #define C_00000D_FORCE_VAP 0xFFDFFFFF | ||
36 | #define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) | ||
37 | #define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) | ||
38 | #define C_00000D_FORCE_SR 0xFDFFFFFF | ||
39 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) | ||
40 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) | ||
41 | #define C_00000D_FORCE_PX 0xFBFFFFFF | ||
42 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) | ||
43 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) | ||
44 | #define C_00000D_FORCE_TX 0xF7FFFFFF | ||
45 | #define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) | ||
46 | #define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) | ||
47 | #define C_00000D_FORCE_US 0xEFFFFFFF | ||
48 | #define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) | ||
49 | #define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) | ||
50 | #define C_00000D_FORCE_SU 0xBFFFFFFF | ||
51 | |||
52 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index e53b5ca7a253..7935f793bf62 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -137,6 +137,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) | |||
137 | 137 | ||
138 | void rv515_vga_render_disable(struct radeon_device *rdev) | 138 | void rv515_vga_render_disable(struct radeon_device *rdev) |
139 | { | 139 | { |
140 | WREG32(R_000330_D1VGA_CONTROL, 0); | ||
141 | WREG32(R_000338_D2VGA_CONTROL, 0); | ||
140 | WREG32(R_000300_VGA_RENDER_CONTROL, | 142 | WREG32(R_000300_VGA_RENDER_CONTROL, |
141 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); | 143 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); |
142 | } | 144 | } |
@@ -478,7 +480,7 @@ static int rv515_startup(struct radeon_device *rdev) | |||
478 | } | 480 | } |
479 | /* Enable IRQ */ | 481 | /* Enable IRQ */ |
480 | rdev->irq.sw_int = true; | 482 | rdev->irq.sw_int = true; |
481 | r100_irq_set(rdev); | 483 | rs600_irq_set(rdev); |
482 | /* 1M ring buffer */ | 484 | /* 1M ring buffer */ |
483 | r = r100_cp_init(rdev, 1024 * 1024); | 485 | r = r100_cp_init(rdev, 1024 * 1024); |
484 | if (r) { | 486 | if (r) { |
@@ -520,7 +522,7 @@ int rv515_suspend(struct radeon_device *rdev) | |||
520 | { | 522 | { |
521 | r100_cp_disable(rdev); | 523 | r100_cp_disable(rdev); |
522 | r100_wb_disable(rdev); | 524 | r100_wb_disable(rdev); |
523 | r100_irq_disable(rdev); | 525 | rs600_irq_disable(rdev); |
524 | if (rdev->flags & RADEON_IS_PCIE) | 526 | if (rdev->flags & RADEON_IS_PCIE) |
525 | rv370_pcie_gart_disable(rdev); | 527 | rv370_pcie_gart_disable(rdev); |
526 | return 0; | 528 | return 0; |
@@ -553,7 +555,6 @@ int rv515_init(struct radeon_device *rdev) | |||
553 | { | 555 | { |
554 | int r; | 556 | int r; |
555 | 557 | ||
556 | rdev->new_init_path = true; | ||
557 | /* Initialize scratch registers */ | 558 | /* Initialize scratch registers */ |
558 | radeon_scratch_init(rdev); | 559 | radeon_scratch_init(rdev); |
559 | /* Initialize surface registers */ | 560 | /* Initialize surface registers */ |
@@ -586,6 +587,8 @@ int rv515_init(struct radeon_device *rdev) | |||
586 | } | 587 | } |
587 | /* Initialize clocks */ | 588 | /* Initialize clocks */ |
588 | radeon_get_clock_info(rdev->ddev); | 589 | radeon_get_clock_info(rdev->ddev); |
590 | /* Initialize power management */ | ||
591 | radeon_pm_init(rdev); | ||
589 | /* Get vram informations */ | 592 | /* Get vram informations */ |
590 | rv515_vram_info(rdev); | 593 | rv515_vram_info(rdev); |
591 | /* Initialize memory controller (also test AGP) */ | 594 | /* Initialize memory controller (also test AGP) */ |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index e0b97d161397..b0efd0ddae7a 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
75 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 75 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
76 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 76 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
77 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 77 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
78 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); | 78 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
79 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 79 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
80 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 80 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
81 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 81 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
@@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev) | |||
126 | } | 126 | } |
127 | 127 | ||
128 | 128 | ||
129 | /* | 129 | void rv770_agp_enable(struct radeon_device *rdev) |
130 | * MC | 130 | { |
131 | */ | 131 | u32 tmp; |
132 | static void rv770_mc_resume(struct radeon_device *rdev) | 132 | int i; |
133 | |||
134 | /* Setup L2 cache */ | ||
135 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | | ||
136 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | ||
137 | EFFECTIVE_L2_QUEUE_SIZE(7)); | ||
138 | WREG32(VM_L2_CNTL2, 0); | ||
139 | WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); | ||
140 | /* Setup TLB control */ | ||
141 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | ||
142 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | ||
143 | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | | ||
144 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); | ||
145 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); | ||
146 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); | ||
147 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); | ||
148 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); | ||
149 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | ||
150 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | ||
151 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | ||
152 | for (i = 0; i < 7; i++) | ||
153 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | ||
154 | } | ||
155 | |||
156 | static void rv770_mc_program(struct radeon_device *rdev) | ||
133 | { | 157 | { |
134 | u32 d1vga_control, d2vga_control; | 158 | struct rv515_mc_save save; |
135 | u32 vga_render_control, vga_hdp_control; | ||
136 | u32 d1crtc_control, d2crtc_control; | ||
137 | u32 new_d1grph_primary, new_d1grph_secondary; | ||
138 | u32 new_d2grph_primary, new_d2grph_secondary; | ||
139 | u64 old_vram_start; | ||
140 | u32 tmp; | 159 | u32 tmp; |
141 | int i, j; | 160 | int i, j; |
142 | 161 | ||
@@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev) | |||
150 | } | 169 | } |
151 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | 170 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); |
152 | 171 | ||
153 | d1vga_control = RREG32(D1VGA_CONTROL); | 172 | rv515_mc_stop(rdev, &save); |
154 | d2vga_control = RREG32(D2VGA_CONTROL); | ||
155 | vga_render_control = RREG32(VGA_RENDER_CONTROL); | ||
156 | vga_hdp_control = RREG32(VGA_HDP_CONTROL); | ||
157 | d1crtc_control = RREG32(D1CRTC_CONTROL); | ||
158 | d2crtc_control = RREG32(D2CRTC_CONTROL); | ||
159 | old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | ||
160 | new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); | ||
161 | new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); | ||
162 | new_d1grph_primary += rdev->mc.vram_start - old_vram_start; | ||
163 | new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
164 | new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); | ||
165 | new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); | ||
166 | new_d2grph_primary += rdev->mc.vram_start - old_vram_start; | ||
167 | new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
168 | |||
169 | /* Stop all video */ | ||
170 | WREG32(D1VGA_CONTROL, 0); | ||
171 | WREG32(D2VGA_CONTROL, 0); | ||
172 | WREG32(VGA_RENDER_CONTROL, 0); | ||
173 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
174 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
175 | WREG32(D1CRTC_CONTROL, 0); | ||
176 | WREG32(D2CRTC_CONTROL, 0); | ||
177 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
178 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
179 | |||
180 | mdelay(1); | ||
181 | if (r600_mc_wait_for_idle(rdev)) { | 173 | if (r600_mc_wait_for_idle(rdev)) { |
182 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 174 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
183 | } | 175 | } |
184 | |||
185 | /* Lockout access through VGA aperture*/ | 176 | /* Lockout access through VGA aperture*/ |
186 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | 177 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); |
187 | |||
188 | /* Update configuration */ | 178 | /* Update configuration */ |
189 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | 179 | if (rdev->flags & RADEON_IS_AGP) { |
190 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); | 180 | if (rdev->mc.vram_start < rdev->mc.gtt_start) { |
181 | /* VRAM before AGP */ | ||
182 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
183 | rdev->mc.vram_start >> 12); | ||
184 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
185 | rdev->mc.gtt_end >> 12); | ||
186 | } else { | ||
187 | /* VRAM after AGP */ | ||
188 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
189 | rdev->mc.gtt_start >> 12); | ||
190 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
191 | rdev->mc.vram_end >> 12); | ||
192 | } | ||
193 | } else { | ||
194 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
195 | rdev->mc.vram_start >> 12); | ||
196 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
197 | rdev->mc.vram_end >> 12); | ||
198 | } | ||
191 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 199 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
192 | tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; | 200 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
193 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 201 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
194 | WREG32(MC_VM_FB_LOCATION, tmp); | 202 | WREG32(MC_VM_FB_LOCATION, tmp); |
195 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 203 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
196 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 204 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
197 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 205 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); |
198 | if (rdev->flags & RADEON_IS_AGP) { | 206 | if (rdev->flags & RADEON_IS_AGP) { |
199 | WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); | 207 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
200 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 208 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
201 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); | 209 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); |
202 | } else { | 210 | } else { |
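The aperture writes in this hunk encode one rule: with AGP enabled, MC_VM_SYSTEM_APERTURE_LOW/HIGH_ADDR must span both VRAM and the GTT window (whichever range sits lower in the address map supplies the low bound), otherwise VRAM alone. A minimal standalone sketch of that bounds calculation, assuming inclusive end addresses and illustrative types rather than the driver's own:

#include <stdint.h>

struct mc_range {
        uint64_t start;
        uint64_t end;           /* inclusive */
};

/* Pick the system aperture bounds, in 4KB pages (hence the >> 12),
 * so that they span VRAM alone, or VRAM plus the AGP/GTT window. */
static void aperture_bounds(struct mc_range vram, struct mc_range gtt,
                            int has_agp, uint32_t *low, uint32_t *high)
{
        uint64_t lo = vram.start;
        uint64_t hi = vram.end;

        if (has_agp) {
                if (gtt.start < lo)
                        lo = gtt.start;
                if (gtt.end > hi)
                        hi = gtt.end;
        }
        *low  = (uint32_t)(lo >> 12);
        *high = (uint32_t)(hi >> 12);
}

As long as the VRAM and GTT ranges do not overlap, this min/max form and the explicit two-branch ordering in the hunk pick the same bounds.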
@@ -204,31 +212,10 @@ static void rv770_mc_resume(struct radeon_device *rdev) | |||
204 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | 212 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); |
205 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | 213 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); |
206 | } | 214 | } |
207 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); | ||
208 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); | ||
209 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); | ||
210 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); | ||
211 | WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); | ||
212 | |||
213 | /* Unlock host access */ | ||
214 | WREG32(VGA_HDP_CONTROL, vga_hdp_control); | ||
215 | |||
216 | mdelay(1); | ||
217 | if (r600_mc_wait_for_idle(rdev)) { | 215 | if (r600_mc_wait_for_idle(rdev)) { |
218 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 216 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
219 | } | 217 | } |
220 | 218 | rv515_mc_resume(rdev, &save); | |
221 | /* Restore video state */ | ||
222 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
223 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
224 | WREG32(D1CRTC_CONTROL, d1crtc_control); | ||
225 | WREG32(D2CRTC_CONTROL, d2crtc_control); | ||
226 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
227 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
228 | WREG32(D1VGA_CONTROL, d1vga_control); | ||
229 | WREG32(D2VGA_CONTROL, d2vga_control); | ||
230 | WREG32(VGA_RENDER_CONTROL, vga_render_control); | ||
231 | |||
232 | /* we need to own VRAM, so turn off the VGA renderer here | 219 | /* we need to own VRAM, so turn off the VGA renderer here |
233 | * to stop it overwriting our objects */ | 220 | * to stop it overwriting our objects */ |
234 | rv515_vga_render_disable(rdev); | 221 | rv515_vga_render_disable(rdev); |
@@ -542,11 +529,11 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
542 | if (rdev->family == CHIP_RV770) | 529 | if (rdev->family == CHIP_RV770) |
543 | gb_tiling_config |= BANK_TILING(1); | 530 | gb_tiling_config |= BANK_TILING(1); |
544 | else | 531 | else |
545 | gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_SHIFT) >> NOOFBANK_MASK); | 532 | gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
546 | 533 | ||
547 | gb_tiling_config |= GROUP_SIZE(0); | 534 | gb_tiling_config |= GROUP_SIZE(0); |
548 | 535 | ||
549 | if (((mc_arb_ramcfg & NOOFROWS_MASK) & NOOFROWS_SHIFT) > 3) { | 536 | if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { |
550 | gb_tiling_config |= ROW_TILING(3); | 537 | gb_tiling_config |= ROW_TILING(3); |
551 | gb_tiling_config |= SAMPLE_SPLIT(3); | 538 | gb_tiling_config |= SAMPLE_SPLIT(3); |
552 | } else { | 539 | } else { |
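Both corrections in this hunk are the usual bitfield decode order: mask the register value with *_MASK first, then shift right by *_SHIFT; the old code had the two macros swapped. A small self-contained check of the pattern, using the NOOFBANK layout from rv770d.h and a made-up register value:

#include <assert.h>
#include <stdint.h>

#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK  0x00000003

static inline uint32_t decode_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
        return (reg & mask) >> shift;   /* mask first, then shift down */
}

int main(void)
{
        uint32_t mc_arb_ramcfg = 0x00000002;    /* example: 2 banks encoded */

        /* the swapped form (reg & NOOFBANK_SHIFT) >> NOOFBANK_MASK yields 0 here */
        assert(decode_field(mc_arb_ramcfg, NOOFBANK_MASK, NOOFBANK_SHIFT) == 2);
        return 0;
}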
@@ -592,14 +579,14 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
592 | 579 | ||
593 | /* set HW defaults for 3D engine */ | 580 | /* set HW defaults for 3D engine */ |
594 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | | 581 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | |
595 | ROQ_IB2_START(0x2b))); | 582 | ROQ_IB2_START(0x2b))); |
596 | 583 | ||
597 | WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); | 584 | WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); |
598 | 585 | ||
599 | WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | | 586 | WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | |
600 | SYNC_GRADIENT | | 587 | SYNC_GRADIENT | |
601 | SYNC_WALKER | | 588 | SYNC_WALKER | |
602 | SYNC_ALIGNER)); | 589 | SYNC_ALIGNER)); |
603 | 590 | ||
604 | sx_debug_1 = RREG32(SX_DEBUG_1); | 591 | sx_debug_1 = RREG32(SX_DEBUG_1); |
605 | sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; | 592 | sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; |
@@ -611,9 +598,9 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
611 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); | 598 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); |
612 | 599 | ||
613 | WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | | 600 | WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | |
614 | GS_FLUSH_CTL(4) | | 601 | GS_FLUSH_CTL(4) | |
615 | ACK_FLUSH_CTL(3) | | 602 | ACK_FLUSH_CTL(3) | |
616 | SYNC_FLUSH_CTL)); | 603 | SYNC_FLUSH_CTL)); |
617 | 604 | ||
618 | if (rdev->family == CHIP_RV770) | 605 | if (rdev->family == CHIP_RV770) |
619 | WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); | 606 | WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); |
@@ -624,12 +611,12 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
624 | } | 611 | } |
625 | 612 | ||
626 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) | | 613 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) | |
627 | POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) | | 614 | POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) | |
628 | SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1))); | 615 | SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1))); |
629 | 616 | ||
630 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) | | 617 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) | |
631 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) | | 618 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) | |
632 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize))); | 619 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize))); |
633 | 620 | ||
634 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); | 621 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); |
635 | 622 | ||
@@ -787,14 +774,36 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
787 | { | 774 | { |
788 | fixed20_12 a; | 775 | fixed20_12 a; |
789 | u32 tmp; | 776 | u32 tmp; |
777 | int chansize, numchan; | ||
790 | int r; | 778 | int r; |
791 | 779 | ||
792 | /* Get VRAM information */ | 780 | /* Get VRAM information */ |
793 | /* FIXME: Don't know how to determine vram width, need to check | ||
794 | * vram_width usage | ||
795 | */ | ||
796 | rdev->mc.vram_width = 128; | ||
797 | rdev->mc.vram_is_ddr = true; | 781 | rdev->mc.vram_is_ddr = true; |
782 | tmp = RREG32(MC_ARB_RAMCFG); | ||
783 | if (tmp & CHANSIZE_OVERRIDE) { | ||
784 | chansize = 16; | ||
785 | } else if (tmp & CHANSIZE_MASK) { | ||
786 | chansize = 64; | ||
787 | } else { | ||
788 | chansize = 32; | ||
789 | } | ||
790 | tmp = RREG32(MC_SHARED_CHMAP); | ||
791 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
792 | case 0: | ||
793 | default: | ||
794 | numchan = 1; | ||
795 | break; | ||
796 | case 1: | ||
797 | numchan = 2; | ||
798 | break; | ||
799 | case 2: | ||
800 | numchan = 4; | ||
801 | break; | ||
802 | case 3: | ||
803 | numchan = 8; | ||
804 | break; | ||
805 | } | ||
806 | rdev->mc.vram_width = numchan * chansize; | ||
798 | /* Could aper size report 0 ? */ | 807 | /* Could aper size report 0 ? */ |
799 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 808 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
800 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 809 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
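The new code above stops hard-coding a 128-bit bus: the per-channel width comes from MC_ARB_RAMCFG (CHANSIZE/CHANSIZE_OVERRIDE), the channel count from MC_SHARED_CHMAP (NOOFCHAN), and vram_width is their product. The same derivation as a standalone helper, with the register values passed in instead of read from hardware; the field macros mirror rv770d.h:

#include <stdint.h>

#define CHANSIZE_MASK      0x00000100
#define CHANSIZE_OVERRIDE  (1 << 11)
#define NOOFCHAN_SHIFT     12
#define NOOFCHAN_MASK      0x00003000

/* Bus width in bits = number of channels * per-channel width. */
static int rv770_vram_width(uint32_t mc_arb_ramcfg, uint32_t mc_shared_chmap)
{
        int chansize, numchan;

        if (mc_arb_ramcfg & CHANSIZE_OVERRIDE)
                chansize = 16;
        else if (mc_arb_ramcfg & CHANSIZE_MASK)
                chansize = 64;
        else
                chansize = 32;

        switch ((mc_shared_chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 4;
                break;
        case 3:
                numchan = 8;
                break;
        case 0:
        default:
                numchan = 1;
                break;
        }
        return numchan * chansize;      /* e.g. 4 channels x 64 bits = 256-bit bus */
}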
@@ -840,9 +849,9 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
840 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 849 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
841 | } | 850 | } |
842 | rdev->mc.vram_start = rdev->mc.vram_location; | 851 | rdev->mc.vram_start = rdev->mc.vram_location; |
843 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; | 852 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
844 | rdev->mc.gtt_start = rdev->mc.gtt_location; | 853 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
845 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; | 854 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
846 | /* FIXME: we should enforce default clock in case GPU is not in | 855 | /* FIXME: we should enforce default clock in case GPU is not in |
847 | * default setup | 856 | * default setup |
848 | */ | 857 | */ |
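Making vram_end and gtt_end inclusive (start + size - 1) is what lets the register programming earlier in the file drop its local "- 1" adjustments, for instance when packing MC_VM_FB_LOCATION. A tiny worked example with made-up sizes, assuming the same packing used for MC_VM_FB_LOCATION earlier in this diff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t vram_location = 0;
        uint64_t mc_vram_size  = 256ULL << 20;                  /* 256 MiB */
        uint64_t vram_start = vram_location;
        uint64_t vram_end   = vram_location + mc_vram_size - 1; /* inclusive */

        /* top 16 bits hold the end in 16MB units, bottom 16 the start */
        uint32_t fb_location = (uint32_t)(((vram_end >> 24) & 0xFFFF) << 16) |
                               (uint32_t)((vram_start >> 24) & 0xFFFF);

        printf("MC_VM_FB_LOCATION = 0x%08x\n", (unsigned int)fb_location);
        return 0;   /* prints 0x000f0000 for this example */
}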
@@ -861,11 +870,14 @@ static int rv770_startup(struct radeon_device *rdev) | |||
861 | { | 870 | { |
862 | int r; | 871 | int r; |
863 | 872 | ||
864 | radeon_gpu_reset(rdev); | 873 | rv770_mc_program(rdev); |
865 | rv770_mc_resume(rdev); | 874 | if (rdev->flags & RADEON_IS_AGP) { |
866 | r = rv770_pcie_gart_enable(rdev); | 875 | rv770_agp_enable(rdev); |
867 | if (r) | 876 | } else { |
868 | return r; | 877 | r = rv770_pcie_gart_enable(rdev); |
878 | if (r) | ||
879 | return r; | ||
880 | } | ||
869 | rv770_gpu_init(rdev); | 881 | rv770_gpu_init(rdev); |
870 | 882 | ||
871 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 883 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
@@ -884,9 +896,8 @@ static int rv770_startup(struct radeon_device *rdev) | |||
884 | r = r600_cp_resume(rdev); | 896 | r = r600_cp_resume(rdev); |
885 | if (r) | 897 | if (r) |
886 | return r; | 898 | return r; |
887 | r = r600_wb_init(rdev); | 899 | /* write back buffers are not vital so don't worry about failure */ |
888 | if (r) | 900 | r600_wb_enable(rdev); |
889 | return r; | ||
890 | return 0; | 901 | return 0; |
891 | } | 902 | } |
892 | 903 | ||
@@ -894,15 +905,12 @@ int rv770_resume(struct radeon_device *rdev) | |||
894 | { | 905 | { |
895 | int r; | 906 | int r; |
896 | 907 | ||
897 | if (radeon_gpu_reset(rdev)) { | 908 | /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw, |
898 | /* FIXME: what do we want to do here ? */ | 909 | * posting performs the necessary tasks to bring the GPU back into |
899 | } | 910 | * good shape. |
911 | */ | ||
900 | /* post card */ | 912 | /* post card */ |
901 | if (rdev->is_atom_bios) { | 913 | atom_asic_init(rdev->mode_info.atom_context); |
902 | atom_asic_init(rdev->mode_info.atom_context); | ||
903 | } else { | ||
904 | radeon_combios_asic_init(rdev->ddev); | ||
905 | } | ||
906 | /* Initialize clocks */ | 914 | /* Initialize clocks */ |
907 | r = radeon_clocks_init(rdev); | 915 | r = radeon_clocks_init(rdev); |
908 | if (r) { | 916 | if (r) { |
@@ -915,7 +923,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
915 | return r; | 923 | return r; |
916 | } | 924 | } |
917 | 925 | ||
918 | r = radeon_ib_test(rdev); | 926 | r = r600_ib_test(rdev); |
919 | if (r) { | 927 | if (r) { |
920 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 928 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
921 | return r; | 929 | return r; |
@@ -929,8 +937,8 @@ int rv770_suspend(struct radeon_device *rdev) | |||
929 | /* FIXME: we should wait for ring to be empty */ | 937 | /* FIXME: we should wait for ring to be empty */ |
930 | r700_cp_stop(rdev); | 938 | r700_cp_stop(rdev); |
931 | rdev->cp.ready = false; | 939 | rdev->cp.ready = false; |
940 | r600_wb_disable(rdev); | ||
932 | rv770_pcie_gart_disable(rdev); | 941 | rv770_pcie_gart_disable(rdev); |
933 | |||
934 | /* unpin shaders bo */ | 942 | /* unpin shaders bo */ |
935 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 943 | radeon_object_unpin(rdev->r600_blit.shader_obj); |
936 | return 0; | 944 | return 0; |
@@ -946,7 +954,6 @@ int rv770_init(struct radeon_device *rdev) | |||
946 | { | 954 | { |
947 | int r; | 955 | int r; |
948 | 956 | ||
949 | rdev->new_init_path = true; | ||
950 | r = radeon_dummy_page_init(rdev); | 957 | r = radeon_dummy_page_init(rdev); |
951 | if (r) | 958 | if (r) |
952 | return r; | 959 | return r; |
@@ -960,8 +967,10 @@ int rv770_init(struct radeon_device *rdev) | |||
960 | return -EINVAL; | 967 | return -EINVAL; |
961 | } | 968 | } |
962 | /* Must be an ATOMBIOS */ | 969 | /* Must be an ATOMBIOS */ |
963 | if (!rdev->is_atom_bios) | 970 | if (!rdev->is_atom_bios) { |
971 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | ||
964 | return -EINVAL; | 972 | return -EINVAL; |
973 | } | ||
965 | r = radeon_atombios_init(rdev); | 974 | r = radeon_atombios_init(rdev); |
966 | if (r) | 975 | if (r) |
967 | return r; | 976 | return r; |
@@ -974,24 +983,20 @@ int rv770_init(struct radeon_device *rdev) | |||
974 | r600_scratch_init(rdev); | 983 | r600_scratch_init(rdev); |
975 | /* Initialize surface registers */ | 984 | /* Initialize surface registers */ |
976 | radeon_surface_init(rdev); | 985 | radeon_surface_init(rdev); |
986 | /* Initialize clocks */ | ||
977 | radeon_get_clock_info(rdev->ddev); | 987 | radeon_get_clock_info(rdev->ddev); |
978 | r = radeon_clocks_init(rdev); | 988 | r = radeon_clocks_init(rdev); |
979 | if (r) | 989 | if (r) |
980 | return r; | 990 | return r; |
991 | /* Initialize power management */ | ||
992 | radeon_pm_init(rdev); | ||
981 | /* Fence driver */ | 993 | /* Fence driver */ |
982 | r = radeon_fence_driver_init(rdev); | 994 | r = radeon_fence_driver_init(rdev); |
983 | if (r) | 995 | if (r) |
984 | return r; | 996 | return r; |
985 | r = rv770_mc_init(rdev); | 997 | r = rv770_mc_init(rdev); |
986 | if (r) { | 998 | if (r) |
987 | if (rdev->flags & RADEON_IS_AGP) { | ||
988 | /* Retry with disabling AGP */ | ||
989 | rv770_fini(rdev); | ||
990 | rdev->flags &= ~RADEON_IS_AGP; | ||
991 | return rv770_init(rdev); | ||
992 | } | ||
993 | return r; | 999 | return r; |
994 | } | ||
995 | /* Memory manager */ | 1000 | /* Memory manager */ |
996 | r = radeon_object_init(rdev); | 1001 | r = radeon_object_init(rdev); |
997 | if (r) | 1002 | if (r) |
@@ -1020,12 +1025,10 @@ int rv770_init(struct radeon_device *rdev) | |||
1020 | 1025 | ||
1021 | r = rv770_startup(rdev); | 1026 | r = rv770_startup(rdev); |
1022 | if (r) { | 1027 | if (r) { |
1023 | if (rdev->flags & RADEON_IS_AGP) { | 1028 | rv770_suspend(rdev); |
1024 | /* Retry with disabling AGP */ | 1029 | r600_wb_fini(rdev); |
1025 | rv770_fini(rdev); | 1030 | radeon_ring_fini(rdev); |
1026 | rdev->flags &= ~RADEON_IS_AGP; | 1031 | rv770_pcie_gart_fini(rdev); |
1027 | return rv770_init(rdev); | ||
1028 | } | ||
1029 | rdev->accel_working = false; | 1032 | rdev->accel_working = false; |
1030 | } | 1033 | } |
1031 | if (rdev->accel_working) { | 1034 | if (rdev->accel_working) { |
@@ -1034,7 +1037,7 @@ int rv770_init(struct radeon_device *rdev) | |||
1034 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | 1037 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); |
1035 | rdev->accel_working = false; | 1038 | rdev->accel_working = false; |
1036 | } | 1039 | } |
1037 | r = radeon_ib_test(rdev); | 1040 | r = r600_ib_test(rdev); |
1038 | if (r) { | 1041 | if (r) { |
1039 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 1042 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
1040 | rdev->accel_working = false; | 1043 | rdev->accel_working = false; |
@@ -1049,20 +1052,15 @@ void rv770_fini(struct radeon_device *rdev) | |||
1049 | 1052 | ||
1050 | r600_blit_fini(rdev); | 1053 | r600_blit_fini(rdev); |
1051 | radeon_ring_fini(rdev); | 1054 | radeon_ring_fini(rdev); |
1055 | r600_wb_fini(rdev); | ||
1052 | rv770_pcie_gart_fini(rdev); | 1056 | rv770_pcie_gart_fini(rdev); |
1053 | radeon_gem_fini(rdev); | 1057 | radeon_gem_fini(rdev); |
1054 | radeon_fence_driver_fini(rdev); | 1058 | radeon_fence_driver_fini(rdev); |
1055 | radeon_clocks_fini(rdev); | 1059 | radeon_clocks_fini(rdev); |
1056 | #if __OS_HAS_AGP | ||
1057 | if (rdev->flags & RADEON_IS_AGP) | 1060 | if (rdev->flags & RADEON_IS_AGP) |
1058 | radeon_agp_fini(rdev); | 1061 | radeon_agp_fini(rdev); |
1059 | #endif | ||
1060 | radeon_object_fini(rdev); | 1062 | radeon_object_fini(rdev); |
1061 | if (rdev->is_atom_bios) { | 1063 | radeon_atombios_fini(rdev); |
1062 | radeon_atombios_fini(rdev); | ||
1063 | } else { | ||
1064 | radeon_combios_fini(rdev); | ||
1065 | } | ||
1066 | kfree(rdev->bios); | 1064 | kfree(rdev->bios); |
1067 | rdev->bios = NULL; | 1065 | rdev->bios = NULL; |
1068 | radeon_dummy_page_fini(rdev); | 1066 | radeon_dummy_page_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 4b9c3d6396ff..a1367ab6f261 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -129,6 +129,10 @@ | |||
129 | #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 | 129 | #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 |
130 | #define HDP_TILING_CONFIG 0x2F3C | 130 | #define HDP_TILING_CONFIG 0x2F3C |
131 | 131 | ||
132 | #define MC_SHARED_CHMAP 0x2004 | ||
133 | #define NOOFCHAN_SHIFT 12 | ||
134 | #define NOOFCHAN_MASK 0x00003000 | ||
135 | |||
132 | #define MC_ARB_RAMCFG 0x2760 | 136 | #define MC_ARB_RAMCFG 0x2760 |
133 | #define NOOFBANK_SHIFT 0 | 137 | #define NOOFBANK_SHIFT 0 |
134 | #define NOOFBANK_MASK 0x00000003 | 138 | #define NOOFBANK_MASK 0x00000003 |
@@ -142,6 +146,7 @@ | |||
142 | #define CHANSIZE_MASK 0x00000100 | 146 | #define CHANSIZE_MASK 0x00000100 |
143 | #define BURSTLENGTH_SHIFT 9 | 147 | #define BURSTLENGTH_SHIFT 9 |
144 | #define BURSTLENGTH_MASK 0x00000200 | 148 | #define BURSTLENGTH_MASK 0x00000200 |
149 | #define CHANSIZE_OVERRIDE (1 << 11) | ||
145 | #define MC_VM_AGP_TOP 0x2028 | 150 | #define MC_VM_AGP_TOP 0x2028 |
146 | #define MC_VM_AGP_BOT 0x202C | 151 | #define MC_VM_AGP_BOT 0x202C |
147 | #define MC_VM_AGP_BASE 0x2030 | 152 | #define MC_VM_AGP_BASE 0x2030 |
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c index 541744d00d3e..b17007178a36 100644 --- a/drivers/gpu/drm/ttm/ttm_global.c +++ b/drivers/gpu/drm/ttm/ttm_global.c | |||
@@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) | |||
82 | if (unlikely(ret != 0)) | 82 | if (unlikely(ret != 0)) |
83 | goto out_err; | 83 | goto out_err; |
84 | 84 | ||
85 | ++item->refcount; | ||
86 | } | 85 | } |
86 | ++item->refcount; | ||
87 | ref->object = item->object; | 87 | ref->object = item->object; |
88 | object = item->object; | 88 | object = item->object; |
89 | mutex_unlock(&item->mutex); | 89 | mutex_unlock(&item->mutex); |
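The ttm_global change moves the refcount increment out of the first-reference branch, so every successful caller of ttm_global_item_ref() bumps the count rather than only the one that created the object, which would otherwise let the count fall out of step with the number of users. A stripped-down sketch of the corrected shape, using pthread and placeholder types rather than TTM's:

#include <pthread.h>
#include <stdlib.h>

struct global_item {
        pthread_mutex_t mutex;
        void *object;
        int refcount;
};

static int global_item_ref(struct global_item *item, void **object)
{
        pthread_mutex_lock(&item->mutex);
        if (item->refcount == 0) {
                /* first user creates the shared object */
                item->object = malloc(64);
                if (!item->object) {
                        pthread_mutex_unlock(&item->mutex);
                        return -1;
                }
        }
        ++item->refcount;       /* every reference counts, not just the first */
        *object = item->object;
        pthread_mutex_unlock(&item->mutex);
        return 0;
}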
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a55ee1a56c16..7bcb89f39ce8 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -279,6 +279,7 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) | |||
279 | 279 | ||
280 | return ttm_tt_set_caching(ttm, state); | 280 | return ttm_tt_set_caching(ttm, state); |
281 | } | 281 | } |
282 | EXPORT_SYMBOL(ttm_tt_set_placement_caching); | ||
282 | 283 | ||
283 | static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) | 284 | static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) |
284 | { | 285 | { |