aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2012-10-02 20:32:58 -0400
committerDave Airlie <airlied@redhat.com>2012-10-02 20:32:58 -0400
commit8ff1f792dd68ad46f3cfe01e01a375b402cf08da (patch)
treeb73b4fa71c5a9357931360d7894beee247cd464e
parent2216c9e74fb3baac3cb73952158dbe38b703997e (diff)
parent82ffd92b162ece87c863c075d993c65333e8e78b (diff)
Merge branch 'drm-next-3.7' of git://people.freedesktop.org/~agd5f/linux into drm-next
Alex writes: "The big changes for 3.7 include: - Asynchronous VM page table updates for Cayman/SI - 2 level VM page table support. Saves memory compared to 1 level page tables. - Reworked PLL handing in the display code allows lots more combinations of monitors to work, including more than two DP displays assuming compatible clocks across shared PLLs. This also allows us to power down extra PLLs when we can share a single one across multiple displays which saves power. - Native backlight control on ATOMBIOS systems. - Improved ACPI support for interacting with the GPU. Fixes backlight control on some laptops. - Document AMD ACPI interfaces - Lots of code cleanup - Bug fixes" * 'drm-next-3.7' of git://people.freedesktop.org/~agd5f/linux: (79 commits) drm/radeon: add vm set_page() callback for SI drm/radeon: rework the vm_flush interface drm/radeon: use WRITE_DATA packets for vm flush on SI drm/radeon/pm: fix multi-head profile handling on BTC+ (v2) drm/radeon: fix radeon power state debug output drm/radeon: force MSIs on RS690 asics drm/radeon: Add MSI quirk for gateway RS690 drm/radeon: allow MIP_ADDRESS=0 for MSAA textures on Evergreen drm/radeon/kms: allow STRMOUT_BASE_UPDATE on RS780 and RS880 drm/radeon: add 2-level VM pagetables support v9 drm/radeon: refactor set_page chipset interface v5 drm/radeon: Fix scratch register leak in IB test. drm/radeon: restore backlight level on resume drm/radeon: add get_backlight_level callback drm/radeon: only adjust default clocks on NI GPUs drm/radeon: validate PPLL in crtc fixup drm/radeon: work around KMS modeset limitations in PLL allocation (v2) drm/radeon: make non-DP PPLL sharing more robust drm/radeon: store the encoder in the radeon_crtc drm/radeon: rework crtc pll setup to better support PPLL sharing ...
-rw-r--r--drivers/acpi/video.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c663
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c367
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c282
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c61
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h7
-rw-r--r--drivers/gpu/drm/radeon/ni.c134
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/r100.c96
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c37
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c115
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c52
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h1
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h192
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c607
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.h445
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c93
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h28
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c411
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c602
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c104
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c49
-rw-r--r--drivers/gpu/drm/radeon/rs690.c6
-rw-r--r--drivers/gpu/drm/radeon/rv515.c18
-rw-r--r--drivers/gpu/drm/radeon/rv770.c10
-rw-r--r--drivers/gpu/drm/radeon/si.c113
-rw-r--r--drivers/gpu/drm/radeon/sid.h15
55 files changed, 3549 insertions, 1510 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1e0a9e17c31d..f94d4c818fc7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1448,8 +1448,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
1448 case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch, 1448 case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
1449 * most likely via hotkey. */ 1449 * most likely via hotkey. */
1450 acpi_bus_generate_proc_event(device, event, 0); 1450 acpi_bus_generate_proc_event(device, event, 0);
1451 if (!acpi_notifier_call_chain(device, event, 0)) 1451 keycode = KEY_SWITCHVIDEOMODE;
1452 keycode = KEY_SWITCHVIDEOMODE;
1453 break; 1452 break;
1454 1453
1455 case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video 1454 case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
@@ -1479,8 +1478,9 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
1479 break; 1478 break;
1480 } 1479 }
1481 1480
1482 if (event != ACPI_VIDEO_NOTIFY_SWITCH) 1481 if (acpi_notifier_call_chain(device, event, 0))
1483 acpi_notifier_call_chain(device, event, 0); 1482 /* Something vetoed the keypress. */
1483 keycode = 0;
1484 1484
1485 if (keycode) { 1485 if (keycode) {
1486 input_report_key(input, keycode, 1); 1486 input_report_key(input, keycode, 1);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2817101fb167..96184d02c8d9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -83,25 +83,19 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
83 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 83 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
84 ENABLE_SCALER_PS_ALLOCATION args; 84 ENABLE_SCALER_PS_ALLOCATION args;
85 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); 85 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
86 86 struct radeon_encoder *radeon_encoder =
87 to_radeon_encoder(radeon_crtc->encoder);
87 /* fixme - fill in enc_priv for atom dac */ 88 /* fixme - fill in enc_priv for atom dac */
88 enum radeon_tv_std tv_std = TV_STD_NTSC; 89 enum radeon_tv_std tv_std = TV_STD_NTSC;
89 bool is_tv = false, is_cv = false; 90 bool is_tv = false, is_cv = false;
90 struct drm_encoder *encoder;
91 91
92 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) 92 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
93 return; 93 return;
94 94
95 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 95 if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
96 /* find tv std */ 96 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
97 if (encoder->crtc == crtc) { 97 tv_std = tv_dac->tv_std;
98 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 98 is_tv = true;
99 if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
100 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
101 tv_std = tv_dac->tv_std;
102 is_tv = true;
103 }
104 }
105 } 99 }
106 100
107 memset(&args, 0, sizeof(args)); 101 memset(&args, 0, sizeof(args));
@@ -533,99 +527,87 @@ union adjust_pixel_clock {
533}; 527};
534 528
535static u32 atombios_adjust_pll(struct drm_crtc *crtc, 529static u32 atombios_adjust_pll(struct drm_crtc *crtc,
536 struct drm_display_mode *mode, 530 struct drm_display_mode *mode)
537 struct radeon_pll *pll,
538 bool ss_enabled,
539 struct radeon_atom_ss *ss)
540{ 531{
532 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
541 struct drm_device *dev = crtc->dev; 533 struct drm_device *dev = crtc->dev;
542 struct radeon_device *rdev = dev->dev_private; 534 struct radeon_device *rdev = dev->dev_private;
543 struct drm_encoder *encoder = NULL; 535 struct drm_encoder *encoder = radeon_crtc->encoder;
544 struct radeon_encoder *radeon_encoder = NULL; 536 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
545 struct drm_connector *connector = NULL; 537 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
546 u32 adjusted_clock = mode->clock; 538 u32 adjusted_clock = mode->clock;
547 int encoder_mode = 0; 539 int encoder_mode = atombios_get_encoder_mode(encoder);
548 u32 dp_clock = mode->clock; 540 u32 dp_clock = mode->clock;
549 int bpc = 8; 541 int bpc = radeon_get_monitor_bpc(connector);
550 bool is_duallink = false; 542 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
551 543
552 /* reset the pll flags */ 544 /* reset the pll flags */
553 pll->flags = 0; 545 radeon_crtc->pll_flags = 0;
554 546
555 if (ASIC_IS_AVIVO(rdev)) { 547 if (ASIC_IS_AVIVO(rdev)) {
556 if ((rdev->family == CHIP_RS600) || 548 if ((rdev->family == CHIP_RS600) ||
557 (rdev->family == CHIP_RS690) || 549 (rdev->family == CHIP_RS690) ||
558 (rdev->family == CHIP_RS740)) 550 (rdev->family == CHIP_RS740))
559 pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ 551 radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
560 RADEON_PLL_PREFER_CLOSEST_LOWER); 552 RADEON_PLL_PREFER_CLOSEST_LOWER);
561 553
562 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 554 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
563 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 555 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
564 else 556 else
565 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 557 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
566 558
567 if (rdev->family < CHIP_RV770) 559 if (rdev->family < CHIP_RV770)
568 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 560 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
569 /* use frac fb div on APUs */ 561 /* use frac fb div on APUs */
570 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) 562 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
571 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; 563 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
572 } else { 564 } else {
573 pll->flags |= RADEON_PLL_LEGACY; 565 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
574 566
575 if (mode->clock > 200000) /* range limits??? */ 567 if (mode->clock > 200000) /* range limits??? */
576 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 568 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
577 else 569 else
578 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 570 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
579 } 571 }
580 572
581 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 573 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
582 if (encoder->crtc == crtc) { 574 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
583 radeon_encoder = to_radeon_encoder(encoder); 575 if (connector) {
584 connector = radeon_get_connector_for_encoder(encoder); 576 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
585 bpc = radeon_get_monitor_bpc(connector); 577 struct radeon_connector_atom_dig *dig_connector =
586 encoder_mode = atombios_get_encoder_mode(encoder); 578 radeon_connector->con_priv;
587 is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
588 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
589 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
590 if (connector) {
591 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
592 struct radeon_connector_atom_dig *dig_connector =
593 radeon_connector->con_priv;
594
595 dp_clock = dig_connector->dp_clock;
596 }
597 }
598 579
599 /* use recommended ref_div for ss */ 580 dp_clock = dig_connector->dp_clock;
600 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 581 }
601 if (ss_enabled) { 582 }
602 if (ss->refdiv) {
603 pll->flags |= RADEON_PLL_USE_REF_DIV;
604 pll->reference_div = ss->refdiv;
605 if (ASIC_IS_AVIVO(rdev))
606 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
607 }
608 }
609 }
610 583
611 if (ASIC_IS_AVIVO(rdev)) { 584 /* use recommended ref_div for ss */
612 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 585 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
613 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 586 if (radeon_crtc->ss_enabled) {
614 adjusted_clock = mode->clock * 2; 587 if (radeon_crtc->ss.refdiv) {
615 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 588 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
616 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 589 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
617 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 590 if (ASIC_IS_AVIVO(rdev))
618 pll->flags |= RADEON_PLL_IS_LCD; 591 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
619 } else {
620 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
621 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
622 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
623 pll->flags |= RADEON_PLL_USE_REF_DIV;
624 } 592 }
625 break;
626 } 593 }
627 } 594 }
628 595
596 if (ASIC_IS_AVIVO(rdev)) {
597 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
598 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
599 adjusted_clock = mode->clock * 2;
600 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
601 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
602 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
603 radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
604 } else {
605 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
606 radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
607 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
608 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
609 }
610
629 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock 611 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
630 * accordingly based on the encoder/transmitter to work around 612 * accordingly based on the encoder/transmitter to work around
631 * special hw requirements. 613 * special hw requirements.
@@ -650,7 +632,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
650 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 632 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
651 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 633 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
652 args.v1.ucEncodeMode = encoder_mode; 634 args.v1.ucEncodeMode = encoder_mode;
653 if (ss_enabled && ss->percentage) 635 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
654 args.v1.ucConfig |= 636 args.v1.ucConfig |=
655 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 637 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
656 638
@@ -663,7 +645,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
663 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 645 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
664 args.v3.sInput.ucEncodeMode = encoder_mode; 646 args.v3.sInput.ucEncodeMode = encoder_mode;
665 args.v3.sInput.ucDispPllConfig = 0; 647 args.v3.sInput.ucDispPllConfig = 0;
666 if (ss_enabled && ss->percentage) 648 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
667 args.v3.sInput.ucDispPllConfig |= 649 args.v3.sInput.ucDispPllConfig |=
668 DISPPLL_CONFIG_SS_ENABLE; 650 DISPPLL_CONFIG_SS_ENABLE;
669 if (ENCODER_MODE_IS_DP(encoder_mode)) { 651 if (ENCODER_MODE_IS_DP(encoder_mode)) {
@@ -695,14 +677,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
695 index, (uint32_t *)&args); 677 index, (uint32_t *)&args);
696 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 678 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
697 if (args.v3.sOutput.ucRefDiv) { 679 if (args.v3.sOutput.ucRefDiv) {
698 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; 680 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
699 pll->flags |= RADEON_PLL_USE_REF_DIV; 681 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
700 pll->reference_div = args.v3.sOutput.ucRefDiv; 682 radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
701 } 683 }
702 if (args.v3.sOutput.ucPostDiv) { 684 if (args.v3.sOutput.ucPostDiv) {
703 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; 685 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
704 pll->flags |= RADEON_PLL_USE_POST_DIV; 686 radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
705 pll->post_div = args.v3.sOutput.ucPostDiv; 687 radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
706 } 688 }
707 break; 689 break;
708 default: 690 default:
@@ -837,7 +819,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
837 args.v3.ucFracFbDiv = frac_fb_div; 819 args.v3.ucFracFbDiv = frac_fb_div;
838 args.v3.ucPostDiv = post_div; 820 args.v3.ucPostDiv = post_div;
839 args.v3.ucPpll = pll_id; 821 args.v3.ucPpll = pll_id;
840 args.v3.ucMiscInfo = (pll_id << 2); 822 if (crtc_id == ATOM_CRTC2)
823 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
824 else
825 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
841 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 826 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
842 args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; 827 args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
843 args.v3.ucTransmitterId = encoder_id; 828 args.v3.ucTransmitterId = encoder_id;
@@ -907,58 +892,29 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
907 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 892 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
908} 893}
909 894
910static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 895static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
911{ 896{
912 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 897 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
913 struct drm_device *dev = crtc->dev; 898 struct drm_device *dev = crtc->dev;
914 struct radeon_device *rdev = dev->dev_private; 899 struct radeon_device *rdev = dev->dev_private;
915 struct drm_encoder *encoder = NULL; 900 struct radeon_encoder *radeon_encoder =
916 struct radeon_encoder *radeon_encoder = NULL; 901 to_radeon_encoder(radeon_crtc->encoder);
917 u32 pll_clock = mode->clock; 902 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
918 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
919 struct radeon_pll *pll;
920 u32 adjusted_clock;
921 int encoder_mode = 0;
922 struct radeon_atom_ss ss;
923 bool ss_enabled = false;
924 int bpc = 8;
925 903
926 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 904 radeon_crtc->bpc = 8;
927 if (encoder->crtc == crtc) { 905 radeon_crtc->ss_enabled = false;
928 radeon_encoder = to_radeon_encoder(encoder);
929 encoder_mode = atombios_get_encoder_mode(encoder);
930 break;
931 }
932 }
933
934 if (!radeon_encoder)
935 return;
936
937 switch (radeon_crtc->pll_id) {
938 case ATOM_PPLL1:
939 pll = &rdev->clock.p1pll;
940 break;
941 case ATOM_PPLL2:
942 pll = &rdev->clock.p2pll;
943 break;
944 case ATOM_DCPLL:
945 case ATOM_PPLL_INVALID:
946 default:
947 pll = &rdev->clock.dcpll;
948 break;
949 }
950 906
951 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || 907 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
952 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { 908 (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
953 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 909 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
954 struct drm_connector *connector = 910 struct drm_connector *connector =
955 radeon_get_connector_for_encoder(encoder); 911 radeon_get_connector_for_encoder(radeon_crtc->encoder);
956 struct radeon_connector *radeon_connector = 912 struct radeon_connector *radeon_connector =
957 to_radeon_connector(connector); 913 to_radeon_connector(connector);
958 struct radeon_connector_atom_dig *dig_connector = 914 struct radeon_connector_atom_dig *dig_connector =
959 radeon_connector->con_priv; 915 radeon_connector->con_priv;
960 int dp_clock; 916 int dp_clock;
961 bpc = radeon_get_monitor_bpc(connector); 917 radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
962 918
963 switch (encoder_mode) { 919 switch (encoder_mode) {
964 case ATOM_ENCODER_MODE_DP_MST: 920 case ATOM_ENCODER_MODE_DP_MST:
@@ -966,45 +922,54 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
966 /* DP/eDP */ 922 /* DP/eDP */
967 dp_clock = dig_connector->dp_clock / 10; 923 dp_clock = dig_connector->dp_clock / 10;
968 if (ASIC_IS_DCE4(rdev)) 924 if (ASIC_IS_DCE4(rdev))
969 ss_enabled = 925 radeon_crtc->ss_enabled =
970 radeon_atombios_get_asic_ss_info(rdev, &ss, 926 radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
971 ASIC_INTERNAL_SS_ON_DP, 927 ASIC_INTERNAL_SS_ON_DP,
972 dp_clock); 928 dp_clock);
973 else { 929 else {
974 if (dp_clock == 16200) { 930 if (dp_clock == 16200) {
975 ss_enabled = 931 radeon_crtc->ss_enabled =
976 radeon_atombios_get_ppll_ss_info(rdev, &ss, 932 radeon_atombios_get_ppll_ss_info(rdev,
933 &radeon_crtc->ss,
977 ATOM_DP_SS_ID2); 934 ATOM_DP_SS_ID2);
978 if (!ss_enabled) 935 if (!radeon_crtc->ss_enabled)
979 ss_enabled = 936 radeon_crtc->ss_enabled =
980 radeon_atombios_get_ppll_ss_info(rdev, &ss, 937 radeon_atombios_get_ppll_ss_info(rdev,
938 &radeon_crtc->ss,
981 ATOM_DP_SS_ID1); 939 ATOM_DP_SS_ID1);
982 } else 940 } else
983 ss_enabled = 941 radeon_crtc->ss_enabled =
984 radeon_atombios_get_ppll_ss_info(rdev, &ss, 942 radeon_atombios_get_ppll_ss_info(rdev,
943 &radeon_crtc->ss,
985 ATOM_DP_SS_ID1); 944 ATOM_DP_SS_ID1);
986 } 945 }
987 break; 946 break;
988 case ATOM_ENCODER_MODE_LVDS: 947 case ATOM_ENCODER_MODE_LVDS:
989 if (ASIC_IS_DCE4(rdev)) 948 if (ASIC_IS_DCE4(rdev))
990 ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, 949 radeon_crtc->ss_enabled =
991 dig->lcd_ss_id, 950 radeon_atombios_get_asic_ss_info(rdev,
992 mode->clock / 10); 951 &radeon_crtc->ss,
952 dig->lcd_ss_id,
953 mode->clock / 10);
993 else 954 else
994 ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, 955 radeon_crtc->ss_enabled =
995 dig->lcd_ss_id); 956 radeon_atombios_get_ppll_ss_info(rdev,
957 &radeon_crtc->ss,
958 dig->lcd_ss_id);
996 break; 959 break;
997 case ATOM_ENCODER_MODE_DVI: 960 case ATOM_ENCODER_MODE_DVI:
998 if (ASIC_IS_DCE4(rdev)) 961 if (ASIC_IS_DCE4(rdev))
999 ss_enabled = 962 radeon_crtc->ss_enabled =
1000 radeon_atombios_get_asic_ss_info(rdev, &ss, 963 radeon_atombios_get_asic_ss_info(rdev,
964 &radeon_crtc->ss,
1001 ASIC_INTERNAL_SS_ON_TMDS, 965 ASIC_INTERNAL_SS_ON_TMDS,
1002 mode->clock / 10); 966 mode->clock / 10);
1003 break; 967 break;
1004 case ATOM_ENCODER_MODE_HDMI: 968 case ATOM_ENCODER_MODE_HDMI:
1005 if (ASIC_IS_DCE4(rdev)) 969 if (ASIC_IS_DCE4(rdev))
1006 ss_enabled = 970 radeon_crtc->ss_enabled =
1007 radeon_atombios_get_asic_ss_info(rdev, &ss, 971 radeon_atombios_get_asic_ss_info(rdev,
972 &radeon_crtc->ss,
1008 ASIC_INTERNAL_SS_ON_HDMI, 973 ASIC_INTERNAL_SS_ON_HDMI,
1009 mode->clock / 10); 974 mode->clock / 10);
1010 break; 975 break;
@@ -1014,43 +979,80 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1014 } 979 }
1015 980
1016 /* adjust pixel clock as needed */ 981 /* adjust pixel clock as needed */
1017 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); 982 radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
983
984 return true;
985}
986
987static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
988{
989 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
990 struct drm_device *dev = crtc->dev;
991 struct radeon_device *rdev = dev->dev_private;
992 struct radeon_encoder *radeon_encoder =
993 to_radeon_encoder(radeon_crtc->encoder);
994 u32 pll_clock = mode->clock;
995 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
996 struct radeon_pll *pll;
997 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
998
999 switch (radeon_crtc->pll_id) {
1000 case ATOM_PPLL1:
1001 pll = &rdev->clock.p1pll;
1002 break;
1003 case ATOM_PPLL2:
1004 pll = &rdev->clock.p2pll;
1005 break;
1006 case ATOM_DCPLL:
1007 case ATOM_PPLL_INVALID:
1008 default:
1009 pll = &rdev->clock.dcpll;
1010 break;
1011 }
1012
1013 /* update pll params */
1014 pll->flags = radeon_crtc->pll_flags;
1015 pll->reference_div = radeon_crtc->pll_reference_div;
1016 pll->post_div = radeon_crtc->pll_post_div;
1018 1017
1019 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 1018 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1020 /* TV seems to prefer the legacy algo on some boards */ 1019 /* TV seems to prefer the legacy algo on some boards */
1021 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1020 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
1022 &ref_div, &post_div); 1021 &fb_div, &frac_fb_div, &ref_div, &post_div);
1023 else if (ASIC_IS_AVIVO(rdev)) 1022 else if (ASIC_IS_AVIVO(rdev))
1024 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1023 radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
1025 &ref_div, &post_div); 1024 &fb_div, &frac_fb_div, &ref_div, &post_div);
1026 else 1025 else
1027 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1026 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
1028 &ref_div, &post_div); 1027 &fb_div, &frac_fb_div, &ref_div, &post_div);
1029 1028
1030 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); 1029 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
1030 radeon_crtc->crtc_id, &radeon_crtc->ss);
1031 1031
1032 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1032 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1033 encoder_mode, radeon_encoder->encoder_id, mode->clock, 1033 encoder_mode, radeon_encoder->encoder_id, mode->clock,
1034 ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss); 1034 ref_div, fb_div, frac_fb_div, post_div,
1035 radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
1035 1036
1036 if (ss_enabled) { 1037 if (radeon_crtc->ss_enabled) {
1037 /* calculate ss amount and step size */ 1038 /* calculate ss amount and step size */
1038 if (ASIC_IS_DCE4(rdev)) { 1039 if (ASIC_IS_DCE4(rdev)) {
1039 u32 step_size; 1040 u32 step_size;
1040 u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; 1041 u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
1041 ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; 1042 radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
1042 ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & 1043 radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
1043 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; 1044 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
1044 if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) 1045 if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
1045 step_size = (4 * amount * ref_div * (ss.rate * 2048)) / 1046 step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
1046 (125 * 25 * pll->reference_freq / 100); 1047 (125 * 25 * pll->reference_freq / 100);
1047 else 1048 else
1048 step_size = (2 * amount * ref_div * (ss.rate * 2048)) / 1049 step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
1049 (125 * 25 * pll->reference_freq / 100); 1050 (125 * 25 * pll->reference_freq / 100);
1050 ss.step = step_size; 1051 radeon_crtc->ss.step = step_size;
1051 } 1052 }
1052 1053
1053 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); 1054 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
1055 radeon_crtc->crtc_id, &radeon_crtc->ss);
1054 } 1056 }
1055} 1057}
1056 1058
@@ -1479,85 +1481,251 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
1479 } 1481 }
1480} 1482}
1481 1483
1484/**
1485 * radeon_get_pll_use_mask - look up a mask of which pplls are in use
1486 *
1487 * @crtc: drm crtc
1488 *
1489 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
1490 */
1491static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
1492{
1493 struct drm_device *dev = crtc->dev;
1494 struct drm_crtc *test_crtc;
1495 struct radeon_crtc *test_radeon_crtc;
1496 u32 pll_in_use = 0;
1497
1498 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1499 if (crtc == test_crtc)
1500 continue;
1501
1502 test_radeon_crtc = to_radeon_crtc(test_crtc);
1503 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1504 pll_in_use |= (1 << test_radeon_crtc->pll_id);
1505 }
1506 return pll_in_use;
1507}
1508
1509/**
1510 * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
1511 *
1512 * @crtc: drm crtc
1513 *
1514 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
1515 * also in DP mode. For DP, a single PPLL can be used for all DP
1516 * crtcs/encoders.
1517 */
1518static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
1519{
1520 struct drm_device *dev = crtc->dev;
1521 struct drm_crtc *test_crtc;
1522 struct radeon_crtc *test_radeon_crtc;
1523
1524 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1525 if (crtc == test_crtc)
1526 continue;
1527 test_radeon_crtc = to_radeon_crtc(test_crtc);
1528 if (test_radeon_crtc->encoder &&
1529 ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
1530 /* for DP use the same PLL for all */
1531 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1532 return test_radeon_crtc->pll_id;
1533 }
1534 }
1535 return ATOM_PPLL_INVALID;
1536}
1537
1538/**
1539 * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
1540 *
1541 * @crtc: drm crtc
1542 * @encoder: drm encoder
1543 *
1544 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
1545 * be shared (i.e., same clock).
1546 */
1547static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1548{
1549 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1550 struct drm_device *dev = crtc->dev;
1551 struct drm_crtc *test_crtc;
1552 struct radeon_crtc *test_radeon_crtc;
1553 u32 adjusted_clock, test_adjusted_clock;
1554
1555 adjusted_clock = radeon_crtc->adjusted_clock;
1556
1557 if (adjusted_clock == 0)
1558 return ATOM_PPLL_INVALID;
1559
1560 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1561 if (crtc == test_crtc)
1562 continue;
1563 test_radeon_crtc = to_radeon_crtc(test_crtc);
1564 if (test_radeon_crtc->encoder &&
1565 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
1566 /* check if we are already driving this connector with another crtc */
1567 if (test_radeon_crtc->connector == radeon_crtc->connector) {
1568 /* if we are, return that pll */
1569 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1570 return test_radeon_crtc->pll_id;
1571 }
1572 /* for non-DP check the clock */
1573 test_adjusted_clock = test_radeon_crtc->adjusted_clock;
1574 if ((crtc->mode.clock == test_crtc->mode.clock) &&
1575 (adjusted_clock == test_adjusted_clock) &&
1576 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
1577 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
1578 return test_radeon_crtc->pll_id;
1579 }
1580 }
1581 return ATOM_PPLL_INVALID;
1582}
1583
1584/**
1585 * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
1586 *
1587 * @crtc: drm crtc
1588 *
1589 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
1590 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
1591 * monitors a dedicated PPLL must be used. If a particular board has
1592 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1593 * as there is no need to program the PLL itself. If we are not able to
1594 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1595 * avoid messing up an existing monitor.
1596 *
1597 * Asic specific PLL information
1598 *
1599 * DCE 6.1
1600 * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
1601 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
1602 *
1603 * DCE 6.0
1604 * - PPLL0 is available to all UNIPHY (DP only)
1605 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1606 *
1607 * DCE 5.0
1608 * - DCPLL is available to all UNIPHY (DP only)
1609 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1610 *
1611 * DCE 3.0/4.0/4.1
1612 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1613 *
1614 */
1482static int radeon_atom_pick_pll(struct drm_crtc *crtc) 1615static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1483{ 1616{
1484 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1617 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1485 struct drm_device *dev = crtc->dev; 1618 struct drm_device *dev = crtc->dev;
1486 struct radeon_device *rdev = dev->dev_private; 1619 struct radeon_device *rdev = dev->dev_private;
1487 struct drm_encoder *test_encoder; 1620 struct radeon_encoder *radeon_encoder =
1488 struct drm_crtc *test_crtc; 1621 to_radeon_encoder(radeon_crtc->encoder);
1489 uint32_t pll_in_use = 0; 1622 u32 pll_in_use;
1623 int pll;
1490 1624
1491 if (ASIC_IS_DCE61(rdev)) { 1625 if (ASIC_IS_DCE61(rdev)) {
1492 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { 1626 struct radeon_encoder_atom_dig *dig =
1493 if (test_encoder->crtc && (test_encoder->crtc == crtc)) { 1627 radeon_encoder->enc_priv;
1494 struct radeon_encoder *test_radeon_encoder = 1628
1495 to_radeon_encoder(test_encoder); 1629 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
1496 struct radeon_encoder_atom_dig *dig = 1630 (dig->linkb == false))
1497 test_radeon_encoder->enc_priv; 1631 /* UNIPHY A uses PPLL2 */
1498 1632 return ATOM_PPLL2;
1499 if ((test_radeon_encoder->encoder_id == 1633 else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1500 ENCODER_OBJECT_ID_INTERNAL_UNIPHY) && 1634 /* UNIPHY B/C/D/E/F */
1501 (dig->linkb == false)) /* UNIPHY A uses PPLL2 */ 1635 if (rdev->clock.dp_extclk)
1502 return ATOM_PPLL2; 1636 /* skip PPLL programming if using ext clock */
1637 return ATOM_PPLL_INVALID;
1638 else {
1639 /* use the same PPLL for all DP monitors */
1640 pll = radeon_get_shared_dp_ppll(crtc);
1641 if (pll != ATOM_PPLL_INVALID)
1642 return pll;
1503 } 1643 }
1644 } else {
1645 /* use the same PPLL for all monitors with the same clock */
1646 pll = radeon_get_shared_nondp_ppll(crtc);
1647 if (pll != ATOM_PPLL_INVALID)
1648 return pll;
1504 } 1649 }
1505 /* UNIPHY B/C/D/E/F */ 1650 /* UNIPHY B/C/D/E/F */
1506 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { 1651 pll_in_use = radeon_get_pll_use_mask(crtc);
1507 struct radeon_crtc *radeon_test_crtc; 1652 if (!(pll_in_use & (1 << ATOM_PPLL0)))
1508
1509 if (crtc == test_crtc)
1510 continue;
1511
1512 radeon_test_crtc = to_radeon_crtc(test_crtc);
1513 if ((radeon_test_crtc->pll_id == ATOM_PPLL0) ||
1514 (radeon_test_crtc->pll_id == ATOM_PPLL1))
1515 pll_in_use |= (1 << radeon_test_crtc->pll_id);
1516 }
1517 if (!(pll_in_use & 4))
1518 return ATOM_PPLL0; 1653 return ATOM_PPLL0;
1519 return ATOM_PPLL1; 1654 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1655 return ATOM_PPLL1;
1656 DRM_ERROR("unable to allocate a PPLL\n");
1657 return ATOM_PPLL_INVALID;
1520 } else if (ASIC_IS_DCE4(rdev)) { 1658 } else if (ASIC_IS_DCE4(rdev)) {
1521 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { 1659 /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
1522 if (test_encoder->crtc && (test_encoder->crtc == crtc)) { 1660 * depending on the asic:
1523 /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, 1661 * DCE4: PPLL or ext clock
1524 * depending on the asic: 1662 * DCE5: PPLL, DCPLL, or ext clock
1525 * DCE4: PPLL or ext clock 1663 * DCE6: PPLL, PPLL0, or ext clock
1526 * DCE5: DCPLL or ext clock 1664 *
1527 * 1665 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
1528 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip 1666 * PPLL/DCPLL programming and only program the DP DTO for the
1529 * PPLL/DCPLL programming and only program the DP DTO for the 1667 * crtc virtual pixel clock.
1530 * crtc virtual pixel clock. 1668 */
1531 */ 1669 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1532 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { 1670 if (rdev->clock.dp_extclk)
1533 if (rdev->clock.dp_extclk) 1671 /* skip PPLL programming if using ext clock */
1534 return ATOM_PPLL_INVALID; 1672 return ATOM_PPLL_INVALID;
1535 else if (ASIC_IS_DCE6(rdev)) 1673 else if (ASIC_IS_DCE6(rdev))
1536 return ATOM_PPLL0; 1674 /* use PPLL0 for all DP */
1537 else if (ASIC_IS_DCE5(rdev)) 1675 return ATOM_PPLL0;
1538 return ATOM_DCPLL; 1676 else if (ASIC_IS_DCE5(rdev))
1539 } 1677 /* use DCPLL for all DP */
1678 return ATOM_DCPLL;
1679 else {
1680 /* use the same PPLL for all DP monitors */
1681 pll = radeon_get_shared_dp_ppll(crtc);
1682 if (pll != ATOM_PPLL_INVALID)
1683 return pll;
1540 } 1684 }
1685 } else {
1686 /* use the same PPLL for all monitors with the same clock */
1687 pll = radeon_get_shared_nondp_ppll(crtc);
1688 if (pll != ATOM_PPLL_INVALID)
1689 return pll;
1541 } 1690 }
1542 1691 /* all other cases */
1543 /* otherwise, pick one of the plls */ 1692 pll_in_use = radeon_get_pll_use_mask(crtc);
1544 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { 1693 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1545 struct radeon_crtc *radeon_test_crtc; 1694 return ATOM_PPLL2;
1546 1695 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1547 if (crtc == test_crtc)
1548 continue;
1549
1550 radeon_test_crtc = to_radeon_crtc(test_crtc);
1551 if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
1552 (radeon_test_crtc->pll_id <= ATOM_PPLL2))
1553 pll_in_use |= (1 << radeon_test_crtc->pll_id);
1554 }
1555 if (!(pll_in_use & 1))
1556 return ATOM_PPLL1; 1696 return ATOM_PPLL1;
1557 return ATOM_PPLL2; 1697 DRM_ERROR("unable to allocate a PPLL\n");
1558 } else 1698 return ATOM_PPLL_INVALID;
1559 return radeon_crtc->crtc_id; 1699 } else {
1560 1700 if (ASIC_IS_AVIVO(rdev)) {
1701 /* in DP mode, the DP ref clock can come from either PPLL
1702 * depending on the asic:
1703 * DCE3: PPLL1 or PPLL2
1704 */
1705 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1706 /* use the same PPLL for all DP monitors */
1707 pll = radeon_get_shared_dp_ppll(crtc);
1708 if (pll != ATOM_PPLL_INVALID)
1709 return pll;
1710 } else {
1711 /* use the same PPLL for all monitors with the same clock */
1712 pll = radeon_get_shared_nondp_ppll(crtc);
1713 if (pll != ATOM_PPLL_INVALID)
1714 return pll;
1715 }
1716 /* all other cases */
1717 pll_in_use = radeon_get_pll_use_mask(crtc);
1718 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1719 return ATOM_PPLL2;
1720 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1721 return ATOM_PPLL1;
1722 DRM_ERROR("unable to allocate a PPLL\n");
1723 return ATOM_PPLL_INVALID;
1724 } else {
1725 /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
1726 return radeon_crtc->crtc_id;
1727 }
1728 }
1561} 1729}
1562 1730
1563void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev) 1731void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
@@ -1588,18 +1756,13 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1588 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1756 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1589 struct drm_device *dev = crtc->dev; 1757 struct drm_device *dev = crtc->dev;
1590 struct radeon_device *rdev = dev->dev_private; 1758 struct radeon_device *rdev = dev->dev_private;
1591 struct drm_encoder *encoder; 1759 struct radeon_encoder *radeon_encoder =
1760 to_radeon_encoder(radeon_crtc->encoder);
1592 bool is_tvcv = false; 1761 bool is_tvcv = false;
1593 1762
1594 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1763 if (radeon_encoder->active_device &
1595 /* find tv std */ 1764 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1596 if (encoder->crtc == crtc) { 1765 is_tvcv = true;
1597 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1598 if (radeon_encoder->active_device &
1599 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1600 is_tvcv = true;
1601 }
1602 }
1603 1766
1604 atombios_crtc_set_pll(crtc, adjusted_mode); 1767 atombios_crtc_set_pll(crtc, adjusted_mode);
1605 1768
@@ -1626,8 +1789,34 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1626 const struct drm_display_mode *mode, 1789 const struct drm_display_mode *mode,
1627 struct drm_display_mode *adjusted_mode) 1790 struct drm_display_mode *adjusted_mode)
1628{ 1791{
1792 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1793 struct drm_device *dev = crtc->dev;
1794 struct drm_encoder *encoder;
1795
1796 /* assign the encoder to the radeon crtc to avoid repeated lookups later */
1797 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1798 if (encoder->crtc == crtc) {
1799 radeon_crtc->encoder = encoder;
1800 radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
1801 break;
1802 }
1803 }
1804 if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
1805 radeon_crtc->encoder = NULL;
1806 radeon_crtc->connector = NULL;
1807 return false;
1808 }
1629 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1809 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1630 return false; 1810 return false;
1811 if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
1812 return false;
1813 /* pick pll */
1814 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1815 /* if we can't get a PPLL for a non-DP encoder, fail */
1816 if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
1817 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
1818 return false;
1819
1631 return true; 1820 return true;
1632} 1821}
1633 1822
@@ -1638,8 +1827,6 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
1638 struct radeon_device *rdev = dev->dev_private; 1827 struct radeon_device *rdev = dev->dev_private;
1639 1828
1640 radeon_crtc->in_mode_set = true; 1829 radeon_crtc->in_mode_set = true;
1641 /* pick pll */
1642 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1643 1830
1644 /* disable crtc pair power gating before programming */ 1831 /* disable crtc pair power gating before programming */
1645 if (ASIC_IS_DCE6(rdev)) 1832 if (ASIC_IS_DCE6(rdev))
@@ -1697,7 +1884,10 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1697 break; 1884 break;
1698 } 1885 }
1699done: 1886done:
1700 radeon_crtc->pll_id = -1; 1887 radeon_crtc->pll_id = ATOM_PPLL_INVALID;
1888 radeon_crtc->adjusted_clock = 0;
1889 radeon_crtc->encoder = NULL;
1890 radeon_crtc->connector = NULL;
1701} 1891}
1702 1892
1703static const struct drm_crtc_helper_funcs atombios_helper_funcs = { 1893static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -1746,6 +1936,9 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
1746 else 1936 else
1747 radeon_crtc->crtc_offset = 0; 1937 radeon_crtc->crtc_offset = 0;
1748 } 1938 }
1749 radeon_crtc->pll_id = -1; 1939 radeon_crtc->pll_id = ATOM_PPLL_INVALID;
1940 radeon_crtc->adjusted_clock = 0;
1941 radeon_crtc->encoder = NULL;
1942 radeon_crtc->connector = NULL;
1750 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); 1943 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
1751} 1944}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 6e8803a1170c..806cbcc94fdd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,9 +28,251 @@
28#include "radeon_drm.h" 28#include "radeon_drm.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "atom.h" 30#include "atom.h"
31#include <linux/backlight.h>
31 32
32extern int atom_debug; 33extern int atom_debug;
33 34
35static u8
36radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
37{
38 u8 backlight_level;
39 u32 bios_2_scratch;
40
41 if (rdev->family >= CHIP_R600)
42 bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
43 else
44 bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
45
46 backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
47 ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
48
49 return backlight_level;
50}
51
52static void
53radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
54 u8 backlight_level)
55{
56 u32 bios_2_scratch;
57
58 if (rdev->family >= CHIP_R600)
59 bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
60 else
61 bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
62
63 bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
64 bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
65 ATOM_S2_CURRENT_BL_LEVEL_MASK);
66
67 if (rdev->family >= CHIP_R600)
68 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
69 else
70 WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
71}
72
73u8
74atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
75{
76 struct drm_device *dev = radeon_encoder->base.dev;
77 struct radeon_device *rdev = dev->dev_private;
78
79 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
80 return 0;
81
82 return radeon_atom_get_backlight_level_from_reg(rdev);
83}
84
85void
86atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
87{
88 struct drm_encoder *encoder = &radeon_encoder->base;
89 struct drm_device *dev = radeon_encoder->base.dev;
90 struct radeon_device *rdev = dev->dev_private;
91 struct radeon_encoder_atom_dig *dig;
92 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
93 int index;
94
95 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
96 return;
97
98 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
99 radeon_encoder->enc_priv) {
100 dig = radeon_encoder->enc_priv;
101 dig->backlight_level = level;
102 radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
103
104 switch (radeon_encoder->encoder_id) {
105 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
106 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
107 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
108 if (dig->backlight_level == 0) {
109 args.ucAction = ATOM_LCD_BLOFF;
110 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
111 } else {
112 args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
113 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
114 args.ucAction = ATOM_LCD_BLON;
115 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
116 }
117 break;
118 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
119 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
120 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
121 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
122 if (dig->backlight_level == 0)
123 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
124 else {
125 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
126 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
127 }
128 break;
129 default:
130 break;
131 }
132 }
133}
134
135#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
136
137static u8 radeon_atom_bl_level(struct backlight_device *bd)
138{
139 u8 level;
140
141 /* Convert brightness to hardware level */
142 if (bd->props.brightness < 0)
143 level = 0;
144 else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
145 level = RADEON_MAX_BL_LEVEL;
146 else
147 level = bd->props.brightness;
148
149 return level;
150}
151
152static int radeon_atom_backlight_update_status(struct backlight_device *bd)
153{
154 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
155 struct radeon_encoder *radeon_encoder = pdata->encoder;
156
157 atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
158
159 return 0;
160}
161
162static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
163{
164 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
165 struct radeon_encoder *radeon_encoder = pdata->encoder;
166 struct drm_device *dev = radeon_encoder->base.dev;
167 struct radeon_device *rdev = dev->dev_private;
168
169 return radeon_atom_get_backlight_level_from_reg(rdev);
170}
171
172static const struct backlight_ops radeon_atom_backlight_ops = {
173 .get_brightness = radeon_atom_backlight_get_brightness,
174 .update_status = radeon_atom_backlight_update_status,
175};
176
177void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
178 struct drm_connector *drm_connector)
179{
180 struct drm_device *dev = radeon_encoder->base.dev;
181 struct radeon_device *rdev = dev->dev_private;
182 struct backlight_device *bd;
183 struct backlight_properties props;
184 struct radeon_backlight_privdata *pdata;
185 struct radeon_encoder_atom_dig *dig;
186 u8 backlight_level;
187
188 if (!radeon_encoder->enc_priv)
189 return;
190
191 if (!rdev->is_atom_bios)
192 return;
193
194 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
195 return;
196
197 pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
198 if (!pdata) {
199 DRM_ERROR("Memory allocation failed\n");
200 goto error;
201 }
202
203 memset(&props, 0, sizeof(props));
204 props.max_brightness = RADEON_MAX_BL_LEVEL;
205 props.type = BACKLIGHT_RAW;
206 bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
207 pdata, &radeon_atom_backlight_ops, &props);
208 if (IS_ERR(bd)) {
209 DRM_ERROR("Backlight registration failed\n");
210 goto error;
211 }
212
213 pdata->encoder = radeon_encoder;
214
215 backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
216
217 dig = radeon_encoder->enc_priv;
218 dig->bl_dev = bd;
219
220 bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
221 bd->props.power = FB_BLANK_UNBLANK;
222 backlight_update_status(bd);
223
224 DRM_INFO("radeon atom DIG backlight initialized\n");
225
226 return;
227
228error:
229 kfree(pdata);
230 return;
231}
232
233static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
234{
235 struct drm_device *dev = radeon_encoder->base.dev;
236 struct radeon_device *rdev = dev->dev_private;
237 struct backlight_device *bd = NULL;
238 struct radeon_encoder_atom_dig *dig;
239
240 if (!radeon_encoder->enc_priv)
241 return;
242
243 if (!rdev->is_atom_bios)
244 return;
245
246 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
247 return;
248
249 dig = radeon_encoder->enc_priv;
250 bd = dig->bl_dev;
251 dig->bl_dev = NULL;
252
253 if (bd) {
254 struct radeon_legacy_backlight_privdata *pdata;
255
256 pdata = bl_get_data(bd);
257 backlight_device_unregister(bd);
258 kfree(pdata);
259
260 DRM_INFO("radeon atom LVDS backlight unloaded\n");
261 }
262}
263
264#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
265
266void radeon_atom_backlight_init(struct radeon_encoder *encoder)
267{
268}
269
270static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
271{
272}
273
274#endif
275
34/* evil but including atombios.h is much worse */ 276/* evil but including atombios.h is much worse */
35bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, 277bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
36 struct drm_display_mode *mode); 278 struct drm_display_mode *mode);
@@ -209,6 +451,32 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
209 451
210} 452}
211 453
454static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
455{
456 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
457 int bpc = 8;
458
459 if (connector)
460 bpc = radeon_get_monitor_bpc(connector);
461
462 switch (bpc) {
463 case 0:
464 return PANEL_BPC_UNDEFINE;
465 case 6:
466 return PANEL_6BIT_PER_COLOR;
467 case 8:
468 default:
469 return PANEL_8BIT_PER_COLOR;
470 case 10:
471 return PANEL_10BIT_PER_COLOR;
472 case 12:
473 return PANEL_12BIT_PER_COLOR;
474 case 16:
475 return PANEL_16BIT_PER_COLOR;
476 }
477}
478
479
212union dvo_encoder_control { 480union dvo_encoder_control {
213 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; 481 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
214 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; 482 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
@@ -406,7 +674,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
406 return ATOM_ENCODER_MODE_DP; 674 return ATOM_ENCODER_MODE_DP;
407 675
408 /* DVO is always DVO */ 676 /* DVO is always DVO */
409 if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) 677 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
678 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
410 return ATOM_ENCODER_MODE_DVO; 679 return ATOM_ENCODER_MODE_DVO;
411 680
412 connector = radeon_get_connector_for_encoder(encoder); 681 connector = radeon_get_connector_for_encoder(encoder);
@@ -535,7 +804,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
535 int dp_clock = 0; 804 int dp_clock = 0;
536 int dp_lane_count = 0; 805 int dp_lane_count = 0;
537 int hpd_id = RADEON_HPD_NONE; 806 int hpd_id = RADEON_HPD_NONE;
538 int bpc = 8;
539 807
540 if (connector) { 808 if (connector) {
541 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 809 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -545,7 +813,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
545 dp_clock = dig_connector->dp_clock; 813 dp_clock = dig_connector->dp_clock;
546 dp_lane_count = dig_connector->dp_lane_count; 814 dp_lane_count = dig_connector->dp_lane_count;
547 hpd_id = radeon_connector->hpd.hpd; 815 hpd_id = radeon_connector->hpd.hpd;
548 bpc = radeon_get_monitor_bpc(connector);
549 } 816 }
550 817
551 /* no dig encoder assigned */ 818 /* no dig encoder assigned */
@@ -612,37 +879,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
612 else 879 else
613 args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder); 880 args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
614 881
615 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) 882 if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
616 args.v3.ucLaneNum = dp_lane_count; 883 args.v3.ucLaneNum = dp_lane_count;
617 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) 884 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
618 args.v3.ucLaneNum = 8; 885 args.v3.ucLaneNum = 8;
619 else 886 else
620 args.v3.ucLaneNum = 4; 887 args.v3.ucLaneNum = 4;
621 888
622 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) 889 if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
623 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; 890 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
624 args.v3.acConfig.ucDigSel = dig->dig_encoder; 891 args.v3.acConfig.ucDigSel = dig->dig_encoder;
625 switch (bpc) { 892 args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
626 case 0:
627 args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
628 break;
629 case 6:
630 args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
631 break;
632 case 8:
633 default:
634 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
635 break;
636 case 10:
637 args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
638 break;
639 case 12:
640 args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
641 break;
642 case 16:
643 args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
644 break;
645 }
646 break; 893 break;
647 case 4: 894 case 4:
648 args.v4.ucAction = action; 895 args.v4.ucAction = action;
@@ -652,41 +899,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
652 else 899 else
653 args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder); 900 args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
654 901
655 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) 902 if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
656 args.v4.ucLaneNum = dp_lane_count; 903 args.v4.ucLaneNum = dp_lane_count;
657 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) 904 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
658 args.v4.ucLaneNum = 8; 905 args.v4.ucLaneNum = 8;
659 else 906 else
660 args.v4.ucLaneNum = 4; 907 args.v4.ucLaneNum = 4;
661 908
662 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) { 909 if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
663 if (dp_clock == 270000) 910 if (dp_clock == 270000)
664 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; 911 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
665 else if (dp_clock == 540000) 912 else if (dp_clock == 540000)
666 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; 913 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
667 } 914 }
668 args.v4.acConfig.ucDigSel = dig->dig_encoder; 915 args.v4.acConfig.ucDigSel = dig->dig_encoder;
669 switch (bpc) { 916 args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
670 case 0:
671 args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
672 break;
673 case 6:
674 args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
675 break;
676 case 8:
677 default:
678 args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
679 break;
680 case 10:
681 args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
682 break;
683 case 12:
684 args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
685 break;
686 case 16:
687 args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
688 break;
689 }
690 if (hpd_id == RADEON_HPD_NONE) 917 if (hpd_id == RADEON_HPD_NONE)
691 args.v4.ucHPD_ID = 0; 918 args.v4.ucHPD_ID = 0;
692 else 919 else
@@ -799,8 +1026,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
799 args.v1.asMode.ucLaneSet = lane_set; 1026 args.v1.asMode.ucLaneSet = lane_set;
800 } else { 1027 } else {
801 if (is_dp) 1028 if (is_dp)
802 args.v1.usPixelClock = 1029 args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
803 cpu_to_le16(dp_clock / 10);
804 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) 1030 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
805 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 1031 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
806 else 1032 else
@@ -857,8 +1083,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
857 args.v2.asMode.ucLaneSet = lane_set; 1083 args.v2.asMode.ucLaneSet = lane_set;
858 } else { 1084 } else {
859 if (is_dp) 1085 if (is_dp)
860 args.v2.usPixelClock = 1086 args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
861 cpu_to_le16(dp_clock / 10);
862 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) 1087 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
863 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 1088 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
864 else 1089 else
@@ -900,8 +1125,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
900 args.v3.asMode.ucLaneSet = lane_set; 1125 args.v3.asMode.ucLaneSet = lane_set;
901 } else { 1126 } else {
902 if (is_dp) 1127 if (is_dp)
903 args.v3.usPixelClock = 1128 args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
904 cpu_to_le16(dp_clock / 10);
905 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) 1129 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
906 args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 1130 args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
907 else 1131 else
@@ -960,8 +1184,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
960 args.v4.asMode.ucLaneSet = lane_set; 1184 args.v4.asMode.ucLaneSet = lane_set;
961 } else { 1185 } else {
962 if (is_dp) 1186 if (is_dp)
963 args.v4.usPixelClock = 1187 args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
964 cpu_to_le16(dp_clock / 10);
965 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) 1188 else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
966 args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 1189 args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
967 else 1190 else
@@ -1147,7 +1370,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1147 int dp_lane_count = 0; 1370 int dp_lane_count = 0;
1148 int connector_object_id = 0; 1371 int connector_object_id = 0;
1149 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; 1372 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
1150 int bpc = 8;
1151 1373
1152 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) 1374 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
1153 connector = radeon_get_connector_for_encoder_init(encoder); 1375 connector = radeon_get_connector_for_encoder_init(encoder);
@@ -1163,7 +1385,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1163 dp_lane_count = dig_connector->dp_lane_count; 1385 dp_lane_count = dig_connector->dp_lane_count;
1164 connector_object_id = 1386 connector_object_id =
1165 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 1387 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1166 bpc = radeon_get_monitor_bpc(connector);
1167 } 1388 }
1168 1389
1169 memset(&args, 0, sizeof(args)); 1390 memset(&args, 0, sizeof(args));
@@ -1221,27 +1442,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1221 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; 1442 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
1222 break; 1443 break;
1223 } 1444 }
1224 switch (bpc) { 1445 args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
1225 case 0:
1226 args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
1227 break;
1228 case 6:
1229 args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
1230 break;
1231 case 8:
1232 default:
1233 args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
1234 break;
1235 case 10:
1236 args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
1237 break;
1238 case 12:
1239 args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
1240 break;
1241 case 16:
1242 args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
1243 break;
1244 }
1245 break; 1446 break;
1246 default: 1447 default:
1247 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1448 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
@@ -2286,6 +2487,8 @@ static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
2286void radeon_enc_destroy(struct drm_encoder *encoder) 2487void radeon_enc_destroy(struct drm_encoder *encoder)
2287{ 2488{
2288 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2489 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2490 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2491 radeon_atom_backlight_exit(radeon_encoder);
2289 kfree(radeon_encoder->enc_priv); 2492 kfree(radeon_encoder->enc_priv);
2290 drm_encoder_cleanup(encoder); 2493 drm_encoder_cleanup(encoder);
2291 kfree(radeon_encoder); 2494 kfree(radeon_encoder);
@@ -2295,7 +2498,7 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
2295 .destroy = radeon_enc_destroy, 2498 .destroy = radeon_enc_destroy,
2296}; 2499};
2297 2500
2298struct radeon_encoder_atom_dac * 2501static struct radeon_encoder_atom_dac *
2299radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) 2502radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
2300{ 2503{
2301 struct drm_device *dev = radeon_encoder->base.dev; 2504 struct drm_device *dev = radeon_encoder->base.dev;
@@ -2309,7 +2512,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
2309 return dac; 2512 return dac;
2310} 2513}
2311 2514
2312struct radeon_encoder_atom_dig * 2515static struct radeon_encoder_atom_dig *
2313radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) 2516radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
2314{ 2517{
2315 int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; 2518 int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e93b80a6d4e9..c4ded396b78d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -37,6 +37,16 @@
37#define EVERGREEN_PFP_UCODE_SIZE 1120 37#define EVERGREEN_PFP_UCODE_SIZE 1120
38#define EVERGREEN_PM4_UCODE_SIZE 1376 38#define EVERGREEN_PM4_UCODE_SIZE 1376
39 39
40static const u32 crtc_offsets[6] =
41{
42 EVERGREEN_CRTC0_REGISTER_OFFSET,
43 EVERGREEN_CRTC1_REGISTER_OFFSET,
44 EVERGREEN_CRTC2_REGISTER_OFFSET,
45 EVERGREEN_CRTC3_REGISTER_OFFSET,
46 EVERGREEN_CRTC4_REGISTER_OFFSET,
47 EVERGREEN_CRTC5_REGISTER_OFFSET
48};
49
40static void evergreen_gpu_init(struct radeon_device *rdev); 50static void evergreen_gpu_init(struct radeon_device *rdev);
41void evergreen_fini(struct radeon_device *rdev); 51void evergreen_fini(struct radeon_device *rdev);
42void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 52void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
@@ -109,17 +119,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
109 */ 119 */
110void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) 120void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
111{ 121{
112 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
113 int i; 122 int i;
114 123
115 if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) { 124 if (crtc >= rdev->num_crtc)
125 return;
126
127 if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
116 for (i = 0; i < rdev->usec_timeout; i++) { 128 for (i = 0; i < rdev->usec_timeout; i++) {
117 if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)) 129 if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
118 break; 130 break;
119 udelay(1); 131 udelay(1);
120 } 132 }
121 for (i = 0; i < rdev->usec_timeout; i++) { 133 for (i = 0; i < rdev->usec_timeout; i++) {
122 if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK) 134 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
123 break; 135 break;
124 udelay(1); 136 udelay(1);
125 } 137 }
@@ -314,6 +326,64 @@ void sumo_pm_init_profile(struct radeon_device *rdev)
314} 326}
315 327
316/** 328/**
329 * btc_pm_init_profile - Initialize power profiles callback.
330 *
331 * @rdev: radeon_device pointer
332 *
333 * Initialize the power states used in profile mode
334 * (BTC, cayman).
335 * Used for profile mode only.
336 */
337void btc_pm_init_profile(struct radeon_device *rdev)
338{
339 int idx;
340
341 /* default */
342 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
343 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
344 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
345 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
346 /* starting with BTC, there is one state that is used for both
347 * MH and SH. Difference is that we always use the high clock index for
348 * mclk.
349 */
350 if (rdev->flags & RADEON_IS_MOBILITY)
351 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
352 else
353 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
354 /* low sh */
355 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
356 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
357 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
358 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
359 /* mid sh */
360 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
361 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
362 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
363 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
364 /* high sh */
365 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
366 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
367 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
368 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
369 /* low mh */
370 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
371 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
372 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
373 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
374 /* mid mh */
375 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
376 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
377 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
378 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
379 /* high mh */
380 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
381 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
382 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
383 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
384}
385
386/**
317 * evergreen_pm_misc - set additional pm hw parameters callback. 387 * evergreen_pm_misc - set additional pm hw parameters callback.
318 * 388 *
319 * @rdev: radeon_device pointer 389 * @rdev: radeon_device pointer
@@ -1109,7 +1179,7 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
1109 } 1179 }
1110} 1180}
1111 1181
1112int evergreen_pcie_gart_enable(struct radeon_device *rdev) 1182static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
1113{ 1183{
1114 u32 tmp; 1184 u32 tmp;
1115 int r; 1185 int r;
@@ -1168,7 +1238,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
1168 return 0; 1238 return 0;
1169} 1239}
1170 1240
1171void evergreen_pcie_gart_disable(struct radeon_device *rdev) 1241static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
1172{ 1242{
1173 u32 tmp; 1243 u32 tmp;
1174 1244
@@ -1193,7 +1263,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
1193 radeon_gart_table_vram_unpin(rdev); 1263 radeon_gart_table_vram_unpin(rdev);
1194} 1264}
1195 1265
1196void evergreen_pcie_gart_fini(struct radeon_device *rdev) 1266static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
1197{ 1267{
1198 evergreen_pcie_gart_disable(rdev); 1268 evergreen_pcie_gart_disable(rdev);
1199 radeon_gart_table_vram_free(rdev); 1269 radeon_gart_table_vram_free(rdev);
@@ -1201,7 +1271,7 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
1201} 1271}
1202 1272
1203 1273
1204void evergreen_agp_enable(struct radeon_device *rdev) 1274static void evergreen_agp_enable(struct radeon_device *rdev)
1205{ 1275{
1206 u32 tmp; 1276 u32 tmp;
1207 1277
@@ -1229,116 +1299,103 @@ void evergreen_agp_enable(struct radeon_device *rdev)
1229 1299
1230void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 1300void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
1231{ 1301{
1302 u32 crtc_enabled, tmp, frame_count, blackout;
1303 int i, j;
1304
1232 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 1305 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
1233 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); 1306 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
1234 1307
1235 /* Stop all video */ 1308 /* disable VGA render */
1236 WREG32(VGA_RENDER_CONTROL, 0); 1309 WREG32(VGA_RENDER_CONTROL, 0);
1237 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); 1310 /* blank the display controllers */
1238 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); 1311 for (i = 0; i < rdev->num_crtc; i++) {
1239 if (rdev->num_crtc >= 4) { 1312 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
1240 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); 1313 if (crtc_enabled) {
1241 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); 1314 save->crtc_enabled[i] = true;
1242 } 1315 if (ASIC_IS_DCE6(rdev)) {
1243 if (rdev->num_crtc >= 6) { 1316 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1244 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); 1317 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1245 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); 1318 radeon_wait_for_vblank(rdev, i);
1246 } 1319 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1247 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 1320 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1248 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 1321 }
1249 if (rdev->num_crtc >= 4) { 1322 } else {
1250 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 1323 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1251 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 1324 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1252 } 1325 radeon_wait_for_vblank(rdev, i);
1253 if (rdev->num_crtc >= 6) { 1326 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1254 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 1327 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1255 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 1328 }
1256 } 1329 }
1257 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 1330 /* wait for the next frame */
1258 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 1331 frame_count = radeon_get_vblank_counter(rdev, i);
1259 if (rdev->num_crtc >= 4) { 1332 for (j = 0; j < rdev->usec_timeout; j++) {
1260 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 1333 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1261 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 1334 break;
1262 } 1335 udelay(1);
1263 if (rdev->num_crtc >= 6) { 1336 }
1264 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 1337 }
1265 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1266 } 1338 }
1267 1339
1268 WREG32(D1VGA_CONTROL, 0); 1340 radeon_mc_wait_for_idle(rdev);
1269 WREG32(D2VGA_CONTROL, 0); 1341
1270 if (rdev->num_crtc >= 4) { 1342 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1271 WREG32(EVERGREEN_D3VGA_CONTROL, 0); 1343 if ((blackout & BLACKOUT_MODE_MASK) != 1) {
1272 WREG32(EVERGREEN_D4VGA_CONTROL, 0); 1344 /* Block CPU access */
1273 } 1345 WREG32(BIF_FB_EN, 0);
1274 if (rdev->num_crtc >= 6) { 1346 /* blackout the MC */
1275 WREG32(EVERGREEN_D5VGA_CONTROL, 0); 1347 blackout &= ~BLACKOUT_MODE_MASK;
1276 WREG32(EVERGREEN_D6VGA_CONTROL, 0); 1348 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
1277 } 1349 }
1278} 1350}
1279 1351
1280void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) 1352void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
1281{ 1353{
1282 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, 1354 u32 tmp, frame_count;
1283 upper_32_bits(rdev->mc.vram_start)); 1355 int i, j;
1284 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
1285 upper_32_bits(rdev->mc.vram_start));
1286 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1287 (u32)rdev->mc.vram_start);
1288 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1289 (u32)rdev->mc.vram_start);
1290
1291 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1292 upper_32_bits(rdev->mc.vram_start));
1293 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1294 upper_32_bits(rdev->mc.vram_start));
1295 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1296 (u32)rdev->mc.vram_start);
1297 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1298 (u32)rdev->mc.vram_start);
1299
1300 if (rdev->num_crtc >= 4) {
1301 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1302 upper_32_bits(rdev->mc.vram_start));
1303 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1304 upper_32_bits(rdev->mc.vram_start));
1305 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1306 (u32)rdev->mc.vram_start);
1307 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1308 (u32)rdev->mc.vram_start);
1309
1310 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1311 upper_32_bits(rdev->mc.vram_start));
1312 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1313 upper_32_bits(rdev->mc.vram_start));
1314 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1315 (u32)rdev->mc.vram_start);
1316 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1317 (u32)rdev->mc.vram_start);
1318 }
1319 if (rdev->num_crtc >= 6) {
1320 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1321 upper_32_bits(rdev->mc.vram_start));
1322 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1323 upper_32_bits(rdev->mc.vram_start));
1324 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1325 (u32)rdev->mc.vram_start);
1326 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1327 (u32)rdev->mc.vram_start);
1328 1356
1329 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, 1357 /* update crtc base addresses */
1358 for (i = 0; i < rdev->num_crtc; i++) {
1359 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
1330 upper_32_bits(rdev->mc.vram_start)); 1360 upper_32_bits(rdev->mc.vram_start));
1331 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, 1361 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
1332 upper_32_bits(rdev->mc.vram_start)); 1362 upper_32_bits(rdev->mc.vram_start));
1333 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, 1363 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
1334 (u32)rdev->mc.vram_start); 1364 (u32)rdev->mc.vram_start);
1335 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, 1365 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
1336 (u32)rdev->mc.vram_start); 1366 (u32)rdev->mc.vram_start);
1337 } 1367 }
1338
1339 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); 1368 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1340 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); 1369 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1341 /* Unlock host access */ 1370
1371 /* unblackout the MC */
1372 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1373 tmp &= ~BLACKOUT_MODE_MASK;
1374 WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
1375 /* allow CPU access */
1376 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1377
1378 for (i = 0; i < rdev->num_crtc; i++) {
1379 if (save->crtc_enabled) {
1380 if (ASIC_IS_DCE6(rdev)) {
1381 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1382 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1383 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1384 } else {
1385 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1386 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1387 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1388 }
1389 /* wait for the next frame */
1390 frame_count = radeon_get_vblank_counter(rdev, i);
1391 for (j = 0; j < rdev->usec_timeout; j++) {
1392 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1393 break;
1394 udelay(1);
1395 }
1396 }
1397 }
1398 /* Unlock vga access */
1342 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); 1399 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1343 mdelay(1); 1400 mdelay(1);
1344 WREG32(VGA_RENDER_CONTROL, save->vga_render_control); 1401 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
@@ -1557,7 +1614,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1557 return 0; 1614 return 0;
1558} 1615}
1559 1616
1560int evergreen_cp_resume(struct radeon_device *rdev) 1617static int evergreen_cp_resume(struct radeon_device *rdev)
1561{ 1618{
1562 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1619 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1563 u32 tmp; 1620 u32 tmp;
@@ -2333,22 +2390,10 @@ int evergreen_asic_reset(struct radeon_device *rdev)
2333 2390
2334u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) 2391u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2335{ 2392{
2336 switch (crtc) { 2393 if (crtc >= rdev->num_crtc)
2337 case 0:
2338 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
2339 case 1:
2340 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
2341 case 2:
2342 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
2343 case 3:
2344 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
2345 case 4:
2346 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
2347 case 5:
2348 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
2349 default:
2350 return 0; 2394 return 0;
2351 } 2395 else
2396 return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
2352} 2397}
2353 2398
2354void evergreen_disable_interrupt_state(struct radeon_device *rdev) 2399void evergreen_disable_interrupt_state(struct radeon_device *rdev)
@@ -2541,10 +2586,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
2541 DRM_DEBUG("evergreen_irq_set: hdmi 5\n"); 2586 DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
2542 afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK; 2587 afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2543 } 2588 }
2544 if (rdev->irq.gui_idle) {
2545 DRM_DEBUG("gui idle\n");
2546 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2547 }
2548 2589
2549 if (rdev->family >= CHIP_CAYMAN) { 2590 if (rdev->family >= CHIP_CAYMAN) {
2550 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl); 2591 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
@@ -2726,7 +2767,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
2726 } 2767 }
2727} 2768}
2728 2769
2729void evergreen_irq_disable(struct radeon_device *rdev) 2770static void evergreen_irq_disable(struct radeon_device *rdev)
2730{ 2771{
2731 r600_disable_interrupts(rdev); 2772 r600_disable_interrupts(rdev);
2732 /* Wait and acknowledge irq */ 2773 /* Wait and acknowledge irq */
@@ -3079,7 +3120,6 @@ restart_ih:
3079 break; 3120 break;
3080 case 233: /* GUI IDLE */ 3121 case 233: /* GUI IDLE */
3081 DRM_DEBUG("IH: GUI idle\n"); 3122 DRM_DEBUG("IH: GUI idle\n");
3082 wake_up(&rdev->irq.idle_queue);
3083 break; 3123 break;
3084 default: 3124 default:
3085 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3125 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index e44a62a07fe3..2ceab2b52d69 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -846,6 +846,16 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
846 return -EINVAL; 846 return -EINVAL;
847 } 847 }
848 848
849 if (!mipmap) {
850 if (llevel) {
851 dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
852 __func__, __LINE__);
853 return -EINVAL;
854 } else {
855 return 0; /* everything's ok */
856 }
857 }
858
849 /* check mipmap size */ 859 /* check mipmap size */
850 for (i = 1; i <= llevel; i++) { 860 for (i = 1; i <= llevel; i++) {
851 unsigned w, h, d; 861 unsigned w, h, d;
@@ -995,7 +1005,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
995 * Assume that chunk_ib_index is properly set. Will return -EINVAL 1005 * Assume that chunk_ib_index is properly set. Will return -EINVAL
996 * if packet is bigger than remaining ib size. or if packets is unknown. 1006 * if packet is bigger than remaining ib size. or if packets is unknown.
997 **/ 1007 **/
998int evergreen_cs_packet_parse(struct radeon_cs_parser *p, 1008static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
999 struct radeon_cs_packet *pkt, 1009 struct radeon_cs_packet *pkt,
1000 unsigned idx) 1010 unsigned idx)
1001{ 1011{
@@ -1081,6 +1091,27 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
1081} 1091}
1082 1092
1083/** 1093/**
1094 * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
1095 * @p: structure holding the parser context.
1096 *
1097 * Check if the next packet is a relocation packet3.
1098 **/
1099static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
1100{
1101 struct radeon_cs_packet p3reloc;
1102 int r;
1103
1104 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
1105 if (r) {
1106 return false;
1107 }
1108 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
1109 return false;
1110 }
1111 return true;
1112}
1113
1114/**
1084 * evergreen_cs_packet_next_vline() - parse userspace VLINE packet 1115 * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
1085 * @parser: parser structure holding parsing context. 1116 * @parser: parser structure holding parsing context.
1086 * 1117 *
@@ -2330,7 +2361,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2330 for (i = 0; i < (pkt->count / 8); i++) { 2361 for (i = 0; i < (pkt->count / 8); i++) {
2331 struct radeon_bo *texture, *mipmap; 2362 struct radeon_bo *texture, *mipmap;
2332 u32 toffset, moffset; 2363 u32 toffset, moffset;
2333 u32 size, offset; 2364 u32 size, offset, mip_address, tex_dim;
2334 2365
2335 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { 2366 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
2336 case SQ_TEX_VTX_VALID_TEXTURE: 2367 case SQ_TEX_VTX_VALID_TEXTURE:
@@ -2359,14 +2390,28 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2359 } 2390 }
2360 texture = reloc->robj; 2391 texture = reloc->robj;
2361 toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2392 toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2393
2362 /* tex mip base */ 2394 /* tex mip base */
2363 r = evergreen_cs_packet_next_reloc(p, &reloc); 2395 tex_dim = ib[idx+1+(i*8)+0] & 0x7;
2364 if (r) { 2396 mip_address = ib[idx+1+(i*8)+3];
2365 DRM_ERROR("bad SET_RESOURCE (tex)\n"); 2397
2366 return -EINVAL; 2398 if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
2399 !mip_address &&
2400 !evergreen_cs_packet_next_is_pkt3_nop(p)) {
2401 /* MIP_ADDRESS should point to FMASK for an MSAA texture.
2402 * It should be 0 if FMASK is disabled. */
2403 moffset = 0;
2404 mipmap = NULL;
2405 } else {
2406 r = evergreen_cs_packet_next_reloc(p, &reloc);
2407 if (r) {
2408 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2409 return -EINVAL;
2410 }
2411 moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2412 mipmap = reloc->robj;
2367 } 2413 }
2368 moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 2414
2369 mipmap = reloc->robj;
2370 r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8)); 2415 r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
2371 if (r) 2416 if (r)
2372 return r; 2417 return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 8beac1065025..034f4c22e5db 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -218,6 +218,8 @@
218#define EVERGREEN_CRTC_CONTROL 0x6e70 218#define EVERGREEN_CRTC_CONTROL 0x6e70
219# define EVERGREEN_CRTC_MASTER_EN (1 << 0) 219# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
220# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) 220# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
221#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
222# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
221#define EVERGREEN_CRTC_STATUS 0x6e8c 223#define EVERGREEN_CRTC_STATUS 0x6e8c
222# define EVERGREEN_CRTC_V_BLANK (1 << 0) 224# define EVERGREEN_CRTC_V_BLANK (1 << 0)
223#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 225#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79347855d9bf..df542f1a5dfb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -87,6 +87,10 @@
87 87
88#define CONFIG_MEMSIZE 0x5428 88#define CONFIG_MEMSIZE 0x5428
89 89
90#define BIF_FB_EN 0x5490
91#define FB_READ_EN (1 << 0)
92#define FB_WRITE_EN (1 << 1)
93
90#define CP_COHER_BASE 0x85F8 94#define CP_COHER_BASE 0x85F8
91#define CP_STALLED_STAT1 0x8674 95#define CP_STALLED_STAT1 0x8674
92#define CP_STALLED_STAT2 0x8678 96#define CP_STALLED_STAT2 0x8678
@@ -430,6 +434,9 @@
430#define NOOFCHAN_MASK 0x00003000 434#define NOOFCHAN_MASK 0x00003000
431#define MC_SHARED_CHREMAP 0x2008 435#define MC_SHARED_CHREMAP 0x2008
432 436
437#define MC_SHARED_BLACKOUT_CNTL 0x20ac
438#define BLACKOUT_MODE_MASK 0x00000007
439
433#define MC_ARB_RAMCFG 0x2760 440#define MC_ARB_RAMCFG 0x2760
434#define NOOFBANK_SHIFT 0 441#define NOOFBANK_SHIFT 0
435#define NOOFBANK_MASK 0x00000003 442#define NOOFBANK_MASK 0x00000003
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 853800e8582f..9a46f7d4e61f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -726,7 +726,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
726 WREG32(VM_INVALIDATE_REQUEST, 1); 726 WREG32(VM_INVALIDATE_REQUEST, 1);
727} 727}
728 728
729int cayman_pcie_gart_enable(struct radeon_device *rdev) 729static int cayman_pcie_gart_enable(struct radeon_device *rdev)
730{ 730{
731 int i, r; 731 int i, r;
732 732
@@ -782,7 +782,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
782 (u32)(rdev->dummy_page.addr >> 12)); 782 (u32)(rdev->dummy_page.addr >> 12));
783 WREG32(VM_CONTEXT1_CNTL2, 0); 783 WREG32(VM_CONTEXT1_CNTL2, 0);
784 WREG32(VM_CONTEXT1_CNTL, 0); 784 WREG32(VM_CONTEXT1_CNTL, 0);
785 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 785 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
786 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 786 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
787 787
788 cayman_pcie_gart_tlb_flush(rdev); 788 cayman_pcie_gart_tlb_flush(rdev);
@@ -793,7 +793,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
793 return 0; 793 return 0;
794} 794}
795 795
796void cayman_pcie_gart_disable(struct radeon_device *rdev) 796static void cayman_pcie_gart_disable(struct radeon_device *rdev)
797{ 797{
798 /* Disable all tables */ 798 /* Disable all tables */
799 WREG32(VM_CONTEXT0_CNTL, 0); 799 WREG32(VM_CONTEXT0_CNTL, 0);
@@ -813,7 +813,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
813 radeon_gart_table_vram_unpin(rdev); 813 radeon_gart_table_vram_unpin(rdev);
814} 814}
815 815
816void cayman_pcie_gart_fini(struct radeon_device *rdev) 816static void cayman_pcie_gart_fini(struct radeon_device *rdev)
817{ 817{
818 cayman_pcie_gart_disable(rdev); 818 cayman_pcie_gart_disable(rdev);
819 radeon_gart_table_vram_free(rdev); 819 radeon_gart_table_vram_free(rdev);
@@ -879,12 +879,13 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
879#endif 879#endif
880 (ib->gpu_addr & 0xFFFFFFFC)); 880 (ib->gpu_addr & 0xFFFFFFFC));
881 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 881 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
882 radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24)); 882 radeon_ring_write(ring, ib->length_dw |
883 (ib->vm ? (ib->vm->id << 24) : 0));
883 884
884 /* flush read cache over gart for this vmid */ 885 /* flush read cache over gart for this vmid */
885 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 886 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
886 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 887 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
887 radeon_ring_write(ring, ib->vm_id); 888 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
888 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 889 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
889 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); 890 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
890 radeon_ring_write(ring, 0xFFFFFFFF); 891 radeon_ring_write(ring, 0xFFFFFFFF);
@@ -1004,7 +1005,7 @@ static void cayman_cp_fini(struct radeon_device *rdev)
1004 radeon_scratch_free(rdev, ring->rptr_save_reg); 1005 radeon_scratch_free(rdev, ring->rptr_save_reg);
1005} 1006}
1006 1007
1007int cayman_cp_resume(struct radeon_device *rdev) 1008static int cayman_cp_resume(struct radeon_device *rdev)
1008{ 1009{
1009 static const int ridx[] = { 1010 static const int ridx[] = {
1010 RADEON_RING_TYPE_GFX_INDEX, 1011 RADEON_RING_TYPE_GFX_INDEX,
@@ -1496,53 +1497,16 @@ void cayman_vm_fini(struct radeon_device *rdev)
1496{ 1497{
1497} 1498}
1498 1499
1499int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id) 1500#define R600_ENTRY_VALID (1 << 0)
1500{
1501 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
1502 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
1503 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
1504 /* flush hdp cache */
1505 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1506 /* bits 0-7 are the VM contexts0-7 */
1507 WREG32(VM_INVALIDATE_REQUEST, 1 << id);
1508 return 0;
1509}
1510
1511void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
1512{
1513 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
1514 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
1515 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
1516 /* flush hdp cache */
1517 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1518 /* bits 0-7 are the VM contexts0-7 */
1519 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
1520}
1521
1522void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
1523{
1524 if (vm->id == -1)
1525 return;
1526
1527 /* flush hdp cache */
1528 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1529 /* bits 0-7 are the VM contexts0-7 */
1530 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
1531}
1532
1533#define R600_PTE_VALID (1 << 0)
1534#define R600_PTE_SYSTEM (1 << 1) 1501#define R600_PTE_SYSTEM (1 << 1)
1535#define R600_PTE_SNOOPED (1 << 2) 1502#define R600_PTE_SNOOPED (1 << 2)
1536#define R600_PTE_READABLE (1 << 5) 1503#define R600_PTE_READABLE (1 << 5)
1537#define R600_PTE_WRITEABLE (1 << 6) 1504#define R600_PTE_WRITEABLE (1 << 6)
1538 1505
1539uint32_t cayman_vm_page_flags(struct radeon_device *rdev, 1506uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
1540 struct radeon_vm *vm,
1541 uint32_t flags)
1542{ 1507{
1543 uint32_t r600_flags = 0; 1508 uint32_t r600_flags = 0;
1544 1509 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
1545 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
1546 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; 1510 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
1547 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; 1511 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
1548 if (flags & RADEON_VM_PAGE_SYSTEM) { 1512 if (flags & RADEON_VM_PAGE_SYSTEM) {
@@ -1552,12 +1516,76 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
1552 return r600_flags; 1516 return r600_flags;
1553} 1517}
1554 1518
1555void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm, 1519/**
1556 unsigned pfn, uint64_t addr, uint32_t flags) 1520 * cayman_vm_set_page - update the page tables using the CP
1521 *
1522 * @rdev: radeon_device pointer
1523 * @pe: addr of the page entry
1524 * @addr: dst addr to write into pe
1525 * @count: number of page entries to update
1526 * @incr: increase next addr by incr bytes
1527 * @flags: access flags
1528 *
1529 * Update the page tables using the CP (cayman-si).
1530 */
1531void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1532 uint64_t addr, unsigned count,
1533 uint32_t incr, uint32_t flags)
1534{
1535 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1536 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1537 int i;
1538
1539 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
1540 radeon_ring_write(ring, pe);
1541 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1542 for (i = 0; i < count; ++i) {
1543 uint64_t value = 0;
1544 if (flags & RADEON_VM_PAGE_SYSTEM) {
1545 value = radeon_vm_map_gart(rdev, addr);
1546 value &= 0xFFFFFFFFFFFFF000ULL;
1547 addr += incr;
1548
1549 } else if (flags & RADEON_VM_PAGE_VALID) {
1550 value = addr;
1551 addr += incr;
1552 }
1553
1554 value |= r600_flags;
1555 radeon_ring_write(ring, value);
1556 radeon_ring_write(ring, upper_32_bits(value));
1557 }
1558}
1559
1560/**
1561 * cayman_vm_flush - vm flush using the CP
1562 *
1563 * @rdev: radeon_device pointer
1564 *
1565 * Update the page table base and flush the VM TLB
1566 * using the CP (cayman-si).
1567 */
1568void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1557{ 1569{
1558 void __iomem *ptr = (void *)vm->pt; 1570 struct radeon_ring *ring = &rdev->ring[ridx];
1571
1572 if (vm == NULL)
1573 return;
1574
1575 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
1576 radeon_ring_write(ring, 0);
1577
1578 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
1579 radeon_ring_write(ring, vm->last_pfn);
1559 1580
1560 addr = addr & 0xFFFFFFFFFFFFF000ULL; 1581 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
1561 addr |= flags; 1582 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1562 writeq(addr, ptr + (pfn * 8)); 1583
1584 /* flush hdp cache */
1585 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1586 radeon_ring_write(ring, 0x1);
1587
1588 /* bits 0-7 are the VM contexts0-7 */
1589 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
1590 radeon_ring_write(ring, 1 << vm->id);
1563} 1591}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 870db340d377..2423d1b5d385 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -585,6 +585,7 @@
585#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 585#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
586#define PACKET3_SET_RESOURCE_INDIRECT 0x74 586#define PACKET3_SET_RESOURCE_INDIRECT 0x74
587#define PACKET3_SET_APPEND_CNT 0x75 587#define PACKET3_SET_APPEND_CNT 0x75
588#define PACKET3_ME_WRITE 0x7A
588 589
589#endif 590#endif
590 591
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8d7e33a0b243..b41237bf884b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -80,10 +80,12 @@ MODULE_FIRMWARE(FIRMWARE_R520);
80 */ 80 */
81void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) 81void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
82{ 82{
83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
84 int i; 83 int i;
85 84
86 if (radeon_crtc->crtc_id == 0) { 85 if (crtc >= rdev->num_crtc)
86 return;
87
88 if (crtc == 0) {
87 if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) { 89 if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
88 for (i = 0; i < rdev->usec_timeout; i++) { 90 for (i = 0; i < rdev->usec_timeout; i++) {
89 if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)) 91 if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
@@ -698,9 +700,6 @@ int r100_irq_set(struct radeon_device *rdev)
698 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 700 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
699 tmp |= RADEON_SW_INT_ENABLE; 701 tmp |= RADEON_SW_INT_ENABLE;
700 } 702 }
701 if (rdev->irq.gui_idle) {
702 tmp |= RADEON_GUI_IDLE_MASK;
703 }
704 if (rdev->irq.crtc_vblank_int[0] || 703 if (rdev->irq.crtc_vblank_int[0] ||
705 atomic_read(&rdev->irq.pflip[0])) { 704 atomic_read(&rdev->irq.pflip[0])) {
706 tmp |= RADEON_CRTC_VBLANK_MASK; 705 tmp |= RADEON_CRTC_VBLANK_MASK;
@@ -737,12 +736,6 @@ static uint32_t r100_irq_ack(struct radeon_device *rdev)
737 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | 736 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
738 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; 737 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
739 738
740 /* the interrupt works, but the status bit is permanently asserted */
741 if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
742 if (!rdev->irq.gui_idle_acked)
743 irq_mask |= RADEON_GUI_IDLE_STAT;
744 }
745
746 if (irqs) { 739 if (irqs) {
747 WREG32(RADEON_GEN_INT_STATUS, irqs); 740 WREG32(RADEON_GEN_INT_STATUS, irqs);
748 } 741 }
@@ -754,9 +747,6 @@ int r100_irq_process(struct radeon_device *rdev)
754 uint32_t status, msi_rearm; 747 uint32_t status, msi_rearm;
755 bool queue_hotplug = false; 748 bool queue_hotplug = false;
756 749
757 /* reset gui idle ack. the status bit is broken */
758 rdev->irq.gui_idle_acked = false;
759
760 status = r100_irq_ack(rdev); 750 status = r100_irq_ack(rdev);
761 if (!status) { 751 if (!status) {
762 return IRQ_NONE; 752 return IRQ_NONE;
@@ -769,11 +759,6 @@ int r100_irq_process(struct radeon_device *rdev)
769 if (status & RADEON_SW_INT_TEST) { 759 if (status & RADEON_SW_INT_TEST) {
770 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 760 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
771 } 761 }
772 /* gui idle interrupt */
773 if (status & RADEON_GUI_IDLE_STAT) {
774 rdev->irq.gui_idle_acked = true;
775 wake_up(&rdev->irq.idle_queue);
776 }
777 /* Vertical blank interrupts */ 762 /* Vertical blank interrupts */
778 if (status & RADEON_CRTC_VBLANK_STAT) { 763 if (status & RADEON_CRTC_VBLANK_STAT) {
779 if (rdev->irq.crtc_vblank_int[0]) { 764 if (rdev->irq.crtc_vblank_int[0]) {
@@ -803,8 +788,6 @@ int r100_irq_process(struct radeon_device *rdev)
803 } 788 }
804 status = r100_irq_ack(rdev); 789 status = r100_irq_ack(rdev);
805 } 790 }
806 /* reset gui idle ack. the status bit is broken */
807 rdev->irq.gui_idle_acked = false;
808 if (queue_hotplug) 791 if (queue_hotplug)
809 schedule_work(&rdev->hotplug_work); 792 schedule_work(&rdev->hotplug_work);
810 if (rdev->msi_enabled) { 793 if (rdev->msi_enabled) {
@@ -2530,7 +2513,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
2530/* 2513/*
2531 * Global GPU functions 2514 * Global GPU functions
2532 */ 2515 */
2533void r100_errata(struct radeon_device *rdev) 2516static void r100_errata(struct radeon_device *rdev)
2534{ 2517{
2535 rdev->pll_errata = 0; 2518 rdev->pll_errata = 0;
2536 2519
@@ -2545,51 +2528,7 @@ void r100_errata(struct radeon_device *rdev)
2545 } 2528 }
2546} 2529}
2547 2530
2548/* Wait for vertical sync on primary CRTC */ 2531static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2549void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
2550{
2551 uint32_t crtc_gen_cntl, tmp;
2552 int i;
2553
2554 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
2555 if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
2556 !(crtc_gen_cntl & RADEON_CRTC_EN)) {
2557 return;
2558 }
2559 /* Clear the CRTC_VBLANK_SAVE bit */
2560 WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
2561 for (i = 0; i < rdev->usec_timeout; i++) {
2562 tmp = RREG32(RADEON_CRTC_STATUS);
2563 if (tmp & RADEON_CRTC_VBLANK_SAVE) {
2564 return;
2565 }
2566 DRM_UDELAY(1);
2567 }
2568}
2569
2570/* Wait for vertical sync on secondary CRTC */
2571void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
2572{
2573 uint32_t crtc2_gen_cntl, tmp;
2574 int i;
2575
2576 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
2577 if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
2578 !(crtc2_gen_cntl & RADEON_CRTC2_EN))
2579 return;
2580
2581 /* Clear the CRTC_VBLANK_SAVE bit */
2582 WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
2583 for (i = 0; i < rdev->usec_timeout; i++) {
2584 tmp = RREG32(RADEON_CRTC2_STATUS);
2585 if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
2586 return;
2587 }
2588 DRM_UDELAY(1);
2589 }
2590}
2591
2592int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2593{ 2532{
2594 unsigned i; 2533 unsigned i;
2595 uint32_t tmp; 2534 uint32_t tmp;
@@ -2950,7 +2889,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
2950 WREG32(RADEON_CONFIG_CNTL, temp); 2889 WREG32(RADEON_CONFIG_CNTL, temp);
2951} 2890}
2952 2891
2953void r100_mc_init(struct radeon_device *rdev) 2892static void r100_mc_init(struct radeon_device *rdev)
2954{ 2893{
2955 u64 base; 2894 u64 base;
2956 2895
@@ -3022,7 +2961,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
3022 r100_pll_errata_after_data(rdev); 2961 r100_pll_errata_after_data(rdev);
3023} 2962}
3024 2963
3025void r100_set_safe_registers(struct radeon_device *rdev) 2964static void r100_set_safe_registers(struct radeon_device *rdev)
3026{ 2965{
3027 if (ASIC_IS_RN50(rdev)) { 2966 if (ASIC_IS_RN50(rdev)) {
3028 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2967 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -3817,9 +3756,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3817 return r; 3756 return r;
3818 } 3757 }
3819 WREG32(scratch, 0xCAFEDEAD); 3758 WREG32(scratch, 0xCAFEDEAD);
3820 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256); 3759 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3821 if (r) { 3760 if (r) {
3822 return r; 3761 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3762 goto free_scratch;
3823 } 3763 }
3824 ib.ptr[0] = PACKET0(scratch, 0); 3764 ib.ptr[0] = PACKET0(scratch, 0);
3825 ib.ptr[1] = 0xDEADBEEF; 3765 ib.ptr[1] = 0xDEADBEEF;
@@ -3832,13 +3772,13 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3832 ib.length_dw = 8; 3772 ib.length_dw = 8;
3833 r = radeon_ib_schedule(rdev, &ib, NULL); 3773 r = radeon_ib_schedule(rdev, &ib, NULL);
3834 if (r) { 3774 if (r) {
3835 radeon_scratch_free(rdev, scratch); 3775 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3836 radeon_ib_free(rdev, &ib); 3776 goto free_ib;
3837 return r;
3838 } 3777 }
3839 r = radeon_fence_wait(ib.fence, false); 3778 r = radeon_fence_wait(ib.fence, false);
3840 if (r) { 3779 if (r) {
3841 return r; 3780 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3781 goto free_ib;
3842 } 3782 }
3843 for (i = 0; i < rdev->usec_timeout; i++) { 3783 for (i = 0; i < rdev->usec_timeout; i++) {
3844 tmp = RREG32(scratch); 3784 tmp = RREG32(scratch);
@@ -3854,8 +3794,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3854 scratch, tmp); 3794 scratch, tmp);
3855 r = -EINVAL; 3795 r = -EINVAL;
3856 } 3796 }
3857 radeon_scratch_free(rdev, scratch); 3797free_ib:
3858 radeon_ib_free(rdev, &ib); 3798 radeon_ib_free(rdev, &ib);
3799free_scratch:
3800 radeon_scratch_free(rdev, scratch);
3859 return r; 3801 return r;
3860} 3802}
3861 3803
@@ -3964,7 +3906,7 @@ static void r100_mc_program(struct radeon_device *rdev)
3964 r100_mc_resume(rdev, &save); 3906 r100_mc_resume(rdev, &save);
3965} 3907}
3966 3908
3967void r100_clock_startup(struct radeon_device *rdev) 3909static void r100_clock_startup(struct radeon_device *rdev)
3968{ 3910{
3969 u32 tmp; 3911 u32 tmp;
3970 3912
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 646a1927dda7..4949bfc14b58 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -296,7 +296,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
296 radeon_ring_unlock_commit(rdev, ring); 296 radeon_ring_unlock_commit(rdev, ring);
297} 297}
298 298
299void r300_errata(struct radeon_device *rdev) 299static void r300_errata(struct radeon_device *rdev)
300{ 300{
301 rdev->pll_errata = 0; 301 rdev->pll_errata = 0;
302 302
@@ -322,7 +322,7 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
322 return -1; 322 return -1;
323} 323}
324 324
325void r300_gpu_init(struct radeon_device *rdev) 325static void r300_gpu_init(struct radeon_device *rdev)
326{ 326{
327 uint32_t gb_tile_config, tmp; 327 uint32_t gb_tile_config, tmp;
328 328
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 079d3c52c08a..28b4f871aaf4 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,7 +119,7 @@ static void r520_vram_get_type(struct radeon_device *rdev)
119 rdev->mc.vram_width *= 2; 119 rdev->mc.vram_width *= 2;
120} 120}
121 121
122void r520_mc_init(struct radeon_device *rdev) 122static void r520_mc_init(struct radeon_device *rdev)
123{ 123{
124 124
125 r520_vram_get_type(rdev); 125 r520_vram_get_type(rdev);
@@ -131,7 +131,7 @@ void r520_mc_init(struct radeon_device *rdev)
131 radeon_update_bandwidth_info(rdev); 131 radeon_update_bandwidth_info(rdev);
132} 132}
133 133
134void r520_mc_program(struct radeon_device *rdev) 134static void r520_mc_program(struct radeon_device *rdev)
135{ 135{
136 struct rv515_mc_save save; 136 struct rv515_mc_save save;
137 137
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d79c639ae739..39b743fff791 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -98,7 +98,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
98 98
99/* r600,rv610,rv630,rv620,rv635,rv670 */ 99/* r600,rv610,rv630,rv620,rv635,rv670 */
100int r600_mc_wait_for_idle(struct radeon_device *rdev); 100int r600_mc_wait_for_idle(struct radeon_device *rdev);
101void r600_gpu_init(struct radeon_device *rdev); 101static void r600_gpu_init(struct radeon_device *rdev);
102void r600_fini(struct radeon_device *rdev); 102void r600_fini(struct radeon_device *rdev);
103void r600_irq_disable(struct radeon_device *rdev); 103void r600_irq_disable(struct radeon_device *rdev);
104static void r600_pcie_gen2_enable(struct radeon_device *rdev); 104static void r600_pcie_gen2_enable(struct radeon_device *rdev);
@@ -881,7 +881,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
881 return radeon_gart_table_vram_alloc(rdev); 881 return radeon_gart_table_vram_alloc(rdev);
882} 882}
883 883
884int r600_pcie_gart_enable(struct radeon_device *rdev) 884static int r600_pcie_gart_enable(struct radeon_device *rdev)
885{ 885{
886 u32 tmp; 886 u32 tmp;
887 int r, i; 887 int r, i;
@@ -938,7 +938,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
938 return 0; 938 return 0;
939} 939}
940 940
941void r600_pcie_gart_disable(struct radeon_device *rdev) 941static void r600_pcie_gart_disable(struct radeon_device *rdev)
942{ 942{
943 u32 tmp; 943 u32 tmp;
944 int i; 944 int i;
@@ -971,14 +971,14 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
971 radeon_gart_table_vram_unpin(rdev); 971 radeon_gart_table_vram_unpin(rdev);
972} 972}
973 973
974void r600_pcie_gart_fini(struct radeon_device *rdev) 974static void r600_pcie_gart_fini(struct radeon_device *rdev)
975{ 975{
976 radeon_gart_fini(rdev); 976 radeon_gart_fini(rdev);
977 r600_pcie_gart_disable(rdev); 977 r600_pcie_gart_disable(rdev);
978 radeon_gart_table_vram_free(rdev); 978 radeon_gart_table_vram_free(rdev);
979} 979}
980 980
981void r600_agp_enable(struct radeon_device *rdev) 981static void r600_agp_enable(struct radeon_device *rdev)
982{ 982{
983 u32 tmp; 983 u32 tmp;
984 int i; 984 int i;
@@ -1158,7 +1158,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
1158 } 1158 }
1159} 1159}
1160 1160
1161int r600_mc_init(struct radeon_device *rdev) 1161static int r600_mc_init(struct radeon_device *rdev)
1162{ 1162{
1163 u32 tmp; 1163 u32 tmp;
1164 int chansize, numchan; 1164 int chansize, numchan;
@@ -1258,7 +1258,7 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
1258 * reset, it's up to the caller to determine if the GPU needs one. We 1258 * reset, it's up to the caller to determine if the GPU needs one. We
1259 * might add an helper function to check that. 1259 * might add an helper function to check that.
1260 */ 1260 */
1261int r600_gpu_soft_reset(struct radeon_device *rdev) 1261static int r600_gpu_soft_reset(struct radeon_device *rdev)
1262{ 1262{
1263 struct rv515_mc_save save; 1263 struct rv515_mc_save save;
1264 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | 1264 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
@@ -1433,7 +1433,7 @@ int r600_count_pipe_bits(uint32_t val)
1433 return ret; 1433 return ret;
1434} 1434}
1435 1435
1436void r600_gpu_init(struct radeon_device *rdev) 1436static void r600_gpu_init(struct radeon_device *rdev)
1437{ 1437{
1438 u32 tiling_config; 1438 u32 tiling_config;
1439 u32 ramcfg; 1439 u32 ramcfg;
@@ -2347,7 +2347,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2347 /* FIXME: implement */ 2347 /* FIXME: implement */
2348} 2348}
2349 2349
2350int r600_startup(struct radeon_device *rdev) 2350static int r600_startup(struct radeon_device *rdev)
2351{ 2351{
2352 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2352 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2353 int r; 2353 int r;
@@ -2635,10 +2635,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2635 return r; 2635 return r;
2636 } 2636 }
2637 WREG32(scratch, 0xCAFEDEAD); 2637 WREG32(scratch, 0xCAFEDEAD);
2638 r = radeon_ib_get(rdev, ring->idx, &ib, 256); 2638 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
2639 if (r) { 2639 if (r) {
2640 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 2640 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2641 return r; 2641 goto free_scratch;
2642 } 2642 }
2643 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); 2643 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2644 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2644 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2646,15 +2646,13 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2646 ib.length_dw = 3; 2646 ib.length_dw = 3;
2647 r = radeon_ib_schedule(rdev, &ib, NULL); 2647 r = radeon_ib_schedule(rdev, &ib, NULL);
2648 if (r) { 2648 if (r) {
2649 radeon_scratch_free(rdev, scratch);
2650 radeon_ib_free(rdev, &ib);
2651 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 2649 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2652 return r; 2650 goto free_ib;
2653 } 2651 }
2654 r = radeon_fence_wait(ib.fence, false); 2652 r = radeon_fence_wait(ib.fence, false);
2655 if (r) { 2653 if (r) {
2656 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 2654 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2657 return r; 2655 goto free_ib;
2658 } 2656 }
2659 for (i = 0; i < rdev->usec_timeout; i++) { 2657 for (i = 0; i < rdev->usec_timeout; i++) {
2660 tmp = RREG32(scratch); 2658 tmp = RREG32(scratch);
@@ -2669,8 +2667,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2669 scratch, tmp); 2667 scratch, tmp);
2670 r = -EINVAL; 2668 r = -EINVAL;
2671 } 2669 }
2672 radeon_scratch_free(rdev, scratch); 2670free_ib:
2673 radeon_ib_free(rdev, &ib); 2671 radeon_ib_free(rdev, &ib);
2672free_scratch:
2673 radeon_scratch_free(rdev, scratch);
2674 return r; 2674 return r;
2675} 2675}
2676 2676
@@ -3088,10 +3088,6 @@ int r600_irq_set(struct radeon_device *rdev)
3088 DRM_DEBUG("r600_irq_set: hdmi 0\n"); 3088 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3089 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK; 3089 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3090 } 3090 }
3091 if (rdev->irq.gui_idle) {
3092 DRM_DEBUG("gui idle\n");
3093 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3094 }
3095 3091
3096 WREG32(CP_INT_CNTL, cp_int_cntl); 3092 WREG32(CP_INT_CNTL, cp_int_cntl);
3097 WREG32(DxMODE_INT_MASK, mode_int); 3093 WREG32(DxMODE_INT_MASK, mode_int);
@@ -3475,7 +3471,6 @@ restart_ih:
3475 break; 3471 break;
3476 case 233: /* GUI IDLE */ 3472 case 233: /* GUI IDLE */
3477 DRM_DEBUG("IH: GUI idle\n"); 3473 DRM_DEBUG("IH: GUI idle\n");
3478 wake_up(&rdev->irq.idle_queue);
3479 break; 3474 break;
3480 default: 3475 default:
3481 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3476 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 3c031a48205d..661fec2a2cc1 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -489,31 +489,36 @@ set_default_state(drm_radeon_private_t *dev_priv)
489 ADVANCE_RING(); 489 ADVANCE_RING();
490} 490}
491 491
492static uint32_t i2f(uint32_t input) 492/* 23 bits of float fractional data */
493#define I2F_FRAC_BITS 23
494#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
495
496/*
497 * Converts unsigned integer into 32-bit IEEE floating point representation.
498 * Will be exact from 0 to 2^24. Above that, we round towards zero
499 * as the fractional bits will not fit in a float. (It would be better to
500 * round towards even as the fpu does, but that is slower.)
501 */
502__pure uint32_t int2float(uint32_t x)
493{ 503{
494 u32 result, i, exponent, fraction; 504 uint32_t msb, exponent, fraction;
495 505
496 if ((input & 0x3fff) == 0) 506 /* Zero is special */
497 result = 0; /* 0 is a special case */ 507 if (!x) return 0;
498 else { 508
499 exponent = 140; /* exponent biased by 127; */ 509 /* Get location of the most significant bit */
500 fraction = (input & 0x3fff) << 10; /* cheat and only 510 msb = __fls(x);
501 handle numbers below 2^^15 */
502 for (i = 0; i < 14; i++) {
503 if (fraction & 0x800000)
504 break;
505 else {
506 fraction = fraction << 1; /* keep
507 shifting left until top bit = 1 */
508 exponent = exponent - 1;
509 }
510 }
511 result = exponent << 23 | (fraction & 0x7fffff); /* mask
512 off top bit; assumed 1 */
513 }
514 return result;
515}
516 511
512 /*
513 * Use a rotate instead of a shift because that works both leftwards
514 * and rightwards due to the mod(32) behaviour. This means we don't
515 * need to check to see if we are above 2^24 or not.
516 */
517 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
518 exponent = (127 + msb) << I2F_FRAC_BITS;
519
520 return fraction + exponent;
521}
517 522
518static int r600_nomm_get_vb(struct drm_device *dev) 523static int r600_nomm_get_vb(struct drm_device *dev)
519{ 524{
@@ -632,20 +637,20 @@ r600_blit_copy(struct drm_device *dev,
632 vb = r600_nomm_get_vb_ptr(dev); 637 vb = r600_nomm_get_vb_ptr(dev);
633 } 638 }
634 639
635 vb[0] = i2f(dst_x); 640 vb[0] = int2float(dst_x);
636 vb[1] = 0; 641 vb[1] = 0;
637 vb[2] = i2f(src_x); 642 vb[2] = int2float(src_x);
638 vb[3] = 0; 643 vb[3] = 0;
639 644
640 vb[4] = i2f(dst_x); 645 vb[4] = int2float(dst_x);
641 vb[5] = i2f(h); 646 vb[5] = int2float(h);
642 vb[6] = i2f(src_x); 647 vb[6] = int2float(src_x);
643 vb[7] = i2f(h); 648 vb[7] = int2float(h);
644 649
645 vb[8] = i2f(dst_x + cur_size); 650 vb[8] = int2float(dst_x + cur_size);
646 vb[9] = i2f(h); 651 vb[9] = int2float(h);
647 vb[10] = i2f(src_x + cur_size); 652 vb[10] = int2float(src_x + cur_size);
648 vb[11] = i2f(h); 653 vb[11] = int2float(h);
649 654
650 /* src */ 655 /* src */
651 set_tex_resource(dev_priv, FMT_8, 656 set_tex_resource(dev_priv, FMT_8,
@@ -721,20 +726,20 @@ r600_blit_copy(struct drm_device *dev,
721 vb = r600_nomm_get_vb_ptr(dev); 726 vb = r600_nomm_get_vb_ptr(dev);
722 } 727 }
723 728
724 vb[0] = i2f(dst_x / 4); 729 vb[0] = int2float(dst_x / 4);
725 vb[1] = 0; 730 vb[1] = 0;
726 vb[2] = i2f(src_x / 4); 731 vb[2] = int2float(src_x / 4);
727 vb[3] = 0; 732 vb[3] = 0;
728 733
729 vb[4] = i2f(dst_x / 4); 734 vb[4] = int2float(dst_x / 4);
730 vb[5] = i2f(h); 735 vb[5] = int2float(h);
731 vb[6] = i2f(src_x / 4); 736 vb[6] = int2float(src_x / 4);
732 vb[7] = i2f(h); 737 vb[7] = int2float(h);
733 738
734 vb[8] = i2f((dst_x + cur_size) / 4); 739 vb[8] = int2float((dst_x + cur_size) / 4);
735 vb[9] = i2f(h); 740 vb[9] = int2float(h);
736 vb[10] = i2f((src_x + cur_size) / 4); 741 vb[10] = int2float((src_x + cur_size) / 4);
737 vb[11] = i2f(h); 742 vb[11] = int2float(h);
738 743
739 /* src */ 744 /* src */
740 set_tex_resource(dev_priv, FMT_8_8_8_8, 745 set_tex_resource(dev_priv, FMT_8_8_8_8,
@@ -804,20 +809,20 @@ r600_blit_swap(struct drm_device *dev,
804 dx2 = dx + w; 809 dx2 = dx + w;
805 dy2 = dy + h; 810 dy2 = dy + h;
806 811
807 vb[0] = i2f(dx); 812 vb[0] = int2float(dx);
808 vb[1] = i2f(dy); 813 vb[1] = int2float(dy);
809 vb[2] = i2f(sx); 814 vb[2] = int2float(sx);
810 vb[3] = i2f(sy); 815 vb[3] = int2float(sy);
811 816
812 vb[4] = i2f(dx); 817 vb[4] = int2float(dx);
813 vb[5] = i2f(dy2); 818 vb[5] = int2float(dy2);
814 vb[6] = i2f(sx); 819 vb[6] = int2float(sx);
815 vb[7] = i2f(sy2); 820 vb[7] = int2float(sy2);
816 821
817 vb[8] = i2f(dx2); 822 vb[8] = int2float(dx2);
818 vb[9] = i2f(dy2); 823 vb[9] = int2float(dy2);
819 vb[10] = i2f(sx2); 824 vb[10] = int2float(sx2);
820 vb[11] = i2f(sy2); 825 vb[11] = int2float(sy2);
821 826
822 switch(cpp) { 827 switch(cpp) {
823 case 4: 828 case 4:
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 2bef8549ddfe..1c7ed3a5d045 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -455,46 +455,6 @@ set_default_state(struct radeon_device *rdev)
455 radeon_ring_write(ring, sq_stack_resource_mgmt_2); 455 radeon_ring_write(ring, sq_stack_resource_mgmt_2);
456} 456}
457 457
458#define I2F_MAX_BITS 15
459#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
460#define I2F_SHIFT (24 - I2F_MAX_BITS)
461
462/*
463 * Converts unsigned integer into 32-bit IEEE floating point representation.
464 * Conversion is not universal and only works for the range from 0
465 * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
466 * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
467 * I2F_MAX_BITS can be increased, but that will add to the loop iterations
468 * and slow us down. Conversion is done by shifting the input and counting
469 * down until the first 1 reaches bit position 23. The resulting counter
470 * and the shifted input are, respectively, the exponent and the fraction.
471 * The sign is always zero.
472 */
473static uint32_t i2f(uint32_t input)
474{
475 u32 result, i, exponent, fraction;
476
477 WARN_ON_ONCE(input > I2F_MAX_INPUT);
478
479 if ((input & I2F_MAX_INPUT) == 0)
480 result = 0;
481 else {
482 exponent = 126 + I2F_MAX_BITS;
483 fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
484
485 for (i = 0; i < I2F_MAX_BITS; i++) {
486 if (fraction & 0x800000)
487 break;
488 else {
489 fraction = fraction << 1;
490 exponent = exponent - 1;
491 }
492 }
493 result = exponent << 23 | (fraction & 0x7fffff);
494 }
495 return result;
496}
497
498int r600_blit_init(struct radeon_device *rdev) 458int r600_blit_init(struct radeon_device *rdev)
499{ 459{
500 u32 obj_size; 460 u32 obj_size;
@@ -766,14 +726,14 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
766 vb_cpu_addr[3] = 0; 726 vb_cpu_addr[3] = 0;
767 727
768 vb_cpu_addr[4] = 0; 728 vb_cpu_addr[4] = 0;
769 vb_cpu_addr[5] = i2f(h); 729 vb_cpu_addr[5] = int2float(h);
770 vb_cpu_addr[6] = 0; 730 vb_cpu_addr[6] = 0;
771 vb_cpu_addr[7] = i2f(h); 731 vb_cpu_addr[7] = int2float(h);
772 732
773 vb_cpu_addr[8] = i2f(w); 733 vb_cpu_addr[8] = int2float(w);
774 vb_cpu_addr[9] = i2f(h); 734 vb_cpu_addr[9] = int2float(h);
775 vb_cpu_addr[10] = i2f(w); 735 vb_cpu_addr[10] = int2float(w);
776 vb_cpu_addr[11] = i2f(h); 736 vb_cpu_addr[11] = int2float(h);
777 737
778 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, 738 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
779 w, h, w, src_gpu_addr, size_in_bytes); 739 w, h, w, src_gpu_addr, size_in_bytes);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index f437d36dd98c..2f3ce7a75976 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,4 +35,5 @@ extern const u32 r6xx_default_state[];
35extern const u32 r6xx_ps_size, r6xx_vs_size; 35extern const u32 r6xx_ps_size, r6xx_vs_size;
36extern const u32 r6xx_default_size, r7xx_default_size; 36extern const u32 r6xx_default_size, r7xx_default_size;
37 37
38__pure uint32_t int2float(uint32_t x);
38#endif 39#endif
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index f37676d7f217..853f05ced1b1 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -847,7 +847,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
847 * Assume that chunk_ib_index is properly set. Will return -EINVAL 847 * Assume that chunk_ib_index is properly set. Will return -EINVAL
848 * if packet is bigger than remaining ib size. or if packets is unknown. 848 * if packet is bigger than remaining ib size. or if packets is unknown.
849 **/ 849 **/
850int r600_cs_packet_parse(struct radeon_cs_parser *p, 850static int r600_cs_packet_parse(struct radeon_cs_parser *p,
851 struct radeon_cs_packet *pkt, 851 struct radeon_cs_packet *pkt,
852 unsigned idx) 852 unsigned idx)
853{ 853{
@@ -2180,7 +2180,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2180 } 2180 }
2181 break; 2181 break;
2182 case PACKET3_STRMOUT_BASE_UPDATE: 2182 case PACKET3_STRMOUT_BASE_UPDATE:
2183 if (p->family < CHIP_RV770) { 2183 /* RS780 and RS880 also need this */
2184 if (p->family < CHIP_RS780) {
2184 DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); 2185 DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2185 return -EINVAL; 2186 return -EINVAL;
2186 } 2187 }
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e3558c3ef24a..857a7d7862fb 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -53,7 +53,7 @@ enum r600_hdmi_iec_status_bits {
53 AUDIO_STATUS_LEVEL = 0x80 53 AUDIO_STATUS_LEVEL = 0x80
54}; 54};
55 55
56struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { 56static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
57 /* 32kHz 44.1kHz 48kHz */ 57 /* 32kHz 44.1kHz 48kHz */
58 /* Clock N CTS N CTS N CTS */ 58 /* Clock N CTS N CTS N CTS */
59 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ 59 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 59a15315ae9f..b04c06444d8b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -123,6 +123,7 @@ extern int radeon_lockup_timeout;
123#define CAYMAN_RING_TYPE_CP2_INDEX 2 123#define CAYMAN_RING_TYPE_CP2_INDEX 2
124 124
125/* hardcode those limit for now */ 125/* hardcode those limit for now */
126#define RADEON_VA_IB_OFFSET (1 << 20)
126#define RADEON_VA_RESERVED_SIZE (8 << 20) 127#define RADEON_VA_RESERVED_SIZE (8 << 20)
127#define RADEON_IB_VM_MAX_SIZE (64 << 10) 128#define RADEON_IB_VM_MAX_SIZE (64 << 10)
128 129
@@ -253,6 +254,22 @@ static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
253 } 254 }
254} 255}
255 256
257static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
258 struct radeon_fence *b)
259{
260 if (!a) {
261 return false;
262 }
263
264 if (!b) {
265 return true;
266 }
267
268 BUG_ON(a->ring != b->ring);
269
270 return a->seq < b->seq;
271}
272
256/* 273/*
257 * Tiling registers 274 * Tiling registers
258 */ 275 */
@@ -275,18 +292,20 @@ struct radeon_mman {
275 292
276/* bo virtual address in a specific vm */ 293/* bo virtual address in a specific vm */
277struct radeon_bo_va { 294struct radeon_bo_va {
278 /* bo list is protected by bo being reserved */ 295 /* protected by bo being reserved */
279 struct list_head bo_list; 296 struct list_head bo_list;
280 /* vm list is protected by vm mutex */
281 struct list_head vm_list;
282 /* constant after initialization */
283 struct radeon_vm *vm;
284 struct radeon_bo *bo;
285 uint64_t soffset; 297 uint64_t soffset;
286 uint64_t eoffset; 298 uint64_t eoffset;
287 uint32_t flags; 299 uint32_t flags;
288 struct radeon_fence *fence;
289 bool valid; 300 bool valid;
301 unsigned ref_count;
302
303 /* protected by vm mutex */
304 struct list_head vm_list;
305
306 /* constant after initialization */
307 struct radeon_vm *vm;
308 struct radeon_bo *bo;
290}; 309};
291 310
292struct radeon_bo { 311struct radeon_bo {
@@ -566,9 +585,6 @@ struct radeon_irq {
566 atomic_t pflip[RADEON_MAX_CRTCS]; 585 atomic_t pflip[RADEON_MAX_CRTCS];
567 wait_queue_head_t vblank_queue; 586 wait_queue_head_t vblank_queue;
568 bool hpd[RADEON_MAX_HPD_PINS]; 587 bool hpd[RADEON_MAX_HPD_PINS];
569 bool gui_idle;
570 bool gui_idle_acked;
571 wait_queue_head_t idle_queue;
572 bool afmt[RADEON_MAX_AFMT_BLOCKS]; 588 bool afmt[RADEON_MAX_AFMT_BLOCKS];
573 union radeon_irq_stat_regs stat_regs; 589 union radeon_irq_stat_regs stat_regs;
574}; 590};
@@ -583,7 +599,6 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
583void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block); 599void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
584void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask); 600void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
585void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask); 601void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
586int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);
587 602
588/* 603/*
589 * CP & rings. 604 * CP & rings.
@@ -596,7 +611,7 @@ struct radeon_ib {
596 uint32_t *ptr; 611 uint32_t *ptr;
597 int ring; 612 int ring;
598 struct radeon_fence *fence; 613 struct radeon_fence *fence;
599 unsigned vm_id; 614 struct radeon_vm *vm;
600 bool is_const_ib; 615 bool is_const_ib;
601 struct radeon_fence *sync_to[RADEON_NUM_RINGS]; 616 struct radeon_fence *sync_to[RADEON_NUM_RINGS];
602 struct radeon_semaphore *semaphore; 617 struct radeon_semaphore *semaphore;
@@ -632,41 +647,38 @@ struct radeon_ring {
632/* 647/*
633 * VM 648 * VM
634 */ 649 */
650
651/* maximum number of VMIDs */
652#define RADEON_NUM_VM 16
653
654/* defines number of bits in page table versus page directory,
655 * a page is 4KB so we have 12 bits offset, 9 bits in the page
656 * table and the remaining 19 bits are in the page directory */
657#define RADEON_VM_BLOCK_SIZE 9
658
659/* number of entries in page table */
660#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
661
635struct radeon_vm { 662struct radeon_vm {
636 struct list_head list; 663 struct list_head list;
637 struct list_head va; 664 struct list_head va;
638 int id; 665 unsigned id;
639 unsigned last_pfn; 666 unsigned last_pfn;
640 u64 pt_gpu_addr; 667 u64 pd_gpu_addr;
641 u64 *pt;
642 struct radeon_sa_bo *sa_bo; 668 struct radeon_sa_bo *sa_bo;
643 struct mutex mutex; 669 struct mutex mutex;
644 /* last fence for cs using this vm */ 670 /* last fence for cs using this vm */
645 struct radeon_fence *fence; 671 struct radeon_fence *fence;
646}; 672 /* last flush or NULL if we still need to flush */
647 673 struct radeon_fence *last_flush;
648struct radeon_vm_funcs {
649 int (*init)(struct radeon_device *rdev);
650 void (*fini)(struct radeon_device *rdev);
651 /* cs mutex must be lock for schedule_ib */
652 int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
653 void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
654 void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
655 uint32_t (*page_flags)(struct radeon_device *rdev,
656 struct radeon_vm *vm,
657 uint32_t flags);
658 void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
659 unsigned pfn, uint64_t addr, uint32_t flags);
660}; 674};
661 675
662struct radeon_vm_manager { 676struct radeon_vm_manager {
663 struct mutex lock; 677 struct mutex lock;
664 struct list_head lru_vm; 678 struct list_head lru_vm;
665 uint32_t use_bitmap; 679 struct radeon_fence *active[RADEON_NUM_VM];
666 struct radeon_sa_manager sa_manager; 680 struct radeon_sa_manager sa_manager;
667 uint32_t max_pfn; 681 uint32_t max_pfn;
668 /* fields constant after init */
669 const struct radeon_vm_funcs *funcs;
670 /* number of VMIDs */ 682 /* number of VMIDs */
671 unsigned nvm; 683 unsigned nvm;
672 /* vram base address for page table entry */ 684 /* vram base address for page table entry */
@@ -738,7 +750,8 @@ struct si_rlc {
738}; 750};
739 751
740int radeon_ib_get(struct radeon_device *rdev, int ring, 752int radeon_ib_get(struct radeon_device *rdev, int ring,
741 struct radeon_ib *ib, unsigned size); 753 struct radeon_ib *ib, struct radeon_vm *vm,
754 unsigned size);
742void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 755void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
743int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 756int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
744 struct radeon_ib *const_ib); 757 struct radeon_ib *const_ib);
@@ -1131,6 +1144,15 @@ struct radeon_asic {
1131 void (*tlb_flush)(struct radeon_device *rdev); 1144 void (*tlb_flush)(struct radeon_device *rdev);
1132 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); 1145 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
1133 } gart; 1146 } gart;
1147 struct {
1148 int (*init)(struct radeon_device *rdev);
1149 void (*fini)(struct radeon_device *rdev);
1150
1151 u32 pt_ring_index;
1152 void (*set_page)(struct radeon_device *rdev, uint64_t pe,
1153 uint64_t addr, unsigned count,
1154 uint32_t incr, uint32_t flags);
1155 } vm;
1134 /* ring specific callbacks */ 1156 /* ring specific callbacks */
1135 struct { 1157 struct {
1136 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 1158 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -1143,6 +1165,7 @@ struct radeon_asic {
1143 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1165 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1144 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1166 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1145 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); 1167 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1168 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
1146 } ring[RADEON_NUM_RINGS]; 1169 } ring[RADEON_NUM_RINGS];
1147 /* irqs */ 1170 /* irqs */
1148 struct { 1171 struct {
@@ -1157,6 +1180,10 @@ struct radeon_asic {
1157 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); 1180 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
1158 /* wait for vblank */ 1181 /* wait for vblank */
1159 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc); 1182 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
1183 /* set backlight level */
1184 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
1185 /* get backlight level */
1186 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
1160 } display; 1187 } display;
1161 /* copy functions for bo handling */ 1188 /* copy functions for bo handling */
1162 struct { 1189 struct {
@@ -1428,6 +1455,56 @@ struct r600_vram_scratch {
1428 u64 gpu_addr; 1455 u64 gpu_addr;
1429}; 1456};
1430 1457
1458/*
1459 * ACPI
1460 */
1461struct radeon_atif_notification_cfg {
1462 bool enabled;
1463 int command_code;
1464};
1465
1466struct radeon_atif_notifications {
1467 bool display_switch;
1468 bool expansion_mode_change;
1469 bool thermal_state;
1470 bool forced_power_state;
1471 bool system_power_state;
1472 bool display_conf_change;
1473 bool px_gfx_switch;
1474 bool brightness_change;
1475 bool dgpu_display_event;
1476};
1477
1478struct radeon_atif_functions {
1479 bool system_params;
1480 bool sbios_requests;
1481 bool select_active_disp;
1482 bool lid_state;
1483 bool get_tv_standard;
1484 bool set_tv_standard;
1485 bool get_panel_expansion_mode;
1486 bool set_panel_expansion_mode;
1487 bool temperature_change;
1488 bool graphics_device_types;
1489};
1490
1491struct radeon_atif {
1492 struct radeon_atif_notifications notifications;
1493 struct radeon_atif_functions functions;
1494 struct radeon_atif_notification_cfg notification_cfg;
1495 struct radeon_encoder *encoder_for_bl;
1496};
1497
1498struct radeon_atcs_functions {
1499 bool get_ext_state;
1500 bool pcie_perf_req;
1501 bool pcie_dev_rdy;
1502 bool pcie_bus_width;
1503};
1504
1505struct radeon_atcs {
1506 struct radeon_atcs_functions functions;
1507};
1431 1508
1432/* 1509/*
1433 * Core structure, functions and helpers. 1510 * Core structure, functions and helpers.
@@ -1520,6 +1597,9 @@ struct radeon_device {
1520 /* virtual memory */ 1597 /* virtual memory */
1521 struct radeon_vm_manager vm_manager; 1598 struct radeon_vm_manager vm_manager;
1522 struct mutex gpu_clock_mutex; 1599 struct mutex gpu_clock_mutex;
1600 /* ACPI interface */
1601 struct radeon_atif atif;
1602 struct radeon_atcs atcs;
1523}; 1603};
1524 1604
1525int radeon_device_init(struct radeon_device *rdev, 1605int radeon_device_init(struct radeon_device *rdev,
@@ -1683,15 +1763,21 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1683#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 1763#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
1684#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 1764#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
1685#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) 1765#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
1766#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
1767#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
1768#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
1686#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) 1769#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
1687#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) 1770#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
1688#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) 1771#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
1689#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) 1772#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
1690#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) 1773#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
1691#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) 1774#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
1775#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
1692#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) 1776#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
1693#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) 1777#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
1694#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 1778#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
1779#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
1780#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
1695#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) 1781#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
1696#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) 1782#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
1697#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) 1783#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1759,22 +1845,30 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
1759void radeon_vm_manager_fini(struct radeon_device *rdev); 1845void radeon_vm_manager_fini(struct radeon_device *rdev);
1760int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); 1846int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
1761void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); 1847void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
1762int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm); 1848int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
1763void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); 1849struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
1850 struct radeon_vm *vm, int ring);
1851void radeon_vm_fence(struct radeon_device *rdev,
1852 struct radeon_vm *vm,
1853 struct radeon_fence *fence);
1854uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
1764int radeon_vm_bo_update_pte(struct radeon_device *rdev, 1855int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1765 struct radeon_vm *vm, 1856 struct radeon_vm *vm,
1766 struct radeon_bo *bo, 1857 struct radeon_bo *bo,
1767 struct ttm_mem_reg *mem); 1858 struct ttm_mem_reg *mem);
1768void radeon_vm_bo_invalidate(struct radeon_device *rdev, 1859void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1769 struct radeon_bo *bo); 1860 struct radeon_bo *bo);
1770int radeon_vm_bo_add(struct radeon_device *rdev, 1861struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
1771 struct radeon_vm *vm, 1862 struct radeon_bo *bo);
1772 struct radeon_bo *bo, 1863struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
1773 uint64_t offset, 1864 struct radeon_vm *vm,
1774 uint32_t flags); 1865 struct radeon_bo *bo);
1866int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1867 struct radeon_bo_va *bo_va,
1868 uint64_t offset,
1869 uint32_t flags);
1775int radeon_vm_bo_rmv(struct radeon_device *rdev, 1870int radeon_vm_bo_rmv(struct radeon_device *rdev,
1776 struct radeon_vm *vm, 1871 struct radeon_bo_va *bo_va);
1777 struct radeon_bo *bo);
1778 1872
1779/* audio */ 1873/* audio */
1780void r600_audio_update_hdmi(struct work_struct *work); 1874void r600_audio_update_hdmi(struct work_struct *work);
@@ -1832,12 +1926,14 @@ extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_displ
1832extern int ni_init_microcode(struct radeon_device *rdev); 1926extern int ni_init_microcode(struct radeon_device *rdev);
1833extern int ni_mc_load_microcode(struct radeon_device *rdev); 1927extern int ni_mc_load_microcode(struct radeon_device *rdev);
1834 1928
1835/* radeon_acpi.c */ 1929/* radeon_acpi.c */
1836#if defined(CONFIG_ACPI) 1930#if defined(CONFIG_ACPI)
1837extern int radeon_acpi_init(struct radeon_device *rdev); 1931extern int radeon_acpi_init(struct radeon_device *rdev);
1838#else 1932extern void radeon_acpi_fini(struct radeon_device *rdev);
1839static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } 1933#else
1840#endif 1934static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
1935static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
1936#endif
1841 1937
1842#include "radeon_object.h" 1938#include "radeon_object.h"
1843 1939
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3516a6081dcf..c3976eb341bf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -1,35 +1,120 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
1#include <linux/pci.h> 24#include <linux/pci.h>
2#include <linux/acpi.h> 25#include <linux/acpi.h>
3#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/power_supply.h>
4#include <acpi/acpi_drivers.h> 28#include <acpi/acpi_drivers.h>
5#include <acpi/acpi_bus.h> 29#include <acpi/acpi_bus.h>
30#include <acpi/video.h>
6 31
7#include "drmP.h" 32#include "drmP.h"
8#include "drm.h" 33#include "drm.h"
9#include "drm_sarea.h" 34#include "drm_sarea.h"
10#include "drm_crtc_helper.h" 35#include "drm_crtc_helper.h"
11#include "radeon.h" 36#include "radeon.h"
37#include "radeon_acpi.h"
38#include "atom.h"
12 39
13#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
14 41
42#define ACPI_AC_CLASS "ac_adapter"
43
44extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
45
46struct atif_verify_interface {
47 u16 size; /* structure size in bytes (includes size field) */
48 u16 version; /* version */
49 u32 notification_mask; /* supported notifications mask */
50 u32 function_bits; /* supported functions bit vector */
51} __packed;
52
53struct atif_system_params {
54 u16 size; /* structure size in bytes (includes size field) */
55 u32 valid_mask; /* valid flags mask */
56 u32 flags; /* flags */
57 u8 command_code; /* notify command code */
58} __packed;
59
60struct atif_sbios_requests {
61 u16 size; /* structure size in bytes (includes size field) */
62 u32 pending; /* pending sbios requests */
63 u8 panel_exp_mode; /* panel expansion mode */
64 u8 thermal_gfx; /* thermal state: target gfx controller */
65 u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
66 u8 forced_power_gfx; /* forced power state: target gfx controller */
67 u8 forced_power_state; /* forced power state: state id */
68 u8 system_power_src; /* system power source */
69 u8 backlight_level; /* panel backlight level (0-255) */
70} __packed;
71
72#define ATIF_NOTIFY_MASK 0x3
73#define ATIF_NOTIFY_NONE 0
74#define ATIF_NOTIFY_81 1
75#define ATIF_NOTIFY_N 2
76
77struct atcs_verify_interface {
78 u16 size; /* structure size in bytes (includes size field) */
79 u16 version; /* version */
80 u32 function_bits; /* supported functions bit vector */
81} __packed;
82
15/* Call the ATIF method 83/* Call the ATIF method
84 */
85/**
86 * radeon_atif_call - call an ATIF method
16 * 87 *
17 * Note: currently we discard the output 88 * @handle: acpi handle
89 * @function: the ATIF function to execute
90 * @params: ATIF function params
91 *
92 * Executes the requested ATIF function (all asics).
93 * Returns a pointer to the acpi output buffer.
18 */ 94 */
19static int radeon_atif_call(acpi_handle handle) 95static union acpi_object *radeon_atif_call(acpi_handle handle, int function,
96 struct acpi_buffer *params)
20{ 97{
21 acpi_status status; 98 acpi_status status;
22 union acpi_object atif_arg_elements[2]; 99 union acpi_object atif_arg_elements[2];
23 struct acpi_object_list atif_arg; 100 struct acpi_object_list atif_arg;
24 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; 101 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
25 102
26 atif_arg.count = 2; 103 atif_arg.count = 2;
27 atif_arg.pointer = &atif_arg_elements[0]; 104 atif_arg.pointer = &atif_arg_elements[0];
28 105
29 atif_arg_elements[0].type = ACPI_TYPE_INTEGER; 106 atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
30 atif_arg_elements[0].integer.value = 0; 107 atif_arg_elements[0].integer.value = function;
31 atif_arg_elements[1].type = ACPI_TYPE_INTEGER; 108
32 atif_arg_elements[1].integer.value = 0; 109 if (params) {
110 atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
111 atif_arg_elements[1].buffer.length = params->length;
112 atif_arg_elements[1].buffer.pointer = params->pointer;
113 } else {
114 /* We need a second fake parameter */
115 atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
116 atif_arg_elements[1].integer.value = 0;
117 }
33 118
34 status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); 119 status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
35 120
@@ -38,17 +123,434 @@ static int radeon_atif_call(acpi_handle handle)
38 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", 123 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
39 acpi_format_exception(status)); 124 acpi_format_exception(status));
40 kfree(buffer.pointer); 125 kfree(buffer.pointer);
41 return 1; 126 return NULL;
42 } 127 }
43 128
44 kfree(buffer.pointer); 129 return buffer.pointer;
45 return 0; 130}
131
132/**
133 * radeon_atif_parse_notification - parse supported notifications
134 *
135 * @n: supported notifications struct
136 * @mask: supported notifications mask from ATIF
137 *
138 * Use the supported notifications mask from ATIF function
139 * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
140 * are supported (all asics).
141 */
142static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
143{
144 n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
145 n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
146 n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
147 n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
148 n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
149 n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
150 n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
151 n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
152 n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
153}
154
155/**
156 * radeon_atif_parse_functions - parse supported functions
157 *
158 * @f: supported functions struct
159 * @mask: supported functions mask from ATIF
160 *
161 * Use the supported functions mask from ATIF function
162 * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
163 * are supported (all asics).
164 */
165static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
166{
167 f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
168 f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
169 f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
170 f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
171 f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
172 f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
173 f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
174 f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
175 f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
176 f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
177}
178
179/**
180 * radeon_atif_verify_interface - verify ATIF
181 *
182 * @handle: acpi handle
183 * @atif: radeon atif struct
184 *
185 * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
186 * to initialize ATIF and determine what features are supported
187 * (all asics).
188 * returns 0 on success, error on failure.
189 */
190static int radeon_atif_verify_interface(acpi_handle handle,
191 struct radeon_atif *atif)
192{
193 union acpi_object *info;
194 struct atif_verify_interface output;
195 size_t size;
196 int err = 0;
197
198 info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
199 if (!info)
200 return -EIO;
201
202 memset(&output, 0, sizeof(output));
203
204 size = *(u16 *) info->buffer.pointer;
205 if (size < 12) {
206 DRM_INFO("ATIF buffer is too small: %lu\n", size);
207 err = -EINVAL;
208 goto out;
209 }
210 size = min(sizeof(output), size);
211
212 memcpy(&output, info->buffer.pointer, size);
213
214 /* TODO: check version? */
215 DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
216
217 radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
218 radeon_atif_parse_functions(&atif->functions, output.function_bits);
219
220out:
221 kfree(info);
222 return err;
223}
224
225/**
226 * radeon_atif_get_notification_params - determine notify configuration
227 *
228 * @handle: acpi handle
229 * @n: atif notification configuration struct
230 *
231 * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
232 * to determine if a notifier is used and if so which one
233 * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
234 * where n is specified in the result if a notifier is used.
235 * Returns 0 on success, error on failure.
236 */
237static int radeon_atif_get_notification_params(acpi_handle handle,
238 struct radeon_atif_notification_cfg *n)
239{
240 union acpi_object *info;
241 struct atif_system_params params;
242 size_t size;
243 int err = 0;
244
245 info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
246 if (!info) {
247 err = -EIO;
248 goto out;
249 }
250
251 size = *(u16 *) info->buffer.pointer;
252 if (size < 10) {
253 err = -EINVAL;
254 goto out;
255 }
256
257 memset(&params, 0, sizeof(params));
258 size = min(sizeof(params), size);
259 memcpy(&params, info->buffer.pointer, size);
260
261 DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
262 params.flags, params.valid_mask);
263 params.flags = params.flags & params.valid_mask;
264
265 if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
266 n->enabled = false;
267 n->command_code = 0;
268 } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
269 n->enabled = true;
270 n->command_code = 0x81;
271 } else {
272 if (size < 11) {
273 err = -EINVAL;
274 goto out;
275 }
276 n->enabled = true;
277 n->command_code = params.command_code;
278 }
279
280out:
281 DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
282 (n->enabled ? "enabled" : "disabled"),
283 n->command_code);
284 kfree(info);
285 return err;
286}
287
288/**
289 * radeon_atif_get_sbios_requests - get requested sbios event
290 *
291 * @handle: acpi handle
292 * @req: atif sbios request struct
293 *
294 * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
295 * to determine what requests the sbios is making to the driver
296 * (all asics).
297 * Returns 0 on success, error on failure.
298 */
299static int radeon_atif_get_sbios_requests(acpi_handle handle,
300 struct atif_sbios_requests *req)
301{
302 union acpi_object *info;
303 size_t size;
304 int count = 0;
305
306 info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
307 if (!info)
308 return -EIO;
309
310 size = *(u16 *)info->buffer.pointer;
311 if (size < 0xd) {
312 count = -EINVAL;
313 goto out;
314 }
315 memset(req, 0, sizeof(*req));
316
317 size = min(sizeof(*req), size);
318 memcpy(req, info->buffer.pointer, size);
319 DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
320
321 count = hweight32(req->pending);
322
323out:
324 kfree(info);
325 return count;
326}
327
328/**
329 * radeon_atif_handler - handle ATIF notify requests
330 *
331 * @rdev: radeon_device pointer
332 * @event: atif sbios request struct
333 *
334 * Checks the acpi event and if it matches an atif event,
335 * handles it.
336 * Returns NOTIFY code
337 */
338int radeon_atif_handler(struct radeon_device *rdev,
339 struct acpi_bus_event *event)
340{
341 struct radeon_atif *atif = &rdev->atif;
342 struct atif_sbios_requests req;
343 acpi_handle handle;
344 int count;
345
346 DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
347 event->device_class, event->type);
348
349 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
350 return NOTIFY_DONE;
351
352 if (!atif->notification_cfg.enabled ||
353 event->type != atif->notification_cfg.command_code)
354 /* Not our event */
355 return NOTIFY_DONE;
356
357 /* Check pending SBIOS requests */
358 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
359 count = radeon_atif_get_sbios_requests(handle, &req);
360
361 if (count <= 0)
362 return NOTIFY_DONE;
363
364 DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
365
366 if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
367 struct radeon_encoder *enc = atif->encoder_for_bl;
368
369 if (enc) {
370 DRM_DEBUG_DRIVER("Changing brightness to %d\n",
371 req.backlight_level);
372
373 radeon_set_backlight_level(rdev, enc, req.backlight_level);
374
375 if (rdev->is_atom_bios) {
376 struct radeon_encoder_atom_dig *dig = enc->enc_priv;
377 backlight_force_update(dig->bl_dev,
378 BACKLIGHT_UPDATE_HOTKEY);
379 } else {
380 struct radeon_encoder_lvds *dig = enc->enc_priv;
381 backlight_force_update(dig->bl_dev,
382 BACKLIGHT_UPDATE_HOTKEY);
383 }
384 }
385 }
386 /* TODO: check other events */
387
388 /* We've handled the event, stop the notifier chain. The ACPI interface
389 * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
390 * userspace if the event was generated only to signal a SBIOS
391 * request.
392 */
393 return NOTIFY_BAD;
394}
395
396/* Call the ATCS method
397 */
398/**
399 * radeon_atcs_call - call an ATCS method
400 *
401 * @handle: acpi handle
402 * @function: the ATCS function to execute
403 * @params: ATCS function params
404 *
405 * Executes the requested ATCS function (all asics).
406 * Returns a pointer to the acpi output buffer.
407 */
408static union acpi_object *radeon_atcs_call(acpi_handle handle, int function,
409 struct acpi_buffer *params)
410{
411 acpi_status status;
412 union acpi_object atcs_arg_elements[2];
413 struct acpi_object_list atcs_arg;
414 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
415
416 atcs_arg.count = 2;
417 atcs_arg.pointer = &atcs_arg_elements[0];
418
419 atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
420 atcs_arg_elements[0].integer.value = function;
421
422 if (params) {
423 atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
424 atcs_arg_elements[1].buffer.length = params->length;
425 atcs_arg_elements[1].buffer.pointer = params->pointer;
426 } else {
427 /* We need a second fake parameter */
428 atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
429 atcs_arg_elements[1].integer.value = 0;
430 }
431
432 status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
433
434 /* Fail only if calling the method fails and ATIF is supported */
435 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
436 DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
437 acpi_format_exception(status));
438 kfree(buffer.pointer);
439 return NULL;
440 }
441
442 return buffer.pointer;
443}
444
445/**
446 * radeon_atcs_parse_functions - parse supported functions
447 *
448 * @f: supported functions struct
449 * @mask: supported functions mask from ATCS
450 *
451 * Use the supported functions mask from ATCS function
452 * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
453 * are supported (all asics).
454 */
455static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
456{
457 f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
458 f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
459 f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
460 f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
461}
462
463/**
464 * radeon_atcs_verify_interface - verify ATCS
465 *
466 * @handle: acpi handle
467 * @atcs: radeon atcs struct
468 *
469 * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
470 * to initialize ATCS and determine what features are supported
471 * (all asics).
472 * returns 0 on success, error on failure.
473 */
474static int radeon_atcs_verify_interface(acpi_handle handle,
475 struct radeon_atcs *atcs)
476{
477 union acpi_object *info;
478 struct atcs_verify_interface output;
479 size_t size;
480 int err = 0;
481
482 info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
483 if (!info)
484 return -EIO;
485
486 memset(&output, 0, sizeof(output));
487
488 size = *(u16 *) info->buffer.pointer;
489 if (size < 8) {
490 DRM_INFO("ATCS buffer is too small: %lu\n", size);
491 err = -EINVAL;
492 goto out;
493 }
494 size = min(sizeof(output), size);
495
496 memcpy(&output, info->buffer.pointer, size);
497
498 /* TODO: check version? */
499 DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
500
501 radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
502
503out:
504 kfree(info);
505 return err;
506}
507
508/**
509 * radeon_acpi_event - handle notify events
510 *
511 * @nb: notifier block
512 * @val: val
513 * @data: acpi event
514 *
515 * Calls relevant radeon functions in response to various
516 * acpi events.
517 * Returns NOTIFY code
518 */
519static int radeon_acpi_event(struct notifier_block *nb,
520 unsigned long val,
521 void *data)
522{
523 struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
524 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
525
526 if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
527 if (power_supply_is_system_supplied() > 0)
528 DRM_DEBUG_DRIVER("pm: AC\n");
529 else
530 DRM_DEBUG_DRIVER("pm: DC\n");
531
532 radeon_pm_acpi_event_handler(rdev);
533 }
534
535 /* Check for pending SBIOS requests */
536 return radeon_atif_handler(rdev, entry);
46} 537}
47 538
48/* Call all ACPI methods here */ 539/* Call all ACPI methods here */
540/**
541 * radeon_acpi_init - init driver acpi support
542 *
543 * @rdev: radeon_device pointer
544 *
545 * Verifies the AMD ACPI interfaces and registers with the acpi
546 * notifier chain (all asics).
547 * Returns 0 on success, error on failure.
548 */
49int radeon_acpi_init(struct radeon_device *rdev) 549int radeon_acpi_init(struct radeon_device *rdev)
50{ 550{
51 acpi_handle handle; 551 acpi_handle handle;
552 struct radeon_atif *atif = &rdev->atif;
553 struct radeon_atcs *atcs = &rdev->atcs;
52 int ret; 554 int ret;
53 555
54 /* Get the device handle */ 556 /* Get the device handle */
@@ -58,11 +560,90 @@ int radeon_acpi_init(struct radeon_device *rdev)
58 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) 560 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
59 return 0; 561 return 0;
60 562
563 /* Call the ATCS method */
564 ret = radeon_atcs_verify_interface(handle, atcs);
565 if (ret) {
566 DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
567 }
568
61 /* Call the ATIF method */ 569 /* Call the ATIF method */
62 ret = radeon_atif_call(handle); 570 ret = radeon_atif_verify_interface(handle, atif);
63 if (ret) 571 if (ret) {
64 return ret; 572 DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
573 goto out;
574 }
575
576 if (atif->notifications.brightness_change) {
577 struct drm_encoder *tmp;
578 struct radeon_encoder *target = NULL;
579
580 /* Find the encoder controlling the brightness */
581 list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
582 head) {
583 struct radeon_encoder *enc = to_radeon_encoder(tmp);
584
585 if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
586 enc->enc_priv) {
587 if (rdev->is_atom_bios) {
588 struct radeon_encoder_atom_dig *dig = enc->enc_priv;
589 if (dig->bl_dev) {
590 target = enc;
591 break;
592 }
593 } else {
594 struct radeon_encoder_lvds *dig = enc->enc_priv;
595 if (dig->bl_dev) {
596 target = enc;
597 break;
598 }
599 }
600 }
601 }
602
603 atif->encoder_for_bl = target;
604 if (!target) {
605 /* Brightness change notification is enabled, but we
606 * didn't find a backlight controller, this should
607 * never happen.
608 */
609 DRM_ERROR("Cannot find a backlight controller\n");
610 }
611 }
65 612
66 return 0; 613 if (atif->functions.sbios_requests && !atif->functions.system_params) {
614	/* XXX check this workaround, if sbios request function is
615 * present we have to see how it's configured in the system
616 * params
617 */
618 atif->functions.system_params = true;
619 }
620
621 if (atif->functions.system_params) {
622 ret = radeon_atif_get_notification_params(handle,
623 &atif->notification_cfg);
624 if (ret) {
625 DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
626 ret);
627 /* Disable notification */
628 atif->notification_cfg.enabled = false;
629 }
630 }
631
632out:
633 rdev->acpi_nb.notifier_call = radeon_acpi_event;
634 register_acpi_notifier(&rdev->acpi_nb);
635
636 return ret;
67} 637}
68 638
639/**
640 * radeon_acpi_fini - tear down driver acpi support
641 *
642 * @rdev: radeon_device pointer
643 *
644 * Unregisters with the acpi notifier chain (all asics).
645 */
646void radeon_acpi_fini(struct radeon_device *rdev)
647{
648 unregister_acpi_notifier(&rdev->acpi_nb);
649}
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
new file mode 100644
index 000000000000..be4af76f213d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -0,0 +1,445 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef RADEON_ACPI_H
25#define RADEON_ACPI_H
26
27struct radeon_device;
28struct acpi_bus_event;
29
30int radeon_atif_handler(struct radeon_device *rdev,
31 struct acpi_bus_event *event);
32
33/* AMD hw uses four ACPI control methods:
34 * 1. ATIF
35 * ARG0: (ACPI_INTEGER) function code
36 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
37 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
38 * ATIF provides an entry point for the gfx driver to interact with the sbios.
39 * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
40 * notification. Which notification is used as indicated by the ATIF Control
41 * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
42 * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
43 * to identify pending System BIOS requests and associated parameters. For
44 * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
45 * will perform display device detection and invoke ATIF Control Method
46 * SELECT_ACTIVE_DISPLAYS.
47 *
48 * 2. ATPX
49 * ARG0: (ACPI_INTEGER) function code
50 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
51 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
52 * ATPX methods are used on PowerXpress systems to handle mux switching and
53 * discrete GPU power control.
54 *
55 * 3. ATRM
56 * ARG0: (ACPI_INTEGER) offset of vbios rom data
57 * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
58 * OUTPUT: (ACPI_BUFFER) output buffer
59 * ATRM provides an interface to access the discrete GPU vbios image on
60 * PowerXpress systems with multiple GPUs.
61 *
62 * 4. ATCS
63 * ARG0: (ACPI_INTEGER) function code
64 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
65 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
66 * ATCS provides an interface to AMD chipset specific functionality.
67 *
68 */
69/* ATIF */
70#define ATIF_FUNCTION_VERIFY_INTERFACE 0x0
71/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
72 * ARG1: none
73 * OUTPUT:
74 * WORD - structure size in bytes (includes size field)
75 * WORD - version
76 * DWORD - supported notifications mask
77 * DWORD - supported functions bit vector
78 */
79/* Notifications mask */
80# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
81# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
82# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
83# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
84# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
85# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
86# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
87# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
88# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
89/* supported functions vector */
90# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
91# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
92# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
93# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
94# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
95# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
96# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
97# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
98# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
99# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
100#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
101/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
102 * ARG1: none
103 * OUTPUT:
104 * WORD - structure size in bytes (includes size field)
105 * DWORD - valid flags mask
106 * DWORD - flags
107 *
108 * OR
109 *
110 * WORD - structure size in bytes (includes size field)
111 * DWORD - valid flags mask
112 * DWORD - flags
113 * BYTE - notify command code
114 *
115 * flags
116 * bits 1:0:
117 * 0 - Notify(VGA, 0x81) is not used for notification
118 * 1 - Notify(VGA, 0x81) is used for notification
119 * 2 - Notify(VGA, n) is used for notification where
120 * n (0xd0-0xd9) is specified in notify command code.
121 * bit 2:
122 * 1 - lid changes not reported through int10
123 */
124#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
125/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
126 * ARG1: none
127 * OUTPUT:
128 * WORD - structure size in bytes (includes size field)
129 * DWORD - pending sbios requests
130 * BYTE - panel expansion mode
131 * BYTE - thermal state: target gfx controller
132 * BYTE - thermal state: state id (0: exit state, non-0: state)
133 * BYTE - forced power state: target gfx controller
134 * BYTE - forced power state: state id
135 * BYTE - system power source
136 * BYTE - panel backlight level (0-255)
137 */
138/* pending sbios requests */
139# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
140# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
141# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
142# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
143# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
144# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
145# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
146# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
147# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
148/* panel expansion mode */
149# define ATIF_PANEL_EXPANSION_DISABLE 0
150# define ATIF_PANEL_EXPANSION_FULL 1
151# define ATIF_PANEL_EXPANSION_ASPECT 2
152/* target gfx controller */
153# define ATIF_TARGET_GFX_SINGLE 0
154# define ATIF_TARGET_GFX_PX_IGPU 1
155# define ATIF_TARGET_GFX_PX_DGPU 2
156/* system power source */
157# define ATIF_POWER_SOURCE_AC 1
158# define ATIF_POWER_SOURCE_DC 2
159# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
160# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
161#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
162/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
163 * ARG1:
164 * WORD - structure size in bytes (includes size field)
165 * WORD - selected displays
166 * WORD - connected displays
167 * OUTPUT:
168 * WORD - structure size in bytes (includes size field)
169 * WORD - selected displays
170 */
171# define ATIF_LCD1 (1 << 0)
172# define ATIF_CRT1 (1 << 1)
173# define ATIF_TV (1 << 2)
174# define ATIF_DFP1 (1 << 3)
175# define ATIF_CRT2 (1 << 4)
176# define ATIF_LCD2 (1 << 5)
177# define ATIF_DFP2 (1 << 7)
178# define ATIF_CV (1 << 8)
179# define ATIF_DFP3 (1 << 9)
180# define ATIF_DFP4 (1 << 10)
181# define ATIF_DFP5 (1 << 11)
182# define ATIF_DFP6 (1 << 12)
183#define ATIF_FUNCTION_GET_LID_STATE 0x4
184/* ARG0: ATIF_FUNCTION_GET_LID_STATE
185 * ARG1: none
186 * OUTPUT:
187 * WORD - structure size in bytes (includes size field)
188 * BYTE - lid state (0: open, 1: closed)
189 *
190 * GET_LID_STATE only works at boot and resume, for general lid
191 * status, use the kernel provided status
192 */
193#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
194/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
195 * ARG1: none
196 * OUTPUT:
197 * WORD - structure size in bytes (includes size field)
198 * BYTE - 0
199 * BYTE - TV standard
200 */
201# define ATIF_TV_STD_NTSC 0
202# define ATIF_TV_STD_PAL 1
203# define ATIF_TV_STD_PALM 2
204# define ATIF_TV_STD_PAL60 3
205# define ATIF_TV_STD_NTSCJ 4
206# define ATIF_TV_STD_PALCN 5
207# define ATIF_TV_STD_PALN 6
208# define ATIF_TV_STD_SCART_RGB 9
209#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
210/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
211 * ARG1:
212 * WORD - structure size in bytes (includes size field)
213 * BYTE - 0
214 * BYTE - TV standard
215 * OUTPUT: none
216 */
217#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
218/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
219 * ARG1: none
220 * OUTPUT:
221 * WORD - structure size in bytes (includes size field)
222 * BYTE - panel expansion mode
223 */
224#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
225/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
226 * ARG1:
227 * WORD - structure size in bytes (includes size field)
228 * BYTE - panel expansion mode
229 * OUTPUT: none
230 */
231#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
232/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
233 * ARG1:
234 * WORD - structure size in bytes (includes size field)
235 * WORD - gfx controller id
236 * BYTE - current temperature (degrees Celsius)
237 * OUTPUT: none
238 */
239#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
240/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
241 * ARG1: none
242 * OUTPUT:
243 * WORD - number of gfx devices
244 * WORD - device structure size in bytes (excludes device size field)
245 * DWORD - flags \
246 * WORD - bus number } repeated structure
247 * WORD - device number /
248 */
249/* flags */
250# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
251# define ATIF_XGP_PORT (1 << 1)
252# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
253# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
254
255/* ATPX */
256#define ATPX_FUNCTION_VERIFY_INTERFACE 0x0
257/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
258 * ARG1: none
259 * OUTPUT:
260 * WORD - structure size in bytes (includes size field)
261 * WORD - version
262 * DWORD - supported functions bit vector
263 */
264/* supported functions vector */
265# define ATPX_GET_PX_PARAMETERS_SUPPORTED (1 << 0)
266# define ATPX_POWER_CONTROL_SUPPORTED (1 << 1)
267# define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED (1 << 2)
268# define ATPX_I2C_MUX_CONTROL_SUPPORTED (1 << 3)
269# define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
270# define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5)
271# define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED (1 << 7)
272# define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED (1 << 8)
273#define ATPX_FUNCTION_GET_PX_PARAMETERS 0x1
274/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
275 * ARG1: none
276 * OUTPUT:
277 * WORD - structure size in bytes (includes size field)
278 * DWORD - valid flags mask
279 * DWORD - flags
280 */
281/* flags */
282# define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 0)
283# define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 1)
284# define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 2)
285# define ATPX_CRT1_RGB_SIGNAL_MUXED (1 << 3)
286# define ATPX_TV_SIGNAL_MUXED (1 << 4)
287# define ATPX_DFP_SIGNAL_MUXED (1 << 5)
288# define ATPX_SEPARATE_MUX_FOR_I2C (1 << 6)
289# define ATPX_DYNAMIC_PX_SUPPORTED (1 << 7)
290# define ATPX_ACF_NOT_SUPPORTED (1 << 8)
291# define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
292# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
293# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
294#define ATPX_FUNCTION_POWER_CONTROL 0x2
295/* ARG0: ATPX_FUNCTION_POWER_CONTROL
296 * ARG1:
297 * WORD - structure size in bytes (includes size field)
298 * BYTE - dGPU power state (0: power off, 1: power on)
299 * OUTPUT: none
300 */
301#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL 0x3
302/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
303 * ARG1:
304 * WORD - structure size in bytes (includes size field)
305 * WORD - display mux control (0: iGPU, 1: dGPU)
306 * OUTPUT: none
307 */
308# define ATPX_INTEGRATED_GPU 0
309# define ATPX_DISCRETE_GPU 1
310#define ATPX_FUNCTION_I2C_MUX_CONTROL 0x4
311/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
312 * ARG1:
313 * WORD - structure size in bytes (includes size field)
314 * WORD - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
315 * OUTPUT: none
316 */
317#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION 0x5
318/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
319 * ARG1:
320 * WORD - structure size in bytes (includes size field)
321 * WORD - target gpu (0: iGPU, 1: dGPU)
322 * OUTPUT: none
323 */
324#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION 0x6
325/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
326 * ARG1:
327 * WORD - structure size in bytes (includes size field)
328 * WORD - target gpu (0: iGPU, 1: dGPU)
329 * OUTPUT: none
330 */
331#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING 0x8
332/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
333 * ARG1: none
334 * OUTPUT:
335 * WORD - number of display connectors
336 * WORD - connector structure size in bytes (excludes connector size field)
337 * BYTE - flags \
338 * BYTE - ATIF display vector bit position } repeated
339 * BYTE - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
340 * WORD - connector ACPI id /
341 */
342/* flags */
343# define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 0)
344# define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 1)
345# define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 2)
346#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS 0x9
347/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
348 * ARG1: none
349 * OUTPUT:
350 * WORD - number of HPD/DDC ports
351 * WORD - port structure size in bytes (excludes port size field)
352 * BYTE - ATIF display vector bit position \
353 * BYTE - hpd id } repeated structure
354 * BYTE - ddc id /
355 *
356 * available on A+A systems only
357 */
358/* hpd id */
359# define ATPX_HPD_NONE 0
360# define ATPX_HPD1 1
361# define ATPX_HPD2 2
362# define ATPX_HPD3 3
363# define ATPX_HPD4 4
364# define ATPX_HPD5 5
365# define ATPX_HPD6 6
366/* ddc id */
367# define ATPX_DDC_NONE 0
368# define ATPX_DDC1 1
369# define ATPX_DDC2 2
370# define ATPX_DDC3 3
371# define ATPX_DDC4 4
372# define ATPX_DDC5 5
373# define ATPX_DDC6 6
374# define ATPX_DDC7 7
375# define ATPX_DDC8 8
376
377/* ATCS */
378#define ATCS_FUNCTION_VERIFY_INTERFACE 0x0
379/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
380 * ARG1: none
381 * OUTPUT:
382 * WORD - structure size in bytes (includes size field)
383 * WORD - version
384 * DWORD - supported functions bit vector
385 */
386/* supported functions vector */
387# define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0)
388# define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1)
389# define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2)
390# define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3)
391#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1
392/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
393 * ARG1: none
394 * OUTPUT:
395 * WORD - structure size in bytes (includes size field)
396 * DWORD - valid flags mask
397 * DWORD - flags (0: undocked, 1: docked)
398 */
399/* flags */
400# define ATCS_DOCKED (1 << 0)
401#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2
402/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
403 * ARG1:
404 * WORD - structure size in bytes (includes size field)
405 * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
406 * WORD - valid flags mask
407 * WORD - flags
408 * BYTE - request type
409 * BYTE - performance request
410 * OUTPUT:
411 * WORD - structure size in bytes (includes size field)
412 * BYTE - return value
413 */
414/* flags */
415# define ATCS_ADVERTISE_CAPS (1 << 0)
416# define ATCS_WAIT_FOR_COMPLETION (1 << 1)
417/* request type */
418# define ATCS_PCIE_LINK_SPEED 1
419/* performance request */
420# define ATCS_REMOVE 0
421# define ATCS_FORCE_LOW_POWER 1
422# define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */
423# define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */
424# define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */
425/* return value */
426# define ATCS_REQUEST_REFUSED 1
427# define ATCS_REQUEST_COMPLETE 2
428# define ATCS_REQUEST_IN_PROGRESS 3
429#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3
430/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
431 * ARG1: none
432 * OUTPUT: none
433 */
434#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4
435/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
436 * ARG1:
437 * WORD - structure size in bytes (includes size field)
438 * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
439 * BYTE - number of active lanes
440 * OUTPUT:
441 * WORD - structure size in bytes (includes size field)
442 * BYTE - number of active lanes
443 */
444
445#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 973417c4b014..654520b95ab7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -198,6 +198,8 @@ static struct radeon_asic r100_asic = {
198 .bandwidth_update = &r100_bandwidth_update, 198 .bandwidth_update = &r100_bandwidth_update,
199 .get_vblank_counter = &r100_get_vblank_counter, 199 .get_vblank_counter = &r100_get_vblank_counter,
200 .wait_for_vblank = &r100_wait_for_vblank, 200 .wait_for_vblank = &r100_wait_for_vblank,
201 .set_backlight_level = &radeon_legacy_set_backlight_level,
202 .get_backlight_level = &radeon_legacy_get_backlight_level,
201 }, 203 },
202 .copy = { 204 .copy = {
203 .blit = &r100_copy_blit, 205 .blit = &r100_copy_blit,
@@ -272,6 +274,8 @@ static struct radeon_asic r200_asic = {
272 .bandwidth_update = &r100_bandwidth_update, 274 .bandwidth_update = &r100_bandwidth_update,
273 .get_vblank_counter = &r100_get_vblank_counter, 275 .get_vblank_counter = &r100_get_vblank_counter,
274 .wait_for_vblank = &r100_wait_for_vblank, 276 .wait_for_vblank = &r100_wait_for_vblank,
277 .set_backlight_level = &radeon_legacy_set_backlight_level,
278 .get_backlight_level = &radeon_legacy_get_backlight_level,
275 }, 279 },
276 .copy = { 280 .copy = {
277 .blit = &r100_copy_blit, 281 .blit = &r100_copy_blit,
@@ -346,6 +350,8 @@ static struct radeon_asic r300_asic = {
346 .bandwidth_update = &r100_bandwidth_update, 350 .bandwidth_update = &r100_bandwidth_update,
347 .get_vblank_counter = &r100_get_vblank_counter, 351 .get_vblank_counter = &r100_get_vblank_counter,
348 .wait_for_vblank = &r100_wait_for_vblank, 352 .wait_for_vblank = &r100_wait_for_vblank,
353 .set_backlight_level = &radeon_legacy_set_backlight_level,
354 .get_backlight_level = &radeon_legacy_get_backlight_level,
349 }, 355 },
350 .copy = { 356 .copy = {
351 .blit = &r100_copy_blit, 357 .blit = &r100_copy_blit,
@@ -420,6 +426,8 @@ static struct radeon_asic r300_asic_pcie = {
420 .bandwidth_update = &r100_bandwidth_update, 426 .bandwidth_update = &r100_bandwidth_update,
421 .get_vblank_counter = &r100_get_vblank_counter, 427 .get_vblank_counter = &r100_get_vblank_counter,
422 .wait_for_vblank = &r100_wait_for_vblank, 428 .wait_for_vblank = &r100_wait_for_vblank,
429 .set_backlight_level = &radeon_legacy_set_backlight_level,
430 .get_backlight_level = &radeon_legacy_get_backlight_level,
423 }, 431 },
424 .copy = { 432 .copy = {
425 .blit = &r100_copy_blit, 433 .blit = &r100_copy_blit,
@@ -494,6 +502,8 @@ static struct radeon_asic r420_asic = {
494 .bandwidth_update = &r100_bandwidth_update, 502 .bandwidth_update = &r100_bandwidth_update,
495 .get_vblank_counter = &r100_get_vblank_counter, 503 .get_vblank_counter = &r100_get_vblank_counter,
496 .wait_for_vblank = &r100_wait_for_vblank, 504 .wait_for_vblank = &r100_wait_for_vblank,
505 .set_backlight_level = &atombios_set_backlight_level,
506 .get_backlight_level = &atombios_get_backlight_level,
497 }, 507 },
498 .copy = { 508 .copy = {
499 .blit = &r100_copy_blit, 509 .blit = &r100_copy_blit,
@@ -568,6 +578,8 @@ static struct radeon_asic rs400_asic = {
568 .bandwidth_update = &r100_bandwidth_update, 578 .bandwidth_update = &r100_bandwidth_update,
569 .get_vblank_counter = &r100_get_vblank_counter, 579 .get_vblank_counter = &r100_get_vblank_counter,
570 .wait_for_vblank = &r100_wait_for_vblank, 580 .wait_for_vblank = &r100_wait_for_vblank,
581 .set_backlight_level = &radeon_legacy_set_backlight_level,
582 .get_backlight_level = &radeon_legacy_get_backlight_level,
571 }, 583 },
572 .copy = { 584 .copy = {
573 .blit = &r100_copy_blit, 585 .blit = &r100_copy_blit,
@@ -642,6 +654,8 @@ static struct radeon_asic rs600_asic = {
642 .bandwidth_update = &rs600_bandwidth_update, 654 .bandwidth_update = &rs600_bandwidth_update,
643 .get_vblank_counter = &rs600_get_vblank_counter, 655 .get_vblank_counter = &rs600_get_vblank_counter,
644 .wait_for_vblank = &avivo_wait_for_vblank, 656 .wait_for_vblank = &avivo_wait_for_vblank,
657 .set_backlight_level = &atombios_set_backlight_level,
658 .get_backlight_level = &atombios_get_backlight_level,
645 }, 659 },
646 .copy = { 660 .copy = {
647 .blit = &r100_copy_blit, 661 .blit = &r100_copy_blit,
@@ -716,6 +730,8 @@ static struct radeon_asic rs690_asic = {
716 .get_vblank_counter = &rs600_get_vblank_counter, 730 .get_vblank_counter = &rs600_get_vblank_counter,
717 .bandwidth_update = &rs690_bandwidth_update, 731 .bandwidth_update = &rs690_bandwidth_update,
718 .wait_for_vblank = &avivo_wait_for_vblank, 732 .wait_for_vblank = &avivo_wait_for_vblank,
733 .set_backlight_level = &atombios_set_backlight_level,
734 .get_backlight_level = &atombios_get_backlight_level,
719 }, 735 },
720 .copy = { 736 .copy = {
721 .blit = &r100_copy_blit, 737 .blit = &r100_copy_blit,
@@ -790,6 +806,8 @@ static struct radeon_asic rv515_asic = {
790 .get_vblank_counter = &rs600_get_vblank_counter, 806 .get_vblank_counter = &rs600_get_vblank_counter,
791 .bandwidth_update = &rv515_bandwidth_update, 807 .bandwidth_update = &rv515_bandwidth_update,
792 .wait_for_vblank = &avivo_wait_for_vblank, 808 .wait_for_vblank = &avivo_wait_for_vblank,
809 .set_backlight_level = &atombios_set_backlight_level,
810 .get_backlight_level = &atombios_get_backlight_level,
793 }, 811 },
794 .copy = { 812 .copy = {
795 .blit = &r100_copy_blit, 813 .blit = &r100_copy_blit,
@@ -864,6 +882,8 @@ static struct radeon_asic r520_asic = {
864 .bandwidth_update = &rv515_bandwidth_update, 882 .bandwidth_update = &rv515_bandwidth_update,
865 .get_vblank_counter = &rs600_get_vblank_counter, 883 .get_vblank_counter = &rs600_get_vblank_counter,
866 .wait_for_vblank = &avivo_wait_for_vblank, 884 .wait_for_vblank = &avivo_wait_for_vblank,
885 .set_backlight_level = &atombios_set_backlight_level,
886 .get_backlight_level = &atombios_get_backlight_level,
867 }, 887 },
868 .copy = { 888 .copy = {
869 .blit = &r100_copy_blit, 889 .blit = &r100_copy_blit,
@@ -937,6 +957,8 @@ static struct radeon_asic r600_asic = {
937 .bandwidth_update = &rv515_bandwidth_update, 957 .bandwidth_update = &rv515_bandwidth_update,
938 .get_vblank_counter = &rs600_get_vblank_counter, 958 .get_vblank_counter = &rs600_get_vblank_counter,
939 .wait_for_vblank = &avivo_wait_for_vblank, 959 .wait_for_vblank = &avivo_wait_for_vblank,
960 .set_backlight_level = &atombios_set_backlight_level,
961 .get_backlight_level = &atombios_get_backlight_level,
940 }, 962 },
941 .copy = { 963 .copy = {
942 .blit = &r600_copy_blit, 964 .blit = &r600_copy_blit,
@@ -1010,6 +1032,8 @@ static struct radeon_asic rs780_asic = {
1010 .bandwidth_update = &rs690_bandwidth_update, 1032 .bandwidth_update = &rs690_bandwidth_update,
1011 .get_vblank_counter = &rs600_get_vblank_counter, 1033 .get_vblank_counter = &rs600_get_vblank_counter,
1012 .wait_for_vblank = &avivo_wait_for_vblank, 1034 .wait_for_vblank = &avivo_wait_for_vblank,
1035 .set_backlight_level = &atombios_set_backlight_level,
1036 .get_backlight_level = &atombios_get_backlight_level,
1013 }, 1037 },
1014 .copy = { 1038 .copy = {
1015 .blit = &r600_copy_blit, 1039 .blit = &r600_copy_blit,
@@ -1083,6 +1107,8 @@ static struct radeon_asic rv770_asic = {
1083 .bandwidth_update = &rv515_bandwidth_update, 1107 .bandwidth_update = &rv515_bandwidth_update,
1084 .get_vblank_counter = &rs600_get_vblank_counter, 1108 .get_vblank_counter = &rs600_get_vblank_counter,
1085 .wait_for_vblank = &avivo_wait_for_vblank, 1109 .wait_for_vblank = &avivo_wait_for_vblank,
1110 .set_backlight_level = &atombios_set_backlight_level,
1111 .get_backlight_level = &atombios_get_backlight_level,
1086 }, 1112 },
1087 .copy = { 1113 .copy = {
1088 .blit = &r600_copy_blit, 1114 .blit = &r600_copy_blit,
@@ -1156,6 +1182,8 @@ static struct radeon_asic evergreen_asic = {
1156 .bandwidth_update = &evergreen_bandwidth_update, 1182 .bandwidth_update = &evergreen_bandwidth_update,
1157 .get_vblank_counter = &evergreen_get_vblank_counter, 1183 .get_vblank_counter = &evergreen_get_vblank_counter,
1158 .wait_for_vblank = &dce4_wait_for_vblank, 1184 .wait_for_vblank = &dce4_wait_for_vblank,
1185 .set_backlight_level = &atombios_set_backlight_level,
1186 .get_backlight_level = &atombios_get_backlight_level,
1159 }, 1187 },
1160 .copy = { 1188 .copy = {
1161 .blit = &r600_copy_blit, 1189 .blit = &r600_copy_blit,
@@ -1229,6 +1257,8 @@ static struct radeon_asic sumo_asic = {
1229 .bandwidth_update = &evergreen_bandwidth_update, 1257 .bandwidth_update = &evergreen_bandwidth_update,
1230 .get_vblank_counter = &evergreen_get_vblank_counter, 1258 .get_vblank_counter = &evergreen_get_vblank_counter,
1231 .wait_for_vblank = &dce4_wait_for_vblank, 1259 .wait_for_vblank = &dce4_wait_for_vblank,
1260 .set_backlight_level = &atombios_set_backlight_level,
1261 .get_backlight_level = &atombios_get_backlight_level,
1232 }, 1262 },
1233 .copy = { 1263 .copy = {
1234 .blit = &r600_copy_blit, 1264 .blit = &r600_copy_blit,
@@ -1302,6 +1332,8 @@ static struct radeon_asic btc_asic = {
1302 .bandwidth_update = &evergreen_bandwidth_update, 1332 .bandwidth_update = &evergreen_bandwidth_update,
1303 .get_vblank_counter = &evergreen_get_vblank_counter, 1333 .get_vblank_counter = &evergreen_get_vblank_counter,
1304 .wait_for_vblank = &dce4_wait_for_vblank, 1334 .wait_for_vblank = &dce4_wait_for_vblank,
1335 .set_backlight_level = &atombios_set_backlight_level,
1336 .get_backlight_level = &atombios_get_backlight_level,
1305 }, 1337 },
1306 .copy = { 1338 .copy = {
1307 .blit = &r600_copy_blit, 1339 .blit = &r600_copy_blit,
@@ -1325,7 +1357,7 @@ static struct radeon_asic btc_asic = {
1325 .misc = &evergreen_pm_misc, 1357 .misc = &evergreen_pm_misc,
1326 .prepare = &evergreen_pm_prepare, 1358 .prepare = &evergreen_pm_prepare,
1327 .finish = &evergreen_pm_finish, 1359 .finish = &evergreen_pm_finish,
1328 .init_profile = &r600_pm_init_profile, 1360 .init_profile = &btc_pm_init_profile,
1329 .get_dynpm_state = &r600_pm_get_dynpm_state, 1361 .get_dynpm_state = &r600_pm_get_dynpm_state,
1330 .get_engine_clock = &radeon_atom_get_engine_clock, 1362 .get_engine_clock = &radeon_atom_get_engine_clock,
1331 .set_engine_clock = &radeon_atom_set_engine_clock, 1363 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1342,16 +1374,6 @@ static struct radeon_asic btc_asic = {
1342 }, 1374 },
1343}; 1375};
1344 1376
1345static const struct radeon_vm_funcs cayman_vm_funcs = {
1346 .init = &cayman_vm_init,
1347 .fini = &cayman_vm_fini,
1348 .bind = &cayman_vm_bind,
1349 .unbind = &cayman_vm_unbind,
1350 .tlb_flush = &cayman_vm_tlb_flush,
1351 .page_flags = &cayman_vm_page_flags,
1352 .set_page = &cayman_vm_set_page,
1353};
1354
1355static struct radeon_asic cayman_asic = { 1377static struct radeon_asic cayman_asic = {
1356 .init = &cayman_init, 1378 .init = &cayman_init,
1357 .fini = &cayman_fini, 1379 .fini = &cayman_fini,
@@ -1366,6 +1388,12 @@ static struct radeon_asic cayman_asic = {
1366 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1388 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1367 .set_page = &rs600_gart_set_page, 1389 .set_page = &rs600_gart_set_page,
1368 }, 1390 },
1391 .vm = {
1392 .init = &cayman_vm_init,
1393 .fini = &cayman_vm_fini,
1394 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1395 .set_page = &cayman_vm_set_page,
1396 },
1369 .ring = { 1397 .ring = {
1370 [RADEON_RING_TYPE_GFX_INDEX] = { 1398 [RADEON_RING_TYPE_GFX_INDEX] = {
1371 .ib_execute = &cayman_ring_ib_execute, 1399 .ib_execute = &cayman_ring_ib_execute,
@@ -1376,6 +1404,7 @@ static struct radeon_asic cayman_asic = {
1376 .ring_test = &r600_ring_test, 1404 .ring_test = &r600_ring_test,
1377 .ib_test = &r600_ib_test, 1405 .ib_test = &r600_ib_test,
1378 .is_lockup = &evergreen_gpu_is_lockup, 1406 .is_lockup = &evergreen_gpu_is_lockup,
1407 .vm_flush = &cayman_vm_flush,
1379 }, 1408 },
1380 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1409 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1381 .ib_execute = &cayman_ring_ib_execute, 1410 .ib_execute = &cayman_ring_ib_execute,
@@ -1386,6 +1415,7 @@ static struct radeon_asic cayman_asic = {
1386 .ring_test = &r600_ring_test, 1415 .ring_test = &r600_ring_test,
1387 .ib_test = &r600_ib_test, 1416 .ib_test = &r600_ib_test,
1388 .is_lockup = &evergreen_gpu_is_lockup, 1417 .is_lockup = &evergreen_gpu_is_lockup,
1418 .vm_flush = &cayman_vm_flush,
1389 }, 1419 },
1390 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1420 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1391 .ib_execute = &cayman_ring_ib_execute, 1421 .ib_execute = &cayman_ring_ib_execute,
@@ -1396,6 +1426,7 @@ static struct radeon_asic cayman_asic = {
1396 .ring_test = &r600_ring_test, 1426 .ring_test = &r600_ring_test,
1397 .ib_test = &r600_ib_test, 1427 .ib_test = &r600_ib_test,
1398 .is_lockup = &evergreen_gpu_is_lockup, 1428 .is_lockup = &evergreen_gpu_is_lockup,
1429 .vm_flush = &cayman_vm_flush,
1399 } 1430 }
1400 }, 1431 },
1401 .irq = { 1432 .irq = {
@@ -1406,6 +1437,8 @@ static struct radeon_asic cayman_asic = {
1406 .bandwidth_update = &evergreen_bandwidth_update, 1437 .bandwidth_update = &evergreen_bandwidth_update,
1407 .get_vblank_counter = &evergreen_get_vblank_counter, 1438 .get_vblank_counter = &evergreen_get_vblank_counter,
1408 .wait_for_vblank = &dce4_wait_for_vblank, 1439 .wait_for_vblank = &dce4_wait_for_vblank,
1440 .set_backlight_level = &atombios_set_backlight_level,
1441 .get_backlight_level = &atombios_get_backlight_level,
1409 }, 1442 },
1410 .copy = { 1443 .copy = {
1411 .blit = &r600_copy_blit, 1444 .blit = &r600_copy_blit,
@@ -1429,7 +1462,7 @@ static struct radeon_asic cayman_asic = {
1429 .misc = &evergreen_pm_misc, 1462 .misc = &evergreen_pm_misc,
1430 .prepare = &evergreen_pm_prepare, 1463 .prepare = &evergreen_pm_prepare,
1431 .finish = &evergreen_pm_finish, 1464 .finish = &evergreen_pm_finish,
1432 .init_profile = &r600_pm_init_profile, 1465 .init_profile = &btc_pm_init_profile,
1433 .get_dynpm_state = &r600_pm_get_dynpm_state, 1466 .get_dynpm_state = &r600_pm_get_dynpm_state,
1434 .get_engine_clock = &radeon_atom_get_engine_clock, 1467 .get_engine_clock = &radeon_atom_get_engine_clock,
1435 .set_engine_clock = &radeon_atom_set_engine_clock, 1468 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1460,6 +1493,12 @@ static struct radeon_asic trinity_asic = {
1460 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1493 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1461 .set_page = &rs600_gart_set_page, 1494 .set_page = &rs600_gart_set_page,
1462 }, 1495 },
1496 .vm = {
1497 .init = &cayman_vm_init,
1498 .fini = &cayman_vm_fini,
1499 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1500 .set_page = &cayman_vm_set_page,
1501 },
1463 .ring = { 1502 .ring = {
1464 [RADEON_RING_TYPE_GFX_INDEX] = { 1503 [RADEON_RING_TYPE_GFX_INDEX] = {
1465 .ib_execute = &cayman_ring_ib_execute, 1504 .ib_execute = &cayman_ring_ib_execute,
@@ -1470,6 +1509,7 @@ static struct radeon_asic trinity_asic = {
1470 .ring_test = &r600_ring_test, 1509 .ring_test = &r600_ring_test,
1471 .ib_test = &r600_ib_test, 1510 .ib_test = &r600_ib_test,
1472 .is_lockup = &evergreen_gpu_is_lockup, 1511 .is_lockup = &evergreen_gpu_is_lockup,
1512 .vm_flush = &cayman_vm_flush,
1473 }, 1513 },
1474 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1514 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1475 .ib_execute = &cayman_ring_ib_execute, 1515 .ib_execute = &cayman_ring_ib_execute,
@@ -1480,6 +1520,7 @@ static struct radeon_asic trinity_asic = {
1480 .ring_test = &r600_ring_test, 1520 .ring_test = &r600_ring_test,
1481 .ib_test = &r600_ib_test, 1521 .ib_test = &r600_ib_test,
1482 .is_lockup = &evergreen_gpu_is_lockup, 1522 .is_lockup = &evergreen_gpu_is_lockup,
1523 .vm_flush = &cayman_vm_flush,
1483 }, 1524 },
1484 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1525 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1485 .ib_execute = &cayman_ring_ib_execute, 1526 .ib_execute = &cayman_ring_ib_execute,
@@ -1490,6 +1531,7 @@ static struct radeon_asic trinity_asic = {
1490 .ring_test = &r600_ring_test, 1531 .ring_test = &r600_ring_test,
1491 .ib_test = &r600_ib_test, 1532 .ib_test = &r600_ib_test,
1492 .is_lockup = &evergreen_gpu_is_lockup, 1533 .is_lockup = &evergreen_gpu_is_lockup,
1534 .vm_flush = &cayman_vm_flush,
1493 } 1535 }
1494 }, 1536 },
1495 .irq = { 1537 .irq = {
@@ -1500,6 +1542,8 @@ static struct radeon_asic trinity_asic = {
1500 .bandwidth_update = &dce6_bandwidth_update, 1542 .bandwidth_update = &dce6_bandwidth_update,
1501 .get_vblank_counter = &evergreen_get_vblank_counter, 1543 .get_vblank_counter = &evergreen_get_vblank_counter,
1502 .wait_for_vblank = &dce4_wait_for_vblank, 1544 .wait_for_vblank = &dce4_wait_for_vblank,
1545 .set_backlight_level = &atombios_set_backlight_level,
1546 .get_backlight_level = &atombios_get_backlight_level,
1503 }, 1547 },
1504 .copy = { 1548 .copy = {
1505 .blit = &r600_copy_blit, 1549 .blit = &r600_copy_blit,
@@ -1540,16 +1584,6 @@ static struct radeon_asic trinity_asic = {
1540 }, 1584 },
1541}; 1585};
1542 1586
1543static const struct radeon_vm_funcs si_vm_funcs = {
1544 .init = &si_vm_init,
1545 .fini = &si_vm_fini,
1546 .bind = &si_vm_bind,
1547 .unbind = &si_vm_unbind,
1548 .tlb_flush = &si_vm_tlb_flush,
1549 .page_flags = &cayman_vm_page_flags,
1550 .set_page = &cayman_vm_set_page,
1551};
1552
1553static struct radeon_asic si_asic = { 1587static struct radeon_asic si_asic = {
1554 .init = &si_init, 1588 .init = &si_init,
1555 .fini = &si_fini, 1589 .fini = &si_fini,
@@ -1564,6 +1598,12 @@ static struct radeon_asic si_asic = {
1564 .tlb_flush = &si_pcie_gart_tlb_flush, 1598 .tlb_flush = &si_pcie_gart_tlb_flush,
1565 .set_page = &rs600_gart_set_page, 1599 .set_page = &rs600_gart_set_page,
1566 }, 1600 },
1601 .vm = {
1602 .init = &si_vm_init,
1603 .fini = &si_vm_fini,
1604 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1605 .set_page = &si_vm_set_page,
1606 },
1567 .ring = { 1607 .ring = {
1568 [RADEON_RING_TYPE_GFX_INDEX] = { 1608 [RADEON_RING_TYPE_GFX_INDEX] = {
1569 .ib_execute = &si_ring_ib_execute, 1609 .ib_execute = &si_ring_ib_execute,
@@ -1574,6 +1614,7 @@ static struct radeon_asic si_asic = {
1574 .ring_test = &r600_ring_test, 1614 .ring_test = &r600_ring_test,
1575 .ib_test = &r600_ib_test, 1615 .ib_test = &r600_ib_test,
1576 .is_lockup = &si_gpu_is_lockup, 1616 .is_lockup = &si_gpu_is_lockup,
1617 .vm_flush = &si_vm_flush,
1577 }, 1618 },
1578 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1619 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1579 .ib_execute = &si_ring_ib_execute, 1620 .ib_execute = &si_ring_ib_execute,
@@ -1584,6 +1625,7 @@ static struct radeon_asic si_asic = {
1584 .ring_test = &r600_ring_test, 1625 .ring_test = &r600_ring_test,
1585 .ib_test = &r600_ib_test, 1626 .ib_test = &r600_ib_test,
1586 .is_lockup = &si_gpu_is_lockup, 1627 .is_lockup = &si_gpu_is_lockup,
1628 .vm_flush = &si_vm_flush,
1587 }, 1629 },
1588 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1630 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1589 .ib_execute = &si_ring_ib_execute, 1631 .ib_execute = &si_ring_ib_execute,
@@ -1594,6 +1636,7 @@ static struct radeon_asic si_asic = {
1594 .ring_test = &r600_ring_test, 1636 .ring_test = &r600_ring_test,
1595 .ib_test = &r600_ib_test, 1637 .ib_test = &r600_ib_test,
1596 .is_lockup = &si_gpu_is_lockup, 1638 .is_lockup = &si_gpu_is_lockup,
1639 .vm_flush = &si_vm_flush,
1597 } 1640 }
1598 }, 1641 },
1599 .irq = { 1642 .irq = {
@@ -1604,6 +1647,8 @@ static struct radeon_asic si_asic = {
1604 .bandwidth_update = &dce6_bandwidth_update, 1647 .bandwidth_update = &dce6_bandwidth_update,
1605 .get_vblank_counter = &evergreen_get_vblank_counter, 1648 .get_vblank_counter = &evergreen_get_vblank_counter,
1606 .wait_for_vblank = &dce4_wait_for_vblank, 1649 .wait_for_vblank = &dce4_wait_for_vblank,
1650 .set_backlight_level = &atombios_set_backlight_level,
1651 .get_backlight_level = &atombios_get_backlight_level,
1607 }, 1652 },
1608 .copy = { 1653 .copy = {
1609 .blit = NULL, 1654 .blit = NULL,
@@ -1697,6 +1742,7 @@ int radeon_asic_init(struct radeon_device *rdev)
1697 rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock; 1742 rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
1698 rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock; 1743 rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
1699 rdev->asic->pm.set_memory_clock = NULL; 1744 rdev->asic->pm.set_memory_clock = NULL;
1745 rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
1700 } 1746 }
1701 break; 1747 break;
1702 case CHIP_RS400: 1748 case CHIP_RS400:
@@ -1769,13 +1815,11 @@ int radeon_asic_init(struct radeon_device *rdev)
1769 rdev->asic = &cayman_asic; 1815 rdev->asic = &cayman_asic;
1770 /* set num crtcs */ 1816 /* set num crtcs */
1771 rdev->num_crtc = 6; 1817 rdev->num_crtc = 6;
1772 rdev->vm_manager.funcs = &cayman_vm_funcs;
1773 break; 1818 break;
1774 case CHIP_ARUBA: 1819 case CHIP_ARUBA:
1775 rdev->asic = &trinity_asic; 1820 rdev->asic = &trinity_asic;
1776 /* set num crtcs */ 1821 /* set num crtcs */
1777 rdev->num_crtc = 4; 1822 rdev->num_crtc = 4;
1778 rdev->vm_manager.funcs = &cayman_vm_funcs;
1779 break; 1823 break;
1780 case CHIP_TAHITI: 1824 case CHIP_TAHITI:
1781 case CHIP_PITCAIRN: 1825 case CHIP_PITCAIRN:
@@ -1783,7 +1827,6 @@ int radeon_asic_init(struct radeon_device *rdev)
1783 rdev->asic = &si_asic; 1827 rdev->asic = &si_asic;
1784 /* set num crtcs */ 1828 /* set num crtcs */
1785 rdev->num_crtc = 6; 1829 rdev->num_crtc = 6;
1786 rdev->vm_manager.funcs = &si_vm_funcs;
1787 break; 1830 break;
1788 default: 1831 default:
1789 /* FIXME: not supported yet */ 1832 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 18c38d14c8cd..5e3a0e5c6be1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -42,6 +42,12 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
42void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock); 42void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
44 44
45void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
46u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
47void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
48u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
49
50
45/* 51/*
46 * r100,rv100,rs100,rv200,rs200 52 * r100,rv100,rs100,rv200,rs200
47 */ 53 */
@@ -389,6 +395,7 @@ void r700_cp_fini(struct radeon_device *rdev);
389struct evergreen_mc_save { 395struct evergreen_mc_save {
390 u32 vga_render_control; 396 u32 vga_render_control;
391 u32 vga_hdp_control; 397 u32 vga_hdp_control;
398 bool crtc_enabled[RADEON_MAX_CRTCS];
392}; 399};
393 400
394void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); 401void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -413,6 +420,7 @@ extern void evergreen_pm_misc(struct radeon_device *rdev);
413extern void evergreen_pm_prepare(struct radeon_device *rdev); 420extern void evergreen_pm_prepare(struct radeon_device *rdev);
414extern void evergreen_pm_finish(struct radeon_device *rdev); 421extern void evergreen_pm_finish(struct radeon_device *rdev);
415extern void sumo_pm_init_profile(struct radeon_device *rdev); 422extern void sumo_pm_init_profile(struct radeon_device *rdev);
423extern void btc_pm_init_profile(struct radeon_device *rdev);
416extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 424extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
417extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 425extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
418extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 426extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -435,14 +443,11 @@ int cayman_asic_reset(struct radeon_device *rdev);
435void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 443void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
436int cayman_vm_init(struct radeon_device *rdev); 444int cayman_vm_init(struct radeon_device *rdev);
437void cayman_vm_fini(struct radeon_device *rdev); 445void cayman_vm_fini(struct radeon_device *rdev);
438int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); 446void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
439void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); 447uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
440void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm); 448void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
441uint32_t cayman_vm_page_flags(struct radeon_device *rdev, 449 uint64_t addr, unsigned count,
442 struct radeon_vm *vm, 450 uint32_t incr, uint32_t flags);
443 uint32_t flags);
444void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
445 unsigned pfn, uint64_t addr, uint32_t flags);
446int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 451int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
447 452
448/* DCE6 - SI */ 453/* DCE6 - SI */
@@ -465,9 +470,10 @@ int si_irq_set(struct radeon_device *rdev);
465int si_irq_process(struct radeon_device *rdev); 470int si_irq_process(struct radeon_device *rdev);
466int si_vm_init(struct radeon_device *rdev); 471int si_vm_init(struct radeon_device *rdev);
467void si_vm_fini(struct radeon_device *rdev); 472void si_vm_fini(struct radeon_device *rdev);
468int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); 473void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
469void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); 474 uint64_t addr, unsigned count,
470void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm); 475 uint32_t incr, uint32_t flags);
476void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
471int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 477int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
472uint64_t si_get_gpu_clock(struct radeon_device *rdev); 478uint64_t si_get_gpu_clock(struct radeon_device *rdev);
473 479
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d67d4f3eb6f4..01b90b4f5e22 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1254,6 +1254,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1254 if (rdev->clock.max_pixel_clock == 0) 1254 if (rdev->clock.max_pixel_clock == 0)
1255 rdev->clock.max_pixel_clock = 40000; 1255 rdev->clock.max_pixel_clock = 40000;
1256 1256
1257 /* not technically a clock, but... */
1258 rdev->mode_info.firmware_flags =
1259 le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
1260
1257 return true; 1261 return true;
1258 } 1262 }
1259 1263
@@ -2005,7 +2009,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2005 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2009 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2006 2010
2007 /* add the i2c bus for thermal/fan chip */ 2011 /* add the i2c bus for thermal/fan chip */
2008 if (power_info->info.ucOverdriveThermalController > 0) { 2012 if ((power_info->info.ucOverdriveThermalController > 0) &&
2013 (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
2009 DRM_INFO("Possible %s thermal controller at 0x%02x\n", 2014 DRM_INFO("Possible %s thermal controller at 0x%02x\n",
2010 thermal_controller_names[power_info->info.ucOverdriveThermalController], 2015 thermal_controller_names[power_info->info.ucOverdriveThermalController],
2011 power_info->info.ucOverdriveControllerAddress >> 1); 2016 power_info->info.ucOverdriveControllerAddress >> 1);
@@ -2209,7 +2214,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2209 (controller->ucType == 2214 (controller->ucType ==
2210 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { 2215 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
2211 DRM_INFO("Special thermal controller config\n"); 2216 DRM_INFO("Special thermal controller config\n");
2212 } else { 2217 } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
2213 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", 2218 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
2214 pp_lib_thermal_controller_names[controller->ucType], 2219 pp_lib_thermal_controller_names[controller->ucType],
2215 controller->ucI2cAddress >> 1, 2220 controller->ucI2cAddress >> 1,
@@ -2224,6 +2229,12 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2224 strlcpy(info.type, name, sizeof(info.type)); 2229 strlcpy(info.type, name, sizeof(info.type));
2225 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2230 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
2226 } 2231 }
2232 } else {
2233 DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
2234 controller->ucType,
2235 controller->ucI2cAddress >> 1,
2236 (controller->ucFanParameters &
2237 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2227 } 2238 }
2228 } 2239 }
2229} 2240}
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2a2cf0b88a28..582e99449c12 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -12,30 +12,62 @@
12#include <acpi/acpi_bus.h> 12#include <acpi/acpi_bus.h>
13#include <linux/pci.h> 13#include <linux/pci.h>
14 14
15#define ATPX_VERSION 0 15#include "radeon_acpi.h"
16#define ATPX_GPU_PWR 2 16
17#define ATPX_MUX_SELECT 3 17struct radeon_atpx_functions {
18#define ATPX_I2C_MUX_SELECT 4 18 bool px_params;
19#define ATPX_SWITCH_START 5 19 bool power_cntl;
20#define ATPX_SWITCH_END 6 20 bool disp_mux_cntl;
21 21 bool i2c_mux_cntl;
22#define ATPX_INTEGRATED 0 22 bool switch_start;
23#define ATPX_DISCRETE 1 23 bool switch_end;
24 bool disp_connectors_mapping;
25 bool disp_detetion_ports;
26};
24 27
25#define ATPX_MUX_IGD 0 28struct radeon_atpx {
26#define ATPX_MUX_DISCRETE 1 29 acpi_handle handle;
30 struct radeon_atpx_functions functions;
31};
27 32
28static struct radeon_atpx_priv { 33static struct radeon_atpx_priv {
29 bool atpx_detected; 34 bool atpx_detected;
30 /* handle for device - and atpx */ 35 /* handle for device - and atpx */
31 acpi_handle dhandle; 36 acpi_handle dhandle;
32 acpi_handle atpx_handle; 37 struct radeon_atpx atpx;
33} radeon_atpx_priv; 38} radeon_atpx_priv;
34 39
35static int radeon_atpx_get_version(acpi_handle handle) 40struct atpx_verify_interface {
41 u16 size; /* structure size in bytes (includes size field) */
42 u16 version; /* version */
43 u32 function_bits; /* supported functions bit vector */
44} __packed;
45
46struct atpx_power_control {
47 u16 size;
48 u8 dgpu_state;
49} __packed;
50
51struct atpx_mux {
52 u16 size;
53 u16 mux;
54} __packed;
55
56/**
57 * radeon_atpx_call - call an ATPX method
58 *
59 * @handle: acpi handle
60 * @function: the ATPX function to execute
61 * @params: ATPX function params
62 *
63 * Executes the requested ATPX function (all asics).
64 * Returns a pointer to the acpi output buffer.
65 */
66static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
67 struct acpi_buffer *params)
36{ 68{
37 acpi_status status; 69 acpi_status status;
38 union acpi_object atpx_arg_elements[2], *obj; 70 union acpi_object atpx_arg_elements[2];
39 struct acpi_object_list atpx_arg; 71 struct acpi_object_list atpx_arg;
40 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 72 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
41 73
@@ -43,99 +75,292 @@ static int radeon_atpx_get_version(acpi_handle handle)
43 atpx_arg.pointer = &atpx_arg_elements[0]; 75 atpx_arg.pointer = &atpx_arg_elements[0];
44 76
45 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; 77 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
46 atpx_arg_elements[0].integer.value = ATPX_VERSION; 78 atpx_arg_elements[0].integer.value = function;
79
80 if (params) {
81 atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
82 atpx_arg_elements[1].buffer.length = params->length;
83 atpx_arg_elements[1].buffer.pointer = params->pointer;
84 } else {
85 /* We need a second fake parameter */
86 atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
87 atpx_arg_elements[1].integer.value = 0;
88 }
47 89
48 atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; 90 status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
49 atpx_arg_elements[1].integer.value = ATPX_VERSION;
50 91
51 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); 92 /* Fail only if calling the method fails and ATPX is supported */
52 if (ACPI_FAILURE(status)) { 93 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
53 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status)); 94 printk("failed to evaluate ATPX got %s\n",
54 return -ENOSYS; 95 acpi_format_exception(status));
96 kfree(buffer.pointer);
97 return NULL;
55 } 98 }
56 obj = (union acpi_object *)buffer.pointer; 99
57 if (obj && (obj->type == ACPI_TYPE_BUFFER)) 100 return buffer.pointer;
58 printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
59 kfree(buffer.pointer);
60 return 0;
61} 101}
62 102
63static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value) 103/**
104 * radeon_atpx_parse_functions - parse supported functions
105 *
106 * @f: supported functions struct
107 * @mask: supported functions mask from ATPX
108 *
109 * Use the supported functions mask from ATPX function
110 * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
111 * are supported (all asics).
112 */
113static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
64{ 114{
65 acpi_status status; 115 f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
66 union acpi_object atpx_arg_elements[2]; 116 f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
67 struct acpi_object_list atpx_arg; 117 f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
68 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 118 f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
69 uint8_t buf[4] = {0}; 119 f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
70 120 f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
71 if (!handle) 121 f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
72 return -EINVAL; 122 f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
73 123}
74 atpx_arg.count = 2;
75 atpx_arg.pointer = &atpx_arg_elements[0];
76 124
77 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; 125/**
78 atpx_arg_elements[0].integer.value = cmd_id; 126 * radeon_atpx_verify_interface - verify ATPX
127 *
128 * @handle: acpi handle
129 * @atpx: radeon atpx struct
130 *
131 * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
132 * to initialize ATPX and determine what features are supported
133 * (all asics).
134 * returns 0 on success, error on failure.
135 */
136static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
137{
138 union acpi_object *info;
139 struct atpx_verify_interface output;
140 size_t size;
141 int err = 0;
142
143 info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
144 if (!info)
145 return -EIO;
146
147 memset(&output, 0, sizeof(output));
148
149 size = *(u16 *) info->buffer.pointer;
150 if (size < 8) {
151 printk("ATPX buffer is too small: %lu\n", size);
152 err = -EINVAL;
153 goto out;
154 }
155 size = min(sizeof(output), size);
79 156
80 buf[2] = value & 0xff; 157 memcpy(&output, info->buffer.pointer, size);
81 buf[3] = (value >> 8) & 0xff;
82 158
83 atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; 159 /* TODO: check version? */
84 atpx_arg_elements[1].buffer.length = 4; 160 printk("ATPX version %u\n", output.version);
85 atpx_arg_elements[1].buffer.pointer = buf;
86 161
87 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); 162 radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
88 if (ACPI_FAILURE(status)) {
89 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
90 return -ENOSYS;
91 }
92 kfree(buffer.pointer);
93 163
94 return 0; 164out:
165 kfree(info);
166 return err;
95} 167}
96 168
97static int radeon_atpx_set_discrete_state(acpi_handle handle, int state) 169/**
170 * radeon_atpx_set_discrete_state - power up/down discrete GPU
171 *
172 * @atpx: atpx info struct
173 * @state: discrete GPU state (0 = power down, 1 = power up)
174 *
175 * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
176 * power down/up the discrete GPU (all asics).
177 * Returns 0 on success, error on failure.
178 */
179static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
98{ 180{
99 return radeon_atpx_execute(handle, ATPX_GPU_PWR, state); 181 struct acpi_buffer params;
182 union acpi_object *info;
183 struct atpx_power_control input;
184
185 if (atpx->functions.power_cntl) {
186 input.size = 3;
187 input.dgpu_state = state;
188 params.length = input.size;
189 params.pointer = &input;
190 info = radeon_atpx_call(atpx->handle,
191 ATPX_FUNCTION_POWER_CONTROL,
192 &params);
193 if (!info)
194 return -EIO;
195 kfree(info);
196 }
197 return 0;
100} 198}
101 199
102static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) 200/**
201 * radeon_atpx_switch_disp_mux - switch display mux
202 *
203 * @atpx: atpx info struct
204 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
205 *
206 * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
207 * switch the display mux between the discrete GPU and integrated GPU
208 * (all asics).
209 * Returns 0 on success, error on failure.
210 */
211static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
103{ 212{
104 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); 213 struct acpi_buffer params;
214 union acpi_object *info;
215 struct atpx_mux input;
216
217 if (atpx->functions.disp_mux_cntl) {
218 input.size = 4;
219 input.mux = mux_id;
220 params.length = input.size;
221 params.pointer = &input;
222 info = radeon_atpx_call(atpx->handle,
223 ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
224 &params);
225 if (!info)
226 return -EIO;
227 kfree(info);
228 }
229 return 0;
105} 230}
106 231
107static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) 232/**
233 * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
234 *
235 * @atpx: atpx info struct
236 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
237 *
238 * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
239 * switch the i2c/hpd mux between the discrete GPU and integrated GPU
240 * (all asics).
241 * Returns 0 on success, error on failure.
242 */
243static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
108{ 244{
109 return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); 245 struct acpi_buffer params;
246 union acpi_object *info;
247 struct atpx_mux input;
248
249 if (atpx->functions.i2c_mux_cntl) {
250 input.size = 4;
251 input.mux = mux_id;
252 params.length = input.size;
253 params.pointer = &input;
254 info = radeon_atpx_call(atpx->handle,
255 ATPX_FUNCTION_I2C_MUX_CONTROL,
256 &params);
257 if (!info)
258 return -EIO;
259 kfree(info);
260 }
261 return 0;
110} 262}
111 263
112static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) 264/**
265 * radeon_atpx_switch_start - notify the sbios of a GPU switch
266 *
267 * @atpx: atpx info struct
268 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
269 *
270 * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
271 * function to notify the sbios that a switch between the discrete GPU and
272 * integrated GPU has begun (all asics).
273 * Returns 0 on success, error on failure.
274 */
275static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
113{ 276{
114 return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); 277 struct acpi_buffer params;
278 union acpi_object *info;
279 struct atpx_mux input;
280
281 if (atpx->functions.switch_start) {
282 input.size = 4;
283 input.mux = mux_id;
284 params.length = input.size;
285 params.pointer = &input;
286 info = radeon_atpx_call(atpx->handle,
287 ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
288 &params);
289 if (!info)
290 return -EIO;
291 kfree(info);
292 }
293 return 0;
115} 294}
116 295
117static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) 296/**
297 * radeon_atpx_switch_end - notify the sbios of a GPU switch
298 *
299 * @atpx: atpx info struct
300 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
301 *
302 * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
303 * function to notify the sbios that a switch between the discrete GPU and
304 * integrated GPU has ended (all asics).
305 * Returns 0 on success, error on failure.
306 */
307static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
118{ 308{
119 return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); 309 struct acpi_buffer params;
310 union acpi_object *info;
311 struct atpx_mux input;
312
313 if (atpx->functions.switch_end) {
314 input.size = 4;
315 input.mux = mux_id;
316 params.length = input.size;
317 params.pointer = &input;
318 info = radeon_atpx_call(atpx->handle,
319 ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
320 &params);
321 if (!info)
322 return -EIO;
323 kfree(info);
324 }
325 return 0;
120} 326}
121 327
328/**
329 * radeon_atpx_switchto - switch to the requested GPU
330 *
331 * @id: GPU to switch to
332 *
333 * Execute the necessary ATPX functions to switch between the discrete GPU and
334 * integrated GPU (all asics).
335 * Returns 0 on success, error on failure.
336 */
122static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) 337static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
123{ 338{
124 int gpu_id; 339 u16 gpu_id;
125 340
126 if (id == VGA_SWITCHEROO_IGD) 341 if (id == VGA_SWITCHEROO_IGD)
127 gpu_id = ATPX_INTEGRATED; 342 gpu_id = ATPX_INTEGRATED_GPU;
128 else 343 else
129 gpu_id = ATPX_DISCRETE; 344 gpu_id = ATPX_DISCRETE_GPU;
130 345
131 radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); 346 radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
132 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); 347 radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
133 radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); 348 radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
134 radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); 349 radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
135 350
136 return 0; 351 return 0;
137} 352}
138 353
354/**
355 * radeon_atpx_switchto - switch to the requested GPU
356 *
357 * @id: GPU to switch to
358 * @state: requested power state (0 = off, 1 = on)
359 *
360 * Execute the necessary ATPX function to power down/up the discrete GPU
361 * (all asics).
362 * Returns 0 on success, error on failure.
363 */
139static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, 364static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
140 enum vga_switcheroo_state state) 365 enum vga_switcheroo_state state)
141{ 366{
@@ -143,10 +368,18 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
143 if (id == VGA_SWITCHEROO_IGD) 368 if (id == VGA_SWITCHEROO_IGD)
144 return 0; 369 return 0;
145 370
146 radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state); 371 radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
147 return 0; 372 return 0;
148} 373}
149 374
375/**
376 * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
377 *
378 * @pdev: pci device
379 *
380 * Look up the ATPX and ATRM handles (all asics).
381 * Returns true if the handles are found, false if not.
382 */
150static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 383static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
151{ 384{
152 acpi_handle dhandle, atpx_handle; 385 acpi_handle dhandle, atpx_handle;
@@ -161,18 +394,30 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
161 return false; 394 return false;
162 395
163 radeon_atpx_priv.dhandle = dhandle; 396 radeon_atpx_priv.dhandle = dhandle;
164 radeon_atpx_priv.atpx_handle = atpx_handle; 397 radeon_atpx_priv.atpx.handle = atpx_handle;
165 return true; 398 return true;
166} 399}
167 400
401/**
402 * radeon_atpx_init - verify the ATPX interface
403 *
404 * Verify the ATPX interface (all asics).
405 * Returns 0 on success, error on failure.
406 */
168static int radeon_atpx_init(void) 407static int radeon_atpx_init(void)
169{ 408{
170 /* set up the ATPX handle */ 409 /* set up the ATPX handle */
171 410 return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
172 radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
173 return 0;
174} 411}
175 412
413/**
414 * radeon_atpx_get_client_id - get the client id
415 *
416 * @pdev: pci device
417 *
418 * look up whether we are the integrated or discrete GPU (all asics).
419 * Returns the client id.
420 */
176static int radeon_atpx_get_client_id(struct pci_dev *pdev) 421static int radeon_atpx_get_client_id(struct pci_dev *pdev)
177{ 422{
178 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) 423 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
@@ -188,6 +433,12 @@ static struct vga_switcheroo_handler radeon_atpx_handler = {
188 .get_client_id = radeon_atpx_get_client_id, 433 .get_client_id = radeon_atpx_get_client_id,
189}; 434};
190 435
436/**
437 * radeon_atpx_detect - detect whether we have PX
438 *
439 * Check if we have a PX system (all asics).
440 * Returns true if we have a PX system, false if not.
441 */
191static bool radeon_atpx_detect(void) 442static bool radeon_atpx_detect(void)
192{ 443{
193 char acpi_method_name[255] = { 0 }; 444 char acpi_method_name[255] = { 0 };
@@ -203,7 +454,7 @@ static bool radeon_atpx_detect(void)
203 } 454 }
204 455
205 if (has_atpx && vga_count == 2) { 456 if (has_atpx && vga_count == 2) {
206 acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer); 457 acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
207 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 458 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
208 acpi_method_name); 459 acpi_method_name);
209 radeon_atpx_priv.atpx_detected = true; 460 radeon_atpx_priv.atpx_detected = true;
@@ -212,6 +463,11 @@ static bool radeon_atpx_detect(void)
212 return false; 463 return false;
213} 464}
214 465
466/**
467 * radeon_register_atpx_handler - register with vga_switcheroo
468 *
469 * Register the PX callbacks with vga_switcheroo (all asics).
470 */
215void radeon_register_atpx_handler(void) 471void radeon_register_atpx_handler(void)
216{ 472{
217 bool r; 473 bool r;
@@ -224,6 +480,11 @@ void radeon_register_atpx_handler(void)
224 vga_switcheroo_register_handler(&radeon_atpx_handler); 480 vga_switcheroo_register_handler(&radeon_atpx_handler);
225} 481}
226 482
483/**
484 * radeon_unregister_atpx_handler - unregister with vga_switcheroo
485 *
486 * Unregister the PX callbacks with vga_switcheroo (all asics).
487 */
227void radeon_unregister_atpx_handler(void) 488void radeon_unregister_atpx_handler(void)
228{ 489{
229 vga_switcheroo_unregister_handler(); 490 vga_switcheroo_unregister_handler();
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index f75247d42ffd..8a73f0758903 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3319,15 +3319,6 @@ static void combios_write_ram_size(struct drm_device *dev)
3319 WREG32(RADEON_CONFIG_MEMSIZE, mem_size); 3319 WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
3320} 3320}
3321 3321
3322void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
3323{
3324 uint16_t dyn_clk_info =
3325 combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
3326
3327 if (dyn_clk_info)
3328 combios_parse_pll_table(dev, dyn_clk_info);
3329}
3330
3331void radeon_combios_asic_init(struct drm_device *dev) 3322void radeon_combios_asic_init(struct drm_device *dev)
3332{ 3323{
3333 struct radeon_device *rdev = dev->dev_private; 3324 struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 895e628b60f8..69a142fc3d1d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,10 +40,6 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
40 struct drm_encoder *encoder, 40 struct drm_encoder *encoder,
41 bool connected); 41 bool connected);
42 42
43extern void
44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
45 struct drm_connector *drm_connector);
46
47void radeon_connector_hotplug(struct drm_connector *connector) 43void radeon_connector_hotplug(struct drm_connector *connector)
48{ 44{
49 struct drm_device *dev = connector->dev; 45 struct drm_device *dev = connector->dev;
@@ -198,7 +194,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
198 } 194 }
199} 195}
200 196
201struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) 197static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
202{ 198{
203 struct drm_mode_object *obj; 199 struct drm_mode_object *obj;
204 struct drm_encoder *encoder; 200 struct drm_encoder *encoder;
@@ -219,7 +215,7 @@ struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int enc
219 return NULL; 215 return NULL;
220} 216}
221 217
222struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) 218static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
223{ 219{
224 int enc_id = connector->encoder_ids[0]; 220 int enc_id = connector->encoder_ids[0];
225 struct drm_mode_object *obj; 221 struct drm_mode_object *obj;
@@ -370,7 +366,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
370 } 366 }
371} 367}
372 368
373int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, 369static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
374 uint64_t val) 370 uint64_t val)
375{ 371{
376 struct drm_device *dev = connector->dev; 372 struct drm_device *dev = connector->dev;
@@ -691,13 +687,13 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
691} 687}
692 688
693 689
694struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { 690static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
695 .get_modes = radeon_lvds_get_modes, 691 .get_modes = radeon_lvds_get_modes,
696 .mode_valid = radeon_lvds_mode_valid, 692 .mode_valid = radeon_lvds_mode_valid,
697 .best_encoder = radeon_best_single_encoder, 693 .best_encoder = radeon_best_single_encoder,
698}; 694};
699 695
700struct drm_connector_funcs radeon_lvds_connector_funcs = { 696static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
701 .dpms = drm_helper_connector_dpms, 697 .dpms = drm_helper_connector_dpms,
702 .detect = radeon_lvds_detect, 698 .detect = radeon_lvds_detect,
703 .fill_modes = drm_helper_probe_single_connector_modes, 699 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -809,13 +805,13 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
809 return ret; 805 return ret;
810} 806}
811 807
812struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = { 808static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
813 .get_modes = radeon_vga_get_modes, 809 .get_modes = radeon_vga_get_modes,
814 .mode_valid = radeon_vga_mode_valid, 810 .mode_valid = radeon_vga_mode_valid,
815 .best_encoder = radeon_best_single_encoder, 811 .best_encoder = radeon_best_single_encoder,
816}; 812};
817 813
818struct drm_connector_funcs radeon_vga_connector_funcs = { 814static const struct drm_connector_funcs radeon_vga_connector_funcs = {
819 .dpms = drm_helper_connector_dpms, 815 .dpms = drm_helper_connector_dpms,
820 .detect = radeon_vga_detect, 816 .detect = radeon_vga_detect,
821 .fill_modes = drm_helper_probe_single_connector_modes, 817 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -879,13 +875,13 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
879 return ret; 875 return ret;
880} 876}
881 877
882struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = { 878static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
883 .get_modes = radeon_tv_get_modes, 879 .get_modes = radeon_tv_get_modes,
884 .mode_valid = radeon_tv_mode_valid, 880 .mode_valid = radeon_tv_mode_valid,
885 .best_encoder = radeon_best_single_encoder, 881 .best_encoder = radeon_best_single_encoder,
886}; 882};
887 883
888struct drm_connector_funcs radeon_tv_connector_funcs = { 884static const struct drm_connector_funcs radeon_tv_connector_funcs = {
889 .dpms = drm_helper_connector_dpms, 885 .dpms = drm_helper_connector_dpms,
890 .detect = radeon_tv_detect, 886 .detect = radeon_tv_detect,
891 .fill_modes = drm_helper_probe_single_connector_modes, 887 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1089,7 +1085,7 @@ out:
1089} 1085}
1090 1086
1091/* okay need to be smart in here about which encoder to pick */ 1087/* okay need to be smart in here about which encoder to pick */
1092struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) 1088static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
1093{ 1089{
1094 int enc_id = connector->encoder_ids[0]; 1090 int enc_id = connector->encoder_ids[0];
1095 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1091 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1179,13 +1175,13 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
1179 return MODE_OK; 1175 return MODE_OK;
1180} 1176}
1181 1177
1182struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { 1178static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
1183 .get_modes = radeon_dvi_get_modes, 1179 .get_modes = radeon_dvi_get_modes,
1184 .mode_valid = radeon_dvi_mode_valid, 1180 .mode_valid = radeon_dvi_mode_valid,
1185 .best_encoder = radeon_dvi_encoder, 1181 .best_encoder = radeon_dvi_encoder,
1186}; 1182};
1187 1183
1188struct drm_connector_funcs radeon_dvi_connector_funcs = { 1184static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
1189 .dpms = drm_helper_connector_dpms, 1185 .dpms = drm_helper_connector_dpms,
1190 .detect = radeon_dvi_detect, 1186 .detect = radeon_dvi_detect,
1191 .fill_modes = drm_helper_probe_single_connector_modes, 1187 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1462,13 +1458,13 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1462 } 1458 }
1463} 1459}
1464 1460
1465struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { 1461static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
1466 .get_modes = radeon_dp_get_modes, 1462 .get_modes = radeon_dp_get_modes,
1467 .mode_valid = radeon_dp_mode_valid, 1463 .mode_valid = radeon_dp_mode_valid,
1468 .best_encoder = radeon_dvi_encoder, 1464 .best_encoder = radeon_dvi_encoder,
1469}; 1465};
1470 1466
1471struct drm_connector_funcs radeon_dp_connector_funcs = { 1467static const struct drm_connector_funcs radeon_dp_connector_funcs = {
1472 .dpms = drm_helper_connector_dpms, 1468 .dpms = drm_helper_connector_dpms,
1473 .detect = radeon_dp_detect, 1469 .detect = radeon_dp_detect,
1474 .fill_modes = drm_helper_probe_single_connector_modes, 1470 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -2008,15 +2004,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
2008 connector->polled = DRM_CONNECTOR_POLL_HPD; 2004 connector->polled = DRM_CONNECTOR_POLL_HPD;
2009 connector->display_info.subpixel_order = subpixel_order; 2005 connector->display_info.subpixel_order = subpixel_order;
2010 drm_sysfs_connector_add(connector); 2006 drm_sysfs_connector_add(connector);
2011 if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
2012 struct drm_encoder *drm_encoder;
2013
2014 list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
2015 struct radeon_encoder *radeon_encoder;
2016
2017 radeon_encoder = to_radeon_encoder(drm_encoder);
2018 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
2019 radeon_legacy_backlight_init(radeon_encoder, connector);
2020 }
2021 }
2022} 2007}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index b4a0db24f4dd..d59eb59cdb81 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -32,7 +32,7 @@
32void r100_cs_dump_packet(struct radeon_cs_parser *p, 32void r100_cs_dump_packet(struct radeon_cs_parser *p,
33 struct radeon_cs_packet *pkt); 33 struct radeon_cs_packet *pkt);
34 34
35int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 35static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
36{ 36{
37 struct drm_device *ddev = p->rdev->ddev; 37 struct drm_device *ddev = p->rdev->ddev;
38 struct radeon_cs_chunk *chunk; 38 struct radeon_cs_chunk *chunk;
@@ -115,19 +115,27 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
115 return 0; 115 return 0;
116} 116}
117 117
118static void radeon_cs_sync_to(struct radeon_cs_parser *p,
119 struct radeon_fence *fence)
120{
121 struct radeon_fence *other;
122
123 if (!fence)
124 return;
125
126 other = p->ib.sync_to[fence->ring];
127 p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
128}
129
118static void radeon_cs_sync_rings(struct radeon_cs_parser *p) 130static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
119{ 131{
120 int i; 132 int i;
121 133
122 for (i = 0; i < p->nrelocs; i++) { 134 for (i = 0; i < p->nrelocs; i++) {
123 struct radeon_fence *a, *b; 135 if (!p->relocs[i].robj)
124
125 if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
126 continue; 136 continue;
127 137
128 a = p->relocs[i].robj->tbo.sync_obj; 138 radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
129 b = p->ib.sync_to[a->ring];
130 p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
131 } 139 }
132} 140}
133 141
@@ -278,30 +286,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
278 return 0; 286 return 0;
279} 287}
280 288
281static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
282 struct radeon_fence *fence)
283{
284 struct radeon_fpriv *fpriv = parser->filp->driver_priv;
285 struct radeon_vm *vm = &fpriv->vm;
286 struct radeon_bo_list *lobj;
287
288 if (parser->chunk_ib_idx == -1) {
289 return;
290 }
291 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
292 return;
293 }
294
295 list_for_each_entry(lobj, &parser->validated, tv.head) {
296 struct radeon_bo_va *bo_va;
297 struct radeon_bo *rbo = lobj->bo;
298
299 bo_va = radeon_bo_va(rbo, vm);
300 radeon_fence_unref(&bo_va->fence);
301 bo_va->fence = radeon_fence_ref(fence);
302 }
303}
304
305/** 289/**
306 * cs_parser_fini() - clean parser states 290 * cs_parser_fini() - clean parser states
307 * @parser: parser structure holding parsing context. 291 * @parser: parser structure holding parsing context.
@@ -315,8 +299,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
315 unsigned i; 299 unsigned i;
316 300
317 if (!error) { 301 if (!error) {
318 /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
319 radeon_bo_vm_fence_va(parser, parser->ib.fence);
320 ttm_eu_fence_buffer_objects(&parser->validated, 302 ttm_eu_fence_buffer_objects(&parser->validated,
321 parser->ib.fence); 303 parser->ib.fence);
322 } else { 304 } else {
@@ -363,7 +345,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
363 * uncached). 345 * uncached).
364 */ 346 */
365 r = radeon_ib_get(rdev, parser->ring, &parser->ib, 347 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
366 ib_chunk->length_dw * 4); 348 NULL, ib_chunk->length_dw * 4);
367 if (r) { 349 if (r) {
368 DRM_ERROR("Failed to get ib !\n"); 350 DRM_ERROR("Failed to get ib !\n");
369 return r; 351 return r;
@@ -380,7 +362,6 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
380 return r; 362 return r;
381 } 363 }
382 radeon_cs_sync_rings(parser); 364 radeon_cs_sync_rings(parser);
383 parser->ib.vm_id = 0;
384 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 365 r = radeon_ib_schedule(rdev, &parser->ib, NULL);
385 if (r) { 366 if (r) {
386 DRM_ERROR("Failed to schedule IB !\n"); 367 DRM_ERROR("Failed to schedule IB !\n");
@@ -391,10 +372,15 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
391static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, 372static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
392 struct radeon_vm *vm) 373 struct radeon_vm *vm)
393{ 374{
375 struct radeon_device *rdev = parser->rdev;
394 struct radeon_bo_list *lobj; 376 struct radeon_bo_list *lobj;
395 struct radeon_bo *bo; 377 struct radeon_bo *bo;
396 int r; 378 int r;
397 379
380 r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
381 if (r) {
382 return r;
383 }
398 list_for_each_entry(lobj, &parser->validated, tv.head) { 384 list_for_each_entry(lobj, &parser->validated, tv.head) {
399 bo = lobj->bo; 385 bo = lobj->bo;
400 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); 386 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
@@ -426,7 +412,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
426 return -EINVAL; 412 return -EINVAL;
427 } 413 }
428 r = radeon_ib_get(rdev, parser->ring, &parser->const_ib, 414 r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
429 ib_chunk->length_dw * 4); 415 vm, ib_chunk->length_dw * 4);
430 if (r) { 416 if (r) {
431 DRM_ERROR("Failed to get const ib !\n"); 417 DRM_ERROR("Failed to get const ib !\n");
432 return r; 418 return r;
@@ -450,7 +436,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
450 return -EINVAL; 436 return -EINVAL;
451 } 437 }
452 r = radeon_ib_get(rdev, parser->ring, &parser->ib, 438 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
453 ib_chunk->length_dw * 4); 439 vm, ib_chunk->length_dw * 4);
454 if (r) { 440 if (r) {
455 DRM_ERROR("Failed to get ib !\n"); 441 DRM_ERROR("Failed to get ib !\n");
456 return r; 442 return r;
@@ -468,7 +454,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
468 454
469 mutex_lock(&rdev->vm_manager.lock); 455 mutex_lock(&rdev->vm_manager.lock);
470 mutex_lock(&vm->mutex); 456 mutex_lock(&vm->mutex);
471 r = radeon_vm_bind(rdev, vm); 457 r = radeon_vm_alloc_pt(rdev, vm);
472 if (r) { 458 if (r) {
473 goto out; 459 goto out;
474 } 460 }
@@ -477,32 +463,21 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
477 goto out; 463 goto out;
478 } 464 }
479 radeon_cs_sync_rings(parser); 465 radeon_cs_sync_rings(parser);
480 466 radeon_cs_sync_to(parser, vm->fence);
481 parser->ib.vm_id = vm->id; 467 radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
482 /* ib pool is bind at 0 in virtual address space,
483 * so gpu_addr is the offset inside the pool bo
484 */
485 parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
486 468
487 if ((rdev->family >= CHIP_TAHITI) && 469 if ((rdev->family >= CHIP_TAHITI) &&
488 (parser->chunk_const_ib_idx != -1)) { 470 (parser->chunk_const_ib_idx != -1)) {
489 parser->const_ib.vm_id = vm->id;
490 /* ib pool is bind at 0 in virtual address space,
491 * so gpu_addr is the offset inside the pool bo
492 */
493 parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
494 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); 471 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
495 } else { 472 } else {
496 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 473 r = radeon_ib_schedule(rdev, &parser->ib, NULL);
497 } 474 }
498 475
499out:
500 if (!r) { 476 if (!r) {
501 if (vm->fence) { 477 radeon_vm_fence(rdev, vm, parser->ib.fence);
502 radeon_fence_unref(&vm->fence);
503 }
504 vm->fence = radeon_fence_ref(parser->ib.fence);
505 } 478 }
479
480out:
506 mutex_unlock(&vm->mutex); 481 mutex_unlock(&vm->mutex);
507 mutex_unlock(&rdev->vm_manager.lock); 482 mutex_unlock(&rdev->vm_manager.lock);
508 return r; 483 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3daebd732d..64a42647f08a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -842,7 +842,7 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
842 * Validates certain module parameters and updates 842 * Validates certain module parameters and updates
843 * the associated values used by the driver (all asics). 843 * the associated values used by the driver (all asics).
844 */ 844 */
845void radeon_check_arguments(struct radeon_device *rdev) 845static void radeon_check_arguments(struct radeon_device *rdev)
846{ 846{
847 /* vramlimit must be a power of two */ 847 /* vramlimit must be a power of two */
848 switch (radeon_vram_limit) { 848 switch (radeon_vram_limit) {
@@ -1013,13 +1013,11 @@ int radeon_device_init(struct radeon_device *rdev,
1013 init_rwsem(&rdev->pm.mclk_lock); 1013 init_rwsem(&rdev->pm.mclk_lock);
1014 init_rwsem(&rdev->exclusive_lock); 1014 init_rwsem(&rdev->exclusive_lock);
1015 init_waitqueue_head(&rdev->irq.vblank_queue); 1015 init_waitqueue_head(&rdev->irq.vblank_queue);
1016 init_waitqueue_head(&rdev->irq.idle_queue);
1017 r = radeon_gem_init(rdev); 1016 r = radeon_gem_init(rdev);
1018 if (r) 1017 if (r)
1019 return r; 1018 return r;
1020 /* initialize vm here */ 1019 /* initialize vm here */
1021 mutex_init(&rdev->vm_manager.lock); 1020 mutex_init(&rdev->vm_manager.lock);
1022 rdev->vm_manager.use_bitmap = 1;
1023 rdev->vm_manager.max_pfn = 1 << 20; 1021 rdev->vm_manager.max_pfn = 1 << 20;
1024 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); 1022 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
1025 1023
@@ -1284,6 +1282,13 @@ int radeon_resume_kms(struct drm_device *dev)
1284 if (rdev->is_atom_bios) { 1282 if (rdev->is_atom_bios) {
1285 radeon_atom_encoder_init(rdev); 1283 radeon_atom_encoder_init(rdev);
1286 radeon_atom_disp_eng_pll_init(rdev); 1284 radeon_atom_disp_eng_pll_init(rdev);
1285 /* turn on the BL */
1286 if (rdev->mode_info.bl_encoder) {
1287 u8 bl_level = radeon_get_backlight_level(rdev,
1288 rdev->mode_info.bl_encoder);
1289 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1290 bl_level);
1291 }
1287 } 1292 }
1288 /* reset hpd state */ 1293 /* reset hpd state */
1289 radeon_hpd_init(rdev); 1294 radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8c593ea82c41..27ece75b4789 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -64,9 +64,11 @@
64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query 64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
65 * 2.21.0 - r600-r700: FMASK and CMASK 65 * 2.21.0 - r600-r700: FMASK and CMASK
66 * 2.22.0 - r600 only: RESOLVE_BOX allowed 66 * 2.22.0 - r600 only: RESOLVE_BOX allowed
67 * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
68 * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
67 */ 69 */
68#define KMS_DRIVER_MAJOR 2 70#define KMS_DRIVER_MAJOR 2
69#define KMS_DRIVER_MINOR 22 71#define KMS_DRIVER_MINOR 24
70#define KMS_DRIVER_PATCHLEVEL 0 72#define KMS_DRIVER_PATCHLEVEL 0
71int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 73int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
72int radeon_driver_unload_kms(struct drm_device *dev); 74int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 74670696277d..e66c807df9e6 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,6 +29,14 @@
29#include "radeon.h" 29#include "radeon.h"
30#include "atom.h" 30#include "atom.h"
31 31
32extern void
33radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
34 struct drm_connector *drm_connector);
35extern void
36radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
37 struct drm_connector *drm_connector);
38
39
32static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) 40static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
33{ 41{
34 struct drm_device *dev = encoder->dev; 42 struct drm_device *dev = encoder->dev;
@@ -153,6 +161,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
153void 161void
154radeon_link_encoder_connector(struct drm_device *dev) 162radeon_link_encoder_connector(struct drm_device *dev)
155{ 163{
164 struct radeon_device *rdev = dev->dev_private;
156 struct drm_connector *connector; 165 struct drm_connector *connector;
157 struct radeon_connector *radeon_connector; 166 struct radeon_connector *radeon_connector;
158 struct drm_encoder *encoder; 167 struct drm_encoder *encoder;
@@ -163,8 +172,16 @@ radeon_link_encoder_connector(struct drm_device *dev)
163 radeon_connector = to_radeon_connector(connector); 172 radeon_connector = to_radeon_connector(connector);
164 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 173 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
165 radeon_encoder = to_radeon_encoder(encoder); 174 radeon_encoder = to_radeon_encoder(encoder);
166 if (radeon_encoder->devices & radeon_connector->devices) 175 if (radeon_encoder->devices & radeon_connector->devices) {
167 drm_mode_connector_attach_encoder(connector, encoder); 176 drm_mode_connector_attach_encoder(connector, encoder);
177 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
178 if (rdev->is_atom_bios)
179 radeon_atom_backlight_init(radeon_encoder, connector);
180 else
181 radeon_legacy_backlight_init(radeon_encoder, connector);
182 rdev->mode_info.bl_encoder = radeon_encoder;
183 }
184 }
168 } 185 }
169 } 186 }
170} 187}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 5906914a78bc..6ada72c6d7a1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -316,22 +316,6 @@ static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
316 return new_fb; 316 return new_fb;
317} 317}
318 318
319static char *mode_option;
320int radeon_parse_options(char *options)
321{
322 char *this_opt;
323
324 if (!options || !*options)
325 return 0;
326
327 while ((this_opt = strsep(&options, ",")) != NULL) {
328 if (!*this_opt)
329 continue;
330 mode_option = this_opt;
331 }
332 return 0;
333}
334
335void radeon_fb_output_poll_changed(struct radeon_device *rdev) 319void radeon_fb_output_poll_changed(struct radeon_device *rdev)
336{ 320{
337 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); 321 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 2a59375dbe52..1eb3db52a36e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -399,7 +399,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
399 return 0; 399 return 0;
400} 400}
401 401
402bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) 402static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
403{ 403{
404 unsigned i; 404 unsigned i;
405 405
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index bb3b7fe05ccd..753b7ca3c807 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
423 */ 423 */
424 424
425/** 425/**
426 * radeon_vm_directory_size - returns the size of the page directory in bytes
427 *
428 * @rdev: radeon_device pointer
429 *
430 * Calculate the size of the page directory in bytes (cayman+).
431 */
432static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
433{
434 return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
435}
436
437/**
426 * radeon_vm_manager_init - init the vm manager 438 * radeon_vm_manager_init - init the vm manager
427 * 439 *
428 * @rdev: radeon_device pointer 440 * @rdev: radeon_device pointer
@@ -435,12 +447,15 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
435 struct radeon_vm *vm; 447 struct radeon_vm *vm;
436 struct radeon_bo_va *bo_va; 448 struct radeon_bo_va *bo_va;
437 int r; 449 int r;
450 unsigned size;
438 451
439 if (!rdev->vm_manager.enabled) { 452 if (!rdev->vm_manager.enabled) {
440 /* mark first vm as always in use, it's the system one */
441 /* allocate enough for 2 full VM pts */ 453 /* allocate enough for 2 full VM pts */
454 size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
455 size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
456 size *= 2;
442 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, 457 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
443 rdev->vm_manager.max_pfn * 8 * 2, 458 size,
444 RADEON_GEM_DOMAIN_VRAM); 459 RADEON_GEM_DOMAIN_VRAM);
445 if (r) { 460 if (r) {
446 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", 461 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -448,10 +463,10 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
448 return r; 463 return r;
449 } 464 }
450 465
451 r = rdev->vm_manager.funcs->init(rdev); 466 r = radeon_asic_vm_init(rdev);
452 if (r) 467 if (r)
453 return r; 468 return r;
454 469
455 rdev->vm_manager.enabled = true; 470 rdev->vm_manager.enabled = true;
456 471
457 r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager); 472 r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
@@ -461,73 +476,36 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
461 476
462 /* restore page table */ 477 /* restore page table */
463 list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { 478 list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
464 if (vm->id == -1) 479 if (vm->sa_bo == NULL)
465 continue; 480 continue;
466 481
467 list_for_each_entry(bo_va, &vm->va, vm_list) { 482 list_for_each_entry(bo_va, &vm->va, vm_list) {
468 struct ttm_mem_reg *mem = NULL;
469 if (bo_va->valid)
470 mem = &bo_va->bo->tbo.mem;
471
472 bo_va->valid = false; 483 bo_va->valid = false;
473 r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
474 if (r) {
475 DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
476 }
477 }
478
479 r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
480 if (r) {
481 DRM_ERROR("Failed to bind vm %d!\n", vm->id);
482 } 484 }
483 } 485 }
484 return 0; 486 return 0;
485} 487}
486 488
487/* global mutex must be lock */
488/** 489/**
489 * radeon_vm_unbind_locked - unbind a specific vm 490 * radeon_vm_free_pt - free the page table for a specific vm
490 * 491 *
491 * @rdev: radeon_device pointer 492 * @rdev: radeon_device pointer
492 * @vm: vm to unbind 493 * @vm: vm to unbind
493 * 494 *
494 * Unbind the requested vm (cayman+). 495 * Free the page table of a specific vm (cayman+).
495 * Wait for use of the VM to finish, then unbind the page table, 496 *
496 * and free the page table memory. 497 * Global and local mutex must be lock!
497 */ 498 */
498static void radeon_vm_unbind_locked(struct radeon_device *rdev, 499static void radeon_vm_free_pt(struct radeon_device *rdev,
499 struct radeon_vm *vm) 500 struct radeon_vm *vm)
500{ 501{
501 struct radeon_bo_va *bo_va; 502 struct radeon_bo_va *bo_va;
502 503
503 if (vm->id == -1) { 504 if (!vm->sa_bo)
504 return; 505 return;
505 }
506 506
507 /* wait for vm use to end */
508 while (vm->fence) {
509 int r;
510 r = radeon_fence_wait(vm->fence, false);
511 if (r)
512 DRM_ERROR("error while waiting for fence: %d\n", r);
513 if (r == -EDEADLK) {
514 mutex_unlock(&rdev->vm_manager.lock);
515 r = radeon_gpu_reset(rdev);
516 mutex_lock(&rdev->vm_manager.lock);
517 if (!r)
518 continue;
519 }
520 break;
521 }
522 radeon_fence_unref(&vm->fence);
523
524 /* hw unbind */
525 rdev->vm_manager.funcs->unbind(rdev, vm);
526 rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
527 list_del_init(&vm->list); 507 list_del_init(&vm->list);
528 vm->id = -1; 508 radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
529 radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
530 vm->pt = NULL;
531 509
532 list_for_each_entry(bo_va, &vm->va, vm_list) { 510 list_for_each_entry(bo_va, &vm->va, vm_list) {
533 bo_va->valid = false; 511 bo_va->valid = false;
@@ -544,16 +522,22 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
544void radeon_vm_manager_fini(struct radeon_device *rdev) 522void radeon_vm_manager_fini(struct radeon_device *rdev)
545{ 523{
546 struct radeon_vm *vm, *tmp; 524 struct radeon_vm *vm, *tmp;
525 int i;
547 526
548 if (!rdev->vm_manager.enabled) 527 if (!rdev->vm_manager.enabled)
549 return; 528 return;
550 529
551 mutex_lock(&rdev->vm_manager.lock); 530 mutex_lock(&rdev->vm_manager.lock);
552 /* unbind all active vm */ 531 /* free all allocated page tables */
553 list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { 532 list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
554 radeon_vm_unbind_locked(rdev, vm); 533 mutex_lock(&vm->mutex);
534 radeon_vm_free_pt(rdev, vm);
535 mutex_unlock(&vm->mutex);
555 } 536 }
556 rdev->vm_manager.funcs->fini(rdev); 537 for (i = 0; i < RADEON_NUM_VM; ++i) {
538 radeon_fence_unref(&rdev->vm_manager.active[i]);
539 }
540 radeon_asic_vm_fini(rdev);
557 mutex_unlock(&rdev->vm_manager.lock); 541 mutex_unlock(&rdev->vm_manager.lock);
558 542
559 radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); 543 radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
@@ -561,46 +545,34 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
561 rdev->vm_manager.enabled = false; 545 rdev->vm_manager.enabled = false;
562} 546}
563 547
564/* global mutex must be locked */
565/** 548/**
566 * radeon_vm_unbind - locked version of unbind 549 * radeon_vm_alloc_pt - allocates a page table for a VM
567 *
568 * @rdev: radeon_device pointer
569 * @vm: vm to unbind
570 *
571 * Locked version that wraps radeon_vm_unbind_locked (cayman+).
572 */
573void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
574{
575 mutex_lock(&vm->mutex);
576 radeon_vm_unbind_locked(rdev, vm);
577 mutex_unlock(&vm->mutex);
578}
579
580/* global and local mutex must be locked */
581/**
582 * radeon_vm_bind - bind a page table to a VMID
583 * 550 *
584 * @rdev: radeon_device pointer 551 * @rdev: radeon_device pointer
585 * @vm: vm to bind 552 * @vm: vm to bind
586 * 553 *
587 * Bind the requested vm (cayman+). 554 * Allocate a page table for the requested vm (cayman+).
588 * Suballocate memory for the page table, allocate a VMID 555 * Also starts to populate the page table.
589 * and bind the page table to it, and finally start to populate
590 * the page table.
591 * Returns 0 for success, error for failure. 556 * Returns 0 for success, error for failure.
557 *
558 * Global and local mutex must be locked!
592 */ 559 */
593int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm) 560int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
594{ 561{
595 struct radeon_vm *vm_evict; 562 struct radeon_vm *vm_evict;
596 unsigned i; 563 int r;
597 int id = -1, r; 564 u64 *pd_addr;
565 int tables_size;
598 566
599 if (vm == NULL) { 567 if (vm == NULL) {
600 return -EINVAL; 568 return -EINVAL;
601 } 569 }
602 570
603 if (vm->id != -1) { 571 /* allocate enough to cover the current VM size */
572 tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
573 tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
574
575 if (vm->sa_bo != NULL) {
604 /* update lru */ 576 /* update lru */
605 list_del_init(&vm->list); 577 list_del_init(&vm->list);
606 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); 578 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
@@ -609,98 +581,215 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
609 581
610retry: 582retry:
611 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo, 583 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
612 RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8), 584 tables_size, RADEON_GPU_PAGE_SIZE, false);
613 RADEON_GPU_PAGE_SIZE, false); 585 if (r == -ENOMEM) {
614 if (r) {
615 if (list_empty(&rdev->vm_manager.lru_vm)) { 586 if (list_empty(&rdev->vm_manager.lru_vm)) {
616 return r; 587 return r;
617 } 588 }
618 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list); 589 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
619 radeon_vm_unbind(rdev, vm_evict); 590 mutex_lock(&vm_evict->mutex);
591 radeon_vm_free_pt(rdev, vm_evict);
592 mutex_unlock(&vm_evict->mutex);
620 goto retry; 593 goto retry;
594
595 } else if (r) {
596 return r;
621 } 597 }
622 vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
623 vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
624 memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
625 598
626retry_id: 599 pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
627 /* search for free vm */ 600 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
628 for (i = 0; i < rdev->vm_manager.nvm; i++) { 601 memset(pd_addr, 0, tables_size);
629 if (!(rdev->vm_manager.use_bitmap & (1 << i))) { 602
630 id = i; 603 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
631 break; 604 return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
605 &rdev->ring_tmp_bo.bo->tbo.mem);
606}
607
608/**
609 * radeon_vm_grab_id - allocate the next free VMID
610 *
611 * @rdev: radeon_device pointer
612 * @vm: vm to allocate id for
613 * @ring: ring we want to submit job to
614 *
615 * Allocate an id for the vm (cayman+).
616 * Returns the fence we need to sync to (if any).
617 *
618 * Global and local mutex must be locked!
619 */
620struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
621 struct radeon_vm *vm, int ring)
622{
623 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
624 unsigned choices[2] = {};
625 unsigned i;
626
627 /* check if the id is still valid */
628 if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
629 return NULL;
630
631 /* we definately need to flush */
632 radeon_fence_unref(&vm->last_flush);
633
634 /* skip over VMID 0, since it is the system VM */
635 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
636 struct radeon_fence *fence = rdev->vm_manager.active[i];
637
638 if (fence == NULL) {
639 /* found a free one */
640 vm->id = i;
641 return NULL;
642 }
643
644 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
645 best[fence->ring] = fence;
646 choices[fence->ring == ring ? 0 : 1] = i;
632 } 647 }
633 } 648 }
634 /* evict vm if necessary */ 649
635 if (id == -1) { 650 for (i = 0; i < 2; ++i) {
636 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list); 651 if (choices[i]) {
637 radeon_vm_unbind(rdev, vm_evict); 652 vm->id = choices[i];
638 goto retry_id; 653 return rdev->vm_manager.active[choices[i]];
654 }
639 } 655 }
640 656
641 /* do hw bind */ 657 /* should never happen */
642 r = rdev->vm_manager.funcs->bind(rdev, vm, id); 658 BUG();
643 if (r) { 659 return NULL;
644 radeon_sa_bo_free(rdev, &vm->sa_bo, NULL); 660}
645 return r; 661
662/**
663 * radeon_vm_fence - remember fence for vm
664 *
665 * @rdev: radeon_device pointer
666 * @vm: vm we want to fence
667 * @fence: fence to remember
668 *
669 * Fence the vm (cayman+).
670 * Set the fence used to protect page table and id.
671 *
672 * Global and local mutex must be locked!
673 */
674void radeon_vm_fence(struct radeon_device *rdev,
675 struct radeon_vm *vm,
676 struct radeon_fence *fence)
677{
678 radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
679 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
680
681 radeon_fence_unref(&vm->fence);
682 vm->fence = radeon_fence_ref(fence);
683}
684
685/**
686 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
687 *
688 * @vm: requested vm
689 * @bo: requested buffer object
690 *
691 * Find @bo inside the requested vm (cayman+).
692 * Search inside the @bos vm list for the requested vm
693 * Returns the found bo_va or NULL if none is found
694 *
695 * Object has to be reserved!
696 */
697struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
698 struct radeon_bo *bo)
699{
700 struct radeon_bo_va *bo_va;
701
702 list_for_each_entry(bo_va, &bo->va, bo_list) {
703 if (bo_va->vm == vm) {
704 return bo_va;
705 }
646 } 706 }
647 rdev->vm_manager.use_bitmap |= 1 << id; 707 return NULL;
648 vm->id = id;
649 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
650 return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
651 &rdev->ring_tmp_bo.bo->tbo.mem);
652} 708}
653 709
654/* object have to be reserved */
655/** 710/**
656 * radeon_vm_bo_add - add a bo to a specific vm 711 * radeon_vm_bo_add - add a bo to a specific vm
657 * 712 *
658 * @rdev: radeon_device pointer 713 * @rdev: radeon_device pointer
659 * @vm: requested vm 714 * @vm: requested vm
660 * @bo: radeon buffer object 715 * @bo: radeon buffer object
661 * @offset: requested offset of the buffer in the VM address space
662 * @flags: attributes of pages (read/write/valid/etc.)
663 * 716 *
664 * Add @bo into the requested vm (cayman+). 717 * Add @bo into the requested vm (cayman+).
665 * Add @bo to the list of bos associated with the vm and validate 718 * Add @bo to the list of bos associated with the vm
666 * the offset requested within the vm address space. 719 * Returns newly added bo_va or NULL for failure
667 * Returns 0 for success, error for failure. 720 *
721 * Object has to be reserved!
668 */ 722 */
669int radeon_vm_bo_add(struct radeon_device *rdev, 723struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
670 struct radeon_vm *vm, 724 struct radeon_vm *vm,
671 struct radeon_bo *bo, 725 struct radeon_bo *bo)
672 uint64_t offset,
673 uint32_t flags)
674{ 726{
675 struct radeon_bo_va *bo_va, *tmp; 727 struct radeon_bo_va *bo_va;
676 struct list_head *head;
677 uint64_t size = radeon_bo_size(bo), last_offset = 0;
678 unsigned last_pfn;
679 728
680 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 729 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
681 if (bo_va == NULL) { 730 if (bo_va == NULL) {
682 return -ENOMEM; 731 return NULL;
683 } 732 }
684 bo_va->vm = vm; 733 bo_va->vm = vm;
685 bo_va->bo = bo; 734 bo_va->bo = bo;
686 bo_va->soffset = offset; 735 bo_va->soffset = 0;
687 bo_va->eoffset = offset + size; 736 bo_va->eoffset = 0;
688 bo_va->flags = flags; 737 bo_va->flags = 0;
689 bo_va->valid = false; 738 bo_va->valid = false;
739 bo_va->ref_count = 1;
690 INIT_LIST_HEAD(&bo_va->bo_list); 740 INIT_LIST_HEAD(&bo_va->bo_list);
691 INIT_LIST_HEAD(&bo_va->vm_list); 741 INIT_LIST_HEAD(&bo_va->vm_list);
692 /* make sure object fit at this offset */
693 if (bo_va->soffset >= bo_va->eoffset) {
694 kfree(bo_va);
695 return -EINVAL;
696 }
697 742
698 last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE; 743 mutex_lock(&vm->mutex);
699 if (last_pfn > rdev->vm_manager.max_pfn) { 744 list_add(&bo_va->vm_list, &vm->va);
700 kfree(bo_va); 745 list_add_tail(&bo_va->bo_list, &bo->va);
701 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", 746 mutex_unlock(&vm->mutex);
702 last_pfn, rdev->vm_manager.max_pfn); 747
703 return -EINVAL; 748 return bo_va;
749}
750
751/**
752 * radeon_vm_bo_set_addr - set bos virtual address inside a vm
753 *
754 * @rdev: radeon_device pointer
755 * @bo_va: bo_va to store the address
756 * @soffset: requested offset of the buffer in the VM address space
757 * @flags: attributes of pages (read/write/valid/etc.)
758 *
759 * Set offset of @bo_va (cayman+).
760 * Validate and set the offset requested within the vm address space.
761 * Returns 0 for success, error for failure.
762 *
763 * Object has to be reserved!
764 */
765int radeon_vm_bo_set_addr(struct radeon_device *rdev,
766 struct radeon_bo_va *bo_va,
767 uint64_t soffset,
768 uint32_t flags)
769{
770 uint64_t size = radeon_bo_size(bo_va->bo);
771 uint64_t eoffset, last_offset = 0;
772 struct radeon_vm *vm = bo_va->vm;
773 struct radeon_bo_va *tmp;
774 struct list_head *head;
775 unsigned last_pfn;
776
777 if (soffset) {
778 /* make sure object fit at this offset */
779 eoffset = soffset + size;
780 if (soffset >= eoffset) {
781 return -EINVAL;
782 }
783
784 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
785 if (last_pfn > rdev->vm_manager.max_pfn) {
786 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
787 last_pfn, rdev->vm_manager.max_pfn);
788 return -EINVAL;
789 }
790
791 } else {
792 eoffset = last_pfn = 0;
704 } 793 }
705 794
706 mutex_lock(&vm->mutex); 795 mutex_lock(&vm->mutex);
@@ -713,7 +802,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
713 if (last_pfn > vm->last_pfn) { 802 if (last_pfn > vm->last_pfn) {
714 /* grow va space 32M by 32M */ 803 /* grow va space 32M by 32M */
715 unsigned align = ((32 << 20) >> 12) - 1; 804 unsigned align = ((32 << 20) >> 12) - 1;
716 radeon_vm_unbind_locked(rdev, vm); 805 radeon_vm_free_pt(rdev, vm);
717 vm->last_pfn = (last_pfn + align) & ~align; 806 vm->last_pfn = (last_pfn + align) & ~align;
718 } 807 }
719 mutex_unlock(&rdev->vm_manager.lock); 808 mutex_unlock(&rdev->vm_manager.lock);
@@ -721,68 +810,60 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
721 head = &vm->va; 810 head = &vm->va;
722 last_offset = 0; 811 last_offset = 0;
723 list_for_each_entry(tmp, &vm->va, vm_list) { 812 list_for_each_entry(tmp, &vm->va, vm_list) {
724 if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) { 813 if (bo_va == tmp) {
814 /* skip over currently modified bo */
815 continue;
816 }
817
818 if (soffset >= last_offset && eoffset <= tmp->soffset) {
725 /* bo can be added before this one */ 819 /* bo can be added before this one */
726 break; 820 break;
727 } 821 }
728 if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) { 822 if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
729 /* bo and tmp overlap, invalid offset */ 823 /* bo and tmp overlap, invalid offset */
730 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", 824 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
731 bo, (unsigned)bo_va->soffset, tmp->bo, 825 bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
732 (unsigned)tmp->soffset, (unsigned)tmp->eoffset); 826 (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
733 kfree(bo_va);
734 mutex_unlock(&vm->mutex); 827 mutex_unlock(&vm->mutex);
735 return -EINVAL; 828 return -EINVAL;
736 } 829 }
737 last_offset = tmp->eoffset; 830 last_offset = tmp->eoffset;
738 head = &tmp->vm_list; 831 head = &tmp->vm_list;
739 } 832 }
740 list_add(&bo_va->vm_list, head); 833
741 list_add_tail(&bo_va->bo_list, &bo->va); 834 bo_va->soffset = soffset;
835 bo_va->eoffset = eoffset;
836 bo_va->flags = flags;
837 bo_va->valid = false;
838 list_move(&bo_va->vm_list, head);
839
742 mutex_unlock(&vm->mutex); 840 mutex_unlock(&vm->mutex);
743 return 0; 841 return 0;
744} 842}
745 843
746/** 844/**
747 * radeon_vm_get_addr - get the physical address of the page 845 * radeon_vm_map_gart - get the physical address of a gart page
748 * 846 *
749 * @rdev: radeon_device pointer 847 * @rdev: radeon_device pointer
750 * @mem: ttm mem 848 * @addr: the unmapped addr
751 * @pfn: pfn
752 * 849 *
753 * Look up the physical address of the page that the pte resolves 850 * Look up the physical address of the page that the pte resolves
754 * to (cayman+). 851 * to (cayman+).
755 * Returns the physical address of the page. 852 * Returns the physical address of the page.
756 */ 853 */
757static u64 radeon_vm_get_addr(struct radeon_device *rdev, 854uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
758 struct ttm_mem_reg *mem,
759 unsigned pfn)
760{ 855{
761 u64 addr = 0; 856 uint64_t result;
762 857
763 switch (mem->mem_type) { 858 /* page table offset */
764 case TTM_PL_VRAM: 859 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
765 addr = (mem->start << PAGE_SHIFT); 860
766 addr += pfn * RADEON_GPU_PAGE_SIZE; 861 /* in case cpu page size != gpu page size*/
767 addr += rdev->vm_manager.vram_base_offset; 862 result |= addr & (~PAGE_MASK);
768 break; 863
769 case TTM_PL_TT: 864 return result;
770 /* offset inside page table */
771 addr = mem->start << PAGE_SHIFT;
772 addr += pfn * RADEON_GPU_PAGE_SIZE;
773 addr = addr >> PAGE_SHIFT;
774 /* page table offset */
775 addr = rdev->gart.pages_addr[addr];
776 /* in case cpu page size != gpu page size*/
777 addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
778 break;
779 default:
780 break;
781 }
782 return addr;
783} 865}
784 866
785/* object have to be reserved & global and local mutex must be locked */
786/** 867/**
787 * radeon_vm_bo_update_pte - map a bo into the vm page table 868 * radeon_vm_bo_update_pte - map a bo into the vm page table
788 * 869 *
@@ -793,103 +874,160 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
793 * 874 *
794 * Fill in the page table entries for @bo (cayman+). 875 * Fill in the page table entries for @bo (cayman+).
795 * Returns 0 for success, -EINVAL for failure. 876 * Returns 0 for success, -EINVAL for failure.
877 *
878 * Object have to be reserved & global and local mutex must be locked!
796 */ 879 */
797int radeon_vm_bo_update_pte(struct radeon_device *rdev, 880int radeon_vm_bo_update_pte(struct radeon_device *rdev,
798 struct radeon_vm *vm, 881 struct radeon_vm *vm,
799 struct radeon_bo *bo, 882 struct radeon_bo *bo,
800 struct ttm_mem_reg *mem) 883 struct ttm_mem_reg *mem)
801{ 884{
885 unsigned ridx = rdev->asic->vm.pt_ring_index;
886 struct radeon_ring *ring = &rdev->ring[ridx];
887 struct radeon_semaphore *sem = NULL;
802 struct radeon_bo_va *bo_va; 888 struct radeon_bo_va *bo_va;
803 unsigned ngpu_pages, i; 889 unsigned nptes, npdes, ndw;
804 uint64_t addr = 0, pfn; 890 uint64_t pe, addr;
805 uint32_t flags; 891 uint64_t pfn;
892 int r;
806 893
807 /* nothing to do if vm isn't bound */ 894 /* nothing to do if vm isn't bound */
808 if (vm->id == -1) 895 if (vm->sa_bo == NULL)
809 return 0; 896 return 0;
810 897
811 bo_va = radeon_bo_va(bo, vm); 898 bo_va = radeon_vm_bo_find(vm, bo);
812 if (bo_va == NULL) { 899 if (bo_va == NULL) {
813 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); 900 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
814 return -EINVAL; 901 return -EINVAL;
815 } 902 }
816 903
817 if (bo_va->valid && mem) 904 if (!bo_va->soffset) {
905 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
906 bo, vm);
907 return -EINVAL;
908 }
909
910 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
818 return 0; 911 return 0;
819 912
820 ngpu_pages = radeon_bo_ngpu_pages(bo);
821 bo_va->flags &= ~RADEON_VM_PAGE_VALID; 913 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
822 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; 914 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
823 if (mem) { 915 if (mem) {
916 addr = mem->start << PAGE_SHIFT;
824 if (mem->mem_type != TTM_PL_SYSTEM) { 917 if (mem->mem_type != TTM_PL_SYSTEM) {
825 bo_va->flags |= RADEON_VM_PAGE_VALID; 918 bo_va->flags |= RADEON_VM_PAGE_VALID;
826 bo_va->valid = true; 919 bo_va->valid = true;
827 } 920 }
828 if (mem->mem_type == TTM_PL_TT) { 921 if (mem->mem_type == TTM_PL_TT) {
829 bo_va->flags |= RADEON_VM_PAGE_SYSTEM; 922 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
923 } else {
924 addr += rdev->vm_manager.vram_base_offset;
830 } 925 }
926 } else {
927 addr = 0;
928 bo_va->valid = false;
831 } 929 }
832 pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE; 930
833 flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags); 931 if (vm->fence && radeon_fence_signaled(vm->fence)) {
834 for (i = 0, addr = 0; i < ngpu_pages; i++) { 932 radeon_fence_unref(&vm->fence);
835 if (mem && bo_va->valid) { 933 }
836 addr = radeon_vm_get_addr(rdev, mem, i); 934
935 if (vm->fence && vm->fence->ring != ridx) {
936 r = radeon_semaphore_create(rdev, &sem);
937 if (r) {
938 return r;
837 } 939 }
838 rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
839 } 940 }
840 rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm); 941
942 /* estimate number of dw needed */
943 /* reserve space for 32-bit padding */
944 ndw = 32;
945
946 nptes = radeon_bo_ngpu_pages(bo);
947
948 pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
949
950 /* handle cases where a bo spans several pdes */
951 npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
952 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
953
954 /* reserve space for one header for every 2k dwords */
955 ndw += (nptes >> 11) * 3;
956 /* reserve space for pte addresses */
957 ndw += nptes * 2;
958
959 /* reserve space for one header for every 2k dwords */
960 ndw += (npdes >> 11) * 3;
961 /* reserve space for pde addresses */
962 ndw += npdes * 2;
963
964 r = radeon_ring_lock(rdev, ring, ndw);
965 if (r) {
966 return r;
967 }
968
969 if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
970 radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
971 radeon_fence_note_sync(vm->fence, ridx);
972 }
973
974 /* update page table entries */
975 pe = vm->pd_gpu_addr;
976 pe += radeon_vm_directory_size(rdev);
977 pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
978
979 radeon_asic_vm_set_page(rdev, pe, addr, nptes,
980 RADEON_GPU_PAGE_SIZE, bo_va->flags);
981
982 /* update page directory entries */
983 addr = pe;
984
985 pe = vm->pd_gpu_addr;
986 pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
987
988 radeon_asic_vm_set_page(rdev, pe, addr, npdes,
989 RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
990
991 radeon_fence_unref(&vm->fence);
992 r = radeon_fence_emit(rdev, &vm->fence, ridx);
993 if (r) {
994 radeon_ring_unlock_undo(rdev, ring);
995 return r;
996 }
997 radeon_ring_unlock_commit(rdev, ring);
998 radeon_semaphore_free(rdev, &sem, vm->fence);
999 radeon_fence_unref(&vm->last_flush);
841 return 0; 1000 return 0;
842} 1001}
843 1002
844/* object have to be reserved */
845/** 1003/**
846 * radeon_vm_bo_rmv - remove a bo to a specific vm 1004 * radeon_vm_bo_rmv - remove a bo to a specific vm
847 * 1005 *
848 * @rdev: radeon_device pointer 1006 * @rdev: radeon_device pointer
849 * @vm: requested vm 1007 * @bo_va: requested bo_va
850 * @bo: radeon buffer object
851 * 1008 *
852 * Remove @bo from the requested vm (cayman+). 1009 * Remove @bo_va->bo from the requested vm (cayman+).
853 * Remove @bo from the list of bos associated with the vm and 1010 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
854 * remove the ptes for @bo in the page table. 1011 * remove the ptes for @bo_va in the page table.
855 * Returns 0 for success. 1012 * Returns 0 for success.
1013 *
1014 * Object have to be reserved!
856 */ 1015 */
857int radeon_vm_bo_rmv(struct radeon_device *rdev, 1016int radeon_vm_bo_rmv(struct radeon_device *rdev,
858 struct radeon_vm *vm, 1017 struct radeon_bo_va *bo_va)
859 struct radeon_bo *bo)
860{ 1018{
861 struct radeon_bo_va *bo_va;
862 int r; 1019 int r;
863 1020
864 bo_va = radeon_bo_va(bo, vm);
865 if (bo_va == NULL)
866 return 0;
867
868 /* wait for va use to end */
869 while (bo_va->fence) {
870 r = radeon_fence_wait(bo_va->fence, false);
871 if (r) {
872 DRM_ERROR("error while waiting for fence: %d\n", r);
873 }
874 if (r == -EDEADLK) {
875 r = radeon_gpu_reset(rdev);
876 if (!r)
877 continue;
878 }
879 break;
880 }
881 radeon_fence_unref(&bo_va->fence);
882
883 mutex_lock(&rdev->vm_manager.lock); 1021 mutex_lock(&rdev->vm_manager.lock);
884 mutex_lock(&vm->mutex); 1022 mutex_lock(&bo_va->vm->mutex);
885 radeon_vm_bo_update_pte(rdev, vm, bo, NULL); 1023 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
886 mutex_unlock(&rdev->vm_manager.lock); 1024 mutex_unlock(&rdev->vm_manager.lock);
887 list_del(&bo_va->vm_list); 1025 list_del(&bo_va->vm_list);
888 mutex_unlock(&vm->mutex); 1026 mutex_unlock(&bo_va->vm->mutex);
889 list_del(&bo_va->bo_list); 1027 list_del(&bo_va->bo_list);
890 1028
891 kfree(bo_va); 1029 kfree(bo_va);
892 return 0; 1030 return r;
893} 1031}
894 1032
895/** 1033/**
@@ -925,27 +1063,23 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
925 */ 1063 */
926int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) 1064int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
927{ 1065{
1066 struct radeon_bo_va *bo_va;
928 int r; 1067 int r;
929 1068
930 vm->id = -1; 1069 vm->id = 0;
931 vm->fence = NULL; 1070 vm->fence = NULL;
1071 vm->last_pfn = 0;
932 mutex_init(&vm->mutex); 1072 mutex_init(&vm->mutex);
933 INIT_LIST_HEAD(&vm->list); 1073 INIT_LIST_HEAD(&vm->list);
934 INIT_LIST_HEAD(&vm->va); 1074 INIT_LIST_HEAD(&vm->va);
935 /* SI requires equal sized PTs for all VMs, so always set 1075
936 * last_pfn to max_pfn. cayman allows variable sized
937 * pts so we can grow then as needed. Once we switch
938 * to two level pts we can unify this again.
939 */
940 if (rdev->family >= CHIP_TAHITI)
941 vm->last_pfn = rdev->vm_manager.max_pfn;
942 else
943 vm->last_pfn = 0;
944 /* map the ib pool buffer at 0 in virtual address space, set 1076 /* map the ib pool buffer at 0 in virtual address space, set
945 * read only 1077 * read only
946 */ 1078 */
947 r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0, 1079 bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
948 RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); 1080 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
1081 RADEON_VM_PAGE_READABLE |
1082 RADEON_VM_PAGE_SNOOPED);
949 return r; 1083 return r;
950} 1084}
951 1085
@@ -965,7 +1099,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
965 1099
966 mutex_lock(&rdev->vm_manager.lock); 1100 mutex_lock(&rdev->vm_manager.lock);
967 mutex_lock(&vm->mutex); 1101 mutex_lock(&vm->mutex);
968 radeon_vm_unbind_locked(rdev, vm); 1102 radeon_vm_free_pt(rdev, vm);
969 mutex_unlock(&rdev->vm_manager.lock); 1103 mutex_unlock(&rdev->vm_manager.lock);
970 1104
971 /* remove all bo at this point non are busy any more because unbind 1105 /* remove all bo at this point non are busy any more because unbind
@@ -973,10 +1107,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
973 */ 1107 */
974 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 1108 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
975 if (!r) { 1109 if (!r) {
976 bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); 1110 bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
977 list_del_init(&bo_va->bo_list); 1111 list_del_init(&bo_va->bo_list);
978 list_del_init(&bo_va->vm_list); 1112 list_del_init(&bo_va->vm_list);
979 radeon_fence_unref(&bo_va->fence);
980 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 1113 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
981 kfree(bo_va); 1114 kfree(bo_va);
982 } 1115 }
@@ -988,10 +1121,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
988 r = radeon_bo_reserve(bo_va->bo, false); 1121 r = radeon_bo_reserve(bo_va->bo, false);
989 if (!r) { 1122 if (!r) {
990 list_del_init(&bo_va->bo_list); 1123 list_del_init(&bo_va->bo_list);
991 radeon_fence_unref(&bo_va->fence);
992 radeon_bo_unreserve(bo_va->bo); 1124 radeon_bo_unreserve(bo_va->bo);
993 kfree(bo_va); 1125 kfree(bo_va);
994 } 1126 }
995 } 1127 }
1128 radeon_fence_unref(&vm->fence);
1129 radeon_fence_unref(&vm->last_flush);
996 mutex_unlock(&vm->mutex); 1130 mutex_unlock(&vm->mutex);
997} 1131}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 1b57b0058ad6..6579befa5101 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -124,6 +124,30 @@ void radeon_gem_fini(struct radeon_device *rdev)
124 */ 124 */
125int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 125int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
126{ 126{
127 struct radeon_bo *rbo = gem_to_radeon_bo(obj);
128 struct radeon_device *rdev = rbo->rdev;
129 struct radeon_fpriv *fpriv = file_priv->driver_priv;
130 struct radeon_vm *vm = &fpriv->vm;
131 struct radeon_bo_va *bo_va;
132 int r;
133
134 if (rdev->family < CHIP_CAYMAN) {
135 return 0;
136 }
137
138 r = radeon_bo_reserve(rbo, false);
139 if (r) {
140 return r;
141 }
142
143 bo_va = radeon_vm_bo_find(vm, rbo);
144 if (!bo_va) {
145 bo_va = radeon_vm_bo_add(rdev, vm, rbo);
146 } else {
147 ++bo_va->ref_count;
148 }
149 radeon_bo_unreserve(rbo);
150
127 return 0; 151 return 0;
128} 152}
129 153
@@ -134,16 +158,25 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
134 struct radeon_device *rdev = rbo->rdev; 158 struct radeon_device *rdev = rbo->rdev;
135 struct radeon_fpriv *fpriv = file_priv->driver_priv; 159 struct radeon_fpriv *fpriv = file_priv->driver_priv;
136 struct radeon_vm *vm = &fpriv->vm; 160 struct radeon_vm *vm = &fpriv->vm;
161 struct radeon_bo_va *bo_va;
162 int r;
137 163
138 if (rdev->family < CHIP_CAYMAN) { 164 if (rdev->family < CHIP_CAYMAN) {
139 return; 165 return;
140 } 166 }
141 167
142 if (radeon_bo_reserve(rbo, false)) { 168 r = radeon_bo_reserve(rbo, true);
143 dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n"); 169 if (r) {
170 dev_err(rdev->dev, "leaking bo va because "
171 "we fail to reserve bo (%d)\n", r);
144 return; 172 return;
145 } 173 }
146 radeon_vm_bo_rmv(rdev, vm, rbo); 174 bo_va = radeon_vm_bo_find(vm, rbo);
175 if (bo_va) {
176 if (--bo_va->ref_count == 0) {
177 radeon_vm_bo_rmv(rdev, bo_va);
178 }
179 }
147 radeon_bo_unreserve(rbo); 180 radeon_bo_unreserve(rbo);
148} 181}
149 182
@@ -459,19 +492,24 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
459 drm_gem_object_unreference_unlocked(gobj); 492 drm_gem_object_unreference_unlocked(gobj);
460 return r; 493 return r;
461 } 494 }
495 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
496 if (!bo_va) {
497 args->operation = RADEON_VA_RESULT_ERROR;
498 drm_gem_object_unreference_unlocked(gobj);
499 return -ENOENT;
500 }
501
462 switch (args->operation) { 502 switch (args->operation) {
463 case RADEON_VA_MAP: 503 case RADEON_VA_MAP:
464 bo_va = radeon_bo_va(rbo, &fpriv->vm); 504 if (bo_va->soffset) {
465 if (bo_va) {
466 args->operation = RADEON_VA_RESULT_VA_EXIST; 505 args->operation = RADEON_VA_RESULT_VA_EXIST;
467 args->offset = bo_va->soffset; 506 args->offset = bo_va->soffset;
468 goto out; 507 goto out;
469 } 508 }
470 r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo, 509 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
471 args->offset, args->flags);
472 break; 510 break;
473 case RADEON_VA_UNMAP: 511 case RADEON_VA_UNMAP:
474 r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo); 512 r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
475 break; 513 break;
476 default: 514 default:
477 break; 515 break;
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index 48b7cea31e08..c4bb2269be10 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -369,7 +369,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
369#define compat_radeon_cp_setparam NULL 369#define compat_radeon_cp_setparam NULL
370#endif /* X86_64 || IA64 */ 370#endif /* X86_64 || IA64 */
371 371
372drm_ioctl_compat_t *radeon_compat_ioctls[] = { 372static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
373 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, 373 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
374 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, 374 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
375 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, 375 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index afaa1727abd2..b06fa5936100 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -99,7 +99,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
99 /* Disable *all* interrupts */ 99 /* Disable *all* interrupts */
100 for (i = 0; i < RADEON_NUM_RINGS; i++) 100 for (i = 0; i < RADEON_NUM_RINGS; i++)
101 atomic_set(&rdev->irq.ring_int[i], 0); 101 atomic_set(&rdev->irq.ring_int[i], 0);
102 rdev->irq.gui_idle = false;
103 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 102 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
104 rdev->irq.hpd[i] = false; 103 rdev->irq.hpd[i] = false;
105 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 104 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -147,7 +146,6 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
147 /* Disable *all* interrupts */ 146 /* Disable *all* interrupts */
148 for (i = 0; i < RADEON_NUM_RINGS; i++) 147 for (i = 0; i < RADEON_NUM_RINGS; i++)
149 atomic_set(&rdev->irq.ring_int[i], 0); 148 atomic_set(&rdev->irq.ring_int[i], 0);
150 rdev->irq.gui_idle = false;
151 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 149 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
152 rdev->irq.hpd[i] = false; 150 rdev->irq.hpd[i] = false;
153 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 151 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -204,6 +202,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
204 (rdev->pdev->subsystem_device == 0x01fd)) 202 (rdev->pdev->subsystem_device == 0x01fd))
205 return true; 203 return true;
206 204
205 /* Gateway RS690 only seems to work with MSIs. */
206 if ((rdev->pdev->device == 0x791f) &&
207 (rdev->pdev->subsystem_vendor == 0x107b) &&
208 (rdev->pdev->subsystem_device == 0x0185))
209 return true;
210
211 /* try and enable MSIs by default on all RS690s */
212 if (rdev->family == CHIP_RS690)
213 return true;
214
207 /* RV515 seems to have MSI issues where it loses 215 /* RV515 seems to have MSI issues where it loses
208 * MSI rearms occasionally. This leads to lockups and freezes. 216 * MSI rearms occasionally. This leads to lockups and freezes.
209 * disable it by default. 217 * disable it by default.
@@ -457,34 +465,3 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
457 spin_unlock_irqrestore(&rdev->irq.lock, irqflags); 465 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
458} 466}
459 467
460/**
461 * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle
462 *
463 * @rdev: radeon device pointer
464 *
465 * Enabled the GUI idle interrupt and waits for it to fire (r6xx+).
466 * This is currently used to make sure the 3D engine is idle for power
467 * management, but should be replaces with proper fence waits.
468 * GUI idle interrupts don't work very well on pre-r6xx hw and it also
469 * does not take into account other aspects of the chip that may be busy.
470 * DO NOT USE GOING FORWARD.
471 */
472int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
473{
474 unsigned long irqflags;
475 int r;
476
477 spin_lock_irqsave(&rdev->irq.lock, irqflags);
478 rdev->irq.gui_idle = true;
479 radeon_irq_set(rdev);
480 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
481
482 r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
483 msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
484
485 spin_lock_irqsave(&rdev->irq.lock, irqflags);
486 rdev->irq.gui_idle = false;
487 radeon_irq_set(rdev);
488 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
489 return r;
490}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 414b4acf6947..cb8e94d1a2b2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -51,6 +51,7 @@ int radeon_driver_unload_kms(struct drm_device *dev)
51 51
52 if (rdev == NULL) 52 if (rdev == NULL)
53 return 0; 53 return 0;
54 radeon_acpi_fini(rdev);
54 radeon_modeset_fini(rdev); 55 radeon_modeset_fini(rdev);
55 radeon_device_fini(rdev); 56 radeon_device_fini(rdev);
56 kfree(rdev); 57 kfree(rdev);
@@ -103,11 +104,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
103 goto out; 104 goto out;
104 } 105 }
105 106
106 /* Call ACPI methods */
107 acpi_status = radeon_acpi_init(rdev);
108 if (acpi_status)
109 dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
110
111 /* Again modeset_init should fail only on fatal error 107 /* Again modeset_init should fail only on fatal error
112 * otherwise it should provide enough functionalities 108 * otherwise it should provide enough functionalities
113 * for shadowfb to run 109 * for shadowfb to run
@@ -115,6 +111,17 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
115 r = radeon_modeset_init(rdev); 111 r = radeon_modeset_init(rdev);
116 if (r) 112 if (r)
117 dev_err(&dev->pdev->dev, "Fatal error during modeset init\n"); 113 dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
114
115 /* Call ACPI methods: require modeset init
116 * but failure is not fatal
117 */
118 if (!r) {
119 acpi_status = radeon_acpi_init(rdev);
120 if (acpi_status)
121 dev_dbg(&dev->pdev->dev,
122 "Error during ACPI methods call\n");
123 }
124
118out: 125out:
119 if (r) 126 if (r)
120 radeon_driver_unload_kms(dev); 127 radeon_driver_unload_kms(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 94b4a1c12893..5677a424b585 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -206,11 +206,6 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
206 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); 206 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
207} 207}
208 208
209void radeon_restore_common_regs(struct drm_device *dev)
210{
211 /* don't need this yet */
212}
213
214static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev) 209static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
215{ 210{
216 struct radeon_device *rdev = dev->dev_private; 211 struct radeon_device *rdev = dev->dev_private;
@@ -295,7 +290,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
295 return 1; 290 return 1;
296} 291}
297 292
298void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) 293static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
299{ 294{
300 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
301 struct drm_device *dev = crtc->dev; 296 struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 670e9910f869..8ad9c5f16014 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -271,13 +271,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
271 271
272#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 272#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
273 273
274#define MAX_RADEON_LEVEL 0xFF
275
276struct radeon_backlight_privdata {
277 struct radeon_encoder *encoder;
278 uint8_t negative;
279};
280
281static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) 274static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
282{ 275{
283 struct radeon_backlight_privdata *pdata = bl_get_data(bd); 276 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -286,21 +279,33 @@ static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
286 /* Convert brightness to hardware level */ 279 /* Convert brightness to hardware level */
287 if (bd->props.brightness < 0) 280 if (bd->props.brightness < 0)
288 level = 0; 281 level = 0;
289 else if (bd->props.brightness > MAX_RADEON_LEVEL) 282 else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
290 level = MAX_RADEON_LEVEL; 283 level = RADEON_MAX_BL_LEVEL;
291 else 284 else
292 level = bd->props.brightness; 285 level = bd->props.brightness;
293 286
294 if (pdata->negative) 287 if (pdata->negative)
295 level = MAX_RADEON_LEVEL - level; 288 level = RADEON_MAX_BL_LEVEL - level;
296 289
297 return level; 290 return level;
298} 291}
299 292
300static int radeon_legacy_backlight_update_status(struct backlight_device *bd) 293u8
294radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
295{
296 struct drm_device *dev = radeon_encoder->base.dev;
297 struct radeon_device *rdev = dev->dev_private;
298 u8 backlight_level;
299
300 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
301 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
302
303 return backlight_level;
304}
305
306void
307radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
301{ 308{
302 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
303 struct radeon_encoder *radeon_encoder = pdata->encoder;
304 struct drm_device *dev = radeon_encoder->base.dev; 309 struct drm_device *dev = radeon_encoder->base.dev;
305 struct radeon_device *rdev = dev->dev_private; 310 struct radeon_device *rdev = dev->dev_private;
306 int dpms_mode = DRM_MODE_DPMS_ON; 311 int dpms_mode = DRM_MODE_DPMS_ON;
@@ -308,19 +313,31 @@ static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
308 if (radeon_encoder->enc_priv) { 313 if (radeon_encoder->enc_priv) {
309 if (rdev->is_atom_bios) { 314 if (rdev->is_atom_bios) {
310 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; 315 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
311 dpms_mode = lvds->dpms_mode; 316 if (lvds->backlight_level > 0)
312 lvds->backlight_level = radeon_legacy_lvds_level(bd); 317 dpms_mode = lvds->dpms_mode;
318 else
319 dpms_mode = DRM_MODE_DPMS_OFF;
320 lvds->backlight_level = level;
313 } else { 321 } else {
314 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; 322 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
315 dpms_mode = lvds->dpms_mode; 323 if (lvds->backlight_level > 0)
316 lvds->backlight_level = radeon_legacy_lvds_level(bd); 324 dpms_mode = lvds->dpms_mode;
325 else
326 dpms_mode = DRM_MODE_DPMS_OFF;
327 lvds->backlight_level = level;
317 } 328 }
318 } 329 }
319 330
320 if (bd->props.brightness > 0) 331 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
321 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); 332}
322 else 333
323 radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF); 334static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
335{
336 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
337 struct radeon_encoder *radeon_encoder = pdata->encoder;
338
339 radeon_legacy_set_backlight_level(radeon_encoder,
340 radeon_legacy_lvds_level(bd));
324 341
325 return 0; 342 return 0;
326} 343}
@@ -336,7 +353,7 @@ static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
336 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> 353 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
337 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; 354 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
338 355
339 return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level; 356 return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
340} 357}
341 358
342static const struct backlight_ops radeon_backlight_ops = { 359static const struct backlight_ops radeon_backlight_ops = {
@@ -370,7 +387,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
370 } 387 }
371 388
372 memset(&props, 0, sizeof(props)); 389 memset(&props, 0, sizeof(props));
373 props.max_brightness = MAX_RADEON_LEVEL; 390 props.max_brightness = RADEON_MAX_BL_LEVEL;
374 props.type = BACKLIGHT_RAW; 391 props.type = BACKLIGHT_RAW;
375 bd = backlight_device_register("radeon_bl", &drm_connector->kdev, 392 bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
376 pdata, &radeon_backlight_ops, &props); 393 pdata, &radeon_backlight_ops, &props);
@@ -449,7 +466,7 @@ static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
449 } 466 }
450 467
451 if (bd) { 468 if (bd) {
452 struct radeon_legacy_backlight_privdata *pdata; 469 struct radeon_backlight_privdata *pdata;
453 470
454 pdata = bl_get_data(bd); 471 pdata = bl_get_data(bd);
455 backlight_device_unregister(bd); 472 backlight_device_unregister(bd);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d56978949f34..527761801590 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -252,8 +252,23 @@ struct radeon_mode_info {
252 252
253 /* pointer to fbdev info structure */ 253 /* pointer to fbdev info structure */
254 struct radeon_fbdev *rfbdev; 254 struct radeon_fbdev *rfbdev;
255 /* firmware flags */
256 u16 firmware_flags;
257 /* pointer to backlight encoder */
258 struct radeon_encoder *bl_encoder;
255}; 259};
256 260
261#define RADEON_MAX_BL_LEVEL 0xFF
262
263#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
264
265struct radeon_backlight_privdata {
266 struct radeon_encoder *encoder;
267 uint8_t negative;
268};
269
270#endif
271
257#define MAX_H_CODE_TIMING_LEN 32 272#define MAX_H_CODE_TIMING_LEN 32
258#define MAX_V_CODE_TIMING_LEN 32 273#define MAX_V_CODE_TIMING_LEN 32
259 274
@@ -269,6 +284,18 @@ struct radeon_tv_regs {
269 uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN]; 284 uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
270}; 285};
271 286
287struct radeon_atom_ss {
288 uint16_t percentage;
289 uint8_t type;
290 uint16_t step;
291 uint8_t delay;
292 uint8_t range;
293 uint8_t refdiv;
294 /* asic_ss */
295 uint16_t rate;
296 uint16_t amount;
297};
298
272struct radeon_crtc { 299struct radeon_crtc {
273 struct drm_crtc base; 300 struct drm_crtc base;
274 int crtc_id; 301 int crtc_id;
@@ -293,6 +320,16 @@ struct radeon_crtc {
293 /* page flipping */ 320 /* page flipping */
294 struct radeon_unpin_work *unpin_work; 321 struct radeon_unpin_work *unpin_work;
295 int deferred_flip_completion; 322 int deferred_flip_completion;
323 /* pll sharing */
324 struct radeon_atom_ss ss;
325 bool ss_enabled;
326 u32 adjusted_clock;
327 int bpc;
328 u32 pll_reference_div;
329 u32 pll_post_div;
330 u32 pll_flags;
331 struct drm_encoder *encoder;
332 struct drm_connector *connector;
296}; 333};
297 334
298struct radeon_encoder_primary_dac { 335struct radeon_encoder_primary_dac {
@@ -346,18 +383,6 @@ struct radeon_encoder_ext_tmds {
346}; 383};
347 384
348/* spread spectrum */ 385/* spread spectrum */
349struct radeon_atom_ss {
350 uint16_t percentage;
351 uint8_t type;
352 uint16_t step;
353 uint8_t delay;
354 uint8_t range;
355 uint8_t refdiv;
356 /* asic_ss */
357 uint16_t rate;
358 uint16_t amount;
359};
360
361struct radeon_encoder_atom_dig { 386struct radeon_encoder_atom_dig {
362 bool linkb; 387 bool linkb;
363 /* atom dig */ 388 /* atom dig */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 9024e7222839..a236795dc69a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,7 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
52 52
53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { 53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
54 /* remove from all vm address space */ 54 /* remove from all vm address space */
55 radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo); 55 radeon_vm_bo_rmv(bo->rdev, bo_va);
56 } 56 }
57} 57}
58 58
@@ -627,18 +627,17 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
627/** 627/**
628 * radeon_bo_reserve - reserve bo 628 * radeon_bo_reserve - reserve bo
629 * @bo: bo structure 629 * @bo: bo structure
630 * @no_wait: don't sleep while trying to reserve (return -EBUSY) 630 * @no_intr: don't return -ERESTARTSYS on pending signal
631 * 631 *
632 * Returns: 632 * Returns:
633 * -EBUSY: buffer is busy and @no_wait is true
634 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by 633 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
635 * a signal. Release all buffer reservations and return to user-space. 634 * a signal. Release all buffer reservations and return to user-space.
636 */ 635 */
637int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) 636int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
638{ 637{
639 int r; 638 int r;
640 639
641 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 640 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
642 if (unlikely(r != 0)) { 641 if (unlikely(r != 0)) {
643 if (r != -ERESTARTSYS) 642 if (r != -ERESTARTSYS)
644 dev_err(bo->rdev->dev, "%p reserve failed\n", bo); 643 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
@@ -646,16 +645,3 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
646 } 645 }
647 return 0; 646 return 0;
648} 647}
649
650/* object have to be reserved */
651struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
652{
653 struct radeon_bo_va *bo_va;
654
655 list_for_each_entry(bo_va, &rbo->va, bo_list) {
656 if (bo_va->vm == vm) {
657 return bo_va;
658 }
659 }
660 return NULL;
661}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 17fb99f177cf..93cd491fff2e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -52,7 +52,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
52 return 0; 52 return 0;
53} 53}
54 54
55int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait); 55int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
56 56
57static inline void radeon_bo_unreserve(struct radeon_bo *bo) 57static inline void radeon_bo_unreserve(struct radeon_bo *bo)
58{ 58{
@@ -141,8 +141,6 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
141 struct ttm_mem_reg *mem); 141 struct ttm_mem_reg *mem);
142extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 142extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
143extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); 143extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
144extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
145 struct radeon_vm *vm);
146 144
147/* 145/*
148 * sub allocation 146 * sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 7ae606600107..bc2e7050a9d8 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -24,9 +24,6 @@
24#include "radeon.h" 24#include "radeon.h"
25#include "avivod.h" 25#include "avivod.h"
26#include "atom.h" 26#include "atom.h"
27#ifdef CONFIG_ACPI
28#include <linux/acpi.h>
29#endif
30#include <linux/power_supply.h> 27#include <linux/power_supply.h>
31#include <linux/hwmon.h> 28#include <linux/hwmon.h>
32#include <linux/hwmon-sysfs.h> 29#include <linux/hwmon-sysfs.h>
@@ -36,7 +33,7 @@
36#define RADEON_WAIT_VBLANK_TIMEOUT 200 33#define RADEON_WAIT_VBLANK_TIMEOUT 200
37 34
38static const char *radeon_pm_state_type_name[5] = { 35static const char *radeon_pm_state_type_name[5] = {
39 "Default", 36 "",
40 "Powersave", 37 "Powersave",
41 "Battery", 38 "Battery",
42 "Balanced", 39 "Balanced",
@@ -50,8 +47,6 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
50static void radeon_pm_update_profile(struct radeon_device *rdev); 47static void radeon_pm_update_profile(struct radeon_device *rdev);
51static void radeon_pm_set_clocks(struct radeon_device *rdev); 48static void radeon_pm_set_clocks(struct radeon_device *rdev);
52 49
53#define ACPI_AC_CLASS "ac_adapter"
54
55int radeon_pm_get_type_index(struct radeon_device *rdev, 50int radeon_pm_get_type_index(struct radeon_device *rdev,
56 enum radeon_pm_state_type ps_type, 51 enum radeon_pm_state_type ps_type,
57 int instance) 52 int instance)
@@ -70,33 +65,17 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
70 return rdev->pm.default_power_state_index; 65 return rdev->pm.default_power_state_index;
71} 66}
72 67
73#ifdef CONFIG_ACPI 68void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
74static int radeon_acpi_event(struct notifier_block *nb,
75 unsigned long val,
76 void *data)
77{ 69{
78 struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); 70 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
79 struct acpi_bus_event *entry = (struct acpi_bus_event *)data; 71 if (rdev->pm.profile == PM_PROFILE_AUTO) {
80 72 mutex_lock(&rdev->pm.mutex);
81 if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { 73 radeon_pm_update_profile(rdev);
82 if (power_supply_is_system_supplied() > 0) 74 radeon_pm_set_clocks(rdev);
83 DRM_DEBUG_DRIVER("pm: AC\n"); 75 mutex_unlock(&rdev->pm.mutex);
84 else
85 DRM_DEBUG_DRIVER("pm: DC\n");
86
87 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
88 if (rdev->pm.profile == PM_PROFILE_AUTO) {
89 mutex_lock(&rdev->pm.mutex);
90 radeon_pm_update_profile(rdev);
91 radeon_pm_set_clocks(rdev);
92 mutex_unlock(&rdev->pm.mutex);
93 }
94 } 76 }
95 } 77 }
96
97 return NOTIFY_OK;
98} 78}
99#endif
100 79
101static void radeon_pm_update_profile(struct radeon_device *rdev) 80static void radeon_pm_update_profile(struct radeon_device *rdev)
102{ 81{
@@ -188,8 +167,21 @@ static void radeon_set_power_state(struct radeon_device *rdev)
188 if (sclk > rdev->pm.default_sclk) 167 if (sclk > rdev->pm.default_sclk)
189 sclk = rdev->pm.default_sclk; 168 sclk = rdev->pm.default_sclk;
190 169
191 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 170 /* starting with BTC, there is one state that is used for both
192 clock_info[rdev->pm.requested_clock_mode_index].mclk; 171 * MH and SH. Difference is that we always use the high clock index for
172 * mclk.
173 */
174 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
175 (rdev->family >= CHIP_BARTS) &&
176 rdev->pm.active_crtc_count &&
177 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
178 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
179 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
180 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
181 else
182 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
183 clock_info[rdev->pm.requested_clock_mode_index].mclk;
184
193 if (mclk > rdev->pm.default_mclk) 185 if (mclk > rdev->pm.default_mclk)
194 mclk = rdev->pm.default_mclk; 186 mclk = rdev->pm.default_mclk;
195 187
@@ -253,18 +245,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
253 down_write(&rdev->pm.mclk_lock); 245 down_write(&rdev->pm.mclk_lock);
254 mutex_lock(&rdev->ring_lock); 246 mutex_lock(&rdev->ring_lock);
255 247
256 /* gui idle int has issues on older chips it seems */ 248 /* wait for the rings to drain */
257 if (rdev->family >= CHIP_R600) { 249 for (i = 0; i < RADEON_NUM_RINGS; i++) {
258 if (rdev->irq.installed) { 250 struct radeon_ring *ring = &rdev->ring[i];
259 /* wait for GPU to become idle */ 251 if (ring->ready)
260 radeon_irq_kms_wait_gui_idle(rdev); 252 radeon_fence_wait_empty_locked(rdev, i);
261 }
262 } else {
263 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
264 if (ring->ready) {
265 radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
266 }
267 } 253 }
254
268 radeon_unmap_vram_bos(rdev); 255 radeon_unmap_vram_bos(rdev);
269 256
270 if (rdev->irq.installed) { 257 if (rdev->irq.installed) {
@@ -320,17 +307,15 @@ static void radeon_pm_print_states(struct radeon_device *rdev)
320 for (j = 0; j < power_state->num_clock_modes; j++) { 307 for (j = 0; j < power_state->num_clock_modes; j++) {
321 clock_info = &(power_state->clock_info[j]); 308 clock_info = &(power_state->clock_info[j]);
322 if (rdev->flags & RADEON_IS_IGP) 309 if (rdev->flags & RADEON_IS_IGP)
323 DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n", 310 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
324 j, 311 j,
325 clock_info->sclk * 10, 312 clock_info->sclk * 10);
326 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
327 else 313 else
328 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n", 314 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
329 j, 315 j,
330 clock_info->sclk * 10, 316 clock_info->sclk * 10,
331 clock_info->mclk * 10, 317 clock_info->mclk * 10,
332 clock_info->voltage.voltage, 318 clock_info->voltage.voltage);
333 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
334 } 319 }
335 } 320 }
336} 321}
@@ -547,7 +532,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
547void radeon_pm_resume(struct radeon_device *rdev) 532void radeon_pm_resume(struct radeon_device *rdev)
548{ 533{
549 /* set up the default clocks if the MC ucode is loaded */ 534 /* set up the default clocks if the MC ucode is loaded */
550 if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { 535 if ((rdev->family >= CHIP_BARTS) &&
536 (rdev->family <= CHIP_CAYMAN) &&
537 rdev->mc_fw) {
551 if (rdev->pm.default_vddc) 538 if (rdev->pm.default_vddc)
552 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 539 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
553 SET_VOLTAGE_TYPE_ASIC_VDDC); 540 SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -602,7 +589,9 @@ int radeon_pm_init(struct radeon_device *rdev)
602 radeon_pm_print_states(rdev); 589 radeon_pm_print_states(rdev);
603 radeon_pm_init_profile(rdev); 590 radeon_pm_init_profile(rdev);
604 /* set up the default clocks if the MC ucode is loaded */ 591 /* set up the default clocks if the MC ucode is loaded */
605 if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { 592 if ((rdev->family >= CHIP_BARTS) &&
593 (rdev->family <= CHIP_CAYMAN) &&
594 rdev->mc_fw) {
606 if (rdev->pm.default_vddc) 595 if (rdev->pm.default_vddc)
607 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 596 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
608 SET_VOLTAGE_TYPE_ASIC_VDDC); 597 SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -632,10 +621,6 @@ int radeon_pm_init(struct radeon_device *rdev)
632 if (ret) 621 if (ret)
633 DRM_ERROR("failed to create device file for power method\n"); 622 DRM_ERROR("failed to create device file for power method\n");
634 623
635#ifdef CONFIG_ACPI
636 rdev->acpi_nb.notifier_call = radeon_acpi_event;
637 register_acpi_notifier(&rdev->acpi_nb);
638#endif
639 if (radeon_debugfs_pm_init(rdev)) { 624 if (radeon_debugfs_pm_init(rdev)) {
640 DRM_ERROR("Failed to register debugfs file for PM!\n"); 625 DRM_ERROR("Failed to register debugfs file for PM!\n");
641 } 626 }
@@ -666,9 +651,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
666 651
667 device_remove_file(rdev->dev, &dev_attr_power_profile); 652 device_remove_file(rdev->dev, &dev_attr_power_profile);
668 device_remove_file(rdev->dev, &dev_attr_power_method); 653 device_remove_file(rdev->dev, &dev_attr_power_method);
669#ifdef CONFIG_ACPI
670 unregister_acpi_notifier(&rdev->acpi_nb);
671#endif
672 } 654 }
673 655
674 if (rdev->pm.power_state) 656 if (rdev->pm.power_state)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 43c431a2686d..028508859a3b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -43,7 +43,7 @@
43 * produce command buffers which are send to the kernel and 43 * produce command buffers which are send to the kernel and
44 * put in IBs for execution by the requested ring. 44 * put in IBs for execution by the requested ring.
45 */ 45 */
46int radeon_debugfs_sa_init(struct radeon_device *rdev); 46static int radeon_debugfs_sa_init(struct radeon_device *rdev);
47 47
48/** 48/**
49 * radeon_ib_get - request an IB (Indirect Buffer) 49 * radeon_ib_get - request an IB (Indirect Buffer)
@@ -58,7 +58,8 @@ int radeon_debugfs_sa_init(struct radeon_device *rdev);
58 * Returns 0 on success, error on failure. 58 * Returns 0 on success, error on failure.
59 */ 59 */
60int radeon_ib_get(struct radeon_device *rdev, int ring, 60int radeon_ib_get(struct radeon_device *rdev, int ring,
61 struct radeon_ib *ib, unsigned size) 61 struct radeon_ib *ib, struct radeon_vm *vm,
62 unsigned size)
62{ 63{
63 int i, r; 64 int i, r;
64 65
@@ -76,8 +77,15 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
76 ib->ring = ring; 77 ib->ring = ring;
77 ib->fence = NULL; 78 ib->fence = NULL;
78 ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); 79 ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
79 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); 80 ib->vm = vm;
80 ib->vm_id = 0; 81 if (vm) {
82 /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
83 * space and soffset is the offset inside the pool bo
84 */
85 ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
86 } else {
87 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
88 }
81 ib->is_const_ib = false; 89 ib->is_const_ib = false;
82 for (i = 0; i < RADEON_NUM_RINGS; ++i) 90 for (i = 0; i < RADEON_NUM_RINGS; ++i)
83 ib->sync_to[i] = NULL; 91 ib->sync_to[i] = NULL;
@@ -152,6 +160,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
152 if (!need_sync) { 160 if (!need_sync) {
153 radeon_semaphore_free(rdev, &ib->semaphore, NULL); 161 radeon_semaphore_free(rdev, &ib->semaphore, NULL);
154 } 162 }
163 /* if we can't remember our last VM flush then flush now! */
164 if (ib->vm && !ib->vm->last_flush) {
165 radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
166 }
155 if (const_ib) { 167 if (const_ib) {
156 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); 168 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
157 radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); 169 radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -166,6 +178,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
166 if (const_ib) { 178 if (const_ib) {
167 const_ib->fence = radeon_fence_ref(ib->fence); 179 const_ib->fence = radeon_fence_ref(ib->fence);
168 } 180 }
181 /* we just flushed the VM, remember that */
182 if (ib->vm && !ib->vm->last_flush) {
183 ib->vm->last_flush = radeon_fence_ref(ib->fence);
184 }
169 radeon_ring_unlock_commit(rdev, ring); 185 radeon_ring_unlock_commit(rdev, ring);
170 return 0; 186 return 0;
171} 187}
@@ -275,7 +291,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
275 * wptr. The GPU then starts fetching commands and executes 291 * wptr. The GPU then starts fetching commands and executes
276 * them until the pointers are equal again. 292 * them until the pointers are equal again.
277 */ 293 */
278int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); 294static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
279 295
280/** 296/**
281 * radeon_ring_write - write a value to the ring 297 * radeon_ring_write - write a value to the ring
@@ -803,7 +819,7 @@ static struct drm_info_list radeon_debugfs_sa_list[] = {
803 819
804#endif 820#endif
805 821
806int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring) 822static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
807{ 823{
808#if defined(CONFIG_DEBUG_FS) 824#if defined(CONFIG_DEBUG_FS)
809 unsigned i; 825 unsigned i;
@@ -823,7 +839,7 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
823 return 0; 839 return 0;
824} 840}
825 841
826int radeon_debugfs_sa_init(struct radeon_device *rdev) 842static int radeon_debugfs_sa_init(struct radeon_device *rdev)
827{ 843{
828#if defined(CONFIG_DEBUG_FS) 844#if defined(CONFIG_DEBUG_FS)
829 return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1); 845 return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4e771240fdd0..105fde69d045 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -316,7 +316,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
316{ 316{
317 struct radeon_fence *fences[RADEON_NUM_RINGS]; 317 struct radeon_fence *fences[RADEON_NUM_RINGS];
318 unsigned tries[RADEON_NUM_RINGS]; 318 unsigned tries[RADEON_NUM_RINGS];
319 int i, r = -ENOMEM; 319 int i, r;
320 320
321 BUG_ON(align > RADEON_GPU_PAGE_SIZE); 321 BUG_ON(align > RADEON_GPU_PAGE_SIZE);
322 BUG_ON(size > sa_manager->size); 322 BUG_ON(size > sa_manager->size);
@@ -331,7 +331,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
331 INIT_LIST_HEAD(&(*sa_bo)->flist); 331 INIT_LIST_HEAD(&(*sa_bo)->flist);
332 332
333 spin_lock(&sa_manager->wq.lock); 333 spin_lock(&sa_manager->wq.lock);
334 while(1) { 334 do {
335 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 335 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
336 fences[i] = NULL; 336 fences[i] = NULL;
337 tries[i] = 0; 337 tries[i] = 0;
@@ -349,26 +349,22 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
349 /* see if we can skip over some allocations */ 349 /* see if we can skip over some allocations */
350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); 350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
351 351
352 if (!block) {
353 break;
354 }
355
356 spin_unlock(&sa_manager->wq.lock); 352 spin_unlock(&sa_manager->wq.lock);
357 r = radeon_fence_wait_any(rdev, fences, false); 353 r = radeon_fence_wait_any(rdev, fences, false);
358 spin_lock(&sa_manager->wq.lock); 354 spin_lock(&sa_manager->wq.lock);
359 /* if we have nothing to wait for block */ 355 /* if we have nothing to wait for block */
360 if (r == -ENOENT) { 356 if (r == -ENOENT && block) {
361 r = wait_event_interruptible_locked( 357 r = wait_event_interruptible_locked(
362 sa_manager->wq, 358 sa_manager->wq,
363 radeon_sa_event(sa_manager, size, align) 359 radeon_sa_event(sa_manager, size, align)
364 ); 360 );
361
362 } else if (r == -ENOENT) {
363 r = -ENOMEM;
365 } 364 }
366 if (r) {
367 goto out_err;
368 }
369 };
370 365
371out_err: 366 } while (!r);
367
372 spin_unlock(&sa_manager->wq.lock); 368 spin_unlock(&sa_manager->wq.lock);
373 kfree(*sa_bo); 369 kfree(*sa_bo);
374 *sa_bo = NULL; 370 *sa_bo = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 7c16540c10ff..587c09a00ba2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -313,7 +313,7 @@ out_cleanup:
313 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); 313 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
314} 314}
315 315
316void radeon_test_ring_sync2(struct radeon_device *rdev, 316static void radeon_test_ring_sync2(struct radeon_device *rdev,
317 struct radeon_ring *ringA, 317 struct radeon_ring *ringA,
318 struct radeon_ring *ringB, 318 struct radeon_ring *ringB,
319 struct radeon_ring *ringC) 319 struct radeon_ring *ringC)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5b71c716d83f..5ebe1b3e5db2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -549,7 +549,7 @@ static struct ttm_backend_func radeon_backend_func = {
549 .destroy = &radeon_ttm_backend_destroy, 549 .destroy = &radeon_ttm_backend_destroy,
550}; 550};
551 551
552struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, 552static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
553 unsigned long size, uint32_t page_flags, 553 unsigned long size, uint32_t page_flags,
554 struct page *dummy_read_page) 554 struct page *dummy_read_page)
555{ 555{
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 2752f7f78237..73051ce3121e 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -242,7 +242,7 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
242 return -1; 242 return -1;
243} 243}
244 244
245void rs400_gpu_init(struct radeon_device *rdev) 245static void rs400_gpu_init(struct radeon_device *rdev)
246{ 246{
247 /* FIXME: is this correct ? */ 247 /* FIXME: is this correct ? */
248 r420_pipes_init(rdev); 248 r420_pipes_init(rdev);
@@ -252,7 +252,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
252 } 252 }
253} 253}
254 254
255void rs400_mc_init(struct radeon_device *rdev) 255static void rs400_mc_init(struct radeon_device *rdev)
256{ 256{
257 u64 base; 257 u64 base;
258 258
@@ -370,7 +370,7 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
370#endif 370#endif
371} 371}
372 372
373void rs400_mc_program(struct radeon_device *rdev) 373static void rs400_mc_program(struct radeon_device *rdev)
374{ 374{
375 struct r100_mc_save save; 375 struct r100_mc_save save;
376 376
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5301b3df8466..dc8d021a999b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -43,22 +43,30 @@
43 43
44#include "rs600_reg_safe.h" 44#include "rs600_reg_safe.h"
45 45
46void rs600_gpu_init(struct radeon_device *rdev); 46static void rs600_gpu_init(struct radeon_device *rdev);
47int rs600_mc_wait_for_idle(struct radeon_device *rdev); 47int rs600_mc_wait_for_idle(struct radeon_device *rdev);
48 48
49static const u32 crtc_offsets[2] =
50{
51 0,
52 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
53};
54
49void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) 55void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
50{ 56{
51 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
52 int i; 57 int i;
53 58
54 if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) { 59 if (crtc >= rdev->num_crtc)
60 return;
61
62 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
55 for (i = 0; i < rdev->usec_timeout; i++) { 63 for (i = 0; i < rdev->usec_timeout; i++) {
56 if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)) 64 if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
57 break; 65 break;
58 udelay(1); 66 udelay(1);
59 } 67 }
60 for (i = 0; i < rdev->usec_timeout; i++) { 68 for (i = 0; i < rdev->usec_timeout; i++) {
61 if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK) 69 if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
62 break; 70 break;
63 udelay(1); 71 udelay(1);
64 } 72 }
@@ -424,7 +432,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
424 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 432 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
425} 433}
426 434
427int rs600_gart_init(struct radeon_device *rdev) 435static int rs600_gart_init(struct radeon_device *rdev)
428{ 436{
429 int r; 437 int r;
430 438
@@ -506,7 +514,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
506 return 0; 514 return 0;
507} 515}
508 516
509void rs600_gart_disable(struct radeon_device *rdev) 517static void rs600_gart_disable(struct radeon_device *rdev)
510{ 518{
511 u32 tmp; 519 u32 tmp;
512 520
@@ -517,7 +525,7 @@ void rs600_gart_disable(struct radeon_device *rdev)
517 radeon_gart_table_vram_unpin(rdev); 525 radeon_gart_table_vram_unpin(rdev);
518} 526}
519 527
520void rs600_gart_fini(struct radeon_device *rdev) 528static void rs600_gart_fini(struct radeon_device *rdev)
521{ 529{
522 radeon_gart_fini(rdev); 530 radeon_gart_fini(rdev);
523 rs600_gart_disable(rdev); 531 rs600_gart_disable(rdev);
@@ -567,9 +575,6 @@ int rs600_irq_set(struct radeon_device *rdev)
567 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 575 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
568 tmp |= S_000040_SW_INT_EN(1); 576 tmp |= S_000040_SW_INT_EN(1);
569 } 577 }
570 if (rdev->irq.gui_idle) {
571 tmp |= S_000040_GUI_IDLE(1);
572 }
573 if (rdev->irq.crtc_vblank_int[0] || 578 if (rdev->irq.crtc_vblank_int[0] ||
574 atomic_read(&rdev->irq.pflip[0])) { 579 atomic_read(&rdev->irq.pflip[0])) {
575 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); 580 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
@@ -602,12 +607,6 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
602 uint32_t irq_mask = S_000044_SW_INT(1); 607 uint32_t irq_mask = S_000044_SW_INT(1);
603 u32 tmp; 608 u32 tmp;
604 609
605 /* the interrupt works, but the status bit is permanently asserted */
606 if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
607 if (!rdev->irq.gui_idle_acked)
608 irq_mask |= S_000044_GUI_IDLE_STAT(1);
609 }
610
611 if (G_000044_DISPLAY_INT_STAT(irqs)) { 610 if (G_000044_DISPLAY_INT_STAT(irqs)) {
612 rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); 611 rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
613 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 612 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -667,9 +666,6 @@ int rs600_irq_process(struct radeon_device *rdev)
667 bool queue_hotplug = false; 666 bool queue_hotplug = false;
668 bool queue_hdmi = false; 667 bool queue_hdmi = false;
669 668
670 /* reset gui idle ack. the status bit is broken */
671 rdev->irq.gui_idle_acked = false;
672
673 status = rs600_irq_ack(rdev); 669 status = rs600_irq_ack(rdev);
674 if (!status && 670 if (!status &&
675 !rdev->irq.stat_regs.r500.disp_int && 671 !rdev->irq.stat_regs.r500.disp_int &&
@@ -683,11 +679,6 @@ int rs600_irq_process(struct radeon_device *rdev)
683 if (G_000044_SW_INT(status)) { 679 if (G_000044_SW_INT(status)) {
684 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 680 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
685 } 681 }
686 /* GUI idle */
687 if (G_000040_GUI_IDLE(status)) {
688 rdev->irq.gui_idle_acked = true;
689 wake_up(&rdev->irq.idle_queue);
690 }
691 /* Vertical blank interrupts */ 682 /* Vertical blank interrupts */
692 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 683 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
693 if (rdev->irq.crtc_vblank_int[0]) { 684 if (rdev->irq.crtc_vblank_int[0]) {
@@ -721,8 +712,6 @@ int rs600_irq_process(struct radeon_device *rdev)
721 } 712 }
722 status = rs600_irq_ack(rdev); 713 status = rs600_irq_ack(rdev);
723 } 714 }
724 /* reset gui idle ack. the status bit is broken */
725 rdev->irq.gui_idle_acked = false;
726 if (queue_hotplug) 715 if (queue_hotplug)
727 schedule_work(&rdev->hotplug_work); 716 schedule_work(&rdev->hotplug_work);
728 if (queue_hdmi) 717 if (queue_hdmi)
@@ -764,7 +753,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
764 return -1; 753 return -1;
765} 754}
766 755
767void rs600_gpu_init(struct radeon_device *rdev) 756static void rs600_gpu_init(struct radeon_device *rdev)
768{ 757{
769 r420_pipes_init(rdev); 758 r420_pipes_init(rdev);
770 /* Wait for mc idle */ 759 /* Wait for mc idle */
@@ -772,7 +761,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
772 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 761 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
773} 762}
774 763
775void rs600_mc_init(struct radeon_device *rdev) 764static void rs600_mc_init(struct radeon_device *rdev)
776{ 765{
777 u64 base; 766 u64 base;
778 767
@@ -834,7 +823,7 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
834 WREG32(R_000074_MC_IND_DATA, v); 823 WREG32(R_000074_MC_IND_DATA, v);
835} 824}
836 825
837void rs600_debugfs(struct radeon_device *rdev) 826static void rs600_debugfs(struct radeon_device *rdev)
838{ 827{
839 if (r100_debugfs_rbbm_init(rdev)) 828 if (r100_debugfs_rbbm_init(rdev))
840 DRM_ERROR("Failed to register debugfs file for RBBM !\n"); 829 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3b663fcfe061..5cd5aceb69fa 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -145,7 +145,7 @@ void rs690_pm_info(struct radeon_device *rdev)
145 rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); 145 rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
146} 146}
147 147
148void rs690_mc_init(struct radeon_device *rdev) 148static void rs690_mc_init(struct radeon_device *rdev)
149{ 149{
150 u64 base; 150 u64 base;
151 151
@@ -224,7 +224,7 @@ struct rs690_watermark {
224 fixed20_12 sclk; 224 fixed20_12 sclk;
225}; 225};
226 226
227void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, 227static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
228 struct radeon_crtc *crtc, 228 struct radeon_crtc *crtc,
229 struct rs690_watermark *wm) 229 struct rs690_watermark *wm)
230{ 230{
@@ -581,7 +581,7 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
581 WREG32(R_000078_MC_INDEX, 0x7F); 581 WREG32(R_000078_MC_INDEX, 0x7F);
582} 582}
583 583
584void rs690_mc_program(struct radeon_device *rdev) 584static void rs690_mc_program(struct radeon_device *rdev)
585{ 585{
586 struct rv515_mc_save save; 586 struct rv515_mc_save save;
587 587
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index aa8ef491ef3c..2d75d30be5b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -35,9 +35,9 @@
35#include "rv515_reg_safe.h" 35#include "rv515_reg_safe.h"
36 36
37/* This files gather functions specifics to: rv515 */ 37/* This files gather functions specifics to: rv515 */
38int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); 38static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
39int rv515_debugfs_ga_info_init(struct radeon_device *rdev); 39static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
40void rv515_gpu_init(struct radeon_device *rdev); 40static void rv515_gpu_init(struct radeon_device *rdev);
41int rv515_mc_wait_for_idle(struct radeon_device *rdev); 41int rv515_mc_wait_for_idle(struct radeon_device *rdev);
42 42
43void rv515_debugfs(struct radeon_device *rdev) 43void rv515_debugfs(struct radeon_device *rdev)
@@ -143,7 +143,7 @@ void rv515_vga_render_disable(struct radeon_device *rdev)
143 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); 143 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
144} 144}
145 145
146void rv515_gpu_init(struct radeon_device *rdev) 146static void rv515_gpu_init(struct radeon_device *rdev)
147{ 147{
148 unsigned pipe_select_current, gb_pipe_select, tmp; 148 unsigned pipe_select_current, gb_pipe_select, tmp;
149 149
@@ -189,7 +189,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
189 } 189 }
190} 190}
191 191
192void rv515_mc_init(struct radeon_device *rdev) 192static void rv515_mc_init(struct radeon_device *rdev)
193{ 193{
194 194
195 rv515_vram_get_type(rdev); 195 rv515_vram_get_type(rdev);
@@ -261,7 +261,7 @@ static struct drm_info_list rv515_ga_info_list[] = {
261}; 261};
262#endif 262#endif
263 263
264int rv515_debugfs_pipes_info_init(struct radeon_device *rdev) 264static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
265{ 265{
266#if defined(CONFIG_DEBUG_FS) 266#if defined(CONFIG_DEBUG_FS)
267 return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1); 267 return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
@@ -270,7 +270,7 @@ int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
270#endif 270#endif
271} 271}
272 272
273int rv515_debugfs_ga_info_init(struct radeon_device *rdev) 273static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
274{ 274{
275#if defined(CONFIG_DEBUG_FS) 275#if defined(CONFIG_DEBUG_FS)
276 return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1); 276 return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
@@ -310,7 +310,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
310 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); 310 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
311} 311}
312 312
313void rv515_mc_program(struct radeon_device *rdev) 313static void rv515_mc_program(struct radeon_device *rdev)
314{ 314{
315 struct rv515_mc_save save; 315 struct rv515_mc_save save;
316 316
@@ -787,7 +787,7 @@ struct rv515_watermark {
787 fixed20_12 sclk; 787 fixed20_12 sclk;
788}; 788};
789 789
790void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, 790static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
791 struct radeon_crtc *crtc, 791 struct radeon_crtc *crtc,
792 struct rv515_watermark *wm) 792 struct rv515_watermark *wm)
793{ 793{
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index ca8ffec10ff6..2469afe11b85 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -124,7 +124,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
124/* 124/*
125 * GART 125 * GART
126 */ 126 */
127int rv770_pcie_gart_enable(struct radeon_device *rdev) 127static int rv770_pcie_gart_enable(struct radeon_device *rdev)
128{ 128{
129 u32 tmp; 129 u32 tmp;
130 int r, i; 130 int r, i;
@@ -175,7 +175,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
175 return 0; 175 return 0;
176} 176}
177 177
178void rv770_pcie_gart_disable(struct radeon_device *rdev) 178static void rv770_pcie_gart_disable(struct radeon_device *rdev)
179{ 179{
180 u32 tmp; 180 u32 tmp;
181 int i; 181 int i;
@@ -201,7 +201,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
201 radeon_gart_table_vram_unpin(rdev); 201 radeon_gart_table_vram_unpin(rdev);
202} 202}
203 203
204void rv770_pcie_gart_fini(struct radeon_device *rdev) 204static void rv770_pcie_gart_fini(struct radeon_device *rdev)
205{ 205{
206 radeon_gart_fini(rdev); 206 radeon_gart_fini(rdev);
207 rv770_pcie_gart_disable(rdev); 207 rv770_pcie_gart_disable(rdev);
@@ -209,7 +209,7 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
209} 209}
210 210
211 211
212void rv770_agp_enable(struct radeon_device *rdev) 212static void rv770_agp_enable(struct radeon_device *rdev)
213{ 213{
214 u32 tmp; 214 u32 tmp;
215 int i; 215 int i;
@@ -839,7 +839,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
839 } 839 }
840} 840}
841 841
842int rv770_mc_init(struct radeon_device *rdev) 842static int rv770_mc_init(struct radeon_device *rdev)
843{ 843{
844 u32 tmp; 844 u32 tmp;
845 int chansize, numchan; 845 int chansize, numchan;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 0139e227e3c7..c76825ffa37f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1806,13 +1806,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1806#endif 1806#endif
1807 (ib->gpu_addr & 0xFFFFFFFC)); 1807 (ib->gpu_addr & 0xFFFFFFFC));
1808 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 1808 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
1809 radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24)); 1809 radeon_ring_write(ring, ib->length_dw |
1810 (ib->vm ? (ib->vm->id << 24) : 0));
1810 1811
1811 if (!ib->is_const_ib) { 1812 if (!ib->is_const_ib) {
1812 /* flush read cache over gart for this vmid */ 1813 /* flush read cache over gart for this vmid */
1813 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1814 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1814 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 1815 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
1815 radeon_ring_write(ring, ib->vm_id); 1816 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
1816 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 1817 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1817 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | 1818 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
1818 PACKET3_TC_ACTION_ENA | 1819 PACKET3_TC_ACTION_ENA |
@@ -2363,7 +2364,7 @@ void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
2363 WREG32(VM_INVALIDATE_REQUEST, 1); 2364 WREG32(VM_INVALIDATE_REQUEST, 1);
2364} 2365}
2365 2366
2366int si_pcie_gart_enable(struct radeon_device *rdev) 2367static int si_pcie_gart_enable(struct radeon_device *rdev)
2367{ 2368{
2368 int r, i; 2369 int r, i;
2369 2370
@@ -2425,7 +2426,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
2425 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 2426 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2426 (u32)(rdev->dummy_page.addr >> 12)); 2427 (u32)(rdev->dummy_page.addr >> 12));
2427 WREG32(VM_CONTEXT1_CNTL2, 0); 2428 WREG32(VM_CONTEXT1_CNTL2, 0);
2428 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 2429 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2429 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 2430 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2430 2431
2431 si_pcie_gart_tlb_flush(rdev); 2432 si_pcie_gart_tlb_flush(rdev);
@@ -2436,7 +2437,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
2436 return 0; 2437 return 0;
2437} 2438}
2438 2439
2439void si_pcie_gart_disable(struct radeon_device *rdev) 2440static void si_pcie_gart_disable(struct radeon_device *rdev)
2440{ 2441{
2441 /* Disable all tables */ 2442 /* Disable all tables */
2442 WREG32(VM_CONTEXT0_CNTL, 0); 2443 WREG32(VM_CONTEXT0_CNTL, 0);
@@ -2455,7 +2456,7 @@ void si_pcie_gart_disable(struct radeon_device *rdev)
2455 radeon_gart_table_vram_unpin(rdev); 2456 radeon_gart_table_vram_unpin(rdev);
2456} 2457}
2457 2458
2458void si_pcie_gart_fini(struct radeon_device *rdev) 2459static void si_pcie_gart_fini(struct radeon_device *rdev)
2459{ 2460{
2460 si_pcie_gart_disable(rdev); 2461 si_pcie_gart_disable(rdev);
2461 radeon_gart_table_vram_free(rdev); 2462 radeon_gart_table_vram_free(rdev);
@@ -2788,41 +2789,84 @@ void si_vm_fini(struct radeon_device *rdev)
2788{ 2789{
2789} 2790}
2790 2791
2791int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id) 2792/**
2793 * si_vm_set_page - update the page tables using the CP
2794 *
2795 * @rdev: radeon_device pointer
2796 * @pe: addr of the page entry
2797 * @addr: dst addr to write into pe
2798 * @count: number of page entries to update
2799 * @incr: increase next addr by incr bytes
2800 * @flags: access flags
2801 *
2802 * Update the page tables using the CP (cayman-si).
2803 */
2804void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2805 uint64_t addr, unsigned count,
2806 uint32_t incr, uint32_t flags)
2792{ 2807{
2793 if (id < 8) 2808 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2794 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12); 2809 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2795 else 2810 int i;
2796 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2), 2811 uint64_t value;
2797 vm->pt_gpu_addr >> 12); 2812
2798 /* flush hdp cache */ 2813 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
2799 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 2814 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2800 /* bits 0-15 are the VM contexts0-15 */ 2815 WRITE_DATA_DST_SEL(1)));
2801 WREG32(VM_INVALIDATE_REQUEST, 1 << id); 2816 radeon_ring_write(ring, pe);
2802 return 0; 2817 radeon_ring_write(ring, upper_32_bits(pe));
2818 for (i = 0; i < count; ++i) {
2819 if (flags & RADEON_VM_PAGE_SYSTEM) {
2820 value = radeon_vm_map_gart(rdev, addr);
2821 value &= 0xFFFFFFFFFFFFF000ULL;
2822 } else if (flags & RADEON_VM_PAGE_VALID)
2823 value = addr;
2824 else
2825 value = 0;
2826 addr += incr;
2827 value |= r600_flags;
2828 radeon_ring_write(ring, value);
2829 radeon_ring_write(ring, upper_32_bits(value));
2830 }
2803} 2831}
2804 2832
2805void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) 2833void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2806{ 2834{
2807 if (vm->id < 8) 2835 struct radeon_ring *ring = &rdev->ring[ridx];
2808 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
2809 else
2810 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
2811 /* flush hdp cache */
2812 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2813 /* bits 0-15 are the VM contexts0-15 */
2814 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
2815}
2816 2836
2817void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm) 2837 if (vm == NULL)
2818{
2819 if (vm->id == -1)
2820 return; 2838 return;
2821 2839
2840 /* write new base address */
2841 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2842 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2843 WRITE_DATA_DST_SEL(0)));
2844
2845 if (vm->id < 8) {
2846 radeon_ring_write(ring,
2847 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
2848 } else {
2849 radeon_ring_write(ring,
2850 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
2851 }
2852 radeon_ring_write(ring, 0);
2853 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2854
2822 /* flush hdp cache */ 2855 /* flush hdp cache */
2823 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 2856 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2857 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2858 WRITE_DATA_DST_SEL(0)));
2859 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
2860 radeon_ring_write(ring, 0);
2861 radeon_ring_write(ring, 0x1);
2862
2824 /* bits 0-15 are the VM contexts0-15 */ 2863 /* bits 0-15 are the VM contexts0-15 */
2825 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id); 2864 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2865 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2866 WRITE_DATA_DST_SEL(0)));
2867 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2868 radeon_ring_write(ring, 0);
2869 radeon_ring_write(ring, 1 << vm->id);
2826} 2870}
2827 2871
2828/* 2872/*
@@ -3199,10 +3243,6 @@ int si_irq_set(struct radeon_device *rdev)
3199 DRM_DEBUG("si_irq_set: hpd 6\n"); 3243 DRM_DEBUG("si_irq_set: hpd 6\n");
3200 hpd6 |= DC_HPDx_INT_EN; 3244 hpd6 |= DC_HPDx_INT_EN;
3201 } 3245 }
3202 if (rdev->irq.gui_idle) {
3203 DRM_DEBUG("gui idle\n");
3204 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3205 }
3206 3246
3207 WREG32(CP_INT_CNTL_RING0, cp_int_cntl); 3247 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
3208 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); 3248 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
@@ -3658,7 +3698,6 @@ restart_ih:
3658 break; 3698 break;
3659 case 233: /* GUI IDLE */ 3699 case 233: /* GUI IDLE */
3660 DRM_DEBUG("IH: GUI idle\n"); 3700 DRM_DEBUG("IH: GUI idle\n");
3661 wake_up(&rdev->irq.idle_queue);
3662 break; 3701 break;
3663 default: 3702 default:
3664 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3703 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index ef4815c27b1c..7d2a20e56577 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -812,6 +812,21 @@
812#define PACKET3_DRAW_INDEX_OFFSET_2 0x35 812#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
813#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 813#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
814#define PACKET3_WRITE_DATA 0x37 814#define PACKET3_WRITE_DATA 0x37
815#define WRITE_DATA_DST_SEL(x) ((x) << 8)
816 /* 0 - register
817 * 1 - memory (sync - via GRBM)
818 * 2 - tc/l2
819 * 3 - gds
820 * 4 - reserved
821 * 5 - memory (async - direct)
822 */
823#define WR_ONE_ADDR (1 << 16)
824#define WR_CONFIRM (1 << 20)
825#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
826 /* 0 - me
827 * 1 - pfp
828 * 2 - ce
829 */
815#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38 830#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
816#define PACKET3_MEM_SEMAPHORE 0x39 831#define PACKET3_MEM_SEMAPHORE 0x39
817#define PACKET3_MPEG_INDEX 0x3A 832#define PACKET3_MPEG_INDEX 0x3A