author     Dave Airlie <airlied@redhat.com>    2010-10-05 22:57:54 -0400
committer  Dave Airlie <airlied@redhat.com>    2010-10-05 22:57:54 -0400
commit     26bf62e47261142d528a6109fdd671a2e280b4ea
tree       d7f9622fba650ddd402df7614d8b7175d4be5227
parent     96a03fce54af40b4f0820cd729608bc32c9b8949
parent     d7ccd8fc11700502b5a104b7bad595b492a3aa1b
Merge branch 'drm-radeon-next' of ../drm-radeon-next into drm-core-next
* 'drm-radeon-next' of ../drm-radeon-next:
drm/radeon/kms: add drm blit support for evergreen
drm/radeon: Modify radeon_pm_in_vbl to use radeon_get_crtc_scanoutpos()
drm/radeon: Add function for display scanout position query.
drm/radeon/kms: rework spread spectrum handling
drm/radeon/kms: remove new pll algo
drm/radeon/kms: remove some pll algo flags
drm/radeon/kms: prefer high post dividers in legacy pll algo
drm/radeon/kms: properly handle 40 bit MC addresses in the cursor code
drm/radeon: add properties to configure the width of the underscan borders
drm/radeon/kms/r6xx+: use new style fencing (v3)
drm/radeon/kms: enable writeback (v2)
drm/radeon/kms: clean up r6xx/r7xx blit init (v2)
34 files changed, 2140 insertions, 819 deletions
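
Several items in the shortlog above ("enable writeback (v2)", "use new style fencing (v3)") and the evergreen.c hunks further down share one mechanism: instead of reading ring/IH pointers and fence values through MMIO registers, the GPU writes them into a buffer in system memory (`rdev->wb.wb[]`) and the driver reads them there when `rdev->wb.enabled` is set. The sketch below is illustrative only and not part of the patch; `wb_demo`, `mmio_read_ih_wptr()` and `get_ih_wptr()` are made-up stand-ins for the real `rdev->wb` fields and `RREG32()` calls that appear in the diff.

```c
#include <stdint.h>

struct wb_demo {
	int enabled;          /* mirrors rdev->wb.enabled */
	uint32_t wb[256];     /* mirrors rdev->wb.wb[], updated by the GPU */
};

/* hypothetical stand-in for RREG32(IH_RB_WPTR) */
static uint32_t mmio_read_ih_wptr(void)
{
	return 0;
}

static uint32_t get_ih_wptr(const struct wb_demo *wb, unsigned int offset_dw)
{
	if (wb->enabled)
		return wb->wb[offset_dw];  /* plain memory read, no MMIO round trip */
	return mmio_read_ih_wptr();        /* fallback: register read */
}

int main(void)
{
	struct wb_demo demo = { .enabled = 1 };

	demo.wb[64] = 0x40;                /* pretend the GPU wrote a wptr here */
	return get_ih_wptr(&demo, 64) == 0x40 ? 0 : 1;
}
```
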
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index aebe00875041..6cae4f2028d2 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
65 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ | 65 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ |
66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
68 | evergreen.o evergreen_cs.o | 68 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o |
69 | 69 | ||
70 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 70 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
71 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 71 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 501e5286ec3f..037e3260cb7c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -398,65 +398,76 @@ static void atombios_disable_ss(struct drm_crtc *crtc) | |||
398 | 398 | ||
399 | 399 | ||
400 | union atom_enable_ss { | 400 | union atom_enable_ss { |
401 | ENABLE_LVDS_SS_PARAMETERS legacy; | 401 | ENABLE_LVDS_SS_PARAMETERS lvds_ss; |
402 | ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; | ||
402 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; | 403 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; |
404 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; | ||
403 | }; | 405 | }; |
404 | 406 | ||
405 | static void atombios_enable_ss(struct drm_crtc *crtc) | 407 | static void atombios_crtc_program_ss(struct drm_crtc *crtc, |
408 | int enable, | ||
409 | int pll_id, | ||
410 | struct radeon_atom_ss *ss) | ||
406 | { | 411 | { |
407 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
408 | struct drm_device *dev = crtc->dev; | 412 | struct drm_device *dev = crtc->dev; |
409 | struct radeon_device *rdev = dev->dev_private; | 413 | struct radeon_device *rdev = dev->dev_private; |
410 | struct drm_encoder *encoder = NULL; | ||
411 | struct radeon_encoder *radeon_encoder = NULL; | ||
412 | struct radeon_encoder_atom_dig *dig = NULL; | ||
413 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); | 414 | int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); |
414 | union atom_enable_ss args; | 415 | union atom_enable_ss args; |
415 | uint16_t percentage = 0; | ||
416 | uint8_t type = 0, step = 0, delay = 0, range = 0; | ||
417 | 416 | ||
418 | /* XXX add ss support for DCE4 */ | 417 | memset(&args, 0, sizeof(args)); |
419 | if (ASIC_IS_DCE4(rdev)) | ||
420 | return; | ||
421 | 418 | ||
422 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 419 | if (ASIC_IS_DCE4(rdev)) { |
423 | if (encoder->crtc == crtc) { | 420 | args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
424 | radeon_encoder = to_radeon_encoder(encoder); | 421 | args.v2.ucSpreadSpectrumType = ss->type; |
425 | /* only enable spread spectrum on LVDS */ | 422 | switch (pll_id) { |
426 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 423 | case ATOM_PPLL1: |
427 | dig = radeon_encoder->enc_priv; | 424 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; |
428 | if (dig && dig->ss) { | 425 | args.v2.usSpreadSpectrumAmount = ss->amount; |
429 | percentage = dig->ss->percentage; | 426 | args.v2.usSpreadSpectrumStep = ss->step; |
430 | type = dig->ss->type; | 427 | break; |
431 | step = dig->ss->step; | 428 | case ATOM_PPLL2: |
432 | delay = dig->ss->delay; | 429 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; |
433 | range = dig->ss->range; | 430 | args.v2.usSpreadSpectrumAmount = ss->amount; |
434 | } else | 431 | args.v2.usSpreadSpectrumStep = ss->step; |
435 | return; | ||
436 | } else | ||
437 | return; | ||
438 | break; | 432 | break; |
433 | case ATOM_DCPLL: | ||
434 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; | ||
435 | args.v2.usSpreadSpectrumAmount = 0; | ||
436 | args.v2.usSpreadSpectrumStep = 0; | ||
437 | break; | ||
438 | case ATOM_PPLL_INVALID: | ||
439 | return; | ||
439 | } | 440 | } |
440 | } | 441 | args.v2.ucEnable = enable; |
441 | 442 | } else if (ASIC_IS_DCE3(rdev)) { | |
442 | if (!radeon_encoder) | 443 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
443 | return; | 444 | args.v1.ucSpreadSpectrumType = ss->type; |
444 | 445 | args.v1.ucSpreadSpectrumStep = ss->step; | |
445 | memset(&args, 0, sizeof(args)); | 446 | args.v1.ucSpreadSpectrumDelay = ss->delay; |
446 | if (ASIC_IS_AVIVO(rdev)) { | 447 | args.v1.ucSpreadSpectrumRange = ss->range; |
447 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | 448 | args.v1.ucPpll = pll_id; |
448 | args.v1.ucSpreadSpectrumType = type; | 449 | args.v1.ucEnable = enable; |
449 | args.v1.ucSpreadSpectrumStep = step; | 450 | } else if (ASIC_IS_AVIVO(rdev)) { |
450 | args.v1.ucSpreadSpectrumDelay = delay; | 451 | if (enable == ATOM_DISABLE) { |
451 | args.v1.ucSpreadSpectrumRange = range; | 452 | atombios_disable_ss(crtc); |
452 | args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 453 | return; |
453 | args.v1.ucEnable = ATOM_ENABLE; | 454 | } |
455 | args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | ||
456 | args.lvds_ss_2.ucSpreadSpectrumType = ss->type; | ||
457 | args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; | ||
458 | args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; | ||
459 | args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; | ||
460 | args.lvds_ss_2.ucEnable = enable; | ||
454 | } else { | 461 | } else { |
455 | args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); | 462 | if (enable == ATOM_DISABLE) { |
456 | args.legacy.ucSpreadSpectrumType = type; | 463 | atombios_disable_ss(crtc); |
457 | args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; | 464 | return; |
458 | args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; | 465 | } |
459 | args.legacy.ucEnable = ATOM_ENABLE; | 466 | args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
467 | args.lvds_ss.ucSpreadSpectrumType = ss->type; | ||
468 | args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; | ||
469 | args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; | ||
470 | args.lvds_ss.ucEnable = enable; | ||
460 | } | 471 | } |
461 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 472 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
462 | } | 473 | } |
@@ -468,7 +479,9 @@ union adjust_pixel_clock { | |||
468 | 479 | ||
469 | static u32 atombios_adjust_pll(struct drm_crtc *crtc, | 480 | static u32 atombios_adjust_pll(struct drm_crtc *crtc, |
470 | struct drm_display_mode *mode, | 481 | struct drm_display_mode *mode, |
471 | struct radeon_pll *pll) | 482 | struct radeon_pll *pll, |
483 | bool ss_enabled, | ||
484 | struct radeon_atom_ss *ss) | ||
472 | { | 485 | { |
473 | struct drm_device *dev = crtc->dev; | 486 | struct drm_device *dev = crtc->dev; |
474 | struct radeon_device *rdev = dev->dev_private; | 487 | struct radeon_device *rdev = dev->dev_private; |
@@ -482,40 +495,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
482 | /* reset the pll flags */ | 495 | /* reset the pll flags */ |
483 | pll->flags = 0; | 496 | pll->flags = 0; |
484 | 497 | ||
485 | /* select the PLL algo */ | ||
486 | if (ASIC_IS_AVIVO(rdev)) { | ||
487 | if (radeon_new_pll == 0) | ||
488 | pll->algo = PLL_ALGO_LEGACY; | ||
489 | else | ||
490 | pll->algo = PLL_ALGO_NEW; | ||
491 | } else { | ||
492 | if (radeon_new_pll == 1) | ||
493 | pll->algo = PLL_ALGO_NEW; | ||
494 | else | ||
495 | pll->algo = PLL_ALGO_LEGACY; | ||
496 | } | ||
497 | |||
498 | if (ASIC_IS_AVIVO(rdev)) { | 498 | if (ASIC_IS_AVIVO(rdev)) { |
499 | if ((rdev->family == CHIP_RS600) || | 499 | if ((rdev->family == CHIP_RS600) || |
500 | (rdev->family == CHIP_RS690) || | 500 | (rdev->family == CHIP_RS690) || |
501 | (rdev->family == CHIP_RS740)) | 501 | (rdev->family == CHIP_RS740)) |
502 | pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ | 502 | pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ |
503 | RADEON_PLL_PREFER_CLOSEST_LOWER); | 503 | RADEON_PLL_PREFER_CLOSEST_LOWER); |
504 | 504 | } else | |
505 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | ||
506 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | ||
507 | else | ||
508 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | ||
509 | } else { | ||
510 | pll->flags |= RADEON_PLL_LEGACY; | 505 | pll->flags |= RADEON_PLL_LEGACY; |
511 | 506 | ||
512 | if (mode->clock > 200000) /* range limits??? */ | ||
513 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | ||
514 | else | ||
515 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | ||
516 | |||
517 | } | ||
518 | |||
519 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 507 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
520 | if (encoder->crtc == crtc) { | 508 | if (encoder->crtc == crtc) { |
521 | radeon_encoder = to_radeon_encoder(encoder); | 509 | radeon_encoder = to_radeon_encoder(encoder); |
@@ -531,29 +519,22 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
531 | } | 519 | } |
532 | } | 520 | } |
533 | 521 | ||
522 | /* use recommended ref_div for ss */ | ||
523 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
524 | if (ss_enabled) { | ||
525 | if (ss->refdiv) { | ||
526 | pll->flags |= RADEON_PLL_USE_REF_DIV; | ||
527 | pll->reference_div = ss->refdiv; | ||
528 | } | ||
529 | } | ||
530 | } | ||
531 | |||
534 | if (ASIC_IS_AVIVO(rdev)) { | 532 | if (ASIC_IS_AVIVO(rdev)) { |
535 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 533 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
536 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 534 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
537 | adjusted_clock = mode->clock * 2; | 535 | adjusted_clock = mode->clock * 2; |
538 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | 536 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
539 | pll->algo = PLL_ALGO_LEGACY; | ||
540 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 537 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
541 | } | ||
542 | /* There is some evidence (often anecdotal) that RV515/RV620 LVDS | ||
543 | * (on some boards at least) prefers the legacy algo. I'm not | ||
544 | * sure whether this should handled generically or on a | ||
545 | * case-by-case quirk basis. Both algos should work fine in the | ||
546 | * majority of cases. | ||
547 | */ | ||
548 | if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && | ||
549 | ((rdev->family == CHIP_RV515) || | ||
550 | (rdev->family == CHIP_RV620))) { | ||
551 | /* allow the user to overrride just in case */ | ||
552 | if (radeon_new_pll == 1) | ||
553 | pll->algo = PLL_ALGO_NEW; | ||
554 | else | ||
555 | pll->algo = PLL_ALGO_LEGACY; | ||
556 | } | ||
557 | } else { | 538 | } else { |
558 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 539 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
559 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 540 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -589,9 +570,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
589 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | 570 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; |
590 | args.v1.ucEncodeMode = encoder_mode; | 571 | args.v1.ucEncodeMode = encoder_mode; |
591 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 572 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
592 | /* may want to enable SS on DP eventually */ | 573 | if (ss_enabled) |
593 | /* args.v1.ucConfig |= | 574 | args.v1.ucConfig |= |
594 | ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/ | 575 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; |
595 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | 576 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { |
596 | args.v1.ucConfig |= | 577 | args.v1.ucConfig |= |
597 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | 578 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; |
@@ -608,11 +589,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
608 | args.v3.sInput.ucDispPllConfig = 0; | 589 | args.v3.sInput.ucDispPllConfig = 0; |
609 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 590 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
610 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 591 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
611 | |||
612 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 592 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
613 | /* may want to enable SS on DP/eDP eventually */ | 593 | if (ss_enabled) |
614 | /*args.v3.sInput.ucDispPllConfig |= | 594 | args.v3.sInput.ucDispPllConfig |= |
615 | DISPPLL_CONFIG_SS_ENABLE;*/ | 595 | DISPPLL_CONFIG_SS_ENABLE; |
616 | args.v3.sInput.ucDispPllConfig |= | 596 | args.v3.sInput.ucDispPllConfig |= |
617 | DISPPLL_CONFIG_COHERENT_MODE; | 597 | DISPPLL_CONFIG_COHERENT_MODE; |
618 | /* 16200 or 27000 */ | 598 | /* 16200 or 27000 */ |
@@ -632,17 +612,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
632 | } | 612 | } |
633 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 613 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
634 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 614 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
635 | /* may want to enable SS on DP/eDP eventually */ | 615 | if (ss_enabled) |
636 | /*args.v3.sInput.ucDispPllConfig |= | 616 | args.v3.sInput.ucDispPllConfig |= |
637 | DISPPLL_CONFIG_SS_ENABLE;*/ | 617 | DISPPLL_CONFIG_SS_ENABLE; |
638 | args.v3.sInput.ucDispPllConfig |= | 618 | args.v3.sInput.ucDispPllConfig |= |
639 | DISPPLL_CONFIG_COHERENT_MODE; | 619 | DISPPLL_CONFIG_COHERENT_MODE; |
640 | /* 16200 or 27000 */ | 620 | /* 16200 or 27000 */ |
641 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | 621 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); |
642 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | 622 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { |
643 | /* want to enable SS on LVDS eventually */ | 623 | if (ss_enabled) |
644 | /*args.v3.sInput.ucDispPllConfig |= | 624 | args.v3.sInput.ucDispPllConfig |= |
645 | DISPPLL_CONFIG_SS_ENABLE;*/ | 625 | DISPPLL_CONFIG_SS_ENABLE; |
646 | } else { | 626 | } else { |
647 | if (mode->clock > 165000) | 627 | if (mode->clock > 165000) |
648 | args.v3.sInput.ucDispPllConfig |= | 628 | args.v3.sInput.ucDispPllConfig |= |
@@ -816,6 +796,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
816 | struct radeon_pll *pll; | 796 | struct radeon_pll *pll; |
817 | u32 adjusted_clock; | 797 | u32 adjusted_clock; |
818 | int encoder_mode = 0; | 798 | int encoder_mode = 0; |
799 | struct radeon_atom_ss ss; | ||
800 | bool ss_enabled = false; | ||
819 | 801 | ||
820 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 802 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
821 | if (encoder->crtc == crtc) { | 803 | if (encoder->crtc == crtc) { |
@@ -842,16 +824,112 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
842 | break; | 824 | break; |
843 | } | 825 | } |
844 | 826 | ||
827 | if (radeon_encoder->active_device & | ||
828 | (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { | ||
829 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
830 | struct drm_connector *connector = | ||
831 | radeon_get_connector_for_encoder(encoder); | ||
832 | struct radeon_connector *radeon_connector = | ||
833 | to_radeon_connector(connector); | ||
834 | struct radeon_connector_atom_dig *dig_connector = | ||
835 | radeon_connector->con_priv; | ||
836 | int dp_clock; | ||
837 | |||
838 | switch (encoder_mode) { | ||
839 | case ATOM_ENCODER_MODE_DP: | ||
840 | /* DP/eDP */ | ||
841 | dp_clock = dig_connector->dp_clock / 10; | ||
842 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
843 | if (ASIC_IS_DCE4(rdev)) | ||
844 | ss_enabled = | ||
845 | radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
846 | dig->lcd_ss_id, | ||
847 | dp_clock); | ||
848 | else | ||
849 | ss_enabled = | ||
850 | radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
851 | dig->lcd_ss_id); | ||
852 | } else { | ||
853 | if (ASIC_IS_DCE4(rdev)) | ||
854 | ss_enabled = | ||
855 | radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
856 | ASIC_INTERNAL_SS_ON_DP, | ||
857 | dp_clock); | ||
858 | else { | ||
859 | if (dp_clock == 16200) { | ||
860 | ss_enabled = | ||
861 | radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
862 | ATOM_DP_SS_ID2); | ||
863 | if (!ss_enabled) | ||
864 | ss_enabled = | ||
865 | radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
866 | ATOM_DP_SS_ID1); | ||
867 | } else | ||
868 | ss_enabled = | ||
869 | radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
870 | ATOM_DP_SS_ID1); | ||
871 | } | ||
872 | } | ||
873 | break; | ||
874 | case ATOM_ENCODER_MODE_LVDS: | ||
875 | if (ASIC_IS_DCE4(rdev)) | ||
876 | ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
877 | dig->lcd_ss_id, | ||
878 | mode->clock / 10); | ||
879 | else | ||
880 | ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, | ||
881 | dig->lcd_ss_id); | ||
882 | break; | ||
883 | case ATOM_ENCODER_MODE_DVI: | ||
884 | if (ASIC_IS_DCE4(rdev)) | ||
885 | ss_enabled = | ||
886 | radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
887 | ASIC_INTERNAL_SS_ON_TMDS, | ||
888 | mode->clock / 10); | ||
889 | break; | ||
890 | case ATOM_ENCODER_MODE_HDMI: | ||
891 | if (ASIC_IS_DCE4(rdev)) | ||
892 | ss_enabled = | ||
893 | radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
894 | ASIC_INTERNAL_SS_ON_HDMI, | ||
895 | mode->clock / 10); | ||
896 | break; | ||
897 | default: | ||
898 | break; | ||
899 | } | ||
900 | } | ||
901 | |||
845 | /* adjust pixel clock as needed */ | 902 | /* adjust pixel clock as needed */ |
846 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll); | 903 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); |
847 | 904 | ||
848 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 905 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, |
849 | &ref_div, &post_div); | 906 | &ref_div, &post_div); |
850 | 907 | ||
908 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); | ||
909 | |||
851 | atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, | 910 | atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, |
852 | encoder_mode, radeon_encoder->encoder_id, mode->clock, | 911 | encoder_mode, radeon_encoder->encoder_id, mode->clock, |
853 | ref_div, fb_div, frac_fb_div, post_div); | 912 | ref_div, fb_div, frac_fb_div, post_div); |
854 | 913 | ||
914 | if (ss_enabled) { | ||
915 | /* calculate ss amount and step size */ | ||
916 | if (ASIC_IS_DCE4(rdev)) { | ||
917 | u32 step_size; | ||
918 | u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; | ||
919 | ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; | ||
920 | ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & | ||
921 | ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; | ||
922 | if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) | ||
923 | step_size = (4 * amount * ref_div * (ss.rate * 2048)) / | ||
924 | (125 * 25 * pll->reference_freq / 100); | ||
925 | else | ||
926 | step_size = (2 * amount * ref_div * (ss.rate * 2048)) / | ||
927 | (125 * 25 * pll->reference_freq / 100); | ||
928 | ss.step = step_size; | ||
929 | } | ||
930 | |||
931 | atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss); | ||
932 | } | ||
855 | } | 933 | } |
856 | 934 | ||
857 | static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | 935 | static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, |
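
A worked example of the DCE4 `ss.amount` encoding computed above, with illustrative numbers: the spread amount is expressed in tenths of a feedback-divider step, with the integer part in the FBDIV field and the remainder in the NFRAC field. The mask and shift values below are assumptions standing in for the real ATOM_PPLL_SS_AMOUNT_V2_* definitions, and the percentage is assumed to be stored in units of 0.01%.

```c
#include <stdio.h>
#include <stdint.h>

/* assumed field layout for the V2 SS amount word (not taken from this patch) */
#define FBDIV_MASK   0x00ff
#define NFRAC_SHIFT  8
#define NFRAC_MASK   0x0f00

int main(void)
{
	uint32_t fb_div = 215, frac_fb_div = 7;   /* effective feedback divider 215.7 */
	uint32_t percentage = 250;                /* assumed 2.5% spread in 0.01% units */

	uint32_t amount = ((fb_div * 10 + frac_fb_div) * percentage) / 10000; /* 53 */
	uint32_t enc = (amount / 10) & FBDIV_MASK;                 /* integer part: 5 */
	enc |= ((amount - enc * 10) << NFRAC_SHIFT) & NFRAC_MASK;  /* fractional part: 3 */

	printf("amount=%u encoded=0x%04x\n", amount, enc);  /* amount=53 encoded=0x0305 */
	return 0;
}
```
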
@@ -1278,12 +1356,19 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1278 | } | 1356 | } |
1279 | } | 1357 | } |
1280 | 1358 | ||
1281 | atombios_disable_ss(crtc); | ||
1282 | /* always set DCPLL */ | 1359 | /* always set DCPLL */ |
1283 | if (ASIC_IS_DCE4(rdev)) | 1360 | if (ASIC_IS_DCE4(rdev)) { |
1361 | struct radeon_atom_ss ss; | ||
1362 | bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
1363 | ASIC_INTERNAL_SS_ON_DCPLL, | ||
1364 | rdev->clock.default_dispclk); | ||
1365 | if (ss_enabled) | ||
1366 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); | ||
1284 | atombios_crtc_set_dcpll(crtc); | 1367 | atombios_crtc_set_dcpll(crtc); |
1368 | if (ss_enabled) | ||
1369 | atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); | ||
1370 | } | ||
1285 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1371 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1286 | atombios_enable_ss(crtc); | ||
1287 | 1372 | ||
1288 | if (ASIC_IS_DCE4(rdev)) | 1373 | if (ASIC_IS_DCE4(rdev)) |
1289 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 1374 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 79082d4398ae..aee61ae24402 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -731,7 +731,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
731 | 731 | ||
732 | /* Set ring buffer size */ | 732 | /* Set ring buffer size */ |
733 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); | 733 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); |
734 | tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | 734 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
735 | #ifdef __BIG_ENDIAN | 735 | #ifdef __BIG_ENDIAN |
736 | tmp |= BUF_SWAP_32BIT; | 736 | tmp |= BUF_SWAP_32BIT; |
737 | #endif | 737 | #endif |
@@ -745,8 +745,19 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
745 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 745 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
746 | WREG32(CP_RB_RPTR_WR, 0); | 746 | WREG32(CP_RB_RPTR_WR, 0); |
747 | WREG32(CP_RB_WPTR, 0); | 747 | WREG32(CP_RB_WPTR, 0); |
748 | WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); | 748 | |
749 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); | 749 | /* set the wb address whether it's enabled or not */
750 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | ||
751 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | ||
752 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | ||
753 | |||
754 | if (rdev->wb.enabled) | ||
755 | WREG32(SCRATCH_UMSK, 0xff); | ||
756 | else { | ||
757 | tmp |= RB_NO_UPDATE; | ||
758 | WREG32(SCRATCH_UMSK, 0); | ||
759 | } | ||
760 | |||
750 | mdelay(1); | 761 | mdelay(1); |
751 | WREG32(CP_RB_CNTL, tmp); | 762 | WREG32(CP_RB_CNTL, tmp); |
752 | 763 | ||
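
The writeback address programmed above is a 40-bit MC address (compare the "properly handle 40 bit MC addresses" item in the shortlog): the dword-aligned low 32 bits go into CP_RB_RPTR_ADDR and only bits 39:32 into CP_RB_RPTR_ADDR_HI, hence the `& 0xFF`. A minimal user-space sketch of that split; the names are made up and `upper_32_bits_demo()` stands in for the kernel's `upper_32_bits()`.

```c
#include <assert.h>
#include <stdint.h>

/* stand-in for the kernel's upper_32_bits() */
static inline uint32_t upper_32_bits_demo(uint64_t v)
{
	return (uint32_t)(v >> 32);
}

int main(void)
{
	uint64_t mc_addr = (0x12ULL << 32) | 0x00abcde0;      /* a 40-bit GPU address */
	uint32_t lo = (uint32_t)mc_addr & 0xFFFFFFFC;         /* dword-aligned low 32 bits */
	uint32_t hi = upper_32_bits_demo(mc_addr) & 0xFF;     /* bits 39:32 only */

	/* recombining the two register values recovers the aligned address */
	assert((((uint64_t)hi << 32) | lo) == (mc_addr & ~3ULL));
	return 0;
}
```
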
@@ -1583,6 +1594,7 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
1583 | if (rdev->irq.sw_int) { | 1594 | if (rdev->irq.sw_int) { |
1584 | DRM_DEBUG("evergreen_irq_set: sw int\n"); | 1595 | DRM_DEBUG("evergreen_irq_set: sw int\n"); |
1585 | cp_int_cntl |= RB_INT_ENABLE; | 1596 | cp_int_cntl |= RB_INT_ENABLE; |
1597 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | ||
1586 | } | 1598 | } |
1587 | if (rdev->irq.crtc_vblank_int[0]) { | 1599 | if (rdev->irq.crtc_vblank_int[0]) { |
1588 | DRM_DEBUG("evergreen_irq_set: vblank 0\n"); | 1600 | DRM_DEBUG("evergreen_irq_set: vblank 0\n"); |
@@ -1759,8 +1771,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |||
1759 | { | 1771 | { |
1760 | u32 wptr, tmp; | 1772 | u32 wptr, tmp; |
1761 | 1773 | ||
1762 | /* XXX use writeback */ | 1774 | if (rdev->wb.enabled) |
1763 | wptr = RREG32(IH_RB_WPTR); | 1775 | wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; |
1776 | else | ||
1777 | wptr = RREG32(IH_RB_WPTR); | ||
1764 | 1778 | ||
1765 | if (wptr & RB_OVERFLOW) { | 1779 | if (wptr & RB_OVERFLOW) { |
1766 | /* When a ring buffer overflow happen start parsing interrupt | 1780 | /* When a ring buffer overflow happen start parsing interrupt |
@@ -1999,6 +2013,7 @@ restart_ih: | |||
1999 | break; | 2013 | break; |
2000 | case 181: /* CP EOP event */ | 2014 | case 181: /* CP EOP event */ |
2001 | DRM_DEBUG("IH: CP EOP\n"); | 2015 | DRM_DEBUG("IH: CP EOP\n"); |
2016 | radeon_fence_process(rdev); | ||
2002 | break; | 2017 | break; |
2003 | case 233: /* GUI IDLE */ | 2018 | case 233: /* GUI IDLE */ |
2004 | DRM_DEBUG("IH: CP EOP\n"); | 2019 | DRM_DEBUG("IH: CP EOP\n"); |
@@ -2047,26 +2062,18 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2047 | return r; | 2062 | return r; |
2048 | } | 2063 | } |
2049 | evergreen_gpu_init(rdev); | 2064 | evergreen_gpu_init(rdev); |
2050 | #if 0 | ||
2051 | if (!rdev->r600_blit.shader_obj) { | ||
2052 | r = r600_blit_init(rdev); | ||
2053 | if (r) { | ||
2054 | DRM_ERROR("radeon: failed blitter (%d).\n", r); | ||
2055 | return r; | ||
2056 | } | ||
2057 | } | ||
2058 | 2065 | ||
2059 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 2066 | r = evergreen_blit_init(rdev); |
2060 | if (unlikely(r != 0)) | ||
2061 | return r; | ||
2062 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
2063 | &rdev->r600_blit.shader_gpu_addr); | ||
2064 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
2065 | if (r) { | 2067 | if (r) { |
2066 | DRM_ERROR("failed to pin blit object %d\n", r); | 2068 | evergreen_blit_fini(rdev); |
2067 | return r; | 2069 | rdev->asic->copy = NULL; |
2070 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
2068 | } | 2071 | } |
2069 | #endif | 2072 | |
2073 | /* allocate wb buffer */ | ||
2074 | r = radeon_wb_init(rdev); | ||
2075 | if (r) | ||
2076 | return r; | ||
2070 | 2077 | ||
2071 | /* Enable IRQ */ | 2078 | /* Enable IRQ */ |
2072 | r = r600_irq_init(rdev); | 2079 | r = r600_irq_init(rdev); |
@@ -2086,8 +2093,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2086 | r = evergreen_cp_resume(rdev); | 2093 | r = evergreen_cp_resume(rdev); |
2087 | if (r) | 2094 | if (r) |
2088 | return r; | 2095 | return r; |
2089 | /* write back buffer are not vital so don't worry about failure */ | ||
2090 | r600_wb_enable(rdev); | ||
2091 | 2096 | ||
2092 | return 0; | 2097 | return 0; |
2093 | } | 2098 | } |
@@ -2121,23 +2126,43 @@ int evergreen_resume(struct radeon_device *rdev) | |||
2121 | 2126 | ||
2122 | int evergreen_suspend(struct radeon_device *rdev) | 2127 | int evergreen_suspend(struct radeon_device *rdev) |
2123 | { | 2128 | { |
2124 | #if 0 | ||
2125 | int r; | 2129 | int r; |
2126 | #endif | 2130 | |
2127 | /* FIXME: we should wait for ring to be empty */ | 2131 | /* FIXME: we should wait for ring to be empty */ |
2128 | r700_cp_stop(rdev); | 2132 | r700_cp_stop(rdev); |
2129 | rdev->cp.ready = false; | 2133 | rdev->cp.ready = false; |
2130 | evergreen_irq_suspend(rdev); | 2134 | evergreen_irq_suspend(rdev); |
2131 | r600_wb_disable(rdev); | 2135 | radeon_wb_disable(rdev); |
2132 | evergreen_pcie_gart_disable(rdev); | 2136 | evergreen_pcie_gart_disable(rdev); |
2133 | #if 0 | 2137 | |
2134 | /* unpin shaders bo */ | 2138 | /* unpin shaders bo */ |
2135 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 2139 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
2136 | if (likely(r == 0)) { | 2140 | if (likely(r == 0)) { |
2137 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | 2141 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
2138 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 2142 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
2139 | } | 2143 | } |
2140 | #endif | 2144 | |
2145 | return 0; | ||
2146 | } | ||
2147 | |||
2148 | int evergreen_copy_blit(struct radeon_device *rdev, | ||
2149 | uint64_t src_offset, uint64_t dst_offset, | ||
2150 | unsigned num_pages, struct radeon_fence *fence) | ||
2151 | { | ||
2152 | int r; | ||
2153 | |||
2154 | mutex_lock(&rdev->r600_blit.mutex); | ||
2155 | rdev->r600_blit.vb_ib = NULL; | ||
2156 | r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | ||
2157 | if (r) { | ||
2158 | if (rdev->r600_blit.vb_ib) | ||
2159 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | ||
2160 | mutex_unlock(&rdev->r600_blit.mutex); | ||
2161 | return r; | ||
2162 | } | ||
2163 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | ||
2164 | evergreen_blit_done_copy(rdev, fence); | ||
2165 | mutex_unlock(&rdev->r600_blit.mutex); | ||
2141 | return 0; | 2166 | return 0; |
2142 | } | 2167 | } |
2143 | 2168 | ||
@@ -2245,8 +2270,8 @@ int evergreen_init(struct radeon_device *rdev) | |||
2245 | if (r) { | 2270 | if (r) { |
2246 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 2271 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2247 | r700_cp_fini(rdev); | 2272 | r700_cp_fini(rdev); |
2248 | r600_wb_fini(rdev); | ||
2249 | r600_irq_fini(rdev); | 2273 | r600_irq_fini(rdev); |
2274 | radeon_wb_fini(rdev); | ||
2250 | radeon_irq_kms_fini(rdev); | 2275 | radeon_irq_kms_fini(rdev); |
2251 | evergreen_pcie_gart_fini(rdev); | 2276 | evergreen_pcie_gart_fini(rdev); |
2252 | rdev->accel_working = false; | 2277 | rdev->accel_working = false; |
@@ -2268,10 +2293,10 @@ int evergreen_init(struct radeon_device *rdev) | |||
2268 | 2293 | ||
2269 | void evergreen_fini(struct radeon_device *rdev) | 2294 | void evergreen_fini(struct radeon_device *rdev) |
2270 | { | 2295 | { |
2271 | /*r600_blit_fini(rdev);*/ | 2296 | evergreen_blit_fini(rdev); |
2272 | r700_cp_fini(rdev); | 2297 | r700_cp_fini(rdev); |
2273 | r600_wb_fini(rdev); | ||
2274 | r600_irq_fini(rdev); | 2298 | r600_irq_fini(rdev); |
2299 | radeon_wb_fini(rdev); | ||
2275 | radeon_irq_kms_fini(rdev); | 2300 | radeon_irq_kms_fini(rdev); |
2276 | evergreen_pcie_gart_fini(rdev); | 2301 | evergreen_pcie_gart_fini(rdev); |
2277 | radeon_gem_fini(rdev); | 2302 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
new file mode 100644
index 000000000000..ce1ae4a2aa54
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -0,0 +1,776 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Alex Deucher <alexander.deucher@amd.com> | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "radeon_drm.h" | ||
30 | #include "radeon.h" | ||
31 | |||
32 | #include "evergreend.h" | ||
33 | #include "evergreen_blit_shaders.h" | ||
34 | |||
35 | #define DI_PT_RECTLIST 0x11 | ||
36 | #define DI_INDEX_SIZE_16_BIT 0x0 | ||
37 | #define DI_SRC_SEL_AUTO_INDEX 0x2 | ||
38 | |||
39 | #define FMT_8 0x1 | ||
40 | #define FMT_5_6_5 0x8 | ||
41 | #define FMT_8_8_8_8 0x1a | ||
42 | #define COLOR_8 0x1 | ||
43 | #define COLOR_5_6_5 0x8 | ||
44 | #define COLOR_8_8_8_8 0x1a | ||
45 | |||
46 | /* emits 17 */ | ||
47 | static void | ||
48 | set_render_target(struct radeon_device *rdev, int format, | ||
49 | int w, int h, u64 gpu_addr) | ||
50 | { | ||
51 | u32 cb_color_info; | ||
52 | int pitch, slice; | ||
53 | |||
54 | h = ALIGN(h, 8); | ||
55 | if (h < 8) | ||
56 | h = 8; | ||
57 | |||
58 | cb_color_info = ((format << 2) | (1 << 24)); | ||
59 | pitch = (w / 8) - 1; | ||
60 | slice = ((w * h) / 64) - 1; | ||
61 | |||
62 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15)); | ||
63 | radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
64 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
65 | radeon_ring_write(rdev, pitch); | ||
66 | radeon_ring_write(rdev, slice); | ||
67 | radeon_ring_write(rdev, 0); | ||
68 | radeon_ring_write(rdev, cb_color_info); | ||
69 | radeon_ring_write(rdev, (1 << 4)); | ||
70 | radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16)); | ||
71 | radeon_ring_write(rdev, 0); | ||
72 | radeon_ring_write(rdev, 0); | ||
73 | radeon_ring_write(rdev, 0); | ||
74 | radeon_ring_write(rdev, 0); | ||
75 | radeon_ring_write(rdev, 0); | ||
76 | radeon_ring_write(rdev, 0); | ||
77 | radeon_ring_write(rdev, 0); | ||
78 | radeon_ring_write(rdev, 0); | ||
79 | } | ||
80 | |||
81 | /* emits 5dw */ | ||
82 | static void | ||
83 | cp_set_surface_sync(struct radeon_device *rdev, | ||
84 | u32 sync_type, u32 size, | ||
85 | u64 mc_addr) | ||
86 | { | ||
87 | u32 cp_coher_size; | ||
88 | |||
89 | if (size == 0xffffffff) | ||
90 | cp_coher_size = 0xffffffff; | ||
91 | else | ||
92 | cp_coher_size = ((size + 255) >> 8); | ||
93 | |||
94 | radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); | ||
95 | radeon_ring_write(rdev, sync_type); | ||
96 | radeon_ring_write(rdev, cp_coher_size); | ||
97 | radeon_ring_write(rdev, mc_addr >> 8); | ||
98 | radeon_ring_write(rdev, 10); /* poll interval */ | ||
99 | } | ||
100 | |||
101 | /* emits 11dw + 1 surface sync = 16dw */ | ||
102 | static void | ||
103 | set_shaders(struct radeon_device *rdev) | ||
104 | { | ||
105 | u64 gpu_addr; | ||
106 | |||
107 | /* VS */ | ||
108 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; | ||
109 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3)); | ||
110 | radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
111 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
112 | radeon_ring_write(rdev, 2); | ||
113 | radeon_ring_write(rdev, 0); | ||
114 | |||
115 | /* PS */ | ||
116 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; | ||
117 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4)); | ||
118 | radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
119 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
120 | radeon_ring_write(rdev, 1); | ||
121 | radeon_ring_write(rdev, 0); | ||
122 | radeon_ring_write(rdev, 2); | ||
123 | |||
124 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; | ||
125 | cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); | ||
126 | } | ||
127 | |||
128 | /* emits 10 + 1 sync (5) = 15 */ | ||
129 | static void | ||
130 | set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | ||
131 | { | ||
132 | u32 sq_vtx_constant_word2, sq_vtx_constant_word3; | ||
133 | |||
134 | /* high addr, stride */ | ||
135 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | ||
136 | /* xyzw swizzles */ | ||
137 | sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); | ||
138 | |||
139 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); | ||
140 | radeon_ring_write(rdev, 0x580); | ||
141 | radeon_ring_write(rdev, gpu_addr & 0xffffffff); | ||
142 | radeon_ring_write(rdev, 48 - 1); /* size */ | ||
143 | radeon_ring_write(rdev, sq_vtx_constant_word2); | ||
144 | radeon_ring_write(rdev, sq_vtx_constant_word3); | ||
145 | radeon_ring_write(rdev, 0); | ||
146 | radeon_ring_write(rdev, 0); | ||
147 | radeon_ring_write(rdev, 0); | ||
148 | radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); | ||
149 | |||
150 | if (rdev->family == CHIP_CEDAR) | ||
151 | cp_set_surface_sync(rdev, | ||
152 | PACKET3_TC_ACTION_ENA, 48, gpu_addr); | ||
153 | else | ||
154 | cp_set_surface_sync(rdev, | ||
155 | PACKET3_VC_ACTION_ENA, 48, gpu_addr); | ||
156 | |||
157 | } | ||
158 | |||
159 | /* emits 10 */ | ||
160 | static void | ||
161 | set_tex_resource(struct radeon_device *rdev, | ||
162 | int format, int w, int h, int pitch, | ||
163 | u64 gpu_addr) | ||
164 | { | ||
165 | u32 sq_tex_resource_word0, sq_tex_resource_word1; | ||
166 | u32 sq_tex_resource_word4, sq_tex_resource_word7; | ||
167 | |||
168 | if (h < 1) | ||
169 | h = 1; | ||
170 | |||
171 | sq_tex_resource_word0 = (1 << 0); /* 2D */ | ||
172 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | | ||
173 | ((w - 1) << 18)); | ||
174 | sq_tex_resource_word1 = ((h - 1) << 0); | ||
175 | /* xyzw swizzles */ | ||
176 | sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); | ||
177 | |||
178 | sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30); | ||
179 | |||
180 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); | ||
181 | radeon_ring_write(rdev, 0); | ||
182 | radeon_ring_write(rdev, sq_tex_resource_word0); | ||
183 | radeon_ring_write(rdev, sq_tex_resource_word1); | ||
184 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
185 | radeon_ring_write(rdev, gpu_addr >> 8); | ||
186 | radeon_ring_write(rdev, sq_tex_resource_word4); | ||
187 | radeon_ring_write(rdev, 0); | ||
188 | radeon_ring_write(rdev, 0); | ||
189 | radeon_ring_write(rdev, sq_tex_resource_word7); | ||
190 | } | ||
191 | |||
192 | /* emits 12 */ | ||
193 | static void | ||
194 | set_scissors(struct radeon_device *rdev, int x1, int y1, | ||
195 | int x2, int y2) | ||
196 | { | ||
197 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
198 | radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
199 | radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); | ||
200 | radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); | ||
201 | |||
202 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
203 | radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
204 | radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); | ||
205 | radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); | ||
206 | |||
207 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
208 | radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); | ||
209 | radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); | ||
210 | radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); | ||
211 | } | ||
212 | |||
213 | /* emits 10 */ | ||
214 | static void | ||
215 | draw_auto(struct radeon_device *rdev) | ||
216 | { | ||
217 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
218 | radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
219 | radeon_ring_write(rdev, DI_PT_RECTLIST); | ||
220 | |||
221 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); | ||
222 | radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); | ||
223 | |||
224 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); | ||
225 | radeon_ring_write(rdev, 1); | ||
226 | |||
227 | radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); | ||
228 | radeon_ring_write(rdev, 3); | ||
229 | radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX); | ||
230 | |||
231 | } | ||
232 | |||
233 | /* emits 20 */ | ||
234 | static void | ||
235 | set_default_state(struct radeon_device *rdev) | ||
236 | { | ||
237 | u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3; | ||
238 | u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2; | ||
239 | u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3; | ||
240 | int num_ps_gprs, num_vs_gprs, num_temp_gprs; | ||
241 | int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs; | ||
242 | int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; | ||
243 | int num_hs_threads, num_ls_threads; | ||
244 | int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; | ||
245 | int num_hs_stack_entries, num_ls_stack_entries; | ||
246 | u64 gpu_addr; | ||
247 | int dwords; | ||
248 | |||
249 | switch (rdev->family) { | ||
250 | case CHIP_CEDAR: | ||
251 | default: | ||
252 | num_ps_gprs = 93; | ||
253 | num_vs_gprs = 46; | ||
254 | num_temp_gprs = 4; | ||
255 | num_gs_gprs = 31; | ||
256 | num_es_gprs = 31; | ||
257 | num_hs_gprs = 23; | ||
258 | num_ls_gprs = 23; | ||
259 | num_ps_threads = 96; | ||
260 | num_vs_threads = 16; | ||
261 | num_gs_threads = 16; | ||
262 | num_es_threads = 16; | ||
263 | num_hs_threads = 16; | ||
264 | num_ls_threads = 16; | ||
265 | num_ps_stack_entries = 42; | ||
266 | num_vs_stack_entries = 42; | ||
267 | num_gs_stack_entries = 42; | ||
268 | num_es_stack_entries = 42; | ||
269 | num_hs_stack_entries = 42; | ||
270 | num_ls_stack_entries = 42; | ||
271 | break; | ||
272 | case CHIP_REDWOOD: | ||
273 | num_ps_gprs = 93; | ||
274 | num_vs_gprs = 46; | ||
275 | num_temp_gprs = 4; | ||
276 | num_gs_gprs = 31; | ||
277 | num_es_gprs = 31; | ||
278 | num_hs_gprs = 23; | ||
279 | num_ls_gprs = 23; | ||
280 | num_ps_threads = 128; | ||
281 | num_vs_threads = 20; | ||
282 | num_gs_threads = 20; | ||
283 | num_es_threads = 20; | ||
284 | num_hs_threads = 20; | ||
285 | num_ls_threads = 20; | ||
286 | num_ps_stack_entries = 42; | ||
287 | num_vs_stack_entries = 42; | ||
288 | num_gs_stack_entries = 42; | ||
289 | num_es_stack_entries = 42; | ||
290 | num_hs_stack_entries = 42; | ||
291 | num_ls_stack_entries = 42; | ||
292 | break; | ||
293 | case CHIP_JUNIPER: | ||
294 | num_ps_gprs = 93; | ||
295 | num_vs_gprs = 46; | ||
296 | num_temp_gprs = 4; | ||
297 | num_gs_gprs = 31; | ||
298 | num_es_gprs = 31; | ||
299 | num_hs_gprs = 23; | ||
300 | num_ls_gprs = 23; | ||
301 | num_ps_threads = 128; | ||
302 | num_vs_threads = 20; | ||
303 | num_gs_threads = 20; | ||
304 | num_es_threads = 20; | ||
305 | num_hs_threads = 20; | ||
306 | num_ls_threads = 20; | ||
307 | num_ps_stack_entries = 85; | ||
308 | num_vs_stack_entries = 85; | ||
309 | num_gs_stack_entries = 85; | ||
310 | num_es_stack_entries = 85; | ||
311 | num_hs_stack_entries = 85; | ||
312 | num_ls_stack_entries = 85; | ||
313 | break; | ||
314 | case CHIP_CYPRESS: | ||
315 | case CHIP_HEMLOCK: | ||
316 | num_ps_gprs = 93; | ||
317 | num_vs_gprs = 46; | ||
318 | num_temp_gprs = 4; | ||
319 | num_gs_gprs = 31; | ||
320 | num_es_gprs = 31; | ||
321 | num_hs_gprs = 23; | ||
322 | num_ls_gprs = 23; | ||
323 | num_ps_threads = 128; | ||
324 | num_vs_threads = 20; | ||
325 | num_gs_threads = 20; | ||
326 | num_es_threads = 20; | ||
327 | num_hs_threads = 20; | ||
328 | num_ls_threads = 20; | ||
329 | num_ps_stack_entries = 85; | ||
330 | num_vs_stack_entries = 85; | ||
331 | num_gs_stack_entries = 85; | ||
332 | num_es_stack_entries = 85; | ||
333 | num_hs_stack_entries = 85; | ||
334 | num_ls_stack_entries = 85; | ||
335 | break; | ||
336 | } | ||
337 | |||
338 | if (rdev->family == CHIP_CEDAR) | ||
339 | sq_config = 0; | ||
340 | else | ||
341 | sq_config = VC_ENABLE; | ||
342 | |||
343 | sq_config |= (EXPORT_SRC_C | | ||
344 | CS_PRIO(0) | | ||
345 | LS_PRIO(0) | | ||
346 | HS_PRIO(0) | | ||
347 | PS_PRIO(0) | | ||
348 | VS_PRIO(1) | | ||
349 | GS_PRIO(2) | | ||
350 | ES_PRIO(3)); | ||
351 | |||
352 | sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | | ||
353 | NUM_VS_GPRS(num_vs_gprs) | | ||
354 | NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); | ||
355 | sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | | ||
356 | NUM_ES_GPRS(num_es_gprs)); | ||
357 | sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) | | ||
358 | NUM_LS_GPRS(num_ls_gprs)); | ||
359 | sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | | ||
360 | NUM_VS_THREADS(num_vs_threads) | | ||
361 | NUM_GS_THREADS(num_gs_threads) | | ||
362 | NUM_ES_THREADS(num_es_threads)); | ||
363 | sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) | | ||
364 | NUM_LS_THREADS(num_ls_threads)); | ||
365 | sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | | ||
366 | NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); | ||
367 | sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | | ||
368 | NUM_ES_STACK_ENTRIES(num_es_stack_entries)); | ||
369 | sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) | | ||
370 | NUM_LS_STACK_ENTRIES(num_ls_stack_entries)); | ||
371 | |||
372 | /* emit an IB pointing at default state */ | ||
373 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | ||
374 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | ||
375 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||
376 | radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); | ||
377 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); | ||
378 | radeon_ring_write(rdev, dwords); | ||
379 | |||
380 | /* disable dyn gprs */ | ||
381 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
382 | radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
383 | radeon_ring_write(rdev, 0); | ||
384 | |||
385 | /* SQ config */ | ||
386 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11)); | ||
387 | radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
388 | radeon_ring_write(rdev, sq_config); | ||
389 | radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); | ||
390 | radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); | ||
391 | radeon_ring_write(rdev, sq_gpr_resource_mgmt_3); | ||
392 | radeon_ring_write(rdev, 0); | ||
393 | radeon_ring_write(rdev, 0); | ||
394 | radeon_ring_write(rdev, sq_thread_resource_mgmt); | ||
395 | radeon_ring_write(rdev, sq_thread_resource_mgmt_2); | ||
396 | radeon_ring_write(rdev, sq_stack_resource_mgmt_1); | ||
397 | radeon_ring_write(rdev, sq_stack_resource_mgmt_2); | ||
398 | radeon_ring_write(rdev, sq_stack_resource_mgmt_3); | ||
399 | } | ||
400 | |||
401 | static inline uint32_t i2f(uint32_t input) | ||
402 | { | ||
403 | u32 result, i, exponent, fraction; | ||
404 | |||
405 | if ((input & 0x3fff) == 0) | ||
406 | result = 0; /* 0 is a special case */ | ||
407 | else { | ||
408 | exponent = 140; /* exponent biased by 127; */ | ||
409 | fraction = (input & 0x3fff) << 10; /* cheat and only | ||
410 | handle numbers below 2^^15 */ | ||
411 | for (i = 0; i < 14; i++) { | ||
412 | if (fraction & 0x800000) | ||
413 | break; | ||
414 | else { | ||
415 | fraction = fraction << 1; /* keep | ||
416 | shifting left until top bit = 1 */ | ||
417 | exponent = exponent - 1; | ||
418 | } | ||
419 | } | ||
420 | result = exponent << 23 | (fraction & 0x7fffff); /* mask | ||
421 | off top bit; assumed 1 */ | ||
422 | } | ||
423 | return result; | ||
424 | } | ||
425 | |||
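
i2f() above converts a small unsigned integer into its IEEE-754 single-precision bit pattern so vertex coordinates can be written directly into the vertex buffer. A standalone, user-space sanity check (illustrative only; the helper is adapted from the function above, with u32 spelled as uint32_t and the kernel-specific bits dropped):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* adapted from i2f() above for a user-space build */
static uint32_t i2f(uint32_t input)
{
	uint32_t result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		result = 0; /* 0 is a special case */
	else {
		exponent = 140; /* exponent biased by 127 */
		fraction = (input & 0x3fff) << 10;
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;
			fraction = fraction << 1; /* shift left until the top bit is set */
			exponent = exponent - 1;
		}
		result = exponent << 23 | (fraction & 0x7fffff);
	}
	return result;
}

static uint32_t float_bits(float f)
{
	uint32_t u;

	memcpy(&u, &f, sizeof(u));
	return u;
}

int main(void)
{
	assert(i2f(0) == 0x00000000);              /* special-cased zero */
	assert(i2f(1) == float_bits(1.0f));        /* 0x3f800000 */
	assert(i2f(16) == float_bits(16.0f));      /* 0x41800000 */
	assert(i2f(8191) == float_bits(8191.0f));  /* still within the 0x3fff input mask */
	return 0;
}
```
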
426 | int evergreen_blit_init(struct radeon_device *rdev) | ||
427 | { | ||
428 | u32 obj_size; | ||
429 | int r, dwords; | ||
430 | void *ptr; | ||
431 | u32 packet2s[16]; | ||
432 | int num_packet2s = 0; | ||
433 | |||
434 | /* pin copy shader into vram if already initialized */ | ||
435 | if (rdev->r600_blit.shader_obj) | ||
436 | goto done; | ||
437 | |||
438 | mutex_init(&rdev->r600_blit.mutex); | ||
439 | rdev->r600_blit.state_offset = 0; | ||
440 | |||
441 | rdev->r600_blit.state_len = evergreen_default_size; | ||
442 | |||
443 | dwords = rdev->r600_blit.state_len; | ||
444 | while (dwords & 0xf) { | ||
445 | packet2s[num_packet2s++] = PACKET2(0); | ||
446 | dwords++; | ||
447 | } | ||
448 | |||
449 | obj_size = dwords * 4; | ||
450 | obj_size = ALIGN(obj_size, 256); | ||
451 | |||
452 | rdev->r600_blit.vs_offset = obj_size; | ||
453 | obj_size += evergreen_vs_size * 4; | ||
454 | obj_size = ALIGN(obj_size, 256); | ||
455 | |||
456 | rdev->r600_blit.ps_offset = obj_size; | ||
457 | obj_size += evergreen_ps_size * 4; | ||
458 | obj_size = ALIGN(obj_size, 256); | ||
459 | |||
460 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, | ||
461 | &rdev->r600_blit.shader_obj); | ||
462 | if (r) { | ||
463 | DRM_ERROR("evergreen failed to allocate shader\n"); | ||
464 | return r; | ||
465 | } | ||
466 | |||
467 | DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n", | ||
468 | obj_size, | ||
469 | rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); | ||
470 | |||
471 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
472 | if (unlikely(r != 0)) | ||
473 | return r; | ||
474 | r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); | ||
475 | if (r) { | ||
476 | DRM_ERROR("failed to map blit object %d\n", r); | ||
477 | return r; | ||
478 | } | ||
479 | |||
480 | memcpy_toio(ptr + rdev->r600_blit.state_offset, | ||
481 | evergreen_default_state, rdev->r600_blit.state_len * 4); | ||
482 | |||
483 | if (num_packet2s) | ||
484 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | ||
485 | packet2s, num_packet2s * 4); | ||
486 | memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); | ||
487 | memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); | ||
488 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | ||
489 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
490 | |||
491 | done: | ||
492 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
493 | if (unlikely(r != 0)) | ||
494 | return r; | ||
495 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
496 | &rdev->r600_blit.shader_gpu_addr); | ||
497 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
498 | if (r) { | ||
499 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
500 | return r; | ||
501 | } | ||
502 | return 0; | ||
503 | } | ||
504 | |||
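
evergreen_blit_init() above packs three sections into a single buffer object: the default-state IB (padded to a 16-dword boundary with PACKET2 NOPs), then the vertex shader, then the pixel shader, each 256-byte aligned. A small sketch of that offset arithmetic with made-up section sizes; evergreen_default_size and the shader sizes below are assumptions, not the real values.

```c
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int state_dwords = 326;               /* assumed evergreen_default_size */
	unsigned int vs_dwords = 12, ps_dwords = 20;   /* assumed shader sizes */

	unsigned int dwords = ALIGN_UP(state_dwords, 16);         /* PACKET2 padding */
	unsigned int obj_size = ALIGN_UP(dwords * 4, 256);
	unsigned int vs_offset = obj_size;                        /* r600_blit.vs_offset */
	obj_size = ALIGN_UP(obj_size + vs_dwords * 4, 256);
	unsigned int ps_offset = obj_size;                        /* r600_blit.ps_offset */
	obj_size = ALIGN_UP(obj_size + ps_dwords * 4, 256);

	printf("vs_offset=%u ps_offset=%u obj_size=%u\n",
	       vs_offset, ps_offset, obj_size);                   /* 1536 1792 2048 */
	return 0;
}
```
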
505 | void evergreen_blit_fini(struct radeon_device *rdev) | ||
506 | { | ||
507 | int r; | ||
508 | |||
509 | if (rdev->r600_blit.shader_obj == NULL) | ||
510 | return; | ||
511 | /* If we can't reserve the bo, unref should be enough to destroy | ||
512 | * it when it becomes idle. | ||
513 | */ | ||
514 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
515 | if (!r) { | ||
516 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
517 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
518 | } | ||
519 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | ||
520 | } | ||
521 | |||
522 | static int evergreen_vb_ib_get(struct radeon_device *rdev) | ||
523 | { | ||
524 | int r; | ||
525 | r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); | ||
526 | if (r) { | ||
527 | DRM_ERROR("failed to get IB for vertex buffer\n"); | ||
528 | return r; | ||
529 | } | ||
530 | |||
531 | rdev->r600_blit.vb_total = 64*1024; | ||
532 | rdev->r600_blit.vb_used = 0; | ||
533 | return 0; | ||
534 | } | ||
535 | |||
536 | static void evergreen_vb_ib_put(struct radeon_device *rdev) | ||
537 | { | ||
538 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); | ||
539 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | ||
540 | } | ||
541 | |||
542 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | ||
543 | { | ||
544 | int r; | ||
545 | int ring_size, line_size; | ||
546 | int max_size; | ||
547 | /* loops of emits + fence emit possible */ | ||
548 | int dwords_per_loop = 74, num_loops; | ||
549 | |||
550 | r = evergreen_vb_ib_get(rdev); | ||
551 | if (r) | ||
552 | return r; | ||
553 | |||
554 | /* 8 bpp vs 32 bpp for xfer unit */ | ||
555 | if (size_bytes & 3) | ||
556 | line_size = 8192; | ||
557 | else | ||
558 | line_size = 8192 * 4; | ||
559 | |||
560 | max_size = 8192 * line_size; | ||
561 | |||
562 | /* major loops cover the max size transfer */ | ||
563 | num_loops = ((size_bytes + max_size) / max_size); | ||
564 | /* minor loops cover the extra non aligned bits */ | ||
565 | num_loops += ((size_bytes % line_size) ? 1 : 0); | ||
566 | /* calculate number of loops correctly */ | ||
567 | ring_size = num_loops * dwords_per_loop; | ||
568 | /* set default + shaders */ | ||
569 | ring_size += 36; /* shaders + def state */ | ||
570 | ring_size += 10; /* fence emit for VB IB */ | ||
571 | ring_size += 5; /* done copy */ | ||
572 | ring_size += 10; /* fence emit for done copy */ | ||
573 | r = radeon_ring_lock(rdev, ring_size); | ||
574 | if (r) | ||
575 | return r; | ||
576 | |||
577 | set_default_state(rdev); /* 20 */ | ||
578 | set_shaders(rdev); /* 16 */ | ||
579 | return 0; | ||
580 | } | ||
581 | |||
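
A worked example of the ring-size estimate made in evergreen_blit_prepare_copy() above, for an illustrative 1 MiB, dword-aligned copy (the size is made up; the per-loop and fixed dword counts are the ones used in the function):

```c
#include <stdio.h>

int main(void)
{
	int size_bytes = 1024 * 1024;                          /* 1 MiB, multiple of 4 */
	int line_size = (size_bytes & 3) ? 8192 : 8192 * 4;    /* 32768 for aligned copies */
	int max_size = 8192 * line_size;                       /* one major loop covers 256 MiB */

	int num_loops = (size_bytes + max_size) / max_size;    /* 1 */
	num_loops += (size_bytes % line_size) ? 1 : 0;         /* +0, already line aligned */

	int ring_size = num_loops * 74;                        /* per-loop emits */
	ring_size += 36 + 10 + 5 + 10;  /* default state + shaders, VB fence, done copy, fence */

	printf("ring_size = %d dwords\n", ring_size);          /* 135 */
	return 0;
}
```
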
582 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) | ||
583 | { | ||
584 | int r; | ||
585 | |||
586 | if (rdev->r600_blit.vb_ib) | ||
587 | evergreen_vb_ib_put(rdev); | ||
588 | |||
589 | if (fence) | ||
590 | r = radeon_fence_emit(rdev, fence); | ||
591 | |||
592 | radeon_ring_unlock_commit(rdev); | ||
593 | } | ||
594 | |||
595 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
596 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
597 | int size_bytes) | ||
598 | { | ||
599 | int max_bytes; | ||
600 | u64 vb_gpu_addr; | ||
601 | u32 *vb; | ||
602 | |||
603 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, | ||
604 | size_bytes, rdev->r600_blit.vb_used); | ||
605 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
606 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | ||
607 | max_bytes = 8192; | ||
608 | |||
609 | while (size_bytes) { | ||
610 | int cur_size = size_bytes; | ||
611 | int src_x = src_gpu_addr & 255; | ||
612 | int dst_x = dst_gpu_addr & 255; | ||
613 | int h = 1; | ||
614 | src_gpu_addr = src_gpu_addr & ~255; | ||
615 | dst_gpu_addr = dst_gpu_addr & ~255; | ||
616 | |||
617 | if (!src_x && !dst_x) { | ||
618 | h = (cur_size / max_bytes); | ||
619 | if (h > 8192) | ||
620 | h = 8192; | ||
621 | if (h == 0) | ||
622 | h = 1; | ||
623 | else | ||
624 | cur_size = max_bytes; | ||
625 | } else { | ||
626 | if (cur_size > max_bytes) | ||
627 | cur_size = max_bytes; | ||
628 | if (cur_size > (max_bytes - dst_x)) | ||
629 | cur_size = (max_bytes - dst_x); | ||
630 | if (cur_size > (max_bytes - src_x)) | ||
631 | cur_size = (max_bytes - src_x); | ||
632 | } | ||
633 | |||
634 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
635 | WARN_ON(1); | ||
636 | } | ||
637 | |||
638 | vb[0] = i2f(dst_x); | ||
639 | vb[1] = 0; | ||
640 | vb[2] = i2f(src_x); | ||
641 | vb[3] = 0; | ||
642 | |||
643 | vb[4] = i2f(dst_x); | ||
644 | vb[5] = i2f(h); | ||
645 | vb[6] = i2f(src_x); | ||
646 | vb[7] = i2f(h); | ||
647 | |||
648 | vb[8] = i2f(dst_x + cur_size); | ||
649 | vb[9] = i2f(h); | ||
650 | vb[10] = i2f(src_x + cur_size); | ||
651 | vb[11] = i2f(h); | ||
652 | |||
653 | /* src 10 */ | ||
654 | set_tex_resource(rdev, FMT_8, | ||
655 | src_x + cur_size, h, src_x + cur_size, | ||
656 | src_gpu_addr); | ||
657 | |||
658 | /* 5 */ | ||
659 | cp_set_surface_sync(rdev, | ||
660 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | ||
661 | |||
662 | |||
663 | /* dst 17 */ | ||
664 | set_render_target(rdev, COLOR_8, | ||
665 | dst_x + cur_size, h, | ||
666 | dst_gpu_addr); | ||
667 | |||
668 | /* scissors 12 */ | ||
669 | set_scissors(rdev, dst_x, 0, dst_x + cur_size, h); | ||
670 | |||
671 | /* 15 */ | ||
672 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | ||
673 | set_vtx_resource(rdev, vb_gpu_addr); | ||
674 | |||
675 | /* draw 10 */ | ||
676 | draw_auto(rdev); | ||
677 | |||
678 | /* 5 */ | ||
679 | cp_set_surface_sync(rdev, | ||
680 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
681 | cur_size * h, dst_gpu_addr); | ||
682 | |||
683 | vb += 12; | ||
684 | rdev->r600_blit.vb_used += 12 * 4; | ||
685 | |||
686 | src_gpu_addr += cur_size * h; | ||
687 | dst_gpu_addr += cur_size * h; | ||
688 | size_bytes -= cur_size * h; | ||
689 | } | ||
690 | } else { | ||
691 | max_bytes = 8192 * 4; | ||
692 | |||
693 | while (size_bytes) { | ||
694 | int cur_size = size_bytes; | ||
695 | int src_x = (src_gpu_addr & 255); | ||
696 | int dst_x = (dst_gpu_addr & 255); | ||
697 | int h = 1; | ||
698 | src_gpu_addr = src_gpu_addr & ~255; | ||
699 | dst_gpu_addr = dst_gpu_addr & ~255; | ||
700 | |||
701 | if (!src_x && !dst_x) { | ||
702 | h = (cur_size / max_bytes); | ||
703 | if (h > 8192) | ||
704 | h = 8192; | ||
705 | if (h == 0) | ||
706 | h = 1; | ||
707 | else | ||
708 | cur_size = max_bytes; | ||
709 | } else { | ||
710 | if (cur_size > max_bytes) | ||
711 | cur_size = max_bytes; | ||
712 | if (cur_size > (max_bytes - dst_x)) | ||
713 | cur_size = (max_bytes - dst_x); | ||
714 | if (cur_size > (max_bytes - src_x)) | ||
715 | cur_size = (max_bytes - src_x); | ||
716 | } | ||
717 | |||
718 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | ||
719 | WARN_ON(1); | ||
720 | } | ||
721 | |||
722 | vb[0] = i2f(dst_x / 4); | ||
723 | vb[1] = 0; | ||
724 | vb[2] = i2f(src_x / 4); | ||
725 | vb[3] = 0; | ||
726 | |||
727 | vb[4] = i2f(dst_x / 4); | ||
728 | vb[5] = i2f(h); | ||
729 | vb[6] = i2f(src_x / 4); | ||
730 | vb[7] = i2f(h); | ||
731 | |||
732 | vb[8] = i2f((dst_x + cur_size) / 4); | ||
733 | vb[9] = i2f(h); | ||
734 | vb[10] = i2f((src_x + cur_size) / 4); | ||
735 | vb[11] = i2f(h); | ||
736 | |||
737 | /* src 10 */ | ||
738 | set_tex_resource(rdev, FMT_8_8_8_8, | ||
739 | (src_x + cur_size) / 4, | ||
740 | h, (src_x + cur_size) / 4, | ||
741 | src_gpu_addr); | ||
742 | /* 5 */ | ||
743 | cp_set_surface_sync(rdev, | ||
744 | PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); | ||
745 | |||
746 | /* dst 17 */ | ||
747 | set_render_target(rdev, COLOR_8_8_8_8, | ||
748 | (dst_x + cur_size) / 4, h, | ||
749 | dst_gpu_addr); | ||
750 | |||
751 | /* scissors 12 */ | ||
752 | set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h); | ||
753 | |||
754 | /* Vertex buffer setup 15 */ | ||
755 | vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; | ||
756 | set_vtx_resource(rdev, vb_gpu_addr); | ||
757 | |||
758 | /* draw 10 */ | ||
759 | draw_auto(rdev); | ||
760 | |||
761 | /* 5 */ | ||
762 | cp_set_surface_sync(rdev, | ||
763 | PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, | ||
764 | cur_size * h, dst_gpu_addr); | ||
765 | |||
766 | /* 74 ring dwords per loop */ | ||
767 | vb += 12; | ||
768 | rdev->r600_blit.vb_used += 12 * 4; | ||
769 | |||
770 | src_gpu_addr += cur_size * h; | ||
771 | dst_gpu_addr += cur_size * h; | ||
772 | size_bytes -= cur_size * h; | ||
773 | } | ||
774 | } | ||
775 | } | ||
776 | |||
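Both copy paths above express the transfer as a series of textured rectangles: when source and destination share the same 256-byte alignment the loop takes full 8192-unit lines with a height of up to 8192, otherwise it falls back to single-line chunks clipped so that neither x offset runs past the 8192-unit window (the i2f() calls simply reinterpret those integer coordinates as IEEE-754 floats for the vertex buffer). The following userspace sketch reproduces the partitioning for the byte-per-pixel path; it is pure arithmetic with no GPU involved.

#include <stdint.h>
#include <stdio.h>

/* Reproduces the rectangle partitioning of evergreen_kms_blit_copy() for the
 * byte-aligned (FMT_8) path: max_bytes is one 8192-pixel line, and h only
 * grows past 1 when src and dst share the same 256-byte offset. */
static void partition_copy(uint64_t src, uint64_t dst, int size_bytes)
{
	const int max_bytes = 8192;

	while (size_bytes) {
		int cur_size = size_bytes;
		int src_x = src & 255;
		int dst_x = dst & 255;
		int h = 1;

		src &= ~255ULL;
		dst &= ~255ULL;

		if (!src_x && !dst_x) {
			h = cur_size / max_bytes;
			if (h > 8192)
				h = 8192;
			if (h == 0)
				h = 1;
			else
				cur_size = max_bytes;
		} else {
			if (cur_size > max_bytes)
				cur_size = max_bytes;
			if (cur_size > max_bytes - dst_x)
				cur_size = max_bytes - dst_x;
			if (cur_size > max_bytes - src_x)
				cur_size = max_bytes - src_x;
		}

		printf("rect: src_x=%d dst_x=%d w=%d h=%d (%d bytes)\n",
		       src_x, dst_x, cur_size, h, cur_size * h);

		src += (uint64_t)cur_size * h;
		dst += (uint64_t)cur_size * h;
		size_bytes -= cur_size * h;
	}
}

int main(void)
{
	partition_copy(0x100040, 0x200000, 300000); /* misaligned src offset */
	return 0;
}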
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c new file mode 100644 index 000000000000..5d5045027b46 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c | |||
@@ -0,0 +1,359 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Alex Deucher <alexander.deucher@amd.com> | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/kernel.h> | ||
29 | |||
30 | /* | ||
31 | * evergreen cards need to use the 3D engine to blit data which requires | ||
32 | * quite a bit of hw state setup. Rather than pull the whole 3D driver | ||
33 | * (which normally generates the 3D state) into the DRM, we opt to use | ||
34 | * statically generated state tables. The register state and shaders | ||
35 | * were hand generated to support blitting functionality. See the 3D | ||
36 | * driver or documentation for descriptions of the registers and | ||
37 | * shader instructions. | ||
38 | */ | ||
39 | |||
40 | const u32 evergreen_default_state[] = | ||
41 | { | ||
42 | 0xc0012800, /* CONTEXT_CONTROL */ | ||
43 | 0x80000000, | ||
44 | 0x80000000, | ||
45 | |||
46 | 0xc0016900, | ||
47 | 0x0000023b, | ||
48 | 0x00000000, /* SQ_LDS_ALLOC_PS */ | ||
49 | |||
50 | 0xc0066900, | ||
51 | 0x00000240, | ||
52 | 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ | ||
53 | 0x00000000, | ||
54 | 0x00000000, | ||
55 | 0x00000000, | ||
56 | 0x00000000, | ||
57 | 0x00000000, | ||
58 | |||
59 | 0xc0046900, | ||
60 | 0x00000247, | ||
61 | 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ | ||
62 | 0x00000000, | ||
63 | 0x00000000, | ||
64 | 0x00000000, | ||
65 | |||
66 | 0xc0026f00, | ||
67 | 0x00000000, | ||
68 | 0x00000000, /* SQ_VTX_BASE_VTX_LOC */ | ||
69 | 0x00000000, | ||
70 | |||
71 | 0xc0026900, | ||
72 | 0x00000010, | ||
73 | 0x00000000, /* DB_Z_INFO */ | ||
74 | 0x00000000, /* DB_STENCIL_INFO */ | ||
75 | |||
76 | |||
77 | 0xc0016900, | ||
78 | 0x00000200, | ||
79 | 0x00000000, /* DB_DEPTH_CONTROL */ | ||
80 | |||
81 | 0xc0066900, | ||
82 | 0x00000000, | ||
83 | 0x00000060, /* DB_RENDER_CONTROL */ | ||
84 | 0x00000000, /* DB_COUNT_CONTROL */ | ||
85 | 0x00000000, /* DB_DEPTH_VIEW */ | ||
86 | 0x0000002a, /* DB_RENDER_OVERRIDE */ | ||
87 | 0x00000000, /* DB_RENDER_OVERRIDE2 */ | ||
88 | 0x00000000, /* DB_HTILE_DATA_BASE */ | ||
89 | |||
90 | 0xc0026900, | ||
91 | 0x0000000a, | ||
92 | 0x00000000, /* DB_STENCIL_CLEAR */ | ||
93 | 0x00000000, /* DB_DEPTH_CLEAR */ | ||
94 | |||
95 | 0xc0016900, | ||
96 | 0x000002dc, | ||
97 | 0x0000aa00, /* DB_ALPHA_TO_MASK */ | ||
98 | |||
99 | 0xc0016900, | ||
100 | 0x00000080, | ||
101 | 0x00000000, /* PA_SC_WINDOW_OFFSET */ | ||
102 | |||
103 | 0xc00d6900, | ||
104 | 0x00000083, | ||
105 | 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ | ||
106 | 0x00000000, /* PA_SC_CLIPRECT_0_TL */ | ||
107 | 0x20002000, /* PA_SC_CLIPRECT_0_BR */ | ||
108 | 0x00000000, | ||
109 | 0x20002000, | ||
110 | 0x00000000, | ||
111 | 0x20002000, | ||
112 | 0x00000000, | ||
113 | 0x20002000, | ||
114 | 0xaaaaaaaa, /* PA_SC_EDGERULE */ | ||
115 | 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ | ||
116 | 0x0000000f, /* CB_TARGET_MASK */ | ||
117 | 0x0000000f, /* CB_SHADER_MASK */ | ||
118 | |||
119 | 0xc0226900, | ||
120 | 0x00000094, | ||
121 | 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ | ||
122 | 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ | ||
123 | 0x80000000, | ||
124 | 0x20002000, | ||
125 | 0x80000000, | ||
126 | 0x20002000, | ||
127 | 0x80000000, | ||
128 | 0x20002000, | ||
129 | 0x80000000, | ||
130 | 0x20002000, | ||
131 | 0x80000000, | ||
132 | 0x20002000, | ||
133 | 0x80000000, | ||
134 | 0x20002000, | ||
135 | 0x80000000, | ||
136 | 0x20002000, | ||
137 | 0x80000000, | ||
138 | 0x20002000, | ||
139 | 0x80000000, | ||
140 | 0x20002000, | ||
141 | 0x80000000, | ||
142 | 0x20002000, | ||
143 | 0x80000000, | ||
144 | 0x20002000, | ||
145 | 0x80000000, | ||
146 | 0x20002000, | ||
147 | 0x80000000, | ||
148 | 0x20002000, | ||
149 | 0x80000000, | ||
150 | 0x20002000, | ||
151 | 0x80000000, | ||
152 | 0x20002000, | ||
153 | 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ | ||
154 | 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ | ||
155 | |||
156 | 0xc0016900, | ||
157 | 0x000000d4, | ||
158 | 0x00000000, /* SX_MISC */ | ||
159 | |||
160 | 0xc0026900, | ||
161 | 0x00000292, | ||
162 | 0x00000000, /* PA_SC_MODE_CNTL_0 */ | ||
163 | 0x00000000, /* PA_SC_MODE_CNTL_1 */ | ||
164 | |||
165 | 0xc0106900, | ||
166 | 0x00000300, | ||
167 | 0x00000000, /* PA_SC_LINE_CNTL */ | ||
168 | 0x00000000, /* PA_SC_AA_CONFIG */ | ||
169 | 0x00000005, /* PA_SU_VTX_CNTL */ | ||
170 | 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ | ||
171 | 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ | ||
172 | 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ | ||
173 | 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ | ||
174 | 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */ | ||
175 | 0x00000000, /* */ | ||
176 | 0x00000000, /* */ | ||
177 | 0x00000000, /* */ | ||
178 | 0x00000000, /* */ | ||
179 | 0x00000000, /* */ | ||
180 | 0x00000000, /* */ | ||
181 | 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */ | ||
182 | 0xffffffff, /* PA_SC_AA_MASK */ | ||
183 | |||
184 | 0xc00d6900, | ||
185 | 0x00000202, | ||
186 | 0x00cc0010, /* CB_COLOR_CONTROL */ | ||
187 | 0x00000210, /* DB_SHADER_CONTROL */ | ||
188 | 0x00010000, /* PA_CL_CLIP_CNTL */ | ||
189 | 0x00000004, /* PA_SU_SC_MODE_CNTL */ | ||
190 | 0x00000100, /* PA_CL_VTE_CNTL */ | ||
191 | 0x00000000, /* PA_CL_VS_OUT_CNTL */ | ||
192 | 0x00000000, /* PA_CL_NANINF_CNTL */ | ||
193 | 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ | ||
194 | 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ | ||
195 | 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ | ||
196 | 0x00000000, /* */ | ||
197 | 0x00000000, /* */ | ||
198 | 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */ | ||
199 | |||
200 | 0xc0066900, | ||
201 | 0x000002de, | ||
202 | 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ | ||
203 | 0x00000000, /* */ | ||
204 | 0x00000000, /* */ | ||
205 | 0x00000000, /* */ | ||
206 | 0x00000000, /* */ | ||
207 | 0x00000000, /* */ | ||
208 | |||
209 | 0xc0016900, | ||
210 | 0x00000229, | ||
211 | 0x00000000, /* SQ_PGM_START_FS */ | ||
212 | |||
213 | 0xc0016900, | ||
214 | 0x0000022a, | ||
215 | 0x00000000, /* SQ_PGM_RESOURCES_FS */ | ||
216 | |||
217 | 0xc0096900, | ||
218 | 0x00000100, | ||
219 | 0x00ffffff, /* VGT_MAX_VTX_INDX */ | ||
220 | 0x00000000, /* */ | ||
221 | 0x00000000, /* */ | ||
222 | 0x00000000, /* */ | ||
223 | 0x00000000, /* SX_ALPHA_TEST_CONTROL */ | ||
224 | 0x00000000, /* CB_BLEND_RED */ | ||
225 | 0x00000000, /* CB_BLEND_GREEN */ | ||
226 | 0x00000000, /* CB_BLEND_BLUE */ | ||
227 | 0x00000000, /* CB_BLEND_ALPHA */ | ||
228 | |||
229 | 0xc0026900, | ||
230 | 0x000002a8, | ||
231 | 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ | ||
232 | 0x00000000, /* */ | ||
233 | |||
234 | 0xc0026900, | ||
235 | 0x000002ad, | ||
236 | 0x00000000, /* VGT_REUSE_OFF */ | ||
237 | 0x00000000, /* */ | ||
238 | |||
239 | 0xc0116900, | ||
240 | 0x00000280, | ||
241 | 0x00000000, /* PA_SU_POINT_SIZE */ | ||
242 | 0x00000000, /* PA_SU_POINT_MINMAX */ | ||
243 | 0x00000008, /* PA_SU_LINE_CNTL */ | ||
244 | 0x00000000, /* PA_SC_LINE_STIPPLE */ | ||
245 | 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ | ||
246 | 0x00000000, /* VGT_HOS_CNTL */ | ||
247 | 0x00000000, /* */ | ||
248 | 0x00000000, /* */ | ||
249 | 0x00000000, /* */ | ||
250 | 0x00000000, /* */ | ||
251 | 0x00000000, /* */ | ||
252 | 0x00000000, /* */ | ||
253 | 0x00000000, /* */ | ||
254 | 0x00000000, /* */ | ||
255 | 0x00000000, /* */ | ||
256 | 0x00000000, /* */ | ||
257 | 0x00000000, /* VGT_GS_MODE */ | ||
258 | |||
259 | 0xc0016900, | ||
260 | 0x000002a1, | ||
261 | 0x00000000, /* VGT_PRIMITIVEID_EN */ | ||
262 | |||
263 | 0xc0016900, | ||
264 | 0x000002a5, | ||
265 | 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ | ||
266 | |||
267 | 0xc0016900, | ||
268 | 0x000002d5, | ||
269 | 0x00000000, /* VGT_SHADER_STAGES_EN */ | ||
270 | |||
271 | 0xc0026900, | ||
272 | 0x000002e5, | ||
273 | 0x00000000, /* VGT_STRMOUT_CONFIG */ | ||
274 | 0x00000000, /* */ | ||
275 | |||
276 | 0xc0016900, | ||
277 | 0x000001e0, | ||
278 | 0x00000000, /* CB_BLEND0_CONTROL */ | ||
279 | |||
280 | 0xc0016900, | ||
281 | 0x000001b1, | ||
282 | 0x00000000, /* SPI_VS_OUT_CONFIG */ | ||
283 | |||
284 | 0xc0016900, | ||
285 | 0x00000187, | ||
286 | 0x00000000, /* SPI_VS_OUT_ID_0 */ | ||
287 | |||
288 | 0xc0016900, | ||
289 | 0x00000191, | ||
290 | 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ | ||
291 | |||
292 | 0xc00b6900, | ||
293 | 0x000001b3, | ||
294 | 0x20000001, /* SPI_PS_IN_CONTROL_0 */ | ||
295 | 0x00000000, /* SPI_PS_IN_CONTROL_1 */ | ||
296 | 0x00000000, /* SPI_INTERP_CONTROL_0 */ | ||
297 | 0x00000000, /* SPI_INPUT_Z */ | ||
298 | 0x00000000, /* SPI_FOG_CNTL */ | ||
299 | 0x00100000, /* SPI_BARYC_CNTL */ | ||
300 | 0x00000000, /* SPI_PS_IN_CONTROL_2 */ | ||
301 | 0x00000000, /* */ | ||
302 | 0x00000000, /* */ | ||
303 | 0x00000000, /* */ | ||
304 | 0x00000000, /* */ | ||
305 | |||
306 | 0xc0036e00, /* SET_SAMPLER */ | ||
307 | 0x00000000, | ||
308 | 0x00000012, | ||
309 | 0x00000000, | ||
310 | 0x00000000, | ||
311 | }; | ||
312 | |||
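Every entry in the table above is a raw PM4 type-3 packet: bits 31:30 carry the packet type, bits 29:16 the payload length minus one, and bits 15:8 the opcode (0x69 = SET_CONTEXT_REG, 0x6e = SET_SAMPLER, matching the PACKET3_* numbering in r600d.h). A throwaway decoder makes the table easier to audit; the 0x28000 context-register base used below is my reading of the SET_CONTEXT_REG payload, by analogy with the 0x8000 PACKET3_SET_CONFIG_REG_OFFSET visible elsewhere in this patch, not something this file states.

#include <stdint.h>
#include <stdio.h>

/* Decode a PM4 type-3 header as used in evergreen_default_state[]:
 *   [31:30] packet type (3), [29:16] payload dwords - 1, [15:8] opcode.
 * For SET_CONTEXT_REG payloads the first dword is (register - 0x28000) >> 2;
 * this is offered as a reading aid, not driver code. */
struct pm4_hdr {
	unsigned type;
	unsigned count;   /* payload dwords - 1 */
	unsigned opcode;
};

static struct pm4_hdr pm4_decode(uint32_t w)
{
	struct pm4_hdr h;

	h.type   = w >> 30;
	h.count  = (w >> 16) & 0x3fff;
	h.opcode = (w >> 8) & 0xff;
	return h;
}

int main(void)
{
	/* First two packets of evergreen_default_state[] */
	uint32_t hdr1 = 0xc0012800;               /* CONTEXT_CONTROL */
	uint32_t hdr2 = 0xc0016900, ofs2 = 0x23b; /* SET_CONTEXT_REG, SQ_LDS_ALLOC_PS */
	struct pm4_hdr a = pm4_decode(hdr1), b = pm4_decode(hdr2);

	printf("0x%08x: type %u, opcode 0x%02x, %u payload dwords\n",
	       hdr1, a.type, a.opcode, a.count + 1);
	printf("0x%08x: type %u, opcode 0x%02x, %u payload dwords, reg 0x%05x\n",
	       hdr2, b.type, b.opcode, b.count + 1, 0x28000 + (ofs2 << 2));
	return 0;
}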
313 | const u32 evergreen_vs[] = | ||
314 | { | ||
315 | 0x00000004, | ||
316 | 0x80800400, | ||
317 | 0x0000a03c, | ||
318 | 0x95000688, | ||
319 | 0x00004000, | ||
320 | 0x15200688, | ||
321 | 0x00000000, | ||
322 | 0x00000000, | ||
323 | 0x3c000000, | ||
324 | 0x67961001, | ||
325 | 0x00080000, | ||
326 | 0x00000000, | ||
327 | 0x1c000000, | ||
328 | 0x67961000, | ||
329 | 0x00000008, | ||
330 | 0x00000000, | ||
331 | }; | ||
332 | |||
333 | const u32 evergreen_ps[] = | ||
334 | { | ||
335 | 0x00000003, | ||
336 | 0xa00c0000, | ||
337 | 0x00000008, | ||
338 | 0x80400000, | ||
339 | 0x00000000, | ||
340 | 0x95200688, | ||
341 | 0x00380400, | ||
342 | 0x00146b10, | ||
343 | 0x00380000, | ||
344 | 0x20146b10, | ||
345 | 0x00380400, | ||
346 | 0x40146b00, | ||
347 | 0x80380000, | ||
348 | 0x60146b00, | ||
349 | 0x00000000, | ||
350 | 0x00000000, | ||
351 | 0x00000010, | ||
352 | 0x000d1000, | ||
353 | 0xb0800000, | ||
354 | 0x00000000, | ||
355 | }; | ||
356 | |||
357 | const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps); | ||
358 | const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs); | ||
359 | const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state); | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h new file mode 100644 index 000000000000..bb8d6c751595 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef EVERGREEN_BLIT_SHADERS_H | ||
26 | #define EVERGREEN_BLIT_SHADERS_H | ||
27 | |||
28 | extern const u32 evergreen_ps[]; | ||
29 | extern const u32 evergreen_vs[]; | ||
30 | extern const u32 evergreen_default_state[]; | ||
31 | |||
32 | extern const u32 evergreen_ps_size, evergreen_vs_size; | ||
33 | extern const u32 evergreen_default_size; | ||
34 | |||
35 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 9b7532dd30f7..319aa9752d40 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -802,6 +802,11 @@ | |||
802 | #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 | 802 | #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 |
803 | #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c | 803 | #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c |
804 | 804 | ||
805 | #define PA_SC_SCREEN_SCISSOR_TL 0x28030 | ||
806 | #define PA_SC_GENERIC_SCISSOR_TL 0x28240 | ||
807 | #define PA_SC_WINDOW_SCISSOR_TL 0x28204 | ||
808 | #define VGT_PRIMITIVE_TYPE 0x8958 | ||
809 | |||
805 | #define DB_DEPTH_CONTROL 0x28800 | 810 | #define DB_DEPTH_CONTROL 0x28800 |
806 | #define DB_DEPTH_VIEW 0x28008 | 811 | #define DB_DEPTH_VIEW 0x28008 |
807 | #define DB_HTILE_DATA_BASE 0x28014 | 812 | #define DB_HTILE_DATA_BASE 0x28014 |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e151f16a8f86..7712c055b3e8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
675 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 675 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
676 | } | 676 | } |
677 | 677 | ||
678 | int r100_wb_init(struct radeon_device *rdev) | ||
679 | { | ||
680 | int r; | ||
681 | |||
682 | if (rdev->wb.wb_obj == NULL) { | ||
683 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | ||
684 | RADEON_GEM_DOMAIN_GTT, | ||
685 | &rdev->wb.wb_obj); | ||
686 | if (r) { | ||
687 | dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); | ||
688 | return r; | ||
689 | } | ||
690 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
691 | if (unlikely(r != 0)) | ||
692 | return r; | ||
693 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
694 | &rdev->wb.gpu_addr); | ||
695 | if (r) { | ||
696 | dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); | ||
697 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
698 | return r; | ||
699 | } | ||
700 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | ||
701 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
702 | if (r) { | ||
703 | dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); | ||
704 | return r; | ||
705 | } | ||
706 | } | ||
707 | WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); | ||
708 | WREG32(R_00070C_CP_RB_RPTR_ADDR, | ||
709 | S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); | ||
710 | WREG32(R_000770_SCRATCH_UMSK, 0xff); | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | void r100_wb_disable(struct radeon_device *rdev) | ||
715 | { | ||
716 | WREG32(R_000770_SCRATCH_UMSK, 0); | ||
717 | } | ||
718 | |||
719 | void r100_wb_fini(struct radeon_device *rdev) | ||
720 | { | ||
721 | int r; | ||
722 | |||
723 | r100_wb_disable(rdev); | ||
724 | if (rdev->wb.wb_obj) { | ||
725 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
726 | if (unlikely(r != 0)) { | ||
727 | dev_err(rdev->dev, "(%d) can't finish WB\n", r); | ||
728 | return; | ||
729 | } | ||
730 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
731 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
732 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
733 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
734 | rdev->wb.wb = NULL; | ||
735 | rdev->wb.wb_obj = NULL; | ||
736 | } | ||
737 | } | ||
738 | |||
739 | int r100_copy_blit(struct radeon_device *rdev, | 678 | int r100_copy_blit(struct radeon_device *rdev, |
740 | uint64_t src_offset, | 679 | uint64_t src_offset, |
741 | uint64_t dst_offset, | 680 | uint64_t dst_offset, |
@@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
996 | WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); | 935 | WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); |
997 | tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | | 936 | tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | |
998 | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | | 937 | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | |
999 | REG_SET(RADEON_MAX_FETCH, max_fetch) | | 938 | REG_SET(RADEON_MAX_FETCH, max_fetch)); |
1000 | RADEON_RB_NO_UPDATE); | ||
1001 | #ifdef __BIG_ENDIAN | 939 | #ifdef __BIG_ENDIAN |
1002 | tmp |= RADEON_BUF_SWAP_32BIT; | 940 | tmp |= RADEON_BUF_SWAP_32BIT; |
1003 | #endif | 941 | #endif |
1004 | WREG32(RADEON_CP_RB_CNTL, tmp); | 942 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); |
1005 | 943 | ||
1006 | /* Set ring address */ | 944 | /* Set ring address */ |
1007 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); | 945 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); |
1008 | WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); | 946 | WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); |
1009 | /* Force read & write ptr to 0 */ | 947 | /* Force read & write ptr to 0 */ |
1010 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); | 948 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); |
1011 | WREG32(RADEON_CP_RB_RPTR_WR, 0); | 949 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
1012 | WREG32(RADEON_CP_RB_WPTR, 0); | 950 | WREG32(RADEON_CP_RB_WPTR, 0); |
951 | |||
952 | /* set the wb address whether it's enabled or not */ | ||
953 | WREG32(R_00070C_CP_RB_RPTR_ADDR, | ||
954 | S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); | ||
955 | WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); | ||
956 | |||
957 | if (rdev->wb.enabled) | ||
958 | WREG32(R_000770_SCRATCH_UMSK, 0xff); | ||
959 | else { | ||
960 | tmp |= RADEON_RB_NO_UPDATE; | ||
961 | WREG32(R_000770_SCRATCH_UMSK, 0); | ||
962 | } | ||
963 | |||
1013 | WREG32(RADEON_CP_RB_CNTL, tmp); | 964 | WREG32(RADEON_CP_RB_CNTL, tmp); |
1014 | udelay(10); | 965 | udelay(10); |
1015 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 966 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
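The reworked CP_RB_CNTL handling above builds the control word without RADEON_RB_NO_UPDATE and only ORs the bit back in when writeback is disabled, so the CP keeps shadowing the read pointer to memory whenever a writeback page is available. A small sketch of that decision; the bit positions below are placeholders, the real ones live in radeon_reg.h.

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout; real values come from radeon_reg.h. */
#define RB_BUFSZ(x)   ((x) << 0)
#define RB_BLKSZ(x)   ((x) << 8)
#define MAX_FETCH(x)  ((x) << 18)
#define RB_NO_UPDATE  (1u << 27)

/* Build CP_RB_CNTL the way the new r100_cp_init() does: writeback of the
 * read pointer stays enabled unless the writeback page is unavailable. */
static uint32_t cp_rb_cntl(unsigned bufsz, unsigned blksz,
			   unsigned max_fetch, int wb_enabled)
{
	uint32_t tmp = RB_BUFSZ(bufsz) | RB_BLKSZ(blksz) | MAX_FETCH(max_fetch);

	if (!wb_enabled)
		tmp |= RB_NO_UPDATE;  /* rptr is then read from the register only */
	return tmp;
}

int main(void)
{
	printf("wb on : 0x%08x\n", cp_rb_cntl(12, 9, 2, 1));
	printf("wb off: 0x%08x\n", cp_rb_cntl(12, 9, 2, 0));
	return 0;
}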
@@ -1050,6 +1001,7 @@ void r100_cp_disable(struct radeon_device *rdev) | |||
1050 | rdev->cp.ready = false; | 1001 | rdev->cp.ready = false; |
1051 | WREG32(RADEON_CP_CSQ_MODE, 0); | 1002 | WREG32(RADEON_CP_CSQ_MODE, 0); |
1052 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 1003 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
1004 | WREG32(R_000770_SCRATCH_UMSK, 0); | ||
1053 | if (r100_gui_wait_for_idle(rdev)) { | 1005 | if (r100_gui_wait_for_idle(rdev)) { |
1054 | printk(KERN_WARNING "Failed to wait GUI idle while " | 1006 | printk(KERN_WARNING "Failed to wait GUI idle while " |
1055 | "programming pipes. Bad things might happen.\n"); | 1007 | "programming pipes. Bad things might happen.\n"); |
@@ -3734,6 +3686,12 @@ static int r100_startup(struct radeon_device *rdev) | |||
3734 | if (r) | 3686 | if (r) |
3735 | return r; | 3687 | return r; |
3736 | } | 3688 | } |
3689 | |||
3690 | /* allocate wb buffer */ | ||
3691 | r = radeon_wb_init(rdev); | ||
3692 | if (r) | ||
3693 | return r; | ||
3694 | |||
3737 | /* Enable IRQ */ | 3695 | /* Enable IRQ */ |
3738 | r100_irq_set(rdev); | 3696 | r100_irq_set(rdev); |
3739 | rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 3697 | rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -3743,9 +3701,6 @@ static int r100_startup(struct radeon_device *rdev) | |||
3743 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 3701 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
3744 | return r; | 3702 | return r; |
3745 | } | 3703 | } |
3746 | r = r100_wb_init(rdev); | ||
3747 | if (r) | ||
3748 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
3749 | r = r100_ib_init(rdev); | 3704 | r = r100_ib_init(rdev); |
3750 | if (r) { | 3705 | if (r) { |
3751 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 3706 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -3779,7 +3734,7 @@ int r100_resume(struct radeon_device *rdev) | |||
3779 | int r100_suspend(struct radeon_device *rdev) | 3734 | int r100_suspend(struct radeon_device *rdev) |
3780 | { | 3735 | { |
3781 | r100_cp_disable(rdev); | 3736 | r100_cp_disable(rdev); |
3782 | r100_wb_disable(rdev); | 3737 | radeon_wb_disable(rdev); |
3783 | r100_irq_disable(rdev); | 3738 | r100_irq_disable(rdev); |
3784 | if (rdev->flags & RADEON_IS_PCI) | 3739 | if (rdev->flags & RADEON_IS_PCI) |
3785 | r100_pci_gart_disable(rdev); | 3740 | r100_pci_gart_disable(rdev); |
@@ -3789,7 +3744,7 @@ int r100_suspend(struct radeon_device *rdev) | |||
3789 | void r100_fini(struct radeon_device *rdev) | 3744 | void r100_fini(struct radeon_device *rdev) |
3790 | { | 3745 | { |
3791 | r100_cp_fini(rdev); | 3746 | r100_cp_fini(rdev); |
3792 | r100_wb_fini(rdev); | 3747 | radeon_wb_fini(rdev); |
3793 | r100_ib_fini(rdev); | 3748 | r100_ib_fini(rdev); |
3794 | radeon_gem_fini(rdev); | 3749 | radeon_gem_fini(rdev); |
3795 | if (rdev->flags & RADEON_IS_PCI) | 3750 | if (rdev->flags & RADEON_IS_PCI) |
@@ -3902,7 +3857,7 @@ int r100_init(struct radeon_device *rdev) | |||
3902 | /* Somethings want wront with the accel init stop accel */ | 3857 | /* Somethings want wront with the accel init stop accel */ |
3903 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 3858 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
3904 | r100_cp_fini(rdev); | 3859 | r100_cp_fini(rdev); |
3905 | r100_wb_fini(rdev); | 3860 | radeon_wb_fini(rdev); |
3906 | r100_ib_fini(rdev); | 3861 | r100_ib_fini(rdev); |
3907 | radeon_irq_kms_fini(rdev); | 3862 | radeon_irq_kms_fini(rdev); |
3908 | if (rdev->flags & RADEON_IS_PCI) | 3863 | if (rdev->flags & RADEON_IS_PCI) |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index c827738ad7dd..34527e600fe9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev) | |||
1332 | if (r) | 1332 | if (r) |
1333 | return r; | 1333 | return r; |
1334 | } | 1334 | } |
1335 | |||
1336 | /* allocate wb buffer */ | ||
1337 | r = radeon_wb_init(rdev); | ||
1338 | if (r) | ||
1339 | return r; | ||
1340 | |||
1335 | /* Enable IRQ */ | 1341 | /* Enable IRQ */ |
1336 | r100_irq_set(rdev); | 1342 | r100_irq_set(rdev); |
1337 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 1343 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev) | |||
1341 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 1347 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
1342 | return r; | 1348 | return r; |
1343 | } | 1349 | } |
1344 | r = r100_wb_init(rdev); | ||
1345 | if (r) | ||
1346 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
1347 | r = r100_ib_init(rdev); | 1350 | r = r100_ib_init(rdev); |
1348 | if (r) { | 1351 | if (r) { |
1349 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 1352 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev) | |||
1379 | int r300_suspend(struct radeon_device *rdev) | 1382 | int r300_suspend(struct radeon_device *rdev) |
1380 | { | 1383 | { |
1381 | r100_cp_disable(rdev); | 1384 | r100_cp_disable(rdev); |
1382 | r100_wb_disable(rdev); | 1385 | radeon_wb_disable(rdev); |
1383 | r100_irq_disable(rdev); | 1386 | r100_irq_disable(rdev); |
1384 | if (rdev->flags & RADEON_IS_PCIE) | 1387 | if (rdev->flags & RADEON_IS_PCIE) |
1385 | rv370_pcie_gart_disable(rdev); | 1388 | rv370_pcie_gart_disable(rdev); |
@@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev) | |||
1391 | void r300_fini(struct radeon_device *rdev) | 1394 | void r300_fini(struct radeon_device *rdev) |
1392 | { | 1395 | { |
1393 | r100_cp_fini(rdev); | 1396 | r100_cp_fini(rdev); |
1394 | r100_wb_fini(rdev); | 1397 | radeon_wb_fini(rdev); |
1395 | r100_ib_fini(rdev); | 1398 | r100_ib_fini(rdev); |
1396 | radeon_gem_fini(rdev); | 1399 | radeon_gem_fini(rdev); |
1397 | if (rdev->flags & RADEON_IS_PCIE) | 1400 | if (rdev->flags & RADEON_IS_PCIE) |
@@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev) | |||
1484 | /* Somethings want wront with the accel init stop accel */ | 1487 | /* Somethings want wront with the accel init stop accel */ |
1485 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 1488 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
1486 | r100_cp_fini(rdev); | 1489 | r100_cp_fini(rdev); |
1487 | r100_wb_fini(rdev); | 1490 | radeon_wb_fini(rdev); |
1488 | r100_ib_fini(rdev); | 1491 | r100_ib_fini(rdev); |
1489 | radeon_irq_kms_fini(rdev); | 1492 | radeon_irq_kms_fini(rdev); |
1490 | if (rdev->flags & RADEON_IS_PCIE) | 1493 | if (rdev->flags & RADEON_IS_PCIE) |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 59f7bccc5be0..c387346f93a9 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -248,6 +248,12 @@ static int r420_startup(struct radeon_device *rdev) | |||
248 | return r; | 248 | return r; |
249 | } | 249 | } |
250 | r420_pipes_init(rdev); | 250 | r420_pipes_init(rdev); |
251 | |||
252 | /* allocate wb buffer */ | ||
253 | r = radeon_wb_init(rdev); | ||
254 | if (r) | ||
255 | return r; | ||
256 | |||
251 | /* Enable IRQ */ | 257 | /* Enable IRQ */ |
252 | r100_irq_set(rdev); | 258 | r100_irq_set(rdev); |
253 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 259 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev) | |||
258 | return r; | 264 | return r; |
259 | } | 265 | } |
260 | r420_cp_errata_init(rdev); | 266 | r420_cp_errata_init(rdev); |
261 | r = r100_wb_init(rdev); | ||
262 | if (r) { | ||
263 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
264 | } | ||
265 | r = r100_ib_init(rdev); | 267 | r = r100_ib_init(rdev); |
266 | if (r) { | 268 | if (r) { |
267 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 269 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
302 | { | 304 | { |
303 | r420_cp_errata_fini(rdev); | 305 | r420_cp_errata_fini(rdev); |
304 | r100_cp_disable(rdev); | 306 | r100_cp_disable(rdev); |
305 | r100_wb_disable(rdev); | 307 | radeon_wb_disable(rdev); |
306 | r100_irq_disable(rdev); | 308 | r100_irq_disable(rdev); |
307 | if (rdev->flags & RADEON_IS_PCIE) | 309 | if (rdev->flags & RADEON_IS_PCIE) |
308 | rv370_pcie_gart_disable(rdev); | 310 | rv370_pcie_gart_disable(rdev); |
@@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev) | |||
314 | void r420_fini(struct radeon_device *rdev) | 316 | void r420_fini(struct radeon_device *rdev) |
315 | { | 317 | { |
316 | r100_cp_fini(rdev); | 318 | r100_cp_fini(rdev); |
317 | r100_wb_fini(rdev); | 319 | radeon_wb_fini(rdev); |
318 | r100_ib_fini(rdev); | 320 | r100_ib_fini(rdev); |
319 | radeon_gem_fini(rdev); | 321 | radeon_gem_fini(rdev); |
320 | if (rdev->flags & RADEON_IS_PCIE) | 322 | if (rdev->flags & RADEON_IS_PCIE) |
@@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev) | |||
418 | /* Somethings want wront with the accel init stop accel */ | 420 | /* Somethings want wront with the accel init stop accel */ |
419 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 421 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
420 | r100_cp_fini(rdev); | 422 | r100_cp_fini(rdev); |
421 | r100_wb_fini(rdev); | 423 | radeon_wb_fini(rdev); |
422 | r100_ib_fini(rdev); | 424 | r100_ib_fini(rdev); |
423 | radeon_irq_kms_fini(rdev); | 425 | radeon_irq_kms_fini(rdev); |
424 | if (rdev->flags & RADEON_IS_PCIE) | 426 | if (rdev->flags & RADEON_IS_PCIE) |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 1458dee902dd..3c8677f9e385 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev) | |||
181 | if (r) | 181 | if (r) |
182 | return r; | 182 | return r; |
183 | } | 183 | } |
184 | |||
185 | /* allocate wb buffer */ | ||
186 | r = radeon_wb_init(rdev); | ||
187 | if (r) | ||
188 | return r; | ||
189 | |||
184 | /* Enable IRQ */ | 190 | /* Enable IRQ */ |
185 | rs600_irq_set(rdev); | 191 | rs600_irq_set(rdev); |
186 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 192 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev) | |||
190 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 196 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
191 | return r; | 197 | return r; |
192 | } | 198 | } |
193 | r = r100_wb_init(rdev); | ||
194 | if (r) | ||
195 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
196 | r = r100_ib_init(rdev); | 199 | r = r100_ib_init(rdev); |
197 | if (r) { | 200 | if (r) { |
198 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 201 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev) | |||
295 | /* Somethings want wront with the accel init stop accel */ | 298 | /* Somethings want wront with the accel init stop accel */ |
296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 299 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
297 | r100_cp_fini(rdev); | 300 | r100_cp_fini(rdev); |
298 | r100_wb_fini(rdev); | 301 | radeon_wb_fini(rdev); |
299 | r100_ib_fini(rdev); | 302 | r100_ib_fini(rdev); |
300 | radeon_irq_kms_fini(rdev); | 303 | radeon_irq_kms_fini(rdev); |
301 | rv370_pcie_gart_fini(rdev); | 304 | rv370_pcie_gart_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 7a04959ba0ee..7c5f855a43e6 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1918,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1918 | void r600_cp_stop(struct radeon_device *rdev) | 1918 | void r600_cp_stop(struct radeon_device *rdev) |
1919 | { | 1919 | { |
1920 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 1920 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
1921 | WREG32(SCRATCH_UMSK, 0); | ||
1921 | } | 1922 | } |
1922 | 1923 | ||
1923 | int r600_init_microcode(struct radeon_device *rdev) | 1924 | int r600_init_microcode(struct radeon_device *rdev) |
@@ -2150,7 +2151,7 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2150 | 2151 | ||
2151 | /* Set ring buffer size */ | 2152 | /* Set ring buffer size */ |
2152 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); | 2153 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); |
2153 | tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | 2154 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
2154 | #ifdef __BIG_ENDIAN | 2155 | #ifdef __BIG_ENDIAN |
2155 | tmp |= BUF_SWAP_32BIT; | 2156 | tmp |= BUF_SWAP_32BIT; |
2156 | #endif | 2157 | #endif |
@@ -2164,8 +2165,19 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2164 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 2165 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
2165 | WREG32(CP_RB_RPTR_WR, 0); | 2166 | WREG32(CP_RB_RPTR_WR, 0); |
2166 | WREG32(CP_RB_WPTR, 0); | 2167 | WREG32(CP_RB_WPTR, 0); |
2167 | WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); | 2168 | |
2168 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); | 2169 | /* set the wb address whether it's enabled or not */ |
2170 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | ||
2171 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | ||
2172 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | ||
2173 | |||
2174 | if (rdev->wb.enabled) | ||
2175 | WREG32(SCRATCH_UMSK, 0xff); | ||
2176 | else { | ||
2177 | tmp |= RB_NO_UPDATE; | ||
2178 | WREG32(SCRATCH_UMSK, 0); | ||
2179 | } | ||
2180 | |||
2169 | mdelay(1); | 2181 | mdelay(1); |
2170 | WREG32(CP_RB_CNTL, tmp); | 2182 | WREG32(CP_RB_CNTL, tmp); |
2171 | 2183 | ||
@@ -2217,9 +2229,10 @@ void r600_scratch_init(struct radeon_device *rdev) | |||
2217 | int i; | 2229 | int i; |
2218 | 2230 | ||
2219 | rdev->scratch.num_reg = 7; | 2231 | rdev->scratch.num_reg = 7; |
2232 | rdev->scratch.reg_base = SCRATCH_REG0; | ||
2220 | for (i = 0; i < rdev->scratch.num_reg; i++) { | 2233 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
2221 | rdev->scratch.free[i] = true; | 2234 | rdev->scratch.free[i] = true; |
2222 | rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4); | 2235 | rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); |
2223 | } | 2236 | } |
2224 | } | 2237 | } |
2225 | 2238 | ||
@@ -2263,88 +2276,34 @@ int r600_ring_test(struct radeon_device *rdev) | |||
2263 | return r; | 2276 | return r; |
2264 | } | 2277 | } |
2265 | 2278 | ||
2266 | void r600_wb_disable(struct radeon_device *rdev) | ||
2267 | { | ||
2268 | int r; | ||
2269 | |||
2270 | WREG32(SCRATCH_UMSK, 0); | ||
2271 | if (rdev->wb.wb_obj) { | ||
2272 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
2273 | if (unlikely(r != 0)) | ||
2274 | return; | ||
2275 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
2276 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
2277 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
2278 | } | ||
2279 | } | ||
2280 | |||
2281 | void r600_wb_fini(struct radeon_device *rdev) | ||
2282 | { | ||
2283 | r600_wb_disable(rdev); | ||
2284 | if (rdev->wb.wb_obj) { | ||
2285 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
2286 | rdev->wb.wb = NULL; | ||
2287 | rdev->wb.wb_obj = NULL; | ||
2288 | } | ||
2289 | } | ||
2290 | |||
2291 | int r600_wb_enable(struct radeon_device *rdev) | ||
2292 | { | ||
2293 | int r; | ||
2294 | |||
2295 | if (rdev->wb.wb_obj == NULL) { | ||
2296 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | ||
2297 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); | ||
2298 | if (r) { | ||
2299 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | ||
2300 | return r; | ||
2301 | } | ||
2302 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
2303 | if (unlikely(r != 0)) { | ||
2304 | r600_wb_fini(rdev); | ||
2305 | return r; | ||
2306 | } | ||
2307 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
2308 | &rdev->wb.gpu_addr); | ||
2309 | if (r) { | ||
2310 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
2311 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | ||
2312 | r600_wb_fini(rdev); | ||
2313 | return r; | ||
2314 | } | ||
2315 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | ||
2316 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
2317 | if (r) { | ||
2318 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); | ||
2319 | r600_wb_fini(rdev); | ||
2320 | return r; | ||
2321 | } | ||
2322 | } | ||
2323 | WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF); | ||
2324 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC); | ||
2325 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF); | ||
2326 | WREG32(SCRATCH_UMSK, 0xff); | ||
2327 | return 0; | ||
2328 | } | ||
2329 | |||
2330 | void r600_fence_ring_emit(struct radeon_device *rdev, | 2279 | void r600_fence_ring_emit(struct radeon_device *rdev, |
2331 | struct radeon_fence *fence) | 2280 | struct radeon_fence *fence) |
2332 | { | 2281 | { |
2333 | /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ | 2282 | if (rdev->wb.use_event) { |
2334 | 2283 | u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + | |
2335 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); | 2284 | (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); |
2336 | radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); | 2285 | /* EVENT_WRITE_EOP - flush caches, send int */ |
2337 | /* wait for 3D idle clean */ | 2286 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
2338 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2287 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); |
2339 | radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | 2288 | radeon_ring_write(rdev, addr & 0xffffffff); |
2340 | radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); | 2289 | radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); |
2341 | /* Emit fence sequence & fire IRQ */ | 2290 | radeon_ring_write(rdev, fence->seq); |
2342 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2291 | radeon_ring_write(rdev, 0); |
2343 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 2292 | } else { |
2344 | radeon_ring_write(rdev, fence->seq); | 2293 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); |
2345 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | 2294 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); |
2346 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); | 2295 | /* wait for 3D idle clean */ |
2347 | radeon_ring_write(rdev, RB_INT_STAT); | 2296 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
2297 | radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | ||
2298 | radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); | ||
2299 | /* Emit fence sequence & fire IRQ */ | ||
2300 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
2301 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | ||
2302 | radeon_ring_write(rdev, fence->seq); | ||
2303 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | ||
2304 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); | ||
2305 | radeon_ring_write(rdev, RB_INT_STAT); | ||
2306 | } | ||
2348 | } | 2307 | } |
2349 | 2308 | ||
2350 | int r600_copy_blit(struct radeon_device *rdev, | 2309 | int r600_copy_blit(struct radeon_device *rdev, |
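When rdev->wb.use_event is set, the fence now travels in a single EVENT_WRITE_EOP packet: the CP writes the 32-bit sequence number (DATA_SEL(1)) into the writeback page and raises an interrupt once that write has landed (INT_SEL(2)), which the new handling of IH source 181 ("CP EOP event") turns into radeon_fence_process(). The sketch below assembles the six dwords this emits, using a local stand-in for the PACKET3() header macro and a made-up address.

#include <stdint.h>
#include <stdio.h>

/* Field helpers copied from the comments this patch adds to r600d.h. */
#define EVENT_TYPE(x)   ((x) << 0)
#define EVENT_INDEX(x)  ((x) << 8)
#define DATA_SEL(x)     ((x) << 29)  /* 1 = send low 32-bit data */
#define INT_SEL(x)      ((x) << 24)  /* 2 = interrupt when data write lands */
#define CACHE_FLUSH_AND_INV_EVENT_TS  (0x14 << 0)
#define PACKET3_EVENT_WRITE_EOP       0x47

/* Local stand-in for the PM4 type-3 header: type, payload count - 1, opcode. */
static uint32_t pm4_type3(unsigned opcode, unsigned count)
{
	return (3u << 30) | ((count & 0x3fff) << 16) | ((opcode & 0xff) << 8);
}

int main(void)
{
	uint64_t addr = 0x12345678ULL; /* wb gpu_addr + R600_WB_EVENT_OFFSET + slot */
	uint32_t seq = 42;             /* fence->seq */
	uint32_t pkt[6];
	int i;

	pkt[0] = pm4_type3(PACKET3_EVENT_WRITE_EOP, 4);
	pkt[1] = EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5);
	pkt[2] = (uint32_t)(addr & 0xffffffff);
	pkt[3] = ((uint32_t)(addr >> 32) & 0xff) | DATA_SEL(1) | INT_SEL(2);
	pkt[4] = seq;
	pkt[5] = 0;

	for (i = 0; i < 6; i++)
		printf("dw%d: 0x%08x\n", i, pkt[i]);
	return 0;
}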
@@ -2426,19 +2385,12 @@ int r600_startup(struct radeon_device *rdev) | |||
2426 | rdev->asic->copy = NULL; | 2385 | rdev->asic->copy = NULL; |
2427 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 2386 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
2428 | } | 2387 | } |
2429 | /* pin copy shader into vram */ | 2388 | |
2430 | if (rdev->r600_blit.shader_obj) { | 2389 | /* allocate wb buffer */ |
2431 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 2390 | r = radeon_wb_init(rdev); |
2432 | if (unlikely(r != 0)) | 2391 | if (r) |
2433 | return r; | 2392 | return r; |
2434 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 2393 | |
2435 | &rdev->r600_blit.shader_gpu_addr); | ||
2436 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
2437 | if (r) { | ||
2438 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
2439 | return r; | ||
2440 | } | ||
2441 | } | ||
2442 | /* Enable IRQ */ | 2394 | /* Enable IRQ */ |
2443 | r = r600_irq_init(rdev); | 2395 | r = r600_irq_init(rdev); |
2444 | if (r) { | 2396 | if (r) { |
@@ -2457,8 +2409,7 @@ int r600_startup(struct radeon_device *rdev) | |||
2457 | r = r600_cp_resume(rdev); | 2409 | r = r600_cp_resume(rdev); |
2458 | if (r) | 2410 | if (r) |
2459 | return r; | 2411 | return r; |
2460 | /* write back buffer are not vital so don't worry about failure */ | 2412 | |
2461 | r600_wb_enable(rdev); | ||
2462 | return 0; | 2413 | return 0; |
2463 | } | 2414 | } |
2464 | 2415 | ||
@@ -2517,7 +2468,7 @@ int r600_suspend(struct radeon_device *rdev) | |||
2517 | r600_cp_stop(rdev); | 2468 | r600_cp_stop(rdev); |
2518 | rdev->cp.ready = false; | 2469 | rdev->cp.ready = false; |
2519 | r600_irq_suspend(rdev); | 2470 | r600_irq_suspend(rdev); |
2520 | r600_wb_disable(rdev); | 2471 | radeon_wb_disable(rdev); |
2521 | r600_pcie_gart_disable(rdev); | 2472 | r600_pcie_gart_disable(rdev); |
2522 | /* unpin shaders bo */ | 2473 | /* unpin shaders bo */ |
2523 | if (rdev->r600_blit.shader_obj) { | 2474 | if (rdev->r600_blit.shader_obj) { |
@@ -2614,8 +2565,8 @@ int r600_init(struct radeon_device *rdev) | |||
2614 | if (r) { | 2565 | if (r) { |
2615 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 2566 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2616 | r600_cp_fini(rdev); | 2567 | r600_cp_fini(rdev); |
2617 | r600_wb_fini(rdev); | ||
2618 | r600_irq_fini(rdev); | 2568 | r600_irq_fini(rdev); |
2569 | radeon_wb_fini(rdev); | ||
2619 | radeon_irq_kms_fini(rdev); | 2570 | radeon_irq_kms_fini(rdev); |
2620 | r600_pcie_gart_fini(rdev); | 2571 | r600_pcie_gart_fini(rdev); |
2621 | rdev->accel_working = false; | 2572 | rdev->accel_working = false; |
@@ -2645,8 +2596,8 @@ void r600_fini(struct radeon_device *rdev) | |||
2645 | r600_audio_fini(rdev); | 2596 | r600_audio_fini(rdev); |
2646 | r600_blit_fini(rdev); | 2597 | r600_blit_fini(rdev); |
2647 | r600_cp_fini(rdev); | 2598 | r600_cp_fini(rdev); |
2648 | r600_wb_fini(rdev); | ||
2649 | r600_irq_fini(rdev); | 2599 | r600_irq_fini(rdev); |
2600 | radeon_wb_fini(rdev); | ||
2650 | radeon_irq_kms_fini(rdev); | 2601 | radeon_irq_kms_fini(rdev); |
2651 | r600_pcie_gart_fini(rdev); | 2602 | r600_pcie_gart_fini(rdev); |
2652 | radeon_agp_fini(rdev); | 2603 | radeon_agp_fini(rdev); |
@@ -2981,10 +2932,13 @@ int r600_irq_init(struct radeon_device *rdev) | |||
2981 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | | 2932 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | |
2982 | IH_WPTR_OVERFLOW_CLEAR | | 2933 | IH_WPTR_OVERFLOW_CLEAR | |
2983 | (rb_bufsz << 1)); | 2934 | (rb_bufsz << 1)); |
2984 | /* WPTR writeback, not yet */ | 2935 | |
2985 | /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ | 2936 | if (rdev->wb.enabled) |
2986 | WREG32(IH_RB_WPTR_ADDR_LO, 0); | 2937 | ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; |
2987 | WREG32(IH_RB_WPTR_ADDR_HI, 0); | 2938 | |
2939 | /* set the writeback address whether it's enabled or not */ | ||
2940 | WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); | ||
2941 | WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); | ||
2988 | 2942 | ||
2989 | WREG32(IH_RB_CNTL, ih_rb_cntl); | 2943 | WREG32(IH_RB_CNTL, ih_rb_cntl); |
2990 | 2944 | ||
@@ -3068,6 +3022,7 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3068 | if (rdev->irq.sw_int) { | 3022 | if (rdev->irq.sw_int) { |
3069 | DRM_DEBUG("r600_irq_set: sw int\n"); | 3023 | DRM_DEBUG("r600_irq_set: sw int\n"); |
3070 | cp_int_cntl |= RB_INT_ENABLE; | 3024 | cp_int_cntl |= RB_INT_ENABLE; |
3025 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | ||
3071 | } | 3026 | } |
3072 | if (rdev->irq.crtc_vblank_int[0]) { | 3027 | if (rdev->irq.crtc_vblank_int[0]) { |
3073 | DRM_DEBUG("r600_irq_set: vblank 0\n"); | 3028 | DRM_DEBUG("r600_irq_set: vblank 0\n"); |
@@ -3242,8 +3197,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
3242 | { | 3197 | { |
3243 | u32 wptr, tmp; | 3198 | u32 wptr, tmp; |
3244 | 3199 | ||
3245 | /* XXX use writeback */ | 3200 | if (rdev->wb.enabled) |
3246 | wptr = RREG32(IH_RB_WPTR); | 3201 | wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; |
3202 | else | ||
3203 | wptr = RREG32(IH_RB_WPTR); | ||
3247 | 3204 | ||
3248 | if (wptr & RB_OVERFLOW) { | 3205 | if (wptr & RB_OVERFLOW) { |
3249 | /* When a ring buffer overflow happen start parsing interrupt | 3206 | /* When a ring buffer overflow happen start parsing interrupt |
@@ -3431,6 +3388,7 @@ restart_ih: | |||
3431 | break; | 3388 | break; |
3432 | case 181: /* CP EOP event */ | 3389 | case 181: /* CP EOP event */ |
3433 | DRM_DEBUG("IH: CP EOP\n"); | 3390 | DRM_DEBUG("IH: CP EOP\n"); |
3391 | radeon_fence_process(rdev); | ||
3434 | break; | 3392 | break; |
3435 | case 233: /* GUI IDLE */ | 3393 | case 233: /* GUI IDLE */ |
3436 | DRM_DEBUG("IH: CP EOP\n"); | 3394 | DRM_DEBUG("IH: CP EOP\n"); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 9ceb2a1ce799..2a4747d9747c 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev) | |||
472 | u32 packet2s[16]; | 472 | u32 packet2s[16]; |
473 | int num_packet2s = 0; | 473 | int num_packet2s = 0; |
474 | 474 | ||
475 | /* don't reinitialize blit */ | 475 | /* pin copy shader into vram if already initialized */ |
476 | if (rdev->r600_blit.shader_obj) | 476 | if (rdev->r600_blit.shader_obj) |
477 | return 0; | 477 | goto done; |
478 | |||
478 | mutex_init(&rdev->r600_blit.mutex); | 479 | mutex_init(&rdev->r600_blit.mutex); |
479 | rdev->r600_blit.state_offset = 0; | 480 | rdev->r600_blit.state_offset = 0; |
480 | 481 | ||
@@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev) | |||
532 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); | 533 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); |
533 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 534 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
534 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 535 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
536 | |||
537 | done: | ||
538 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
539 | if (unlikely(r != 0)) | ||
540 | return r; | ||
541 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
542 | &rdev->r600_blit.shader_gpu_addr); | ||
543 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
544 | if (r) { | ||
545 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
546 | return r; | ||
547 | } | ||
535 | return 0; | 548 | return 0; |
536 | } | 549 | } |
537 | 550 | ||
@@ -552,7 +565,7 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
552 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | 565 | radeon_bo_unref(&rdev->r600_blit.shader_obj); |
553 | } | 566 | } |
554 | 567 | ||
555 | int r600_vb_ib_get(struct radeon_device *rdev) | 568 | static int r600_vb_ib_get(struct radeon_device *rdev) |
556 | { | 569 | { |
557 | int r; | 570 | int r; |
558 | r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); | 571 | r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); |
@@ -566,7 +579,7 @@ int r600_vb_ib_get(struct radeon_device *rdev) | |||
566 | return 0; | 579 | return 0; |
567 | } | 580 | } |
568 | 581 | ||
569 | void r600_vb_ib_put(struct radeon_device *rdev) | 582 | static void r600_vb_ib_put(struct radeon_device *rdev) |
570 | { | 583 | { |
571 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); | 584 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); |
572 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 585 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
@@ -670,17 +683,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
670 | 683 | ||
671 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | 684 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { |
672 | WARN_ON(1); | 685 | WARN_ON(1); |
673 | |||
674 | #if 0 | ||
675 | r600_vb_ib_put(rdev); | ||
676 | |||
677 | r600_nomm_put_vb(dev); | ||
678 | r600_nomm_get_vb(dev); | ||
679 | if (!dev_priv->blit_vb) | ||
680 | return; | ||
681 | set_shaders(dev); | ||
682 | vb = r600_nomm_get_vb_ptr(dev); | ||
683 | #endif | ||
684 | } | 686 | } |
685 | 687 | ||
686 | vb[0] = i2f(dst_x); | 688 | vb[0] = i2f(dst_x); |
@@ -765,17 +767,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
765 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { | 767 | if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { |
766 | WARN_ON(1); | 768 | WARN_ON(1); |
767 | } | 769 | } |
768 | #if 0 | ||
769 | if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) { | ||
770 | r600_nomm_put_vb(dev); | ||
771 | r600_nomm_get_vb(dev); | ||
772 | if (!rdev->blit_vb) | ||
773 | return; | ||
774 | |||
775 | set_shaders(dev); | ||
776 | vb = r600_nomm_get_vb_ptr(dev); | ||
777 | } | ||
778 | #endif | ||
779 | 770 | ||
780 | vb[0] = i2f(dst_x / 4); | 771 | vb[0] = i2f(dst_x / 4); |
781 | vb[1] = 0; | 772 | vb[1] = 0; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 858a1920c0d7..966a793e225b 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -474,6 +474,7 @@ | |||
474 | #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 | 474 | #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 |
475 | #define VTX_REUSE_DEPTH_MASK 0x000000FF | 475 | #define VTX_REUSE_DEPTH_MASK 0x000000FF |
476 | #define VGT_EVENT_INITIATOR 0x28a90 | 476 | #define VGT_EVENT_INITIATOR 0x28a90 |
477 | # define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0) | ||
477 | # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) | 478 | # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) |
478 | 479 | ||
479 | #define VM_CONTEXT0_CNTL 0x1410 | 480 | #define VM_CONTEXT0_CNTL 0x1410 |
@@ -775,7 +776,27 @@ | |||
775 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | 776 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) |
776 | #define PACKET3_COND_WRITE 0x45 | 777 | #define PACKET3_COND_WRITE 0x45 |
777 | #define PACKET3_EVENT_WRITE 0x46 | 778 | #define PACKET3_EVENT_WRITE 0x46 |
779 | #define EVENT_TYPE(x) ((x) << 0) | ||
780 | #define EVENT_INDEX(x) ((x) << 8) | ||
781 | /* 0 - any non-TS event | ||
782 | * 1 - ZPASS_DONE | ||
783 | * 2 - SAMPLE_PIPELINESTAT | ||
784 | * 3 - SAMPLE_STREAMOUTSTAT* | ||
785 | * 4 - *S_PARTIAL_FLUSH | ||
786 | * 5 - TS events | ||
787 | */ | ||
778 | #define PACKET3_EVENT_WRITE_EOP 0x47 | 788 | #define PACKET3_EVENT_WRITE_EOP 0x47 |
789 | #define DATA_SEL(x) ((x) << 29) | ||
790 | /* 0 - discard | ||
791 | * 1 - send low 32bit data | ||
792 | * 2 - send 64bit data | ||
793 | * 3 - send 64bit counter value | ||
794 | */ | ||
795 | #define INT_SEL(x) ((x) << 24) | ||
796 | /* 0 - none | ||
797 | * 1 - interrupt only (DATA_SEL = 0) | ||
798 | * 2 - interrupt when data write is confirmed | ||
799 | */ | ||
779 | #define PACKET3_ONE_REG_WRITE 0x57 | 800 | #define PACKET3_ONE_REG_WRITE 0x57 |
780 | #define PACKET3_SET_CONFIG_REG 0x68 | 801 | #define PACKET3_SET_CONFIG_REG 0x68 |
781 | #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 | 802 | #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a168d644bf9e..2edd52ece226 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -88,7 +88,6 @@ extern int radeon_benchmarking; | |||
88 | extern int radeon_testing; | 88 | extern int radeon_testing; |
89 | extern int radeon_connector_table; | 89 | extern int radeon_connector_table; |
90 | extern int radeon_tv; | 90 | extern int radeon_tv; |
91 | extern int radeon_new_pll; | ||
92 | extern int radeon_audio; | 91 | extern int radeon_audio; |
93 | extern int radeon_disp_priority; | 92 | extern int radeon_disp_priority; |
94 | extern int radeon_hw_i2c; | 93 | extern int radeon_hw_i2c; |
@@ -365,6 +364,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev); | |||
365 | */ | 364 | */ |
366 | struct radeon_scratch { | 365 | struct radeon_scratch { |
367 | unsigned num_reg; | 366 | unsigned num_reg; |
367 | uint32_t reg_base; | ||
368 | bool free[32]; | 368 | bool free[32]; |
369 | uint32_t reg[32]; | 369 | uint32_t reg[32]; |
370 | }; | 370 | }; |
@@ -593,8 +593,15 @@ struct radeon_wb { | |||
593 | struct radeon_bo *wb_obj; | 593 | struct radeon_bo *wb_obj; |
594 | volatile uint32_t *wb; | 594 | volatile uint32_t *wb; |
595 | uint64_t gpu_addr; | 595 | uint64_t gpu_addr; |
596 | bool enabled; | ||
597 | bool use_event; | ||
596 | }; | 598 | }; |
597 | 599 | ||
600 | #define RADEON_WB_SCRATCH_OFFSET 0 | ||
601 | #define RADEON_WB_CP_RPTR_OFFSET 1024 | ||
602 | #define R600_WB_IH_WPTR_OFFSET 2048 | ||
603 | #define R600_WB_EVENT_OFFSET 3072 | ||
604 | |||
598 | /** | 605 | /** |
599 | * struct radeon_pm - power management data | 606 | * struct radeon_pm - power management data |
600 | * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) | 607 | * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) |
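The RADEON_WB_*/R600_WB_* offsets added above carve the single writeback page into per-consumer slots (scratch/fence values, CP read pointer, IH write pointer, r600+ event fences). The offsets are in bytes while the mapped buffer is an array of u32, so consumers divide by four. A hedged sketch of a read-pointer fetch; the CP_RB_RPTR fallback register is illustrative:

u32 rptr;

if (rdev->wb.enabled)
        rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET / 4];  /* byte offset into a u32 buffer */
else
        rptr = RREG32(CP_RB_RPTR);                         /* fall back to an MMIO read */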
@@ -1123,6 +1130,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) | |||
1123 | void r600_kms_blit_copy(struct radeon_device *rdev, | 1130 | void r600_kms_blit_copy(struct radeon_device *rdev, |
1124 | u64 src_gpu_addr, u64 dst_gpu_addr, | 1131 | u64 src_gpu_addr, u64 dst_gpu_addr, |
1125 | int size_bytes); | 1132 | int size_bytes); |
1133 | /* evergreen blit */ | ||
1134 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
1135 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
1136 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
1137 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
1138 | int size_bytes); | ||
1126 | 1139 | ||
1127 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | 1140 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
1128 | { | 1141 | { |
@@ -1340,6 +1353,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev); | |||
1340 | extern void radeon_update_display_priority(struct radeon_device *rdev); | 1353 | extern void radeon_update_display_priority(struct radeon_device *rdev); |
1341 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | 1354 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); |
1342 | extern void radeon_scratch_init(struct radeon_device *rdev); | 1355 | extern void radeon_scratch_init(struct radeon_device *rdev); |
1356 | extern void radeon_wb_fini(struct radeon_device *rdev); | ||
1357 | extern int radeon_wb_init(struct radeon_device *rdev); | ||
1358 | extern void radeon_wb_disable(struct radeon_device *rdev); | ||
1343 | extern void radeon_surface_init(struct radeon_device *rdev); | 1359 | extern void radeon_surface_init(struct radeon_device *rdev); |
1344 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 1360 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
1345 | extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); | 1361 | extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); |
@@ -1424,9 +1440,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); | |||
1424 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); | 1440 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); |
1425 | extern int r600_ib_test(struct radeon_device *rdev); | 1441 | extern int r600_ib_test(struct radeon_device *rdev); |
1426 | extern int r600_ring_test(struct radeon_device *rdev); | 1442 | extern int r600_ring_test(struct radeon_device *rdev); |
1427 | extern void r600_wb_fini(struct radeon_device *rdev); | ||
1428 | extern int r600_wb_enable(struct radeon_device *rdev); | ||
1429 | extern void r600_wb_disable(struct radeon_device *rdev); | ||
1430 | extern void r600_scratch_init(struct radeon_device *rdev); | 1443 | extern void r600_scratch_init(struct radeon_device *rdev); |
1431 | extern int r600_blit_init(struct radeon_device *rdev); | 1444 | extern int r600_blit_init(struct radeon_device *rdev); |
1432 | extern void r600_blit_fini(struct radeon_device *rdev); | 1445 | extern void r600_blit_fini(struct radeon_device *rdev); |
@@ -1464,6 +1477,8 @@ extern void r700_cp_stop(struct radeon_device *rdev); | |||
1464 | extern void r700_cp_fini(struct radeon_device *rdev); | 1477 | extern void r700_cp_fini(struct radeon_device *rdev); |
1465 | extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); | 1478 | extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); |
1466 | extern int evergreen_irq_set(struct radeon_device *rdev); | 1479 | extern int evergreen_irq_set(struct radeon_device *rdev); |
1480 | extern int evergreen_blit_init(struct radeon_device *rdev); | ||
1481 | extern void evergreen_blit_fini(struct radeon_device *rdev); | ||
1467 | 1482 | ||
1468 | /* radeon_acpi.c */ | 1483 | /* radeon_acpi.c */ |
1469 | #if defined(CONFIG_ACPI) | 1484 | #if defined(CONFIG_ACPI) |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 25e1dd197791..64fb89ecbf74 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = { | |||
726 | .get_vblank_counter = &evergreen_get_vblank_counter, | 726 | .get_vblank_counter = &evergreen_get_vblank_counter, |
727 | .fence_ring_emit = &r600_fence_ring_emit, | 727 | .fence_ring_emit = &r600_fence_ring_emit, |
728 | .cs_parse = &evergreen_cs_parse, | 728 | .cs_parse = &evergreen_cs_parse, |
729 | .copy_blit = NULL, | 729 | .copy_blit = &evergreen_copy_blit, |
730 | .copy_dma = NULL, | 730 | .copy_dma = &evergreen_copy_blit, |
731 | .copy = NULL, | 731 | .copy = &evergreen_copy_blit, |
732 | .get_engine_clock = &radeon_atom_get_engine_clock, | 732 | .get_engine_clock = &radeon_atom_get_engine_clock, |
733 | .set_engine_clock = &radeon_atom_set_engine_clock, | 733 | .set_engine_clock = &radeon_atom_set_engine_clock, |
734 | .get_memory_clock = &radeon_atom_get_memory_clock, | 734 | .get_memory_clock = &radeon_atom_get_memory_clock, |
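Wiring the copy callbacks to evergreen_copy_blit implies a wrapper that composes the three evergreen blit entry points declared elsewhere in this series. A plausible sketch of that composition; the real implementation also serializes blit users with a mutex, which is omitted here:

int evergreen_copy_blit(struct radeon_device *rdev,
                        uint64_t src_offset, uint64_t dst_offset,
                        unsigned num_pages, struct radeon_fence *fence)
{
        int r;

        /* set up blit state and reserve ring space for the copy */
        r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
        if (r)
                return r;
        /* emit the actual blit packets */
        evergreen_kms_blit_copy(rdev, src_offset, dst_offset,
                                num_pages * RADEON_GPU_PAGE_SIZE);
        /* emit the fence and kick the ring */
        evergreen_blit_done_copy(rdev, fence);
        return 0;
}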
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index a5aff755f0d2..740988244143 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev); | |||
108 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); | 108 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); |
109 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); | 109 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); |
110 | void r100_vram_init_sizes(struct radeon_device *rdev); | 110 | void r100_vram_init_sizes(struct radeon_device *rdev); |
111 | void r100_wb_disable(struct radeon_device *rdev); | ||
112 | void r100_wb_fini(struct radeon_device *rdev); | ||
113 | int r100_wb_init(struct radeon_device *rdev); | ||
114 | int r100_cp_reset(struct radeon_device *rdev); | 111 | int r100_cp_reset(struct radeon_device *rdev); |
115 | void r100_vga_render_disable(struct radeon_device *rdev); | 112 | void r100_vga_render_disable(struct radeon_device *rdev); |
116 | void r100_restore_sanity(struct radeon_device *rdev); | 113 | void r100_restore_sanity(struct radeon_device *rdev); |
@@ -257,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
257 | int r600_cs_parse(struct radeon_cs_parser *p); | 254 | int r600_cs_parse(struct radeon_cs_parser *p); |
258 | void r600_fence_ring_emit(struct radeon_device *rdev, | 255 | void r600_fence_ring_emit(struct radeon_device *rdev, |
259 | struct radeon_fence *fence); | 256 | struct radeon_fence *fence); |
260 | int r600_copy_dma(struct radeon_device *rdev, | ||
261 | uint64_t src_offset, | ||
262 | uint64_t dst_offset, | ||
263 | unsigned num_pages, | ||
264 | struct radeon_fence *fence); | ||
265 | int r600_irq_process(struct radeon_device *rdev); | 257 | int r600_irq_process(struct radeon_device *rdev); |
266 | int r600_irq_set(struct radeon_device *rdev); | 258 | int r600_irq_set(struct radeon_device *rdev); |
267 | bool r600_gpu_is_lockup(struct radeon_device *rdev); | 259 | bool r600_gpu_is_lockup(struct radeon_device *rdev); |
@@ -307,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev); | |||
307 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); | 299 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); |
308 | int evergreen_asic_reset(struct radeon_device *rdev); | 300 | int evergreen_asic_reset(struct radeon_device *rdev); |
309 | void evergreen_bandwidth_update(struct radeon_device *rdev); | 301 | void evergreen_bandwidth_update(struct radeon_device *rdev); |
302 | int evergreen_copy_blit(struct radeon_device *rdev, | ||
303 | uint64_t src_offset, uint64_t dst_offset, | ||
304 | unsigned num_pages, struct radeon_fence *fence); | ||
310 | void evergreen_hpd_init(struct radeon_device *rdev); | 305 | void evergreen_hpd_init(struct radeon_device *rdev); |
311 | void evergreen_hpd_fini(struct radeon_device *rdev); | 306 | void evergreen_hpd_fini(struct radeon_device *rdev); |
312 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 307 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 68932ba7b8a4..2b44cbcb031b 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -1112,8 +1112,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1112 | * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per | 1112 | * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per |
1113 | * family. | 1113 | * family. |
1114 | */ | 1114 | */ |
1115 | if (!radeon_new_pll) | 1115 | p1pll->pll_out_min = 64800; |
1116 | p1pll->pll_out_min = 64800; | ||
1117 | } | 1116 | } |
1118 | 1117 | ||
1119 | p1pll->pll_in_min = | 1118 | p1pll->pll_in_min = |
@@ -1277,36 +1276,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | |||
1277 | return false; | 1276 | return false; |
1278 | } | 1277 | } |
1279 | 1278 | ||
1280 | static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | 1279 | bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, |
1281 | radeon_encoder | 1280 | struct radeon_atom_ss *ss, |
1282 | *encoder, | 1281 | int id) |
1283 | int id) | ||
1284 | { | 1282 | { |
1285 | struct drm_device *dev = encoder->base.dev; | ||
1286 | struct radeon_device *rdev = dev->dev_private; | ||
1287 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 1283 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
1288 | int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); | 1284 | int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); |
1289 | uint16_t data_offset; | 1285 | uint16_t data_offset, size; |
1290 | struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; | 1286 | struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; |
1291 | uint8_t frev, crev; | 1287 | uint8_t frev, crev; |
1292 | struct radeon_atom_ss *ss = NULL; | 1288 | int i, num_indices; |
1293 | int i; | ||
1294 | |||
1295 | if (id > ATOM_MAX_SS_ENTRY) | ||
1296 | return NULL; | ||
1297 | 1289 | ||
1298 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 1290 | memset(ss, 0, sizeof(struct radeon_atom_ss)); |
1291 | if (atom_parse_data_header(mode_info->atom_context, index, &size, | ||
1299 | &frev, &crev, &data_offset)) { | 1292 | &frev, &crev, &data_offset)) { |
1300 | ss_info = | 1293 | ss_info = |
1301 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); | 1294 | (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); |
1302 | 1295 | ||
1303 | ss = | 1296 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
1304 | kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); | 1297 | sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); |
1305 | |||
1306 | if (!ss) | ||
1307 | return NULL; | ||
1308 | 1298 | ||
1309 | for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) { | 1299 | for (i = 0; i < num_indices; i++) { |
1310 | if (ss_info->asSS_Info[i].ucSS_Id == id) { | 1300 | if (ss_info->asSS_Info[i].ucSS_Id == id) { |
1311 | ss->percentage = | 1301 | ss->percentage = |
1312 | le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); | 1302 | le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); |
@@ -1315,11 +1305,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | |||
1315 | ss->delay = ss_info->asSS_Info[i].ucSS_Delay; | 1305 | ss->delay = ss_info->asSS_Info[i].ucSS_Delay; |
1316 | ss->range = ss_info->asSS_Info[i].ucSS_Range; | 1306 | ss->range = ss_info->asSS_Info[i].ucSS_Range; |
1317 | ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; | 1307 | ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; |
1318 | break; | 1308 | return true; |
1309 | } | ||
1310 | } | ||
1311 | } | ||
1312 | return false; | ||
1313 | } | ||
1314 | |||
1315 | union asic_ss_info { | ||
1316 | struct _ATOM_ASIC_INTERNAL_SS_INFO info; | ||
1317 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; | ||
1318 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; | ||
1319 | }; | ||
1320 | |||
1321 | bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | ||
1322 | struct radeon_atom_ss *ss, | ||
1323 | int id, u32 clock) | ||
1324 | { | ||
1325 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
1326 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
1327 | uint16_t data_offset, size; | ||
1328 | union asic_ss_info *ss_info; | ||
1329 | uint8_t frev, crev; | ||
1330 | int i, num_indices; | ||
1331 | |||
1332 | memset(ss, 0, sizeof(struct radeon_atom_ss)); | ||
1333 | if (atom_parse_data_header(mode_info->atom_context, index, &size, | ||
1334 | &frev, &crev, &data_offset)) { | ||
1335 | |||
1336 | ss_info = | ||
1337 | (union asic_ss_info *)(mode_info->atom_context->bios + data_offset); | ||
1338 | |||
1339 | switch (frev) { | ||
1340 | case 1: | ||
1341 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
1342 | sizeof(ATOM_ASIC_SS_ASSIGNMENT); | ||
1343 | |||
1344 | for (i = 0; i < num_indices; i++) { | ||
1345 | if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && | ||
1346 | (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { | ||
1347 | ss->percentage = | ||
1348 | le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | ||
1349 | ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; | ||
1350 | ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); | ||
1351 | return true; | ||
1352 | } | ||
1353 | } | ||
1354 | break; | ||
1355 | case 2: | ||
1356 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
1357 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); | ||
1358 | for (i = 0; i < num_indices; i++) { | ||
1359 | if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && | ||
1360 | (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { | ||
1361 | ss->percentage = | ||
1362 | le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | ||
1363 | ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; | ||
1364 | ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); | ||
1365 | return true; | ||
1366 | } | ||
1319 | } | 1367 | } |
1368 | break; | ||
1369 | case 3: | ||
1370 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | ||
1371 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); | ||
1372 | for (i = 0; i < num_indices; i++) { | ||
1373 | if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && | ||
1374 | (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { | ||
1375 | ss->percentage = | ||
1376 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | ||
1377 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; | ||
1378 | ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); | ||
1379 | return true; | ||
1380 | } | ||
1381 | } | ||
1382 | break; | ||
1383 | default: | ||
1384 | DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev); | ||
1385 | break; | ||
1320 | } | 1386 | } |
1387 | |||
1321 | } | 1388 | } |
1322 | return ss; | 1389 | return false; |
1323 | } | 1390 | } |
1324 | 1391 | ||
1325 | union lvds_info { | 1392 | union lvds_info { |
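With the getters reworked to fill a caller-provided struct radeon_atom_ss and return a bool, display code can query spread spectrum parameters without owning an allocation. A sketch of a caller; the id and clock arguments are only illustrative (an LVDS path would pass the lcd_ss_id saved below, DCE4 parts would use the ASIC table variant):

struct radeon_atom_ss ss;
bool ss_enabled;

if (ASIC_IS_DCE4(rdev))
        /* ASIC internal SS table: id plus the target clock in the table's units */
        ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, id, clock);
else
        /* pre-DCE4 PPLL SS table: id only */
        ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, id);

if (ss_enabled) {
        /* ss.percentage / ss.type / ss.rate are now valid and can be handed
         * to the crtc spread spectrum programming path */
}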
@@ -1371,7 +1438,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1371 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); | 1438 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); |
1372 | lvds->panel_pwr_delay = | 1439 | lvds->panel_pwr_delay = |
1373 | le16_to_cpu(lvds_info->info.usOffDelayInMs); | 1440 | le16_to_cpu(lvds_info->info.usOffDelayInMs); |
1374 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; | 1441 | lvds->lcd_misc = lvds_info->info.ucLVDS_Misc; |
1375 | 1442 | ||
1376 | misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); | 1443 | misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); |
1377 | if (misc & ATOM_VSYNC_POLARITY) | 1444 | if (misc & ATOM_VSYNC_POLARITY) |
@@ -1388,19 +1455,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1388 | /* set crtc values */ | 1455 | /* set crtc values */ |
1389 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | 1456 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); |
1390 | 1457 | ||
1391 | lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); | 1458 | lvds->lcd_ss_id = lvds_info->info.ucSS_Id; |
1392 | |||
1393 | if (ASIC_IS_AVIVO(rdev)) { | ||
1394 | if (radeon_new_pll == 0) | ||
1395 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1396 | else | ||
1397 | lvds->pll_algo = PLL_ALGO_NEW; | ||
1398 | } else { | ||
1399 | if (radeon_new_pll == 1) | ||
1400 | lvds->pll_algo = PLL_ALGO_NEW; | ||
1401 | else | ||
1402 | lvds->pll_algo = PLL_ALGO_LEGACY; | ||
1403 | } | ||
1404 | 1459 | ||
1405 | encoder->native_mode = lvds->native_mode; | 1460 | encoder->native_mode = lvds->native_mode; |
1406 | 1461 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index ecc1a8fafbfd..64c3ddf02167 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
326 | } | 326 | } |
327 | } | 327 | } |
328 | 328 | ||
329 | if (property == rdev->mode_info.underscan_hborder_property) { | ||
330 | /* need to find digital encoder on connector */ | ||
331 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
332 | if (!encoder) | ||
333 | return 0; | ||
334 | |||
335 | radeon_encoder = to_radeon_encoder(encoder); | ||
336 | |||
337 | if (radeon_encoder->underscan_hborder != val) { | ||
338 | radeon_encoder->underscan_hborder = val; | ||
339 | radeon_property_change_mode(&radeon_encoder->base); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | if (property == rdev->mode_info.underscan_vborder_property) { | ||
344 | /* need to find digital encoder on connector */ | ||
345 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | ||
346 | if (!encoder) | ||
347 | return 0; | ||
348 | |||
349 | radeon_encoder = to_radeon_encoder(encoder); | ||
350 | |||
351 | if (radeon_encoder->underscan_vborder != val) { | ||
352 | radeon_encoder->underscan_vborder = val; | ||
353 | radeon_property_change_mode(&radeon_encoder->base); | ||
354 | } | ||
355 | } | ||
356 | |||
329 | if (property == rdev->mode_info.tv_std_property) { | 357 | if (property == rdev->mode_info.tv_std_property) { |
330 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); | 358 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); |
331 | if (!encoder) { | 359 | if (!encoder) { |
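On the userspace side the two new border controls are ordinary DRM range properties on the connector, so they can be driven with the legacy libdrm property calls. A hedged sketch with error handling trimmed; the helper name is made up:

#include <string.h>
#include <xf86drmMode.h>

/* Sketch: set "underscan hborder" / "underscan vborder" on a connector. */
static void set_underscan_border(int fd, drmModeConnector *conn,
                                 const char *name, uint64_t value)
{
        int i;

        for (i = 0; i < conn->count_props; i++) {
                drmModePropertyPtr prop = drmModeGetProperty(fd, conn->props[i]);

                if (!prop)
                        continue;
                if (!strcmp(prop->name, name))
                        drmModeConnectorSetProperty(fd, conn->connector_id,
                                                    prop->prop_id, value);
                drmModeFreeProperty(prop);
        }
}

/* e.g. set_underscan_border(fd, conn, "underscan hborder", 40); */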
@@ -1153,10 +1181,17 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1153 | drm_connector_attach_property(&radeon_connector->base, | 1181 | drm_connector_attach_property(&radeon_connector->base, |
1154 | rdev->mode_info.coherent_mode_property, | 1182 | rdev->mode_info.coherent_mode_property, |
1155 | 1); | 1183 | 1); |
1156 | if (ASIC_IS_AVIVO(rdev)) | 1184 | if (ASIC_IS_AVIVO(rdev)) { |
1157 | drm_connector_attach_property(&radeon_connector->base, | 1185 | drm_connector_attach_property(&radeon_connector->base, |
1158 | rdev->mode_info.underscan_property, | 1186 | rdev->mode_info.underscan_property, |
1159 | UNDERSCAN_AUTO); | 1187 | UNDERSCAN_AUTO); |
1188 | drm_connector_attach_property(&radeon_connector->base, | ||
1189 | rdev->mode_info.underscan_hborder_property, | ||
1190 | 0); | ||
1191 | drm_connector_attach_property(&radeon_connector->base, | ||
1192 | rdev->mode_info.underscan_vborder_property, | ||
1193 | 0); | ||
1194 | } | ||
1160 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | 1195 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
1161 | radeon_connector->dac_load_detect = true; | 1196 | radeon_connector->dac_load_detect = true; |
1162 | drm_connector_attach_property(&radeon_connector->base, | 1197 | drm_connector_attach_property(&radeon_connector->base, |
@@ -1181,10 +1216,17 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1181 | drm_connector_attach_property(&radeon_connector->base, | 1216 | drm_connector_attach_property(&radeon_connector->base, |
1182 | rdev->mode_info.coherent_mode_property, | 1217 | rdev->mode_info.coherent_mode_property, |
1183 | 1); | 1218 | 1); |
1184 | if (ASIC_IS_AVIVO(rdev)) | 1219 | if (ASIC_IS_AVIVO(rdev)) { |
1185 | drm_connector_attach_property(&radeon_connector->base, | 1220 | drm_connector_attach_property(&radeon_connector->base, |
1186 | rdev->mode_info.underscan_property, | 1221 | rdev->mode_info.underscan_property, |
1187 | UNDERSCAN_AUTO); | 1222 | UNDERSCAN_AUTO); |
1223 | drm_connector_attach_property(&radeon_connector->base, | ||
1224 | rdev->mode_info.underscan_hborder_property, | ||
1225 | 0); | ||
1226 | drm_connector_attach_property(&radeon_connector->base, | ||
1227 | rdev->mode_info.underscan_vborder_property, | ||
1228 | 0); | ||
1229 | } | ||
1188 | subpixel_order = SubPixelHorizontalRGB; | 1230 | subpixel_order = SubPixelHorizontalRGB; |
1189 | break; | 1231 | break; |
1190 | case DRM_MODE_CONNECTOR_DisplayPort: | 1232 | case DRM_MODE_CONNECTOR_DisplayPort: |
@@ -1212,10 +1254,17 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1212 | drm_connector_attach_property(&radeon_connector->base, | 1254 | drm_connector_attach_property(&radeon_connector->base, |
1213 | rdev->mode_info.coherent_mode_property, | 1255 | rdev->mode_info.coherent_mode_property, |
1214 | 1); | 1256 | 1); |
1215 | if (ASIC_IS_AVIVO(rdev)) | 1257 | if (ASIC_IS_AVIVO(rdev)) { |
1216 | drm_connector_attach_property(&radeon_connector->base, | 1258 | drm_connector_attach_property(&radeon_connector->base, |
1217 | rdev->mode_info.underscan_property, | 1259 | rdev->mode_info.underscan_property, |
1218 | UNDERSCAN_AUTO); | 1260 | UNDERSCAN_AUTO); |
1261 | drm_connector_attach_property(&radeon_connector->base, | ||
1262 | rdev->mode_info.underscan_hborder_property, | ||
1263 | 0); | ||
1264 | drm_connector_attach_property(&radeon_connector->base, | ||
1265 | rdev->mode_info.underscan_vborder_property, | ||
1266 | 0); | ||
1267 | } | ||
1219 | break; | 1268 | break; |
1220 | case DRM_MODE_CONNECTOR_SVIDEO: | 1269 | case DRM_MODE_CONNECTOR_SVIDEO: |
1221 | case DRM_MODE_CONNECTOR_Composite: | 1270 | case DRM_MODE_CONNECTOR_Composite: |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5731fc9b1ae3..4a8102512db5 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | 120 | static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, |
121 | uint32_t gpu_addr) | 121 | uint64_t gpu_addr) |
122 | { | 122 | { |
123 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 123 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
124 | struct radeon_device *rdev = crtc->dev->dev_private; | 124 | struct radeon_device *rdev = crtc->dev->dev_private; |
125 | 125 | ||
126 | if (ASIC_IS_DCE4(rdev)) { | 126 | if (ASIC_IS_DCE4(rdev)) { |
127 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0); | 127 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, |
128 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 128 | upper_32_bits(gpu_addr)); |
129 | WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | ||
130 | gpu_addr & 0xffffffff); | ||
129 | } else if (ASIC_IS_AVIVO(rdev)) { | 131 | } else if (ASIC_IS_AVIVO(rdev)) { |
130 | if (rdev->family >= CHIP_RV770) { | 132 | if (rdev->family >= CHIP_RV770) { |
131 | if (radeon_crtc->crtc_id) | 133 | if (radeon_crtc->crtc_id) |
132 | WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); | 134 | WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); |
133 | else | 135 | else |
134 | WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0); | 136 | WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); |
135 | } | 137 | } |
136 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 138 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
139 | gpu_addr & 0xffffffff); | ||
137 | } else { | 140 | } else { |
138 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; | 141 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; |
139 | /* offset is from DISP(2)_BASE_ADDRESS */ | 142 | /* offset is from DISP(2)_BASE_ADDRESS */ |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 256d204a6d24..8adfedfe547f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev) | |||
117 | } else { | 117 | } else { |
118 | rdev->scratch.num_reg = 7; | 118 | rdev->scratch.num_reg = 7; |
119 | } | 119 | } |
120 | rdev->scratch.reg_base = RADEON_SCRATCH_REG0; | ||
120 | for (i = 0; i < rdev->scratch.num_reg; i++) { | 121 | for (i = 0; i < rdev->scratch.num_reg; i++) { |
121 | rdev->scratch.free[i] = true; | 122 | rdev->scratch.free[i] = true; |
122 | rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); | 123 | rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); |
123 | } | 124 | } |
124 | } | 125 | } |
125 | 126 | ||
@@ -149,6 +150,86 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) | |||
149 | } | 150 | } |
150 | } | 151 | } |
151 | 152 | ||
153 | void radeon_wb_disable(struct radeon_device *rdev) | ||
154 | { | ||
155 | int r; | ||
156 | |||
157 | if (rdev->wb.wb_obj) { | ||
158 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
159 | if (unlikely(r != 0)) | ||
160 | return; | ||
161 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
162 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
163 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
164 | } | ||
165 | rdev->wb.enabled = false; | ||
166 | } | ||
167 | |||
168 | void radeon_wb_fini(struct radeon_device *rdev) | ||
169 | { | ||
170 | radeon_wb_disable(rdev); | ||
171 | if (rdev->wb.wb_obj) { | ||
172 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
173 | rdev->wb.wb = NULL; | ||
174 | rdev->wb.wb_obj = NULL; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | int radeon_wb_init(struct radeon_device *rdev) | ||
179 | { | ||
180 | int r; | ||
181 | |||
182 | if (rdev->wb.wb_obj == NULL) { | ||
183 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | ||
184 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); | ||
185 | if (r) { | ||
186 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | ||
187 | return r; | ||
188 | } | ||
189 | } | ||
190 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
191 | if (unlikely(r != 0)) { | ||
192 | radeon_wb_fini(rdev); | ||
193 | return r; | ||
194 | } | ||
195 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
196 | &rdev->wb.gpu_addr); | ||
197 | if (r) { | ||
198 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
199 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | ||
200 | radeon_wb_fini(rdev); | ||
201 | return r; | ||
202 | } | ||
203 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | ||
204 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
205 | if (r) { | ||
206 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); | ||
207 | radeon_wb_fini(rdev); | ||
208 | return r; | ||
209 | } | ||
210 | |||
211 | /* disable event_write fences */ | ||
212 | rdev->wb.use_event = false; | ||
213 | /* disabled via module param */ | ||
214 | if (radeon_no_wb == 1) | ||
215 | rdev->wb.enabled = false; | ||
216 | else { | ||
217 | /* often unreliable on AGP */ | ||
218 | if (rdev->flags & RADEON_IS_AGP) { | ||
219 | rdev->wb.enabled = false; | ||
220 | } else { | ||
221 | rdev->wb.enabled = true; | ||
222 | /* event_write fences are only available on r600+ */ | ||
223 | if (rdev->family >= CHIP_R600) | ||
224 | rdev->wb.use_event = true; | ||
225 | } | ||
226 | } | ||
227 | |||
228 | dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
152 | /** | 233 | /** |
153 | * radeon_vram_location - try to find VRAM location | 234 | * radeon_vram_location - try to find VRAM location |
154 | * @rdev: radeon device structure holding all necessary informations | 235 | * @rdev: radeon device structure holding all necessary informations |
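radeon_wb_init() consolidates the old per-family r100/r600 writeback setup: the buffer object is created once and pinned/mapped here, writeback is force-disabled for AGP and by the radeon_no_wb module parameter, and use_event is only set where r600+ event fences are available. The expected call pattern in an ASIC startup/teardown path looks roughly like this; the surrounding function names are illustrative:

/* somewhere in <asic>_startup(), before the CP/IH rings are enabled */
r = radeon_wb_init(rdev);
if (r)
        return r;
/* ...ring setup can now mirror read/write pointers into rdev->wb.wb... */

/* in <asic>_suspend(): stop using the buffer but keep the BO around */
radeon_wb_disable(rdev);

/* in <asic>_fini(): unpin, unmap and free the writeback object */
radeon_wb_fini(rdev);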
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 26935cf2c3b3..6c6846cdaa30 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -486,13 +486,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) | |||
486 | return n; | 486 | return n; |
487 | } | 487 | } |
488 | 488 | ||
489 | static void radeon_compute_pll_legacy(struct radeon_pll *pll, | 489 | void radeon_compute_pll(struct radeon_pll *pll, |
490 | uint64_t freq, | 490 | uint64_t freq, |
491 | uint32_t *dot_clock_p, | 491 | uint32_t *dot_clock_p, |
492 | uint32_t *fb_div_p, | 492 | uint32_t *fb_div_p, |
493 | uint32_t *frac_fb_div_p, | 493 | uint32_t *frac_fb_div_p, |
494 | uint32_t *ref_div_p, | 494 | uint32_t *ref_div_p, |
495 | uint32_t *post_div_p) | 495 | uint32_t *post_div_p) |
496 | { | 496 | { |
497 | uint32_t min_ref_div = pll->min_ref_div; | 497 | uint32_t min_ref_div = pll->min_ref_div; |
498 | uint32_t max_ref_div = pll->max_ref_div; | 498 | uint32_t max_ref_div = pll->max_ref_div; |
@@ -545,7 +545,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
545 | max_fractional_feed_div = pll->max_frac_feedback_div; | 545 | max_fractional_feed_div = pll->max_frac_feedback_div; |
546 | } | 546 | } |
547 | 547 | ||
548 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { | 548 | for (post_div = max_post_div; post_div >= min_post_div; --post_div) { |
549 | uint32_t ref_div; | 549 | uint32_t ref_div; |
550 | 550 | ||
551 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 551 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
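Scanning post_div from max_post_div down to min_post_div is what implements the "prefer high post dividers" change: among candidates of comparable error, the first one kept now uses the largest post divider, i.e. the highest VCO for a given target clock. The quantity being matched is sketched below; unit handling in the real code differs slightly, this only shows the divider relationship:

/* output ~= reference_freq * (fb_div + frac_fb_div / 10) / (ref_div * post_div),
 * and the VCO the hardware actually runs at is output * post_div. */
static u32 pll_output_freq(u32 reference_freq, u32 fb_div, u32 frac_fb_div,
                           u32 ref_div, u32 post_div)
{
        return (u32)div_u64((u64)reference_freq * (fb_div * 10 + frac_fb_div),
                            ref_div * post_div * 10);
}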
@@ -611,7 +611,8 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
611 | if ((best_vco == 0 && error < best_error) || | 611 | if ((best_vco == 0 && error < best_error) || |
612 | (best_vco != 0 && | 612 | (best_vco != 0 && |
613 | ((best_error > 100 && error < best_error - 100) || | 613 | ((best_error > 100 && error < best_error - 100) || |
614 | (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { | 614 | (abs(error - best_error) < 100 && |
615 | vco_diff < best_vco_diff)))) { | ||
615 | best_post_div = post_div; | 616 | best_post_div = post_div; |
616 | best_ref_div = ref_div; | 617 | best_ref_div = ref_div; |
617 | best_feedback_div = feedback_div; | 618 | best_feedback_div = feedback_div; |
@@ -619,29 +620,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
619 | best_freq = current_freq; | 620 | best_freq = current_freq; |
620 | best_error = error; | 621 | best_error = error; |
621 | best_vco_diff = vco_diff; | 622 | best_vco_diff = vco_diff; |
622 | } else if (current_freq == freq) { | ||
623 | if (best_freq == -1) { | ||
624 | best_post_div = post_div; | ||
625 | best_ref_div = ref_div; | ||
626 | best_feedback_div = feedback_div; | ||
627 | best_frac_feedback_div = frac_feedback_div; | ||
628 | best_freq = current_freq; | ||
629 | best_error = error; | ||
630 | best_vco_diff = vco_diff; | ||
631 | } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || | ||
632 | ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || | ||
633 | ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || | ||
634 | ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || | ||
635 | ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || | ||
636 | ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { | ||
637 | best_post_div = post_div; | ||
638 | best_ref_div = ref_div; | ||
639 | best_feedback_div = feedback_div; | ||
640 | best_frac_feedback_div = frac_feedback_div; | ||
641 | best_freq = current_freq; | ||
642 | best_error = error; | ||
643 | best_vco_diff = vco_diff; | ||
644 | } | ||
645 | } | 623 | } |
646 | if (current_freq < freq) | 624 | if (current_freq < freq) |
647 | min_frac_feed_div = frac_feedback_div + 1; | 625 | min_frac_feed_div = frac_feedback_div + 1; |
@@ -663,214 +641,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
663 | *post_div_p = best_post_div; | 641 | *post_div_p = best_post_div; |
664 | } | 642 | } |
665 | 643 | ||
666 | static bool | ||
667 | calc_fb_div(struct radeon_pll *pll, | ||
668 | uint32_t freq, | ||
669 | uint32_t post_div, | ||
670 | uint32_t ref_div, | ||
671 | uint32_t *fb_div, | ||
672 | uint32_t *fb_div_frac) | ||
673 | { | ||
674 | fixed20_12 feedback_divider, a, b; | ||
675 | u32 vco_freq; | ||
676 | |||
677 | vco_freq = freq * post_div; | ||
678 | /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ | ||
679 | a.full = dfixed_const(pll->reference_freq); | ||
680 | feedback_divider.full = dfixed_const(vco_freq); | ||
681 | feedback_divider.full = dfixed_div(feedback_divider, a); | ||
682 | a.full = dfixed_const(ref_div); | ||
683 | feedback_divider.full = dfixed_mul(feedback_divider, a); | ||
684 | |||
685 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | ||
686 | /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ | ||
687 | a.full = dfixed_const(10); | ||
688 | feedback_divider.full = dfixed_mul(feedback_divider, a); | ||
689 | feedback_divider.full += dfixed_const_half(0); | ||
690 | feedback_divider.full = dfixed_floor(feedback_divider); | ||
691 | feedback_divider.full = dfixed_div(feedback_divider, a); | ||
692 | |||
693 | /* *fb_div = floor(feedback_divider); */ | ||
694 | a.full = dfixed_floor(feedback_divider); | ||
695 | *fb_div = dfixed_trunc(a); | ||
696 | /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ | ||
697 | a.full = dfixed_const(10); | ||
698 | b.full = dfixed_mul(feedback_divider, a); | ||
699 | |||
700 | feedback_divider.full = dfixed_floor(feedback_divider); | ||
701 | feedback_divider.full = dfixed_mul(feedback_divider, a); | ||
702 | feedback_divider.full = b.full - feedback_divider.full; | ||
703 | *fb_div_frac = dfixed_trunc(feedback_divider); | ||
704 | } else { | ||
705 | /* *fb_div = floor(feedback_divider + 0.5); */ | ||
706 | feedback_divider.full += dfixed_const_half(0); | ||
707 | feedback_divider.full = dfixed_floor(feedback_divider); | ||
708 | |||
709 | *fb_div = dfixed_trunc(feedback_divider); | ||
710 | *fb_div_frac = 0; | ||
711 | } | ||
712 | |||
713 | if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div)) | ||
714 | return false; | ||
715 | else | ||
716 | return true; | ||
717 | } | ||
718 | |||
719 | static bool | ||
720 | calc_fb_ref_div(struct radeon_pll *pll, | ||
721 | uint32_t freq, | ||
722 | uint32_t post_div, | ||
723 | uint32_t *fb_div, | ||
724 | uint32_t *fb_div_frac, | ||
725 | uint32_t *ref_div) | ||
726 | { | ||
727 | fixed20_12 ffreq, max_error, error, pll_out, a; | ||
728 | u32 vco; | ||
729 | u32 pll_out_min, pll_out_max; | ||
730 | |||
731 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
732 | pll_out_min = pll->lcd_pll_out_min; | ||
733 | pll_out_max = pll->lcd_pll_out_max; | ||
734 | } else { | ||
735 | pll_out_min = pll->pll_out_min; | ||
736 | pll_out_max = pll->pll_out_max; | ||
737 | } | ||
738 | |||
739 | ffreq.full = dfixed_const(freq); | ||
740 | /* max_error = ffreq * 0.0025; */ | ||
741 | a.full = dfixed_const(400); | ||
742 | max_error.full = dfixed_div(ffreq, a); | ||
743 | |||
744 | for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { | ||
745 | if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { | ||
746 | vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); | ||
747 | vco = vco / ((*ref_div) * 10); | ||
748 | |||
749 | if ((vco < pll_out_min) || (vco > pll_out_max)) | ||
750 | continue; | ||
751 | |||
752 | /* pll_out = vco / post_div; */ | ||
753 | a.full = dfixed_const(post_div); | ||
754 | pll_out.full = dfixed_const(vco); | ||
755 | pll_out.full = dfixed_div(pll_out, a); | ||
756 | |||
757 | if (pll_out.full >= ffreq.full) { | ||
758 | error.full = pll_out.full - ffreq.full; | ||
759 | if (error.full <= max_error.full) | ||
760 | return true; | ||
761 | } | ||
762 | } | ||
763 | } | ||
764 | return false; | ||
765 | } | ||
766 | |||
767 | static void radeon_compute_pll_new(struct radeon_pll *pll, | ||
768 | uint64_t freq, | ||
769 | uint32_t *dot_clock_p, | ||
770 | uint32_t *fb_div_p, | ||
771 | uint32_t *frac_fb_div_p, | ||
772 | uint32_t *ref_div_p, | ||
773 | uint32_t *post_div_p) | ||
774 | { | ||
775 | u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; | ||
776 | u32 best_freq = 0, vco_frequency; | ||
777 | u32 pll_out_min, pll_out_max; | ||
778 | |||
779 | if (pll->flags & RADEON_PLL_IS_LCD) { | ||
780 | pll_out_min = pll->lcd_pll_out_min; | ||
781 | pll_out_max = pll->lcd_pll_out_max; | ||
782 | } else { | ||
783 | pll_out_min = pll->pll_out_min; | ||
784 | pll_out_max = pll->pll_out_max; | ||
785 | } | ||
786 | |||
787 | /* freq = freq / 10; */ | ||
788 | do_div(freq, 10); | ||
789 | |||
790 | if (pll->flags & RADEON_PLL_USE_POST_DIV) { | ||
791 | post_div = pll->post_div; | ||
792 | if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div)) | ||
793 | goto done; | ||
794 | |||
795 | vco_frequency = freq * post_div; | ||
796 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) | ||
797 | goto done; | ||
798 | |||
799 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | ||
800 | ref_div = pll->reference_div; | ||
801 | if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) | ||
802 | goto done; | ||
803 | if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) | ||
804 | goto done; | ||
805 | } | ||
806 | } else { | ||
807 | for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) { | ||
808 | if (pll->flags & RADEON_PLL_LEGACY) { | ||
809 | if ((post_div == 5) || | ||
810 | (post_div == 7) || | ||
811 | (post_div == 9) || | ||
812 | (post_div == 10) || | ||
813 | (post_div == 11)) | ||
814 | continue; | ||
815 | } | ||
816 | |||
817 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | ||
818 | continue; | ||
819 | |||
820 | vco_frequency = freq * post_div; | ||
821 | if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) | ||
822 | continue; | ||
823 | if (pll->flags & RADEON_PLL_USE_REF_DIV) { | ||
824 | ref_div = pll->reference_div; | ||
825 | if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) | ||
826 | goto done; | ||
827 | if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) | ||
828 | break; | ||
829 | } else { | ||
830 | if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div)) | ||
831 | break; | ||
832 | } | ||
833 | } | ||
834 | } | ||
835 | |||
836 | best_freq = pll->reference_freq * 10 * fb_div; | ||
837 | best_freq += pll->reference_freq * fb_div_frac; | ||
838 | best_freq = best_freq / (ref_div * post_div); | ||
839 | |||
840 | done: | ||
841 | if (best_freq == 0) | ||
842 | DRM_ERROR("Couldn't find valid PLL dividers\n"); | ||
843 | |||
844 | *dot_clock_p = best_freq / 10; | ||
845 | *fb_div_p = fb_div; | ||
846 | *frac_fb_div_p = fb_div_frac; | ||
847 | *ref_div_p = ref_div; | ||
848 | *post_div_p = post_div; | ||
849 | |||
850 | DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); | ||
851 | } | ||
852 | |||
853 | void radeon_compute_pll(struct radeon_pll *pll, | ||
854 | uint64_t freq, | ||
855 | uint32_t *dot_clock_p, | ||
856 | uint32_t *fb_div_p, | ||
857 | uint32_t *frac_fb_div_p, | ||
858 | uint32_t *ref_div_p, | ||
859 | uint32_t *post_div_p) | ||
860 | { | ||
861 | switch (pll->algo) { | ||
862 | case PLL_ALGO_NEW: | ||
863 | radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p, | ||
864 | frac_fb_div_p, ref_div_p, post_div_p); | ||
865 | break; | ||
866 | case PLL_ALGO_LEGACY: | ||
867 | default: | ||
868 | radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p, | ||
869 | frac_fb_div_p, ref_div_p, post_div_p); | ||
870 | break; | ||
871 | } | ||
872 | } | ||
873 | |||
874 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | 644 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) |
875 | { | 645 | { |
876 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); | 646 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
@@ -1034,6 +804,24 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) | |||
1034 | radeon_underscan_enum_list[i].name); | 804 | radeon_underscan_enum_list[i].name); |
1035 | } | 805 | } |
1036 | 806 | ||
807 | rdev->mode_info.underscan_hborder_property = | ||
808 | drm_property_create(rdev->ddev, | ||
809 | DRM_MODE_PROP_RANGE, | ||
810 | "underscan hborder", 2); | ||
811 | if (!rdev->mode_info.underscan_hborder_property) | ||
812 | return -ENOMEM; | ||
813 | rdev->mode_info.underscan_hborder_property->values[0] = 0; | ||
814 | rdev->mode_info.underscan_hborder_property->values[1] = 128; | ||
815 | |||
816 | rdev->mode_info.underscan_vborder_property = | ||
817 | drm_property_create(rdev->ddev, | ||
818 | DRM_MODE_PROP_RANGE, | ||
819 | "underscan vborder", 2); | ||
820 | if (!rdev->mode_info.underscan_vborder_property) | ||
821 | return -ENOMEM; | ||
822 | rdev->mode_info.underscan_vborder_property->values[0] = 0; | ||
823 | rdev->mode_info.underscan_vborder_property->values[1] = 128; | ||
824 | |||
1037 | return 0; | 825 | return 0; |
1038 | } | 826 | } |
1039 | 827 | ||
@@ -1191,8 +979,14 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1191 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && | 979 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
1192 | drm_detect_hdmi_monitor(radeon_connector->edid) && | 980 | drm_detect_hdmi_monitor(radeon_connector->edid) && |
1193 | is_hdtv_mode(mode)))) { | 981 | is_hdtv_mode(mode)))) { |
1194 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; | 982 | if (radeon_encoder->underscan_hborder != 0) |
1195 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; | 983 | radeon_crtc->h_border = radeon_encoder->underscan_hborder; |
984 | else | ||
985 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; | ||
986 | if (radeon_encoder->underscan_vborder != 0) | ||
987 | radeon_crtc->v_border = radeon_encoder->underscan_vborder; | ||
988 | else | ||
989 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; | ||
1196 | radeon_crtc->rmx_type = RMX_FULL; | 990 | radeon_crtc->rmx_type = RMX_FULL; |
1197 | src_v = crtc->mode.vdisplay; | 991 | src_v = crtc->mode.vdisplay; |
1198 | dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); | 992 | dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); |
@@ -1227,3 +1021,156 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1227 | } | 1021 | } |
1228 | return true; | 1022 | return true; |
1229 | } | 1023 | } |
1024 | |||
1025 | /* | ||
1026 | * Retrieve current video scanout position of crtc on a given gpu. | ||
1027 | * | ||
1028 | * \param rdev Device to query. | ||
1029 | * \param crtc Crtc to query. | ||
1030 | * \param *vpos Location where vertical scanout position should be stored. | ||
1031 | * \param *hpos Location where horizontal scanout position should go. | ||
1032 | * | ||
1033 | * Returns vpos as a positive number while in active scanout area. | ||
1034 | * Returns vpos as a negative number inside vblank, counting the number | ||
1035 | * of scanlines to go until end of vblank, e.g., -1 means "one scanline | ||
1036 | * until start of active scanout / end of vblank." | ||
1037 | * | ||
1038 | * \return Flags, or'ed together as follows: | ||
1039 | * | ||
1040 | * RADEON_SCANOUTPOS_VALID = Query successful. | ||
1041 | * RADEON_SCANOUTPOS_INVBL = Inside vblank. | ||
1042 | * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of | ||
1043 | * this flag means that returned position may be offset by a constant but | ||
1044 | * unknown small number of scanlines wrt. real scanout position. | ||
1045 | * | ||
1046 | */ | ||
1047 | int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos) | ||
1048 | { | ||
1049 | u32 stat_crtc = 0, vbl = 0, position = 0; | ||
1050 | int vbl_start, vbl_end, vtotal, ret = 0; | ||
1051 | bool in_vbl = true; | ||
1052 | |||
1053 | if (ASIC_IS_DCE4(rdev)) { | ||
1054 | if (crtc == 0) { | ||
1055 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1056 | EVERGREEN_CRTC0_REGISTER_OFFSET); | ||
1057 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1058 | EVERGREEN_CRTC0_REGISTER_OFFSET); | ||
1059 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1060 | } | ||
1061 | if (crtc == 1) { | ||
1062 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1063 | EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
1064 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1065 | EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
1066 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1067 | } | ||
1068 | if (crtc == 2) { | ||
1069 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1070 | EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
1071 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1072 | EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
1073 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1074 | } | ||
1075 | if (crtc == 3) { | ||
1076 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1077 | EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
1078 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1079 | EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
1080 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1081 | } | ||
1082 | if (crtc == 4) { | ||
1083 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1084 | EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
1085 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1086 | EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
1087 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1088 | } | ||
1089 | if (crtc == 5) { | ||
1090 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
1091 | EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
1092 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
1093 | EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
1094 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1095 | } | ||
1096 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
1097 | if (crtc == 0) { | ||
1098 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); | ||
1099 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); | ||
1100 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1101 | } | ||
1102 | if (crtc == 1) { | ||
1103 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); | ||
1104 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); | ||
1105 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1106 | } | ||
1107 | } else { | ||
1108 | /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ | ||
1109 | if (crtc == 0) { | ||
1110 | /* Assume vbl_end == 0, get vbl_start from | ||
1111 | * upper 16 bits. | ||
1112 | */ | ||
1113 | vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) & | ||
1114 | RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; | ||
1115 | /* Only retrieve vpos from upper 16 bits, set hpos == 0. */ | ||
1116 | position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; | ||
1117 | stat_crtc = RREG32(RADEON_CRTC_STATUS); | ||
1118 | if (!(stat_crtc & 1)) | ||
1119 | in_vbl = false; | ||
1120 | |||
1121 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1122 | } | ||
1123 | if (crtc == 1) { | ||
1124 | vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & | ||
1125 | RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; | ||
1126 | position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; | ||
1127 | stat_crtc = RREG32(RADEON_CRTC2_STATUS); | ||
1128 | if (!(stat_crtc & 1)) | ||
1129 | in_vbl = false; | ||
1130 | |||
1131 | ret |= RADEON_SCANOUTPOS_VALID; | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | /* Decode into vertical and horizontal scanout position. */ | ||
1136 | *vpos = position & 0x1fff; | ||
1137 | *hpos = (position >> 16) & 0x1fff; | ||
1138 | |||
1139 | /* Valid vblank area boundaries from gpu retrieved? */ | ||
1140 | if (vbl > 0) { | ||
1141 | /* Yes: Decode. */ | ||
1142 | ret |= RADEON_SCANOUTPOS_ACCURATE; | ||
1143 | vbl_start = vbl & 0x1fff; | ||
1144 | vbl_end = (vbl >> 16) & 0x1fff; | ||
1145 | } | ||
1146 | else { | ||
1147 | /* No: Fake something reasonable which gives at least ok results. */ | ||
1148 | vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay; | ||
1149 | vbl_end = 0; | ||
1150 | } | ||
1151 | |||
1152 | /* Test scanout position against vblank region. */ | ||
1153 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) | ||
1154 | in_vbl = false; | ||
1155 | |||
1156 | /* Check if inside vblank area and apply corrective offsets: | ||
1157 | * vpos will then be >=0 in video scanout area, but negative | ||
1158 | * within vblank area, counting down the number of lines until | ||
1159 | * start of scanout. | ||
1160 | */ | ||
1161 | |||
1162 | /* Inside "upper part" of vblank area? Apply corrective offset if so: */ | ||
1163 | if (in_vbl && (*vpos >= vbl_start)) { | ||
1164 | vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal; | ||
1165 | *vpos = *vpos - vtotal; | ||
1166 | } | ||
1167 | |||
1168 | /* Correct for shifted end of vbl at vbl_end. */ | ||
1169 | *vpos = *vpos - vbl_end; | ||
1170 | |||
1171 | /* In vblank? */ | ||
1172 | if (in_vbl) | ||
1173 | ret |= RADEON_SCANOUTPOS_INVBL; | ||
1174 | |||
1175 | return ret; | ||
1176 | } | ||
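The flag bits returned here are what the reworked radeon_pm_in_vbl() keys on: a power state change is only considered safe while no active crtc is in its visible scanout area. A minimal sketch of that consumer; the active_crtcs bookkeeping and the num_crtc loop bound are taken as given:

static bool radeon_pm_in_vbl_sketch(struct radeon_device *rdev)
{
        int crtc, vpos, hpos, stat;
        bool in_vbl = true;

        for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
                if (rdev->pm.active_crtcs & (1 << crtc)) {
                        stat = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
                        if ((stat & RADEON_SCANOUTPOS_VALID) &&
                            !(stat & RADEON_SCANOUTPOS_INVBL))
                                in_vbl = false;  /* this crtc is actively scanning out */
                }
        }
        return in_vbl;
}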
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 663cdc10a5c2..f29a2695d961 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -93,7 +93,6 @@ int radeon_benchmarking = 0; | |||
93 | int radeon_testing = 0; | 93 | int radeon_testing = 0; |
94 | int radeon_connector_table = 0; | 94 | int radeon_connector_table = 0; |
95 | int radeon_tv = 1; | 95 | int radeon_tv = 1; |
96 | int radeon_new_pll = -1; | ||
97 | int radeon_audio = 1; | 96 | int radeon_audio = 1; |
98 | int radeon_disp_priority = 0; | 97 | int radeon_disp_priority = 0; |
99 | int radeon_hw_i2c = 0; | 98 | int radeon_hw_i2c = 0; |
@@ -131,9 +130,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); | |||
131 | MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); | 130 | MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); |
132 | module_param_named(tv, radeon_tv, int, 0444); | 131 | module_param_named(tv, radeon_tv, int, 0444); |
133 | 132 | ||
134 | MODULE_PARM_DESC(new_pll, "Select new PLL code"); | ||
135 | module_param_named(new_pll, radeon_new_pll, int, 0444); | ||
136 | |||
137 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); | 133 | MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); |
138 | module_param_named(audio, radeon_audio, int, 0444); | 134 | module_param_named(audio, radeon_audio, int, 0444); |
139 | 135 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 2c293e8304d6..ae58b6849a2e 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -529,9 +529,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
529 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | 529 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; |
530 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 530 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
531 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 531 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
532 | if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) | 532 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) |
533 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 533 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
534 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 534 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
535 | args.v1.ucMisc |= (1 << 1); | 535 | args.v1.ucMisc |= (1 << 1); |
536 | } else { | 536 | } else { |
537 | if (dig->linkb) | 537 | if (dig->linkb) |
@@ -558,18 +558,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
558 | args.v2.ucTemporal = 0; | 558 | args.v2.ucTemporal = 0; |
559 | args.v2.ucFRC = 0; | 559 | args.v2.ucFRC = 0; |
560 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 560 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
561 | if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) | 561 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) |
562 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 562 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
563 | if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) { | 563 | if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { |
564 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; | 564 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; |
565 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 565 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
566 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; | 566 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; |
567 | } | 567 | } |
568 | if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) { | 568 | if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { |
569 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; | 569 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; |
570 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 570 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) |
571 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; | 571 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; |
572 | if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) | 572 | if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) |
573 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; | 573 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; |
574 | } | 574 | } |
575 | } else { | 575 | } else { |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index b1f9a81b5d1d..216392d0353b 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -72,7 +72,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) | |||
72 | bool wake = false; | 72 | bool wake = false; |
73 | unsigned long cjiffies; | 73 | unsigned long cjiffies; |
74 | 74 | ||
75 | seq = RREG32(rdev->fence_drv.scratch_reg); | 75 | if (rdev->wb.enabled) { |
76 | u32 scratch_index; | ||
77 | if (rdev->wb.use_event) | ||
78 | scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | ||
79 | else | ||
80 | scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | ||
81 | seq = rdev->wb.wb[scratch_index/4]; | ||
82 | } else | ||
83 | seq = RREG32(rdev->fence_drv.scratch_reg); | ||
76 | if (seq != rdev->fence_drv.last_seq) { | 84 | if (seq != rdev->fence_drv.last_seq) { |
77 | rdev->fence_drv.last_seq = seq; | 85 | rdev->fence_drv.last_seq = seq; |
78 | rdev->fence_drv.last_jiffies = jiffies; | 86 | rdev->fence_drv.last_jiffies = jiffies; |
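When writeback is enabled, the fence sequence number is fetched from the CPU-visible writeback page rather than via an MMIO read of the scratch register; the scratch register's offset from the scratch base picks the slot, with a different base offset for event-style fences. A minimal sketch of that index computation, using placeholder offsets in place of R600_WB_EVENT_OFFSET and RADEON_WB_SCRATCH_OFFSET:

    /* Sketch of the writeback fence read above: the scratch register's byte
     * offset inside the writeback page is derived from its MMIO address, then
     * used as an index into the CPU-visible u32 array. Offsets and register
     * addresses are stand-ins, not the real values. */
    #include <stdint.h>
    #include <stdio.h>

    #define WB_EVENT_OFFSET    3072  /* stand-in for R600_WB_EVENT_OFFSET     */
    #define WB_SCRATCH_OFFSET  0     /* stand-in for RADEON_WB_SCRATCH_OFFSET */

    static uint32_t read_fence_seq(const volatile uint32_t *wb, int use_event,
                                   uint32_t scratch_reg, uint32_t scratch_base)
    {
        uint32_t index;

        if (use_event)
            index = WB_EVENT_OFFSET + scratch_reg - scratch_base;
        else
            index = WB_SCRATCH_OFFSET + scratch_reg - scratch_base;

        return wb[index / 4];  /* byte offset -> 32-bit word index */
    }

    int main(void)
    {
        static uint32_t wb_page[4096 / 4];

        /* pretend the CP wrote sequence 42 into the second scratch slot */
        wb_page[(WB_SCRATCH_OFFSET + 8) / 4] = 42;
        printf("seq=%u\n",
               (unsigned)read_fence_seq(wb_page, 0, /*reg*/ 0x15e8, /*base*/ 0x15e0));
        return 0;
    }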
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 42954785247f..c0bf8b7cc56c 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -744,15 +744,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
744 | pll = &rdev->clock.p1pll; | 744 | pll = &rdev->clock.p1pll; |
745 | 745 | ||
746 | pll->flags = RADEON_PLL_LEGACY; | 746 | pll->flags = RADEON_PLL_LEGACY; |
747 | if (radeon_new_pll == 1) | ||
748 | pll->algo = PLL_ALGO_NEW; | ||
749 | else | ||
750 | pll->algo = PLL_ALGO_LEGACY; | ||
751 | |||
752 | if (mode->clock > 200000) /* range limits??? */ | ||
753 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | ||
754 | else | ||
755 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | ||
756 | 747 | ||
757 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 748 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
758 | if (encoder->crtc == crtc) { | 749 | if (encoder->crtc == crtc) { |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 2f78615f02aa..3cda63e37b28 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -139,22 +139,10 @@ struct radeon_tmds_pll { | |||
139 | #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) | 139 | #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) |
140 | #define RADEON_PLL_USE_REF_DIV (1 << 2) | 140 | #define RADEON_PLL_USE_REF_DIV (1 << 2) |
141 | #define RADEON_PLL_LEGACY (1 << 3) | 141 | #define RADEON_PLL_LEGACY (1 << 3) |
142 | #define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4) | 142 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 4) |
143 | #define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5) | 143 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 5) |
144 | #define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6) | 144 | #define RADEON_PLL_USE_POST_DIV (1 << 6) |
145 | #define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7) | 145 | #define RADEON_PLL_IS_LCD (1 << 7) |
146 | #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) | ||
147 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) | ||
148 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | ||
149 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | ||
150 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | ||
151 | #define RADEON_PLL_IS_LCD (1 << 13) | ||
152 | |||
153 | /* pll algo */ | ||
154 | enum radeon_pll_algo { | ||
155 | PLL_ALGO_LEGACY, | ||
156 | PLL_ALGO_NEW | ||
157 | }; | ||
158 | 146 | ||
159 | struct radeon_pll { | 147 | struct radeon_pll { |
160 | /* reference frequency */ | 148 | /* reference frequency */ |
@@ -188,8 +176,6 @@ struct radeon_pll { | |||
188 | 176 | ||
189 | /* pll id */ | 177 | /* pll id */ |
190 | uint32_t id; | 178 | uint32_t id; |
191 | /* pll algo */ | ||
192 | enum radeon_pll_algo algo; | ||
193 | }; | 179 | }; |
194 | 180 | ||
195 | struct radeon_i2c_chan { | 181 | struct radeon_i2c_chan { |
@@ -241,6 +227,8 @@ struct radeon_mode_info { | |||
241 | struct drm_property *tmds_pll_property; | 227 | struct drm_property *tmds_pll_property; |
242 | /* underscan */ | 228 | /* underscan */ |
243 | struct drm_property *underscan_property; | 229 | struct drm_property *underscan_property; |
230 | struct drm_property *underscan_hborder_property; | ||
231 | struct drm_property *underscan_vborder_property; | ||
244 | /* hardcoded DFP edid from BIOS */ | 232 | /* hardcoded DFP edid from BIOS */ |
245 | struct edid *bios_hardcoded_edid; | 233 | struct edid *bios_hardcoded_edid; |
246 | 234 | ||
@@ -337,22 +325,24 @@ struct radeon_encoder_ext_tmds { | |||
337 | struct radeon_atom_ss { | 325 | struct radeon_atom_ss { |
338 | uint16_t percentage; | 326 | uint16_t percentage; |
339 | uint8_t type; | 327 | uint8_t type; |
340 | uint8_t step; | 328 | uint16_t step; |
341 | uint8_t delay; | 329 | uint8_t delay; |
342 | uint8_t range; | 330 | uint8_t range; |
343 | uint8_t refdiv; | 331 | uint8_t refdiv; |
332 | /* asic_ss */ | ||
333 | uint16_t rate; | ||
334 | uint16_t amount; | ||
344 | }; | 335 | }; |
345 | 336 | ||
346 | struct radeon_encoder_atom_dig { | 337 | struct radeon_encoder_atom_dig { |
347 | bool linkb; | 338 | bool linkb; |
348 | /* atom dig */ | 339 | /* atom dig */ |
349 | bool coherent_mode; | 340 | bool coherent_mode; |
350 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ | 341 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */ |
351 | /* atom lvds */ | 342 | /* atom lvds/edp */ |
352 | uint32_t lvds_misc; | 343 | uint32_t lcd_misc; |
353 | uint16_t panel_pwr_delay; | 344 | uint16_t panel_pwr_delay; |
354 | enum radeon_pll_algo pll_algo; | 345 | uint32_t lcd_ss_id; |
355 | struct radeon_atom_ss *ss; | ||
356 | /* panel mode */ | 346 | /* panel mode */ |
357 | struct drm_display_mode native_mode; | 347 | struct drm_display_mode native_mode; |
358 | }; | 348 | }; |
@@ -371,6 +361,8 @@ struct radeon_encoder { | |||
371 | uint32_t pixel_clock; | 361 | uint32_t pixel_clock; |
372 | enum radeon_rmx_type rmx_type; | 362 | enum radeon_rmx_type rmx_type; |
373 | enum radeon_underscan_type underscan_type; | 363 | enum radeon_underscan_type underscan_type; |
364 | uint32_t underscan_hborder; | ||
365 | uint32_t underscan_vborder; | ||
374 | struct drm_display_mode native_mode; | 366 | struct drm_display_mode native_mode; |
375 | void *enc_priv; | 367 | void *enc_priv; |
376 | int audio_polling_active; | 368 | int audio_polling_active; |
@@ -437,6 +429,11 @@ struct radeon_framebuffer { | |||
437 | struct drm_gem_object *obj; | 429 | struct drm_gem_object *obj; |
438 | }; | 430 | }; |
439 | 431 | ||
432 | /* radeon_get_crtc_scanoutpos() return flags */ | ||
433 | #define RADEON_SCANOUTPOS_VALID (1 << 0) | ||
434 | #define RADEON_SCANOUTPOS_INVBL (1 << 1) | ||
435 | #define RADEON_SCANOUTPOS_ACCURATE (1 << 2) | ||
436 | |||
440 | extern enum radeon_tv_std | 437 | extern enum radeon_tv_std |
441 | radeon_combios_get_tv_info(struct radeon_device *rdev); | 438 | radeon_combios_get_tv_info(struct radeon_device *rdev); |
442 | extern enum radeon_tv_std | 439 | extern enum radeon_tv_std |
@@ -492,6 +489,13 @@ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | |||
492 | 489 | ||
493 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); | 490 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); |
494 | 491 | ||
492 | extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, | ||
493 | struct radeon_atom_ss *ss, | ||
494 | int id); | ||
495 | extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | ||
496 | struct radeon_atom_ss *ss, | ||
497 | int id, u32 clock); | ||
498 | |||
495 | extern void radeon_compute_pll(struct radeon_pll *pll, | 499 | extern void radeon_compute_pll(struct radeon_pll *pll, |
496 | uint64_t freq, | 500 | uint64_t freq, |
497 | uint32_t *dot_clock_p, | 501 | uint32_t *dot_clock_p, |
@@ -543,6 +547,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
543 | extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, | 547 | extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
544 | int x, int y); | 548 | int x, int y); |
545 | 549 | ||
550 | extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); | ||
551 | |||
546 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); | 552 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); |
547 | extern struct edid * | 553 | extern struct edid * |
548 | radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); | 554 | radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); |
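The new spread-spectrum helpers declared above fill a caller-provided radeon_atom_ss and return true only when the BIOS table has a matching entry, so callers can skip SS programming cleanly when it does not. A hedged sketch of that calling convention, with the ATOM query reduced to a stub (the real lookup parses the BIOS tables):

    /* Sketch of the lookup-and-program pattern implied by the new helpers:
     * the query fills a caller-provided struct and returns false when no
     * matching table entry exists. radeon_device and the query itself are
     * stand-ins here. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct radeon_atom_ss {
        uint16_t percentage;
        uint8_t  type;
        uint16_t step;
        uint8_t  delay, range, refdiv;
        uint16_t rate, amount;  /* asic_ss */
    };

    struct radeon_device { int dummy; };

    /* stand-in for radeon_atombios_get_ppll_ss_info(); always "not found" */
    static bool get_ppll_ss_info(struct radeon_device *rdev,
                                 struct radeon_atom_ss *ss, int id)
    {
        (void)rdev; (void)ss; (void)id;
        return false;
    }

    int main(void)
    {
        struct radeon_device rdev = {0};
        struct radeon_atom_ss ss = {0};

        if (get_ppll_ss_info(&rdev, &ss, /* hypothetical SS table id */ 1))
            printf("program spread spectrum (raw percentage=%u)\n", ss.percentage);
        else
            printf("no SS table entry, leaving spread spectrum off\n");
        return 0;
    }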
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f87efec76236..8c9b2ef32c68 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
712 | 712 | ||
713 | static bool radeon_pm_in_vbl(struct radeon_device *rdev) | 713 | static bool radeon_pm_in_vbl(struct radeon_device *rdev) |
714 | { | 714 | { |
715 | u32 stat_crtc = 0, vbl = 0, position = 0; | 715 | int crtc, vpos, hpos, vbl_status; |
716 | bool in_vbl = true; | 716 | bool in_vbl = true; |
717 | 717 | ||
718 | if (ASIC_IS_DCE4(rdev)) { | 718 | /* Iterate over all active crtc's. All crtc's must be in vblank, |
719 | if (rdev->pm.active_crtcs & (1 << 0)) { | 719 | * otherwise return in_vbl == false. |
720 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | 720 | */ |
721 | EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; | 721 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { |
722 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | 722 | if (rdev->pm.active_crtcs & (1 << crtc)) { |
723 | EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; | 723 | vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); |
724 | } | 724 | if ((vbl_status & RADEON_SCANOUTPOS_VALID) && |
725 | if (rdev->pm.active_crtcs & (1 << 1)) { | 725 | !(vbl_status & RADEON_SCANOUTPOS_INVBL)) |
726 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
727 | EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; | ||
728 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
729 | EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; | ||
730 | } | ||
731 | if (rdev->pm.active_crtcs & (1 << 2)) { | ||
732 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
733 | EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; | ||
734 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
735 | EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; | ||
736 | } | ||
737 | if (rdev->pm.active_crtcs & (1 << 3)) { | ||
738 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
739 | EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; | ||
740 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
741 | EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; | ||
742 | } | ||
743 | if (rdev->pm.active_crtcs & (1 << 4)) { | ||
744 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
745 | EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; | ||
746 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
747 | EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; | ||
748 | } | ||
749 | if (rdev->pm.active_crtcs & (1 << 5)) { | ||
750 | vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + | ||
751 | EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; | ||
752 | position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + | ||
753 | EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; | ||
754 | } | ||
755 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
756 | if (rdev->pm.active_crtcs & (1 << 0)) { | ||
757 | vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; | ||
758 | position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; | ||
759 | } | ||
760 | if (rdev->pm.active_crtcs & (1 << 1)) { | ||
761 | vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; | ||
762 | position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; | ||
763 | } | ||
764 | if (position < vbl && position > 1) | ||
765 | in_vbl = false; | ||
766 | } else { | ||
767 | if (rdev->pm.active_crtcs & (1 << 0)) { | ||
768 | stat_crtc = RREG32(RADEON_CRTC_STATUS); | ||
769 | if (!(stat_crtc & 1)) | ||
770 | in_vbl = false; | ||
771 | } | ||
772 | if (rdev->pm.active_crtcs & (1 << 1)) { | ||
773 | stat_crtc = RREG32(RADEON_CRTC2_STATUS); | ||
774 | if (!(stat_crtc & 1)) | ||
775 | in_vbl = false; | 726 | in_vbl = false; |
776 | } | 727 | } |
777 | } | 728 | } |
778 | 729 | ||
779 | if (position < vbl && position > 1) | ||
780 | in_vbl = false; | ||
781 | |||
782 | return in_vbl; | 730 | return in_vbl; |
783 | } | 731 | } |
784 | 732 | ||
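The reworked check above relies only on the scanout-position query: a CRTC counts against vblank when the query is valid and the in-vblank bit is clear, and an invalid query no longer influences the result. The same flag logic in a self-contained form, with radeon_get_crtc_scanoutpos() replaced by a mock:

    /* Mock version of the per-CRTC vblank check above. query_scanoutpos() is
     * a stand-in for radeon_get_crtc_scanoutpos(); only the flag handling
     * matters here. */
    #include <stdbool.h>
    #include <stdio.h>

    #define SCANOUTPOS_VALID (1 << 0)
    #define SCANOUTPOS_INVBL (1 << 1)

    static int query_scanoutpos(int crtc, int *vpos, int *hpos)
    {
        /* pretend crtc 0 is mid-frame and crtc 1 is inside vblank */
        *vpos = (crtc == 0) ? 400 : 1085;
        *hpos = 0;
        return (crtc == 0) ? SCANOUTPOS_VALID
                           : (SCANOUTPOS_VALID | SCANOUTPOS_INVBL);
    }

    static bool all_active_crtcs_in_vbl(unsigned int active_mask, int num_crtc)
    {
        bool in_vbl = true;
        int crtc, vpos, hpos, status;

        for (crtc = 0; crtc < num_crtc && in_vbl; crtc++) {
            if (!(active_mask & (1u << crtc)))
                continue;
            status = query_scanoutpos(crtc, &vpos, &hpos);
            if ((status & SCANOUTPOS_VALID) && !(status & SCANOUTPOS_INVBL))
                in_vbl = false;
        }
        return in_vbl;
    }

    int main(void)
    {
        printf("crtc0 only: %d\n", all_active_crtcs_in_vbl(0x1, 2)); /* 0: scanning out */
        printf("crtc1 only: %d\n", all_active_crtcs_in_vbl(0x2, 2)); /* 1: in vblank    */
        return 0;
    }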
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 261e98a276db..6ea798ce8218 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
247 | */ | 247 | */ |
248 | void radeon_ring_free_size(struct radeon_device *rdev) | 248 | void radeon_ring_free_size(struct radeon_device *rdev) |
249 | { | 249 | { |
250 | if (rdev->family >= CHIP_R600) | 250 | if (rdev->wb.enabled) |
251 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | 251 | rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; |
252 | else | 252 | else { |
253 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 253 | if (rdev->family >= CHIP_R600) |
254 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | ||
255 | else | ||
256 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | ||
257 | } | ||
254 | /* This works because ring_size is a power of 2 */ | 258 | /* This works because ring_size is a power of 2 */ |
255 | rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); | 259 | rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); |
256 | rdev->cp.ring_free_dw -= rdev->cp.wptr; | 260 | rdev->cp.ring_free_dw -= rdev->cp.wptr; |
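The free-space computation just after the read-pointer fetch depends on the ring size being a power of two, as the in-code comment notes: the wrapped distance between the pointers can then be obtained with a simple mask. A conceptual illustration of that arithmetic (not the driver's exact code):

    /* Conceptual illustration of free-space accounting in a power-of-two
     * ring: masking with (size - 1) yields the correct wrapped distance
     * between rptr and wptr. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ring_free_dw(uint32_t rptr, uint32_t wptr, uint32_t size_dw)
    {
        /* size_dw must be a power of two for the mask trick to work */
        return (rptr + size_dw - wptr) & (size_dw - 1);
    }

    int main(void)
    {
        /* 1024-dword ring: writer at 1000, reader wrapped around to 8 */
        printf("free=%u dwords\n", (unsigned)ring_free_dw(8, 1000, 1024));  /* -> 32  */
        printf("free=%u dwords\n", (unsigned)ring_free_dw(512, 100, 1024)); /* -> 412 */
        return 0;
    }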
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index ae2b76b9a388..f683e51a2a06 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev) | |||
397 | r = rs400_gart_enable(rdev); | 397 | r = rs400_gart_enable(rdev); |
398 | if (r) | 398 | if (r) |
399 | return r; | 399 | return r; |
400 | |||
401 | /* allocate wb buffer */ | ||
402 | r = radeon_wb_init(rdev); | ||
403 | if (r) | ||
404 | return r; | ||
405 | |||
400 | /* Enable IRQ */ | 406 | /* Enable IRQ */ |
401 | r100_irq_set(rdev); | 407 | r100_irq_set(rdev); |
402 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 408 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev) | |||
406 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 412 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
407 | return r; | 413 | return r; |
408 | } | 414 | } |
409 | r = r100_wb_init(rdev); | ||
410 | if (r) | ||
411 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
412 | r = r100_ib_init(rdev); | 415 | r = r100_ib_init(rdev); |
413 | if (r) { | 416 | if (r) { |
414 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 417 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev) | |||
443 | int rs400_suspend(struct radeon_device *rdev) | 446 | int rs400_suspend(struct radeon_device *rdev) |
444 | { | 447 | { |
445 | r100_cp_disable(rdev); | 448 | r100_cp_disable(rdev); |
446 | r100_wb_disable(rdev); | 449 | radeon_wb_disable(rdev); |
447 | r100_irq_disable(rdev); | 450 | r100_irq_disable(rdev); |
448 | rs400_gart_disable(rdev); | 451 | rs400_gart_disable(rdev); |
449 | return 0; | 452 | return 0; |
@@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev) | |||
452 | void rs400_fini(struct radeon_device *rdev) | 455 | void rs400_fini(struct radeon_device *rdev) |
453 | { | 456 | { |
454 | r100_cp_fini(rdev); | 457 | r100_cp_fini(rdev); |
455 | r100_wb_fini(rdev); | 458 | radeon_wb_fini(rdev); |
456 | r100_ib_fini(rdev); | 459 | r100_ib_fini(rdev); |
457 | radeon_gem_fini(rdev); | 460 | radeon_gem_fini(rdev); |
458 | rs400_gart_fini(rdev); | 461 | rs400_gart_fini(rdev); |
@@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev) | |||
526 | /* Somethings want wront with the accel init stop accel */ | 529 | /* Somethings want wront with the accel init stop accel */ |
527 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 530 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
528 | r100_cp_fini(rdev); | 531 | r100_cp_fini(rdev); |
529 | r100_wb_fini(rdev); | 532 | radeon_wb_fini(rdev); |
530 | r100_ib_fini(rdev); | 533 | r100_ib_fini(rdev); |
531 | rs400_gart_fini(rdev); | 534 | rs400_gart_fini(rdev); |
532 | radeon_irq_kms_fini(rdev); | 535 | radeon_irq_kms_fini(rdev); |
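The startup rework above (and the matching changes in rs600, rs690 and rv515 below) makes the writeback buffer a hard prerequisite, allocated right after the GART is enabled, instead of a best-effort r100_wb_init() call after CP init that only warned on failure. A stubbed sketch of the resulting ordering and error propagation; every helper is a stand-in returning 0:

    /* Stubbed sketch of the reworked startup ordering: GART, then writeback,
     * then IRQ and CP. All helpers are stand-ins so the control flow can be
     * followed in isolation. */
    #include <stdio.h>

    static int gart_enable(void) { return 0; }
    static int wb_init(void)     { return 0; }  /* stand-in for radeon_wb_init() */
    static void irq_set(void)    { }
    static int cp_init(void)     { return 0; }
    static int ib_init(void)     { return 0; }

    static int startup(void)
    {
        int r;

        r = gart_enable();
        if (r)
            return r;

        /* writeback is now allocated up front; failure aborts startup */
        r = wb_init();
        if (r)
            return r;

        irq_set();

        r = cp_init();
        if (r) {
            fprintf(stderr, "failed initializing CP (%d)\n", r);
            return r;
        }
        /* the old best-effort r100_wb_init() call that sat here is gone */
        r = ib_init();
        if (r) {
            fprintf(stderr, "failed initializing IB (%d)\n", r);
            return r;
        }
        return 0;
    }

    int main(void)
    {
        printf("startup -> %d\n", startup());
        return 0;
    }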
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index cc05b230d7ef..8d8359a5d459 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -795,6 +795,12 @@ static int rs600_startup(struct radeon_device *rdev) | |||
795 | r = rs600_gart_enable(rdev); | 795 | r = rs600_gart_enable(rdev); |
796 | if (r) | 796 | if (r) |
797 | return r; | 797 | return r; |
798 | |||
799 | /* allocate wb buffer */ | ||
800 | r = radeon_wb_init(rdev); | ||
801 | if (r) | ||
802 | return r; | ||
803 | |||
798 | /* Enable IRQ */ | 804 | /* Enable IRQ */ |
799 | rs600_irq_set(rdev); | 805 | rs600_irq_set(rdev); |
800 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 806 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -804,9 +810,6 @@ static int rs600_startup(struct radeon_device *rdev) | |||
804 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 810 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
805 | return r; | 811 | return r; |
806 | } | 812 | } |
807 | r = r100_wb_init(rdev); | ||
808 | if (r) | ||
809 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
810 | r = r100_ib_init(rdev); | 813 | r = r100_ib_init(rdev); |
811 | if (r) { | 814 | if (r) { |
812 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 815 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -847,7 +850,7 @@ int rs600_suspend(struct radeon_device *rdev) | |||
847 | { | 850 | { |
848 | r600_audio_fini(rdev); | 851 | r600_audio_fini(rdev); |
849 | r100_cp_disable(rdev); | 852 | r100_cp_disable(rdev); |
850 | r100_wb_disable(rdev); | 853 | radeon_wb_disable(rdev); |
851 | rs600_irq_disable(rdev); | 854 | rs600_irq_disable(rdev); |
852 | rs600_gart_disable(rdev); | 855 | rs600_gart_disable(rdev); |
853 | return 0; | 856 | return 0; |
@@ -857,7 +860,7 @@ void rs600_fini(struct radeon_device *rdev) | |||
857 | { | 860 | { |
858 | r600_audio_fini(rdev); | 861 | r600_audio_fini(rdev); |
859 | r100_cp_fini(rdev); | 862 | r100_cp_fini(rdev); |
860 | r100_wb_fini(rdev); | 863 | radeon_wb_fini(rdev); |
861 | r100_ib_fini(rdev); | 864 | r100_ib_fini(rdev); |
862 | radeon_gem_fini(rdev); | 865 | radeon_gem_fini(rdev); |
863 | rs600_gart_fini(rdev); | 866 | rs600_gart_fini(rdev); |
@@ -931,7 +934,7 @@ int rs600_init(struct radeon_device *rdev) | |||
931 | /* Somethings want wront with the accel init stop accel */ | 934 | /* Somethings want wront with the accel init stop accel */ |
932 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 935 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
933 | r100_cp_fini(rdev); | 936 | r100_cp_fini(rdev); |
934 | r100_wb_fini(rdev); | 937 | radeon_wb_fini(rdev); |
935 | r100_ib_fini(rdev); | 938 | r100_ib_fini(rdev); |
936 | rs600_gart_fini(rdev); | 939 | rs600_gart_fini(rdev); |
937 | radeon_irq_kms_fini(rdev); | 940 | radeon_irq_kms_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3e3f75718be3..70ed66ef1ca8 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -615,6 +615,12 @@ static int rs690_startup(struct radeon_device *rdev) | |||
615 | r = rs400_gart_enable(rdev); | 615 | r = rs400_gart_enable(rdev); |
616 | if (r) | 616 | if (r) |
617 | return r; | 617 | return r; |
618 | |||
619 | /* allocate wb buffer */ | ||
620 | r = radeon_wb_init(rdev); | ||
621 | if (r) | ||
622 | return r; | ||
623 | |||
618 | /* Enable IRQ */ | 624 | /* Enable IRQ */ |
619 | rs600_irq_set(rdev); | 625 | rs600_irq_set(rdev); |
620 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 626 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -624,9 +630,6 @@ static int rs690_startup(struct radeon_device *rdev) | |||
624 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 630 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
625 | return r; | 631 | return r; |
626 | } | 632 | } |
627 | r = r100_wb_init(rdev); | ||
628 | if (r) | ||
629 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
630 | r = r100_ib_init(rdev); | 633 | r = r100_ib_init(rdev); |
631 | if (r) { | 634 | if (r) { |
632 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 635 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -667,7 +670,7 @@ int rs690_suspend(struct radeon_device *rdev) | |||
667 | { | 670 | { |
668 | r600_audio_fini(rdev); | 671 | r600_audio_fini(rdev); |
669 | r100_cp_disable(rdev); | 672 | r100_cp_disable(rdev); |
670 | r100_wb_disable(rdev); | 673 | radeon_wb_disable(rdev); |
671 | rs600_irq_disable(rdev); | 674 | rs600_irq_disable(rdev); |
672 | rs400_gart_disable(rdev); | 675 | rs400_gart_disable(rdev); |
673 | return 0; | 676 | return 0; |
@@ -677,7 +680,7 @@ void rs690_fini(struct radeon_device *rdev) | |||
677 | { | 680 | { |
678 | r600_audio_fini(rdev); | 681 | r600_audio_fini(rdev); |
679 | r100_cp_fini(rdev); | 682 | r100_cp_fini(rdev); |
680 | r100_wb_fini(rdev); | 683 | radeon_wb_fini(rdev); |
681 | r100_ib_fini(rdev); | 684 | r100_ib_fini(rdev); |
682 | radeon_gem_fini(rdev); | 685 | radeon_gem_fini(rdev); |
683 | rs400_gart_fini(rdev); | 686 | rs400_gart_fini(rdev); |
@@ -752,7 +755,7 @@ int rs690_init(struct radeon_device *rdev) | |||
752 | /* Somethings want wront with the accel init stop accel */ | 755 | /* Somethings want wront with the accel init stop accel */ |
753 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 756 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
754 | r100_cp_fini(rdev); | 757 | r100_cp_fini(rdev); |
755 | r100_wb_fini(rdev); | 758 | radeon_wb_fini(rdev); |
756 | r100_ib_fini(rdev); | 759 | r100_ib_fini(rdev); |
757 | rs400_gart_fini(rdev); | 760 | rs400_gart_fini(rdev); |
758 | radeon_irq_kms_fini(rdev); | 761 | radeon_irq_kms_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 4d6e86041a9f..5d569f41f4ae 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev) | |||
386 | if (r) | 386 | if (r) |
387 | return r; | 387 | return r; |
388 | } | 388 | } |
389 | |||
390 | /* allocate wb buffer */ | ||
391 | r = radeon_wb_init(rdev); | ||
392 | if (r) | ||
393 | return r; | ||
394 | |||
389 | /* Enable IRQ */ | 395 | /* Enable IRQ */ |
390 | rs600_irq_set(rdev); | 396 | rs600_irq_set(rdev); |
391 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | 397 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
@@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev) | |||
395 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 401 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
396 | return r; | 402 | return r; |
397 | } | 403 | } |
398 | r = r100_wb_init(rdev); | ||
399 | if (r) | ||
400 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
401 | r = r100_ib_init(rdev); | 404 | r = r100_ib_init(rdev); |
402 | if (r) { | 405 | if (r) { |
403 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | 406 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); |
@@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev) | |||
431 | int rv515_suspend(struct radeon_device *rdev) | 434 | int rv515_suspend(struct radeon_device *rdev) |
432 | { | 435 | { |
433 | r100_cp_disable(rdev); | 436 | r100_cp_disable(rdev); |
434 | r100_wb_disable(rdev); | 437 | radeon_wb_disable(rdev); |
435 | rs600_irq_disable(rdev); | 438 | rs600_irq_disable(rdev); |
436 | if (rdev->flags & RADEON_IS_PCIE) | 439 | if (rdev->flags & RADEON_IS_PCIE) |
437 | rv370_pcie_gart_disable(rdev); | 440 | rv370_pcie_gart_disable(rdev); |
@@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
447 | void rv515_fini(struct radeon_device *rdev) | 450 | void rv515_fini(struct radeon_device *rdev) |
448 | { | 451 | { |
449 | r100_cp_fini(rdev); | 452 | r100_cp_fini(rdev); |
450 | r100_wb_fini(rdev); | 453 | radeon_wb_fini(rdev); |
451 | r100_ib_fini(rdev); | 454 | r100_ib_fini(rdev); |
452 | radeon_gem_fini(rdev); | 455 | radeon_gem_fini(rdev); |
453 | rv370_pcie_gart_fini(rdev); | 456 | rv370_pcie_gart_fini(rdev); |
@@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev) | |||
527 | /* Somethings want wront with the accel init stop accel */ | 530 | /* Somethings want wront with the accel init stop accel */ |
528 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 531 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
529 | r100_cp_fini(rdev); | 532 | r100_cp_fini(rdev); |
530 | r100_wb_fini(rdev); | 533 | radeon_wb_fini(rdev); |
531 | r100_ib_fini(rdev); | 534 | r100_ib_fini(rdev); |
532 | radeon_irq_kms_fini(rdev); | 535 | radeon_irq_kms_fini(rdev); |
533 | rv370_pcie_gart_fini(rdev); | 536 | rv370_pcie_gart_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bfa59db374d2..ff1cc58920c0 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -268,6 +268,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
268 | void r700_cp_stop(struct radeon_device *rdev) | 268 | void r700_cp_stop(struct radeon_device *rdev) |
269 | { | 269 | { |
270 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | 270 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
271 | WREG32(SCRATCH_UMSK, 0); | ||
271 | } | 272 | } |
272 | 273 | ||
273 | static int rv770_cp_load_microcode(struct radeon_device *rdev) | 274 | static int rv770_cp_load_microcode(struct radeon_device *rdev) |
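r700_cp_stop() now also clears SCRATCH_UMSK when halting the ME and PFP; as I read it, that masks off the CP's scratch-register writeback while the engine is stopped. An annotated stand-alone copy of the two writes, with WREG32() stubbed and the register offsets and halt bits treated as illustrative values only:

    /* Annotated stand-alone copy of the new r700_cp_stop() body. WREG32() is
     * a stub; the offsets and bit positions below are illustrative, not the
     * real register map. */
    #include <stdint.h>
    #include <stdio.h>

    #define CP_ME_CNTL   0x86d8       /* assumed offset */
    #define CP_ME_HALT   (1u << 28)   /* assumed bit    */
    #define CP_PFP_HALT  (1u << 26)   /* assumed bit    */
    #define SCRATCH_UMSK 0x8540       /* assumed offset */

    static void WREG32(uint32_t reg, uint32_t val)
    {
        printf("WREG32(0x%04x, 0x%08x)\n", reg, val);
    }

    static void r700_cp_stop(void)
    {
        /* halt the micro engine and the prefetch parser */
        WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
        /* assumption: mask off CP scratch writeback while the CP is halted */
        WREG32(SCRATCH_UMSK, 0);
    }

    int main(void)
    {
        r700_cp_stop();
        return 0;
    }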
@@ -1028,19 +1029,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1028 | rdev->asic->copy = NULL; | 1029 | rdev->asic->copy = NULL; |
1029 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 1030 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
1030 | } | 1031 | } |
1031 | /* pin copy shader into vram */ | 1032 | |
1032 | if (rdev->r600_blit.shader_obj) { | 1033 | /* allocate wb buffer */ |
1033 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 1034 | r = radeon_wb_init(rdev); |
1034 | if (unlikely(r != 0)) | 1035 | if (r) |
1035 | return r; | 1036 | return r; |
1036 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 1037 | |
1037 | &rdev->r600_blit.shader_gpu_addr); | ||
1038 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1039 | if (r) { | ||
1040 | DRM_ERROR("failed to pin blit object %d\n", r); | ||
1041 | return r; | ||
1042 | } | ||
1043 | } | ||
1044 | /* Enable IRQ */ | 1038 | /* Enable IRQ */ |
1045 | r = r600_irq_init(rdev); | 1039 | r = r600_irq_init(rdev); |
1046 | if (r) { | 1040 | if (r) { |
@@ -1059,8 +1053,7 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1059 | r = r600_cp_resume(rdev); | 1053 | r = r600_cp_resume(rdev); |
1060 | if (r) | 1054 | if (r) |
1061 | return r; | 1055 | return r; |
1062 | /* write back buffer are not vital so don't worry about failure */ | 1056 | |
1063 | r600_wb_enable(rdev); | ||
1064 | return 0; | 1057 | return 0; |
1065 | } | 1058 | } |
1066 | 1059 | ||
@@ -1106,7 +1099,7 @@ int rv770_suspend(struct radeon_device *rdev) | |||
1106 | r700_cp_stop(rdev); | 1099 | r700_cp_stop(rdev); |
1107 | rdev->cp.ready = false; | 1100 | rdev->cp.ready = false; |
1108 | r600_irq_suspend(rdev); | 1101 | r600_irq_suspend(rdev); |
1109 | r600_wb_disable(rdev); | 1102 | radeon_wb_disable(rdev); |
1110 | rv770_pcie_gart_disable(rdev); | 1103 | rv770_pcie_gart_disable(rdev); |
1111 | /* unpin shaders bo */ | 1104 | /* unpin shaders bo */ |
1112 | if (rdev->r600_blit.shader_obj) { | 1105 | if (rdev->r600_blit.shader_obj) { |
@@ -1201,8 +1194,8 @@ int rv770_init(struct radeon_device *rdev) | |||
1201 | if (r) { | 1194 | if (r) { |
1202 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 1195 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1203 | r700_cp_fini(rdev); | 1196 | r700_cp_fini(rdev); |
1204 | r600_wb_fini(rdev); | ||
1205 | r600_irq_fini(rdev); | 1197 | r600_irq_fini(rdev); |
1198 | radeon_wb_fini(rdev); | ||
1206 | radeon_irq_kms_fini(rdev); | 1199 | radeon_irq_kms_fini(rdev); |
1207 | rv770_pcie_gart_fini(rdev); | 1200 | rv770_pcie_gart_fini(rdev); |
1208 | rdev->accel_working = false; | 1201 | rdev->accel_working = false; |
@@ -1234,8 +1227,8 @@ void rv770_fini(struct radeon_device *rdev) | |||
1234 | { | 1227 | { |
1235 | r600_blit_fini(rdev); | 1228 | r600_blit_fini(rdev); |
1236 | r700_cp_fini(rdev); | 1229 | r700_cp_fini(rdev); |
1237 | r600_wb_fini(rdev); | ||
1238 | r600_irq_fini(rdev); | 1230 | r600_irq_fini(rdev); |
1231 | radeon_wb_fini(rdev); | ||
1239 | radeon_irq_kms_fini(rdev); | 1232 | radeon_irq_kms_fini(rdev); |
1240 | rv770_pcie_gart_fini(rdev); | 1233 | rv770_pcie_gart_fini(rdev); |
1241 | rv770_vram_scratch_fini(rdev); | 1234 | rv770_vram_scratch_fini(rdev); |