aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/rs690.c
diff options
context:
space:
mode:
authorAlex Deucher <alexander.deucher@amd.com>2012-10-25 16:58:55 -0400
committerAlex Deucher <alexander.deucher@amd.com>2013-06-27 10:49:21 -0400
commit3a4d8f7b61378d0811ac892a77d4434b01f17d1c (patch)
treeac16210ac67aacefea97d17796d121a6e49374ad /drivers/gpu/drm/radeon/rs690.c
parentda321c8a6a2a947710499273aaad733974af1689 (diff)
drm/radeon/kms: fix up rs780/rs880 display watermark calc for dpm
Calculate the low and high watermarks based on the low and high clocks for the current power state. The dynamic pm hw will select the appropriate watermark based on the internal dpm state. Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/rs690.c')
-rw-r--r--drivers/gpu/drm/radeon/rs690.c291
1 file changed, 167 insertions, 124 deletions
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 55880d5962c3..d8ddfb34545d 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -248,13 +248,16 @@ struct rs690_watermark {
248}; 248};
249 249
250static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, 250static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
251 struct radeon_crtc *crtc, 251 struct radeon_crtc *crtc,
252 struct rs690_watermark *wm) 252 struct rs690_watermark *wm,
253 bool low)
253{ 254{
254 struct drm_display_mode *mode = &crtc->base.mode; 255 struct drm_display_mode *mode = &crtc->base.mode;
255 fixed20_12 a, b, c; 256 fixed20_12 a, b, c;
256 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; 257 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
257 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; 258 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
259 fixed20_12 sclk, core_bandwidth, max_bandwidth;
260 u32 selected_sclk;
258 261
259 if (!crtc->base.enabled) { 262 if (!crtc->base.enabled) {
260 /* FIXME: wouldn't it better to set priority mark to maximum */ 263 /* FIXME: wouldn't it better to set priority mark to maximum */
@@ -262,6 +265,21 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
262 return; 265 return;
263 } 266 }
264 267
268 if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
269 (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
270 selected_sclk = radeon_dpm_get_sclk(rdev, low);
271 else
272 selected_sclk = rdev->pm.current_sclk;
273
274 /* sclk in Mhz */
275 a.full = dfixed_const(100);
276 sclk.full = dfixed_const(selected_sclk);
277 sclk.full = dfixed_div(sclk, a);
278
279 /* core_bandwidth = sclk(Mhz) * 16 */
280 a.full = dfixed_const(16);
281 core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
282
265 if (crtc->vsc.full > dfixed_const(2)) 283 if (crtc->vsc.full > dfixed_const(2))
266 wm->num_line_pair.full = dfixed_const(2); 284 wm->num_line_pair.full = dfixed_const(2);
267 else 285 else
@@ -322,36 +340,36 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
322 wm->active_time.full = dfixed_div(wm->active_time, a); 340 wm->active_time.full = dfixed_div(wm->active_time, a);
323 341
324 /* Maximun bandwidth is the minimun bandwidth of all component */ 342 /* Maximun bandwidth is the minimun bandwidth of all component */
325 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; 343 max_bandwidth = core_bandwidth;
326 if (rdev->mc.igp_sideport_enabled) { 344 if (rdev->mc.igp_sideport_enabled) {
327 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && 345 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
328 rdev->pm.sideport_bandwidth.full) 346 rdev->pm.sideport_bandwidth.full)
329 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; 347 max_bandwidth = rdev->pm.sideport_bandwidth;
330 read_delay_latency.full = dfixed_const(370 * 800 * 1000); 348 read_delay_latency.full = dfixed_const(370 * 800 * 1000);
331 read_delay_latency.full = dfixed_div(read_delay_latency, 349 read_delay_latency.full = dfixed_div(read_delay_latency,
332 rdev->pm.igp_sideport_mclk); 350 rdev->pm.igp_sideport_mclk);
333 } else { 351 } else {
334 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && 352 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
335 rdev->pm.k8_bandwidth.full) 353 rdev->pm.k8_bandwidth.full)
336 rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; 354 max_bandwidth = rdev->pm.k8_bandwidth;
337 if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && 355 if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
338 rdev->pm.ht_bandwidth.full) 356 rdev->pm.ht_bandwidth.full)
339 rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; 357 max_bandwidth = rdev->pm.ht_bandwidth;
340 read_delay_latency.full = dfixed_const(5000); 358 read_delay_latency.full = dfixed_const(5000);
341 } 359 }
342 360
343 /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ 361 /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
344 a.full = dfixed_const(16); 362 a.full = dfixed_const(16);
345 rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); 363 sclk.full = dfixed_mul(max_bandwidth, a);
346 a.full = dfixed_const(1000); 364 a.full = dfixed_const(1000);
347 rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); 365 sclk.full = dfixed_div(a, sclk);
348 /* Determine chunk time 366 /* Determine chunk time
349 * ChunkTime = the time it takes the DCP to send one chunk of data 367 * ChunkTime = the time it takes the DCP to send one chunk of data
350 * to the LB which consists of pipeline delay and inter chunk gap 368 * to the LB which consists of pipeline delay and inter chunk gap
351 * sclk = system clock(ns) 369 * sclk = system clock(ns)
352 */ 370 */
353 a.full = dfixed_const(256 * 13); 371 a.full = dfixed_const(256 * 13);
354 chunk_time.full = dfixed_mul(rdev->pm.sclk, a); 372 chunk_time.full = dfixed_mul(sclk, a);
355 a.full = dfixed_const(10); 373 a.full = dfixed_const(10);
356 chunk_time.full = dfixed_div(chunk_time, a); 374 chunk_time.full = dfixed_div(chunk_time, a);
357 375
@@ -415,175 +433,200 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
415 } 433 }
416} 434}
417 435
418void rs690_bandwidth_update(struct radeon_device *rdev) 436static void rs690_compute_mode_priority(struct radeon_device *rdev,
437 struct rs690_watermark *wm0,
438 struct rs690_watermark *wm1,
439 struct drm_display_mode *mode0,
440 struct drm_display_mode *mode1,
441 u32 *d1mode_priority_a_cnt,
442 u32 *d2mode_priority_a_cnt)
419{ 443{
420 struct drm_display_mode *mode0 = NULL;
421 struct drm_display_mode *mode1 = NULL;
422 struct rs690_watermark wm0;
423 struct rs690_watermark wm1;
424 u32 tmp;
425 u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
426 u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
427 fixed20_12 priority_mark02, priority_mark12, fill_rate; 444 fixed20_12 priority_mark02, priority_mark12, fill_rate;
428 fixed20_12 a, b; 445 fixed20_12 a, b;
429 446
430 radeon_update_display_priority(rdev); 447 *d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
431 448 *d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
432 if (rdev->mode_info.crtcs[0]->base.enabled)
433 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
434 if (rdev->mode_info.crtcs[1]->base.enabled)
435 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
436 /*
437 * Set display0/1 priority up in the memory controller for
438 * modes if the user specifies HIGH for displaypriority
439 * option.
440 */
441 if ((rdev->disp_priority == 2) &&
442 ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
443 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
444 tmp &= C_000104_MC_DISP0R_INIT_LAT;
445 tmp &= C_000104_MC_DISP1R_INIT_LAT;
446 if (mode0)
447 tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
448 if (mode1)
449 tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
450 WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
451 }
452 rs690_line_buffer_adjust(rdev, mode0, mode1);
453
454 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
455 WREG32(R_006C9C_DCP_CONTROL, 0);
456 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
457 WREG32(R_006C9C_DCP_CONTROL, 2);
458
459 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
460 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
461
462 tmp = (wm0.lb_request_fifo_depth - 1);
463 tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
464 WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
465 449
466 if (mode0 && mode1) { 450 if (mode0 && mode1) {
467 if (dfixed_trunc(wm0.dbpp) > 64) 451 if (dfixed_trunc(wm0->dbpp) > 64)
468 a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); 452 a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
469 else 453 else
470 a.full = wm0.num_line_pair.full; 454 a.full = wm0->num_line_pair.full;
471 if (dfixed_trunc(wm1.dbpp) > 64) 455 if (dfixed_trunc(wm1->dbpp) > 64)
472 b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); 456 b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
473 else 457 else
474 b.full = wm1.num_line_pair.full; 458 b.full = wm1->num_line_pair.full;
475 a.full += b.full; 459 a.full += b.full;
476 fill_rate.full = dfixed_div(wm0.sclk, a); 460 fill_rate.full = dfixed_div(wm0->sclk, a);
477 if (wm0.consumption_rate.full > fill_rate.full) { 461 if (wm0->consumption_rate.full > fill_rate.full) {
478 b.full = wm0.consumption_rate.full - fill_rate.full; 462 b.full = wm0->consumption_rate.full - fill_rate.full;
479 b.full = dfixed_mul(b, wm0.active_time); 463 b.full = dfixed_mul(b, wm0->active_time);
480 a.full = dfixed_mul(wm0.worst_case_latency, 464 a.full = dfixed_mul(wm0->worst_case_latency,
481 wm0.consumption_rate); 465 wm0->consumption_rate);
482 a.full = a.full + b.full; 466 a.full = a.full + b.full;
483 b.full = dfixed_const(16 * 1000); 467 b.full = dfixed_const(16 * 1000);
484 priority_mark02.full = dfixed_div(a, b); 468 priority_mark02.full = dfixed_div(a, b);
485 } else { 469 } else {
486 a.full = dfixed_mul(wm0.worst_case_latency, 470 a.full = dfixed_mul(wm0->worst_case_latency,
487 wm0.consumption_rate); 471 wm0->consumption_rate);
488 b.full = dfixed_const(16 * 1000); 472 b.full = dfixed_const(16 * 1000);
489 priority_mark02.full = dfixed_div(a, b); 473 priority_mark02.full = dfixed_div(a, b);
490 } 474 }
491 if (wm1.consumption_rate.full > fill_rate.full) { 475 if (wm1->consumption_rate.full > fill_rate.full) {
492 b.full = wm1.consumption_rate.full - fill_rate.full; 476 b.full = wm1->consumption_rate.full - fill_rate.full;
493 b.full = dfixed_mul(b, wm1.active_time); 477 b.full = dfixed_mul(b, wm1->active_time);
494 a.full = dfixed_mul(wm1.worst_case_latency, 478 a.full = dfixed_mul(wm1->worst_case_latency,
495 wm1.consumption_rate); 479 wm1->consumption_rate);
496 a.full = a.full + b.full; 480 a.full = a.full + b.full;
497 b.full = dfixed_const(16 * 1000); 481 b.full = dfixed_const(16 * 1000);
498 priority_mark12.full = dfixed_div(a, b); 482 priority_mark12.full = dfixed_div(a, b);
499 } else { 483 } else {
500 a.full = dfixed_mul(wm1.worst_case_latency, 484 a.full = dfixed_mul(wm1->worst_case_latency,
501 wm1.consumption_rate); 485 wm1->consumption_rate);
502 b.full = dfixed_const(16 * 1000); 486 b.full = dfixed_const(16 * 1000);
503 priority_mark12.full = dfixed_div(a, b); 487 priority_mark12.full = dfixed_div(a, b);
504 } 488 }
505 if (wm0.priority_mark.full > priority_mark02.full) 489 if (wm0->priority_mark.full > priority_mark02.full)
506 priority_mark02.full = wm0.priority_mark.full; 490 priority_mark02.full = wm0->priority_mark.full;
507 if (dfixed_trunc(priority_mark02) < 0) 491 if (dfixed_trunc(priority_mark02) < 0)
508 priority_mark02.full = 0; 492 priority_mark02.full = 0;
509 if (wm0.priority_mark_max.full > priority_mark02.full) 493 if (wm0->priority_mark_max.full > priority_mark02.full)
510 priority_mark02.full = wm0.priority_mark_max.full; 494 priority_mark02.full = wm0->priority_mark_max.full;
511 if (wm1.priority_mark.full > priority_mark12.full) 495 if (wm1->priority_mark.full > priority_mark12.full)
512 priority_mark12.full = wm1.priority_mark.full; 496 priority_mark12.full = wm1->priority_mark.full;
513 if (dfixed_trunc(priority_mark12) < 0) 497 if (dfixed_trunc(priority_mark12) < 0)
514 priority_mark12.full = 0; 498 priority_mark12.full = 0;
515 if (wm1.priority_mark_max.full > priority_mark12.full) 499 if (wm1->priority_mark_max.full > priority_mark12.full)
516 priority_mark12.full = wm1.priority_mark_max.full; 500 priority_mark12.full = wm1->priority_mark_max.full;
517 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 501 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
518 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 502 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
519 if (rdev->disp_priority == 2) { 503 if (rdev->disp_priority == 2) {
520 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); 504 *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
521 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); 505 *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
522 } 506 }
523 } else if (mode0) { 507 } else if (mode0) {
524 if (dfixed_trunc(wm0.dbpp) > 64) 508 if (dfixed_trunc(wm0->dbpp) > 64)
525 a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); 509 a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
526 else 510 else
527 a.full = wm0.num_line_pair.full; 511 a.full = wm0->num_line_pair.full;
528 fill_rate.full = dfixed_div(wm0.sclk, a); 512 fill_rate.full = dfixed_div(wm0->sclk, a);
529 if (wm0.consumption_rate.full > fill_rate.full) { 513 if (wm0->consumption_rate.full > fill_rate.full) {
530 b.full = wm0.consumption_rate.full - fill_rate.full; 514 b.full = wm0->consumption_rate.full - fill_rate.full;
531 b.full = dfixed_mul(b, wm0.active_time); 515 b.full = dfixed_mul(b, wm0->active_time);
532 a.full = dfixed_mul(wm0.worst_case_latency, 516 a.full = dfixed_mul(wm0->worst_case_latency,
533 wm0.consumption_rate); 517 wm0->consumption_rate);
534 a.full = a.full + b.full; 518 a.full = a.full + b.full;
535 b.full = dfixed_const(16 * 1000); 519 b.full = dfixed_const(16 * 1000);
536 priority_mark02.full = dfixed_div(a, b); 520 priority_mark02.full = dfixed_div(a, b);
537 } else { 521 } else {
538 a.full = dfixed_mul(wm0.worst_case_latency, 522 a.full = dfixed_mul(wm0->worst_case_latency,
539 wm0.consumption_rate); 523 wm0->consumption_rate);
540 b.full = dfixed_const(16 * 1000); 524 b.full = dfixed_const(16 * 1000);
541 priority_mark02.full = dfixed_div(a, b); 525 priority_mark02.full = dfixed_div(a, b);
542 } 526 }
543 if (wm0.priority_mark.full > priority_mark02.full) 527 if (wm0->priority_mark.full > priority_mark02.full)
544 priority_mark02.full = wm0.priority_mark.full; 528 priority_mark02.full = wm0->priority_mark.full;
545 if (dfixed_trunc(priority_mark02) < 0) 529 if (dfixed_trunc(priority_mark02) < 0)
546 priority_mark02.full = 0; 530 priority_mark02.full = 0;
547 if (wm0.priority_mark_max.full > priority_mark02.full) 531 if (wm0->priority_mark_max.full > priority_mark02.full)
548 priority_mark02.full = wm0.priority_mark_max.full; 532 priority_mark02.full = wm0->priority_mark_max.full;
549 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); 533 *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
550 if (rdev->disp_priority == 2) 534 if (rdev->disp_priority == 2)
551 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); 535 *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
552 } else if (mode1) { 536 } else if (mode1) {
553 if (dfixed_trunc(wm1.dbpp) > 64) 537 if (dfixed_trunc(wm1->dbpp) > 64)
554 a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); 538 a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
555 else 539 else
556 a.full = wm1.num_line_pair.full; 540 a.full = wm1->num_line_pair.full;
557 fill_rate.full = dfixed_div(wm1.sclk, a); 541 fill_rate.full = dfixed_div(wm1->sclk, a);
558 if (wm1.consumption_rate.full > fill_rate.full) { 542 if (wm1->consumption_rate.full > fill_rate.full) {
559 b.full = wm1.consumption_rate.full - fill_rate.full; 543 b.full = wm1->consumption_rate.full - fill_rate.full;
560 b.full = dfixed_mul(b, wm1.active_time); 544 b.full = dfixed_mul(b, wm1->active_time);
561 a.full = dfixed_mul(wm1.worst_case_latency, 545 a.full = dfixed_mul(wm1->worst_case_latency,
562 wm1.consumption_rate); 546 wm1->consumption_rate);
563 a.full = a.full + b.full; 547 a.full = a.full + b.full;
564 b.full = dfixed_const(16 * 1000); 548 b.full = dfixed_const(16 * 1000);
565 priority_mark12.full = dfixed_div(a, b); 549 priority_mark12.full = dfixed_div(a, b);
566 } else { 550 } else {
567 a.full = dfixed_mul(wm1.worst_case_latency, 551 a.full = dfixed_mul(wm1->worst_case_latency,
568 wm1.consumption_rate); 552 wm1->consumption_rate);
569 b.full = dfixed_const(16 * 1000); 553 b.full = dfixed_const(16 * 1000);
570 priority_mark12.full = dfixed_div(a, b); 554 priority_mark12.full = dfixed_div(a, b);
571 } 555 }
572 if (wm1.priority_mark.full > priority_mark12.full) 556 if (wm1->priority_mark.full > priority_mark12.full)
573 priority_mark12.full = wm1.priority_mark.full; 557 priority_mark12.full = wm1->priority_mark.full;
574 if (dfixed_trunc(priority_mark12) < 0) 558 if (dfixed_trunc(priority_mark12) < 0)
575 priority_mark12.full = 0; 559 priority_mark12.full = 0;
576 if (wm1.priority_mark_max.full > priority_mark12.full) 560 if (wm1->priority_mark_max.full > priority_mark12.full)
577 priority_mark12.full = wm1.priority_mark_max.full; 561 priority_mark12.full = wm1->priority_mark_max.full;
578 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); 562 *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
579 if (rdev->disp_priority == 2) 563 if (rdev->disp_priority == 2)
580 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); 564 *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
581 } 565 }
566}
567
568void rs690_bandwidth_update(struct radeon_device *rdev)
569{
570 struct drm_display_mode *mode0 = NULL;
571 struct drm_display_mode *mode1 = NULL;
572 struct rs690_watermark wm0_high, wm0_low;
573 struct rs690_watermark wm1_high, wm1_low;
574 u32 tmp;
575 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
576 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
577
578 radeon_update_display_priority(rdev);
579
580 if (rdev->mode_info.crtcs[0]->base.enabled)
581 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
582 if (rdev->mode_info.crtcs[1]->base.enabled)
583 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
584 /*
585 * Set display0/1 priority up in the memory controller for
586 * modes if the user specifies HIGH for displaypriority
587 * option.
588 */
589 if ((rdev->disp_priority == 2) &&
590 ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
591 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
592 tmp &= C_000104_MC_DISP0R_INIT_LAT;
593 tmp &= C_000104_MC_DISP1R_INIT_LAT;
594 if (mode0)
595 tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
596 if (mode1)
597 tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
598 WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
599 }
600 rs690_line_buffer_adjust(rdev, mode0, mode1);
601
602 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
603 WREG32(R_006C9C_DCP_CONTROL, 0);
604 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
605 WREG32(R_006C9C_DCP_CONTROL, 2);
606
607 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
608 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
609
610 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
611 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
612
613 tmp = (wm0_high.lb_request_fifo_depth - 1);
614 tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
615 WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
616
617 rs690_compute_mode_priority(rdev,
618 &wm0_high, &wm1_high,
619 mode0, mode1,
620 &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
621 rs690_compute_mode_priority(rdev,
622 &wm0_low, &wm1_low,
623 mode0, mode1,
624 &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
582 625
583 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); 626 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
584 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); 627 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
585 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); 628 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
586 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); 629 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
587} 630}
588 631
589uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)