-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	437
-rw-r--r--	drivers/gpu/drm/radeon/evergreend.h	13
2 files changed, 449 insertions, 1 deletion
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 77ebcbc1b6e3..17b2fe925ce0 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -284,9 +284,444 @@ void evergreen_hpd_fini(struct radeon_device *rdev)
 	}
 }
 
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+					struct radeon_crtc *radeon_crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *other_mode)
+{
+	u32 tmp = 0;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers. The partitioning is done via one of four
+	 * preset allocations specified in bits 2:0:
+	 * first display controller
+	 *  0 - first half of lb (3840 * 2)
+	 *  1 - first 3/4 of lb (5760 * 2)
+	 *  2 - whole lb (7680 * 2)
+	 *  3 - first 1/4 of lb (1920 * 2)
+	 * second display controller
+	 *  4 - second half of lb (3840 * 2)
+	 *  5 - second 3/4 of lb (5760 * 2)
+	 *  6 - whole lb (7680 * 2)
+	 *  7 - last 1/4 of lb (1920 * 2)
+	 */
+	if (mode && other_mode) {
+		if (mode->hdisplay > other_mode->hdisplay) {
+			if (mode->hdisplay > 2560)
+				tmp = 1; /* 3/4 */
+			else
+				tmp = 0; /* 1/2 */
+		} else if (other_mode->hdisplay > mode->hdisplay) {
+			if (other_mode->hdisplay > 2560)
+				tmp = 3; /* 1/4 */
+			else
+				tmp = 0; /* 1/2 */
+		} else
+			tmp = 0; /* 1/2 */
+	} else if (mode)
+		tmp = 2; /* whole */
+	else if (other_mode)
+		tmp = 3; /* 1/4 */
+
+	/* second controller of the pair uses second half of the lb */
+	if (radeon_crtc->crtc_id % 2)
+		tmp += 4;
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+	switch (tmp) {
+	case 0:
+	case 4:
+	default:
+		return 3840 * 2;
+	case 1:
+	case 5:
+		return 5760 * 2;
+	case 2:
+	case 6:
+		return 7680 * 2;
+	case 3:
+	case 7:
+		return 1920 * 2;
+	}
+}
+
+static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	}
+}
+
+struct evergreen_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk; /* bandwidth per dram data pin in kHz */
+	u32 sclk; /* engine clock in kHz */
+	u32 disp_clk; /* display clock in kHz */
+	u32 src_width; /* viewport width */
+	u32 active_time; /* active display time in ns */
+	u32 blank_time; /* blank time in ns */
+	bool interlaced; /* mode is interlaced */
+	fixed20_12 vsc; /* vertical scale ratio */
+	u32 num_heads; /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size; /* line buffer allocated to pipe */
+	u32 vtaps; /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, disp_clk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
+	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = evergreen_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+};
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+};
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (evergreen_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct evergreen_wm_params wm;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 pipe_offset = radeon_crtc->crtc_id * 16;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		wm.yclk = rdev->pm.current_mclk * 10;
+		wm.sclk = rdev->pm.current_sclk * 10;
+		wm.disp_clk = mode->clock;
+		wm.src_width = mode->crtc_hdisplay;
+		wm.active_time = mode->crtc_hdisplay * pixel_period;
+		wm.blank_time = line_time - wm.active_time;
+		wm.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm.interlaced = true;
+		wm.vsc = radeon_crtc->vsc;
+		wm.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm.vtaps = 2;
+		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm.lb_size = lb_size;
+		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+		wm.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+		/* set for low clocks */
+		/* wm.yclk = low clk; wm.sclk = low clk */
+		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+		    !evergreen_check_latency_hiding(&wm) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_INFO("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+}
+
 void evergreen_bandwidth_update(struct radeon_device *rdev)
 {
-	/* XXX */
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
 }
 
 static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 319aa9752d40..d507f438eed0 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -412,6 +412,19 @@
 #define	SOFT_RESET_REGBB			(1 << 22)
 #define	SOFT_RESET_ORB				(1 << 23)
 
+/* display watermarks */
+#define	DC_LB_MEMORY_SPLIT			0x6b0c
+#define	PRIORITY_A_CNT				0x6b18
+#define		PRIORITY_MARK_MASK		0x7fff
+#define		PRIORITY_OFF			(1 << 16)
+#define		PRIORITY_ALWAYS_ON		(1 << 20)
+#define	PRIORITY_B_CNT				0x6b1c
+#define	PIPE0_ARBITRATION_CONTROL3		0x0bf0
+#       define LATENCY_WATERMARK_MASK(x)	((x) << 16)
+#define	PIPE0_LATENCY_CONTROL			0x0bf4
+#       define LATENCY_LOW_WATERMARK(x)		((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)	((x) << 16)
+
 #define IH_RB_CNTL                                        0x3e00
 #       define IH_RB_ENABLE                               (1 << 0)
 #       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
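
For reference, the line-buffer split selection added in evergreen_line_buffer_adjust() above can be exercised on its own. The snippet below is a minimal standalone sketch, not part of the patch: it mirrors the DC_LB_MEMORY_SPLIT selection and the resulting per-pipe line-buffer size, omits the register write, and uses made-up crtc ids and hdisplay widths purely as an illustration.

/*
 * Standalone sketch of the line-buffer split selection above.
 * An hdisplay of 0 stands in for an inactive head of the pair.
 */
#include <stdio.h>

static unsigned int lb_split_size(int crtc_id, int hdisplay, int other_hdisplay)
{
	unsigned int tmp = 0;

	if (hdisplay && other_hdisplay) {
		/* both heads active: split 1/2:1/2, 3/4:1/4 or 1/4:3/4 */
		if (hdisplay > other_hdisplay)
			tmp = (hdisplay > 2560) ? 1 : 0;
		else if (other_hdisplay > hdisplay)
			tmp = (other_hdisplay > 2560) ? 3 : 0;
	} else if (hdisplay)
		tmp = 2; /* other head idle: take the whole line buffer */
	else if (other_hdisplay)
		tmp = 3; /* this head idle: keep only the last 1/4 */

	/* second controller of the pair uses the 4..7 encodings */
	if (crtc_id % 2)
		tmp += 4;

	switch (tmp) {
	case 0: case 4: default: return 3840 * 2;
	case 1: case 5: return 5760 * 2;
	case 2: case 6: return 7680 * 2;
	case 3: case 7: return 1920 * 2;
	}
}

int main(void)
{
	/* example: a 2560-wide and a 1920-wide head sharing one line buffer */
	printf("crtc 0 lb size: %u\n", lb_split_size(0, 2560, 1920)); /* 7680 (1/2) */
	printf("crtc 1 lb size: %u\n", lb_split_size(1, 1920, 2560)); /* 7680 (1/2) */
	return 0;
}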