author     Ben Skeggs <bskeggs@redhat.com>     2010-04-27 21:46:42 -0400
committer  Dave Airlie <airlied@redhat.com>    2010-05-18 04:21:33 -0400
commit     68adac5e49436992e9c999fbae879d9ac5b72d4e (patch)
tree       6593c74a8baf4a0424bbc5b2a06264c0a6a9338d
parent     15a7df8db84e7a9d9915d879199ac4a870836c54 (diff)
drm: move radeon_fixed.h to shared drm_fixed.h header
Will also be used by the nouveau driver in the near future.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c                                        |   2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c                                                 | 194
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c                                        |  24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c                                       |  68
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c                                   |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h                                          |   2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c                                                | 280
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c                                                | 188
-rw-r--r--  include/drm/drm_fixed.h (renamed from drivers/gpu/drm/radeon/radeon_fixed.h)  |  40
9 files changed, 400 insertions(+), 400 deletions(-)
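
Context for the rename below: the moved header provides 20.12 fixed-point arithmetic (20 integer bits, 12 fractional bits), which the radeon bandwidth and PLL code uses because floating point is unavailable in the kernel. The sketch that follows is only an illustration of the dfixed_* semantics as inferred from the call sites in this diff; the authoritative definitions live in include/drm/drm_fixed.h and may differ in details such as rounding.

#include <linux/types.h>

/* Illustrative 20.12 fixed-point helpers, modelled on the dfixed_* names
 * used in this commit.  Not the verbatim contents of include/drm/drm_fixed.h. */
typedef union dfixed {
	u32 full;                                  /* 20 integer bits, 12 fractional bits */
} fixed20_12;

#define dfixed_const(A)       ((u32)((A) << 12))               /* integer -> 20.12  */
#define dfixed_const_half(A)  ((u32)(((A) << 12) + 2048))      /* A + 0.5 in 20.12  */
#define dfixed_init(A)        { .full = dfixed_const(A) }      /* array initializer */
#define dfixed_init_half(A)   { .full = dfixed_const_half(A) }
#define dfixed_trunc(A)       ((A).full >> 12)                 /* 20.12 -> integer  */

/* Widen to 64 bits so the intermediate product/quotient cannot overflow. */
static inline u32 dfixed_mul(fixed20_12 a, fixed20_12 b)
{
	return (u32)(((u64)a.full * b.full) >> 12);
}

static inline u32 dfixed_div(fixed20_12 a, fixed20_12 b)
{
	return (u32)(((u64)a.full << 12) / b.full);
}

With helpers along these lines, a call such as mem_bw.full = dfixed_mul(mclk_ff, temp_ff) in r100_bandwidth_update() multiplies two 20.12 values and stores another 20.12 value, which is exactly the pattern the hunks below convert from the old rfixed_*/fixed_* names.
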
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b11aa37ed6d5..4151ad8affed 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -26,7 +26,7 @@
26 | #include <drm/drmP.h> | 26 | #include <drm/drmP.h> |
27 | #include <drm/drm_crtc_helper.h> | 27 | #include <drm/drm_crtc_helper.h> |
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon_fixed.h" | 29 | #include <drm/drm_fixed.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 87c4ffaf545e..a5f11c300f6a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2686,53 +2686,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2686 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | 2686 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; |
2687 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | 2687 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; |
2688 | fixed20_12 memtcas_ff[8] = { | 2688 | fixed20_12 memtcas_ff[8] = { |
2689 | fixed_init(1), | 2689 | dfixed_init(1), |
2690 | fixed_init(2), | 2690 | dfixed_init(2), |
2691 | fixed_init(3), | 2691 | dfixed_init(3), |
2692 | fixed_init(0), | 2692 | dfixed_init(0), |
2693 | fixed_init_half(1), | 2693 | dfixed_init_half(1), |
2694 | fixed_init_half(2), | 2694 | dfixed_init_half(2), |
2695 | fixed_init(0), | 2695 | dfixed_init(0), |
2696 | }; | 2696 | }; |
2697 | fixed20_12 memtcas_rs480_ff[8] = { | 2697 | fixed20_12 memtcas_rs480_ff[8] = { |
2698 | fixed_init(0), | 2698 | dfixed_init(0), |
2699 | fixed_init(1), | 2699 | dfixed_init(1), |
2700 | fixed_init(2), | 2700 | dfixed_init(2), |
2701 | fixed_init(3), | 2701 | dfixed_init(3), |
2702 | fixed_init(0), | 2702 | dfixed_init(0), |
2703 | fixed_init_half(1), | 2703 | dfixed_init_half(1), |
2704 | fixed_init_half(2), | 2704 | dfixed_init_half(2), |
2705 | fixed_init_half(3), | 2705 | dfixed_init_half(3), |
2706 | }; | 2706 | }; |
2707 | fixed20_12 memtcas2_ff[8] = { | 2707 | fixed20_12 memtcas2_ff[8] = { |
2708 | fixed_init(0), | 2708 | dfixed_init(0), |
2709 | fixed_init(1), | 2709 | dfixed_init(1), |
2710 | fixed_init(2), | 2710 | dfixed_init(2), |
2711 | fixed_init(3), | 2711 | dfixed_init(3), |
2712 | fixed_init(4), | 2712 | dfixed_init(4), |
2713 | fixed_init(5), | 2713 | dfixed_init(5), |
2714 | fixed_init(6), | 2714 | dfixed_init(6), |
2715 | fixed_init(7), | 2715 | dfixed_init(7), |
2716 | }; | 2716 | }; |
2717 | fixed20_12 memtrbs[8] = { | 2717 | fixed20_12 memtrbs[8] = { |
2718 | fixed_init(1), | 2718 | dfixed_init(1), |
2719 | fixed_init_half(1), | 2719 | dfixed_init_half(1), |
2720 | fixed_init(2), | 2720 | dfixed_init(2), |
2721 | fixed_init_half(2), | 2721 | dfixed_init_half(2), |
2722 | fixed_init(3), | 2722 | dfixed_init(3), |
2723 | fixed_init_half(3), | 2723 | dfixed_init_half(3), |
2724 | fixed_init(4), | 2724 | dfixed_init(4), |
2725 | fixed_init_half(4) | 2725 | dfixed_init_half(4) |
2726 | }; | 2726 | }; |
2727 | fixed20_12 memtrbs_r4xx[8] = { | 2727 | fixed20_12 memtrbs_r4xx[8] = { |
2728 | fixed_init(4), | 2728 | dfixed_init(4), |
2729 | fixed_init(5), | 2729 | dfixed_init(5), |
2730 | fixed_init(6), | 2730 | dfixed_init(6), |
2731 | fixed_init(7), | 2731 | dfixed_init(7), |
2732 | fixed_init(8), | 2732 | dfixed_init(8), |
2733 | fixed_init(9), | 2733 | dfixed_init(9), |
2734 | fixed_init(10), | 2734 | dfixed_init(10), |
2735 | fixed_init(11) | 2735 | dfixed_init(11) |
2736 | }; | 2736 | }; |
2737 | fixed20_12 min_mem_eff; | 2737 | fixed20_12 min_mem_eff; |
2738 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | 2738 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; |
@@ -2763,7 +2763,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2763 | } | 2763 | } |
2764 | } | 2764 | } |
2765 | 2765 | ||
2766 | min_mem_eff.full = rfixed_const_8(0); | 2766 | min_mem_eff.full = dfixed_const_8(0); |
2767 | /* get modes */ | 2767 | /* get modes */ |
2768 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | 2768 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { |
2769 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | 2769 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); |
@@ -2784,28 +2784,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2784 | mclk_ff = rdev->pm.mclk; | 2784 | mclk_ff = rdev->pm.mclk; |
2785 | 2785 | ||
2786 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | 2786 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); |
2787 | temp_ff.full = rfixed_const(temp); | 2787 | temp_ff.full = dfixed_const(temp); |
2788 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | 2788 | mem_bw.full = dfixed_mul(mclk_ff, temp_ff); |
2789 | 2789 | ||
2790 | pix_clk.full = 0; | 2790 | pix_clk.full = 0; |
2791 | pix_clk2.full = 0; | 2791 | pix_clk2.full = 0; |
2792 | peak_disp_bw.full = 0; | 2792 | peak_disp_bw.full = 0; |
2793 | if (mode1) { | 2793 | if (mode1) { |
2794 | temp_ff.full = rfixed_const(1000); | 2794 | temp_ff.full = dfixed_const(1000); |
2795 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | 2795 | pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ |
2796 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | 2796 | pix_clk.full = dfixed_div(pix_clk, temp_ff); |
2797 | temp_ff.full = rfixed_const(pixel_bytes1); | 2797 | temp_ff.full = dfixed_const(pixel_bytes1); |
2798 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | 2798 | peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); |
2799 | } | 2799 | } |
2800 | if (mode2) { | 2800 | if (mode2) { |
2801 | temp_ff.full = rfixed_const(1000); | 2801 | temp_ff.full = dfixed_const(1000); |
2802 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | 2802 | pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ |
2803 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | 2803 | pix_clk2.full = dfixed_div(pix_clk2, temp_ff); |
2804 | temp_ff.full = rfixed_const(pixel_bytes2); | 2804 | temp_ff.full = dfixed_const(pixel_bytes2); |
2805 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | 2805 | peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); |
2806 | } | 2806 | } |
2807 | 2807 | ||
2808 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | 2808 | mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); |
2809 | if (peak_disp_bw.full >= mem_bw.full) { | 2809 | if (peak_disp_bw.full >= mem_bw.full) { |
2810 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" | 2810 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" |
2811 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | 2811 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); |
@@ -2847,9 +2847,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2847 | mem_tras = ((temp >> 12) & 0xf) + 4; | 2847 | mem_tras = ((temp >> 12) & 0xf) + 4; |
2848 | } | 2848 | } |
2849 | /* convert to FF */ | 2849 | /* convert to FF */ |
2850 | trcd_ff.full = rfixed_const(mem_trcd); | 2850 | trcd_ff.full = dfixed_const(mem_trcd); |
2851 | trp_ff.full = rfixed_const(mem_trp); | 2851 | trp_ff.full = dfixed_const(mem_trp); |
2852 | tras_ff.full = rfixed_const(mem_tras); | 2852 | tras_ff.full = dfixed_const(mem_tras); |
2853 | 2853 | ||
2854 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ | 2854 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ |
2855 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | 2855 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); |
@@ -2867,7 +2867,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2867 | /* extra cas latency stored in bits 23-25 0-4 clocks */ | 2867 | /* extra cas latency stored in bits 23-25 0-4 clocks */ |
2868 | data = (temp >> 23) & 0x7; | 2868 | data = (temp >> 23) & 0x7; |
2869 | if (data < 5) | 2869 | if (data < 5) |
2870 | tcas_ff.full += rfixed_const(data); | 2870 | tcas_ff.full += dfixed_const(data); |
2871 | } | 2871 | } |
2872 | 2872 | ||
2873 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | 2873 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { |
@@ -2904,72 +2904,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2904 | 2904 | ||
2905 | if (rdev->flags & RADEON_IS_AGP) { | 2905 | if (rdev->flags & RADEON_IS_AGP) { |
2906 | fixed20_12 agpmode_ff; | 2906 | fixed20_12 agpmode_ff; |
2907 | agpmode_ff.full = rfixed_const(radeon_agpmode); | 2907 | agpmode_ff.full = dfixed_const(radeon_agpmode); |
2908 | temp_ff.full = rfixed_const_666(16); | 2908 | temp_ff.full = dfixed_const_666(16); |
2909 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | 2909 | sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); |
2910 | } | 2910 | } |
2911 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | 2911 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ |
2912 | 2912 | ||
2913 | if (ASIC_IS_R300(rdev)) { | 2913 | if (ASIC_IS_R300(rdev)) { |
2914 | sclk_delay_ff.full = rfixed_const(250); | 2914 | sclk_delay_ff.full = dfixed_const(250); |
2915 | } else { | 2915 | } else { |
2916 | if ((rdev->family == CHIP_RV100) || | 2916 | if ((rdev->family == CHIP_RV100) || |
2917 | rdev->flags & RADEON_IS_IGP) { | 2917 | rdev->flags & RADEON_IS_IGP) { |
2918 | if (rdev->mc.vram_is_ddr) | 2918 | if (rdev->mc.vram_is_ddr) |
2919 | sclk_delay_ff.full = rfixed_const(41); | 2919 | sclk_delay_ff.full = dfixed_const(41); |
2920 | else | 2920 | else |
2921 | sclk_delay_ff.full = rfixed_const(33); | 2921 | sclk_delay_ff.full = dfixed_const(33); |
2922 | } else { | 2922 | } else { |
2923 | if (rdev->mc.vram_width == 128) | 2923 | if (rdev->mc.vram_width == 128) |
2924 | sclk_delay_ff.full = rfixed_const(57); | 2924 | sclk_delay_ff.full = dfixed_const(57); |
2925 | else | 2925 | else |
2926 | sclk_delay_ff.full = rfixed_const(41); | 2926 | sclk_delay_ff.full = dfixed_const(41); |
2927 | } | 2927 | } |
2928 | } | 2928 | } |
2929 | 2929 | ||
2930 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | 2930 | mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); |
2931 | 2931 | ||
2932 | if (rdev->mc.vram_is_ddr) { | 2932 | if (rdev->mc.vram_is_ddr) { |
2933 | if (rdev->mc.vram_width == 32) { | 2933 | if (rdev->mc.vram_width == 32) { |
2934 | k1.full = rfixed_const(40); | 2934 | k1.full = dfixed_const(40); |
2935 | c = 3; | 2935 | c = 3; |
2936 | } else { | 2936 | } else { |
2937 | k1.full = rfixed_const(20); | 2937 | k1.full = dfixed_const(20); |
2938 | c = 1; | 2938 | c = 1; |
2939 | } | 2939 | } |
2940 | } else { | 2940 | } else { |
2941 | k1.full = rfixed_const(40); | 2941 | k1.full = dfixed_const(40); |
2942 | c = 3; | 2942 | c = 3; |
2943 | } | 2943 | } |
2944 | 2944 | ||
2945 | temp_ff.full = rfixed_const(2); | 2945 | temp_ff.full = dfixed_const(2); |
2946 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | 2946 | mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); |
2947 | temp_ff.full = rfixed_const(c); | 2947 | temp_ff.full = dfixed_const(c); |
2948 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | 2948 | mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); |
2949 | temp_ff.full = rfixed_const(4); | 2949 | temp_ff.full = dfixed_const(4); |
2950 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | 2950 | mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); |
2951 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | 2951 | mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); |
2952 | mc_latency_mclk.full += k1.full; | 2952 | mc_latency_mclk.full += k1.full; |
2953 | 2953 | ||
2954 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | 2954 | mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); |
2955 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | 2955 | mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); |
2956 | 2956 | ||
2957 | /* | 2957 | /* |
2958 | HW cursor time assuming worst case of full size colour cursor. | 2958 | HW cursor time assuming worst case of full size colour cursor. |
2959 | */ | 2959 | */ |
2960 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | 2960 | temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); |
2961 | temp_ff.full += trcd_ff.full; | 2961 | temp_ff.full += trcd_ff.full; |
2962 | if (temp_ff.full < tras_ff.full) | 2962 | if (temp_ff.full < tras_ff.full) |
2963 | temp_ff.full = tras_ff.full; | 2963 | temp_ff.full = tras_ff.full; |
2964 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | 2964 | cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); |
2965 | 2965 | ||
2966 | temp_ff.full = rfixed_const(cur_size); | 2966 | temp_ff.full = dfixed_const(cur_size); |
2967 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | 2967 | cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); |
2968 | /* | 2968 | /* |
2969 | Find the total latency for the display data. | 2969 | Find the total latency for the display data. |
2970 | */ | 2970 | */ |
2971 | disp_latency_overhead.full = rfixed_const(8); | 2971 | disp_latency_overhead.full = dfixed_const(8); |
2972 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | 2972 | disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); |
2973 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | 2973 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; |
2974 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | 2974 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; |
2975 | 2975 | ||
@@ -2997,16 +2997,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2997 | /* | 2997 | /* |
2998 | Find the drain rate of the display buffer. | 2998 | Find the drain rate of the display buffer. |
2999 | */ | 2999 | */ |
3000 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | 3000 | temp_ff.full = dfixed_const((16/pixel_bytes1)); |
3001 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | 3001 | disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); |
3002 | 3002 | ||
3003 | /* | 3003 | /* |
3004 | Find the critical point of the display buffer. | 3004 | Find the critical point of the display buffer. |
3005 | */ | 3005 | */ |
3006 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | 3006 | crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); |
3007 | crit_point_ff.full += rfixed_const_half(0); | 3007 | crit_point_ff.full += dfixed_const_half(0); |
3008 | 3008 | ||
3009 | critical_point = rfixed_trunc(crit_point_ff); | 3009 | critical_point = dfixed_trunc(crit_point_ff); |
3010 | 3010 | ||
3011 | if (rdev->disp_priority == 2) { | 3011 | if (rdev->disp_priority == 2) { |
3012 | critical_point = 0; | 3012 | critical_point = 0; |
@@ -3077,8 +3077,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3077 | /* | 3077 | /* |
3078 | Find the drain rate of the display buffer. | 3078 | Find the drain rate of the display buffer. |
3079 | */ | 3079 | */ |
3080 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | 3080 | temp_ff.full = dfixed_const((16/pixel_bytes2)); |
3081 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | 3081 | disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); |
3082 | 3082 | ||
3083 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | 3083 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); |
3084 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | 3084 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); |
@@ -3099,8 +3099,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3099 | critical_point2 = 0; | 3099 | critical_point2 = 0; |
3100 | else { | 3100 | else { |
3101 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | 3101 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; |
3102 | temp_ff.full = rfixed_const(temp); | 3102 | temp_ff.full = dfixed_const(temp); |
3103 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | 3103 | temp_ff.full = dfixed_mul(mclk_ff, temp_ff); |
3104 | if (sclk_ff.full < temp_ff.full) | 3104 | if (sclk_ff.full < temp_ff.full) |
3105 | temp_ff.full = sclk_ff.full; | 3105 | temp_ff.full = sclk_ff.full; |
3106 | 3106 | ||
@@ -3108,15 +3108,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3108 | 3108 | ||
3109 | if (mode1) { | 3109 | if (mode1) { |
3110 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | 3110 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; |
3111 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | 3111 | time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); |
3112 | } else { | 3112 | } else { |
3113 | time_disp1_drop_priority.full = 0; | 3113 | time_disp1_drop_priority.full = 0; |
3114 | } | 3114 | } |
3115 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | 3115 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; |
3116 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | 3116 | crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); |
3117 | crit_point_ff.full += rfixed_const_half(0); | 3117 | crit_point_ff.full += dfixed_const_half(0); |
3118 | 3118 | ||
3119 | critical_point2 = rfixed_trunc(crit_point_ff); | 3119 | critical_point2 = dfixed_trunc(crit_point_ff); |
3120 | 3120 | ||
3121 | if (rdev->disp_priority == 2) { | 3121 | if (rdev->disp_priority == 2) { |
3122 | critical_point2 = 0; | 3122 | critical_point2 = 0; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0372ec96020f..e249da81dbfc 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
299 | sclk = radeon_get_engine_clock(rdev); | 299 | sclk = radeon_get_engine_clock(rdev); |
300 | mclk = rdev->clock.default_mclk; | 300 | mclk = rdev->clock.default_mclk; |
301 | 301 | ||
302 | a.full = rfixed_const(100); | 302 | a.full = dfixed_const(100); |
303 | rdev->pm.sclk.full = rfixed_const(sclk); | 303 | rdev->pm.sclk.full = dfixed_const(sclk); |
304 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | 304 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); |
305 | rdev->pm.mclk.full = rfixed_const(mclk); | 305 | rdev->pm.mclk.full = dfixed_const(mclk); |
306 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | 306 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); |
307 | 307 | ||
308 | a.full = rfixed_const(16); | 308 | a.full = dfixed_const(16); |
309 | /* core_bandwidth = sclk(Mhz) * 16 */ | 309 | /* core_bandwidth = sclk(Mhz) * 16 */ |
310 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | 310 | rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); |
311 | } else { | 311 | } else { |
312 | sclk = radeon_get_engine_clock(rdev); | 312 | sclk = radeon_get_engine_clock(rdev); |
313 | mclk = radeon_get_memory_clock(rdev); | 313 | mclk = radeon_get_memory_clock(rdev); |
314 | 314 | ||
315 | a.full = rfixed_const(100); | 315 | a.full = dfixed_const(100); |
316 | rdev->pm.sclk.full = rfixed_const(sclk); | 316 | rdev->pm.sclk.full = dfixed_const(sclk); |
317 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | 317 | rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); |
318 | rdev->pm.mclk.full = rfixed_const(mclk); | 318 | rdev->pm.mclk.full = dfixed_const(mclk); |
319 | rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); | 319 | rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); |
320 | } | 320 | } |
321 | } | 321 | } |
322 | 322 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bc9cc9211e67..10d70540fc50 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll,
633 | 633 | ||
634 | vco_freq = freq * post_div; | 634 | vco_freq = freq * post_div; |
635 | /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ | 635 | /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ |
636 | a.full = rfixed_const(pll->reference_freq); | 636 | a.full = dfixed_const(pll->reference_freq); |
637 | feedback_divider.full = rfixed_const(vco_freq); | 637 | feedback_divider.full = dfixed_const(vco_freq); |
638 | feedback_divider.full = rfixed_div(feedback_divider, a); | 638 | feedback_divider.full = dfixed_div(feedback_divider, a); |
639 | a.full = rfixed_const(ref_div); | 639 | a.full = dfixed_const(ref_div); |
640 | feedback_divider.full = rfixed_mul(feedback_divider, a); | 640 | feedback_divider.full = dfixed_mul(feedback_divider, a); |
641 | 641 | ||
642 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | 642 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
643 | /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ | 643 | /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ |
644 | a.full = rfixed_const(10); | 644 | a.full = dfixed_const(10); |
645 | feedback_divider.full = rfixed_mul(feedback_divider, a); | 645 | feedback_divider.full = dfixed_mul(feedback_divider, a); |
646 | feedback_divider.full += rfixed_const_half(0); | 646 | feedback_divider.full += dfixed_const_half(0); |
647 | feedback_divider.full = rfixed_floor(feedback_divider); | 647 | feedback_divider.full = dfixed_floor(feedback_divider); |
648 | feedback_divider.full = rfixed_div(feedback_divider, a); | 648 | feedback_divider.full = dfixed_div(feedback_divider, a); |
649 | 649 | ||
650 | /* *fb_div = floor(feedback_divider); */ | 650 | /* *fb_div = floor(feedback_divider); */ |
651 | a.full = rfixed_floor(feedback_divider); | 651 | a.full = dfixed_floor(feedback_divider); |
652 | *fb_div = rfixed_trunc(a); | 652 | *fb_div = dfixed_trunc(a); |
653 | /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ | 653 | /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ |
654 | a.full = rfixed_const(10); | 654 | a.full = dfixed_const(10); |
655 | b.full = rfixed_mul(feedback_divider, a); | 655 | b.full = dfixed_mul(feedback_divider, a); |
656 | 656 | ||
657 | feedback_divider.full = rfixed_floor(feedback_divider); | 657 | feedback_divider.full = dfixed_floor(feedback_divider); |
658 | feedback_divider.full = rfixed_mul(feedback_divider, a); | 658 | feedback_divider.full = dfixed_mul(feedback_divider, a); |
659 | feedback_divider.full = b.full - feedback_divider.full; | 659 | feedback_divider.full = b.full - feedback_divider.full; |
660 | *fb_div_frac = rfixed_trunc(feedback_divider); | 660 | *fb_div_frac = dfixed_trunc(feedback_divider); |
661 | } else { | 661 | } else { |
662 | /* *fb_div = floor(feedback_divider + 0.5); */ | 662 | /* *fb_div = floor(feedback_divider + 0.5); */ |
663 | feedback_divider.full += rfixed_const_half(0); | 663 | feedback_divider.full += dfixed_const_half(0); |
664 | feedback_divider.full = rfixed_floor(feedback_divider); | 664 | feedback_divider.full = dfixed_floor(feedback_divider); |
665 | 665 | ||
666 | *fb_div = rfixed_trunc(feedback_divider); | 666 | *fb_div = dfixed_trunc(feedback_divider); |
667 | *fb_div_frac = 0; | 667 | *fb_div_frac = 0; |
668 | } | 668 | } |
669 | 669 | ||
@@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll,
693 | pll_out_max = pll->pll_out_max; | 693 | pll_out_max = pll->pll_out_max; |
694 | } | 694 | } |
695 | 695 | ||
696 | ffreq.full = rfixed_const(freq); | 696 | ffreq.full = dfixed_const(freq); |
697 | /* max_error = ffreq * 0.0025; */ | 697 | /* max_error = ffreq * 0.0025; */ |
698 | a.full = rfixed_const(400); | 698 | a.full = dfixed_const(400); |
699 | max_error.full = rfixed_div(ffreq, a); | 699 | max_error.full = dfixed_div(ffreq, a); |
700 | 700 | ||
701 | for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { | 701 | for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { |
702 | if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { | 702 | if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { |
@@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll,
707 | continue; | 707 | continue; |
708 | 708 | ||
709 | /* pll_out = vco / post_div; */ | 709 | /* pll_out = vco / post_div; */ |
710 | a.full = rfixed_const(post_div); | 710 | a.full = dfixed_const(post_div); |
711 | pll_out.full = rfixed_const(vco); | 711 | pll_out.full = dfixed_const(vco); |
712 | pll_out.full = rfixed_div(pll_out, a); | 712 | pll_out.full = dfixed_div(pll_out, a); |
713 | 713 | ||
714 | if (pll_out.full >= ffreq.full) { | 714 | if (pll_out.full >= ffreq.full) { |
715 | error.full = pll_out.full - ffreq.full; | 715 | error.full = pll_out.full - ffreq.full; |
@@ -1099,15 +1099,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1099 | } | 1099 | } |
1100 | if (radeon_crtc->rmx_type != RMX_OFF) { | 1100 | if (radeon_crtc->rmx_type != RMX_OFF) { |
1101 | fixed20_12 a, b; | 1101 | fixed20_12 a, b; |
1102 | a.full = rfixed_const(crtc->mode.vdisplay); | 1102 | a.full = dfixed_const(crtc->mode.vdisplay); |
1103 | b.full = rfixed_const(radeon_crtc->native_mode.hdisplay); | 1103 | b.full = dfixed_const(radeon_crtc->native_mode.hdisplay); |
1104 | radeon_crtc->vsc.full = rfixed_div(a, b); | 1104 | radeon_crtc->vsc.full = dfixed_div(a, b); |
1105 | a.full = rfixed_const(crtc->mode.hdisplay); | 1105 | a.full = dfixed_const(crtc->mode.hdisplay); |
1106 | b.full = rfixed_const(radeon_crtc->native_mode.vdisplay); | 1106 | b.full = dfixed_const(radeon_crtc->native_mode.vdisplay); |
1107 | radeon_crtc->hsc.full = rfixed_div(a, b); | 1107 | radeon_crtc->hsc.full = dfixed_div(a, b); |
1108 | } else { | 1108 | } else { |
1109 | radeon_crtc->vsc.full = rfixed_const(1); | 1109 | radeon_crtc->vsc.full = dfixed_const(1); |
1110 | radeon_crtc->hsc.full = rfixed_const(1); | 1110 | radeon_crtc->hsc.full = dfixed_const(1); |
1111 | } | 1111 | } |
1112 | return true; | 1112 | return true; |
1113 | } | 1113 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index f4f9cb297e36..7701d42c4804 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -26,7 +26,7 @@
26 | #include <drm/drmP.h> | 26 | #include <drm/drmP.h> |
27 | #include <drm/drm_crtc_helper.h> | 27 | #include <drm/drm_crtc_helper.h> |
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon_fixed.h" | 29 | #include <drm/drm_fixed.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | 32 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 061a2a6801cc..38c0b63e9bbd 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -34,10 +34,10 @@
34 | #include <drm_mode.h> | 34 | #include <drm_mode.h> |
35 | #include <drm_edid.h> | 35 | #include <drm_edid.h> |
36 | #include <drm_dp_helper.h> | 36 | #include <drm_dp_helper.h> |
37 | #include <drm_fixed.h> | ||
37 | #include <linux/i2c.h> | 38 | #include <linux/i2c.h> |
38 | #include <linux/i2c-id.h> | 39 | #include <linux/i2c-id.h> |
39 | #include <linux/i2c-algo-bit.h> | 40 | #include <linux/i2c-algo-bit.h> |
40 | #include "radeon_fixed.h" | ||
41 | 41 | ||
42 | struct radeon_bo; | 42 | struct radeon_bo; |
43 | struct radeon_device; | 43 | struct radeon_device; |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 56a0aec84af2..e8edfe617286 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -76,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev)
76 | /* Get various system informations from bios */ | 76 | /* Get various system informations from bios */ |
77 | switch (crev) { | 77 | switch (crev) { |
78 | case 1: | 78 | case 1: |
79 | tmp.full = rfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
80 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); |
81 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
82 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 82 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
83 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock)); | 83 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); |
84 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth); | 84 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); |
85 | break; | 85 | break; |
86 | case 2: | 86 | case 2: |
87 | tmp.full = rfixed_const(100); | 87 | tmp.full = dfixed_const(100); |
88 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock); | 88 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); |
89 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 89 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
90 | rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock); | 90 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); |
91 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 91 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
92 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq); | 92 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); |
93 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 93 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
94 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 94 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
95 | break; | 95 | break; |
96 | default: | 96 | default: |
97 | tmp.full = rfixed_const(100); | 97 | tmp.full = dfixed_const(100); |
98 | /* We assume the slower possible clock ie worst case */ | 98 | /* We assume the slower possible clock ie worst case */ |
99 | /* DDR 333Mhz */ | 99 | /* DDR 333Mhz */ |
100 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | 100 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); |
101 | /* FIXME: system clock ? */ | 101 | /* FIXME: system clock ? */ |
102 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | 102 | rdev->pm.igp_system_mclk.full = dfixed_const(100); |
103 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 103 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
104 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); |
105 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
106 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 106 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
107 | break; | 107 | break; |
108 | } | 108 | } |
109 | } else { | 109 | } else { |
110 | tmp.full = rfixed_const(100); | 110 | tmp.full = dfixed_const(100); |
111 | /* We assume the slower possible clock ie worst case */ | 111 | /* We assume the slower possible clock ie worst case */ |
112 | /* DDR 333Mhz */ | 112 | /* DDR 333Mhz */ |
113 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | 113 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); |
114 | /* FIXME: system clock ? */ | 114 | /* FIXME: system clock ? */ |
115 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | 115 | rdev->pm.igp_system_mclk.full = dfixed_const(100); |
116 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | 116 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
117 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | 117 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); |
118 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | 118 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
119 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 119 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
120 | } | 120 | } |
121 | /* Compute various bandwidth */ | 121 | /* Compute various bandwidth */ |
122 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ | 122 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ |
123 | tmp.full = rfixed_const(4); | 123 | tmp.full = dfixed_const(4); |
124 | rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); | 124 | rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp); |
125 | /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 | 125 | /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 |
126 | * = ht_clk * ht_width / 5 | 126 | * = ht_clk * ht_width / 5 |
127 | */ | 127 | */ |
128 | tmp.full = rfixed_const(5); | 128 | tmp.full = dfixed_const(5); |
129 | rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, | 129 | rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk, |
130 | rdev->pm.igp_ht_link_width); | 130 | rdev->pm.igp_ht_link_width); |
131 | rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); | 131 | rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp); |
132 | if (tmp.full < rdev->pm.max_bandwidth.full) { | 132 | if (tmp.full < rdev->pm.max_bandwidth.full) { |
133 | /* HT link is a limiting factor */ | 133 | /* HT link is a limiting factor */ |
134 | rdev->pm.max_bandwidth.full = tmp.full; | 134 | rdev->pm.max_bandwidth.full = tmp.full; |
@@ -136,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev)
136 | /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 | 136 | /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 |
137 | * = (sideport_clk * 14) / 10 | 137 | * = (sideport_clk * 14) / 10 |
138 | */ | 138 | */ |
139 | tmp.full = rfixed_const(14); | 139 | tmp.full = dfixed_const(14); |
140 | rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); | 140 | rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp); |
141 | tmp.full = rfixed_const(10); | 141 | tmp.full = dfixed_const(10); |
142 | rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); | 142 | rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); |
143 | } | 143 | } |
144 | 144 | ||
145 | void rs690_mc_init(struct radeon_device *rdev) | 145 | void rs690_mc_init(struct radeon_device *rdev) |
@@ -239,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
239 | return; | 239 | return; |
240 | } | 240 | } |
241 | 241 | ||
242 | if (crtc->vsc.full > rfixed_const(2)) | 242 | if (crtc->vsc.full > dfixed_const(2)) |
243 | wm->num_line_pair.full = rfixed_const(2); | 243 | wm->num_line_pair.full = dfixed_const(2); |
244 | else | 244 | else |
245 | wm->num_line_pair.full = rfixed_const(1); | 245 | wm->num_line_pair.full = dfixed_const(1); |
246 | 246 | ||
247 | b.full = rfixed_const(mode->crtc_hdisplay); | 247 | b.full = dfixed_const(mode->crtc_hdisplay); |
248 | c.full = rfixed_const(256); | 248 | c.full = dfixed_const(256); |
249 | a.full = rfixed_div(b, c); | 249 | a.full = dfixed_div(b, c); |
250 | request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); | 250 | request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); |
251 | request_fifo_depth.full = rfixed_ceil(request_fifo_depth); | 251 | request_fifo_depth.full = dfixed_ceil(request_fifo_depth); |
252 | if (a.full < rfixed_const(4)) { | 252 | if (a.full < dfixed_const(4)) { |
253 | wm->lb_request_fifo_depth = 4; | 253 | wm->lb_request_fifo_depth = 4; |
254 | } else { | 254 | } else { |
255 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | 255 | wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); |
256 | } | 256 | } |
257 | 257 | ||
258 | /* Determine consumption rate | 258 | /* Determine consumption rate |
@@ -261,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
261 | * vsc = vertical scaling ratio, defined as source/destination | 261 | * vsc = vertical scaling ratio, defined as source/destination |
262 | * hsc = horizontal scaling ration, defined as source/destination | 262 | * hsc = horizontal scaling ration, defined as source/destination |
263 | */ | 263 | */ |
264 | a.full = rfixed_const(mode->clock); | 264 | a.full = dfixed_const(mode->clock); |
265 | b.full = rfixed_const(1000); | 265 | b.full = dfixed_const(1000); |
266 | a.full = rfixed_div(a, b); | 266 | a.full = dfixed_div(a, b); |
267 | pclk.full = rfixed_div(b, a); | 267 | pclk.full = dfixed_div(b, a); |
268 | if (crtc->rmx_type != RMX_OFF) { | 268 | if (crtc->rmx_type != RMX_OFF) { |
269 | b.full = rfixed_const(2); | 269 | b.full = dfixed_const(2); |
270 | if (crtc->vsc.full > b.full) | 270 | if (crtc->vsc.full > b.full) |
271 | b.full = crtc->vsc.full; | 271 | b.full = crtc->vsc.full; |
272 | b.full = rfixed_mul(b, crtc->hsc); | 272 | b.full = dfixed_mul(b, crtc->hsc); |
273 | c.full = rfixed_const(2); | 273 | c.full = dfixed_const(2); |
274 | b.full = rfixed_div(b, c); | 274 | b.full = dfixed_div(b, c); |
275 | consumption_time.full = rfixed_div(pclk, b); | 275 | consumption_time.full = dfixed_div(pclk, b); |
276 | } else { | 276 | } else { |
277 | consumption_time.full = pclk.full; | 277 | consumption_time.full = pclk.full; |
278 | } | 278 | } |
279 | a.full = rfixed_const(1); | 279 | a.full = dfixed_const(1); |
280 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | 280 | wm->consumption_rate.full = dfixed_div(a, consumption_time); |
281 | 281 | ||
282 | 282 | ||
283 | /* Determine line time | 283 | /* Determine line time |
@@ -285,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
285 | * LineTime = total number of horizontal pixels | 285 | * LineTime = total number of horizontal pixels |
286 | * pclk = pixel clock period(ns) | 286 | * pclk = pixel clock period(ns) |
287 | */ | 287 | */ |
288 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 288 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
289 | line_time.full = rfixed_mul(a, pclk); | 289 | line_time.full = dfixed_mul(a, pclk); |
290 | 290 | ||
291 | /* Determine active time | 291 | /* Determine active time |
292 | * ActiveTime = time of active region of display within one line, | 292 | * ActiveTime = time of active region of display within one line, |
293 | * hactive = total number of horizontal active pixels | 293 | * hactive = total number of horizontal active pixels |
294 | * htotal = total number of horizontal pixels | 294 | * htotal = total number of horizontal pixels |
295 | */ | 295 | */ |
296 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 296 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
297 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 297 | b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
298 | wm->active_time.full = rfixed_mul(line_time, b); | 298 | wm->active_time.full = dfixed_mul(line_time, b); |
299 | wm->active_time.full = rfixed_div(wm->active_time, a); | 299 | wm->active_time.full = dfixed_div(wm->active_time, a); |
300 | 300 | ||
301 | /* Maximun bandwidth is the minimun bandwidth of all component */ | 301 | /* Maximun bandwidth is the minimun bandwidth of all component */ |
302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | 302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; |
@@ -304,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | 304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && |
305 | rdev->pm.sideport_bandwidth.full) | 305 | rdev->pm.sideport_bandwidth.full) |
306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | 306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; |
307 | read_delay_latency.full = rfixed_const(370 * 800 * 1000); | 307 | read_delay_latency.full = dfixed_const(370 * 800 * 1000); |
308 | read_delay_latency.full = rfixed_div(read_delay_latency, | 308 | read_delay_latency.full = dfixed_div(read_delay_latency, |
309 | rdev->pm.igp_sideport_mclk); | 309 | rdev->pm.igp_sideport_mclk); |
310 | } else { | 310 | } else { |
311 | if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && | 311 | if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && |
@@ -314,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
314 | if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && | 314 | if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && |
315 | rdev->pm.ht_bandwidth.full) | 315 | rdev->pm.ht_bandwidth.full) |
316 | rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; | 316 | rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; |
317 | read_delay_latency.full = rfixed_const(5000); | 317 | read_delay_latency.full = dfixed_const(5000); |
318 | } | 318 | } |
319 | 319 | ||
320 | /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ | 320 | /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ |
321 | a.full = rfixed_const(16); | 321 | a.full = dfixed_const(16); |
322 | rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); | 322 | rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); |
323 | a.full = rfixed_const(1000); | 323 | a.full = dfixed_const(1000); |
324 | rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); | 324 | rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); |
325 | /* Determine chunk time | 325 | /* Determine chunk time |
326 | * ChunkTime = the time it takes the DCP to send one chunk of data | 326 | * ChunkTime = the time it takes the DCP to send one chunk of data |
327 | * to the LB which consists of pipeline delay and inter chunk gap | 327 | * to the LB which consists of pipeline delay and inter chunk gap |
328 | * sclk = system clock(ns) | 328 | * sclk = system clock(ns) |
329 | */ | 329 | */ |
330 | a.full = rfixed_const(256 * 13); | 330 | a.full = dfixed_const(256 * 13); |
331 | chunk_time.full = rfixed_mul(rdev->pm.sclk, a); | 331 | chunk_time.full = dfixed_mul(rdev->pm.sclk, a); |
332 | a.full = rfixed_const(10); | 332 | a.full = dfixed_const(10); |
333 | chunk_time.full = rfixed_div(chunk_time, a); | 333 | chunk_time.full = dfixed_div(chunk_time, a); |
334 | 334 | ||
335 | /* Determine the worst case latency | 335 | /* Determine the worst case latency |
336 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | 336 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) |
@@ -340,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
340 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | 340 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB |
341 | * which consists of pipeline delay and inter chunk gap | 341 | * which consists of pipeline delay and inter chunk gap |
342 | */ | 342 | */ |
343 | if (rfixed_trunc(wm->num_line_pair) > 1) { | 343 | if (dfixed_trunc(wm->num_line_pair) > 1) { |
344 | a.full = rfixed_const(3); | 344 | a.full = dfixed_const(3); |
345 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | 345 | wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
346 | wm->worst_case_latency.full += read_delay_latency.full; | 346 | wm->worst_case_latency.full += read_delay_latency.full; |
347 | } else { | 347 | } else { |
348 | a.full = rfixed_const(2); | 348 | a.full = dfixed_const(2); |
349 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | 349 | wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
350 | wm->worst_case_latency.full += read_delay_latency.full; | 350 | wm->worst_case_latency.full += read_delay_latency.full; |
351 | } | 351 | } |
352 | 352 | ||
@@ -360,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
360 | * of data to the LB which consists of | 360 | * of data to the LB which consists of |
361 | * pipeline delay and inter chunk gap | 361 | * pipeline delay and inter chunk gap |
362 | */ | 362 | */ |
363 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | 363 | if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { |
364 | tolerable_latency.full = line_time.full; | 364 | tolerable_latency.full = line_time.full; |
365 | } else { | 365 | } else { |
366 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | 366 | tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); |
367 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | 367 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; |
368 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | 368 | tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); |
369 | tolerable_latency.full = line_time.full - tolerable_latency.full; | 369 | tolerable_latency.full = line_time.full - tolerable_latency.full; |
370 | } | 370 | } |
371 | /* We assume worst case 32bits (4 bytes) */ | 371 | /* We assume worst case 32bits (4 bytes) */ |
372 | wm->dbpp.full = rfixed_const(4 * 8); | 372 | wm->dbpp.full = dfixed_const(4 * 8); |
373 | 373 | ||
374 | /* Determine the maximum priority mark | 374 | /* Determine the maximum priority mark |
375 | * width = viewport width in pixels | 375 | * width = viewport width in pixels |
376 | */ | 376 | */ |
377 | a.full = rfixed_const(16); | 377 | a.full = dfixed_const(16); |
378 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 378 | wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
379 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | 379 | wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); |
380 | wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); | 380 | wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); |
381 | 381 | ||
382 | /* Determine estimated width */ | 382 | /* Determine estimated width */ |
383 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | 383 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; |
384 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | 384 | estimated_width.full = dfixed_div(estimated_width, consumption_time); |
385 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | 385 | if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { |
386 | wm->priority_mark.full = rfixed_const(10); | 386 | wm->priority_mark.full = dfixed_const(10); |
387 | } else { | 387 | } else { |
388 | a.full = rfixed_const(16); | 388 | a.full = dfixed_const(16); |
389 | wm->priority_mark.full = rfixed_div(estimated_width, a); | 389 | wm->priority_mark.full = dfixed_div(estimated_width, a); |
390 | wm->priority_mark.full = rfixed_ceil(wm->priority_mark); | 390 | wm->priority_mark.full = dfixed_ceil(wm->priority_mark); |
391 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | 391 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; |
392 | } | 392 | } |
393 | } | 393 | } |
@@ -439,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
439 | WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); | 439 | WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); |
440 | 440 | ||
441 | if (mode0 && mode1) { | 441 | if (mode0 && mode1) { |
442 | if (rfixed_trunc(wm0.dbpp) > 64) | 442 | if (dfixed_trunc(wm0.dbpp) > 64) |
443 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 443 | a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); |
444 | else | 444 | else |
445 | a.full = wm0.num_line_pair.full; | 445 | a.full = wm0.num_line_pair.full; |
446 | if (rfixed_trunc(wm1.dbpp) > 64) | 446 | if (dfixed_trunc(wm1.dbpp) > 64) |
447 | b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | 447 | b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); |
448 | else | 448 | else |
449 | b.full = wm1.num_line_pair.full; | 449 | b.full = wm1.num_line_pair.full; |
450 | a.full += b.full; | 450 | a.full += b.full; |
451 | fill_rate.full = rfixed_div(wm0.sclk, a); | 451 | fill_rate.full = dfixed_div(wm0.sclk, a); |
452 | if (wm0.consumption_rate.full > fill_rate.full) { | 452 | if (wm0.consumption_rate.full > fill_rate.full) { |
453 | b.full = wm0.consumption_rate.full - fill_rate.full; | 453 | b.full = wm0.consumption_rate.full - fill_rate.full; |
454 | b.full = rfixed_mul(b, wm0.active_time); | 454 | b.full = dfixed_mul(b, wm0.active_time); |
455 | a.full = rfixed_mul(wm0.worst_case_latency, | 455 | a.full = dfixed_mul(wm0.worst_case_latency, |
456 | wm0.consumption_rate); | 456 | wm0.consumption_rate); |
457 | a.full = a.full + b.full; | 457 | a.full = a.full + b.full; |
458 | b.full = rfixed_const(16 * 1000); | 458 | b.full = dfixed_const(16 * 1000); |
459 | priority_mark02.full = rfixed_div(a, b); | 459 | priority_mark02.full = dfixed_div(a, b); |
460 | } else { | 460 | } else { |
461 | a.full = rfixed_mul(wm0.worst_case_latency, | 461 | a.full = dfixed_mul(wm0.worst_case_latency, |
462 | wm0.consumption_rate); | 462 | wm0.consumption_rate); |
463 | b.full = rfixed_const(16 * 1000); | 463 | b.full = dfixed_const(16 * 1000); |
464 | priority_mark02.full = rfixed_div(a, b); | 464 | priority_mark02.full = dfixed_div(a, b); |
465 | } | 465 | } |
466 | if (wm1.consumption_rate.full > fill_rate.full) { | 466 | if (wm1.consumption_rate.full > fill_rate.full) { |
467 | b.full = wm1.consumption_rate.full - fill_rate.full; | 467 | b.full = wm1.consumption_rate.full - fill_rate.full; |
468 | b.full = rfixed_mul(b, wm1.active_time); | 468 | b.full = dfixed_mul(b, wm1.active_time); |
469 | a.full = rfixed_mul(wm1.worst_case_latency, | 469 | a.full = dfixed_mul(wm1.worst_case_latency, |
470 | wm1.consumption_rate); | 470 | wm1.consumption_rate); |
471 | a.full = a.full + b.full; | 471 | a.full = a.full + b.full; |
472 | b.full = rfixed_const(16 * 1000); | 472 | b.full = dfixed_const(16 * 1000); |
473 | priority_mark12.full = rfixed_div(a, b); | 473 | priority_mark12.full = dfixed_div(a, b); |
474 | } else { | 474 | } else { |
475 | a.full = rfixed_mul(wm1.worst_case_latency, | 475 | a.full = dfixed_mul(wm1.worst_case_latency, |
476 | wm1.consumption_rate); | 476 | wm1.consumption_rate); |
477 | b.full = rfixed_const(16 * 1000); | 477 | b.full = dfixed_const(16 * 1000); |
478 | priority_mark12.full = rfixed_div(a, b); | 478 | priority_mark12.full = dfixed_div(a, b); |
479 | } | 479 | } |
480 | if (wm0.priority_mark.full > priority_mark02.full) | 480 | if (wm0.priority_mark.full > priority_mark02.full) |
481 | priority_mark02.full = wm0.priority_mark.full; | 481 | priority_mark02.full = wm0.priority_mark.full; |
482 | if (rfixed_trunc(priority_mark02) < 0) | 482 | if (dfixed_trunc(priority_mark02) < 0) |
483 | priority_mark02.full = 0; | 483 | priority_mark02.full = 0; |
484 | if (wm0.priority_mark_max.full > priority_mark02.full) | 484 | if (wm0.priority_mark_max.full > priority_mark02.full) |
485 | priority_mark02.full = wm0.priority_mark_max.full; | 485 | priority_mark02.full = wm0.priority_mark_max.full; |
486 | if (wm1.priority_mark.full > priority_mark12.full) | 486 | if (wm1.priority_mark.full > priority_mark12.full) |
487 | priority_mark12.full = wm1.priority_mark.full; | 487 | priority_mark12.full = wm1.priority_mark.full; |
488 | if (rfixed_trunc(priority_mark12) < 0) | 488 | if (dfixed_trunc(priority_mark12) < 0) |
489 | priority_mark12.full = 0; | 489 | priority_mark12.full = 0; |
490 | if (wm1.priority_mark_max.full > priority_mark12.full) | 490 | if (wm1.priority_mark_max.full > priority_mark12.full) |
491 | priority_mark12.full = wm1.priority_mark_max.full; | 491 | priority_mark12.full = wm1.priority_mark_max.full; |
492 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 492 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
493 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 493 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
494 | if (rdev->disp_priority == 2) { | 494 | if (rdev->disp_priority == 2) { |
495 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | 495 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
496 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | 496 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); |
@@ -500,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
500 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | 500 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
501 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | 501 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
502 | } else if (mode0) { | 502 | } else if (mode0) { |
503 | if (rfixed_trunc(wm0.dbpp) > 64) | 503 | if (dfixed_trunc(wm0.dbpp) > 64) |
504 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 504 | a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); |
505 | else | 505 | else |
506 | a.full = wm0.num_line_pair.full; | 506 | a.full = wm0.num_line_pair.full; |
507 | fill_rate.full = rfixed_div(wm0.sclk, a); | 507 | fill_rate.full = dfixed_div(wm0.sclk, a); |
508 | if (wm0.consumption_rate.full > fill_rate.full) { | 508 | if (wm0.consumption_rate.full > fill_rate.full) { |
509 | b.full = wm0.consumption_rate.full - fill_rate.full; | 509 | b.full = wm0.consumption_rate.full - fill_rate.full; |
510 | b.full = rfixed_mul(b, wm0.active_time); | 510 | b.full = dfixed_mul(b, wm0.active_time); |
511 | a.full = rfixed_mul(wm0.worst_case_latency, | 511 | a.full = dfixed_mul(wm0.worst_case_latency, |
512 | wm0.consumption_rate); | 512 | wm0.consumption_rate); |
513 | a.full = a.full + b.full; | 513 | a.full = a.full + b.full; |
514 | b.full = rfixed_const(16 * 1000); | 514 | b.full = dfixed_const(16 * 1000); |
515 | priority_mark02.full = rfixed_div(a, b); | 515 | priority_mark02.full = dfixed_div(a, b); |
516 | } else { | 516 | } else { |
517 | a.full = rfixed_mul(wm0.worst_case_latency, | 517 | a.full = dfixed_mul(wm0.worst_case_latency, |
518 | wm0.consumption_rate); | 518 | wm0.consumption_rate); |
519 | b.full = rfixed_const(16 * 1000); | 519 | b.full = dfixed_const(16 * 1000); |
520 | priority_mark02.full = rfixed_div(a, b); | 520 | priority_mark02.full = dfixed_div(a, b); |
521 | } | 521 | } |
522 | if (wm0.priority_mark.full > priority_mark02.full) | 522 | if (wm0.priority_mark.full > priority_mark02.full) |
523 | priority_mark02.full = wm0.priority_mark.full; | 523 | priority_mark02.full = wm0.priority_mark.full; |
524 | if (rfixed_trunc(priority_mark02) < 0) | 524 | if (dfixed_trunc(priority_mark02) < 0) |
525 | priority_mark02.full = 0; | 525 | priority_mark02.full = 0; |
526 | if (wm0.priority_mark_max.full > priority_mark02.full) | 526 | if (wm0.priority_mark_max.full > priority_mark02.full) |
527 | priority_mark02.full = wm0.priority_mark_max.full; | 527 | priority_mark02.full = wm0.priority_mark_max.full; |
528 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 528 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
529 | if (rdev->disp_priority == 2) | 529 | if (rdev->disp_priority == 2) |
530 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); | 530 | d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); |
531 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | 531 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); |
@@ -535,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
535 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, | 535 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, |
536 | S_006D4C_D2MODE_PRIORITY_B_OFF(1)); | 536 | S_006D4C_D2MODE_PRIORITY_B_OFF(1)); |
537 | } else { | 537 | } else { |
538 | if (rfixed_trunc(wm1.dbpp) > 64) | 538 | if (dfixed_trunc(wm1.dbpp) > 64) |
539 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | 539 | a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); |
540 | else | 540 | else |
541 | a.full = wm1.num_line_pair.full; | 541 | a.full = wm1.num_line_pair.full; |
542 | fill_rate.full = rfixed_div(wm1.sclk, a); | 542 | fill_rate.full = dfixed_div(wm1.sclk, a); |
543 | if (wm1.consumption_rate.full > fill_rate.full) { | 543 | if (wm1.consumption_rate.full > fill_rate.full) { |
544 | b.full = wm1.consumption_rate.full - fill_rate.full; | 544 | b.full = wm1.consumption_rate.full - fill_rate.full; |
545 | b.full = rfixed_mul(b, wm1.active_time); | 545 | b.full = dfixed_mul(b, wm1.active_time); |
546 | a.full = rfixed_mul(wm1.worst_case_latency, | 546 | a.full = dfixed_mul(wm1.worst_case_latency, |
547 | wm1.consumption_rate); | 547 | wm1.consumption_rate); |
548 | a.full = a.full + b.full; | 548 | a.full = a.full + b.full; |
549 | b.full = rfixed_const(16 * 1000); | 549 | b.full = dfixed_const(16 * 1000); |
550 | priority_mark12.full = rfixed_div(a, b); | 550 | priority_mark12.full = dfixed_div(a, b); |
551 | } else { | 551 | } else { |
552 | a.full = rfixed_mul(wm1.worst_case_latency, | 552 | a.full = dfixed_mul(wm1.worst_case_latency, |
553 | wm1.consumption_rate); | 553 | wm1.consumption_rate); |
554 | b.full = rfixed_const(16 * 1000); | 554 | b.full = dfixed_const(16 * 1000); |
555 | priority_mark12.full = rfixed_div(a, b); | 555 | priority_mark12.full = dfixed_div(a, b); |
556 | } | 556 | } |
557 | if (wm1.priority_mark.full > priority_mark12.full) | 557 | if (wm1.priority_mark.full > priority_mark12.full) |
558 | priority_mark12.full = wm1.priority_mark.full; | 558 | priority_mark12.full = wm1.priority_mark.full; |
559 | if (rfixed_trunc(priority_mark12) < 0) | 559 | if (dfixed_trunc(priority_mark12) < 0) |
560 | priority_mark12.full = 0; | 560 | priority_mark12.full = 0; |
561 | if (wm1.priority_mark_max.full > priority_mark12.full) | 561 | if (wm1.priority_mark_max.full > priority_mark12.full) |
562 | priority_mark12.full = wm1.priority_mark_max.full; | 562 | priority_mark12.full = wm1.priority_mark_max.full; |
563 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 563 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
564 | if (rdev->disp_priority == 2) | 564 | if (rdev->disp_priority == 2) |
565 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); | 565 | d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); |
566 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, | 566 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, |
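The rs690 hunks above repeat one computation per active display head: derive a fill rate from the engine clock, compare it with the head's consumption rate, turn the worst-case latency into a priority mark, and clamp the result against the precomputed per-head marks. Only the rfixed_ -> dfixed_ names change in this patch. As a reading aid, here is a hedged condensation of that repeated block; the helper and the struct name are assumed, the field names are the ones used in the hunks:

	/* Hypothetical condensation of the repeated branch bodies above; not part
	 * of the patch.  wm carries the per-CRTC watermark fields used above. */
	static u32 rs690_priority_mark(struct rs690_watermark *wm, fixed20_12 fill_rate)
	{
		fixed20_12 a, b, mark;

		a.full = dfixed_mul(wm->worst_case_latency, wm->consumption_rate);
		if (wm->consumption_rate.full > fill_rate.full) {
			/* Display drains the line buffer faster than it refills:
			 * add the deficit accumulated over the active part of a line. */
			b.full = wm->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm->active_time);
			a.full = a.full + b.full;
		}
		b.full = dfixed_const(16 * 1000);
		mark.full = dfixed_div(a, b);

		/* Never program less than the precomputed per-head marks (the code
		 * above additionally zeroes a negative truncation). */
		if (wm->priority_mark.full > mark.full)
			mark.full = wm->priority_mark.full;
		if (wm->priority_mark_max.full > mark.full)
			mark.full = wm->priority_mark_max.full;
		return dfixed_trunc(mark);
	}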
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index c513473d72ae..2009f4b20c28 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -795,20 +795,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
795 | return; | 795 | return; |
796 | } | 796 | } |
797 | 797 | ||
798 | if (crtc->vsc.full > rfixed_const(2)) | 798 | if (crtc->vsc.full > dfixed_const(2)) |
799 | wm->num_line_pair.full = rfixed_const(2); | 799 | wm->num_line_pair.full = dfixed_const(2); |
800 | else | 800 | else |
801 | wm->num_line_pair.full = rfixed_const(1); | 801 | wm->num_line_pair.full = dfixed_const(1); |
802 | 802 | ||
803 | b.full = rfixed_const(mode->crtc_hdisplay); | 803 | b.full = dfixed_const(mode->crtc_hdisplay); |
804 | c.full = rfixed_const(256); | 804 | c.full = dfixed_const(256); |
805 | a.full = rfixed_div(b, c); | 805 | a.full = dfixed_div(b, c); |
806 | request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); | 806 | request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); |
807 | request_fifo_depth.full = rfixed_ceil(request_fifo_depth); | 807 | request_fifo_depth.full = dfixed_ceil(request_fifo_depth); |
808 | if (a.full < rfixed_const(4)) { | 808 | if (a.full < dfixed_const(4)) { |
809 | wm->lb_request_fifo_depth = 4; | 809 | wm->lb_request_fifo_depth = 4; |
810 | } else { | 810 | } else { |
811 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | 811 | wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); |
812 | } | 812 | } |
813 | 813 | ||
814 | /* Determine consumption rate | 814 | /* Determine consumption rate |
@@ -817,23 +817,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
817 | * vsc = vertical scaling ratio, defined as source/destination | 817 | * vsc = vertical scaling ratio, defined as source/destination |
818 | * hsc = horizontal scaling ratio, defined as source/destination | 818 | * hsc = horizontal scaling ratio, defined as source/destination |
819 | */ | 819 | */ |
820 | a.full = rfixed_const(mode->clock); | 820 | a.full = dfixed_const(mode->clock); |
821 | b.full = rfixed_const(1000); | 821 | b.full = dfixed_const(1000); |
822 | a.full = rfixed_div(a, b); | 822 | a.full = dfixed_div(a, b); |
823 | pclk.full = rfixed_div(b, a); | 823 | pclk.full = dfixed_div(b, a); |
824 | if (crtc->rmx_type != RMX_OFF) { | 824 | if (crtc->rmx_type != RMX_OFF) { |
825 | b.full = rfixed_const(2); | 825 | b.full = dfixed_const(2); |
826 | if (crtc->vsc.full > b.full) | 826 | if (crtc->vsc.full > b.full) |
827 | b.full = crtc->vsc.full; | 827 | b.full = crtc->vsc.full; |
828 | b.full = rfixed_mul(b, crtc->hsc); | 828 | b.full = dfixed_mul(b, crtc->hsc); |
829 | c.full = rfixed_const(2); | 829 | c.full = dfixed_const(2); |
830 | b.full = rfixed_div(b, c); | 830 | b.full = dfixed_div(b, c); |
831 | consumption_time.full = rfixed_div(pclk, b); | 831 | consumption_time.full = dfixed_div(pclk, b); |
832 | } else { | 832 | } else { |
833 | consumption_time.full = pclk.full; | 833 | consumption_time.full = pclk.full; |
834 | } | 834 | } |
835 | a.full = rfixed_const(1); | 835 | a.full = dfixed_const(1); |
836 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | 836 | wm->consumption_rate.full = dfixed_div(a, consumption_time); |
837 | 837 | ||
838 | 838 | ||
839 | /* Determine line time | 839 | /* Determine line time |
@@ -841,27 +841,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
841 | * LineTime = total number of horizontal pixels | 841 | * LineTime = total number of horizontal pixels |
842 | * pclk = pixel clock period(ns) | 842 | * pclk = pixel clock period(ns) |
843 | */ | 843 | */ |
844 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 844 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
845 | line_time.full = rfixed_mul(a, pclk); | 845 | line_time.full = dfixed_mul(a, pclk); |
846 | 846 | ||
847 | /* Determine active time | 847 | /* Determine active time |
848 | * ActiveTime = time of active region of display within one line, | 848 | * ActiveTime = time of active region of display within one line, |
849 | * hactive = total number of horizontal active pixels | 849 | * hactive = total number of horizontal active pixels |
850 | * htotal = total number of horizontal pixels | 850 | * htotal = total number of horizontal pixels |
851 | */ | 851 | */ |
852 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | 852 | a.full = dfixed_const(crtc->base.mode.crtc_htotal); |
853 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 853 | b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
854 | wm->active_time.full = rfixed_mul(line_time, b); | 854 | wm->active_time.full = dfixed_mul(line_time, b); |
855 | wm->active_time.full = rfixed_div(wm->active_time, a); | 855 | wm->active_time.full = dfixed_div(wm->active_time, a); |
856 | 856 | ||
857 | /* Determine chunk time | 857 | /* Determine chunk time |
858 | * ChunkTime = the time it takes the DCP to send one chunk of data | 858 | * ChunkTime = the time it takes the DCP to send one chunk of data |
859 | * to the LB which consists of pipeline delay and inter chunk gap | 859 | * to the LB which consists of pipeline delay and inter chunk gap |
860 | * sclk = system clock (MHz) | 860 | * sclk = system clock (MHz) |
861 | */ | 861 | */ |
862 | a.full = rfixed_const(600 * 1000); | 862 | a.full = dfixed_const(600 * 1000); |
863 | chunk_time.full = rfixed_div(a, rdev->pm.sclk); | 863 | chunk_time.full = dfixed_div(a, rdev->pm.sclk); |
864 | read_delay_latency.full = rfixed_const(1000); | 864 | read_delay_latency.full = dfixed_const(1000); |
865 | 865 | ||
866 | /* Determine the worst case latency | 866 | /* Determine the worst case latency |
867 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | 867 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) |
@@ -871,9 +871,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
871 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | 871 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB |
872 | * which consists of pipeline delay and inter chunk gap | 872 | * which consists of pipeline delay and inter chunk gap |
873 | */ | 873 | */ |
874 | if (rfixed_trunc(wm->num_line_pair) > 1) { | 874 | if (dfixed_trunc(wm->num_line_pair) > 1) { |
875 | a.full = rfixed_const(3); | 875 | a.full = dfixed_const(3); |
876 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | 876 | wm->worst_case_latency.full = dfixed_mul(a, chunk_time); |
877 | wm->worst_case_latency.full += read_delay_latency.full; | 877 | wm->worst_case_latency.full += read_delay_latency.full; |
878 | } else { | 878 | } else { |
879 | wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; | 879 | wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; |
@@ -889,34 +889,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
889 | * of data to the LB which consists of | 889 | * of data to the LB which consists of |
890 | * pipeline delay and inter chunk gap | 890 | * pipeline delay and inter chunk gap |
891 | */ | 891 | */ |
892 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | 892 | if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { |
893 | tolerable_latency.full = line_time.full; | 893 | tolerable_latency.full = line_time.full; |
894 | } else { | 894 | } else { |
895 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | 895 | tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); |
896 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | 896 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; |
897 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | 897 | tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); |
898 | tolerable_latency.full = line_time.full - tolerable_latency.full; | 898 | tolerable_latency.full = line_time.full - tolerable_latency.full; |
899 | } | 899 | } |
900 | /* We assume worst case 32bits (4 bytes) */ | 900 | /* We assume worst case 32bits (4 bytes) */ |
901 | wm->dbpp.full = rfixed_const(2 * 16); | 901 | wm->dbpp.full = dfixed_const(2 * 16); |
902 | 902 | ||
903 | /* Determine the maximum priority mark | 903 | /* Determine the maximum priority mark |
904 | * width = viewport width in pixels | 904 | * width = viewport width in pixels |
905 | */ | 905 | */ |
906 | a.full = rfixed_const(16); | 906 | a.full = dfixed_const(16); |
907 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | 907 | wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); |
908 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | 908 | wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); |
909 | wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); | 909 | wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); |
910 | 910 | ||
911 | /* Determine estimated width */ | 911 | /* Determine estimated width */ |
912 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | 912 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; |
913 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | 913 | estimated_width.full = dfixed_div(estimated_width, consumption_time); |
914 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | 914 | if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { |
915 | wm->priority_mark.full = wm->priority_mark_max.full; | 915 | wm->priority_mark.full = wm->priority_mark_max.full; |
916 | } else { | 916 | } else { |
917 | a.full = rfixed_const(16); | 917 | a.full = dfixed_const(16); |
918 | wm->priority_mark.full = rfixed_div(estimated_width, a); | 918 | wm->priority_mark.full = dfixed_div(estimated_width, a); |
919 | wm->priority_mark.full = rfixed_ceil(wm->priority_mark); | 919 | wm->priority_mark.full = dfixed_ceil(wm->priority_mark); |
920 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | 920 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; |
921 | } | 921 | } |
922 | } | 922 | } |
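The comments in rv515_crtc_bandwidth_compute above spell out the formulas; for concreteness, here is a hedged floating-point walk-through of the same steps. The driver performs this arithmetic in 20.12 fixed point, and all numbers below are made up for illustration:

	/* Hedged, floating-point walk-through of the timing math above; the driver
	 * does the same arithmetic in 20.12 fixed point.  Numbers are made up. */
	#include <stdio.h>

	int main(void)
	{
		double clock_khz = 108000.0, htotal = 1688.0, hdisplay = 1280.0;
		double pclk = 1000.0 / (clock_khz / 1000.0);        /* pixel period, ~9.26 ns */
		double line_time = htotal * pclk;                   /* ~15630 ns per line     */
		double active_time = line_time * hdisplay / htotal; /* ~11850 ns              */
		double consumption_rate = 1.0 / pclk;               /* no scaler: 1/pclk      */
		double sclk_mhz = 300.0;                            /* made-up engine clock   */
		double chunk_time = 600.0 * 1000.0 / sclk_mhz;      /* 2000, as in the code   */
		double worst_case = chunk_time + 1000.0;            /* one line pair case     */

		printf("pclk=%.2f line=%.0f active=%.0f rate=%.3f latency=%.0f\n",
		       pclk, line_time, active_time, consumption_rate, worst_case);
		return 0;
	}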
@@ -945,58 +945,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
945 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | 945 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); |
946 | 946 | ||
947 | if (mode0 && mode1) { | 947 | if (mode0 && mode1) { |
948 | if (rfixed_trunc(wm0.dbpp) > 64) | 948 | if (dfixed_trunc(wm0.dbpp) > 64) |
949 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | 949 | a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); |
950 | else | 950 | else |
951 | a.full = wm0.num_line_pair.full; | 951 | a.full = wm0.num_line_pair.full; |
952 | if (rfixed_trunc(wm1.dbpp) > 64) | 952 | if (dfixed_trunc(wm1.dbpp) > 64) |
953 | b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | 953 | b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); |
954 | else | 954 | else |
955 | b.full = wm1.num_line_pair.full; | 955 | b.full = wm1.num_line_pair.full; |
956 | a.full += b.full; | 956 | a.full += b.full; |
957 | fill_rate.full = rfixed_div(wm0.sclk, a); | 957 | fill_rate.full = dfixed_div(wm0.sclk, a); |
958 | if (wm0.consumption_rate.full > fill_rate.full) { | 958 | if (wm0.consumption_rate.full > fill_rate.full) { |
959 | b.full = wm0.consumption_rate.full - fill_rate.full; | 959 | b.full = wm0.consumption_rate.full - fill_rate.full; |
960 | b.full = rfixed_mul(b, wm0.active_time); | 960 | b.full = dfixed_mul(b, wm0.active_time); |
961 | a.full = rfixed_const(16); | 961 | a.full = dfixed_const(16); |
962 | b.full = rfixed_div(b, a); | 962 | b.full = dfixed_div(b, a); |
963 | a.full = rfixed_mul(wm0.worst_case_latency, | 963 | a.full = dfixed_mul(wm0.worst_case_latency, |
964 | wm0.consumption_rate); | 964 | wm0.consumption_rate); |
965 | priority_mark02.full = a.full + b.full; | 965 | priority_mark02.full = a.full + b.full; |
966 | } else { | 966 | } else { |
967 | a.full = rfixed_mul(wm0.worst_case_latency, | 967 | a.full = dfixed_mul(wm0.worst_case_latency, |
968 | wm0.consumption_rate); | 968 | wm0.consumption_rate); |
969 | b.full = rfixed_const(16 * 1000); | 969 | b.full = dfixed_const(16 * 1000); |
970 | priority_mark02.full = rfixed_div(a, b); | 970 | priority_mark02.full = dfixed_div(a, b); |
971 | } | 971 | } |
972 | if (wm1.consumption_rate.full > fill_rate.full) { | 972 | if (wm1.consumption_rate.full > fill_rate.full) { |
973 | b.full = wm1.consumption_rate.full - fill_rate.full; | 973 | b.full = wm1.consumption_rate.full - fill_rate.full; |
974 | b.full = rfixed_mul(b, wm1.active_time); | 974 | b.full = dfixed_mul(b, wm1.active_time); |
975 | a.full = rfixed_const(16); | 975 | a.full = dfixed_const(16); |
976 | b.full = rfixed_div(b, a); | 976 | b.full = dfixed_div(b, a); |
977 | a.full = rfixed_mul(wm1.worst_case_latency, | 977 | a.full = dfixed_mul(wm1.worst_case_latency, |
978 | wm1.consumption_rate); | 978 | wm1.consumption_rate); |
979 | priority_mark12.full = a.full + b.full; | 979 | priority_mark12.full = a.full + b.full; |
980 | } else { | 980 | } else { |
981 | a.full = rfixed_mul(wm1.worst_case_latency, | 981 | a.full = dfixed_mul(wm1.worst_case_latency, |
982 | wm1.consumption_rate); | 982 | wm1.consumption_rate); |
983 | b.full = rfixed_const(16 * 1000); | 983 | b.full = dfixed_const(16 * 1000); |
984 | priority_mark12.full = rfixed_div(a, b); | 984 | priority_mark12.full = dfixed_div(a, b); |
985 | } | 985 | } |
986 | if (wm0.priority_mark.full > priority_mark02.full) | 986 | if (wm0.priority_mark.full > priority_mark02.full) |
987 | priority_mark02.full = wm0.priority_mark.full; | 987 | priority_mark02.full = wm0.priority_mark.full; |
988 | if (rfixed_trunc(priority_mark02) < 0) | 988 | if (dfixed_trunc(priority_mark02) < 0) |
989 | priority_mark02.full = 0; | 989 | priority_mark02.full = 0; |
990 | if (wm0.priority_mark_max.full > priority_mark02.full) | 990 | if (wm0.priority_mark_max.full > priority_mark02.full) |
991 | priority_mark02.full = wm0.priority_mark_max.full; | 991 | priority_mark02.full = wm0.priority_mark_max.full; |
992 | if (wm1.priority_mark.full > priority_mark12.full) | 992 | if (wm1.priority_mark.full > priority_mark12.full) |
993 | priority_mark12.full = wm1.priority_mark.full; | 993 | priority_mark12.full = wm1.priority_mark.full; |
994 | if (rfixed_trunc(priority_mark12) < 0) | 994 | if (dfixed_trunc(priority_mark12) < 0) |
995 | priority_mark12.full = 0; | 995 | priority_mark12.full = 0; |
996 | if (wm1.priority_mark_max.full > priority_mark12.full) | 996 | if (wm1.priority_mark_max.full > priority_mark12.full) |
997 | priority_mark12.full = wm1.priority_mark_max.full; | 997 | priority_mark12.full = wm1.priority_mark_max.full; |
998 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 998 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
999 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 999 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
1000 | if (rdev->disp_priority == 2) { | 1000 | if (rdev->disp_priority == 2) { |
1001 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 1001 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
1002 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 1002 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
@@ -1006,32 +1006,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1006 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); | 1006 | WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
1007 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); | 1007 | WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
1008 | } else if (mode0) { | 1008 | } else if (mode0) { |
1009 | if (rfixed_trunc(wm0.dbpp) > 64) | 1009 | if (dfixed_trunc(wm0.dbpp) > 64) |
1010 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | 1010 | a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); |
1011 | else | 1011 | else |
1012 | a.full = wm0.num_line_pair.full; | 1012 | a.full = wm0.num_line_pair.full; |
1013 | fill_rate.full = rfixed_div(wm0.sclk, a); | 1013 | fill_rate.full = dfixed_div(wm0.sclk, a); |
1014 | if (wm0.consumption_rate.full > fill_rate.full) { | 1014 | if (wm0.consumption_rate.full > fill_rate.full) { |
1015 | b.full = wm0.consumption_rate.full - fill_rate.full; | 1015 | b.full = wm0.consumption_rate.full - fill_rate.full; |
1016 | b.full = rfixed_mul(b, wm0.active_time); | 1016 | b.full = dfixed_mul(b, wm0.active_time); |
1017 | a.full = rfixed_const(16); | 1017 | a.full = dfixed_const(16); |
1018 | b.full = rfixed_div(b, a); | 1018 | b.full = dfixed_div(b, a); |
1019 | a.full = rfixed_mul(wm0.worst_case_latency, | 1019 | a.full = dfixed_mul(wm0.worst_case_latency, |
1020 | wm0.consumption_rate); | 1020 | wm0.consumption_rate); |
1021 | priority_mark02.full = a.full + b.full; | 1021 | priority_mark02.full = a.full + b.full; |
1022 | } else { | 1022 | } else { |
1023 | a.full = rfixed_mul(wm0.worst_case_latency, | 1023 | a.full = dfixed_mul(wm0.worst_case_latency, |
1024 | wm0.consumption_rate); | 1024 | wm0.consumption_rate); |
1025 | b.full = rfixed_const(16); | 1025 | b.full = dfixed_const(16); |
1026 | priority_mark02.full = rfixed_div(a, b); | 1026 | priority_mark02.full = dfixed_div(a, b); |
1027 | } | 1027 | } |
1028 | if (wm0.priority_mark.full > priority_mark02.full) | 1028 | if (wm0.priority_mark.full > priority_mark02.full) |
1029 | priority_mark02.full = wm0.priority_mark.full; | 1029 | priority_mark02.full = wm0.priority_mark.full; |
1030 | if (rfixed_trunc(priority_mark02) < 0) | 1030 | if (dfixed_trunc(priority_mark02) < 0) |
1031 | priority_mark02.full = 0; | 1031 | priority_mark02.full = 0; |
1032 | if (wm0.priority_mark_max.full > priority_mark02.full) | 1032 | if (wm0.priority_mark_max.full > priority_mark02.full) |
1033 | priority_mark02.full = wm0.priority_mark_max.full; | 1033 | priority_mark02.full = wm0.priority_mark_max.full; |
1034 | d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); | 1034 | d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
1035 | if (rdev->disp_priority == 2) | 1035 | if (rdev->disp_priority == 2) |
1036 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 1036 | d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
1037 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); | 1037 | WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); |
@@ -1039,32 +1039,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | |||
1039 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1039 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
1040 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 1040 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
1041 | } else { | 1041 | } else { |
1042 | if (rfixed_trunc(wm1.dbpp) > 64) | 1042 | if (dfixed_trunc(wm1.dbpp) > 64) |
1043 | a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | 1043 | a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); |
1044 | else | 1044 | else |
1045 | a.full = wm1.num_line_pair.full; | 1045 | a.full = wm1.num_line_pair.full; |
1046 | fill_rate.full = rfixed_div(wm1.sclk, a); | 1046 | fill_rate.full = dfixed_div(wm1.sclk, a); |
1047 | if (wm1.consumption_rate.full > fill_rate.full) { | 1047 | if (wm1.consumption_rate.full > fill_rate.full) { |
1048 | b.full = wm1.consumption_rate.full - fill_rate.full; | 1048 | b.full = wm1.consumption_rate.full - fill_rate.full; |
1049 | b.full = rfixed_mul(b, wm1.active_time); | 1049 | b.full = dfixed_mul(b, wm1.active_time); |
1050 | a.full = rfixed_const(16); | 1050 | a.full = dfixed_const(16); |
1051 | b.full = rfixed_div(b, a); | 1051 | b.full = dfixed_div(b, a); |
1052 | a.full = rfixed_mul(wm1.worst_case_latency, | 1052 | a.full = dfixed_mul(wm1.worst_case_latency, |
1053 | wm1.consumption_rate); | 1053 | wm1.consumption_rate); |
1054 | priority_mark12.full = a.full + b.full; | 1054 | priority_mark12.full = a.full + b.full; |
1055 | } else { | 1055 | } else { |
1056 | a.full = rfixed_mul(wm1.worst_case_latency, | 1056 | a.full = dfixed_mul(wm1.worst_case_latency, |
1057 | wm1.consumption_rate); | 1057 | wm1.consumption_rate); |
1058 | b.full = rfixed_const(16 * 1000); | 1058 | b.full = dfixed_const(16 * 1000); |
1059 | priority_mark12.full = rfixed_div(a, b); | 1059 | priority_mark12.full = dfixed_div(a, b); |
1060 | } | 1060 | } |
1061 | if (wm1.priority_mark.full > priority_mark12.full) | 1061 | if (wm1.priority_mark.full > priority_mark12.full) |
1062 | priority_mark12.full = wm1.priority_mark.full; | 1062 | priority_mark12.full = wm1.priority_mark.full; |
1063 | if (rfixed_trunc(priority_mark12) < 0) | 1063 | if (dfixed_trunc(priority_mark12) < 0) |
1064 | priority_mark12.full = 0; | 1064 | priority_mark12.full = 0; |
1065 | if (wm1.priority_mark_max.full > priority_mark12.full) | 1065 | if (wm1.priority_mark_max.full > priority_mark12.full) |
1066 | priority_mark12.full = wm1.priority_mark_max.full; | 1066 | priority_mark12.full = wm1.priority_mark_max.full; |
1067 | d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); | 1067 | d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
1068 | if (rdev->disp_priority == 2) | 1068 | if (rdev->disp_priority == 2) |
1069 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; | 1069 | d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
1070 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 1070 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
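The final file below moves the header from drivers/gpu/drm/radeon/radeon_fixed.h to include/drm/drm_fixed.h and renames the rfixed_/fixed_ prefixes to dfixed_. As a hedged sketch of how a non-radeon user could call the shared helpers once it includes <drm/drm_fixed.h> (the function and its parameters are made up for illustration):

	#include <drm/drm_fixed.h>

	/* Hypothetical helper: scale a width by a src/dst ratio in 20.12 fixed point. */
	static u32 scaled_width(u32 src, u32 dst, u32 width)
	{
		fixed20_12 ratio, w;

		ratio.full = dfixed_const(src);
		w.full = dfixed_const(dst);
		ratio.full = dfixed_div(ratio, w);   /* src/dst as a 20.12 value */
		w.full = dfixed_const(width);
		w.full = dfixed_mul(ratio, w);       /* width * ratio            */
		return dfixed_trunc(w);              /* back to an integer width */
	}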
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/include/drm/drm_fixed.h index 3d4d84e078ac..4a08a664ff1f 100644 --- a/drivers/gpu/drm/radeon/radeon_fixed.h +++ b/include/drm/drm_fixed.h | |||
@@ -21,41 +21,41 @@ | |||
21 | * | 21 | * |
22 | * Authors: Dave Airlie | 22 | * Authors: Dave Airlie |
23 | */ | 23 | */ |
24 | #ifndef RADEON_FIXED_H | 24 | #ifndef DRM_FIXED_H |
25 | #define RADEON_FIXED_H | 25 | #define DRM_FIXED_H |
26 | 26 | ||
27 | typedef union rfixed { | 27 | typedef union dfixed { |
28 | u32 full; | 28 | u32 full; |
29 | } fixed20_12; | 29 | } fixed20_12; |
30 | 30 | ||
31 | 31 | ||
32 | #define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ | 32 | #define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ |
33 | #define rfixed_const_half(A) (u32)(((A) << 12) + 2048) | 33 | #define dfixed_const_half(A) (u32)(((A) << 12) + 2048) |
34 | #define rfixed_const_666(A) (u32)(((A) << 12) + 2731) | 34 | #define dfixed_const_666(A) (u32)(((A) << 12) + 2731) |
35 | #define rfixed_const_8(A) (u32)(((A) << 12) + 3277) | 35 | #define dfixed_const_8(A) (u32)(((A) << 12) + 3277) |
36 | #define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) | 36 | #define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) |
37 | #define fixed_init(A) { .full = rfixed_const((A)) } | 37 | #define dfixed_init(A) { .full = dfixed_const((A)) } |
38 | #define fixed_init_half(A) { .full = rfixed_const_half((A)) } | 38 | #define dfixed_init_half(A) { .full = dfixed_const_half((A)) } |
39 | #define rfixed_trunc(A) ((A).full >> 12) | 39 | #define dfixed_trunc(A) ((A).full >> 12) |
40 | 40 | ||
41 | static inline u32 rfixed_floor(fixed20_12 A) | 41 | static inline u32 dfixed_floor(fixed20_12 A) |
42 | { | 42 | { |
43 | u32 non_frac = rfixed_trunc(A); | 43 | u32 non_frac = dfixed_trunc(A); |
44 | 44 | ||
45 | return rfixed_const(non_frac); | 45 | return dfixed_const(non_frac); |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline u32 rfixed_ceil(fixed20_12 A) | 48 | static inline u32 dfixed_ceil(fixed20_12 A) |
49 | { | 49 | { |
50 | u32 non_frac = rfixed_trunc(A); | 50 | u32 non_frac = dfixed_trunc(A); |
51 | 51 | ||
52 | if (A.full > rfixed_const(non_frac)) | 52 | if (A.full > dfixed_const(non_frac)) |
53 | return rfixed_const(non_frac + 1); | 53 | return dfixed_const(non_frac + 1); |
54 | else | 54 | else |
55 | return rfixed_const(non_frac); | 55 | return dfixed_const(non_frac); |
56 | } | 56 | } |
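The renamed helpers keep radeon's 20.12 encoding: twelve fractional bits, so a value is simply the real number times 4096. A quick sanity check of the macros above, using the definitions in this header; the values are illustrative only and this function is not part of the patch:

	/* Illustrative sanity check of the 20.12 helpers defined above. */
	static void dfixed_selftest(void)
	{
		fixed20_12 a = dfixed_init(3);       /* 3.0 -> 0x3000 (3 * 4096)        */
		fixed20_12 h = dfixed_init_half(2);  /* 2.5 -> 0x2800                   */
		u32 p = dfixed_mul(a, h);  /* 7.5 -> 0x7800; the +2048 rounds to nearest */
		u32 t = dfixed_trunc(h);   /* 2; the twelve fraction bits are discarded  */
		u32 c = dfixed_ceil(h);    /* 3.0 -> 0x3000; result stays in 20.12 form  */

		(void)p; (void)t; (void)c;
	}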
57 | 57 | ||
58 | static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) | 58 | static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) |
59 | { | 59 | { |
60 | u64 tmp = ((u64)A.full << 13); | 60 | u64 tmp = ((u64)A.full << 13); |
61 | 61 | ||