author	Dave Airlie <airlied@redhat.com>	2013-05-02 20:09:11 -0400
committer	Dave Airlie <airlied@redhat.com>	2013-05-02 20:09:11 -0400
commit	6110948846e9440a60e491a9be67a8a13c646b7e (patch)
tree	4fc4f362b6f52aaf748a96830a42a3a7cdcab129
parent	f49e7259a4ea3a9ac42fc1c70c86d5e50e800731 (diff)
parent	441e76ca83ac604eaf0f046def96d8e3a27eea28 (diff)
Merge branch 'drm-next-3.10-2' of git://people.freedesktop.org/~agd5f/linux into drm-next
Just some fixes that have accumulated over the last couple of weeks and
some new PCI ids.

* 'drm-next-3.10-2' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: fix handling of v6 power tables
  drm/radeon: clarify family checks in pm table parsing
  drm/radeon: consolidate UVD clock programming
  drm/radeon: fix UPLL_REF_DIV_MASK definition
  radeon: add bo tracking debugfs
  drm/radeon: add new richland pci ids
  drm/radeon: add some new SI PCI ids
  drm/radeon: fix scratch reg handling for UVD fence
  drm/radeon: allocate SA bo in the requested domain
  drm/radeon: fix possible segfault when parsing pm tables
  drm/radeon: fix endian bugs in atom_allocate_fb_scratch()
-rw-r--r--drivers/gpu/drm/radeon/atom.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c103
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h2
-rw-r--r--drivers/gpu/drm/radeon/ni.c6
-rw-r--r--drivers/gpu/drm/radeon/r600d.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c137
-rw-r--r--drivers/gpu/drm/radeon/rv770.c110
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/radeon/si.c104
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--include/drm/drm_pciids.h5
17 files changed, 284 insertions, 299 deletions
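
The core of the "consolidate UVD clock programming" change is the pair of shared
helpers added to radeon_uvd.c, which replace the per-ASIC copies removed from
evergreen.c, rv770.c and si.c. As an orientation aid only (this is a sketch, not
code from the commit), a per-ASIC path ends up using them roughly as below; the
wrapper name is hypothetical, the divider limits are the ones the evergreen/SI
callers pass in the diff, and the UPLL register programming between the two
calls is elided:

	/* sketch: how a per-ASIC set_uvd_clocks path uses the shared UPLL helpers */
	static int example_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
	{
		unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
		int r;

		/* choose feedback and post dividers for the requested VCLK/DCLK */
		r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk,
						  125000, 250000,	/* VCO min/max */
						  16384, 0x03FFFFFF,	/* fb factor and mask */
						  0, 128, 5,		/* pd min, max, even-above */
						  &fb_div, &vclk_div, &dclk_div);
		if (r)
			return r;

		/* ... program UPLL_FB_DIV, PDIV_A/PDIV_B, etc. with the results ... */

		/* latch the new setting and wait for CTLACK/CTLACK2 */
		return radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	}
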
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 46a9c3772850..fb441a790f3d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
1394 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 1394 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1395 1395
1396 DRM_DEBUG("atom firmware requested %08x %dkb\n", 1396 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1397 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, 1397 le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
1398 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); 1398 le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
1399 1399
1400 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; 1400 usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
1401 } 1401 }
1402 ctx->scratch_size_bytes = 0; 1402 ctx->scratch_size_bytes = 0;
1403 if (usage_bytes == 0) 1403 if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1531f167d152..105bafb6c29d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -989,62 +989,10 @@ done:
989 return r; 989 return r;
990} 990}
991 991
992static int evergreen_uvd_calc_post_div(unsigned target_freq,
993 unsigned vco_freq,
994 unsigned *div)
995{
996 /* target larger than vco frequency ? */
997 if (vco_freq < target_freq)
998 return -1; /* forget it */
999
1000 /* Fclk = Fvco / PDIV */
1001 *div = vco_freq / target_freq;
1002
1003 /* we alway need a frequency less than or equal the target */
1004 if ((vco_freq / *div) > target_freq)
1005 *div += 1;
1006
1007 /* dividers above 5 must be even */
1008 if (*div > 5 && *div % 2)
1009 *div += 1;
1010
1011 /* out of range ? */
1012 if (*div >= 128)
1013 return -1; /* forget it */
1014
1015 return vco_freq / *div;
1016}
1017
1018static int evergreen_uvd_send_upll_ctlreq(struct radeon_device *rdev)
1019{
1020 unsigned i;
1021
1022 /* assert UPLL_CTLREQ */
1023 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
1024
1025 /* wait for CTLACK and CTLACK2 to get asserted */
1026 for (i = 0; i < 100; ++i) {
1027 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
1028 if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
1029 break;
1030 mdelay(10);
1031 }
1032 if (i == 100)
1033 return -ETIMEDOUT;
1034
1035 /* deassert UPLL_CTLREQ */
1036 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
1037
1038 return 0;
1039}
1040
1041int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) 992int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
1042{ 993{
1043 /* start off with something large */ 994 /* start off with something large */
1044 int optimal_diff_score = 0x7FFFFFF; 995 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
1045 unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
1046 unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
1047 unsigned vco_freq;
1048 int r; 996 int r;
1049 997
1050 /* bypass vclk and dclk with bclk */ 998 /* bypass vclk and dclk with bclk */
@@ -1061,40 +1009,11 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
1061 return 0; 1009 return 0;
1062 } 1010 }
1063 1011
1064 /* loop through vco from low to high */ 1012 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
1065 for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) { 1013 16384, 0x03FFFFFF, 0, 128, 5,
1066 unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384; 1014 &fb_div, &vclk_div, &dclk_div);
1067 int calc_clk, diff_score, diff_vclk, diff_dclk; 1015 if (r)
1068 unsigned vclk_div, dclk_div; 1016 return r;
1069
1070 /* fb div out of range ? */
1071 if (fb_div > 0x03FFFFFF)
1072 break; /* it can oly get worse */
1073
1074 /* calc vclk with current vco freq. */
1075 calc_clk = evergreen_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
1076 if (calc_clk == -1)
1077 break; /* vco is too big, it has to stop. */
1078 diff_vclk = vclk - calc_clk;
1079
1080 /* calc dclk with current vco freq. */
1081 calc_clk = evergreen_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
1082 if (calc_clk == -1)
1083 break; /* vco is too big, it has to stop. */
1084 diff_dclk = dclk - calc_clk;
1085
1086 /* determine if this vco setting is better than current optimal settings */
1087 diff_score = abs(diff_vclk) + abs(diff_dclk);
1088 if (diff_score < optimal_diff_score) {
1089 optimal_fb_div = fb_div;
1090 optimal_vclk_div = vclk_div;
1091 optimal_dclk_div = dclk_div;
1092 optimal_vco_freq = vco_freq;
1093 optimal_diff_score = diff_score;
1094 if (optimal_diff_score == 0)
1095 break; /* it can't get better than this */
1096 }
1097 }
1098 1017
1099 /* set VCO_MODE to 1 */ 1018 /* set VCO_MODE to 1 */
1100 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); 1019 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
@@ -1108,7 +1027,7 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
1108 1027
1109 mdelay(1); 1028 mdelay(1);
1110 1029
1111 r = evergreen_uvd_send_upll_ctlreq(rdev); 1030 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
1112 if (r) 1031 if (r)
1113 return r; 1032 return r;
1114 1033
@@ -1119,19 +1038,19 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
1119 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK); 1038 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
1120 1039
1121 /* set feedback divider */ 1040 /* set feedback divider */
1122 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK); 1041 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
1123 1042
1124 /* set ref divider to 0 */ 1043 /* set ref divider to 0 */
1125 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK); 1044 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
1126 1045
1127 if (optimal_vco_freq < 187500) 1046 if (fb_div < 307200)
1128 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9); 1047 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
1129 else 1048 else
1130 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9); 1049 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
1131 1050
1132 /* set PDIV_A and PDIV_B */ 1051 /* set PDIV_A and PDIV_B */
1133 WREG32_P(CG_UPLL_FUNC_CNTL_2, 1052 WREG32_P(CG_UPLL_FUNC_CNTL_2,
1134 UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div), 1053 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
1135 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK)); 1054 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
1136 1055
1137 /* give the PLL some time to settle */ 1056 /* give the PLL some time to settle */
@@ -1145,7 +1064,7 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
1145 /* switch from bypass mode to normal mode */ 1064 /* switch from bypass mode to normal mode */
1146 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); 1065 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
1147 1066
1148 r = evergreen_uvd_send_upll_ctlreq(rdev); 1067 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
1149 if (r) 1068 if (r)
1150 return r; 1069 return r;
1151 1070
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index d9a005431087..75c05631146d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -59,7 +59,7 @@
59# define UPLL_SLEEP_MASK 0x00000002 59# define UPLL_SLEEP_MASK 0x00000002
60# define UPLL_BYPASS_EN_MASK 0x00000004 60# define UPLL_BYPASS_EN_MASK 0x00000004
61# define UPLL_CTLREQ_MASK 0x00000008 61# define UPLL_CTLREQ_MASK 0x00000008
62# define UPLL_REF_DIV_MASK 0x001F0000 62# define UPLL_REF_DIV_MASK 0x003F0000
63# define UPLL_VCO_MODE_MASK 0x00000200 63# define UPLL_VCO_MODE_MASK 0x00000200
64# define UPLL_CTLACK_MASK 0x40000000 64# define UPLL_CTLACK_MASK 0x40000000
65# define UPLL_CTLACK2_MASK 0x80000000 65# define UPLL_CTLACK2_MASK 0x80000000
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7436b91699d0..7969c0c8ec20 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -749,7 +749,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
749 (rdev->pdev->device == 0x990F) || 749 (rdev->pdev->device == 0x990F) ||
750 (rdev->pdev->device == 0x9910) || 750 (rdev->pdev->device == 0x9910) ||
751 (rdev->pdev->device == 0x9917) || 751 (rdev->pdev->device == 0x9917) ||
752 (rdev->pdev->device == 0x9999)) { 752 (rdev->pdev->device == 0x9999) ||
753 (rdev->pdev->device == 0x999C)) {
753 rdev->config.cayman.max_simds_per_se = 6; 754 rdev->config.cayman.max_simds_per_se = 6;
754 rdev->config.cayman.max_backends_per_se = 2; 755 rdev->config.cayman.max_backends_per_se = 2;
755 } else if ((rdev->pdev->device == 0x9903) || 756 } else if ((rdev->pdev->device == 0x9903) ||
@@ -758,7 +759,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
758 (rdev->pdev->device == 0x990D) || 759 (rdev->pdev->device == 0x990D) ||
759 (rdev->pdev->device == 0x990E) || 760 (rdev->pdev->device == 0x990E) ||
760 (rdev->pdev->device == 0x9913) || 761 (rdev->pdev->device == 0x9913) ||
761 (rdev->pdev->device == 0x9918)) { 762 (rdev->pdev->device == 0x9918) ||
763 (rdev->pdev->device == 0x999D)) {
762 rdev->config.cayman.max_simds_per_se = 4; 764 rdev->config.cayman.max_simds_per_se = 4;
763 rdev->config.cayman.max_backends_per_se = 2; 765 rdev->config.cayman.max_backends_per_se = 2;
764 } else if ((rdev->pdev->device == 0x9919) || 766 } else if ((rdev->pdev->device == 0x9919) ||
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 6105b25b18c3..acb146c06973 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1208,6 +1208,10 @@
1208 1208
1209#define UVD_CONTEXT_ID 0xf6f4 1209#define UVD_CONTEXT_ID 0xf6f4
1210 1210
1211# define UPLL_CTLREQ_MASK 0x00000008
1212# define UPLL_CTLACK_MASK 0x40000000
1213# define UPLL_CTLACK2_MASK 0x80000000
1214
1211/* 1215/*
1212 * PM4 1216 * PM4
1213 */ 1217 */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d6c8cbaa8693..1442ce765d48 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -358,7 +358,8 @@ struct radeon_bo {
358 struct radeon_device *rdev; 358 struct radeon_device *rdev;
359 struct drm_gem_object gem_base; 359 struct drm_gem_object gem_base;
360 360
361 struct ttm_bo_kmap_obj dma_buf_vmap; 361 struct ttm_bo_kmap_obj dma_buf_vmap;
362 pid_t pid;
362}; 363};
363#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 364#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
364 365
@@ -372,6 +373,8 @@ struct radeon_bo_list {
372 u32 tiling_flags; 373 u32 tiling_flags;
373}; 374};
374 375
376int radeon_gem_debugfs_init(struct radeon_device *rdev);
377
375/* sub-allocation manager, it has to be protected by another lock. 378/* sub-allocation manager, it has to be protected by another lock.
376 * By conception this is an helper for other part of the driver 379 * By conception this is an helper for other part of the driver
377 * like the indirect buffer or semaphore, which both have their 380 * like the indirect buffer or semaphore, which both have their
@@ -1159,6 +1162,17 @@ void radeon_uvd_free_handles(struct radeon_device *rdev,
1159 struct drm_file *filp); 1162 struct drm_file *filp);
1160int radeon_uvd_cs_parse(struct radeon_cs_parser *parser); 1163int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1161void radeon_uvd_note_usage(struct radeon_device *rdev); 1164void radeon_uvd_note_usage(struct radeon_device *rdev);
1165int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1166 unsigned vclk, unsigned dclk,
1167 unsigned vco_min, unsigned vco_max,
1168 unsigned fb_factor, unsigned fb_mask,
1169 unsigned pd_min, unsigned pd_max,
1170 unsigned pd_even,
1171 unsigned *optimal_fb_div,
1172 unsigned *optimal_vclk_div,
1173 unsigned *optimal_dclk_div);
1174int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1175 unsigned cg_upll_func_cntl);
1162 1176
1163struct r600_audio { 1177struct r600_audio {
1164 int channels; 1178 int channels;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 0dd87c0e0fac..dea6f63c9724 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2028 num_modes = power_info->info.ucNumOfPowerModeEntries; 2028 num_modes = power_info->info.ucNumOfPowerModeEntries;
2029 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 2029 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
2030 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 2030 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
2031 if (num_modes == 0)
2032 return state_index;
2031 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); 2033 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
2032 if (!rdev->pm.power_state) 2034 if (!rdev->pm.power_state)
2033 return state_index; 2035 return state_index;
@@ -2307,7 +2309,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2307 rdev->pm.default_power_state_index = state_index; 2309 rdev->pm.default_power_state_index = state_index;
2308 rdev->pm.power_state[state_index].default_clock_mode = 2310 rdev->pm.power_state[state_index].default_clock_mode =
2309 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 2311 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2310 if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 2312 if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
2311 /* NI chips post without MC ucode, so default clocks are strobe mode only */ 2313 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2312 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; 2314 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2313 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; 2315 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2345,7 +2347,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2345 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; 2347 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
2346 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; 2348 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2347 } 2349 }
2348 } else if (ASIC_IS_DCE6(rdev)) { 2350 } else if (rdev->family >= CHIP_TAHITI) {
2349 sclk = le16_to_cpu(clock_info->si.usEngineClockLow); 2351 sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
2350 sclk |= clock_info->si.ucEngineClockHigh << 16; 2352 sclk |= clock_info->si.ucEngineClockHigh << 16;
2351 mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); 2353 mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
@@ -2358,7 +2360,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2358 le16_to_cpu(clock_info->si.usVDDC); 2360 le16_to_cpu(clock_info->si.usVDDC);
2359 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = 2361 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
2360 le16_to_cpu(clock_info->si.usVDDCI); 2362 le16_to_cpu(clock_info->si.usVDDCI);
2361 } else if (ASIC_IS_DCE4(rdev)) { 2363 } else if (rdev->family >= CHIP_CEDAR) {
2362 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); 2364 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
2363 sclk |= clock_info->evergreen.ucEngineClockHigh << 16; 2365 sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2364 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); 2366 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2432 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2434 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2433 2435
2434 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2436 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2437 if (power_info->pplib.ucNumStates == 0)
2438 return state_index;
2435 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2439 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2436 power_info->pplib.ucNumStates, GFP_KERNEL); 2440 power_info->pplib.ucNumStates, GFP_KERNEL);
2437 if (!rdev->pm.power_state) 2441 if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2514 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2518 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2515 u16 data_offset; 2519 u16 data_offset;
2516 u8 frev, crev; 2520 u8 frev, crev;
2521 u8 *power_state_offset;
2517 2522
2518 if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 2523 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2519 &frev, &crev, &data_offset)) 2524 &frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2530 non_clock_info_array = (struct _NonClockInfoArray *) 2535 non_clock_info_array = (struct _NonClockInfoArray *)
2531 (mode_info->atom_context->bios + data_offset + 2536 (mode_info->atom_context->bios + data_offset +
2532 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2537 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2538 if (state_array->ucNumEntries == 0)
2539 return state_index;
2533 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2540 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2534 state_array->ucNumEntries, GFP_KERNEL); 2541 state_array->ucNumEntries, GFP_KERNEL);
2535 if (!rdev->pm.power_state) 2542 if (!rdev->pm.power_state)
2536 return state_index; 2543 return state_index;
2544 power_state_offset = (u8 *)state_array->states;
2537 for (i = 0; i < state_array->ucNumEntries; i++) { 2545 for (i = 0; i < state_array->ucNumEntries; i++) {
2538 mode_index = 0; 2546 mode_index = 0;
2539 power_state = (union pplib_power_state *)&state_array->states[i]; 2547 power_state = (union pplib_power_state *)power_state_offset;
2540 /* XXX this might be an inagua bug... */ 2548 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2541 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2542 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2549 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2543 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2550 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2544 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * 2551 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2550 if (power_state->v2.ucNumDPMLevels) { 2557 if (power_state->v2.ucNumDPMLevels) {
2551 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2558 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2552 clock_array_index = power_state->v2.clockInfoIndex[j]; 2559 clock_array_index = power_state->v2.clockInfoIndex[j];
2553 /* XXX this might be an inagua bug... */
2554 if (clock_array_index >= clock_info_array->ucNumEntries)
2555 continue;
2556 clock_info = (union pplib_clock_info *) 2560 clock_info = (union pplib_clock_info *)
2557 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2561 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2558 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2562 valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2574 non_clock_info); 2578 non_clock_info);
2575 state_index++; 2579 state_index++;
2576 } 2580 }
2581 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2577 } 2582 }
2578 /* if multiple clock modes, mark the lowest as no display */ 2583 /* if multiple clock modes, mark the lowest as no display */
2579 for (i = 0; i < state_index; i++) { 2584 for (i = 0; i < state_index; i++) {
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2620 default: 2625 default:
2621 break; 2626 break;
2622 } 2627 }
2623 } else { 2628 }
2629
2630 if (state_index == 0) {
2624 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); 2631 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2625 if (rdev->pm.power_state) { 2632 if (rdev->pm.power_state) {
2626 rdev->pm.power_state[0].clock_info = 2633 rdev->pm.power_state[0].clock_info =
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 237b7a7549e6..a8f608903989 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1178,6 +1178,11 @@ int radeon_device_init(struct radeon_device *rdev,
1178 if (r) 1178 if (r)
1179 DRM_ERROR("ib ring test failed (%d).\n", r); 1179 DRM_ERROR("ib ring test failed (%d).\n", r);
1180 1180
1181 r = radeon_gem_debugfs_init(rdev);
1182 if (r) {
1183 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1184 }
1185
1181 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 1186 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1182 /* Acceleration not working on AGP card try again 1187 /* Acceleration not working on AGP card try again
1183 * with fallback to PCI or PCIE GART 1188 * with fallback to PCI or PCIE GART
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 1a699cefaac7..5b937dfe6f65 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -767,8 +767,8 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
767 767
768 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 768 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
769 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { 769 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
770 rdev->fence_drv[ring].scratch_reg = 0;
770 if (ring != R600_RING_TYPE_UVD_INDEX) { 771 if (ring != R600_RING_TYPE_UVD_INDEX) {
771 rdev->fence_drv[ring].scratch_reg = 0;
772 index = R600_WB_EVENT_OFFSET + ring * 4; 772 index = R600_WB_EVENT_OFFSET + ring * 4;
773 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; 773 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
774 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + 774 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe5c1f6b7957..aa796031ab65 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -84,6 +84,7 @@ retry:
84 return r; 84 return r;
85 } 85 }
86 *obj = &robj->gem_base; 86 *obj = &robj->gem_base;
87 robj->pid = task_pid_nr(current);
87 88
88 mutex_lock(&rdev->gem.mutex); 89 mutex_lock(&rdev->gem.mutex);
89 list_add_tail(&robj->list, &rdev->gem.objects); 90 list_add_tail(&robj->list, &rdev->gem.objects);
@@ -575,3 +576,52 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
575{ 576{
576 return drm_gem_handle_delete(file_priv, handle); 577 return drm_gem_handle_delete(file_priv, handle);
577} 578}
579
580#if defined(CONFIG_DEBUG_FS)
581static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
582{
583 struct drm_info_node *node = (struct drm_info_node *)m->private;
584 struct drm_device *dev = node->minor->dev;
585 struct radeon_device *rdev = dev->dev_private;
586 struct radeon_bo *rbo;
587 unsigned i = 0;
588
589 mutex_lock(&rdev->gem.mutex);
590 list_for_each_entry(rbo, &rdev->gem.objects, list) {
591 unsigned domain;
592 const char *placement;
593
594 domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
595 switch (domain) {
596 case RADEON_GEM_DOMAIN_VRAM:
597 placement = "VRAM";
598 break;
599 case RADEON_GEM_DOMAIN_GTT:
600 placement = " GTT";
601 break;
602 case RADEON_GEM_DOMAIN_CPU:
603 default:
604 placement = " CPU";
605 break;
606 }
607 seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
608 i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
609 placement, (unsigned long)rbo->pid);
610 i++;
611 }
612 mutex_unlock(&rdev->gem.mutex);
613 return 0;
614}
615
616static struct drm_info_list radeon_debugfs_gem_list[] = {
617 {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
618};
619#endif
620
621int radeon_gem_debugfs_init(struct radeon_device *rdev)
622{
623#if defined(CONFIG_DEBUG_FS)
624 return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
625#endif
626 return 0;
627}
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index cb800995d4f9..0abe5a9431bb 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -64,7 +64,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
64 } 64 }
65 65
66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, 66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
67 RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo); 67 domain, NULL, &sa_manager->bo);
68 if (r) { 68 if (r) {
69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); 69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
70 return r; 70 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0312a7f4d768..906e5c0ca3b9 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -692,3 +692,140 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
692 if (set_clocks) 692 if (set_clocks)
693 radeon_set_uvd_clocks(rdev, 53300, 40000); 693 radeon_set_uvd_clocks(rdev, 53300, 40000);
694} 694}
695
696static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
697 unsigned target_freq,
698 unsigned pd_min,
699 unsigned pd_even)
700{
701 unsigned post_div = vco_freq / target_freq;
702
703 /* adjust to post divider minimum value */
704 if (post_div < pd_min)
705 post_div = pd_min;
706
707 /* we alway need a frequency less than or equal the target */
708 if ((vco_freq / post_div) > target_freq)
709 post_div += 1;
710
711 /* post dividers above a certain value must be even */
712 if (post_div > pd_even && post_div % 2)
713 post_div += 1;
714
715 return post_div;
716}
717
718/**
719 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
720 *
721 * @rdev: radeon_device pointer
722 * @vclk: wanted VCLK
723 * @dclk: wanted DCLK
724 * @vco_min: minimum VCO frequency
725 * @vco_max: maximum VCO frequency
726 * @fb_factor: factor to multiply vco freq with
727 * @fb_mask: limit and bitmask for feedback divider
728 * @pd_min: post divider minimum
729 * @pd_max: post divider maximum
730 * @pd_even: post divider must be even above this value
731 * @optimal_fb_div: resulting feedback divider
732 * @optimal_vclk_div: resulting vclk post divider
733 * @optimal_dclk_div: resulting dclk post divider
734 *
735 * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
736 * Returns zero on success -EINVAL on error.
737 */
738int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
739 unsigned vclk, unsigned dclk,
740 unsigned vco_min, unsigned vco_max,
741 unsigned fb_factor, unsigned fb_mask,
742 unsigned pd_min, unsigned pd_max,
743 unsigned pd_even,
744 unsigned *optimal_fb_div,
745 unsigned *optimal_vclk_div,
746 unsigned *optimal_dclk_div)
747{
748 unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
749
750 /* start off with something large */
751 unsigned optimal_score = ~0;
752
753 /* loop through vco from low to high */
754 vco_min = max(max(vco_min, vclk), dclk);
755 for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
756
757 uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
758 unsigned vclk_div, dclk_div, score;
759
760 do_div(fb_div, ref_freq);
761
762 /* fb div out of range ? */
763 if (fb_div > fb_mask)
764 break; /* it can oly get worse */
765
766 fb_div &= fb_mask;
767
768 /* calc vclk divider with current vco freq */
769 vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
770 pd_min, pd_even);
771 if (vclk_div > pd_max)
772 break; /* vco is too big, it has to stop */
773
774 /* calc dclk divider with current vco freq */
775 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
776 pd_min, pd_even);
777 if (vclk_div > pd_max)
778 break; /* vco is too big, it has to stop */
779
780 /* calc score with current vco freq */
781 score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
782
783 /* determine if this vco setting is better than current optimal settings */
784 if (score < optimal_score) {
785 *optimal_fb_div = fb_div;
786 *optimal_vclk_div = vclk_div;
787 *optimal_dclk_div = dclk_div;
788 optimal_score = score;
789 if (optimal_score == 0)
790 break; /* it can't get better than this */
791 }
792 }
793
794 /* did we found a valid setup ? */
795 if (optimal_score == ~0)
796 return -EINVAL;
797
798 return 0;
799}
800
801int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
802 unsigned cg_upll_func_cntl)
803{
804 unsigned i;
805
806 /* make sure UPLL_CTLREQ is deasserted */
807 WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
808
809 mdelay(10);
810
811 /* assert UPLL_CTLREQ */
812 WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
813
814 /* wait for CTLACK and CTLACK2 to get asserted */
815 for (i = 0; i < 100; ++i) {
816 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
817 if ((RREG32(cg_upll_func_cntl) & mask) == mask)
818 break;
819 mdelay(10);
820 }
821
822 /* deassert UPLL_CTLREQ */
823 WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
824
825 if (i == 100) {
826 DRM_ERROR("Timeout setting UVD clocks!\n");
827 return -ETIMEDOUT;
828 }
829
830 return 0;
831}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 91530d4c11c4..83f612a9500b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -44,56 +44,9 @@ void rv770_fini(struct radeon_device *rdev);
44static void rv770_pcie_gen2_enable(struct radeon_device *rdev); 44static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
45int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 45int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
46 46
47static int rv770_uvd_calc_post_div(unsigned target_freq,
48 unsigned vco_freq,
49 unsigned *div)
50{
51 /* Fclk = Fvco / PDIV */
52 *div = vco_freq / target_freq;
53
54 /* we alway need a frequency less than or equal the target */
55 if ((vco_freq / *div) > target_freq)
56 *div += 1;
57
58 /* out of range ? */
59 if (*div > 30)
60 return -1; /* forget it */
61
62 *div -= 1;
63 return vco_freq / (*div + 1);
64}
65
66static int rv770_uvd_send_upll_ctlreq(struct radeon_device *rdev)
67{
68 unsigned i;
69
70 /* assert UPLL_CTLREQ */
71 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
72
73 /* wait for CTLACK and CTLACK2 to get asserted */
74 for (i = 0; i < 100; ++i) {
75 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
76 if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
77 break;
78 mdelay(10);
79 }
80 if (i == 100)
81 return -ETIMEDOUT;
82
83 /* deassert UPLL_CTLREQ */
84 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
85
86 return 0;
87}
88
89int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) 47int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
90{ 48{
91 /* start off with something large */ 49 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
92 int optimal_diff_score = 0x7FFFFFF;
93 unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
94 unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
95 unsigned vco_freq, vco_min = 50000, vco_max = 160000;
96 unsigned ref_freq = rdev->clock.spll.reference_freq;
97 int r; 50 int r;
98 51
99 /* RV740 uses evergreen uvd clk programming */ 52 /* RV740 uses evergreen uvd clk programming */
@@ -111,44 +64,15 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
111 return 0; 64 return 0;
112 } 65 }
113 66
114 /* loop through vco from low to high */ 67 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
115 vco_min = max(max(vco_min, vclk), dclk); 68 43663, 0x03FFFFFE, 1, 30, ~0,
116 for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 500) { 69 &fb_div, &vclk_div, &dclk_div);
117 uint64_t fb_div = (uint64_t)vco_freq * 43663; 70 if (r)
118 int calc_clk, diff_score, diff_vclk, diff_dclk; 71 return r;
119 unsigned vclk_div, dclk_div; 72
120 73 fb_div |= 1;
121 do_div(fb_div, ref_freq); 74 vclk_div -= 1;
122 fb_div |= 1; 75 dclk_div -= 1;
123
124 /* fb div out of range ? */
125 if (fb_div > 0x03FFFFFF)
126 break; /* it can oly get worse */
127
128 /* calc vclk with current vco freq. */
129 calc_clk = rv770_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
130 if (calc_clk == -1)
131 break; /* vco is too big, it has to stop. */
132 diff_vclk = vclk - calc_clk;
133
134 /* calc dclk with current vco freq. */
135 calc_clk = rv770_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
136 if (calc_clk == -1)
137 break; /* vco is too big, it has to stop. */
138 diff_dclk = dclk - calc_clk;
139
140 /* determine if this vco setting is better than current optimal settings */
141 diff_score = abs(diff_vclk) + abs(diff_dclk);
142 if (diff_score < optimal_diff_score) {
143 optimal_fb_div = fb_div;
144 optimal_vclk_div = vclk_div;
145 optimal_dclk_div = dclk_div;
146 optimal_vco_freq = vco_freq;
147 optimal_diff_score = diff_score;
148 if (optimal_diff_score == 0)
149 break; /* it can't get better than this */
150 }
151 }
152 76
153 /* set UPLL_FB_DIV to 0x50000 */ 77 /* set UPLL_FB_DIV to 0x50000 */
154 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK); 78 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
@@ -160,7 +84,7 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
160 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); 84 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
161 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1)); 85 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
162 86
163 r = rv770_uvd_send_upll_ctlreq(rdev); 87 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
164 if (r) 88 if (r)
165 return r; 89 return r;
166 90
@@ -170,13 +94,13 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
170 /* set the required FB_DIV, REF_DIV, Post divder values */ 94 /* set the required FB_DIV, REF_DIV, Post divder values */
171 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK); 95 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
172 WREG32_P(CG_UPLL_FUNC_CNTL_2, 96 WREG32_P(CG_UPLL_FUNC_CNTL_2,
173 UPLL_SW_HILEN(optimal_vclk_div >> 1) | 97 UPLL_SW_HILEN(vclk_div >> 1) |
174 UPLL_SW_LOLEN((optimal_vclk_div >> 1) + (optimal_vclk_div & 1)) | 98 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
175 UPLL_SW_HILEN2(optimal_dclk_div >> 1) | 99 UPLL_SW_HILEN2(dclk_div >> 1) |
176 UPLL_SW_LOLEN2((optimal_dclk_div >> 1) + (optimal_dclk_div & 1)), 100 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
177 ~UPLL_SW_MASK); 101 ~UPLL_SW_MASK);
178 102
179 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), 103 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
180 ~UPLL_FB_DIV_MASK); 104 ~UPLL_FB_DIV_MASK);
181 105
182 /* give the PLL some time to settle */ 106 /* give the PLL some time to settle */
@@ -191,7 +115,7 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
191 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); 115 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
192 WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1)); 116 WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
193 117
194 r = rv770_uvd_send_upll_ctlreq(rdev); 118 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
195 if (r) 119 if (r)
196 return r; 120 return r;
197 121
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 6a52b2054f32..85b16266f748 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -45,7 +45,7 @@
45# define UPLL_BYPASS_EN_MASK 0x00000004 45# define UPLL_BYPASS_EN_MASK 0x00000004
46# define UPLL_CTLREQ_MASK 0x00000008 46# define UPLL_CTLREQ_MASK 0x00000008
47# define UPLL_REF_DIV(x) ((x) << 16) 47# define UPLL_REF_DIV(x) ((x) << 16)
48# define UPLL_REF_DIV_MASK 0x001F0000 48# define UPLL_REF_DIV_MASK 0x003F0000
49# define UPLL_CTLACK_MASK 0x40000000 49# define UPLL_CTLACK_MASK 0x40000000
50# define UPLL_CTLACK2_MASK 0x80000000 50# define UPLL_CTLACK2_MASK 0x80000000
51#define CG_UPLL_FUNC_CNTL_2 0x71c 51#define CG_UPLL_FUNC_CNTL_2 0x71c
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index fe6b14e0021c..f0b6c2f87c4d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5415,62 +5415,9 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
5415 return clock; 5415 return clock;
5416} 5416}
5417 5417
5418static int si_uvd_calc_post_div(unsigned target_freq,
5419 unsigned vco_freq,
5420 unsigned *div)
5421{
5422 /* target larger than vco frequency ? */
5423 if (vco_freq < target_freq)
5424 return -1; /* forget it */
5425
5426 /* Fclk = Fvco / PDIV */
5427 *div = vco_freq / target_freq;
5428
5429 /* we alway need a frequency less than or equal the target */
5430 if ((vco_freq / *div) > target_freq)
5431 *div += 1;
5432
5433 /* dividers above 5 must be even */
5434 if (*div > 5 && *div % 2)
5435 *div += 1;
5436
5437 /* out of range ? */
5438 if (*div >= 128)
5439 return -1; /* forget it */
5440
5441 return vco_freq / *div;
5442}
5443
5444static int si_uvd_send_upll_ctlreq(struct radeon_device *rdev)
5445{
5446 unsigned i;
5447
5448 /* assert UPLL_CTLREQ */
5449 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
5450
5451 /* wait for CTLACK and CTLACK2 to get asserted */
5452 for (i = 0; i < 100; ++i) {
5453 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
5454 if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
5455 break;
5456 mdelay(10);
5457 }
5458 if (i == 100)
5459 return -ETIMEDOUT;
5460
5461 /* deassert UPLL_CTLREQ */
5462 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
5463
5464 return 0;
5465}
5466
5467int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) 5418int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5468{ 5419{
5469 /* start off with something large */ 5420 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
5470 int optimal_diff_score = 0x7FFFFFF;
5471 unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
5472 unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
5473 unsigned vco_freq;
5474 int r; 5421 int r;
5475 5422
5476 /* bypass vclk and dclk with bclk */ 5423 /* bypass vclk and dclk with bclk */
@@ -5487,40 +5434,11 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5487 return 0; 5434 return 0;
5488 } 5435 }
5489 5436
5490 /* loop through vco from low to high */ 5437 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
5491 for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) { 5438 16384, 0x03FFFFFF, 0, 128, 5,
5492 unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384; 5439 &fb_div, &vclk_div, &dclk_div);
5493 int calc_clk, diff_score, diff_vclk, diff_dclk; 5440 if (r)
5494 unsigned vclk_div, dclk_div; 5441 return r;
5495
5496 /* fb div out of range ? */
5497 if (fb_div > 0x03FFFFFF)
5498 break; /* it can oly get worse */
5499
5500 /* calc vclk with current vco freq. */
5501 calc_clk = si_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
5502 if (calc_clk == -1)
5503 break; /* vco is too big, it has to stop. */
5504 diff_vclk = vclk - calc_clk;
5505
5506 /* calc dclk with current vco freq. */
5507 calc_clk = si_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
5508 if (calc_clk == -1)
5509 break; /* vco is too big, it has to stop. */
5510 diff_dclk = dclk - calc_clk;
5511
5512 /* determine if this vco setting is better than current optimal settings */
5513 diff_score = abs(diff_vclk) + abs(diff_dclk);
5514 if (diff_score < optimal_diff_score) {
5515 optimal_fb_div = fb_div;
5516 optimal_vclk_div = vclk_div;
5517 optimal_dclk_div = dclk_div;
5518 optimal_vco_freq = vco_freq;
5519 optimal_diff_score = diff_score;
5520 if (optimal_diff_score == 0)
5521 break; /* it can't get better than this */
5522 }
5523 }
5524 5442
5525 /* set RESET_ANTI_MUX to 0 */ 5443 /* set RESET_ANTI_MUX to 0 */
5526 WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK); 5444 WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
@@ -5537,7 +5455,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5537 5455
5538 mdelay(1); 5456 mdelay(1);
5539 5457
5540 r = si_uvd_send_upll_ctlreq(rdev); 5458 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
5541 if (r) 5459 if (r)
5542 return r; 5460 return r;
5543 5461
@@ -5548,19 +5466,19 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5548 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK); 5466 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
5549 5467
5550 /* set feedback divider */ 5468 /* set feedback divider */
5551 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK); 5469 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
5552 5470
5553 /* set ref divider to 0 */ 5471 /* set ref divider to 0 */
5554 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK); 5472 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
5555 5473
5556 if (optimal_vco_freq < 187500) 5474 if (fb_div < 307200)
5557 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9); 5475 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
5558 else 5476 else
5559 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9); 5477 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
5560 5478
5561 /* set PDIV_A and PDIV_B */ 5479 /* set PDIV_A and PDIV_B */
5562 WREG32_P(CG_UPLL_FUNC_CNTL_2, 5480 WREG32_P(CG_UPLL_FUNC_CNTL_2,
5563 UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div), 5481 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
5564 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK)); 5482 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
5565 5483
5566 /* give the PLL some time to settle */ 5484 /* give the PLL some time to settle */
@@ -5574,7 +5492,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
5574 /* switch from bypass mode to normal mode */ 5492 /* switch from bypass mode to normal mode */
5575 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); 5493 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
5576 5494
5577 r = si_uvd_send_upll_ctlreq(rdev); 5495 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
5578 if (r) 5496 if (r)
5579 return r; 5497 return r;
5580 5498
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 042b91d6c941..222877ba6cf5 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -36,7 +36,7 @@
36# define UPLL_BYPASS_EN_MASK 0x00000004 36# define UPLL_BYPASS_EN_MASK 0x00000004
37# define UPLL_CTLREQ_MASK 0x00000008 37# define UPLL_CTLREQ_MASK 0x00000008
38# define UPLL_VCO_MODE_MASK 0x00000600 38# define UPLL_VCO_MODE_MASK 0x00000600
39# define UPLL_REF_DIV_MASK 0x001F0000 39# define UPLL_REF_DIV_MASK 0x003F0000
40# define UPLL_CTLACK_MASK 0x40000000 40# define UPLL_CTLACK_MASK 0x40000000
41# define UPLL_CTLACK2_MASK 0x80000000 41# define UPLL_CTLACK2_MASK 0x80000000
42#define CG_UPLL_FUNC_CNTL_2 0x638 42#define CG_UPLL_FUNC_CNTL_2 0x638
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 918e8fe2f5e9..c2af598f701d 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -240,6 +240,7 @@
240 {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 240 {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
241 {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 241 {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
242 {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 242 {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
243 {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
243 {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 244 {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
244 {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 245 {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
245 {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 246 {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -247,11 +248,13 @@
247 {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 248 {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
248 {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 249 {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
249 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 250 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
251 {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
250 {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 252 {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
251 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 253 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
252 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 254 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
253 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 255 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
254 {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 256 {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
257 {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
255 {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 258 {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
256 {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 259 {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
257 {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 260 {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
@@ -603,6 +606,8 @@
603 {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 606 {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
604 {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 607 {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
605 {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 608 {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
609 {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
610 {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
606 {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 611 {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
607 {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 612 {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
608 {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 613 {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \