author    Jerome Glisse <jglisse@redhat.com>    2009-09-30 16:09:06 -0400
committer Dave Airlie <airlied@redhat.com>      2009-10-01 19:25:50 -0400
commit    c010f8000a925e08d84d9391e13dd297b9fdc393 (patch)
tree      fb1c4bb4c417fe91f40b1d7697e8a11f27bddcea /drivers/gpu/drm
parent    3bc6853593bd4fba357dc252b3cf60cd86a1d2ec (diff)
drm/radeon/kms: Convert RS600 to new init path
The new init path allows simplifying ASIC initialization and makes it easier to trace what happens on each different ASIC. We are removing most callbacks. Also do a massive RS600 register cleanup to clarify the RS600 registers; we are still a bit fuzzy on some registers and are waiting for more information. I don't have the hardware to test, so this patch is a best effort to not break anything and to try to improve things.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h  |  44
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c        | 466
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h       | 406
3 files changed, 696 insertions, 220 deletions
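
The conversion is easier to follow with a toy model of the callback-table change. The sketch below is a standalone mock-up under stated assumptions (the mock_* names and types are invented for illustration and are not radeon code); it only shows the shape of the change visible in the radeon_asic.h hunk below: per-block callbacks (mc_init, gart_enable, cp_init, and friends) drop out of the ASIC table, and a single internal startup routine owns the bring-up order instead.

/*
 * Illustrative sketch only, not the real radeon structures.  The old path
 * exposed one callback per hardware block and let the core driver sequence
 * them; the new path keeps only init/fini/suspend/resume in the table and
 * lets the ASIC code order its own blocks, which is why most rs600_asic
 * entries become NULL in the hunk below.
 */
#include <stdio.h>

struct mock_device { int gart_ready; int cp_ready; };

struct mock_asic {                       /* reduced lifecycle table */
	int  (*init)(struct mock_device *dev);
	int  (*suspend)(struct mock_device *dev);
	int  (*resume)(struct mock_device *dev);
	void (*fini)(struct mock_device *dev);
};

/* ASIC-private helpers, no longer visible through the table. */
static int mock_gart_enable(struct mock_device *dev) { dev->gart_ready = 1; return 0; }
static int mock_cp_init(struct mock_device *dev)     { dev->cp_ready = 1;   return 0; }

/* Plays the role of rs600_startup(): one place owns the bring-up order. */
static int mock_startup(struct mock_device *dev)
{
	int r = mock_gart_enable(dev);
	return r ? r : mock_cp_init(dev);
}

static int  mock_init(struct mock_device *dev)    { return mock_startup(dev); }
static int  mock_suspend(struct mock_device *dev) { dev->cp_ready = 0; return 0; }
static int  mock_resume(struct mock_device *dev)  { return mock_startup(dev); }
static void mock_fini(struct mock_device *dev)    { dev->cp_ready = dev->gart_ready = 0; }

static const struct mock_asic mock_rs600_like = {
	.init = mock_init, .suspend = mock_suspend,
	.resume = mock_resume, .fini = mock_fini,
};

int main(void)
{
	struct mock_device dev = { 0 };

	if (mock_rs600_like.init(&dev) == 0)
		printf("gart=%d cp=%d\n", dev.gart_ready, dev.cp_ready);
	mock_rs600_like.fini(&dev);
	return 0;
}
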
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3ad916492747..7bc86a6aa5d6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -290,18 +290,13 @@ static struct radeon_asic rs400_asic = {
290/* 290/*
291 * rs600. 291 * rs600.
292 */ 292 */
293int rs600_init(struct radeon_device *rdev); 293extern int rs600_init(struct radeon_device *rdev);
294void rs600_errata(struct radeon_device *rdev); 294extern void rs600_fini(struct radeon_device *rdev);
295void rs600_vram_info(struct radeon_device *rdev); 295extern int rs600_suspend(struct radeon_device *rdev);
296int rs600_mc_init(struct radeon_device *rdev); 296extern int rs600_resume(struct radeon_device *rdev);
297void rs600_mc_fini(struct radeon_device *rdev);
298int rs600_irq_set(struct radeon_device *rdev); 297int rs600_irq_set(struct radeon_device *rdev);
299int rs600_irq_process(struct radeon_device *rdev); 298int rs600_irq_process(struct radeon_device *rdev);
300u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 299u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
301int rs600_gart_init(struct radeon_device *rdev);
302void rs600_gart_fini(struct radeon_device *rdev);
303int rs600_gart_enable(struct radeon_device *rdev);
304void rs600_gart_disable(struct radeon_device *rdev);
305void rs600_gart_tlb_flush(struct radeon_device *rdev); 300void rs600_gart_tlb_flush(struct radeon_device *rdev);
306int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 301int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
307uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 302uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -309,27 +304,30 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
309void rs600_bandwidth_update(struct radeon_device *rdev); 304void rs600_bandwidth_update(struct radeon_device *rdev);
310static struct radeon_asic rs600_asic = { 305static struct radeon_asic rs600_asic = {
311 .init = &rs600_init, 306 .init = &rs600_init,
312 .errata = &rs600_errata, 307 .fini = &rs600_fini,
313 .vram_info = &rs600_vram_info, 308 .suspend = &rs600_suspend,
309 .resume = &rs600_resume,
310 .errata = NULL,
311 .vram_info = NULL,
314 .gpu_reset = &r300_gpu_reset, 312 .gpu_reset = &r300_gpu_reset,
315 .mc_init = &rs600_mc_init, 313 .mc_init = NULL,
316 .mc_fini = &rs600_mc_fini, 314 .mc_fini = NULL,
317 .wb_init = &r100_wb_init, 315 .wb_init = NULL,
318 .wb_fini = &r100_wb_fini, 316 .wb_fini = NULL,
319 .gart_init = &rs600_gart_init, 317 .gart_init = NULL,
320 .gart_fini = &rs600_gart_fini, 318 .gart_fini = NULL,
321 .gart_enable = &rs600_gart_enable, 319 .gart_enable = NULL,
322 .gart_disable = &rs600_gart_disable, 320 .gart_disable = NULL,
323 .gart_tlb_flush = &rs600_gart_tlb_flush, 321 .gart_tlb_flush = &rs600_gart_tlb_flush,
324 .gart_set_page = &rs600_gart_set_page, 322 .gart_set_page = &rs600_gart_set_page,
325 .cp_init = &r100_cp_init, 323 .cp_init = NULL,
326 .cp_fini = &r100_cp_fini, 324 .cp_fini = NULL,
327 .cp_disable = &r100_cp_disable, 325 .cp_disable = NULL,
328 .cp_commit = &r100_cp_commit, 326 .cp_commit = &r100_cp_commit,
329 .ring_start = &r300_ring_start, 327 .ring_start = &r300_ring_start,
330 .ring_test = &r100_ring_test, 328 .ring_test = &r100_ring_test,
331 .ring_ib_execute = &r100_ring_ib_execute, 329 .ring_ib_execute = &r100_ring_ib_execute,
332 .ib_test = &r100_ib_test, 330 .ib_test = NULL,
333 .irq_set = &rs600_irq_set, 331 .irq_set = &rs600_irq_set,
334 .irq_process = &rs600_irq_process, 332 .irq_process = &rs600_irq_process,
335 .get_vblank_counter = &rs600_get_vblank_counter, 333 .get_vblank_counter = &rs600_get_vblank_counter,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fa8e451c64e8..9e4fdc173557 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -25,27 +25,26 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28/* RS600 / Radeon X1250/X1270 integrated GPU
29 *
30 * This file gather function specific to RS600 which is the IGP of
31 * the X1250/X1270 family supporting intel CPU (while RS690/RS740
32 * is the X1250/X1270 supporting AMD CPU). The display engine are
33 * the avivo one, bios is an atombios, 3D block are the one of the
34 * R4XX family. The GART is different from the RS400 one and is very
35 * close to the one of the R600 family (R600 likely being an evolution
36 * of the RS600 GART block).
37 */
28#include "drmP.h" 38#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 39#include "radeon.h"
40#include "atom.h"
41#include "rs600d.h"
31 42
32#include "rs600_reg_safe.h" 43#include "rs600_reg_safe.h"
33 44
34/* rs600 depends on : */
35void r100_hdp_reset(struct radeon_device *rdev);
36int r100_gui_wait_for_idle(struct radeon_device *rdev);
37int r300_mc_wait_for_idle(struct radeon_device *rdev);
38void r420_pipes_init(struct radeon_device *rdev);
39
40/* This files gather functions specifics to :
41 * rs600
42 *
43 * Some of these functions might be used by newer ASICs.
44 */
45void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
46int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
47 47
48
49/* 48/*
50 * GART. 49 * GART.
51 */ 50 */
@@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
53{ 52{
54 uint32_t tmp; 53 uint32_t tmp;
55 54
56 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 55 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
57 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); 56 tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
58 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 57 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
59 58
60 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 59 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
61 tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; 60 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
62 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 61 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
63 62
64 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 63 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
65 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); 64 tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
66 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 65 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
67 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 66 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
68} 67}
69 68
70int rs600_gart_init(struct radeon_device *rdev) 69int rs600_gart_init(struct radeon_device *rdev)
@@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev)
86 85
87int rs600_gart_enable(struct radeon_device *rdev) 86int rs600_gart_enable(struct radeon_device *rdev)
88{ 87{
89 uint32_t tmp; 88 u32 tmp;
90 int r, i; 89 int r, i;
91 90
92 if (rdev->gart.table.vram.robj == NULL) { 91 if (rdev->gart.table.vram.robj == NULL) {
@@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev)
96 r = radeon_gart_table_vram_pin(rdev); 95 r = radeon_gart_table_vram_pin(rdev);
97 if (r) 96 if (r)
98 return r; 97 return r;
98 /* Enable bus master */
99 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
100 WREG32(R_00004C_BUS_CNTL, tmp);
99 /* FIXME: setup default page */ 101 /* FIXME: setup default page */
100 WREG32_MC(RS600_MC_PT0_CNTL, 102 WREG32_MC(R_000100_MC_PT0_CNTL,
101 (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | 103 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
102 RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); 104 S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
103 for (i = 0; i < 19; i++) { 105 for (i = 0; i < 19; i++) {
104 WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, 106 WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
105 (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | 107 S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
106 RS600_SYSTEM_ACCESS_MODE_IN_SYS | 108 S_00016C_SYSTEM_ACCESS_MODE_MASK(
107 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | 109 V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
108 RS600_EFFECTIVE_L1_CACHE_SIZE(3) | 110 S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
109 RS600_ENABLE_FRAGMENT_PROCESSING | 111 V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
110 RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); 112 S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
113 S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
114 S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
111 } 115 }
112 116
113 /* System context map to GART space */ 117 /* System context map to GART space */
114 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); 118 WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
115 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 119 WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
116 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
117 120
118 /* enable first context */ 121 /* enable first context */
119 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); 122 WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
120 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 123 WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
121 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); 124 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
122 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, 125 S_000102_ENABLE_PAGE_TABLE(1) |
123 (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); 126 S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
124 /* disable all other contexts */ 127 /* disable all other contexts */
125 for (i = 1; i < 8; i++) { 128 for (i = 1; i < 8; i++) {
126 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); 129 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
127 } 130 }
128 131
129 /* setup the page table */ 132 /* setup the page table */
130 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, 133 WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
131 rdev->gart.table_addr); 134 rdev->gart.table_addr);
132 WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); 135 WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
133 136
134 /* enable page tables */ 137 /* enable page tables */
135 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 138 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
136 WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); 139 WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
137 tmp = RREG32_MC(RS600_MC_CNTL1); 140 tmp = RREG32_MC(R_000009_MC_CNTL1);
138 WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); 141 WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
139 rs600_gart_tlb_flush(rdev); 142 rs600_gart_tlb_flush(rdev);
140 rdev->gart.ready = true; 143 rdev->gart.ready = true;
141 return 0; 144 return 0;
@@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
146 uint32_t tmp; 149 uint32_t tmp;
147 150
148 /* FIXME: disable out of gart access */ 151 /* FIXME: disable out of gart access */
149 WREG32_MC(RS600_MC_PT0_CNTL, 0); 152 WREG32_MC(R_000100_MC_PT0_CNTL, 0);
150 tmp = RREG32_MC(RS600_MC_CNTL1); 153 tmp = RREG32_MC(R_000009_MC_CNTL1);
151 tmp &= ~RS600_ENABLE_PAGE_TABLES; 154 WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
152 WREG32_MC(RS600_MC_CNTL1, tmp);
153 if (rdev->gart.table.vram.robj) { 155 if (rdev->gart.table.vram.robj) {
154 radeon_object_kunmap(rdev->gart.table.vram.robj); 156 radeon_object_kunmap(rdev->gart.table.vram.robj);
155 radeon_object_unpin(rdev->gart.table.vram.robj); 157 radeon_object_unpin(rdev->gart.table.vram.robj);
@@ -183,125 +185,46 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
183 return 0; 185 return 0;
184} 186}
185 187
186
187/*
188 * MC.
189 */
190void rs600_mc_disable_clients(struct radeon_device *rdev)
191{
192 unsigned tmp;
193
194 if (r100_gui_wait_for_idle(rdev)) {
195 printk(KERN_WARNING "Failed to wait GUI idle while "
196 "programming pipes. Bad things might happen.\n");
197 }
198
199 rv515_vga_render_disable(rdev);
200
201 tmp = RREG32(AVIVO_D1VGA_CONTROL);
202 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
203 tmp = RREG32(AVIVO_D2VGA_CONTROL);
204 WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
205
206 tmp = RREG32(AVIVO_D1CRTC_CONTROL);
207 WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
208 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
209 WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
210
211 /* make sure all previous write got through */
212 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
213
214 mdelay(1);
215}
216
217int rs600_mc_init(struct radeon_device *rdev)
218{
219 uint32_t tmp;
220 int r;
221
222 if (r100_debugfs_rbbm_init(rdev)) {
223 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
224 }
225
226 rs600_gpu_init(rdev);
227 rs600_gart_disable(rdev);
228
229 /* Setup GPU memory space */
230 rdev->mc.vram_location = 0xFFFFFFFFUL;
231 rdev->mc.gtt_location = 0xFFFFFFFFUL;
232 r = radeon_mc_setup(rdev);
233 if (r) {
234 return r;
235 }
236
237 /* Program GPU memory space */
238 /* Enable bus master */
239 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
240 WREG32(RADEON_BUS_CNTL, tmp);
241 /* FIXME: What does AGP means for such chipset ? */
242 WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
243 /* FIXME: are this AGP reg in indirect MC range ? */
244 WREG32_MC(RS600_MC_AGP_BASE, 0);
245 WREG32_MC(RS600_MC_AGP_BASE_2, 0);
246 rs600_mc_disable_clients(rdev);
247 if (rs600_mc_wait_for_idle(rdev)) {
248 printk(KERN_WARNING "Failed to wait MC idle while "
249 "programming pipes. Bad things might happen.\n");
250 }
251 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
252 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
253 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
254 WREG32_MC(RS600_MC_FB_LOCATION, tmp);
255 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
256 return 0;
257}
258
259void rs600_mc_fini(struct radeon_device *rdev)
260{
261}
262
263
264/*
265 * Interrupts
266 */
267int rs600_irq_set(struct radeon_device *rdev) 188int rs600_irq_set(struct radeon_device *rdev)
268{ 189{
269 uint32_t tmp = 0; 190 uint32_t tmp = 0;
270 uint32_t mode_int = 0; 191 uint32_t mode_int = 0;
271 192
272 if (rdev->irq.sw_int) { 193 if (rdev->irq.sw_int) {
273 tmp |= RADEON_SW_INT_ENABLE; 194 tmp |= S_000040_SW_INT_EN(1);
274 } 195 }
275 if (rdev->irq.crtc_vblank_int[0]) { 196 if (rdev->irq.crtc_vblank_int[0]) {
276 mode_int |= AVIVO_D1MODE_INT_MASK; 197 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
277 } 198 }
278 if (rdev->irq.crtc_vblank_int[1]) { 199 if (rdev->irq.crtc_vblank_int[1]) {
279 mode_int |= AVIVO_D2MODE_INT_MASK; 200 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
280 } 201 }
281 WREG32(RADEON_GEN_INT_CNTL, tmp); 202 WREG32(R_000040_GEN_INT_CNTL, tmp);
282 WREG32(AVIVO_DxMODE_INT_MASK, mode_int); 203 WREG32(R_006540_DxMODE_INT_MASK, mode_int);
283 return 0; 204 return 0;
284} 205}
285 206
286static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) 207static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
287{ 208{
288 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 209 uint32_t irqs = RREG32(R_000040_GEN_INT_CNTL);
289 uint32_t irq_mask = RADEON_SW_INT_TEST; 210 uint32_t irq_mask = ~C_000040_SW_INT_EN;
290 211
291 if (irqs & AVIVO_DISPLAY_INT_STATUS) { 212 if (G_000040_DISPLAY_INT_STATUS(irqs)) {
292 *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); 213 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
293 if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { 214 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
294 WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); 215 WREG32(R_006534_D1MODE_VBLANK_STATUS,
216 S_006534_D1MODE_VBLANK_ACK(1));
295 } 217 }
296 if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { 218 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
297 WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); 219 WREG32(R_006D34_D2MODE_VBLANK_STATUS,
220 S_006D34_D2MODE_VBLANK_ACK(1));
298 } 221 }
299 } else { 222 } else {
300 *r500_disp_int = 0; 223 *r500_disp_int = 0;
301 } 224 }
302 225
303 if (irqs) { 226 if (irqs) {
304 WREG32(RADEON_GEN_INT_STATUS, irqs); 227 WREG32(R_000040_GEN_INT_CNTL, irqs);
305 } 228 }
306 return irqs & irq_mask; 229 return irqs & irq_mask;
307} 230}
@@ -317,16 +240,13 @@ int rs600_irq_process(struct radeon_device *rdev)
317 } 240 }
318 while (status || r500_disp_int) { 241 while (status || r500_disp_int) {
319 /* SW interrupt */ 242 /* SW interrupt */
320 if (status & RADEON_SW_INT_TEST) { 243 if (G_000040_SW_INT_EN(status))
321 radeon_fence_process(rdev); 244 radeon_fence_process(rdev);
322 }
323 /* Vertical blank interrupts */ 245 /* Vertical blank interrupts */
324 if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { 246 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
325 drm_handle_vblank(rdev->ddev, 0); 247 drm_handle_vblank(rdev->ddev, 0);
326 } 248 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
327 if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
328 drm_handle_vblank(rdev->ddev, 1); 249 drm_handle_vblank(rdev->ddev, 1);
329 }
330 status = rs600_irq_ack(rdev, &r500_disp_int); 250 status = rs600_irq_ack(rdev, &r500_disp_int);
331 } 251 }
332 return IRQ_HANDLED; 252 return IRQ_HANDLED;
@@ -335,53 +255,34 @@ int rs600_irq_process(struct radeon_device *rdev)
335u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) 255u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
336{ 256{
337 if (crtc == 0) 257 if (crtc == 0)
338 return RREG32(AVIVO_D1CRTC_FRAME_COUNT); 258 return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
339 else 259 else
340 return RREG32(AVIVO_D2CRTC_FRAME_COUNT); 260 return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
341} 261}
342 262
343
344/*
345 * Global GPU functions
346 */
347int rs600_mc_wait_for_idle(struct radeon_device *rdev) 263int rs600_mc_wait_for_idle(struct radeon_device *rdev)
348{ 264{
349 unsigned i; 265 unsigned i;
350 uint32_t tmp;
351 266
352 for (i = 0; i < rdev->usec_timeout; i++) { 267 for (i = 0; i < rdev->usec_timeout; i++) {
353 /* read MC_STATUS */ 268 if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
354 tmp = RREG32_MC(RS600_MC_STATUS);
355 if (tmp & RS600_MC_STATUS_IDLE) {
356 return 0; 269 return 0;
357 } 270 udelay(1);
358 DRM_UDELAY(1);
359 } 271 }
360 return -1; 272 return -1;
361} 273}
362 274
363void rs600_errata(struct radeon_device *rdev)
364{
365 rdev->pll_errata = 0;
366}
367
368void rs600_gpu_init(struct radeon_device *rdev) 275void rs600_gpu_init(struct radeon_device *rdev)
369{ 276{
370 /* FIXME: HDP same place on rs600 ? */ 277 /* FIXME: HDP same place on rs600 ? */
371 r100_hdp_reset(rdev); 278 r100_hdp_reset(rdev);
372 rv515_vga_render_disable(rdev);
373 /* FIXME: is this correct ? */ 279 /* FIXME: is this correct ? */
374 r420_pipes_init(rdev); 280 r420_pipes_init(rdev);
375 if (rs600_mc_wait_for_idle(rdev)) { 281 /* Wait for mc idle */
376 printk(KERN_WARNING "Failed to wait MC idle while " 282 if (rs600_mc_wait_for_idle(rdev))
377 "programming pipes. Bad things might happen.\n"); 283 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
378 }
379} 284}
380 285
381
382/*
383 * VRAM info.
384 */
385void rs600_vram_info(struct radeon_device *rdev) 286void rs600_vram_info(struct radeon_device *rdev)
386{ 287{
387 /* FIXME: to do or is these values sane ? */ 288 /* FIXME: to do or is these values sane ? */
@@ -394,26 +295,24 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
394 /* FIXME: implement, should this be like rs690 ? */ 295 /* FIXME: implement, should this be like rs690 ? */
395} 296}
396 297
397
398/*
399 * Indirect registers accessor
400 */
401uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 298uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
402{ 299{
403 uint32_t r; 300 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
404 301 S_000070_MC_IND_CITF_ARB0(1));
405 WREG32(RS600_MC_INDEX, 302 return RREG32(R_000074_MC_IND_DATA);
406 ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
407 r = RREG32(RS600_MC_DATA);
408 return r;
409} 303}
410 304
411void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 305void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
412{ 306{
413 WREG32(RS600_MC_INDEX, 307 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
414 RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | 308 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
415 ((reg) & RS600_MC_ADDR_MASK)); 309 WREG32(R_000074_MC_IND_DATA, v);
416 WREG32(RS600_MC_DATA, v); 310}
311
312void rs600_debugfs(struct radeon_device *rdev)
313{
314 if (r100_debugfs_rbbm_init(rdev))
315 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
417} 316}
418 317
419void rs600_set_safe_registers(struct radeon_device *rdev) 318void rs600_set_safe_registers(struct radeon_device *rdev)
@@ -422,8 +321,181 @@ void rs600_set_safe_registers(struct radeon_device *rdev)
422 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); 321 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
423} 322}
424 323
324static void rs600_mc_program(struct radeon_device *rdev)
325{
326 struct rv515_mc_save save;
327
328 /* Stops all mc clients */
329 rv515_mc_stop(rdev, &save);
330
331 /* Wait for mc idle */
332 if (rs600_mc_wait_for_idle(rdev))
333 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
334
335 /* FIXME: What does AGP means for such chipset ? */
336 WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
337 WREG32_MC(R_000006_AGP_BASE, 0);
338 WREG32_MC(R_000007_AGP_BASE_2, 0);
339 /* Program MC */
340 WREG32_MC(R_000004_MC_FB_LOCATION,
341 S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
342 S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
343 WREG32(R_000134_HDP_FB_LOCATION,
344 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
345
346 rv515_mc_resume(rdev, &save);
347}
348
349static int rs600_startup(struct radeon_device *rdev)
350{
351 int r;
352
353 rs600_mc_program(rdev);
354 /* Resume clock */
355 rv515_clock_startup(rdev);
356 /* Initialize GPU configuration (# pipes, ...) */
357 rs600_gpu_init(rdev);
358 /* Initialize GART (initialize after TTM so we can allocate
359 * memory through TTM but finalize after TTM) */
360 r = rs600_gart_enable(rdev);
361 if (r)
362 return r;
363 /* Enable IRQ */
364 rdev->irq.sw_int = true;
365 rs600_irq_set(rdev);
366 /* 1M ring buffer */
367 r = r100_cp_init(rdev, 1024 * 1024);
368 if (r) {
369 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
370 return r;
371 }
372 r = r100_wb_init(rdev);
373 if (r)
374 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
375 r = r100_ib_init(rdev);
376 if (r) {
377 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
378 return r;
379 }
380 return 0;
381}
382
383int rs600_resume(struct radeon_device *rdev)
384{
385 /* Make sur GART are not working */
386 rs600_gart_disable(rdev);
387 /* Resume clock before doing reset */
388 rv515_clock_startup(rdev);
389 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
390 if (radeon_gpu_reset(rdev)) {
391 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
392 RREG32(R_000E40_RBBM_STATUS),
393 RREG32(R_0007C0_CP_STAT));
394 }
395 /* post */
396 atom_asic_init(rdev->mode_info.atom_context);
397 /* Resume clock after posting */
398 rv515_clock_startup(rdev);
399 return rs600_startup(rdev);
400}
401
402int rs600_suspend(struct radeon_device *rdev)
403{
404 r100_cp_disable(rdev);
405 r100_wb_disable(rdev);
406 r100_irq_disable(rdev);
407 rs600_gart_disable(rdev);
408 return 0;
409}
410
411void rs600_fini(struct radeon_device *rdev)
412{
413 rs600_suspend(rdev);
414 r100_cp_fini(rdev);
415 r100_wb_fini(rdev);
416 r100_ib_fini(rdev);
417 radeon_gem_fini(rdev);
418 rs600_gart_fini(rdev);
419 radeon_irq_kms_fini(rdev);
420 radeon_fence_driver_fini(rdev);
421 radeon_object_fini(rdev);
422 radeon_atombios_fini(rdev);
423 kfree(rdev->bios);
424 rdev->bios = NULL;
425}
426
425int rs600_init(struct radeon_device *rdev) 427int rs600_init(struct radeon_device *rdev)
426{ 428{
427 rs600_set_safe_registers(rdev); 429 int r;
430
431 rdev->new_init_path = true;
432 /* Disable VGA */
433 rv515_vga_render_disable(rdev);
434 /* Initialize scratch registers */
435 radeon_scratch_init(rdev);
436 /* Initialize surface registers */
437 radeon_surface_init(rdev);
438 /* BIOS */
439 if (!radeon_get_bios(rdev)) {
440 if (ASIC_IS_AVIVO(rdev))
441 return -EINVAL;
442 }
443 if (rdev->is_atom_bios) {
444 r = radeon_atombios_init(rdev);
445 if (r)
446 return r;
447 } else {
448 dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
449 return -EINVAL;
450 }
451 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
452 if (radeon_gpu_reset(rdev)) {
453 dev_warn(rdev->dev,
454 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
455 RREG32(R_000E40_RBBM_STATUS),
456 RREG32(R_0007C0_CP_STAT));
457 }
458 /* check if cards are posted or not */
459 if (!radeon_card_posted(rdev) && rdev->bios) {
460 DRM_INFO("GPU not posted. posting now...\n");
461 atom_asic_init(rdev->mode_info.atom_context);
462 }
463 /* Initialize clocks */
464 radeon_get_clock_info(rdev->ddev);
465 /* Get vram informations */
466 rs600_vram_info(rdev);
467 /* Initialize memory controller (also test AGP) */
468 r = r420_mc_init(rdev);
469 if (r)
470 return r;
471 rs600_debugfs(rdev);
472 /* Fence driver */
473 r = radeon_fence_driver_init(rdev);
474 if (r)
475 return r;
476 r = radeon_irq_kms_init(rdev);
477 if (r)
478 return r;
479 /* Memory manager */
480 r = radeon_object_init(rdev);
481 if (r)
482 return r;
483 r = rs600_gart_init(rdev);
484 if (r)
485 return r;
486 rs600_set_safe_registers(rdev);
487 rdev->accel_working = true;
488 r = rs600_startup(rdev);
489 if (r) {
490 /* Somethings want wront with the accel init stop accel */
491 dev_err(rdev->dev, "Disabling GPU acceleration\n");
492 rs600_suspend(rdev);
493 r100_cp_fini(rdev);
494 r100_wb_fini(rdev);
495 r100_ib_fini(rdev);
496 rs600_gart_fini(rdev);
497 radeon_irq_kms_fini(rdev);
498 rdev->accel_working = false;
499 }
428 return 0; 500 return 0;
429} 501}
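
rs600_mc_rreg() and rs600_mc_wreg() above use a classic index/data pair: program R_000070_MC_IND_INDEX with the MC register address plus the CITF_ARB0 bit (and WR_EN for a write), then access R_000074_MC_IND_DATA. The standalone sketch below mocks that protocol with plain arrays in place of MMIO so it can be compiled and run; the WREG32/RREG32 stand-ins and the mc_rreg/mc_wreg helpers are illustrative only, while the register and field macros are copied from rs600d.h.

/* Mock of the indirect MC accessor pattern; no hardware access. */
#include <stdint.h>
#include <stdio.h>

#define R_000070_MC_IND_INDEX         0x000070
#define R_000074_MC_IND_DATA          0x000074
#define S_000070_MC_IND_ADDR(x)       (((x) & 0xFFFF) << 0)
#define S_000070_MC_IND_CITF_ARB0(x)  (((x) & 0x1) << 21)
#define S_000070_MC_IND_WR_EN(x)      (((x) & 0x1) << 23)

static uint32_t mmio[0x10000];      /* mock host register space */
static uint32_t mc_regs[0x10000];   /* mock indirect MC register space */

static void WREG32(uint32_t reg, uint32_t v)
{
	mmio[reg] = v;
	/* mock hardware: writing MC_IND_DATA with WR_EN set lands in MC space */
	if (reg == R_000074_MC_IND_DATA &&
	    (mmio[R_000070_MC_IND_INDEX] & S_000070_MC_IND_WR_EN(1)))
		mc_regs[mmio[R_000070_MC_IND_INDEX] & 0xFFFF] = v;
}

static uint32_t RREG32(uint32_t reg)
{
	if (reg == R_000074_MC_IND_DATA)
		return mc_regs[mmio[R_000070_MC_IND_INDEX] & 0xFFFF];
	return mmio[reg];
}

/* Same shape as the accessors in the patch, minus the radeon_device argument. */
static uint32_t mc_rreg(uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX,
	       S_000070_MC_IND_ADDR(reg) | S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

static void mc_wreg(uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

int main(void)
{
	mc_wreg(0x0004, 0x00FF0000);                       /* e.g. MC_FB_LOCATION */
	printf("0x%08X\n", (unsigned)mc_rreg(0x0004));     /* prints 0x00FF0000 */
	return 0;
}
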
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644
index 000000000000..6dac524f6757
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -0,0 +1,406 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS600D_H__
29#define __RS600D_H__
30
31/* Registers */
32#define R_000040_GEN_INT_CNTL 0x000040
33#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
34#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
35#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
36#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
37#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
38#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
39#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
40#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
41#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
42#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
43#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
44#define C_000040_SNAPSHOT2 0xFFFFFF7F
45#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
46#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
47#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
48#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
49#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
50#define C_000040_FP2_DETECT 0xFFFFFBFF
51#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
52#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
53#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
54#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
55#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
56#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
57#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14)
58#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1)
59#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF
60#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15)
61#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1)
62#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF
63#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17)
64#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1)
65#define C_000040_I2C_INT_EN 0xFFFDFFFF
66#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19)
67#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1)
68#define C_000040_GUI_IDLE 0xFFF7FFFF
69#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24)
70#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1)
71#define C_000040_VIPH_INT_EN 0xFEFFFFFF
72#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25)
73#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1)
74#define C_000040_SW_INT_EN 0xFDFFFFFF
75#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27)
76#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1)
77#define C_000040_GEYSERVILLE 0xF7FFFFFF
78#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28)
79#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1)
80#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF
81#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29)
82#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1)
83#define C_000040_DVI_I2C_INT 0xDFFFFFFF
84#define S_000040_GUIDMA(x) (((x) & 0x1) << 30)
85#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1)
86#define C_000040_GUIDMA 0xBFFFFFFF
87#define S_000040_VIDDMA(x) (((x) & 0x1) << 31)
88#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1)
89#define C_000040_VIDDMA 0x7FFFFFFF
90#define R_00004C_BUS_CNTL 0x00004C
91#define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14)
92#define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1)
93#define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF
94#define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20)
95#define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1)
96#define C_00004C_BUS_MSI_REARM 0xFFEFFFFF
97#define R_000070_MC_IND_INDEX 0x000070
98#define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0)
99#define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF)
100#define C_000070_MC_IND_ADDR 0xFFFF0000
101#define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16)
102#define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1)
103#define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF
104#define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17)
105#define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1)
106#define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF
107#define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18)
108#define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1)
109#define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF
110#define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19)
111#define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1)
112#define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF
113#define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20)
114#define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1)
115#define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF
116#define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21)
117#define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1)
118#define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF
119#define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22)
120#define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1)
121#define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF
122#define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23)
123#define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1)
124#define C_000070_MC_IND_WR_EN 0xFF7FFFFF
125#define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24)
126#define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1)
127#define C_000070_MC_IND_RD_INV 0xFEFFFFFF
128#define R_000074_MC_IND_DATA 0x000074
129#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
130#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
131#define C_000074_MC_IND_DATA 0x00000000
132#define R_000134_HDP_FB_LOCATION 0x000134
133#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
134#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
135#define C_000134_HDP_FB_START 0xFFFF0000
136#define R_0007C0_CP_STAT 0x0007C0
137#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
138#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
139#define C_0007C0_MRU_BUSY 0xFFFFFFFE
140#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
141#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
142#define C_0007C0_MWU_BUSY 0xFFFFFFFD
143#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
144#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
145#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
146#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
147#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
148#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
149#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
150#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
151#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
152#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
153#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
154#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
155#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
156#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
157#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
158#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
159#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
160#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
161#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
162#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
163#define C_0007C0_CSI_BUSY 0xFFFFDFFF
164#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
165#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
166#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
167#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
168#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
169#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
170#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
171#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
172#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
173#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
174#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
175#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
176#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
177#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
178#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
179#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
180#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
181#define C_0007C0_CP_BUSY 0x7FFFFFFF
182#define R_000E40_RBBM_STATUS 0x000E40
183#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
184#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
185#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
186#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
187#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
188#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
189#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
190#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
191#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
192#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
193#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
194#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
195#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
196#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
197#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
198#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
199#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
200#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
201#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
202#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
203#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
204#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
205#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
206#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
207#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
208#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
209#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
210#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
211#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
212#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
213#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
214#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
215#define C_000E40_E2_BUSY 0xFFFDFFFF
216#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
217#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
218#define C_000E40_RB2D_BUSY 0xFFFBFFFF
219#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
220#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
221#define C_000E40_RB3D_BUSY 0xFFF7FFFF
222#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
223#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
224#define C_000E40_VAP_BUSY 0xFFEFFFFF
225#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
226#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
227#define C_000E40_RE_BUSY 0xFFDFFFFF
228#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
229#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
230#define C_000E40_TAM_BUSY 0xFFBFFFFF
231#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
232#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
233#define C_000E40_TDM_BUSY 0xFF7FFFFF
234#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
235#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
236#define C_000E40_PB_BUSY 0xFEFFFFFF
237#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
238#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
239#define C_000E40_TIM_BUSY 0xFDFFFFFF
240#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
241#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
242#define C_000E40_GA_BUSY 0xFBFFFFFF
243#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
244#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
245#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
246#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
247#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
248#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
249#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4
250#define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
251#define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
252#define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000
253#define R_006534_D1MODE_VBLANK_STATUS 0x006534
254#define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
255#define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
256#define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE
257#define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
258#define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
259#define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF
260#define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
261#define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
262#define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF
263#define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
264#define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
265#define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF
266#define R_006540_DxMODE_INT_MASK 0x006540
267#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0)
268#define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1)
269#define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE
270#define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4)
271#define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1)
272#define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF
273#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8)
274#define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1)
275#define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF
276#define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12)
277#define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1)
278#define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF
279#define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30)
280#define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1)
281#define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF
282#define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31)
283#define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1)
284#define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF
285#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4
286#define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
287#define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
288#define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000
289#define R_006D34_D2MODE_VBLANK_STATUS 0x006D34
290#define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
291#define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
292#define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE
293#define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
294#define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
295#define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF
296#define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
297#define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
298#define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF
299#define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
300#define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
301#define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF
302#define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC
303#define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4)
304#define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1)
305#define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF
306#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
307#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
308#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
309
310
311/* MC registers */
312#define R_000000_MC_STATUS 0x000000
313#define S_000000_MC_IDLE(x) (((x) & 0x1) << 0)
314#define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1)
315#define C_000000_MC_IDLE 0xFFFFFFFE
316#define R_000004_MC_FB_LOCATION 0x000004
317#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
318#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
319#define C_000004_MC_FB_START 0xFFFF0000
320#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
321#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
322#define C_000004_MC_FB_TOP 0x0000FFFF
323#define R_000005_MC_AGP_LOCATION 0x000005
324#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
325#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
326#define C_000005_MC_AGP_START 0xFFFF0000
327#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
328#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
329#define C_000005_MC_AGP_TOP 0x0000FFFF
330#define R_000006_AGP_BASE 0x000006
331#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
332#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
333#define C_000006_AGP_BASE_ADDR 0x00000000
334#define R_000007_AGP_BASE_2 0x000007
335#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
336#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
337#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
338#define R_000009_MC_CNTL1 0x000009
339#define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26)
340#define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1)
341#define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF
342/* FIXME don't know the various field size need feedback from AMD */
343#define R_000100_MC_PT0_CNTL 0x000100
344#define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0)
345#define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1)
346#define C_000100_ENABLE_PT 0xFFFFFFFE
347#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15)
348#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7)
349#define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF
350#define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21)
351#define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7)
352#define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF
353#define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28)
354#define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1)
355#define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF
356#define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29)
357#define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1)
358#define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF
359#define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102
360#define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0)
361#define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1)
362#define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE
363#define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1)
364#define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3)
365#define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9
366#define V_000102_PAGE_TABLE_FLAT 0
367/* R600 documentation suggest that this should be a number of pages */
368#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112
369#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114
370#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C
371#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C
372#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C
373#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C
374#define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C
375#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
376#define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
377#define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE
378#define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1)
379#define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1)
380#define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD
381#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8)
382#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3)
383#define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF
384#define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0
385#define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1
386#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2
387#define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3
388#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10)
389#define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1)
390#define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF
391#define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0
392#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
393#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11)
394#define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7)
395#define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF
396#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14)
397#define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1)
398#define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF
399#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15)
400#define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7)
401#define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF
402#define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20)
403#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
404#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
405
406#endif
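
A note on the naming convention this header follows, since the new rs600.c code relies on it everywhere: for each field, S_<reg>_<name>(x) shifts a value into position, G_<reg>_<name>(x) extracts it from a register word, C_<reg>_<name> is the complement mask used to clear it before rewriting, and V_ constants name particular field values. The short program below is an illustrative usage sketch (the macro definitions are copied from this header; the rest is not driver code) showing the read-modify-write pattern used by, for example, rs600_gart_enable():

#include <stdint.h>
#include <stdio.h>

/* From rs600d.h above: set/get/clear accessors for MC_PT0_CNTL fields. */
#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x)  (((x) & 0x7) << 15)
#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x)  (((x) >> 15) & 0x7)
#define C_000100_EFFECTIVE_L2_CACHE_SIZE     0xFFFC7FFF
#define S_000100_INVALIDATE_L2_CACHE(x)      (((x) & 0x1) << 29)
#define C_000100_INVALIDATE_L2_CACHE         0xDFFFFFFF

int main(void)
{
	uint32_t tmp = 0;

	/* program a multi-bit field: clear it with C_, then OR in S_(value) */
	tmp &= C_000100_EFFECTIVE_L2_CACHE_SIZE;
	tmp |= S_000100_EFFECTIVE_L2_CACHE_SIZE(6);

	/* pulse a single-bit field: set it with S_(1), clear it with C_ */
	tmp |= S_000100_INVALIDATE_L2_CACHE(1);
	tmp &= C_000100_INVALIDATE_L2_CACHE;

	printf("L2 cache size field = %u, raw = 0x%08X\n",
	       (unsigned)G_000100_EFFECTIVE_L2_CACHE_SIZE(tmp), (unsigned)tmp);
	return 0;
}
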