Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
 -rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  1549
1 file changed, 1489 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e20507..8c8e4d3cbaa3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,39 +28,235 @@
 #include "radeon.h"
 #include "radeon_asic.h"
 #include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
 #include "atom.h"
 #include "avivod.h"
 #include "evergreen_reg.h"
 
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
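Annotation: evergreen_pm_prepare() and evergreen_pm_finish() differ only in whether the DISP_READ_REQUEST_DISABLE bit is set or cleared on each enabled CRTC. A minimal sketch of the shared read-modify-write, factored into a hypothetical helper (evergreen_crtc_set_read_disable is not part of this patch):

/* Hypothetical helper (not in the patch): the RMW on
 * EVERGREEN_CRTC_CONTROL that pm_prepare/pm_finish both do,
 * parameterized on the direction of the bit.
 */
static void evergreen_crtc_set_read_disable(struct radeon_device *rdev,
					    struct radeon_crtc *radeon_crtc,
					    bool disable)
{
	u32 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);

	if (disable)
		tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
	else
		tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
	WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
}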
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
 	bool connected = false;
-	/* XXX */
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_3:
+		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_4:
+		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_5:
+		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_6:
+		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+
 	return connected;
 }
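Annotation: the six cases differ only in which DC_HPDx_INT_STATUS register is polled for the SENSE bit. A table-driven sketch of the same check; hpd_status_reg[] and evergreen_hpd_sense_tbl are hypothetical names, and it assumes RADEON_HPD_1 is the first enumerator:

/* Sketch only: data-driven equivalent of evergreen_hpd_sense().
 * The registers are not evenly spaced, so they are listed
 * explicitly in a lookup table.
 */
static const u32 hpd_status_reg[] = {
	DC_HPD1_INT_STATUS, DC_HPD2_INT_STATUS, DC_HPD3_INT_STATUS,
	DC_HPD4_INT_STATUS, DC_HPD5_INT_STATUS, DC_HPD6_INT_STATUS,
};

static bool evergreen_hpd_sense_tbl(struct radeon_device *rdev,
				    enum radeon_hpd_id hpd)
{
	/* assumes RADEON_HPD_1 == 0 in enum radeon_hpd_id */
	if (hpd >= ARRAY_SIZE(hpd_status_reg))
		return false;
	return RREG32(hpd_status_reg[hpd]) & DC_HPDx_SENSE;
}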
 
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd)
 {
-	/* XXX */
+	u32 tmp;
+	bool connected = evergreen_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_3:
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_4:
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_5:
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_6:
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
 }
 
 void evergreen_hpd_init(struct radeon_device *rdev)
 {
-	/* XXX */
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, tmp);
+			rdev->irq.hpd[0] = true;
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, tmp);
+			rdev->irq.hpd[1] = true;
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, tmp);
+			rdev->irq.hpd[2] = true;
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, tmp);
+			rdev->irq.hpd[3] = true;
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, tmp);
+			rdev->irq.hpd[4] = true;
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, tmp);
+			rdev->irq.hpd[5] = true;
+			break;
+		default:
+			break;
+		}
+	}
+	if (rdev->irq.installed)
+		evergreen_irq_set(rdev);
 }
 
-
-void evergreen_bandwidth_update(struct radeon_device *rdev)
+void evergreen_hpd_fini(struct radeon_device *rdev)
 {
-	/* XXX */
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, 0);
+			rdev->irq.hpd[0] = false;
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, 0);
+			rdev->irq.hpd[1] = false;
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, 0);
+			rdev->irq.hpd[2] = false;
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, 0);
+			rdev->irq.hpd[3] = false;
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, 0);
+			rdev->irq.hpd[4] = false;
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, 0);
+			rdev->irq.hpd[5] = false;
+			break;
+		default:
+			break;
+		}
+	}
 }
 
-void evergreen_hpd_fini(struct radeon_device *rdev)
+void evergreen_bandwidth_update(struct radeon_device *rdev)
 {
 	/* XXX */
 }
@@ -83,10 +279,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
 /*
  * GART
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
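Annotation: the flush is requested by writing REQUEST_TYPE(1) and then polling the RESPONSE_TYPE field of the same register: a value of 2 signals failure, any other non-zero value signals completion, and the loop gives up after rdev->usec_timeout iterations of 1us each. A standalone sketch of that poll pattern with explicit return codes (poll_response_type is a hypothetical name; the mask/shift values come from evergreend.h):

/* Sketch of the poll pattern above, assuming the usual
 * mask/shift field encoding from evergreend.h.
 */
static int poll_response_type(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		u32 resp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		u32 type = (resp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;

		if (type == 2)
			return -EIO;		/* flush failed */
		if (type)
			return 0;		/* flush done */
		udelay(1);			/* still pending, retry */
	}
	return -ETIMEDOUT;			/* no response in time */
}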
 int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r, i;
+	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +338,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(rdev->dummy_page.addr >> 12));
-	for (i = 1; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 
-	r600_pcie_gart_tlb_flush(rdev);
+	evergreen_pcie_gart_tlb_flush(rdev);
 	rdev->gart.ready = true;
 	return 0;
 }
@@ -132,11 +348,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i, r;
+	int r;
 
 	/* Disable all tables */
-	for (i = 0; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +389,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
 void evergreen_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
 
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +408,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-	for (i = 0; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
 static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +615,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
 	rv515_vga_render_disable(rdev);
 }
 
-#if 0
 /*
  * CP.
  */
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
-	/* XXX */
-}
-
 
 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 {
-	/* XXX */
+	const __be32 *fw_data;
+	int i;
 
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
 	return 0;
 }
 
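Annotation: the ucode images ship big-endian in the firmware files, so each dword is converted with be32_to_cpup() before being written to the ucode port. A worked example of what that conversion means (example_swap is illustrative only):

/* Worked example (not driver code): be32_to_cpup() reads a
 * big-endian dword from memory and returns it in CPU order,
 * which is a byte swap on little-endian hosts and a no-op on
 * big-endian ones.
 */
#include <asm/byteorder.h>

static u32 example_swap(void)
{
	__be32 raw = cpu_to_be32(0x11223344);	/* bytes 11 22 33 44, as in the .bin */

	return be32_to_cpup(&raw);		/* == 0x11223344 on any host */
}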
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	/* Set ring buffer size */
+	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	WREG32(CP_RB_WPTR, 0);
+	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+	rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+	r600_cp_start(rdev);
+	rdev->cp.ready = true;
+	r = radeon_ring_test(rdev);
+	if (r) {
+		rdev->cp.ready = false;
+		return r;
+	}
+	return 0;
+}
+
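Annotation: CP_RB_CNTL encodes the ring size as a log2 value, which is why drm_order() (ceiling log2) is applied to ring_size / 8. A worked example assuming a 1 MiB ring (example_rb_bufsz is illustrative, not driver code):

/* Worked example: deriving the CP_RB_CNTL size fields for a
 * hypothetical 1 MiB ring.
 */
static u32 example_rb_bufsz(void)
{
	u32 ring_size = 1024 * 1024;			/* 1 MiB, in bytes */
	u32 rb_bufsz = drm_order(ring_size / 8);	/* log2(131072) = 17 */

	/* RADEON_GPU_PAGE_SIZE is 4096; drm_order(4096 / 8) = 9 goes
	 * into the block-size field starting at bit 8. RB_NO_UPDATE
	 * disables read-pointer writeback here anyway. */
	return RB_NO_UPDATE | (drm_order(4096 / 8) << 8) | rb_bufsz;
}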
 /*
  * Core functions
  */
-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+						  u32 num_tile_pipes,
 						  u32 num_backends,
 						  u32 backend_disable_mask)
 {
 	u32 backend_map = 0;
+	u32 enabled_backends_mask = 0;
+	u32 enabled_backends_count = 0;
+	u32 cur_pipe;
+	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+	u32 cur_backend = 0;
+	u32 i;
+	bool force_no_swizzle;
+
+	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+		num_tile_pipes = EVERGREEN_MAX_PIPES;
+	if (num_tile_pipes < 1)
+		num_tile_pipes = 1;
+	if (num_backends > EVERGREEN_MAX_BACKENDS)
+		num_backends = EVERGREEN_MAX_BACKENDS;
+	if (num_backends < 1)
+		num_backends = 1;
+
+	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+		if (((backend_disable_mask >> i) & 1) == 0) {
+			enabled_backends_mask |= (1 << i);
+			++enabled_backends_count;
+		}
+		if (enabled_backends_count == num_backends)
+			break;
+	}
+
+	if (enabled_backends_count == 0) {
+		enabled_backends_mask = 1;
+		enabled_backends_count = 1;
+	}
+
+	if (enabled_backends_count != num_backends)
+		num_backends = enabled_backends_count;
+
+	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+		force_no_swizzle = false;
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+	case CHIP_JUNIPER:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+	if (force_no_swizzle) {
+		bool last_backend_enabled = false;
+
+		force_no_swizzle = false;
+		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+			if (((enabled_backends_mask >> i) & 1) == 1) {
+				if (last_backend_enabled)
+					force_no_swizzle = true;
+				last_backend_enabled = true;
+			} else
+				last_backend_enabled = false;
+		}
+	}
+
+	switch (num_tile_pipes) {
+	case 1:
+	case 3:
+	case 5:
+	case 7:
+		DRM_ERROR("odd number of pipes!\n");
+		break;
+	case 2:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		break;
+	case 4:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 1;
+			swizzle_pipe[3] = 3;
+		}
+		break;
+	case 6:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 1;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 5;
+		}
+		break;
+	case 8:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+			swizzle_pipe[6] = 6;
+			swizzle_pipe[7] = 7;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 6;
+			swizzle_pipe[4] = 1;
+			swizzle_pipe[5] = 3;
+			swizzle_pipe[6] = 5;
+			swizzle_pipe[7] = 7;
+		}
+		break;
+	}
+
+	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+		while (((1 << cur_backend) & enabled_backends_mask) == 0)
+			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+	}
 
 	return backend_map;
 }
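Annotation: backend_map packs one 4-bit backend index per pipe, at nibble position swizzle_pipe[pipe], assigning enabled backends round-robin. A worked example assuming 4 pipes, backends 0 and 1 enabled, and the identity swizzle (example_backend_map is illustrative):

/* Worked example: 4 pipes, enabled_backends_mask = 0x3,
 * identity swizzle. Pipes get backends 0, 1, 0, 1.
 */
static u32 example_backend_map(void)
{
	u32 swizzle_pipe[4] = { 0, 1, 2, 3 };
	u32 enabled_backends_mask = 0x3;	/* backends 0 and 1 */
	u32 backend_map = 0, cur_backend = 0, pipe;

	for (pipe = 0; pipe < 4; pipe++) {
		/* skip disabled backends */
		while (!((1 << cur_backend) & enabled_backends_mask))
			cur_backend = (cur_backend + 1) % 8;
		backend_map |= (cur_backend & 0xf) << (swizzle_pipe[pipe] * 4);
		cur_backend = (cur_backend + 1) % 8;
	}
	return backend_map;	/* 0x1010: nibbles 0,1,0,1 from low to high */
}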
-#endif
 
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
-	/* XXX */
+	u32 cc_rb_backend_disable = 0;
+	u32 cc_gc_shader_pipe_config;
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 gb_backend_map;
+	u32 grbm_gfx_index;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 sq_config;
+	u32 sq_lds_resource_mgmt;
+	u32 sq_gpr_resource_mgmt_1;
+	u32 sq_gpr_resource_mgmt_2;
+	u32 sq_gpr_resource_mgmt_3;
+	u32 sq_thread_resource_mgmt;
+	u32 sq_thread_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_1;
+	u32 sq_stack_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_3;
+	u32 vgt_cache_invalidation;
+	u32 hdp_host_path_cntl;
+	int i, j, num_shader_engines, ps_thread_count;
+
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_JUNIPER:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_REDWOOD:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_CEDAR:
+	default:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+	cc_gc_shader_pipe_config |=
+		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+				  & EVERGREEN_MAX_PIPES_MASK);
+	cc_gc_shader_pipe_config |=
+		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+			       & EVERGREEN_MAX_SIMDS_MASK);
+
+	cc_rb_backend_disable =
+		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+				& EVERGREEN_MAX_BACKENDS_MASK);
+
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	switch (rdev->config.evergreen.max_tile_pipes) {
+	case 1:
+	default:
+		gb_addr_config |= NUM_PIPES(0);
+		break;
+	case 2:
+		gb_addr_config |= NUM_PIPES(1);
+		break;
+	case 4:
+		gb_addr_config |= NUM_PIPES(2);
+		break;
+	case 8:
+		gb_addr_config |= NUM_PIPES(3);
+		break;
+	}
+
+	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+		gb_addr_config |= ROW_SIZE(2);
+	else
+		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
+	if (rdev->ddev->pdev->device == 0x689e) {
+		u32 efuse_straps_4;
+		u32 efuse_straps_3;
+		u8 efuse_box_bit_131_124;
+
+		WREG32(RCU_IND_INDEX, 0x204);
+		efuse_straps_4 = RREG32(RCU_IND_DATA);
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
+		switch(efuse_box_bit_131_124) {
+		case 0x00:
+			gb_backend_map = 0x76543210;
+			break;
+		case 0x55:
+			gb_backend_map = 0x77553311;
+			break;
+		case 0x56:
+			gb_backend_map = 0x77553300;
+			break;
+		case 0x59:
+			gb_backend_map = 0x77552211;
+			break;
+		case 0x66:
+			gb_backend_map = 0x77443300;
+			break;
+		case 0x99:
+			gb_backend_map = 0x66552211;
+			break;
+		case 0x5a:
+			gb_backend_map = 0x77552200;
+			break;
+		case 0xaa:
+			gb_backend_map = 0x66442200;
+			break;
+		case 0x95:
+			gb_backend_map = 0x66553311;
+			break;
+		default:
+			DRM_ERROR("bad backend map, using default\n");
+			gb_backend_map =
+				evergreen_get_tile_pipe_to_backend_map(rdev,
+								       rdev->config.evergreen.max_tile_pipes,
+								       rdev->config.evergreen.max_backends,
+								       ((EVERGREEN_MAX_BACKENDS_MASK <<
+									 rdev->config.evergreen.max_backends) &
+									EVERGREEN_MAX_BACKENDS_MASK));
+			break;
+		}
+	} else if (rdev->ddev->pdev->device == 0x68b9) {
+		u32 efuse_straps_3;
+		u8 efuse_box_bit_127_124;
+
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28;
+
+		switch(efuse_box_bit_127_124) {
+		case 0x0:
+			gb_backend_map = 0x00003210;
+			break;
+		case 0x5:
+		case 0x6:
+		case 0x9:
+		case 0xa:
+			gb_backend_map = 0x00003311;
+			break;
+		default:
+			DRM_ERROR("bad backend map, using default\n");
+			gb_backend_map =
+				evergreen_get_tile_pipe_to_backend_map(rdev,
+								       rdev->config.evergreen.max_tile_pipes,
+								       rdev->config.evergreen.max_backends,
+								       ((EVERGREEN_MAX_BACKENDS_MASK <<
+									 rdev->config.evergreen.max_backends) &
+									EVERGREEN_MAX_BACKENDS_MASK));
+			break;
+		}
+	} else
+		gb_backend_map =
+			evergreen_get_tile_pipe_to_backend_map(rdev,
+							       rdev->config.evergreen.max_tile_pipes,
+							       rdev->config.evergreen.max_backends,
+							       ((EVERGREEN_MAX_BACKENDS_MASK <<
+								 rdev->config.evergreen.max_backends) &
+								EVERGREEN_MAX_BACKENDS_MASK));
+
+	WREG32(GB_BACKEND_MAP, gb_backend_map);
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
+	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+		u32 sp = cc_gc_shader_pipe_config;
+		u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+		if (i == num_shader_engines) {
+			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+		}
+
+		WREG32(GRBM_GFX_INDEX, gfx);
+		WREG32(RLC_GFX_INDEX, gfx);
+
+		WREG32(CC_RB_BACKEND_DISABLE, rb);
+		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+	}
+
+	grbm_gfx_index |= SE_BROADCAST_WRITES;
+	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+			     SYNC_GRADIENT |
+			     SYNC_WALKER |
+			     SYNC_ALIGNER));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+	WREG32(SPI_CONFIG_CNTL, 0);
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	if (rdev->family == CHIP_CEDAR)
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+	if (rdev->family == CHIP_CEDAR)
+		ps_thread_count = 96;
+	else
+		ps_thread_count = 128;
+
+	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+
+	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	if (rdev->family == CHIP_CEDAR)
+		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+	else
+		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+
 }
 
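Annotation: the per-stage thread pools split max_threads minus the PS reservation evenly across the five remaining stages, rounded down to a multiple of 8; for Cypress (248 threads, 128 reserved for PS) that is (248 - 128) / 6 = 20, rounded down to 16 threads per stage. A worked sketch of that arithmetic (example_vs_threads is illustrative, not driver code):

/* Worked example with Cypress numbers: derivation of the
 * VS/GS/ES/HS/LS thread counts programmed above.
 */
static u32 example_vs_threads(void)
{
	u32 max_threads = 248;		/* rdev->config.evergreen.max_threads */
	u32 ps_thread_count = 128;	/* non-Cedar default */

	/* (248 - 128) / 6 = 20; / 8 * 8 rounds down to 16 */
	return ((max_threads - ps_thread_count) / 6 / 8) * 8;
}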
 int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1307,627 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
 }
 
-int evergreen_gpu_reset(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 {
 	/* FIXME: implement for evergreen */
+	return false;
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 srbm_reset = 0;
+	u32 grbm_reset = 0;
+
+	dev_info(rdev->dev, "GPU softreset \n");
+	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(SRBM_STATUS));
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	/* reset all the gfx blocks */
+	grbm_reset = (SOFT_RESET_CP |
+		      SOFT_RESET_CB |
+		      SOFT_RESET_DB |
+		      SOFT_RESET_PA |
+		      SOFT_RESET_SC |
+		      SOFT_RESET_SPI |
+		      SOFT_RESET_SH |
+		      SOFT_RESET_SX |
+		      SOFT_RESET_TC |
+		      SOFT_RESET_TA |
+		      SOFT_RESET_VC |
+		      SOFT_RESET_VGT);
+
+	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+	WREG32(GRBM_SOFT_RESET, grbm_reset);
+	(void)RREG32(GRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(GRBM_SOFT_RESET, 0);
+	(void)RREG32(GRBM_SOFT_RESET);
+
+	/* reset all the system blocks */
+	srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+	dev_info(rdev->dev, "  SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+	WREG32(SRBM_SOFT_RESET, srbm_reset);
+	(void)RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+	(void)RREG32(SRBM_SOFT_RESET);
+	/* Wait a little for things to settle down */
+	udelay(50);
+	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(SRBM_STATUS));
+	/* After reset we need to reinit the asic as GPU often endup in an
+	 * incoherent state.
+	 */
+	atom_asic_init(rdev->mode_info.atom_context);
+	evergreen_mc_resume(rdev, &save);
+	return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+	return evergreen_gpu_soft_reset(rdev);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	switch (crtc) {
+	case 0:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	case 1:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	case 2:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	case 3:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	case 4:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	case 5:
+		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	default:
+		return 0;
+	}
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL, 0);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD2_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD3_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD4_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD5_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD6_INT_CONTROL, tmp);
+
+}
+
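Annotation: evergreen_disable_interrupt_state() masks every source but deliberately keeps the HPD polarity bit, so the choice made earlier by evergreen_hpd_set_polarity() survives. A sketch of that bit manipulation in isolation (the helper name and bit position are illustrative):

/* Sketch (illustrative bit position): clear an interrupt-control
 * register while preserving only its polarity bit.
 */
static u32 example_keep_polarity(u32 int_control)
{
	const u32 POLARITY_BIT = (1 << 8);	/* stand-in for DC_HPDx_INT_POLARITY */

	/* enable/ack bits are dropped; polarity survives */
	return int_control & POLARITY_BIT;
}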
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+	u32 grbm_int_cntl = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		evergreen_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+	if (rdev->irq.sw_int) {
+		DRM_DEBUG("evergreen_irq_set: sw int\n");
+		cp_int_cntl |= RB_INT_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5]) {
+		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.gui_idle) {
+		DRM_DEBUG("gui idle\n");
+		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+	}
+
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+	WREG32(DC_HPD1_INT_CONTROL, hpd1);
+	WREG32(DC_HPD2_INT_CONTROL, hpd2);
+	WREG32(DC_HPD3_INT_CONTROL, hpd3);
+	WREG32(DC_HPD4_INT_CONTROL, hpd4);
+	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
 	return 0;
 }
 
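Annotation: evergreen_irq_set() builds each enable word by reading the current register with the enable bit cleared, then ORing it back in only when that source is requested, so unrelated bits (such as polarity) are untouched. A minimal sketch of the pattern for one HPD pad (example_hpd_enable is a hypothetical helper):

/* Sketch: the clear-then-conditionally-set pattern used for the
 * per-pad DC_HPDx_INT_CONTROL enable bits above.
 */
static u32 example_hpd_enable(u32 cur_ctl, bool want_irq)
{
	u32 v = cur_ctl & ~DC_HPDx_INT_EN;	/* default: disabled */

	if (want_irq)
		v |= DC_HPDx_INT_EN;		/* re-enable on request */
	return v;
}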
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+				     u32 *disp_int,
+				     u32 *disp_int_cont,
+				     u32 *disp_int_cont2,
+				     u32 *disp_int_cont3,
+				     u32 *disp_int_cont4,
+				     u32 *disp_int_cont5)
+{
+	u32 tmp;
+
+	*disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	*disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	*disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	*disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	*disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+	if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+	if (*disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+}
+
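Annotation: evergreen_irq_disable() below calls evergreen_irq_ack() purely for its side effect of clearing any latched sources, and evergreen_get_ih_wptr() keeps the IH read pointer inside the power-of-two ring with ptr_mask, restarting one 16-byte vector past the write pointer after an overflow. A worked sketch of that wrap-around arithmetic, assuming a 64 KiB IH ring (example_overflow_rptr is illustrative):

/* Worked example (64 KiB IH ring): wrapping pointers with
 * ptr_mask and picking the post-overflow read pointer.
 */
static u32 example_overflow_rptr(u32 wptr)
{
	u32 ptr_mask = (64 * 1024) - 1;	/* ring size is a power of two */

	/* one 16-byte vector past wptr, wrapped into the ring */
	return (wptr + 16) & ptr_mask;
}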
1639 | void evergreen_irq_disable(struct radeon_device *rdev) | ||
1640 | { | ||
1641 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
1642 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | ||
1643 | |||
1644 | r600_disable_interrupts(rdev); | ||
1645 | /* Wait and acknowledge irq */ | ||
1646 | mdelay(1); | ||
1647 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | ||
1648 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | ||
1649 | evergreen_disable_interrupt_state(rdev); | ||
1650 | } | ||
1651 | |||
1652 | static void evergreen_irq_suspend(struct radeon_device *rdev) | ||
1653 | { | ||
1654 | evergreen_irq_disable(rdev); | ||
1655 | r600_rlc_stop(rdev); | ||
1656 | } | ||
1657 | |||
1658 | static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | ||
1659 | { | ||
1660 | u32 wptr, tmp; | ||
1661 | |||
1662 | /* XXX use writeback */ | ||
1663 | wptr = RREG32(IH_RB_WPTR); | ||
1664 | |||
1665 | if (wptr & RB_OVERFLOW) { | ||
1666 | /* When a ring buffer overflow happen start parsing interrupt | ||
1667 | * from the last not overwritten vector (wptr + 16). Hopefully | ||
1668 | * this should allow us to catchup. | ||
1669 | */ | ||
1670 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | ||
1671 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | ||
1672 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | ||
1673 | tmp = RREG32(IH_RB_CNTL); | ||
1674 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | ||
1675 | WREG32(IH_RB_CNTL, tmp); | ||
1676 | } | ||
1677 | return (wptr & rdev->ih.ptr_mask); | ||
1678 | } | ||
1679 | |||
1680 | int evergreen_irq_process(struct radeon_device *rdev) | ||
1681 | { | ||
1682 | u32 wptr = evergreen_get_ih_wptr(rdev); | ||
1683 | u32 rptr = rdev->ih.rptr; | ||
1684 | u32 src_id, src_data; | ||
1685 | u32 ring_index; | ||
1686 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
1687 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | ||
1688 | unsigned long flags; | ||
1689 | bool queue_hotplug = false; | ||
1690 | |||
1691 | DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | ||
1692 | if (!rdev->ih.enabled) | ||
1693 | return IRQ_NONE; | ||
1694 | |||
1695 | spin_lock_irqsave(&rdev->ih.lock, flags); | ||
1696 | |||
1697 | if (rptr == wptr) { | ||
1698 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
1699 | return IRQ_NONE; | ||
1700 | } | ||
1701 | if (rdev->shutdown) { | ||
1702 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
1703 | return IRQ_NONE; | ||
1704 | } | ||
1705 | |||
1706 | restart_ih: | ||
1707 | /* display interrupts */ | ||
1708 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | ||
1709 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | ||
1710 | |||
1711 | rdev->ih.wptr = wptr; | ||
1712 | while (rptr != wptr) { | ||
1713 | /* wptr/rptr are in bytes! */ | ||
1714 | ring_index = rptr / 4; | ||
1715 | src_id = rdev->ih.ring[ring_index] & 0xff; | ||
1716 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | ||
1717 | |||
1718 | switch (src_id) { | ||
1719 | case 1: /* D1 vblank/vline */ | ||
1720 | switch (src_data) { | ||
1721 | case 0: /* D1 vblank */ | ||
1722 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | ||
1723 | drm_handle_vblank(rdev->ddev, 0); | ||
1724 | wake_up(&rdev->irq.vblank_queue); | ||
1725 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | ||
1726 | DRM_DEBUG("IH: D1 vblank\n"); | ||
1727 | } | ||
1728 | break; | ||
1729 | case 1: /* D1 vline */ | ||
1730 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | ||
1731 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | ||
1732 | DRM_DEBUG("IH: D1 vline\n"); | ||
1733 | } | ||
1734 | break; | ||
1735 | default: | ||
1736 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1737 | break; | ||
1738 | } | ||
1739 | break; | ||
1740 | case 2: /* D2 vblank/vline */ | ||
1741 | switch (src_data) { | ||
1742 | case 0: /* D2 vblank */ | ||
1743 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { | ||
1744 | drm_handle_vblank(rdev->ddev, 1); | ||
1745 | wake_up(&rdev->irq.vblank_queue); | ||
1746 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | ||
1747 | DRM_DEBUG("IH: D2 vblank\n"); | ||
1748 | } | ||
1749 | break; | ||
1750 | case 1: /* D2 vline */ | ||
1751 | if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { | ||
1752 | disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; | ||
1753 | DRM_DEBUG("IH: D2 vline\n"); | ||
1754 | } | ||
1755 | break; | ||
1756 | default: | ||
1757 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1758 | break; | ||
1759 | } | ||
1760 | break; | ||
1761 | case 3: /* D3 vblank/vline */ | ||
1762 | switch (src_data) { | ||
1763 | case 0: /* D3 vblank */ | ||
1764 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { | ||
1765 | drm_handle_vblank(rdev->ddev, 2); | ||
1766 | wake_up(&rdev->irq.vblank_queue); | ||
1767 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | ||
1768 | DRM_DEBUG("IH: D3 vblank\n"); | ||
1769 | } | ||
1770 | break; | ||
1771 | case 1: /* D3 vline */ | ||
1772 | if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { | ||
1773 | disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; | ||
1774 | DRM_DEBUG("IH: D3 vline\n"); | ||
1775 | } | ||
1776 | break; | ||
1777 | default: | ||
1778 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1779 | break; | ||
1780 | } | ||
1781 | break; | ||
1782 | case 4: /* D4 vblank/vline */ | ||
1783 | switch (src_data) { | ||
1784 | case 0: /* D4 vblank */ | ||
1785 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { | ||
1786 | drm_handle_vblank(rdev->ddev, 3); | ||
1787 | wake_up(&rdev->irq.vblank_queue); | ||
1788 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | ||
1789 | DRM_DEBUG("IH: D4 vblank\n"); | ||
1790 | } | ||
1791 | break; | ||
1792 | case 1: /* D4 vline */ | ||
1793 | if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { | ||
1794 | disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; | ||
1795 | DRM_DEBUG("IH: D4 vline\n"); | ||
1796 | } | ||
1797 | break; | ||
1798 | default: | ||
1799 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1800 | break; | ||
1801 | } | ||
1802 | break; | ||
1803 | case 5: /* D5 vblank/vline */ | ||
1804 | switch (src_data) { | ||
1805 | case 0: /* D5 vblank */ | ||
1806 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { | ||
1807 | drm_handle_vblank(rdev->ddev, 4); | ||
1808 | wake_up(&rdev->irq.vblank_queue); | ||
1809 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | ||
1810 | DRM_DEBUG("IH: D5 vblank\n"); | ||
1811 | } | ||
1812 | break; | ||
1813 | case 1: /* D5 vline */ | ||
1814 | if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { | ||
1815 | disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; | ||
1816 | DRM_DEBUG("IH: D5 vline\n"); | ||
1817 | } | ||
1818 | break; | ||
1819 | default: | ||
1820 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1821 | break; | ||
1822 | } | ||
1823 | break; | ||
1824 | case 6: /* D6 vblank/vline */ | ||
1825 | switch (src_data) { | ||
1826 | case 0: /* D6 vblank */ | ||
1827 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { | ||
1828 | drm_handle_vblank(rdev->ddev, 5); | ||
1829 | wake_up(&rdev->irq.vblank_queue); | ||
1830 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | ||
1831 | DRM_DEBUG("IH: D6 vblank\n"); | ||
1832 | } | ||
1833 | break; | ||
1834 | case 1: /* D6 vline */ | ||
1835 | if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { | ||
1836 | disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; | ||
1837 | DRM_DEBUG("IH: D6 vline\n"); | ||
1838 | } | ||
1839 | break; | ||
1840 | default: | ||
1841 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1842 | break; | ||
1843 | } | ||
1844 | break; | ||
1845 | case 42: /* HPD hotplug */ | ||
1846 | switch (src_data) { | ||
1847 | case 0: | ||
1848 | if (disp_int & DC_HPD1_INTERRUPT) { | ||
1849 | disp_int &= ~DC_HPD1_INTERRUPT; | ||
1850 | queue_hotplug = true; | ||
1851 | DRM_DEBUG("IH: HPD1\n"); | ||
1852 | } | ||
1853 | break; | ||
1854 | case 1: | ||
1855 | if (disp_int_cont & DC_HPD2_INTERRUPT) { | ||
1856 | disp_int_cont &= ~DC_HPD2_INTERRUPT; | ||
1857 | queue_hotplug = true; | ||
1858 | DRM_DEBUG("IH: HPD2\n"); | ||
1859 | } | ||
1860 | break; | ||
1861 | case 2: | ||
1862 | if (disp_int_cont2 & DC_HPD3_INTERRUPT) { | ||
1863 | disp_int_cont2 &= ~DC_HPD3_INTERRUPT; | ||
1864 | queue_hotplug = true; | ||
1865 | DRM_DEBUG("IH: HPD3\n"); | ||
1866 | } | ||
1867 | break; | ||
1868 | case 3: | ||
1869 | if (disp_int_cont3 & DC_HPD4_INTERRUPT) { | ||
1870 | disp_int_cont3 &= ~DC_HPD4_INTERRUPT; | ||
1871 | queue_hotplug = true; | ||
1872 | DRM_DEBUG("IH: HPD4\n"); | ||
1873 | } | ||
1874 | break; | ||
1875 | case 4: | ||
1876 | if (disp_int_cont4 & DC_HPD5_INTERRUPT) { | ||
1877 | disp_int_cont4 &= ~DC_HPD5_INTERRUPT; | ||
1878 | queue_hotplug = true; | ||
1879 | DRM_DEBUG("IH: HPD5\n"); | ||
1880 | } | ||
1881 | break; | ||
1882 | case 5: | ||
1883 | if (disp_int_cont5 & DC_HPD6_INTERRUPT) { | ||
1884 | disp_int_cont5 &= ~DC_HPD6_INTERRUPT; | ||
1885 | queue_hotplug = true; | ||
1886 | DRM_DEBUG("IH: HPD6\n"); | ||
1887 | } | ||
1888 | break; | ||
1889 | default: | ||
1890 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1891 | break; | ||
1892 | } | ||
1893 | break; | ||
1894 | case 176: /* CP_INT in ring buffer */ | ||
1895 | case 177: /* CP_INT in IB1 */ | ||
1896 | case 178: /* CP_INT in IB2 */ | ||
1897 | DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); | ||
1898 | radeon_fence_process(rdev); | ||
1899 | break; | ||
1900 | case 181: /* CP EOP event */ | ||
1901 | DRM_DEBUG("IH: CP EOP\n"); | ||
1902 | break; | ||
1903 | case 233: /* GUI IDLE */ | ||
1904 | DRM_DEBUG("IH: GUI idle\n"); | ||
1905 | rdev->pm.gui_idle = true; | ||
1906 | wake_up(&rdev->irq.idle_queue); | ||
1907 | break; | ||
1908 | default: | ||
1909 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
1910 | break; | ||
1911 | } | ||
1912 | |||
1913 | /* wptr/rptr are in bytes! */ | ||
1914 | rptr += 16; | ||
1915 | rptr &= rdev->ih.ptr_mask; | ||
1916 | } | ||
1917 | /* make sure wptr hasn't changed while processing */ | ||
1918 | wptr = evergreen_get_ih_wptr(rdev); | ||
1919 | if (wptr != rdev->ih.wptr) | ||
1920 | goto restart_ih; | ||
1921 | if (queue_hotplug) | ||
1922 | queue_work(rdev->wq, &rdev->hotplug_work); | ||
1923 | rdev->ih.rptr = rptr; | ||
1924 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
1925 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
1926 | return IRQ_HANDLED; | ||
1927 | } | ||
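
Stripped of the per-source dispatch, the handler above is a standard single-consumer ring drain: latch wptr, walk 16-byte vectors until rptr catches up, re-read wptr and restart if the hardware queued more work in the meantime, and only then publish rptr back to the chip. A condensed sketch of that shape (the struct ih_ring type and the callbacks are assumptions made for the sketch, not driver API):

	#include <stdint.h>
	typedef uint32_t u32;

	struct ih_ring {
		u32 *ring;      /* CPU view of the IH ring buffer */
		u32 rptr, wptr; /* byte offsets into the ring */
		u32 ptr_mask;   /* ring size in bytes minus one */
	};

	static void ih_drain_sketch(struct ih_ring *ih,
				    u32 (*get_wptr)(struct ih_ring *ih),
				    void (*dispatch)(u32 src_id, u32 src_data))
	{
		u32 wptr = get_wptr(ih);
		u32 rptr = ih->rptr;

	again:
		ih->wptr = wptr;
		while (rptr != wptr) {
			u32 idx = rptr / 4; /* vectors are 16 bytes, i.e. 4 dwords */

			dispatch(ih->ring[idx] & 0xff,           /* source id */
				 ih->ring[idx + 1] & 0xfffffff); /* source data */
			rptr = (rptr + 16) & ih->ptr_mask;
		}
		/* the hw may have appended vectors while we were draining */
		wptr = get_wptr(ih);
		if (wptr != ih->wptr)
			goto again;
		ih->rptr = rptr; /* the driver also writes this to IH_RB_RPTR */
	}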
1928 | |||
496 | static int evergreen_startup(struct radeon_device *rdev) | 1929 | static int evergreen_startup(struct radeon_device *rdev) |
497 | { | 1930 | { |
498 | #if 0 | ||
499 | int r; | 1931 | int r; |
500 | 1932 | ||
501 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 1933 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
@@ -505,17 +1937,15 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
505 | return r; | 1937 | return r; |
506 | } | 1938 | } |
507 | } | 1939 | } |
508 | #endif | 1940 | |
509 | evergreen_mc_program(rdev); | 1941 | evergreen_mc_program(rdev); |
510 | #if 0 | ||
511 | if (rdev->flags & RADEON_IS_AGP) { | 1942 | if (rdev->flags & RADEON_IS_AGP) { |
512 | evergreem_agp_enable(rdev); | 1943 | evergreen_agp_enable(rdev); |
513 | } else { | 1944 | } else { |
514 | r = evergreen_pcie_gart_enable(rdev); | 1945 | r = evergreen_pcie_gart_enable(rdev); |
515 | if (r) | 1946 | if (r) |
516 | return r; | 1947 | return r; |
517 | } | 1948 | } |
518 | #endif | ||
519 | evergreen_gpu_init(rdev); | 1949 | evergreen_gpu_init(rdev); |
520 | #if 0 | 1950 | #if 0 |
521 | if (!rdev->r600_blit.shader_obj) { | 1951 | if (!rdev->r600_blit.shader_obj) { |
@@ -536,6 +1966,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
536 | DRM_ERROR("failed to pin blit object %d\n", r); | 1966 | DRM_ERROR("failed to pin blit object %d\n", r); |
537 | return r; | 1967 | return r; |
538 | } | 1968 | } |
1969 | #endif | ||
539 | 1970 | ||
540 | /* Enable IRQ */ | 1971 | /* Enable IRQ */ |
541 | r = r600_irq_init(rdev); | 1972 | r = r600_irq_init(rdev); |
@@ -544,7 +1975,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
544 | radeon_irq_kms_fini(rdev); | 1975 | radeon_irq_kms_fini(rdev); |
545 | return r; | 1976 | return r; |
546 | } | 1977 | } |
547 | r600_irq_set(rdev); | 1978 | evergreen_irq_set(rdev); |
548 | 1979 | ||
549 | r = radeon_ring_init(rdev, rdev->cp.ring_size); | 1980 | r = radeon_ring_init(rdev, rdev->cp.ring_size); |
550 | if (r) | 1981 | if (r) |
@@ -552,12 +1983,12 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
552 | r = evergreen_cp_load_microcode(rdev); | 1983 | r = evergreen_cp_load_microcode(rdev); |
553 | if (r) | 1984 | if (r) |
554 | return r; | 1985 | return r; |
555 | r = r600_cp_resume(rdev); | 1986 | r = evergreen_cp_resume(rdev); |
556 | if (r) | 1987 | if (r) |
557 | return r; | 1988 | return r; |
558 | /* write back buffer is not vital so don't worry about failure */ | 1989 | /* write back buffer is not vital so don't worry about failure */ |
559 | r600_wb_enable(rdev); | 1990 | r600_wb_enable(rdev); |
560 | #endif | 1991 | |
561 | return 0; | 1992 | return 0; |
562 | } | 1993 | } |
563 | 1994 | ||
@@ -582,13 +2013,13 @@ int evergreen_resume(struct radeon_device *rdev) | |||
582 | DRM_ERROR("evergreen startup failed on resume\n"); | 2013 | DRM_ERROR("evergreen startup failed on resume\n"); |
583 | return r; | 2014 | return r; |
584 | } | 2015 | } |
585 | #if 0 | 2016 | |
586 | r = r600_ib_test(rdev); | 2017 | r = r600_ib_test(rdev); |
587 | if (r) { | 2018 | if (r) { |
588 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 2019 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
589 | return r; | 2020 | return r; |
590 | } | 2021 | } |
591 | #endif | 2022 | |
592 | return r; | 2023 | return r; |
593 | 2024 | ||
594 | } | 2025 | } |
@@ -597,12 +2028,14 @@ int evergreen_suspend(struct radeon_device *rdev) | |||
597 | { | 2028 | { |
598 | #if 0 | 2029 | #if 0 |
599 | int r; | 2030 | int r; |
600 | 2031 | #endif | |
601 | /* FIXME: we should wait for ring to be empty */ | 2032 | /* FIXME: we should wait for ring to be empty */ |
602 | r700_cp_stop(rdev); | 2033 | r700_cp_stop(rdev); |
603 | rdev->cp.ready = false; | 2034 | rdev->cp.ready = false; |
2035 | evergreen_irq_suspend(rdev); | ||
604 | r600_wb_disable(rdev); | 2036 | r600_wb_disable(rdev); |
605 | evergreen_pcie_gart_disable(rdev); | 2037 | evergreen_pcie_gart_disable(rdev); |
2038 | #if 0 | ||
606 | /* unpin shaders bo */ | 2039 | /* unpin shaders bo */ |
607 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 2040 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
608 | if (likely(r == 0)) { | 2041 | if (likely(r == 0)) { |
@@ -682,8 +2115,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
682 | r = radeon_clocks_init(rdev); | 2115 | r = radeon_clocks_init(rdev); |
683 | if (r) | 2116 | if (r) |
684 | return r; | 2117 | return r; |
685 | /* Initialize power management */ | ||
686 | radeon_pm_init(rdev); | ||
687 | /* Fence driver */ | 2118 | /* Fence driver */ |
688 | r = radeon_fence_driver_init(rdev); | 2119 | r = radeon_fence_driver_init(rdev); |
689 | if (r) | 2120 | if (r) |
@@ -702,7 +2133,7 @@ int evergreen_init(struct radeon_device *rdev) | |||
702 | r = radeon_bo_init(rdev); | 2133 | r = radeon_bo_init(rdev); |
703 | if (r) | 2134 | if (r) |
704 | return r; | 2135 | return r; |
705 | #if 0 | 2136 | |
706 | r = radeon_irq_kms_init(rdev); | 2137 | r = radeon_irq_kms_init(rdev); |
707 | if (r) | 2138 | if (r) |
708 | return r; | 2139 | return r; |
@@ -716,14 +2147,16 @@ int evergreen_init(struct radeon_device *rdev) | |||
716 | r = r600_pcie_gart_init(rdev); | 2147 | r = r600_pcie_gart_init(rdev); |
717 | if (r) | 2148 | if (r) |
718 | return r; | 2149 | return r; |
719 | #endif | 2150 | |
720 | rdev->accel_working = false; | 2151 | rdev->accel_working = false; |
721 | r = evergreen_startup(rdev); | 2152 | r = evergreen_startup(rdev); |
722 | if (r) { | 2153 | if (r) { |
723 | evergreen_suspend(rdev); | 2154 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
724 | /*r600_wb_fini(rdev);*/ | 2155 | r700_cp_fini(rdev); |
725 | /*radeon_ring_fini(rdev);*/ | 2156 | r600_wb_fini(rdev); |
726 | /*evergreen_pcie_gart_fini(rdev);*/ | 2157 | r600_irq_fini(rdev); |
2158 | radeon_irq_kms_fini(rdev); | ||
2159 | evergreen_pcie_gart_fini(rdev); | ||
727 | rdev->accel_working = false; | 2160 | rdev->accel_working = false; |
728 | } | 2161 | } |
729 | if (rdev->accel_working) { | 2162 | if (rdev->accel_working) { |
@@ -743,16 +2176,12 @@ int evergreen_init(struct radeon_device *rdev) | |||
743 | 2176 | ||
744 | void evergreen_fini(struct radeon_device *rdev) | 2177 | void evergreen_fini(struct radeon_device *rdev) |
745 | { | 2178 | { |
746 | radeon_pm_fini(rdev); | 2179 | /*r600_blit_fini(rdev);*/ |
747 | evergreen_suspend(rdev); | 2180 | r700_cp_fini(rdev); |
748 | #if 0 | 2181 | r600_wb_fini(rdev); |
749 | r600_blit_fini(rdev); | ||
750 | r600_irq_fini(rdev); | 2182 | r600_irq_fini(rdev); |
751 | radeon_irq_kms_fini(rdev); | 2183 | radeon_irq_kms_fini(rdev); |
752 | radeon_ring_fini(rdev); | ||
753 | r600_wb_fini(rdev); | ||
754 | evergreen_pcie_gart_fini(rdev); | 2184 | evergreen_pcie_gart_fini(rdev); |
755 | #endif | ||
756 | radeon_gem_fini(rdev); | 2185 | radeon_gem_fini(rdev); |
757 | radeon_fence_driver_fini(rdev); | 2186 | radeon_fence_driver_fini(rdev); |
758 | radeon_clocks_fini(rdev); | 2187 | radeon_clocks_fini(rdev); |