Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 245
1 file changed, 197 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9e93eabcf16..824cc6480a06 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+/* hpd for digital panel detect/disconnect */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+        bool connected = false;
+
+        switch (hpd) {
+        case RADEON_HPD_1:
+                if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+                        connected = true;
+                break;
+        case RADEON_HPD_2:
+                if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+                        connected = true;
+                break;
+        default:
+                break;
+        }
+        return connected;
+}
+
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+                           enum radeon_hpd_id hpd)
+{
+        u32 tmp;
+        bool connected = r100_hpd_sense(rdev, hpd);
+
+        switch (hpd) {
+        case RADEON_HPD_1:
+                tmp = RREG32(RADEON_FP_GEN_CNTL);
+                if (connected)
+                        tmp &= ~RADEON_FP_DETECT_INT_POL;
+                else
+                        tmp |= RADEON_FP_DETECT_INT_POL;
+                WREG32(RADEON_FP_GEN_CNTL, tmp);
+                break;
+        case RADEON_HPD_2:
+                tmp = RREG32(RADEON_FP2_GEN_CNTL);
+                if (connected)
+                        tmp &= ~RADEON_FP2_DETECT_INT_POL;
+                else
+                        tmp |= RADEON_FP2_DETECT_INT_POL;
+                WREG32(RADEON_FP2_GEN_CNTL, tmp);
+                break;
+        default:
+                break;
+        }
+}
+
+void r100_hpd_init(struct radeon_device *rdev)
+{
+        struct drm_device *dev = rdev->ddev;
+        struct drm_connector *connector;
+
+        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                switch (radeon_connector->hpd.hpd) {
+                case RADEON_HPD_1:
+                        rdev->irq.hpd[0] = true;
+                        break;
+                case RADEON_HPD_2:
+                        rdev->irq.hpd[1] = true;
+                        break;
+                default:
+                        break;
+                }
+        }
+        r100_irq_set(rdev);
+}
+
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+        struct drm_device *dev = rdev->ddev;
+        struct drm_connector *connector;
+
+        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                switch (radeon_connector->hpd.hpd) {
+                case RADEON_HPD_1:
+                        rdev->irq.hpd[0] = false;
+                        break;
+                case RADEON_HPD_2:
+                        rdev->irq.hpd[1] = false;
+                        break;
+                default:
+                        break;
+                }
+        }
+}
+
 /*
  * PCI GART
  */
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
         return radeon_gart_table_ram_alloc(rdev);
 }
 
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+        uint32_t tmp;
+        /* Enable bus mastering */
+        tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+        WREG32(RADEON_BUS_CNTL, tmp);
+}
+
 int r100_pci_gart_enable(struct radeon_device *rdev)
 {
         uint32_t tmp;
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
         WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
         tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
         WREG32(RADEON_AIC_HI_ADDR, tmp);
-        /* Enable bus mastering */
-        tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
-        WREG32(RADEON_BUS_CNTL, tmp);
         /* set PCI GART page-table base address */
         WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
         tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev)
         if (rdev->irq.crtc_vblank_int[1]) {
                 tmp |= RADEON_CRTC2_VBLANK_MASK;
         }
+        if (rdev->irq.hpd[0]) {
+                tmp |= RADEON_FP_DETECT_MASK;
+        }
+        if (rdev->irq.hpd[1]) {
+                tmp |= RADEON_FP2_DETECT_MASK;
+        }
         WREG32(RADEON_GEN_INT_CNTL, tmp);
         return 0;
 }
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev)
 static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
 {
         uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
-        uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
-                RADEON_CRTC2_VBLANK_STAT;
+        uint32_t irq_mask = RADEON_SW_INT_TEST |
+                RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+                RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
         if (irqs) {
                 WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
 int r100_irq_process(struct radeon_device *rdev)
 {
         uint32_t status, msi_rearm;
+        bool queue_hotplug = false;
 
         status = r100_irq_ack(rdev);
         if (!status) {
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev)
                 if (status & RADEON_CRTC2_VBLANK_STAT) {
                         drm_handle_vblank(rdev->ddev, 1);
                 }
+                if (status & RADEON_FP_DETECT_STAT) {
+                        queue_hotplug = true;
+                        DRM_DEBUG("HPD1\n");
+                }
+                if (status & RADEON_FP2_DETECT_STAT) {
+                        queue_hotplug = true;
+                        DRM_DEBUG("HPD2\n");
+                }
                 status = r100_irq_ack(rdev);
         }
+        if (queue_hotplug)
+                queue_work(rdev->wq, &rdev->hotplug_work);
         if (rdev->msi_enabled) {
                 switch (rdev->family) {
                 case CHIP_RS400:
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev)
         int r;
 
         if (rdev->wb.wb_obj == NULL) {
-                r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
-                                         true,
-                                         RADEON_GEM_DOMAIN_GTT,
-                                         false, &rdev->wb.wb_obj);
+                r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+                                        RADEON_GEM_DOMAIN_GTT,
+                                        &rdev->wb.wb_obj);
                 if (r) {
-                        DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+                        dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
                         return r;
                 }
-                r = radeon_object_pin(rdev->wb.wb_obj,
-                                      RADEON_GEM_DOMAIN_GTT,
-                                      &rdev->wb.gpu_addr);
+                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+                if (unlikely(r != 0))
+                        return r;
+                r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+                                        &rdev->wb.gpu_addr);
                 if (r) {
-                        DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+                        dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+                        radeon_bo_unreserve(rdev->wb.wb_obj);
                         return r;
                 }
-                r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+                r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+                radeon_bo_unreserve(rdev->wb.wb_obj);
                 if (r) {
-                        DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+                        dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
                         return r;
                 }
         }
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev)
 
 void r100_wb_fini(struct radeon_device *rdev)
 {
+        int r;
+
         r100_wb_disable(rdev);
         if (rdev->wb.wb_obj) {
-                radeon_object_kunmap(rdev->wb.wb_obj);
-                radeon_object_unpin(rdev->wb.wb_obj);
-                radeon_object_unref(&rdev->wb.wb_obj);
+                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+                if (unlikely(r != 0)) {
+                        dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+                        return;
+                }
+                radeon_bo_kunmap(rdev->wb.wb_obj);
+                radeon_bo_unpin(rdev->wb.wb_obj);
+                radeon_bo_unreserve(rdev->wb.wb_obj);
+                radeon_bo_unref(&rdev->wb.wb_obj);
                 rdev->wb.wb = NULL;
                 rdev->wb.wb_obj = NULL;
         }
@@ -1288,17 +1412,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                          struct radeon_cs_packet *pkt,
-                                         struct radeon_object *robj)
+                                         struct radeon_bo *robj)
 {
         unsigned idx;
         u32 value;
         idx = pkt->idx + 1;
         value = radeon_get_ib_value(p, idx + 2);
-        if ((value + 1) > radeon_object_size(robj)) {
+        if ((value + 1) > radeon_bo_size(robj)) {
                 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
                           "(need %u have %lu) !\n",
                           value + 1,
-                          radeon_object_size(robj));
+                          radeon_bo_size(robj));
                 return -EINVAL;
         }
         return 0;
@@ -1583,6 +1707,14 @@ void r100_gpu_init(struct radeon_device *rdev)
         r100_hdp_reset(rdev);
 }
 
+void r100_hdp_flush(struct radeon_device *rdev)
+{
+        u32 tmp;
+        tmp = RREG32(RADEON_HOST_PATH_CNTL);
+        tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
+        WREG32(RADEON_HOST_PATH_CNTL, tmp);
+}
+
 void r100_hdp_reset(struct radeon_device *rdev)
 {
         uint32_t tmp;
@@ -1650,6 +1782,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
         return 0;
 }
 
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+        /* set these so they don't interfere with anything */
+        WREG32(RADEON_OV0_SCALE_CNTL, 0);
+        WREG32(RADEON_SUBPIC_CNTL, 0);
+        WREG32(RADEON_VIPH_CONTROL, 0);
+        WREG32(RADEON_I2C_CNTL_1, 0);
+        WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+        WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+        WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+}
 
 /*
  * VRAM info
@@ -2594,7 +2737,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
                           struct r100_cs_track *track, unsigned idx)
 {
         unsigned face, w, h;
-        struct radeon_object *cube_robj;
+        struct radeon_bo *cube_robj;
         unsigned long size;
 
         for (face = 0; face < 5; face++) {
@@ -2607,9 +2750,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 
                 size += track->textures[idx].cube_info[face].offset;
 
-                if (size > radeon_object_size(cube_robj)) {
+                if (size > radeon_bo_size(cube_robj)) {
                         DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
-                                  size, radeon_object_size(cube_robj));
+                                  size, radeon_bo_size(cube_robj));
                         r100_cs_track_texture_print(&track->textures[idx]);
                         return -1;
                 }
@@ -2620,7 +2763,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 static int r100_cs_track_texture_check(struct radeon_device *rdev,
                                        struct r100_cs_track *track)
 {
-        struct radeon_object *robj;
+        struct radeon_bo *robj;
         unsigned long size;
         unsigned u, i, w, h;
         int ret;
@@ -2676,9 +2819,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
                           "%u\n", track->textures[u].tex_coord_type, u);
                         return -EINVAL;
                 }
-                if (size > radeon_object_size(robj)) {
+                if (size > radeon_bo_size(robj)) {
                         DRM_ERROR("Texture of unit %u needs %lu bytes but is "
-                                  "%lu\n", u, size, radeon_object_size(robj));
+                                  "%lu\n", u, size, radeon_bo_size(robj));
                         r100_cs_track_texture_print(&track->textures[u]);
                         return -EINVAL;
                 }
@@ -2700,10 +2843,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                 }
                 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
                 size += track->cb[i].offset;
-                if (size > radeon_object_size(track->cb[i].robj)) {
+                if (size > radeon_bo_size(track->cb[i].robj)) {
                         DRM_ERROR("[drm] Buffer too small for color buffer %d "
                                   "(need %lu have %lu) !\n", i, size,
-                                  radeon_object_size(track->cb[i].robj));
+                                  radeon_bo_size(track->cb[i].robj));
                         DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
                                   i, track->cb[i].pitch, track->cb[i].cpp,
                                   track->cb[i].offset, track->maxy);
@@ -2717,10 +2860,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                 }
                 size = track->zb.pitch * track->zb.cpp * track->maxy;
                 size += track->zb.offset;
-                if (size > radeon_object_size(track->zb.robj)) {
+                if (size > radeon_bo_size(track->zb.robj)) {
                         DRM_ERROR("[drm] Buffer too small for z buffer "
                                   "(need %lu have %lu) !\n", size,
-                                  radeon_object_size(track->zb.robj));
+                                  radeon_bo_size(track->zb.robj));
                         DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
                                   track->zb.pitch, track->zb.cpp,
                                   track->zb.offset, track->maxy);
@@ -2738,11 +2881,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                                   "bound\n", prim_walk, i);
                         return -EINVAL;
                 }
-                if (size > radeon_object_size(track->arrays[i].robj)) {
-                        DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-                                  "have %lu dwords\n", prim_walk, i,
-                                  size >> 2,
-                                  radeon_object_size(track->arrays[i].robj) >> 2);
+                if (size > radeon_bo_size(track->arrays[i].robj)) {
+                        dev_err(rdev->dev, "(PW %u) Vertex array %u "
+                                "need %lu dwords have %lu dwords\n",
+                                prim_walk, i, size >> 2,
+                                radeon_bo_size(track->arrays[i].robj)
+                                >> 2);
                         DRM_ERROR("Max indices %u\n", track->max_indx);
                         return -EINVAL;
                 }
@@ -2756,10 +2900,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                                   "bound\n", prim_walk, i);
                         return -EINVAL;
                 }
-                if (size > radeon_object_size(track->arrays[i].robj)) {
-                        DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-                                  "have %lu dwords\n", prim_walk, i, size >> 2,
-                                  radeon_object_size(track->arrays[i].robj) >> 2);
+                if (size > radeon_bo_size(track->arrays[i].robj)) {
+                        dev_err(rdev->dev, "(PW %u) Vertex array %u "
+                                "need %lu dwords have %lu dwords\n",
+                                prim_walk, i, size >> 2,
+                                radeon_bo_size(track->arrays[i].robj)
+                                >> 2);
                         return -EINVAL;
                 }
         }
@@ -3101,6 +3247,9 @@ static int r100_startup(struct radeon_device *rdev)
 {
         int r;
 
+        /* set common regs */
+        r100_set_common_regs(rdev);
+        /* program mc */
         r100_mc_program(rdev);
         /* Resume clock */
         r100_clock_startup(rdev);
@@ -3108,13 +3257,13 @@ static int r100_startup(struct radeon_device *rdev)
         r100_gpu_init(rdev);
         /* Initialize GART (initialize after TTM so we can allocate
          * memory through TTM but finalize after TTM) */
+        r100_enable_bm(rdev);
         if (rdev->flags & RADEON_IS_PCI) {
                 r = r100_pci_gart_enable(rdev);
                 if (r)
                         return r;
         }
         /* Enable IRQ */
-        rdev->irq.sw_int = true;
         r100_irq_set(rdev);
         /* 1M ring buffer */
         r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
         radeon_combios_asic_init(rdev->ddev);
         /* Resume clock after posting */
         r100_clock_startup(rdev);
+        /* Initialize surface registers */
+        radeon_surface_init(rdev);
         return r100_startup(rdev);
 }
 
@@ -3174,7 +3325,7 @@ void r100_fini(struct radeon_device *rdev)
         r100_pci_gart_fini(rdev);
         radeon_irq_kms_fini(rdev);
         radeon_fence_driver_fini(rdev);
-        radeon_object_fini(rdev);
+        radeon_bo_fini(rdev);
         radeon_atombios_fini(rdev);
         kfree(rdev->bios);
         rdev->bios = NULL;
@@ -3242,10 +3393,8 @@ int r100_init(struct radeon_device *rdev)
                         RREG32(R_0007C0_CP_STAT));
         }
         /* check if cards are posted or not */
-        if (!radeon_card_posted(rdev) && rdev->bios) {
-                DRM_INFO("GPU not posted. posting now...\n");
-                radeon_combios_asic_init(rdev->ddev);
-        }
+        if (radeon_boot_test_post_card(rdev) == false)
+                return -EINVAL;
         /* Set asic errata */
         r100_errata(rdev);
         /* Initialize clocks */
@@ -3264,7 +3413,7 @@ int r100_init(struct radeon_device *rdev)
         if (r)
                 return r;
         /* Memory manager */
-        r = radeon_object_init(rdev);
+        r = radeon_bo_init(rdev);
         if (r)
                 return r;
         if (rdev->flags & RADEON_IS_PCI) {