Diffstat (limited to 'drivers/gpu/drm/radeon/r600_cs.c')
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 186
1 file changed, 156 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 33b89cd8743e..d28970db6a2d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,7 +28,6 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "r600d.h"
-#include "avivod.h"
 
 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 					struct radeon_cs_reloc **cs_reloc);
@@ -57,7 +56,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
 			  idx, ib_chunk->length_dw);
 		return -EINVAL;
 	}
-	header = ib_chunk->kdata[idx];
+	header = radeon_get_ib_value(p, idx);
 	pkt->idx = idx;
 	pkt->type = CP_PACKET_GET_TYPE(header);
 	pkt->count = CP_PACKET_GET_COUNT(header);
@@ -98,7 +97,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 					struct radeon_cs_reloc **cs_reloc)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_packet p3reloc;
 	unsigned idx;
@@ -109,7 +107,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r) {
@@ -121,7 +118,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 			  p3reloc.idx);
 		return -EINVAL;
 	}
-	idx = ib_chunk->kdata[p3reloc.idx + 1];
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
 	if (idx >= relocs_chunk->length_dw) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
 			  idx, relocs_chunk->length_dw);
@@ -146,7 +143,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 					struct radeon_cs_reloc **cs_reloc)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_packet p3reloc;
 	unsigned idx;
@@ -157,7 +153,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r) {
@@ -169,7 +164,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 			  p3reloc.idx);
 		return -EINVAL;
 	}
-	idx = ib_chunk->kdata[p3reloc.idx + 1];
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
 	if (idx >= relocs_chunk->length_dw) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
 			  idx, relocs_chunk->length_dw);
@@ -181,13 +176,136 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 	return 0;
 }
 
+/**
+ * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser:	parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, wait_reg_mem;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg, wait_reg_mem_info;
+	volatile uint32_t *ib;
+
+	ib = p->ib->ptr;
+
+	/* parse the WAIT_REG_MEM */
+	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+	if (r)
+		return r;
+
+	/* check its a WAIT_REG_MEM */
+	if (wait_reg_mem.type != PACKET_TYPE3 ||
+	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+	/* bit 4 is reg (0) or mem (1) */
+	if (wait_reg_mem_info & 0x10) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+		r = -EINVAL;
+		return r;
+	}
+	/* waiting for value to be equal */
+	if ((wait_reg_mem_info & 0x7) != 0x3) {
+		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+		r = -EINVAL;
+		return r;
+	}
+	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	/* jump over the NOP */
+	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += wait_reg_mem.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+	reg = header >> 2;
+	mutex_lock(&p->rdev->ddev->mode_config.mutex);
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		r = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+		ib[h_idx + 4] = PACKET2(0);
+		ib[h_idx + 5] = PACKET2(0);
+		ib[h_idx + 6] = PACKET2(0);
+		ib[h_idx + 7] = PACKET2(0);
+		ib[h_idx + 8] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= ~R600_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			r = -EINVAL;
+			goto out;
+		}
+		ib[h_idx] = header;
+		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+	}
+out:
+	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
+	return r;
+}
+
 static int r600_packet0_check(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				unsigned idx, unsigned reg)
 {
+	int r;
+
 	switch (reg) {
 	case AVIVO_D1MODE_VLINE_START_END:
-	case AVIVO_D2MODE_VLINE_START_END:
+		r = r600_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			return r;
+		}
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -218,17 +336,18 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
 static int r600_packet3_check(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
 	volatile u32 *ib;
 	unsigned idx;
 	unsigned i;
 	unsigned start_reg, end_reg, reg;
 	int r;
+	u32 idx_value;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
 	switch (pkt->opcode) {
 	case PACKET3_START_3D_CMDBUF:
 		if (p->family >= CHIP_RV770 || pkt->count) {
@@ -259,8 +378,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad DRAW_INDEX\n");
 			return -EINVAL;
 		}
-		ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-		ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		break;
 	case PACKET3_DRAW_INDEX_AUTO:
 		if (pkt->count != 1) {
@@ -281,14 +400,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		/* bit 4 is reg (0) or mem (1) */
-		if (ib_chunk->kdata[idx+0] & 0x10) {
+		if (idx_value & 0x10) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("bad WAIT_REG_MEM\n");
 				return -EINVAL;
 			}
 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-			ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		}
 		break;
 	case PACKET3_SURFACE_SYNC:
@@ -297,8 +416,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		/* 0xffffffff/0x0 is flush all cache flag */
-		if (ib_chunk->kdata[idx+1] != 0xffffffff ||
-		    ib_chunk->kdata[idx+2] != 0) {
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("bad SURFACE_SYNC\n");
@@ -319,7 +438,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				return -EINVAL;
 			}
 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-			ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		}
 		break;
 	case PACKET3_EVENT_WRITE_EOP:
@@ -333,10 +452,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-		ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		break;
 	case PACKET3_SET_CONFIG_REG:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
@@ -356,7 +475,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_CONTEXT_REG:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
@@ -421,7 +540,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad SET_RESOURCE\n");
 			return -EINVAL;
 		}
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
@@ -430,7 +549,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		for (i = 0; i < (pkt->count / 7); i++) {
-			switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) {
+			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
 			case SQ_TEX_VTX_VALID_TEXTURE:
 				/* tex base */
 				r = r600_cs_packet_next_reloc(p, &reloc);
@@ -455,7 +574,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 					return -EINVAL;
 				}
 				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
-				ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 				break;
 			case SQ_TEX_VTX_INVALID_TEXTURE:
 			case SQ_TEX_VTX_INVALID_BUFFER:
@@ -466,7 +585,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_ALU_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
@@ -476,7 +595,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_BOOL_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
@@ -486,7 +605,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_LOOP_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
@@ -496,7 +615,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_CTL_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
@@ -510,7 +629,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad SET_SAMPLER\n");
 			return -EINVAL;
 		}
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
@@ -602,6 +721,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	kfree(parser->relocs);
 	for (i = 0; i < parser->nchunks; i++) {
 		kfree(parser->chunks[i].kdata);
+		kfree(parser->chunks[i].kpage[0]);
+		kfree(parser->chunks[i].kpage[1]);
 	}
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
@@ -639,7 +760,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	 * uncached). */
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
 	parser.ib->length_dw = ib_chunk->length_dw;
-	memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
 	*l = parser.ib->length_dw;
 	r = r600_cs_parse(&parser);
 	if (r) {
@@ -647,6 +767,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 		r600_cs_parser_fini(&parser, r);
 		return r;
 	}
+	r = radeon_cs_finish_pages(&parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
 	r600_cs_parser_fini(&parser, r);
 	return r;
 }
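
The pattern running through this diff — `ib_chunk->kdata[idx]` reads becoming `radeon_get_ib_value(p, idx)`, the dropped up-front `memcpy()` of the whole IB chunk, the new `kpage[0]`/`kpage[1]` frees in `r600_cs_parser_fini()`, and the trailing `radeon_cs_finish_pages()` call — suggests the parser now fetches IB dwords through a small page cache rather than keeping a full kernel-side copy of the chunk. The sketch below is only a self-contained user-space analogue of that access pattern under this assumption; `struct paged_ib`, `paged_read_dword()`, and `PAGE_DWORDS` are made-up illustration names, not the radeon kernel API.

/*
 * Illustrative sketch only: read one dword at a time through a two-slot
 * page cache instead of copying the whole command buffer up front.
 * Names and layout are hypothetical, not the radeon implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_DWORDS 1024	/* one 4 KiB page expressed in dwords */

struct paged_ib {
	const uint32_t *backing;	/* stands in for the user-space chunk */
	uint32_t length_dw;
	uint32_t kpage[2][PAGE_DWORDS];	/* two cached pages */
	int kpage_idx[2];		/* page number in each slot, -1 = empty */
	int last;			/* slot filled by the most recent fault */
};

/* Fault a page into the least-recently-used slot (copy_from_user stand-in). */
static int paged_ib_fault(struct paged_ib *ib, uint32_t pg)
{
	int slot = ib->last ^ 1;
	uint32_t start = pg * PAGE_DWORDS;
	uint32_t n;

	if (start >= ib->length_dw)
		return -1;
	n = ib->length_dw - start;
	if (n > PAGE_DWORDS)
		n = PAGE_DWORDS;
	memcpy(ib->kpage[slot], ib->backing + start, n * sizeof(uint32_t));
	ib->kpage_idx[slot] = (int)pg;
	ib->last = slot;
	return slot;
}

/* Analogue of an IB dword accessor: return dword idx, faulting pages in as needed. */
static uint32_t paged_read_dword(struct paged_ib *ib, uint32_t idx)
{
	uint32_t pg = idx / PAGE_DWORDS, off = idx % PAGE_DWORDS;
	int slot;

	for (slot = 0; slot < 2; slot++)
		if (ib->kpage_idx[slot] == (int)pg)
			return ib->kpage[slot][off];	/* cache hit */
	slot = paged_ib_fault(ib, pg);			/* cache miss */
	return slot < 0 ? 0 : ib->kpage[slot][off];
}

int main(void)
{
	static uint32_t chunk[3000];
	struct paged_ib ib = { .backing = chunk, .length_dw = 3000,
			       .kpage_idx = { -1, -1 }, .last = 0 };

	for (uint32_t i = 0; i < 3000; i++)
		chunk[i] = i;
	/* first read faults page 0, second read faults page 2 into the other slot */
	printf("%u %u\n", paged_read_dword(&ib, 10), paged_read_dword(&ib, 2500));
	return 0;
}

With only two cached slots, a parser that walks the command stream mostly forward (as the checkers above do) stays on the cached pages and avoids allocating a buffer as large as the whole IB chunk, which is consistent with the removed whole-chunk `memcpy()`.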