Diffstat (limited to 'drivers/char/drm/r300_cmdbuf.c')
 -rw-r--r--  drivers/char/drm/r300_cmdbuf.c | 502
 1 file changed, 262 insertions(+), 240 deletions(-)
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 623f1f460cb5..aa66f46279e7 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -37,7 +37,6 @@
 #include "radeon_drv.h"
 #include "r300_reg.h"
 
-
 #define R300_SIMULTANEOUS_CLIPRECTS		4
 
 /* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
@@ -49,14 +48,12 @@ static const int r300_cliprect_cntl[4] = {
 	0xFFFE
 };
 
-
 /**
  * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
  * buffer, starting with index n.
  */
-static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
-			       drm_radeon_cmd_buffer_t* cmdbuf,
-			       int n)
+static int r300_emit_cliprects(drm_radeon_private_t * dev_priv,
+			       drm_radeon_cmd_buffer_t * cmdbuf, int n)
 {
 	drm_clip_rect_t box;
 	int nr;
@@ -70,38 +67,47 @@ static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
 	DRM_DEBUG("%i cliprects\n", nr);
 
 	if (nr) {
-		BEGIN_RING(6 + nr*2);
-		OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
+		BEGIN_RING(6 + nr * 2);
+		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
 
-		for(i = 0; i < nr; ++i) {
-			if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
+		for (i = 0; i < nr; ++i) {
+			if (DRM_COPY_FROM_USER_UNCHECKED
+			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
 				DRM_ERROR("copy cliprect faulted\n");
 				return DRM_ERR(EFAULT);
 			}
 
-			box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
-			box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
-			box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
-			box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+			box.x1 =
+			    (box.x1 +
+			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+			box.y1 =
+			    (box.y1 +
+			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+			box.x2 =
+			    (box.x2 +
+			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+			box.y2 =
+			    (box.y2 +
+			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
 
 			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
 				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
 			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
 				 (box.y2 << R300_CLIPRECT_Y_SHIFT));
 		}
 
-		OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
 
 		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
 		 * client might be able to trample over memory.
 		 * The impact should be very limited, but I'd rather be safe than
 		 * sorry.
 		 */
-		OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
-		OUT_RING( 0 );
-		OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
+		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
+		OUT_RING(0);
+		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
 		ADVANCE_RING();
 	} else {
 		/* Why we allow zero cliprect rendering:
 		 * There are some commands in a command buffer that must be submitted
 		 * even when there are no cliprects, e.g. DMA buffer discard
@@ -118,28 +124,27 @@ static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
 		 * can't produce any fragments.
 		 */
 		BEGIN_RING(2);
-		OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
 		ADVANCE_RING();
 	}
 
 	return 0;
 }
 
-u8 r300_reg_flags[0x10000>>2];
-
+u8 r300_reg_flags[0x10000 >> 2];
 
 void r300_init_reg_flags(void)
 {
 	int i;
-	memset(r300_reg_flags, 0, 0x10000>>2);
+	memset(r300_reg_flags, 0, 0x10000 >> 2);
 #define ADD_RANGE_MARK(reg, count,mark) \
 		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
 			r300_reg_flags[i]|=(mark);
 
 #define MARK_SAFE 1
 #define MARK_CHECK_OFFSET 2
 
 #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
 
 	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
 	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
@@ -193,15 +198,15 @@ void r300_init_reg_flags(void)
 	ADD_RANGE(R300_RB3D_CBLEND, 2);
 	ADD_RANGE(R300_RB3D_COLORMASK, 1);
 	ADD_RANGE(0x4E10, 3);
-	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
+	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
 	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
 	ADD_RANGE(0x4E50, 9);
 	ADD_RANGE(0x4E88, 1);
 	ADD_RANGE(0x4EA0, 2);
 	ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
 	ADD_RANGE(0x4F10, 4);
-	ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
+	ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
 	ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
 	ADD_RANGE(0x4F28, 1);
 	ADD_RANGE(0x4F30, 2);
 	ADD_RANGE(0x4F44, 1);
@@ -211,7 +216,7 @@ void r300_init_reg_flags(void)
 	ADD_RANGE(R300_TX_UNK1_0, 16);
 	ADD_RANGE(R300_TX_SIZE_0, 16);
 	ADD_RANGE(R300_TX_FORMAT_0, 16);
-	/* Texture offset is dangerous and needs more checking */
+	/* Texture offset is dangerous and needs more checking */
 	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
 	ADD_RANGE(R300_TX_UNK4_0, 16);
 	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
@@ -224,33 +229,41 @@ void r300_init_reg_flags(void)
 
 }
 
 static __inline__ int r300_check_range(unsigned reg, int count)
 {
 	int i;
-	if(reg & ~0xffff)return -1;
-	for(i=(reg>>2);i<(reg>>2)+count;i++)
-		if(r300_reg_flags[i]!=MARK_SAFE)return 1;
+	if (reg & ~0xffff)
+		return -1;
+	for (i = (reg >> 2); i < (reg >> 2) + count; i++)
+		if (r300_reg_flags[i] != MARK_SAFE)
+			return 1;
 	return 0;
 }
 
 /* we expect offsets passed to the framebuffer to be either within video memory or
    within AGP space */
-static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
+static __inline__ int r300_check_offset(drm_radeon_private_t * dev_priv,
+					u32 offset)
 {
 	/* we realy want to check against end of video aperture
 	   but this value is not being kept.
 	   This code is correct for now (does the same thing as the
 	   code that sets MC_FB_LOCATION) in radeon_cp.c */
-	if((offset>=dev_priv->fb_location) &&
-	   (offset<dev_priv->gart_vm_start))return 0;
-	if((offset>=dev_priv->gart_vm_start) &&
-	   (offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
+	if ((offset >= dev_priv->fb_location) &&
+	    (offset < dev_priv->gart_vm_start))
+		return 0;
+	if ((offset >= dev_priv->gart_vm_start) &&
+	    (offset < dev_priv->gart_vm_start + dev_priv->gart_size))
+		return 0;
 	return 1;
 }
 
-static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
-							  drm_radeon_cmd_buffer_t* cmdbuf,
-							  drm_r300_cmd_header_t header)
+static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
+							   dev_priv,
+							   drm_radeon_cmd_buffer_t
+							   * cmdbuf,
+							   drm_r300_cmd_header_t
+							   header)
 {
 	int reg;
 	int sz;
@@ -260,35 +273,40 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t*
 
 	sz = header.packet0.count;
 	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
 
-	if((sz>64)||(sz<0)){
-		DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
+	if ((sz > 64) || (sz < 0)) {
+		DRM_ERROR
+		    ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+		     reg, sz);
 		return DRM_ERR(EINVAL);
 	}
-	for(i=0;i<sz;i++){
-		values[i]=((int __user*)cmdbuf->buf)[i];
-		switch(r300_reg_flags[(reg>>2)+i]){
+	for (i = 0; i < sz; i++) {
+		values[i] = ((int __user *)cmdbuf->buf)[i];
+		switch (r300_reg_flags[(reg >> 2) + i]) {
 		case MARK_SAFE:
 			break;
 		case MARK_CHECK_OFFSET:
-			if(r300_check_offset(dev_priv, (u32)values[i])){
-				DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
+			if (r300_check_offset(dev_priv, (u32) values[i])) {
+				DRM_ERROR
+				    ("Offset failed range check (reg=%04x sz=%d)\n",
+				     reg, sz);
 				return DRM_ERR(EINVAL);
 			}
 			break;
 		default:
-			DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
+			DRM_ERROR("Register %04x failed check as flag=%02x\n",
+				  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
 			return DRM_ERR(EINVAL);
-			}
 		}
-
-	BEGIN_RING(1+sz);
-	OUT_RING( CP_PACKET0( reg, sz-1 ) );
-	OUT_RING_TABLE( values, sz );
+	}
+
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_TABLE(values, sz);
 	ADVANCE_RING();
 
-	cmdbuf->buf += sz*4;
-	cmdbuf->bufsz -= sz*4;
+	cmdbuf->buf += sz * 4;
+	cmdbuf->bufsz -= sz * 4;
 
 	return 0;
 }
@@ -299,9 +317,9 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t*
  *
  * Note that checks are performed on contents and addresses of the registers
  */
-static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
-					drm_radeon_cmd_buffer_t* cmdbuf,
+static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
+					drm_radeon_cmd_buffer_t * cmdbuf,
 					drm_r300_cmd_header_t header)
 {
 	int reg;
 	int sz;
@@ -313,39 +331,40 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
 	if (!sz)
 		return 0;
 
-	if (sz*4 > cmdbuf->bufsz)
+	if (sz * 4 > cmdbuf->bufsz)
 		return DRM_ERR(EINVAL);
 
-	if (reg+sz*4 >= 0x10000){
-		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
+	if (reg + sz * 4 >= 0x10000) {
+		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
+			  sz);
 		return DRM_ERR(EINVAL);
 	}
 
-	if(r300_check_range(reg, sz)){
+	if (r300_check_range(reg, sz)) {
 		/* go and check everything */
-		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
-	}
+		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
+							   header);
+	}
 	/* the rest of the data is safe to emit, whatever the values the user passed */
 
-	BEGIN_RING(1+sz);
-	OUT_RING( CP_PACKET0( reg, sz-1 ) );
-	OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_TABLE((int __user *)cmdbuf->buf, sz);
 	ADVANCE_RING();
 
-	cmdbuf->buf += sz*4;
-	cmdbuf->bufsz -= sz*4;
+	cmdbuf->buf += sz * 4;
+	cmdbuf->bufsz -= sz * 4;
 
 	return 0;
 }
 
-
 /**
  * Uploads user-supplied vertex program instructions or parameters onto
  * the graphics card.
 * Called by r300_do_cp_cmdbuf.
  */
-static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
-				    drm_radeon_cmd_buffer_t* cmdbuf,
+static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
+				    drm_radeon_cmd_buffer_t * cmdbuf,
 				    drm_r300_cmd_header_t header)
 {
 	int sz;
@@ -357,114 +376,121 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
 
 	if (!sz)
 		return 0;
-	if (sz*16 > cmdbuf->bufsz)
+	if (sz * 16 > cmdbuf->bufsz)
 		return DRM_ERR(EINVAL);
 
-	BEGIN_RING(5+sz*4);
+	BEGIN_RING(5 + sz * 4);
 	/* Wait for VAP to come to senses.. */
 	/* there is no need to emit it multiple times, (only once before VAP is programmed,
 	   but this optimization is for later */
-	OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
-	OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
-	OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
-	OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
+	OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
+	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
+	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
+	OUT_RING_TABLE((int __user *)cmdbuf->buf, sz * 4);
 
 	ADVANCE_RING();
 
-	cmdbuf->buf += sz*16;
-	cmdbuf->bufsz -= sz*16;
+	cmdbuf->buf += sz * 16;
+	cmdbuf->bufsz -= sz * 16;
 
 	return 0;
 }
 
-
 /**
  * Emit a clear packet from userspace.
  * Called by r300_emit_packet3.
  */
-static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
-				      drm_radeon_cmd_buffer_t* cmdbuf)
+static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
+				      drm_radeon_cmd_buffer_t * cmdbuf)
 {
 	RING_LOCALS;
 
-	if (8*4 > cmdbuf->bufsz)
+	if (8 * 4 > cmdbuf->bufsz)
 		return DRM_ERR(EINVAL);
 
 	BEGIN_RING(10);
-	OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
-	OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
-		  (1<<R300_PRIM_NUM_VERTICES_SHIFT) );
-	OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
+	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
+	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
+		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
+	OUT_RING_TABLE((int __user *)cmdbuf->buf, 8);
 	ADVANCE_RING();
 
-	cmdbuf->buf += 8*4;
-	cmdbuf->bufsz -= 8*4;
+	cmdbuf->buf += 8 * 4;
+	cmdbuf->bufsz -= 8 * 4;
 
 	return 0;
 }
 
-static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
-					       drm_radeon_cmd_buffer_t* cmdbuf,
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
+					       drm_radeon_cmd_buffer_t * cmdbuf,
 					       u32 header)
 {
-	int count, i,k;
+	int count, i, k;
 #define MAX_ARRAY_PACKET 64
 	u32 payload[MAX_ARRAY_PACKET];
 	u32 narrays;
 	RING_LOCALS;
 
-	count=(header>>16) & 0x3fff;
+	count = (header >> 16) & 0x3fff;
 
-	if((count+1)>MAX_ARRAY_PACKET){
-		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
+	if ((count + 1) > MAX_ARRAY_PACKET) {
+		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+			  count);
 		return DRM_ERR(EINVAL);
 	}
-	memset(payload, 0, MAX_ARRAY_PACKET*4);
-	memcpy(payload, cmdbuf->buf+4, (count+1)*4);
+	memset(payload, 0, MAX_ARRAY_PACKET * 4);
+	memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
 
 	/* carefully check packet contents */
 
-	narrays=payload[0];
-	k=0;
-	i=1;
-	while((k<narrays) && (i<(count+1))){
+	narrays = payload[0];
+	k = 0;
+	i = 1;
+	while ((k < narrays) && (i < (count + 1))) {
 		i++;		/* skip attribute field */
-		if(r300_check_offset(dev_priv, payload[i])){
-			DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
+		if (r300_check_offset(dev_priv, payload[i])) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
 			return DRM_ERR(EINVAL);
 		}
 		k++;
 		i++;
-		if(k==narrays)break;
+		if (k == narrays)
+			break;
 		/* have one more to process, they come in pairs */
-		if(r300_check_offset(dev_priv, payload[i])){
-			DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
+		if (r300_check_offset(dev_priv, payload[i])) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
 			return DRM_ERR(EINVAL);
-			}
-		k++;
-		i++;
 		}
+		k++;
+		i++;
+	}
 	/* do the counts match what we expect ? */
-	if((k!=narrays) || (i!=(count+1))){
-		DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
+	if ((k != narrays) || (i != (count + 1))) {
+		DRM_ERROR
+		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
+		     k, i, narrays, count + 1);
 		return DRM_ERR(EINVAL);
 	}
 
 	/* all clear, output packet */
 
-	BEGIN_RING(count+2);
+	BEGIN_RING(count + 2);
 	OUT_RING(header);
-	OUT_RING_TABLE(payload, count+1);
+	OUT_RING_TABLE(payload, count + 1);
 	ADVANCE_RING();
 
-	cmdbuf->buf += (count+2)*4;
-	cmdbuf->bufsz -= (count+2)*4;
+	cmdbuf->buf += (count + 2) * 4;
+	cmdbuf->bufsz -= (count + 2) * 4;
 
 	return 0;
 }
 
-static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
-					    drm_radeon_cmd_buffer_t* cmdbuf)
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
+					    drm_radeon_cmd_buffer_t * cmdbuf)
 {
 	u32 header;
 	int count;
@@ -473,36 +499,37 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
 	if (4 > cmdbuf->bufsz)
 		return DRM_ERR(EINVAL);
 
 	/* Fixme !! This simply emits a packet without much checking.
 	   We need to be smarter. */
 
 	/* obtain first word - actual packet3 header */
-	header = *(u32 __user*)cmdbuf->buf;
+	header = *(u32 __user *) cmdbuf->buf;
 
 	/* Is it packet 3 ? */
-	if( (header>>30)!=0x3 ) {
+	if ((header >> 30) != 0x3) {
 		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
 		return DRM_ERR(EINVAL);
 	}
 
-	count=(header>>16) & 0x3fff;
+	count = (header >> 16) & 0x3fff;
 
 	/* Check again now that we know how much data to expect */
-	if ((count+2)*4 > cmdbuf->bufsz){
-		DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
-			  (count+2)*4, cmdbuf->bufsz);
+	if ((count + 2) * 4 > cmdbuf->bufsz) {
+		DRM_ERROR
+		    ("Expected packet3 of length %d but have only %d bytes left\n",
+		     (count + 2) * 4, cmdbuf->bufsz);
 		return DRM_ERR(EINVAL);
 	}
 
 	/* Is it a packet type we know about ? */
-	switch(header & 0xff00){
+	switch (header & 0xff00) {
 	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
 		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
 
 	case RADEON_CP_3D_DRAW_IMMD_2:	/* triggers drawing using in-packet vertex data */
 	case RADEON_CP_3D_DRAW_VBUF_2:	/* triggers drawing of vertex buffers setup elsewhere */
 	case RADEON_CP_3D_DRAW_INDX_2:	/* triggers drawing using indices to vertex buffer */
 	case RADEON_CP_INDX_BUFFER:	/* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
 	case RADEON_WAIT_FOR_IDLE:
 	case RADEON_CP_NOP:
 		/* these packets are safe */
@@ -510,32 +537,30 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
 	default:
 		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
 		return DRM_ERR(EINVAL);
 	}
-
 
-	BEGIN_RING(count+2);
+	BEGIN_RING(count + 2);
 	OUT_RING(header);
-	OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
+	OUT_RING_TABLE((int __user *)(cmdbuf->buf + 4), count + 1);
 	ADVANCE_RING();
 
-	cmdbuf->buf += (count+2)*4;
-	cmdbuf->bufsz -= (count+2)*4;
+	cmdbuf->buf += (count + 2) * 4;
+	cmdbuf->bufsz -= (count + 2) * 4;
 
 	return 0;
 }
 
-
 /**
  * Emit a rendering packet3 from userspace.
  * Called by r300_do_cp_cmdbuf.
  */
-static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
-					drm_radeon_cmd_buffer_t* cmdbuf,
+static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
+					drm_radeon_cmd_buffer_t * cmdbuf,
 					drm_r300_cmd_header_t header)
 {
 	int n;
 	int ret;
-	char __user* orig_buf = cmdbuf->buf;
+	char __user *orig_buf = cmdbuf->buf;
 	int orig_bufsz = cmdbuf->bufsz;
 
 	/* This is a do-while-loop so that we run the interior at least once,
@@ -550,16 +575,16 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
 
 		cmdbuf->buf = orig_buf;
 		cmdbuf->bufsz = orig_bufsz;
 	}
 
-	switch(header.packet3.packet) {
+	switch (header.packet3.packet) {
 	case R300_CMD_PACKET3_CLEAR:
 		DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
 		ret = r300_emit_clear(dev_priv, cmdbuf);
 		if (ret) {
 			DRM_ERROR("r300_emit_clear failed\n");
 			return ret;
 		}
 		break;
 
 	case R300_CMD_PACKET3_RAW:
@@ -568,18 +593,18 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
 		if (ret) {
 			DRM_ERROR("r300_emit_raw_packet3 failed\n");
 			return ret;
 		}
 		break;
 
 	default:
 		DRM_ERROR("bad packet3 type %i at %p\n",
 			  header.packet3.packet,
 			  cmdbuf->buf - sizeof(header));
 		return DRM_ERR(EINVAL);
 	}
 
 		n += R300_SIMULTANEOUS_CLIPRECTS;
-	} while(n < cmdbuf->nbox);
+	} while (n < cmdbuf->nbox);
 
 	return 0;
 }
@@ -598,21 +623,20 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
 /**
  * Emit the sequence to pacify R300.
  */
-static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
+static __inline__ void r300_pacify(drm_radeon_private_t * dev_priv)
 {
 	RING_LOCALS;
 
 	BEGIN_RING(6);
-	OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
-	OUT_RING( 0xa );
-	OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
-	OUT_RING( 0x3 );
-	OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
-	OUT_RING( 0x0 );
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(0xa);
+	OUT_RING(CP_PACKET0(0x4f18, 0));
+	OUT_RING(0x3);
+	OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
+	OUT_RING(0x0);
 	ADVANCE_RING();
 }
 
-
 /**
  * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
  * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
@@ -628,20 +652,18 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
 	buf->used = 0;
 }
 
-
 /**
  * Parses and validates a user-supplied command buffer and emits appropriate
  * commands on the DMA ring buffer.
  * Called by the ioctl handler function radeon_cp_cmdbuf.
  */
-int r300_do_cp_cmdbuf(drm_device_t* dev,
+int r300_do_cp_cmdbuf(drm_device_t * dev,
 		      DRMFILE filp,
-		      drm_file_t* filp_priv,
-		      drm_radeon_cmd_buffer_t* cmdbuf)
+		      drm_file_t * filp_priv, drm_radeon_cmd_buffer_t * cmdbuf)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_device_dma_t *dma = dev->dma;
 	drm_buf_t *buf = NULL;
 	int emit_dispatch_age = 0;
 	int ret = 0;
 
@@ -655,9 +677,9 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
 		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
 		if (ret)
 			goto cleanup;
 	}
 
-	while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
+	while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
 		int idx;
 		drm_r300_cmd_header_t header;
 
@@ -666,14 +688,14 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
 		cmdbuf->buf += sizeof(header);
 		cmdbuf->bufsz -= sizeof(header);
 
-		switch(header.header.cmd_type) {
+		switch (header.header.cmd_type) {
 		case R300_CMD_PACKET0:
 			DRM_DEBUG("R300_CMD_PACKET0\n");
 			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
 			if (ret) {
 				DRM_ERROR("r300_emit_packet0 failed\n");
 				goto cleanup;
 			}
 			break;
 
 		case R300_CMD_VPU:
@@ -682,7 +704,7 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
 			if (ret) {
 				DRM_ERROR("r300_emit_vpu failed\n");
 				goto cleanup;
-			}
+			}
 			break;
 
 		case R300_CMD_PACKET3:
@@ -691,26 +713,26 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
 			if (ret) {
 				DRM_ERROR("r300_emit_packet3 failed\n");
 				goto cleanup;
 			}
 			break;
 
 		case R300_CMD_END3D:
 			DRM_DEBUG("R300_CMD_END3D\n");
 			/* TODO:
 			   Ideally userspace driver should not need to issue this call,
 			   i.e. the drm driver should issue it automatically and prevent
 			   lockups.
 
 			   In practice, we do not understand why this call is needed and what
 			   it does (except for some vague guesses that it has to do with cache
 			   coherence) and so the user space driver does it.
 
 			   Once we are sure which uses prevent lockups the code could be moved
 			   into the kernel and the userspace driver will not
 			   need to use this command.
 
 			   Note that issuing this command does not hurt anything
 			   except, possibly, performance */
 			r300_pacify(dev_priv);
 			break;
 
@@ -722,7 +744,7 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
 				RING_LOCALS;
 
 				BEGIN_RING(header.delay.count);
-				for(i=0;i<header.delay.count;i++)
+				for (i = 0; i < header.delay.count; i++)
 					OUT_RING(RADEON_CP_PACKET2);
 				ADVANCE_RING();
 			}
@@ -730,53 +752,54 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
 
 		case R300_CMD_DMA_DISCARD:
 			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
 			idx = header.dma.buf_idx;
 			if (idx < 0 || idx >= dma->buf_count) {
 				DRM_ERROR("buffer index %d (of %d max)\n",
 					  idx, dma->buf_count - 1);
 				ret = DRM_ERR(EINVAL);
-				goto cleanup;
-			}
-
-			buf = dma->buflist[idx];
-			if (buf->filp != filp || buf->pending) {
-				DRM_ERROR("bad buffer %p %p %d\n",
-					  buf->filp, filp, buf->pending);
-				ret = DRM_ERR(EINVAL);
 				goto cleanup;
 			}
+
+			buf = dma->buflist[idx];
+			if (buf->filp != filp || buf->pending) {
+				DRM_ERROR("bad buffer %p %p %d\n",
+					  buf->filp, filp, buf->pending);
+				ret = DRM_ERR(EINVAL);
+				goto cleanup;
+			}
 
 			emit_dispatch_age = 1;
 			r300_discard_buffer(dev, buf);
 			break;
 
 		case R300_CMD_WAIT:
 			/* simple enough, we can do it here */
 			DRM_DEBUG("R300_CMD_WAIT\n");
-			if(header.wait.flags==0)break; /* nothing to do */
+			if (header.wait.flags == 0)
+				break;	/* nothing to do */
 
 			{
 				RING_LOCALS;
 
 				BEGIN_RING(2);
-				OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
-				OUT_RING( (header.wait.flags & 0xf)<<14 );
+				OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+				OUT_RING((header.wait.flags & 0xf) << 14);
 				ADVANCE_RING();
 			}
 			break;
 
 		default:
 			DRM_ERROR("bad cmd_type %i at %p\n",
 				  header.header.cmd_type,
 				  cmdbuf->buf - sizeof(header));
 			ret = DRM_ERR(EINVAL);
 			goto cleanup;
 		}
 	}
 
 	DRM_DEBUG("END\n");
 
       cleanup:
 	r300_pacify(dev_priv);
 
 	/* We emit the vertex buffer age here, outside the pacifier "brackets"
@@ -792,10 +815,9 @@ cleanup:
 		BEGIN_RING(2);
 		RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
 		ADVANCE_RING();
 	}
 
 	COMMIT_RING();
 
 	return ret;
 }
-