path: root/drivers/gpu/drm/radeon/radeon_cs.c
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cs.c')
 -rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	253
 1 file changed, 203 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6559cc455135..4d595403b50c 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -90,11 +90,32 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	return radeon_bo_list_validate(&p->validated);
 }
 
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+	p->priority = priority;
+
+	switch (ring) {
+	default:
+		DRM_ERROR("unknown ring id: %d\n", ring);
+		return -EINVAL;
+	case RADEON_CS_RING_GFX:
+		p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	case RADEON_CS_RING_COMPUTE:
+		/* for now */
+		p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	}
+	return 0;
+}
+
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
 	struct drm_radeon_cs *cs = data;
 	uint64_t *chunk_array_ptr;
-	unsigned size, i, flags = 0;
+	unsigned size, i;
+	u32 ring = RADEON_CS_RING_GFX;
+	s32 priority = 0;
 
 	if (!cs->num_chunks) {
 		return 0;
@@ -104,6 +125,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	p->idx = 0;
 	p->chunk_ib_idx = -1;
 	p->chunk_relocs_idx = -1;
+	p->chunk_flags_idx = -1;
 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (p->chunks_array == NULL) {
 		return -ENOMEM;
@@ -113,6 +135,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			       sizeof(uint64_t)*cs->num_chunks)) {
 		return -EFAULT;
 	}
+	p->cs_flags = 0;
 	p->nchunks = cs->num_chunks;
 	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
 	if (p->chunks == NULL) {
@@ -141,16 +164,19 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
-		    !p->chunks[i].length_dw) {
-			return -EINVAL;
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags_idx = i;
+			/* zero length flags aren't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
 		}
 
 		p->chunks[i].length_dw = user_chunk.length_dw;
 		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
 
 		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
-		if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
+		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
 			size = p->chunks[i].length_dw * sizeof(uint32_t);
 			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
 			if (p->chunks[i].kdata == NULL) {
@@ -161,29 +187,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 				return -EFAULT;
 			}
 			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-				flags = p->chunks[i].kdata[0];
+				p->cs_flags = p->chunks[i].kdata[0];
+				if (p->chunks[i].length_dw > 1)
+					ring = p->chunks[i].kdata[1];
+				if (p->chunks[i].length_dw > 2)
+					priority = (s32)p->chunks[i].kdata[2];
 			}
-		} else {
-			p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-			p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-			if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
-				kfree(p->chunks[i].kpage[0]);
-				kfree(p->chunks[i].kpage[1]);
-				return -ENOMEM;
-			}
-			p->chunks[i].kpage_idx[0] = -1;
-			p->chunks[i].kpage_idx[1] = -1;
-			p->chunks[i].last_copied_page = -1;
-			p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
 		}
 	}
-	if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
-		DRM_ERROR("cs IB too big: %d\n",
-			  p->chunks[p->chunk_ib_idx].length_dw);
+
+	if ((p->cs_flags & RADEON_CS_USE_VM) &&
+	    (p->rdev->family < CHIP_CAYMAN)) {
+		DRM_ERROR("VM not supported on asic!\n");
+		if (p->chunk_relocs_idx != -1)
+			kfree(p->chunks[p->chunk_relocs_idx].kdata);
+		if (p->chunk_flags_idx != -1)
+			kfree(p->chunks[p->chunk_flags_idx].kdata);
 		return -EINVAL;
 	}
 
-	p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
+	if (radeon_cs_get_ring(p, ring, priority)) {
+		if (p->chunk_relocs_idx != -1)
+			kfree(p->chunks[p->chunk_relocs_idx].kdata);
+		if (p->chunk_flags_idx != -1)
+			kfree(p->chunks[p->chunk_flags_idx].kdata);
+		return -EINVAL;
+	}
+
+
+	/* deal with non-vm */
+	if ((p->chunk_ib_idx != -1) &&
+	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+			DRM_ERROR("cs IB too big: %d\n",
+				  p->chunks[p->chunk_ib_idx].length_dw);
+			return -EINVAL;
+		}
+		p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+		    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+			kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+			kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+			return -ENOMEM;
+		}
+		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+		p->chunks[p->chunk_ib_idx].last_page_index =
+			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+	}
+
 	return 0;
 }
 
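The flags chunk parsed above is a small dword array supplied by userspace: dword 0 carries the CS flags (e.g. RADEON_CS_USE_VM), dword 1 the requested ring, and dword 2 a signed priority; shorter chunks fall back to RADEON_CS_RING_GFX and priority 0. A minimal userspace sketch of filling that chunk, assuming the radeon UAPI definitions from radeon_drm.h that this patch extends (the helper name is illustrative):

#include <stdint.h>
#include <drm/radeon_drm.h>	/* assumed UAPI header */

/* The payload must stay valid until the CS ioctl returns: the kernel
 * copies it into kdata with copy_from_user() during parser init. */
static void fill_cs_flags_chunk(struct drm_radeon_cs_chunk *chunk,
				uint32_t payload[3])
{
	payload[0] = RADEON_CS_USE_VM;		/* becomes p->cs_flags */
	payload[1] = RADEON_CS_RING_GFX;	/* checked by radeon_cs_get_ring() */
	payload[2] = 0;				/* read as (s32) priority */

	chunk->chunk_id   = RADEON_CHUNK_ID_FLAGS;
	chunk->length_dw  = 3;
	chunk->chunk_data = (uint64_t)(uintptr_t)payload;
}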
@@ -225,11 +280,131 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	radeon_ib_free(parser->rdev, &parser->ib);
 }
 
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+			      struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM)
+		return 0;
+
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	/* Copy the packet into the IB, the parser will read from the
+	 * input memory (cached) and write to the IB (which can be
+	 * uncached).
+	 */
+	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+			  ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib->length_dw = ib_chunk->length_dw;
+	r = radeon_cs_parse(parser);
+	if (r || parser->parser_error) {
+		DRM_ERROR("Invalid command stream !\n");
+		return r;
+	}
+	r = radeon_cs_finish_pages(parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		return r;
+	}
+	parser->ib->vm_id = 0;
+	r = radeon_ib_schedule(rdev, parser->ib);
+	if (r) {
+		DRM_ERROR("Failed to schedule IB !\n");
+	}
+	return 0;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+				   struct radeon_vm *vm)
+{
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
+	int r;
+
+	list_for_each_entry(lobj, &parser->validated, tv.head) {
+		bo = lobj->bo;
+		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+				 struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+
+	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+		return 0;
+
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+			  ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib->length_dw = ib_chunk->length_dw;
+	/* Copy the packet into the IB */
+	if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+			       ib_chunk->length_dw * 4)) {
+		return -EFAULT;
+	}
+	r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+	if (r) {
+		return r;
+	}
+
+	mutex_lock(&vm->mutex);
+	r = radeon_vm_bind(rdev, vm);
+	if (r) {
+		goto out;
+	}
+	r = radeon_bo_vm_update_pte(parser, vm);
+	if (r) {
+		goto out;
+	}
+	parser->ib->vm_id = vm->id;
+	/* ib pool is bound at 0 in virtual address space, so gpu_addr is the
+	 * offset inside the pool bo
+	 */
+	parser->ib->gpu_addr = parser->ib->sa_bo.offset;
+	r = radeon_ib_schedule(rdev, parser->ib);
+out:
+	if (!r) {
+		if (vm->fence) {
+			radeon_fence_unref(&vm->fence);
+		}
+		vm->fence = radeon_fence_ref(parser->ib->fence);
+	}
+	mutex_unlock(&fpriv->vm.mutex);
+	return r;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_cs_parser parser;
-	struct radeon_cs_chunk *ib_chunk;
 	int r;
 
 	radeon_mutex_lock(&rdev->cs_mutex);
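Both helpers above filter on chunk_ib_idx and cs_flags and return 0 when the submission is not theirs, so the reworked ioctl below can call them back to back. A hypothetical condensation of that contract, not code from the patch (radeon_cs_submit is an invented name):

/* Illustrative only: for a CS with an IB chunk, exactly one helper
 * acts; the other is a no-op. */
static int radeon_cs_submit(struct radeon_device *rdev,
			    struct radeon_cs_parser *p)
{
	int r;

	r = radeon_cs_ib_chunk(rdev, p);	/* no-op when RADEON_CS_USE_VM is set */
	if (r)
		return r;
	return radeon_cs_ib_vm_chunk(rdev, p);	/* no-op when RADEON_CS_USE_VM is clear */
}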
@@ -246,15 +421,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
-	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
-	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &parser.ib,
-			  ib_chunk->length_dw * 4);
-	if (r) {
-		DRM_ERROR("Failed to get ib !\n");
-		radeon_cs_parser_fini(&parser, r);
-		radeon_mutex_unlock(&rdev->cs_mutex);
-		return r;
-	}
 	r = radeon_cs_parser_relocs(&parser);
 	if (r) {
 		if (r != -ERESTARTSYS)
@@ -263,28 +429,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		radeon_mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
-	/* Copy the packet into the IB, the parser will read from the
-	 * input memory (cached) and write to the IB (which can be
-	 * uncached). */
-	parser.ib->length_dw = ib_chunk->length_dw;
-	r = radeon_cs_parse(&parser);
-	if (r || parser.parser_error) {
-		DRM_ERROR("Invalid command stream !\n");
-		radeon_cs_parser_fini(&parser, r);
-		radeon_mutex_unlock(&rdev->cs_mutex);
-		return r;
-	}
-	r = radeon_cs_finish_pages(&parser);
+	r = radeon_cs_ib_chunk(rdev, &parser);
 	if (r) {
-		DRM_ERROR("Invalid command stream !\n");
-		radeon_cs_parser_fini(&parser, r);
-		radeon_mutex_unlock(&rdev->cs_mutex);
-		return r;
+		goto out;
 	}
-	r = radeon_ib_schedule(rdev, parser.ib);
+	r = radeon_cs_ib_vm_chunk(rdev, &parser);
 	if (r) {
-		DRM_ERROR("Failed to schedule IB !\n");
+		goto out;
 	}
+out:
 	radeon_cs_parser_fini(&parser, r);
 	radeon_mutex_unlock(&rdev->cs_mutex);
 	return r;
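
Taken together, a submission is now a chunk array holding at least an IB chunk plus, optionally, the new flags chunk. A hedged end-to-end sketch from the userspace side, assuming libdrm's drmCommandWriteRead() and the radeon UAPI structs; a real submission also carries a RADEON_CHUNK_ID_RELOCS chunk and error handling, both omitted here, and the helper name is illustrative:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include <drm/radeon_drm.h>	/* assumed UAPI header */

/* Sketch: submit one IB on the GFX ring with default priority. */
static int submit_ib(int fd, uint32_t *ib, uint32_t ib_dw)
{
	uint32_t flags[3] = { 0, RADEON_CS_RING_GFX, 0 };
	struct drm_radeon_cs_chunk chunks[2];
	uint64_t chunk_array[2];
	struct drm_radeon_cs cs;

	chunks[0].chunk_id   = RADEON_CHUNK_ID_IB;
	chunks[0].length_dw  = ib_dw;
	chunks[0].chunk_data = (uint64_t)(uintptr_t)ib;

	chunks[1].chunk_id   = RADEON_CHUNK_ID_FLAGS;
	chunks[1].length_dw  = 3;
	chunks[1].chunk_data = (uint64_t)(uintptr_t)flags;

	/* the kernel reads an array of user pointers to the chunks */
	chunk_array[0] = (uint64_t)(uintptr_t)&chunks[0];
	chunk_array[1] = (uint64_t)(uintptr_t)&chunks[1];

	memset(&cs, 0, sizeof(cs));
	cs.num_chunks = 2;
	cs.chunks = (uint64_t)(uintptr_t)chunk_array;

	return drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
}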