Diffstat (limited to 'drivers/gpu/drm/radeon/ni_dma.c')
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c  178
1 file changed, 123 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 6378e0276691..8a3e6221cece 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+                              struct radeon_ib *ib,
+                              uint64_t pe, uint64_t src,
+                              unsigned count)
+{
+        unsigned ndw;
+
+        while (count) {
+                ndw = count * 2;
+                if (ndw > 0xFFFFE)
+                        ndw = 0xFFFFE;
+
+                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                                      0, 0, ndw);
+                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+                ib->ptr[ib->length_dw++] = lower_32_bits(src);
+                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+                ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+                pe += ndw * 4;
+                src += ndw * 4;
+                count -= ndw / 2;
+        }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -315,71 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
  * @flags: hw access flags
  *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
  */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                            struct radeon_ib *ib,
-                            uint64_t pe,
-                            uint64_t addr, unsigned count,
-                            uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+                               struct radeon_ib *ib,
+                               uint64_t pe,
+                               uint64_t addr, unsigned count,
+                               uint32_t incr, uint32_t flags)
 {
         uint64_t value;
         unsigned ndw;
 
-        trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-        if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
-                while (count) {
-                        ndw = count * 2;
-                        if (ndw > 0xFFFFE)
-                                ndw = 0xFFFFE;
-
-                        /* for non-physically contiguous pages (system) */
-                        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
-                        ib->ptr[ib->length_dw++] = pe;
-                        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                                if (flags & R600_PTE_SYSTEM) {
-                                        value = radeon_vm_map_gart(rdev, addr);
-                                        value &= 0xFFFFFFFFFFFFF000ULL;
-                                } else if (flags & R600_PTE_VALID) {
-                                        value = addr;
-                                } else {
-                                        value = 0;
-                                }
-                                addr += incr;
-                                value |= flags;
-                                ib->ptr[ib->length_dw++] = value;
-                                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                        }
-                }
-        } else {
-                while (count) {
-                        ndw = count * 2;
-                        if (ndw > 0xFFFFE)
-                                ndw = 0xFFFFE;
-
-                        if (flags & R600_PTE_VALID)
+        while (count) {
+                ndw = count * 2;
+                if (ndw > 0xFFFFE)
+                        ndw = 0xFFFFE;
+
+                /* for non-physically contiguous pages (system) */
+                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+                                                      0, 0, ndw);
+                ib->ptr[ib->length_dw++] = pe;
+                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                        if (flags & R600_PTE_SYSTEM) {
+                                value = radeon_vm_map_gart(rdev, addr);
+                                value &= 0xFFFFFFFFFFFFF000ULL;
+                        } else if (flags & R600_PTE_VALID) {
                                 value = addr;
-                        else
+                        } else {
                                 value = 0;
-                        /* for physically contiguous pages (vram) */
-                        ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-                        ib->ptr[ib->length_dw++] = pe; /* dst addr */
-                        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                        ib->ptr[ib->length_dw++] = flags; /* mask */
-                        ib->ptr[ib->length_dw++] = 0;
-                        ib->ptr[ib->length_dw++] = value; /* value */
+                        }
+                        addr += incr;
+                        value |= flags;
+                        ib->ptr[ib->length_dw++] = value;
                         ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                        ib->ptr[ib->length_dw++] = incr; /* increment size */
-                        ib->ptr[ib->length_dw++] = 0;
-                        pe += ndw * 4;
-                        addr += (ndw / 2) * incr;
-                        count -= ndw / 2;
                 }
         }
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags)
+{
+        uint64_t value;
+        unsigned ndw;
+
+        while (count) {
+                ndw = count * 2;
+                if (ndw > 0xFFFFE)
+                        ndw = 0xFFFFE;
+
+                if (flags & R600_PTE_VALID)
+                        value = addr;
+                else
+                        value = 0;
+
+                /* for physically contiguous pages (vram) */
+                ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+                ib->ptr[ib->length_dw++] = pe; /* dst addr */
+                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+                ib->ptr[ib->length_dw++] = flags; /* mask */
+                ib->ptr[ib->length_dw++] = 0;
+                ib->ptr[ib->length_dw++] = value; /* value */
+                ib->ptr[ib->length_dw++] = upper_32_bits(value);
+                ib->ptr[ib->length_dw++] = incr; /* increment size */
+                ib->ptr[ib->length_dw++] = 0;
+
+                pe += ndw * 4;
+                addr += (ndw / 2) * incr;
+                count -= ndw / 2;
+        }
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
         while (ib->length_dw & 0x7)
                 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
 }
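
Note: the three helpers introduced above share the same chunking loop. A single DMA packet carries at most 0xFFFFE dwords of PTE payload, and each 64-bit entry takes two dwords, so at most 0x7FFFF entries fit per packet; larger updates are split across several packets. The following standalone sketch is not part of the patch (the helper name packets_needed is made up for illustration); it just mirrors that loop and prints how many packets a given number of page-table entries would need.

#include <stdio.h>

/* Illustrative only: mirrors the ndw chunking shared by
 * cayman_dma_vm_copy_pages/write_pages/set_pages. Each packet
 * covers at most 0xFFFFE dwords, i.e. 0x7FFFF 64-bit PTEs. */
static unsigned packets_needed(unsigned count)
{
        unsigned packets = 0;

        while (count) {
                unsigned ndw = count * 2;

                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                packets++;
                count -= ndw / 2;
        }
        return packets;
}

int main(void)
{
        /* 1 GiB and 4 GiB of 4 KiB pages as example workloads */
        printf("262144 PTEs  -> %u DMA packet(s)\n", packets_needed(262144u));
        printf("1048576 PTEs -> %u DMA packet(s)\n", packets_needed(1048576u));
        return 0;
}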