aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorTom St Denis <tom.stdenis@amd.com>2016-09-06 08:42:02 -0400
committerAlex Deucher <alexander.deucher@amd.com>2016-09-12 18:12:20 -0400
commitcb5df31b2d218a0a01952f9551f114b1bc32064c (patch)
tree8ca2e9de2586f8a123e41eb85be5652a339f8ddb /drivers/gpu
parenta3fd80740a659ae1d20d090749958cc06f172b7e (diff)
drm/amd/amdgpu: Tidy up SI DMA code
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c35
1 file changed, 5 insertions, 30 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 377f4ae9e777..2abdaa681797 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -39,16 +39,11 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
39 39
40static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring) 40static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
41{ 41{
42 u32 rptr; 42 return ring->adev->wb.wb[ring->rptr_offs>>2];
43
44 rptr = ring->adev->wb.wb[ring->rptr_offs/4];
45
46 return rptr;
47} 43}
48 44
49static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring) 45static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
50{ 46{
51
52 struct amdgpu_device *adev = ring->adev; 47 struct amdgpu_device *adev = ring->adev;
53 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; 48 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
54 49
@@ -188,7 +183,6 @@ static int si_dma_start(struct amdgpu_device *adev)
188 183
189 ring->wptr = 0; 184 ring->wptr = 0;
190 WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2); 185 WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
191
192 WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); 186 WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
193 187
194 ring->ready = true; 188 ring->ready = true;
@@ -476,11 +470,10 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
476 unsigned vm_id, uint64_t pd_addr) 470 unsigned vm_id, uint64_t pd_addr)
477{ 471{
478 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 472 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
479 if (vm_id < 8) { 473 if (vm_id < 8)
480 amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); 474 amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
481 } else { 475 else
482 amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8))); 476 amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
483 }
484 amdgpu_ring_write(ring, pd_addr >> 12); 477 amdgpu_ring_write(ring, pd_addr >> 12);
485 478
486 /* bits 0-7 are the VM contexts0-7 */ 479 /* bits 0-7 are the VM contexts0-7 */
@@ -558,14 +551,9 @@ static int si_dma_sw_fini(void *handle)
558 551
559static int si_dma_hw_init(void *handle) 552static int si_dma_hw_init(void *handle)
560{ 553{
561 int r;
562 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 554 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
563 555
564 r = si_dma_start(adev); 556 return si_dma_start(adev);
565 if (r)
566 return r;
567
568 return r;
569} 557}
570 558
571static int si_dma_hw_fini(void *handle) 559static int si_dma_hw_fini(void *handle)
@@ -605,13 +593,10 @@ static bool si_dma_is_idle(void *handle)
605static int si_dma_wait_for_idle(void *handle) 593static int si_dma_wait_for_idle(void *handle)
606{ 594{
607 unsigned i; 595 unsigned i;
608 u32 tmp;
609 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 596 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
610 597
611 for (i = 0; i < adev->usec_timeout; i++) { 598 for (i = 0; i < adev->usec_timeout; i++) {
612 tmp = RREG32(SRBM_STATUS2) & (DMA_BUSY_MASK | DMA1_BUSY_MASK); 599 if (si_dma_is_idle(handle))
613
614 if (!tmp)
615 return 0; 600 return 0;
616 udelay(1); 601 udelay(1);
617 } 602 }
@@ -674,11 +659,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
674 struct amdgpu_irq_src *source, 659 struct amdgpu_irq_src *source,
675 struct amdgpu_iv_entry *entry) 660 struct amdgpu_iv_entry *entry)
676{ 661{
677 u8 instance_id, queue_id;
678
679 instance_id = (entry->ring_id & 0x3) >> 0;
680 queue_id = (entry->ring_id & 0xc) >> 2;
681
682 amdgpu_fence_process(&adev->sdma.instance[0].ring); 662 amdgpu_fence_process(&adev->sdma.instance[0].ring);
683 663
684 return 0; 664 return 0;
@@ -688,11 +668,6 @@ static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
688 struct amdgpu_irq_src *source, 668 struct amdgpu_irq_src *source,
689 struct amdgpu_iv_entry *entry) 669 struct amdgpu_iv_entry *entry)
690{ 670{
691 u8 instance_id, queue_id;
692
693 instance_id = (entry->ring_id & 0x3) >> 0;
694 queue_id = (entry->ring_id & 0xc) >> 2;
695
696 amdgpu_fence_process(&adev->sdma.instance[1].ring); 671 amdgpu_fence_process(&adev->sdma.instance[1].ring);
697 672
698 return 0; 673 return 0;