diff options
author | Christian König <christian.koenig@amd.com> | 2013-10-30 11:51:09 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2013-11-01 15:25:52 -0400 |
commit | 24c164393dd2fa1c1fb51d5fec2f50bd6b7c037b (patch) | |
tree | 4eb520dad2e03ef9a0f189696e3907c0d3a8fbef /drivers/gpu | |
parent | 74d360f66b99231ed7007eb197dd18cda72c961c (diff) |
drm/radeon: drop CP page table updates & cleanup v2
The DMA ring seems to be stable now.
v2: remove pt_ring_index as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/radeon/cik.c | 61 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/cik_sdma.c | 21 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/ni.c | 76 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/ni_dma.c | 18 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon.h | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_asic.c | 15 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_asic.h | 31 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_gart.c | 29 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/si.c | 60 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/si_dma.c | 21 |
10 files changed, 73 insertions, 267 deletions
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index afee68a49b3d..2e48f902e3b5 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -67,11 +67,6 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev); | |||
67 | extern int cik_sdma_resume(struct radeon_device *rdev); | 67 | extern int cik_sdma_resume(struct radeon_device *rdev); |
68 | extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); | 68 | extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); |
69 | extern void cik_sdma_fini(struct radeon_device *rdev); | 69 | extern void cik_sdma_fini(struct radeon_device *rdev); |
70 | extern void cik_sdma_vm_set_page(struct radeon_device *rdev, | ||
71 | struct radeon_ib *ib, | ||
72 | uint64_t pe, | ||
73 | uint64_t addr, unsigned count, | ||
74 | uint32_t incr, uint32_t flags); | ||
75 | static void cik_rlc_stop(struct radeon_device *rdev); | 70 | static void cik_rlc_stop(struct radeon_device *rdev); |
76 | static void cik_pcie_gen3_enable(struct radeon_device *rdev); | 71 | static void cik_pcie_gen3_enable(struct radeon_device *rdev); |
77 | static void cik_program_aspm(struct radeon_device *rdev); | 72 | static void cik_program_aspm(struct radeon_device *rdev); |
@@ -4903,62 +4898,6 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
4903 | } | 4898 | } |
4904 | } | 4899 | } |
4905 | 4900 | ||
4906 | /** | ||
4907 | * cik_vm_set_page - update the page tables using sDMA | ||
4908 | * | ||
4909 | * @rdev: radeon_device pointer | ||
4910 | * @ib: indirect buffer to fill with commands | ||
4911 | * @pe: addr of the page entry | ||
4912 | * @addr: dst addr to write into pe | ||
4913 | * @count: number of page entries to update | ||
4914 | * @incr: increase next addr by incr bytes | ||
4915 | * @flags: access flags | ||
4916 | * | ||
4917 | * Update the page tables using CP or sDMA (CIK). | ||
4918 | */ | ||
4919 | void cik_vm_set_page(struct radeon_device *rdev, | ||
4920 | struct radeon_ib *ib, | ||
4921 | uint64_t pe, | ||
4922 | uint64_t addr, unsigned count, | ||
4923 | uint32_t incr, uint32_t flags) | ||
4924 | { | ||
4925 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | ||
4926 | uint64_t value; | ||
4927 | unsigned ndw; | ||
4928 | |||
4929 | if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { | ||
4930 | /* CP */ | ||
4931 | while (count) { | ||
4932 | ndw = 2 + count * 2; | ||
4933 | if (ndw > 0x3FFE) | ||
4934 | ndw = 0x3FFE; | ||
4935 | |||
4936 | ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw); | ||
4937 | ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) | | ||
4938 | WRITE_DATA_DST_SEL(1)); | ||
4939 | ib->ptr[ib->length_dw++] = pe; | ||
4940 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
4941 | for (; ndw > 2; ndw -= 2, --count, pe += 8) { | ||
4942 | if (flags & RADEON_VM_PAGE_SYSTEM) { | ||
4943 | value = radeon_vm_map_gart(rdev, addr); | ||
4944 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
4945 | } else if (flags & RADEON_VM_PAGE_VALID) { | ||
4946 | value = addr; | ||
4947 | } else { | ||
4948 | value = 0; | ||
4949 | } | ||
4950 | addr += incr; | ||
4951 | value |= r600_flags; | ||
4952 | ib->ptr[ib->length_dw++] = value; | ||
4953 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | ||
4954 | } | ||
4955 | } | ||
4956 | } else { | ||
4957 | /* DMA */ | ||
4958 | cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags); | ||
4959 | } | ||
4960 | } | ||
4961 | |||
4962 | /* | 4901 | /* |
4963 | * RLC | 4902 | * RLC |
4964 | * The RLC is a multi-purpose microengine that handles a | 4903 | * The RLC is a multi-purpose microengine that handles a |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index ec91427094e5..8d84ebe2b6fa 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -654,13 +654,12 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev, | |||
654 | uint64_t addr, unsigned count, | 654 | uint64_t addr, unsigned count, |
655 | uint32_t incr, uint32_t flags) | 655 | uint32_t incr, uint32_t flags) |
656 | { | 656 | { |
657 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | ||
658 | uint64_t value; | 657 | uint64_t value; |
659 | unsigned ndw; | 658 | unsigned ndw; |
660 | 659 | ||
661 | trace_radeon_vm_set_page(pe, addr, count, incr, r600_flags); | 660 | trace_radeon_vm_set_page(pe, addr, count, incr, flags); |
662 | 661 | ||
663 | if (flags & RADEON_VM_PAGE_SYSTEM) { | 662 | if (flags & R600_PTE_SYSTEM) { |
664 | while (count) { | 663 | while (count) { |
665 | ndw = count * 2; | 664 | ndw = count * 2; |
666 | if (ndw > 0xFFFFE) | 665 | if (ndw > 0xFFFFE) |
@@ -672,16 +671,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev, | |||
672 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | 671 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); |
673 | ib->ptr[ib->length_dw++] = ndw; | 672 | ib->ptr[ib->length_dw++] = ndw; |
674 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 673 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
675 | if (flags & RADEON_VM_PAGE_SYSTEM) { | 674 | value = radeon_vm_map_gart(rdev, addr); |
676 | value = radeon_vm_map_gart(rdev, addr); | 675 | value &= 0xFFFFFFFFFFFFF000ULL; |
677 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
678 | } else if (flags & RADEON_VM_PAGE_VALID) { | ||
679 | value = addr; | ||
680 | } else { | ||
681 | value = 0; | ||
682 | } | ||
683 | addr += incr; | 676 | addr += incr; |
684 | value |= r600_flags; | 677 | value |= flags; |
685 | ib->ptr[ib->length_dw++] = value; | 678 | ib->ptr[ib->length_dw++] = value; |
686 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | 679 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |
687 | } | 680 | } |
@@ -692,7 +685,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev, | |||
692 | if (ndw > 0x7FFFF) | 685 | if (ndw > 0x7FFFF) |
693 | ndw = 0x7FFFF; | 686 | ndw = 0x7FFFF; |
694 | 687 | ||
695 | if (flags & RADEON_VM_PAGE_VALID) | 688 | if (flags & R600_PTE_VALID) |
696 | value = addr; | 689 | value = addr; |
697 | else | 690 | else |
698 | value = 0; | 691 | value = 0; |
@@ -700,7 +693,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev, | |||
700 | ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); | 693 | ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); |
701 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ | 694 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ |
702 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | 695 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); |
703 | ib->ptr[ib->length_dw++] = r600_flags; /* mask */ | 696 | ib->ptr[ib->length_dw++] = flags; /* mask */ |
704 | ib->ptr[ib->length_dw++] = 0; | 697 | ib->ptr[ib->length_dw++] = 0; |
705 | ib->ptr[ib->length_dw++] = value; /* value */ | 698 | ib->ptr[ib->length_dw++] = value; /* value */ |
706 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | 699 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 93c1f9ef5da9..e299a38e683a 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -174,11 +174,6 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | |||
174 | extern void evergreen_program_aspm(struct radeon_device *rdev); | 174 | extern void evergreen_program_aspm(struct radeon_device *rdev); |
175 | extern void sumo_rlc_fini(struct radeon_device *rdev); | 175 | extern void sumo_rlc_fini(struct radeon_device *rdev); |
176 | extern int sumo_rlc_init(struct radeon_device *rdev); | 176 | extern int sumo_rlc_init(struct radeon_device *rdev); |
177 | extern void cayman_dma_vm_set_page(struct radeon_device *rdev, | ||
178 | struct radeon_ib *ib, | ||
179 | uint64_t pe, | ||
180 | uint64_t addr, unsigned count, | ||
181 | uint32_t incr, uint32_t flags); | ||
182 | 177 | ||
183 | /* Firmware Names */ | 178 | /* Firmware Names */ |
184 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); | 179 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); |
@@ -2399,77 +2394,6 @@ void cayman_vm_decode_fault(struct radeon_device *rdev, | |||
2399 | block, mc_id); | 2394 | block, mc_id); |
2400 | } | 2395 | } |
2401 | 2396 | ||
2402 | #define R600_ENTRY_VALID (1 << 0) | ||
2403 | #define R600_PTE_SYSTEM (1 << 1) | ||
2404 | #define R600_PTE_SNOOPED (1 << 2) | ||
2405 | #define R600_PTE_READABLE (1 << 5) | ||
2406 | #define R600_PTE_WRITEABLE (1 << 6) | ||
2407 | |||
2408 | uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags) | ||
2409 | { | ||
2410 | uint32_t r600_flags = 0; | ||
2411 | r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0; | ||
2412 | r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; | ||
2413 | r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; | ||
2414 | if (flags & RADEON_VM_PAGE_SYSTEM) { | ||
2415 | r600_flags |= R600_PTE_SYSTEM; | ||
2416 | r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; | ||
2417 | } | ||
2418 | return r600_flags; | ||
2419 | } | ||
2420 | |||
2421 | /** | ||
2422 | * cayman_vm_set_page - update the page tables using the CP | ||
2423 | * | ||
2424 | * @rdev: radeon_device pointer | ||
2425 | * @ib: indirect buffer to fill with commands | ||
2426 | * @pe: addr of the page entry | ||
2427 | * @addr: dst addr to write into pe | ||
2428 | * @count: number of page entries to update | ||
2429 | * @incr: increase next addr by incr bytes | ||
2430 | * @flags: access flags | ||
2431 | * | ||
2432 | * Update the page tables using the CP (cayman/TN). | ||
2433 | */ | ||
2434 | void cayman_vm_set_page(struct radeon_device *rdev, | ||
2435 | struct radeon_ib *ib, | ||
2436 | uint64_t pe, | ||
2437 | uint64_t addr, unsigned count, | ||
2438 | uint32_t incr, uint32_t flags) | ||
2439 | { | ||
2440 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | ||
2441 | uint64_t value; | ||
2442 | unsigned ndw; | ||
2443 | |||
2444 | if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { | ||
2445 | while (count) { | ||
2446 | ndw = 1 + count * 2; | ||
2447 | if (ndw > 0x3FFF) | ||
2448 | ndw = 0x3FFF; | ||
2449 | |||
2450 | ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw); | ||
2451 | ib->ptr[ib->length_dw++] = pe; | ||
2452 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; | ||
2453 | for (; ndw > 1; ndw -= 2, --count, pe += 8) { | ||
2454 | if (flags & RADEON_VM_PAGE_SYSTEM) { | ||
2455 | value = radeon_vm_map_gart(rdev, addr); | ||
2456 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
2457 | } else if (flags & RADEON_VM_PAGE_VALID) { | ||
2458 | value = addr; | ||
2459 | } else { | ||
2460 | value = 0; | ||
2461 | } | ||
2462 | addr += incr; | ||
2463 | value |= r600_flags; | ||
2464 | ib->ptr[ib->length_dw++] = value; | ||
2465 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | ||
2466 | } | ||
2467 | } | ||
2468 | } else { | ||
2469 | cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags); | ||
2470 | } | ||
2471 | } | ||
2472 | |||
2473 | /** | 2397 | /** |
2474 | * cayman_vm_flush - vm flush using the CP | 2398 | * cayman_vm_flush - vm flush using the CP |
2475 | * | 2399 | * |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index e9cfe8aaf298..bdeb65ed3658 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c | |||
@@ -246,8 +246,7 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
246 | * @addr: dst addr to write into pe | 246 | * @addr: dst addr to write into pe |
247 | * @count: number of page entries to update | 247 | * @count: number of page entries to update |
248 | * @incr: increase next addr by incr bytes | 248 | * @incr: increase next addr by incr bytes |
249 | * @flags: access flags | 249 | * @flags: hw access flags |
250 | * @r600_flags: hw access flags | ||
251 | * | 250 | * |
252 | * Update the page tables using the DMA (cayman/TN). | 251 | * Update the page tables using the DMA (cayman/TN). |
253 | */ | 252 | */ |
@@ -257,13 +256,12 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev, | |||
257 | uint64_t addr, unsigned count, | 256 | uint64_t addr, unsigned count, |
258 | uint32_t incr, uint32_t flags) | 257 | uint32_t incr, uint32_t flags) |
259 | { | 258 | { |
260 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | ||
261 | uint64_t value; | 259 | uint64_t value; |
262 | unsigned ndw; | 260 | unsigned ndw; |
263 | 261 | ||
264 | trace_radeon_vm_set_page(pe, addr, count, incr, r600_flags); | 262 | trace_radeon_vm_set_page(pe, addr, count, incr, flags); |
265 | 263 | ||
266 | if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) { | 264 | if ((flags & R600_PTE_SYSTEM) || (count == 1)) { |
267 | while (count) { | 265 | while (count) { |
268 | ndw = count * 2; | 266 | ndw = count * 2; |
269 | if (ndw > 0xFFFFE) | 267 | if (ndw > 0xFFFFE) |
@@ -274,16 +272,16 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev, | |||
274 | ib->ptr[ib->length_dw++] = pe; | 272 | ib->ptr[ib->length_dw++] = pe; |
275 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; | 273 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; |
276 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 274 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
277 | if (flags & RADEON_VM_PAGE_SYSTEM) { | 275 | if (flags & R600_PTE_SYSTEM) { |
278 | value = radeon_vm_map_gart(rdev, addr); | 276 | value = radeon_vm_map_gart(rdev, addr); |
279 | value &= 0xFFFFFFFFFFFFF000ULL; | 277 | value &= 0xFFFFFFFFFFFFF000ULL; |
280 | } else if (flags & RADEON_VM_PAGE_VALID) { | 278 | } else if (flags & R600_PTE_VALID) { |
281 | value = addr; | 279 | value = addr; |
282 | } else { | 280 | } else { |
283 | value = 0; | 281 | value = 0; |
284 | } | 282 | } |
285 | addr += incr; | 283 | addr += incr; |
286 | value |= r600_flags; | 284 | value |= flags; |
287 | ib->ptr[ib->length_dw++] = value; | 285 | ib->ptr[ib->length_dw++] = value; |
288 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | 286 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |
289 | } | 287 | } |
@@ -294,7 +292,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev, | |||
294 | if (ndw > 0xFFFFE) | 292 | if (ndw > 0xFFFFE) |
295 | ndw = 0xFFFFE; | 293 | ndw = 0xFFFFE; |
296 | 294 | ||
297 | if (flags & RADEON_VM_PAGE_VALID) | 295 | if (flags & R600_PTE_VALID) |
298 | value = addr; | 296 | value = addr; |
299 | else | 297 | else |
300 | value = 0; | 298 | value = 0; |
@@ -302,7 +300,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev, | |||
302 | ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); | 300 | ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); |
303 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ | 301 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ |
304 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; | 302 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; |
305 | ib->ptr[ib->length_dw++] = r600_flags; /* mask */ | 303 | ib->ptr[ib->length_dw++] = flags; /* mask */ |
306 | ib->ptr[ib->length_dw++] = 0; | 304 | ib->ptr[ib->length_dw++] = 0; |
307 | ib->ptr[ib->length_dw++] = value; /* value */ | 305 | ib->ptr[ib->length_dw++] = value; /* value */ |
308 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | 306 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 14531e155b35..6af0d1022da6 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -832,6 +832,12 @@ struct radeon_mec { | |||
832 | #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1) | 832 | #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1) |
833 | #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK) | 833 | #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK) |
834 | 834 | ||
835 | #define R600_PTE_VALID (1 << 0) | ||
836 | #define R600_PTE_SYSTEM (1 << 1) | ||
837 | #define R600_PTE_SNOOPED (1 << 2) | ||
838 | #define R600_PTE_READABLE (1 << 5) | ||
839 | #define R600_PTE_WRITEABLE (1 << 6) | ||
840 | |||
835 | struct radeon_vm { | 841 | struct radeon_vm { |
836 | struct list_head list; | 842 | struct list_head list; |
837 | struct list_head va; | 843 | struct list_head va; |
@@ -1675,8 +1681,6 @@ struct radeon_asic { | |||
1675 | struct { | 1681 | struct { |
1676 | int (*init)(struct radeon_device *rdev); | 1682 | int (*init)(struct radeon_device *rdev); |
1677 | void (*fini)(struct radeon_device *rdev); | 1683 | void (*fini)(struct radeon_device *rdev); |
1678 | |||
1679 | u32 pt_ring_index; | ||
1680 | void (*set_page)(struct radeon_device *rdev, | 1684 | void (*set_page)(struct radeon_device *rdev, |
1681 | struct radeon_ib *ib, | 1685 | struct radeon_ib *ib, |
1682 | uint64_t pe, | 1686 | uint64_t pe, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index eee5c4a2f166..d4b91675671d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1622,8 +1622,7 @@ static struct radeon_asic cayman_asic = { | |||
1622 | .vm = { | 1622 | .vm = { |
1623 | .init = &cayman_vm_init, | 1623 | .init = &cayman_vm_init, |
1624 | .fini = &cayman_vm_fini, | 1624 | .fini = &cayman_vm_fini, |
1625 | .pt_ring_index = R600_RING_TYPE_DMA_INDEX, | 1625 | .set_page = &cayman_dma_vm_set_page, |
1626 | .set_page = &cayman_vm_set_page, | ||
1627 | }, | 1626 | }, |
1628 | .ring = { | 1627 | .ring = { |
1629 | [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, | 1628 | [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, |
@@ -1723,8 +1722,7 @@ static struct radeon_asic trinity_asic = { | |||
1723 | .vm = { | 1722 | .vm = { |
1724 | .init = &cayman_vm_init, | 1723 | .init = &cayman_vm_init, |
1725 | .fini = &cayman_vm_fini, | 1724 | .fini = &cayman_vm_fini, |
1726 | .pt_ring_index = R600_RING_TYPE_DMA_INDEX, | 1725 | .set_page = &cayman_dma_vm_set_page, |
1727 | .set_page = &cayman_vm_set_page, | ||
1728 | }, | 1726 | }, |
1729 | .ring = { | 1727 | .ring = { |
1730 | [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, | 1728 | [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, |
@@ -1854,8 +1852,7 @@ static struct radeon_asic si_asic = { | |||
1854 | .vm = { | 1852 | .vm = { |
1855 | .init = &si_vm_init, | 1853 | .init = &si_vm_init, |
1856 | .fini = &si_vm_fini, | 1854 | .fini = &si_vm_fini, |
1857 | .pt_ring_index = R600_RING_TYPE_DMA_INDEX, | 1855 | .set_page = &si_dma_vm_set_page, |
1858 | .set_page = &si_vm_set_page, | ||
1859 | }, | 1856 | }, |
1860 | .ring = { | 1857 | .ring = { |
1861 | [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring, | 1858 | [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring, |
@@ -2000,8 +1997,7 @@ static struct radeon_asic ci_asic = { | |||
2000 | .vm = { | 1997 | .vm = { |
2001 | .init = &cik_vm_init, | 1998 | .init = &cik_vm_init, |
2002 | .fini = &cik_vm_fini, | 1999 | .fini = &cik_vm_fini, |
2003 | .pt_ring_index = R600_RING_TYPE_DMA_INDEX, | 2000 | .set_page = &cik_sdma_vm_set_page, |
2004 | .set_page = &cik_vm_set_page, | ||
2005 | }, | 2001 | }, |
2006 | .ring = { | 2002 | .ring = { |
2007 | [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, | 2003 | [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, |
@@ -2100,8 +2096,7 @@ static struct radeon_asic kv_asic = { | |||
2100 | .vm = { | 2096 | .vm = { |
2101 | .init = &cik_vm_init, | 2097 | .init = &cik_vm_init, |
2102 | .fini = &cik_vm_fini, | 2098 | .fini = &cik_vm_fini, |
2103 | .pt_ring_index = R600_RING_TYPE_DMA_INDEX, | 2099 | .set_page = &cik_sdma_vm_set_page, |
2104 | .set_page = &cik_vm_set_page, | ||
2105 | }, | 2100 | }, |
2106 | .ring = { | 2101 | .ring = { |
2107 | [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, | 2102 | [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 86d871bb2f60..f2833ee3a613 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -581,17 +581,18 @@ int cayman_vm_init(struct radeon_device *rdev); | |||
581 | void cayman_vm_fini(struct radeon_device *rdev); | 581 | void cayman_vm_fini(struct radeon_device *rdev); |
582 | void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 582 | void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
583 | uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags); | 583 | uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags); |
584 | void cayman_vm_set_page(struct radeon_device *rdev, | ||
585 | struct radeon_ib *ib, | ||
586 | uint64_t pe, | ||
587 | uint64_t addr, unsigned count, | ||
588 | uint32_t incr, uint32_t flags); | ||
589 | int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); | 584 | int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
590 | int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); | 585 | int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
591 | void cayman_dma_ring_ib_execute(struct radeon_device *rdev, | 586 | void cayman_dma_ring_ib_execute(struct radeon_device *rdev, |
592 | struct radeon_ib *ib); | 587 | struct radeon_ib *ib); |
593 | bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); | 588 | bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
594 | bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); | 589 | bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
590 | void cayman_dma_vm_set_page(struct radeon_device *rdev, | ||
591 | struct radeon_ib *ib, | ||
592 | uint64_t pe, | ||
593 | uint64_t addr, unsigned count, | ||
594 | uint32_t incr, uint32_t flags); | ||
595 | |||
595 | void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 596 | void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
596 | 597 | ||
597 | int ni_dpm_init(struct radeon_device *rdev); | 598 | int ni_dpm_init(struct radeon_device *rdev); |
@@ -653,17 +654,17 @@ int si_irq_set(struct radeon_device *rdev); | |||
653 | int si_irq_process(struct radeon_device *rdev); | 654 | int si_irq_process(struct radeon_device *rdev); |
654 | int si_vm_init(struct radeon_device *rdev); | 655 | int si_vm_init(struct radeon_device *rdev); |
655 | void si_vm_fini(struct radeon_device *rdev); | 656 | void si_vm_fini(struct radeon_device *rdev); |
656 | void si_vm_set_page(struct radeon_device *rdev, | ||
657 | struct radeon_ib *ib, | ||
658 | uint64_t pe, | ||
659 | uint64_t addr, unsigned count, | ||
660 | uint32_t incr, uint32_t flags); | ||
661 | void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 657 | void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
662 | int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); | 658 | int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
663 | int si_copy_dma(struct radeon_device *rdev, | 659 | int si_copy_dma(struct radeon_device *rdev, |
664 | uint64_t src_offset, uint64_t dst_offset, | 660 | uint64_t src_offset, uint64_t dst_offset, |
665 | unsigned num_gpu_pages, | 661 | unsigned num_gpu_pages, |
666 | struct radeon_fence **fence); | 662 | struct radeon_fence **fence); |
663 | void si_dma_vm_set_page(struct radeon_device *rdev, | ||
664 | struct radeon_ib *ib, | ||
665 | uint64_t pe, | ||
666 | uint64_t addr, unsigned count, | ||
667 | uint32_t incr, uint32_t flags); | ||
667 | void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 668 | void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
668 | u32 si_get_xclk(struct radeon_device *rdev); | 669 | u32 si_get_xclk(struct radeon_device *rdev); |
669 | uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); | 670 | uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); |
@@ -735,11 +736,11 @@ int cik_irq_process(struct radeon_device *rdev); | |||
735 | int cik_vm_init(struct radeon_device *rdev); | 736 | int cik_vm_init(struct radeon_device *rdev); |
736 | void cik_vm_fini(struct radeon_device *rdev); | 737 | void cik_vm_fini(struct radeon_device *rdev); |
737 | void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 738 | void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
738 | void cik_vm_set_page(struct radeon_device *rdev, | 739 | void cik_sdma_vm_set_page(struct radeon_device *rdev, |
739 | struct radeon_ib *ib, | 740 | struct radeon_ib *ib, |
740 | uint64_t pe, | 741 | uint64_t pe, |
741 | uint64_t addr, unsigned count, | 742 | uint64_t addr, unsigned count, |
742 | uint32_t incr, uint32_t flags); | 743 | uint32_t incr, uint32_t flags); |
743 | void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); | 744 | void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
744 | int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); | 745 | int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
745 | u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | 746 | u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index b990b1a2bd50..f6947dd2a992 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -914,6 +914,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) | |||
914 | } | 914 | } |
915 | 915 | ||
916 | /** | 916 | /** |
917 | * radeon_vm_page_flags - translate page flags to what the hw uses | ||
918 | * | ||
919 | * @flags: flags coming from userspace | ||
920 | * | ||
921 | * Translate the flags the userspace ABI uses to hw flags. | ||
922 | */ | ||
923 | static uint32_t radeon_vm_page_flags(uint32_t flags) | ||
924 | { | ||
925 | uint32_t hw_flags = 0; | ||
926 | hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; | ||
927 | hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; | ||
928 | hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; | ||
929 | if (flags & RADEON_VM_PAGE_SYSTEM) { | ||
930 | hw_flags |= R600_PTE_SYSTEM; | ||
931 | hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; | ||
932 | } | ||
933 | return hw_flags; | ||
934 | } | ||
935 | |||
936 | /** | ||
917 | * radeon_vm_update_pdes - make sure that page directory is valid | 937 | * radeon_vm_update_pdes - make sure that page directory is valid |
918 | * | 938 | * |
919 | * @rdev: radeon_device pointer | 939 | * @rdev: radeon_device pointer |
@@ -974,7 +994,7 @@ retry: | |||
974 | if (count) { | 994 | if (count) { |
975 | radeon_asic_vm_set_page(rdev, ib, last_pde, | 995 | radeon_asic_vm_set_page(rdev, ib, last_pde, |
976 | last_pt, count, incr, | 996 | last_pt, count, incr, |
977 | RADEON_VM_PAGE_VALID); | 997 | R600_PTE_VALID); |
978 | } | 998 | } |
979 | 999 | ||
980 | count = 1; | 1000 | count = 1; |
@@ -987,7 +1007,7 @@ retry: | |||
987 | 1007 | ||
988 | if (count) { | 1008 | if (count) { |
989 | radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count, | 1009 | radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count, |
990 | incr, RADEON_VM_PAGE_VALID); | 1010 | incr, R600_PTE_VALID); |
991 | 1011 | ||
992 | } | 1012 | } |
993 | 1013 | ||
@@ -1082,7 +1102,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, | |||
1082 | struct radeon_bo *bo, | 1102 | struct radeon_bo *bo, |
1083 | struct ttm_mem_reg *mem) | 1103 | struct ttm_mem_reg *mem) |
1084 | { | 1104 | { |
1085 | unsigned ridx = rdev->asic->vm.pt_ring_index; | ||
1086 | struct radeon_ib ib; | 1105 | struct radeon_ib ib; |
1087 | struct radeon_bo_va *bo_va; | 1106 | struct radeon_bo_va *bo_va; |
1088 | unsigned nptes, npdes, ndw; | 1107 | unsigned nptes, npdes, ndw; |
@@ -1155,7 +1174,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, | |||
1155 | if (ndw > 0xfffff) | 1174 | if (ndw > 0xfffff) |
1156 | return -ENOMEM; | 1175 | return -ENOMEM; |
1157 | 1176 | ||
1158 | r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4); | 1177 | r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); |
1159 | ib.length_dw = 0; | 1178 | ib.length_dw = 0; |
1160 | 1179 | ||
1161 | r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); | 1180 | r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); |
@@ -1165,7 +1184,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, | |||
1165 | } | 1184 | } |
1166 | 1185 | ||
1167 | radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, | 1186 | radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, |
1168 | addr, bo_va->flags); | 1187 | addr, radeon_vm_page_flags(bo_va->flags)); |
1169 | 1188 | ||
1170 | radeon_ib_sync_to(&ib, vm->fence); | 1189 | radeon_ib_sync_to(&ib, vm->fence); |
1171 | r = radeon_ib_schedule(rdev, &ib, NULL); | 1190 | r = radeon_ib_schedule(rdev, &ib, NULL); |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index b2071afa7972..2acfe561796f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -78,11 +78,6 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ | |||
78 | extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); | 78 | extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); |
79 | extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); | 79 | extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); |
80 | extern bool evergreen_is_display_hung(struct radeon_device *rdev); | 80 | extern bool evergreen_is_display_hung(struct radeon_device *rdev); |
81 | extern void si_dma_vm_set_page(struct radeon_device *rdev, | ||
82 | struct radeon_ib *ib, | ||
83 | uint64_t pe, | ||
84 | uint64_t addr, unsigned count, | ||
85 | uint32_t incr, uint32_t flags); | ||
86 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, | 81 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, |
87 | bool enable); | 82 | bool enable); |
88 | 83 | ||
@@ -4662,61 +4657,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev, | |||
4662 | block, mc_id); | 4657 | block, mc_id); |
4663 | } | 4658 | } |
4664 | 4659 | ||
4665 | /** | ||
4666 | * si_vm_set_page - update the page tables using the CP | ||
4667 | * | ||
4668 | * @rdev: radeon_device pointer | ||
4669 | * @ib: indirect buffer to fill with commands | ||
4670 | * @pe: addr of the page entry | ||
4671 | * @addr: dst addr to write into pe | ||
4672 | * @count: number of page entries to update | ||
4673 | * @incr: increase next addr by incr bytes | ||
4674 | * @flags: access flags | ||
4675 | * | ||
4676 | * Update the page tables using the CP (SI). | ||
4677 | */ | ||
4678 | void si_vm_set_page(struct radeon_device *rdev, | ||
4679 | struct radeon_ib *ib, | ||
4680 | uint64_t pe, | ||
4681 | uint64_t addr, unsigned count, | ||
4682 | uint32_t incr, uint32_t flags) | ||
4683 | { | ||
4684 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | ||
4685 | uint64_t value; | ||
4686 | unsigned ndw; | ||
4687 | |||
4688 | if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { | ||
4689 | while (count) { | ||
4690 | ndw = 2 + count * 2; | ||
4691 | if (ndw > 0x3FFE) | ||
4692 | ndw = 0x3FFE; | ||
4693 | |||
4694 | ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw); | ||
4695 | ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) | | ||
4696 | WRITE_DATA_DST_SEL(1)); | ||
4697 | ib->ptr[ib->length_dw++] = pe; | ||
4698 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
4699 | for (; ndw > 2; ndw -= 2, --count, pe += 8) { | ||
4700 | if (flags & RADEON_VM_PAGE_SYSTEM) { | ||
4701 | value = radeon_vm_map_gart(rdev, addr); | ||
4702 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
4703 | } else if (flags & RADEON_VM_PAGE_VALID) { | ||
4704 | value = addr; | ||
4705 | } else { | ||
4706 | value = 0; | ||
4707 | } | ||
4708 | addr += incr; | ||
4709 | value |= r600_flags; | ||
4710 | ib->ptr[ib->length_dw++] = value; | ||
4711 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | ||
4712 | } | ||
4713 | } | ||
4714 | } else { | ||
4715 | /* DMA */ | ||
4716 | si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags); | ||
4717 | } | ||
4718 | } | ||
4719 | |||
4720 | void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | 4660 | void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
4721 | { | 4661 | { |
4722 | struct radeon_ring *ring = &rdev->ring[ridx]; | 4662 | struct radeon_ring *ring = &rdev->ring[ridx]; |
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 17205fd2643a..8e8f46133532 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
@@ -76,13 +76,12 @@ void si_dma_vm_set_page(struct radeon_device *rdev, | |||
76 | uint64_t addr, unsigned count, | 76 | uint64_t addr, unsigned count, |
77 | uint32_t incr, uint32_t flags) | 77 | uint32_t incr, uint32_t flags) |
78 | { | 78 | { |
79 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | ||
80 | uint64_t value; | 79 | uint64_t value; |
81 | unsigned ndw; | 80 | unsigned ndw; |
82 | 81 | ||
83 | trace_radeon_vm_set_page(pe, addr, count, incr, r600_flags); | 82 | trace_radeon_vm_set_page(pe, addr, count, incr, flags); |
84 | 83 | ||
85 | if (flags & RADEON_VM_PAGE_SYSTEM) { | 84 | if (flags & R600_PTE_SYSTEM) { |
86 | while (count) { | 85 | while (count) { |
87 | ndw = count * 2; | 86 | ndw = count * 2; |
88 | if (ndw > 0xFFFFE) | 87 | if (ndw > 0xFFFFE) |
@@ -93,16 +92,10 @@ void si_dma_vm_set_page(struct radeon_device *rdev, | |||
93 | ib->ptr[ib->length_dw++] = pe; | 92 | ib->ptr[ib->length_dw++] = pe; |
94 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; | 93 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; |
95 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 94 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
96 | if (flags & RADEON_VM_PAGE_SYSTEM) { | 95 | value = radeon_vm_map_gart(rdev, addr); |
97 | value = radeon_vm_map_gart(rdev, addr); | 96 | value &= 0xFFFFFFFFFFFFF000ULL; |
98 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
99 | } else if (flags & RADEON_VM_PAGE_VALID) { | ||
100 | value = addr; | ||
101 | } else { | ||
102 | value = 0; | ||
103 | } | ||
104 | addr += incr; | 97 | addr += incr; |
105 | value |= r600_flags; | 98 | value |= flags; |
106 | ib->ptr[ib->length_dw++] = value; | 99 | ib->ptr[ib->length_dw++] = value; |
107 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | 100 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |
108 | } | 101 | } |
@@ -113,7 +106,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev, | |||
113 | if (ndw > 0xFFFFE) | 106 | if (ndw > 0xFFFFE) |
114 | ndw = 0xFFFFE; | 107 | ndw = 0xFFFFE; |
115 | 108 | ||
116 | if (flags & RADEON_VM_PAGE_VALID) | 109 | if (flags & R600_PTE_VALID) |
117 | value = addr; | 110 | value = addr; |
118 | else | 111 | else |
119 | value = 0; | 112 | value = 0; |
@@ -121,7 +114,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev, | |||
121 | ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); | 114 | ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); |
122 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ | 115 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ |
123 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; | 116 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; |
124 | ib->ptr[ib->length_dw++] = r600_flags; /* mask */ | 117 | ib->ptr[ib->length_dw++] = flags; /* mask */ |
125 | ib->ptr[ib->length_dw++] = 0; | 118 | ib->ptr[ib->length_dw++] = 0; |
126 | ib->ptr[ib->length_dw++] = value; /* value */ | 119 | ib->ptr[ib->length_dw++] = value; /* value */ |
127 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | 120 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |